1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2022 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
37 #ifndef INFER_ADDR_PREFIX
38 #define INFER_ADDR_PREFIX 1
42 #define DEFAULT_ARCH "i386"
47 #define INLINE __inline__
53 /* Prefixes will be emitted in the order defined below.
54 WAIT_PREFIX must be the first prefix since FWAIT really is an
55 instruction, and so must come before any prefixes.
56 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
57 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
63 #define HLE_PREFIX REP_PREFIX
64 #define BND_PREFIX REP_PREFIX
66 #define REX_PREFIX 6 /* must come last. */
67 #define MAX_PREFIXES 7 /* max prefixes per opcode */
/* We define the syntax here (modulo base, index, scale syntax).  */
#define REGISTER_PREFIX '%'
#define IMMEDIATE_PREFIX '$'
#define ABSOLUTE_PREFIX '*'

/* These are the instruction mnemonic suffixes in AT&T syntax or
   memory operand size in Intel syntax.  */
#define WORD_MNEM_SUFFIX  'w'
#define BYTE_MNEM_SUFFIX  'b'
#define SHORT_MNEM_SUFFIX 's'
#define LONG_MNEM_SUFFIX  'l'
#define QWORD_MNEM_SUFFIX 'q'
/* Intel Syntax.  Use a non-ascii letter since it never appears
   in an insn written by the user.  */
#define LONG_DOUBLE_MNEM_SUFFIX '\1'

#define END_OF_INSN '\0'
87 /* This matches the C -> StaticRounding alias in the opcode table. */
88 #define commutative staticrounding
91 'templates' is for grouping together 'template' structures for opcodes
92 of the same name. This is only used for storing the insns in the grand
93 ole hash table of insns.
94 The templates themselves start at START and range up to (but not including)
99 const insn_template
*start
;
100 const insn_template
*end
;
104 /* 386 operand encoding bytes: see 386 book for details of this. */
107 unsigned int regmem
; /* codes register or memory operand */
108 unsigned int reg
; /* codes register operand (or extended opcode) */
109 unsigned int mode
; /* how to interpret regmem & reg */
113 /* x86-64 extension prefix. */
114 typedef int rex_byte
;
116 /* 386 opcode byte to code indirect addressing. */
125 /* x86 arch names, types and features */
128 const char *name
; /* arch name */
129 unsigned int len
; /* arch string length */
130 enum processor_type type
; /* arch type */
131 i386_cpu_flags flags
; /* cpu feature flags */
132 unsigned int skip
; /* show_arch should skip this. */
136 /* Used to turn off indicated flags. */
139 const char *name
; /* arch name */
140 unsigned int len
; /* arch string length */
141 i386_cpu_flags flags
; /* cpu feature flags */
/* Forward declarations for local pseudo-op / mode-switch handlers;
   the definitions appear later in this file.  */
static void update_code_flag (int, int);
static void set_code_flag (int);
static void set_16bit_gcc_code_flag (int);
static void set_intel_syntax (int);
static void set_intel_mnemonic (int);
static void set_allow_index_reg (int);
static void set_check (int);
static void set_cpu_arch (int);
154 static void pe_directive_secrel (int);
156 static void signed_cons (int);
157 static char *output_invalid (int c
);
158 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
160 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
162 static int i386_att_operand (char *);
163 static int i386_intel_operand (char *, int);
164 static int i386_intel_simplify (expressionS
*);
165 static int i386_intel_parse_name (const char *, expressionS
*);
166 static const reg_entry
*parse_register (char *, char **);
167 static char *parse_insn (char *, char *);
168 static char *parse_operands (char *, const char *);
169 static void swap_operands (void);
170 static void swap_2_operands (unsigned int, unsigned int);
171 static enum flag_code
i386_addressing_mode (void);
172 static void optimize_imm (void);
173 static void optimize_disp (void);
174 static const insn_template
*match_template (char);
175 static int check_string (void);
176 static int process_suffix (void);
177 static int check_byte_reg (void);
178 static int check_long_reg (void);
179 static int check_qword_reg (void);
180 static int check_word_reg (void);
181 static int finalize_imm (void);
182 static int process_operands (void);
183 static const reg_entry
*build_modrm_byte (void);
184 static void output_insn (void);
185 static void output_imm (fragS
*, offsetT
);
186 static void output_disp (fragS
*, offsetT
);
188 static void s_bss (int);
190 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
191 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
193 /* GNU_PROPERTY_X86_ISA_1_USED. */
194 static unsigned int x86_isa_1_used
;
195 /* GNU_PROPERTY_X86_FEATURE_2_USED. */
196 static unsigned int x86_feature_2_used
;
197 /* Generate x86 used ISA and feature properties. */
198 static unsigned int x86_used_note
= DEFAULT_X86_USED_NOTE
;
201 static const char *default_arch
= DEFAULT_ARCH
;
203 /* parse_register() returns this when a register alias cannot be used. */
204 static const reg_entry bad_reg
= { "<bad>", OPERAND_TYPE_NONE
, 0, 0,
205 { Dw2Inval
, Dw2Inval
} };
207 static const reg_entry
*reg_eax
;
208 static const reg_entry
*reg_ds
;
209 static const reg_entry
*reg_es
;
210 static const reg_entry
*reg_ss
;
211 static const reg_entry
*reg_st0
;
212 static const reg_entry
*reg_k0
;
217 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
218 unsigned char bytes
[4];
220 /* Destination or source register specifier. */
221 const reg_entry
*register_specifier
;
224 /* 'md_assemble ()' gathers together information and puts it into a
231 const reg_entry
*regs
;
236 operand_size_mismatch
,
237 operand_type_mismatch
,
238 register_type_mismatch
,
239 number_of_operands_mismatch
,
240 invalid_instruction_suffix
,
242 unsupported_with_intel_mnemonic
,
246 invalid_vsib_address
,
247 invalid_vector_register_set
,
248 invalid_tmm_register_set
,
249 invalid_dest_and_src_register_set
,
250 unsupported_vector_index_register
,
251 unsupported_broadcast
,
254 mask_not_on_destination
,
257 rc_sae_operand_not_last_imm
,
258 invalid_register_operand
,
263 /* TM holds the template for the insn were currently assembling. */
266 /* SUFFIX holds the instruction size suffix for byte, word, dword
267 or qword, if given. */
270 /* OPCODE_LENGTH holds the number of base opcode bytes. */
271 unsigned char opcode_length
;
273 /* OPERANDS gives the number of given operands. */
274 unsigned int operands
;
276 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
277 of given register, displacement, memory operands and immediate
279 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
281 /* TYPES [i] is the type (see above #defines) which tells us how to
282 use OP[i] for the corresponding operand. */
283 i386_operand_type types
[MAX_OPERANDS
];
285 /* Displacement expression, immediate expression, or register for each
287 union i386_op op
[MAX_OPERANDS
];
289 /* Flags for operands. */
290 unsigned int flags
[MAX_OPERANDS
];
291 #define Operand_PCrel 1
292 #define Operand_Mem 2
294 /* Relocation type for operand */
295 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
297 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
298 the base index byte below. */
299 const reg_entry
*base_reg
;
300 const reg_entry
*index_reg
;
301 unsigned int log2_scale_factor
;
303 /* SEG gives the seg_entries of this insn. They are zero unless
304 explicit segment overrides are given. */
305 const reg_entry
*seg
[2];
307 /* Copied first memory operand string, for re-checking. */
310 /* PREFIX holds all the given prefix opcodes (usually null).
311 PREFIXES is the number of prefix opcodes. */
312 unsigned int prefixes
;
313 unsigned char prefix
[MAX_PREFIXES
];
315 /* Register is in low 3 bits of opcode. */
318 /* The operand to a branch insn indicates an absolute branch. */
321 /* Extended states. */
329 xstate_ymm
= 1 << 2 | xstate_xmm
,
331 xstate_zmm
= 1 << 3 | xstate_ymm
,
334 /* Use MASK state. */
338 /* Has GOTPC or TLS relocation. */
339 bool has_gotpc_tls_reloc
;
341 /* RM and SIB are the modrm byte and the sib byte where the
342 addressing modes of this insn are encoded. */
349 /* Masking attributes.
351 The struct describes masking, applied to OPERAND in the instruction.
352 REG is a pointer to the corresponding mask register. ZEROING tells
353 whether merging or zeroing mask is used. */
354 struct Mask_Operation
356 const reg_entry
*reg
;
357 unsigned int zeroing
;
358 /* The operand where this operation is associated. */
359 unsigned int operand
;
362 /* Rounding control and SAE attributes. */
375 unsigned int operand
;
378 /* Broadcasting attributes.
380 The struct describes broadcasting, applied to OPERAND. TYPE is
381 expresses the broadcast factor. */
382 struct Broadcast_Operation
384 /* Type of broadcast: {1to2}, {1to4}, {1to8}, {1to16} or {1to32}. */
387 /* Index of broadcasted operand. */
388 unsigned int operand
;
390 /* Number of bytes to broadcast. */
394 /* Compressed disp8*N attribute. */
395 unsigned int memshift
;
397 /* Prefer load or store in encoding. */
400 dir_encoding_default
= 0,
406 /* Prefer 8bit, 16bit, 32bit displacement in encoding. */
409 disp_encoding_default
= 0,
415 /* Prefer the REX byte in encoding. */
418 /* Disable instruction size optimization. */
421 /* How to encode vector instructions. */
424 vex_encoding_default
= 0,
432 const char *rep_prefix
;
435 const char *hle_prefix
;
437 /* Have BND prefix. */
438 const char *bnd_prefix
;
440 /* Have NOTRACK prefix. */
441 const char *notrack_prefix
;
444 enum i386_error error
;
447 typedef struct _i386_insn i386_insn
;
449 /* Link RC type with corresponding string, that'll be looked for in
458 static const struct RC_name RC_NamesTable
[] =
460 { rne
, STRING_COMMA_LEN ("rn-sae") },
461 { rd
, STRING_COMMA_LEN ("rd-sae") },
462 { ru
, STRING_COMMA_LEN ("ru-sae") },
463 { rz
, STRING_COMMA_LEN ("rz-sae") },
464 { saeonly
, STRING_COMMA_LEN ("sae") },
467 /* List of chars besides those in app.c:symbol_chars that can start an
468 operand. Used to prevent the scrubber eating vital white-space. */
469 const char extra_symbol_chars
[] = "*%-([{}"
478 #if ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
479 && !defined (TE_GNU) \
480 && !defined (TE_LINUX) \
481 && !defined (TE_Haiku) \
482 && !defined (TE_FreeBSD) \
483 && !defined (TE_DragonFly) \
484 && !defined (TE_NetBSD))
485 /* This array holds the chars that always start a comment. If the
486 pre-processor is disabled, these aren't very useful. The option
487 --divide will remove '/' from this list. */
488 const char *i386_comment_chars
= "#/";
489 #define SVR4_COMMENT_CHARS 1
490 #define PREFIX_SEPARATOR '\\'
493 const char *i386_comment_chars
= "#";
494 #define PREFIX_SEPARATOR '/'
/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.
   Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.
   Also note that comments started like this one will always work if
   '/' isn't otherwise defined.  */
const char line_comment_chars[] = "#/";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant from exp in floating point
   numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
const char FLT_CHARS[] = "fFdDxXhHbB";
/* Tables for lexical analysis.  Each table maps an (unsigned) char to
   nonzero when the character may appear in the corresponding token
   class; the tables are populated at startup (not visible here).  */
static char mnemonic_chars[256];
static char register_chars[256];
static char operand_chars[256];
static char identifier_chars[256];

/* Lexical macros.  */
#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
#define is_operand_char(x) (operand_chars[(unsigned char) x])
#define is_register_char(x) (register_chars[(unsigned char) x])
#define is_space_char(x) ((x) == ' ')
#define is_identifier_char(x) (identifier_chars[(unsigned char) x])

/* All non-digit non-letter characters that may occur in an operand.  */
static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
/* md_assemble() always leaves the strings it's passed unaltered.  To
   effect this we maintain a stack of saved characters that we've smashed
   with '\0's (indicating end of strings for various sub-fields of the
   assembler instruction).  */
static char save_stack[32];
static char *save_stack_p;

/* Temporarily NUL-terminate the string at S, pushing the overwritten
   character so RESTORE_END_STRING can pop it back.  */
#define END_STRING_AND_SAVE(s) \
	do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
#define RESTORE_END_STRING(s) \
	do { *(s) = *--save_stack_p; } while (0)
545 /* The instruction we're assembling. */
548 /* Possible templates for current insn. */
549 static const templates
*current_templates
;
551 /* Per instruction expressionS buffers: max displacements & immediates. */
552 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
553 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
555 /* Current operand we are working on. */
556 static int this_operand
= -1;
558 /* We support four different modes. FLAG_CODE variable is used to distinguish
566 static enum flag_code flag_code
;
567 static unsigned int object_64bit
;
568 static unsigned int disallow_64bit_reloc
;
569 static int use_rela_relocations
= 0;
570 /* __tls_get_addr/___tls_get_addr symbol for TLS. */
571 static const char *tls_get_addr
;
573 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
574 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
575 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
577 /* The ELF ABI to use. */
585 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
588 #if defined (TE_PE) || defined (TE_PEP)
589 /* Use big object file format. */
590 static int use_big_obj
= 0;
593 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
594 /* 1 if generating code for a shared library. */
595 static int shared
= 0;
598 /* 1 for intel syntax,
600 static int intel_syntax
= 0;
602 static enum x86_64_isa
604 amd64
= 1, /* AMD64 ISA. */
605 intel64
/* Intel64 ISA. */
608 /* 1 for intel mnemonic,
609 0 if att mnemonic. */
610 static int intel_mnemonic
= !SYSV386_COMPAT
;
/* 1 if pseudo registers are permitted.  */
static int allow_pseudo_reg = 0;

/* 1 if register prefix % not required.  */
static int allow_naked_reg = 0;

/* 1 if the assembler should add BND prefix for all control-transferring
   instructions supporting it, even if this prefix wasn't specified
   explicitly.  */
static int add_bnd_prefix = 0;

/* 1 if pseudo index register, eiz/riz, is allowed.  */
static int allow_index_reg = 0;

/* 1 if the assembler should ignore LOCK prefix, even if it was
   specified explicitly.  */
static int omit_lock_prefix = 0;

/* 1 if the assembler should encode lfence, mfence, and sfence as
   "lock addl $0, (%{re}sp)".  */
static int avoid_fence = 0;

/* 1 if lfence should be inserted after every load.  */
static int lfence_after_load = 0;
637 /* Non-zero if lfence should be inserted before indirect branch. */
638 static enum lfence_before_indirect_branch_kind
640 lfence_branch_none
= 0,
641 lfence_branch_register
,
642 lfence_branch_memory
,
645 lfence_before_indirect_branch
;
647 /* Non-zero if lfence should be inserted before ret. */
648 static enum lfence_before_ret_kind
650 lfence_before_ret_none
= 0,
651 lfence_before_ret_not
,
652 lfence_before_ret_or
,
653 lfence_before_ret_shl
657 /* Types of previous instruction is .byte or prefix. */
672 /* 1 if the assembler should generate relax relocations. */
674 static int generate_relax_relocations
675 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
;
677 static enum check_kind
683 sse_check
, operand_check
= check_warning
;
/* Non-zero if branches should be aligned within power of 2 boundary.  */
static int align_branch_power = 0;
688 /* Types of branches to align. */
689 enum align_branch_kind
691 align_branch_none
= 0,
692 align_branch_jcc
= 1,
693 align_branch_fused
= 2,
694 align_branch_jmp
= 3,
695 align_branch_call
= 4,
696 align_branch_indirect
= 5,
700 /* Type bits of branches to align. */
701 enum align_branch_bit
703 align_branch_jcc_bit
= 1 << align_branch_jcc
,
704 align_branch_fused_bit
= 1 << align_branch_fused
,
705 align_branch_jmp_bit
= 1 << align_branch_jmp
,
706 align_branch_call_bit
= 1 << align_branch_call
,
707 align_branch_indirect_bit
= 1 << align_branch_indirect
,
708 align_branch_ret_bit
= 1 << align_branch_ret
711 static unsigned int align_branch
= (align_branch_jcc_bit
712 | align_branch_fused_bit
713 | align_branch_jmp_bit
);
715 /* Types of condition jump used by macro-fusion. */
718 mf_jcc_jo
= 0, /* base opcode 0x70 */
719 mf_jcc_jc
, /* base opcode 0x72 */
720 mf_jcc_je
, /* base opcode 0x74 */
721 mf_jcc_jna
, /* base opcode 0x76 */
722 mf_jcc_js
, /* base opcode 0x78 */
723 mf_jcc_jp
, /* base opcode 0x7a */
724 mf_jcc_jl
, /* base opcode 0x7c */
725 mf_jcc_jle
, /* base opcode 0x7e */
728 /* Types of compare flag-modifying instructions used by macro-fusion. */
731 mf_cmp_test_and
, /* test/cmp */
732 mf_cmp_alu_cmp
, /* add/sub/cmp */
733 mf_cmp_incdec
/* inc/dec */
/* The maximum padding size for fused jcc.  CMP like instruction can
   be 9 bytes and jcc can be 6 bytes.  Leave room just in case for
   prefixes.  */
#define MAX_FUSED_JCC_PADDING_SIZE 20

/* The maximum number of prefixes added for an instruction.  */
static unsigned int align_branch_prefix_size = 5;
/* Optimization level:
   1. Clear the REX_W bit with register operand if possible.
   2. Above plus use 128bit vector instruction to clear the full vector
      register.  */
static int optimize = 0;

/* Size optimization level:
   1. Clear the REX_W bit with register operand if possible.
   2. Above plus use 128bit vector instruction to clear the full vector
      register.
   3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to a shorter
      byte form where possible.  */
static int optimize_for_space = 0;
/* Register prefix used for error message.  */
static const char *register_prefix = "%";

/* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
   leave, push, and pop instructions so that gcc has the same stack
   frame as in 32 bit mode.  */
static char stackop_size = '\0';

/* Non-zero to optimize code alignment.  */
int optimize_align_code = 1;

/* Non-zero to quieten some warnings.  */
static int quiet_warnings = 0;
775 static const char *cpu_arch_name
= NULL
;
776 static char *cpu_sub_arch_name
= NULL
;
778 /* CPU feature flags. */
779 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
781 /* If we have selected a cpu we are generating instructions for. */
782 static int cpu_arch_tune_set
= 0;
784 /* Cpu we are generating instructions for. */
785 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
787 /* CPU feature flags of cpu we are generating instructions for. */
788 static i386_cpu_flags cpu_arch_tune_flags
;
790 /* CPU instruction set architecture used. */
791 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
793 /* CPU feature flags of instruction set architecture used. */
794 i386_cpu_flags cpu_arch_isa_flags
;
796 /* If set, conditional jumps are not automatically promoted to handle
797 larger than a byte offset. */
798 static unsigned int no_cond_jump_promotion
= 0;
800 /* Encode SSE instructions with VEX prefix. */
801 static unsigned int sse2avx
;
803 /* Encode aligned vector move as unaligned vector move. */
804 static unsigned int use_unaligned_vector_move
;
806 /* Encode scalar AVX instructions with specific vector length. */
813 /* Encode VEX WIG instructions with specific vex.w. */
820 /* Encode scalar EVEX LIG instructions with specific vector length. */
828 /* Encode EVEX WIG instructions with specific evex.w. */
835 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
836 static enum rc_type evexrcig
= rne
;
838 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
839 static symbolS
*GOT_symbol
;
841 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
842 unsigned int x86_dwarf2_return_column
;
844 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
845 int x86_cie_data_alignment
;
847 /* Interface to relax_segment.
848 There are 3 major relax states for 386 jump insns because the
849 different types of jumps add different sizes to frags when we're
850 figuring out what sort of jump to choose to reach a given label.
852 BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING are used to align
853 branches which are handled by md_estimate_size_before_relax() and
854 i386_generic_table_relax_frag(). */
857 #define UNCOND_JUMP 0
859 #define COND_JUMP86 2
860 #define BRANCH_PADDING 3
861 #define BRANCH_PREFIX 4
862 #define FUSED_JCC_PADDING 5
867 #define SMALL16 (SMALL | CODE16)
869 #define BIG16 (BIG | CODE16)
873 #define INLINE __inline__
879 #define ENCODE_RELAX_STATE(type, size) \
880 ((relax_substateT) (((type) << 2) | (size)))
881 #define TYPE_FROM_RELAX_STATE(s) \
883 #define DISP_SIZE_FROM_RELAX_STATE(s) \
884 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
886 /* This table is used by relax_frag to promote short jumps to long
887 ones where necessary. SMALL (short) jumps may be promoted to BIG
888 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
889 don't allow a short jump in a 32 bit code segment to be promoted to
890 a 16 bit offset jump because it's slower (requires data size
891 prefix), and doesn't work, unless the destination is in the bottom
892 64k of the code segment (The top 16 bits of eip are zeroed). */
894 const relax_typeS md_relax_table
[] =
897 1) most positive reach of this state,
898 2) most negative reach of this state,
899 3) how many bytes this mode will have in the variable part of the frag
900 4) which index into the table to try if we can't fit into this one. */
902 /* UNCOND_JUMP states. */
903 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
904 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
905 /* dword jmp adds 4 bytes to frag:
906 0 extra opcode bytes, 4 displacement bytes. */
908 /* word jmp adds 2 bytes to frag:
909 0 extra opcode bytes, 2 displacement bytes. */
912 /* COND_JUMP states. */
913 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
914 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
915 /* dword conditionals adds 5 bytes to frag:
916 1 extra opcode byte, 4 displacement bytes. */
918 /* word conditionals add 3 bytes to frag:
919 1 extra opcode byte, 2 displacement bytes. */
922 /* COND_JUMP86 states. */
923 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
924 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
925 /* dword conditionals adds 5 bytes to frag:
926 1 extra opcode byte, 4 displacement bytes. */
928 /* word conditionals add 4 bytes to frag:
929 1 displacement byte and a 3 byte long branch insn. */
933 static const arch_entry cpu_arch
[] =
935 /* Do not replace the first two entries - i386_target_format()
936 relies on them being there in this order. */
937 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32
,
938 CPU_GENERIC32_FLAGS
, 0 },
939 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64
,
940 CPU_GENERIC64_FLAGS
, 0 },
941 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN
,
943 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN
,
945 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN
,
947 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386
,
949 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486
,
951 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM
,
953 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO
,
955 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM
,
957 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO
,
958 CPU_PENTIUMPRO_FLAGS
, 0 },
959 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO
,
961 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO
,
963 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4
,
965 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA
,
967 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA
,
968 CPU_NOCONA_FLAGS
, 0 },
969 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE
,
971 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE
,
973 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2
,
974 CPU_CORE2_FLAGS
, 1 },
975 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2
,
976 CPU_CORE2_FLAGS
, 0 },
977 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7
,
978 CPU_COREI7_FLAGS
, 0 },
979 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU
,
980 CPU_IAMCU_FLAGS
, 0 },
981 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6
,
983 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6
,
985 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON
,
986 CPU_ATHLON_FLAGS
, 0 },
987 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8
,
989 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8
,
991 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8
,
993 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10
,
994 CPU_AMDFAM10_FLAGS
, 0 },
995 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD
,
996 CPU_BDVER1_FLAGS
, 0 },
997 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD
,
998 CPU_BDVER2_FLAGS
, 0 },
999 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD
,
1000 CPU_BDVER3_FLAGS
, 0 },
1001 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD
,
1002 CPU_BDVER4_FLAGS
, 0 },
1003 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER
,
1004 CPU_ZNVER1_FLAGS
, 0 },
1005 { STRING_COMMA_LEN ("znver2"), PROCESSOR_ZNVER
,
1006 CPU_ZNVER2_FLAGS
, 0 },
1007 { STRING_COMMA_LEN ("znver3"), PROCESSOR_ZNVER
,
1008 CPU_ZNVER3_FLAGS
, 0 },
1009 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT
,
1010 CPU_BTVER1_FLAGS
, 0 },
1011 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT
,
1012 CPU_BTVER2_FLAGS
, 0 },
1013 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN
,
1014 CPU_8087_FLAGS
, 0 },
1015 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN
,
1017 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN
,
1019 { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN
,
1021 { STRING_COMMA_LEN (".cmov"), PROCESSOR_UNKNOWN
,
1022 CPU_CMOV_FLAGS
, 0 },
1023 { STRING_COMMA_LEN (".fxsr"), PROCESSOR_UNKNOWN
,
1024 CPU_FXSR_FLAGS
, 0 },
1025 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN
,
1027 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN
,
1029 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN
,
1030 CPU_SSE2_FLAGS
, 0 },
1031 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN
,
1032 CPU_SSE3_FLAGS
, 0 },
1033 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1034 CPU_SSE4A_FLAGS
, 0 },
1035 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN
,
1036 CPU_SSSE3_FLAGS
, 0 },
1037 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN
,
1038 CPU_SSE4_1_FLAGS
, 0 },
1039 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN
,
1040 CPU_SSE4_2_FLAGS
, 0 },
1041 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN
,
1042 CPU_SSE4_2_FLAGS
, 0 },
1043 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN
,
1045 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN
,
1046 CPU_AVX2_FLAGS
, 0 },
1047 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN
,
1048 CPU_AVX512F_FLAGS
, 0 },
1049 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN
,
1050 CPU_AVX512CD_FLAGS
, 0 },
1051 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN
,
1052 CPU_AVX512ER_FLAGS
, 0 },
1053 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN
,
1054 CPU_AVX512PF_FLAGS
, 0 },
1055 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN
,
1056 CPU_AVX512DQ_FLAGS
, 0 },
1057 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN
,
1058 CPU_AVX512BW_FLAGS
, 0 },
1059 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN
,
1060 CPU_AVX512VL_FLAGS
, 0 },
1061 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN
,
1063 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN
,
1064 CPU_VMFUNC_FLAGS
, 0 },
1065 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN
,
1067 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN
,
1068 CPU_XSAVE_FLAGS
, 0 },
1069 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN
,
1070 CPU_XSAVEOPT_FLAGS
, 0 },
1071 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN
,
1072 CPU_XSAVEC_FLAGS
, 0 },
1073 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN
,
1074 CPU_XSAVES_FLAGS
, 0 },
1075 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN
,
1077 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN
,
1078 CPU_PCLMUL_FLAGS
, 0 },
1079 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN
,
1080 CPU_PCLMUL_FLAGS
, 1 },
1081 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN
,
1082 CPU_FSGSBASE_FLAGS
, 0 },
1083 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN
,
1084 CPU_RDRND_FLAGS
, 0 },
1085 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN
,
1086 CPU_F16C_FLAGS
, 0 },
1087 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN
,
1088 CPU_BMI2_FLAGS
, 0 },
1089 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN
,
1091 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN
,
1092 CPU_FMA4_FLAGS
, 0 },
1093 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN
,
1095 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN
,
1097 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN
,
1098 CPU_MOVBE_FLAGS
, 0 },
1099 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN
,
1100 CPU_CX16_FLAGS
, 0 },
1101 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN
,
1103 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN
,
1104 CPU_LZCNT_FLAGS
, 0 },
1105 { STRING_COMMA_LEN (".popcnt"), PROCESSOR_UNKNOWN
,
1106 CPU_POPCNT_FLAGS
, 0 },
1107 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN
,
1109 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN
,
1111 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN
,
1112 CPU_INVPCID_FLAGS
, 0 },
1113 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN
,
1114 CPU_CLFLUSH_FLAGS
, 0 },
1115 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN
,
1117 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN
,
1118 CPU_SYSCALL_FLAGS
, 0 },
1119 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN
,
1120 CPU_RDTSCP_FLAGS
, 0 },
1121 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN
,
1122 CPU_3DNOW_FLAGS
, 0 },
1123 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN
,
1124 CPU_3DNOWA_FLAGS
, 0 },
1125 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN
,
1126 CPU_PADLOCK_FLAGS
, 0 },
1127 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN
,
1128 CPU_SVME_FLAGS
, 1 },
1129 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN
,
1130 CPU_SVME_FLAGS
, 0 },
1131 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1132 CPU_SSE4A_FLAGS
, 0 },
1133 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN
,
1135 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN
,
1137 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN
,
1139 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN
,
1141 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN
,
1142 CPU_RDSEED_FLAGS
, 0 },
1143 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN
,
1144 CPU_PRFCHW_FLAGS
, 0 },
1145 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN
,
1146 CPU_SMAP_FLAGS
, 0 },
1147 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN
,
1149 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN
,
1151 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN
,
1152 CPU_CLFLUSHOPT_FLAGS
, 0 },
1153 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN
,
1154 CPU_PREFETCHWT1_FLAGS
, 0 },
1155 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN
,
1157 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN
,
1158 CPU_CLWB_FLAGS
, 0 },
1159 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN
,
1160 CPU_AVX512IFMA_FLAGS
, 0 },
1161 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN
,
1162 CPU_AVX512VBMI_FLAGS
, 0 },
1163 { STRING_COMMA_LEN (".avx512_4fmaps"), PROCESSOR_UNKNOWN
,
1164 CPU_AVX512_4FMAPS_FLAGS
, 0 },
1165 { STRING_COMMA_LEN (".avx512_4vnniw"), PROCESSOR_UNKNOWN
,
1166 CPU_AVX512_4VNNIW_FLAGS
, 0 },
1167 { STRING_COMMA_LEN (".avx512_vpopcntdq"), PROCESSOR_UNKNOWN
,
1168 CPU_AVX512_VPOPCNTDQ_FLAGS
, 0 },
1169 { STRING_COMMA_LEN (".avx512_vbmi2"), PROCESSOR_UNKNOWN
,
1170 CPU_AVX512_VBMI2_FLAGS
, 0 },
1171 { STRING_COMMA_LEN (".avx512_vnni"), PROCESSOR_UNKNOWN
,
1172 CPU_AVX512_VNNI_FLAGS
, 0 },
1173 { STRING_COMMA_LEN (".avx512_bitalg"), PROCESSOR_UNKNOWN
,
1174 CPU_AVX512_BITALG_FLAGS
, 0 },
1175 { STRING_COMMA_LEN (".avx_vnni"), PROCESSOR_UNKNOWN
,
1176 CPU_AVX_VNNI_FLAGS
, 0 },
1177 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN
,
1178 CPU_CLZERO_FLAGS
, 0 },
1179 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN
,
1180 CPU_MWAITX_FLAGS
, 0 },
1181 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN
,
1182 CPU_OSPKE_FLAGS
, 0 },
1183 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN
,
1184 CPU_RDPID_FLAGS
, 0 },
1185 { STRING_COMMA_LEN (".ptwrite"), PROCESSOR_UNKNOWN
,
1186 CPU_PTWRITE_FLAGS
, 0 },
1187 { STRING_COMMA_LEN (".ibt"), PROCESSOR_UNKNOWN
,
1189 { STRING_COMMA_LEN (".shstk"), PROCESSOR_UNKNOWN
,
1190 CPU_SHSTK_FLAGS
, 0 },
1191 { STRING_COMMA_LEN (".gfni"), PROCESSOR_UNKNOWN
,
1192 CPU_GFNI_FLAGS
, 0 },
1193 { STRING_COMMA_LEN (".vaes"), PROCESSOR_UNKNOWN
,
1194 CPU_VAES_FLAGS
, 0 },
1195 { STRING_COMMA_LEN (".vpclmulqdq"), PROCESSOR_UNKNOWN
,
1196 CPU_VPCLMULQDQ_FLAGS
, 0 },
1197 { STRING_COMMA_LEN (".wbnoinvd"), PROCESSOR_UNKNOWN
,
1198 CPU_WBNOINVD_FLAGS
, 0 },
1199 { STRING_COMMA_LEN (".pconfig"), PROCESSOR_UNKNOWN
,
1200 CPU_PCONFIG_FLAGS
, 0 },
1201 { STRING_COMMA_LEN (".waitpkg"), PROCESSOR_UNKNOWN
,
1202 CPU_WAITPKG_FLAGS
, 0 },
1203 { STRING_COMMA_LEN (".cldemote"), PROCESSOR_UNKNOWN
,
1204 CPU_CLDEMOTE_FLAGS
, 0 },
1205 { STRING_COMMA_LEN (".amx_int8"), PROCESSOR_UNKNOWN
,
1206 CPU_AMX_INT8_FLAGS
, 0 },
1207 { STRING_COMMA_LEN (".amx_bf16"), PROCESSOR_UNKNOWN
,
1208 CPU_AMX_BF16_FLAGS
, 0 },
1209 { STRING_COMMA_LEN (".amx_tile"), PROCESSOR_UNKNOWN
,
1210 CPU_AMX_TILE_FLAGS
, 0 },
1211 { STRING_COMMA_LEN (".movdiri"), PROCESSOR_UNKNOWN
,
1212 CPU_MOVDIRI_FLAGS
, 0 },
1213 { STRING_COMMA_LEN (".movdir64b"), PROCESSOR_UNKNOWN
,
1214 CPU_MOVDIR64B_FLAGS
, 0 },
1215 { STRING_COMMA_LEN (".avx512_bf16"), PROCESSOR_UNKNOWN
,
1216 CPU_AVX512_BF16_FLAGS
, 0 },
1217 { STRING_COMMA_LEN (".avx512_vp2intersect"), PROCESSOR_UNKNOWN
,
1218 CPU_AVX512_VP2INTERSECT_FLAGS
, 0 },
1219 { STRING_COMMA_LEN (".tdx"), PROCESSOR_UNKNOWN
,
1221 { STRING_COMMA_LEN (".enqcmd"), PROCESSOR_UNKNOWN
,
1222 CPU_ENQCMD_FLAGS
, 0 },
1223 { STRING_COMMA_LEN (".serialize"), PROCESSOR_UNKNOWN
,
1224 CPU_SERIALIZE_FLAGS
, 0 },
1225 { STRING_COMMA_LEN (".rdpru"), PROCESSOR_UNKNOWN
,
1226 CPU_RDPRU_FLAGS
, 0 },
1227 { STRING_COMMA_LEN (".mcommit"), PROCESSOR_UNKNOWN
,
1228 CPU_MCOMMIT_FLAGS
, 0 },
1229 { STRING_COMMA_LEN (".sev_es"), PROCESSOR_UNKNOWN
,
1230 CPU_SEV_ES_FLAGS
, 0 },
1231 { STRING_COMMA_LEN (".tsxldtrk"), PROCESSOR_UNKNOWN
,
1232 CPU_TSXLDTRK_FLAGS
, 0 },
1233 { STRING_COMMA_LEN (".kl"), PROCESSOR_UNKNOWN
,
1235 { STRING_COMMA_LEN (".widekl"), PROCESSOR_UNKNOWN
,
1236 CPU_WIDEKL_FLAGS
, 0 },
1237 { STRING_COMMA_LEN (".uintr"), PROCESSOR_UNKNOWN
,
1238 CPU_UINTR_FLAGS
, 0 },
1239 { STRING_COMMA_LEN (".hreset"), PROCESSOR_UNKNOWN
,
1240 CPU_HRESET_FLAGS
, 0 },
1241 { STRING_COMMA_LEN (".avx512_fp16"), PROCESSOR_UNKNOWN
,
1242 CPU_AVX512_FP16_FLAGS
, 0 },
1245 static const noarch_entry cpu_noarch
[] =
1247 { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS
},
1248 { STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS
},
1249 { STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS
},
1250 { STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS
},
1251 { STRING_COMMA_LEN ("nocmov"), CPU_ANY_CMOV_FLAGS
},
1252 { STRING_COMMA_LEN ("nofxsr"), CPU_ANY_FXSR_FLAGS
},
1253 { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS
},
1254 { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS
},
1255 { STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS
},
1256 { STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS
},
1257 { STRING_COMMA_LEN ("nosse4a"), CPU_ANY_SSE4A_FLAGS
},
1258 { STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS
},
1259 { STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS
},
1260 { STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS
},
1261 { STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS
},
1262 { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS
},
1263 { STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS
},
1264 { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS
},
1265 { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS
},
1266 { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS
},
1267 { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS
},
1268 { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS
},
1269 { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS
},
1270 { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS
},
1271 { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS
},
1272 { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS
},
1273 { STRING_COMMA_LEN ("noavx512_4fmaps"), CPU_ANY_AVX512_4FMAPS_FLAGS
},
1274 { STRING_COMMA_LEN ("noavx512_4vnniw"), CPU_ANY_AVX512_4VNNIW_FLAGS
},
1275 { STRING_COMMA_LEN ("noavx512_vpopcntdq"), CPU_ANY_AVX512_VPOPCNTDQ_FLAGS
},
1276 { STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS
},
1277 { STRING_COMMA_LEN ("noavx512_vnni"), CPU_ANY_AVX512_VNNI_FLAGS
},
1278 { STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS
},
1279 { STRING_COMMA_LEN ("noavx_vnni"), CPU_ANY_AVX_VNNI_FLAGS
},
1280 { STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS
},
1281 { STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS
},
1282 { STRING_COMMA_LEN ("noamx_int8"), CPU_ANY_AMX_INT8_FLAGS
},
1283 { STRING_COMMA_LEN ("noamx_bf16"), CPU_ANY_AMX_BF16_FLAGS
},
1284 { STRING_COMMA_LEN ("noamx_tile"), CPU_ANY_AMX_TILE_FLAGS
},
1285 { STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS
},
1286 { STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS
},
1287 { STRING_COMMA_LEN ("noavx512_bf16"), CPU_ANY_AVX512_BF16_FLAGS
},
1288 { STRING_COMMA_LEN ("noavx512_vp2intersect"),
1289 CPU_ANY_AVX512_VP2INTERSECT_FLAGS
},
1290 { STRING_COMMA_LEN ("notdx"), CPU_ANY_TDX_FLAGS
},
1291 { STRING_COMMA_LEN ("noenqcmd"), CPU_ANY_ENQCMD_FLAGS
},
1292 { STRING_COMMA_LEN ("noserialize"), CPU_ANY_SERIALIZE_FLAGS
},
1293 { STRING_COMMA_LEN ("notsxldtrk"), CPU_ANY_TSXLDTRK_FLAGS
},
1294 { STRING_COMMA_LEN ("nokl"), CPU_ANY_KL_FLAGS
},
1295 { STRING_COMMA_LEN ("nowidekl"), CPU_ANY_WIDEKL_FLAGS
},
1296 { STRING_COMMA_LEN ("nouintr"), CPU_ANY_UINTR_FLAGS
},
1297 { STRING_COMMA_LEN ("nohreset"), CPU_ANY_HRESET_FLAGS
},
1298 { STRING_COMMA_LEN ("noavx512_fp16"), CPU_ANY_AVX512_FP16_FLAGS
},
1302 /* Like s_lcomm_internal in gas/read.c but the alignment string
1303 is allowed to be optional. */
1306 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
1313 && *input_line_pointer
== ',')
1315 align
= parse_align (needs_align
- 1);
1317 if (align
== (addressT
) -1)
1332 bss_alloc (symbolP
, size
, align
);
1337 pe_lcomm (int needs_align
)
1339 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
1343 const pseudo_typeS md_pseudo_table
[] =
1345 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1346 {"align", s_align_bytes
, 0},
1348 {"align", s_align_ptwo
, 0},
1350 {"arch", set_cpu_arch
, 0},
1354 {"lcomm", pe_lcomm
, 1},
1356 {"ffloat", float_cons
, 'f'},
1357 {"dfloat", float_cons
, 'd'},
1358 {"tfloat", float_cons
, 'x'},
1359 {"hfloat", float_cons
, 'h'},
1360 {"bfloat16", float_cons
, 'b'},
1362 {"slong", signed_cons
, 4},
1363 {"noopt", s_ignore
, 0},
1364 {"optim", s_ignore
, 0},
1365 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
1366 {"code16", set_code_flag
, CODE_16BIT
},
1367 {"code32", set_code_flag
, CODE_32BIT
},
1369 {"code64", set_code_flag
, CODE_64BIT
},
1371 {"intel_syntax", set_intel_syntax
, 1},
1372 {"att_syntax", set_intel_syntax
, 0},
1373 {"intel_mnemonic", set_intel_mnemonic
, 1},
1374 {"att_mnemonic", set_intel_mnemonic
, 0},
1375 {"allow_index_reg", set_allow_index_reg
, 1},
1376 {"disallow_index_reg", set_allow_index_reg
, 0},
1377 {"sse_check", set_check
, 0},
1378 {"operand_check", set_check
, 1},
1379 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1380 {"largecomm", handle_large_common
, 0},
1382 {"file", dwarf2_directive_file
, 0},
1383 {"loc", dwarf2_directive_loc
, 0},
1384 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
1387 {"secrel32", pe_directive_secrel
, 0},
1392 /* For interface with expression (). */
1393 extern char *input_line_pointer
;
1395 /* Hash table for instruction mnemonic lookup. */
1396 static htab_t op_hash
;
1398 /* Hash table for register lookup. */
1399 static htab_t reg_hash
;
1401 /* Various efficient no-op patterns for aligning code labels.
1402 Note: Don't try to assemble the instructions in the comments.
1403 0L and 0w are not legal. */
1404 static const unsigned char f32_1
[] =
1406 static const unsigned char f32_2
[] =
1407 {0x66,0x90}; /* xchg %ax,%ax */
1408 static const unsigned char f32_3
[] =
1409 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
1410 static const unsigned char f32_4
[] =
1411 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1412 static const unsigned char f32_6
[] =
1413 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
1414 static const unsigned char f32_7
[] =
1415 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1416 static const unsigned char f16_3
[] =
1417 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
1418 static const unsigned char f16_4
[] =
1419 {0x8d,0xb4,0x00,0x00}; /* lea 0W(%si),%si */
1420 static const unsigned char jump_disp8
[] =
1421 {0xeb}; /* jmp disp8 */
1422 static const unsigned char jump32_disp32
[] =
1423 {0xe9}; /* jmp disp32 */
1424 static const unsigned char jump16_disp32
[] =
1425 {0x66,0xe9}; /* jmp disp32 */
1426 /* 32-bit NOPs patterns. */
1427 static const unsigned char *const f32_patt
[] = {
1428 f32_1
, f32_2
, f32_3
, f32_4
, NULL
, f32_6
, f32_7
1430 /* 16-bit NOPs patterns. */
1431 static const unsigned char *const f16_patt
[] = {
1432 f32_1
, f32_2
, f16_3
, f16_4
1434 /* nopl (%[re]ax) */
1435 static const unsigned char alt_3
[] =
1437 /* nopl 0(%[re]ax) */
1438 static const unsigned char alt_4
[] =
1439 {0x0f,0x1f,0x40,0x00};
1440 /* nopl 0(%[re]ax,%[re]ax,1) */
1441 static const unsigned char alt_5
[] =
1442 {0x0f,0x1f,0x44,0x00,0x00};
1443 /* nopw 0(%[re]ax,%[re]ax,1) */
1444 static const unsigned char alt_6
[] =
1445 {0x66,0x0f,0x1f,0x44,0x00,0x00};
1446 /* nopl 0L(%[re]ax) */
1447 static const unsigned char alt_7
[] =
1448 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1449 /* nopl 0L(%[re]ax,%[re]ax,1) */
1450 static const unsigned char alt_8
[] =
1451 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1452 /* nopw 0L(%[re]ax,%[re]ax,1) */
1453 static const unsigned char alt_9
[] =
1454 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1455 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1456 static const unsigned char alt_10
[] =
1457 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1458 /* data16 nopw %cs:0L(%eax,%eax,1) */
1459 static const unsigned char alt_11
[] =
1460 {0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1461 /* 32-bit and 64-bit NOPs patterns. */
1462 static const unsigned char *const alt_patt
[] = {
1463 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1464 alt_9
, alt_10
, alt_11
/* Genenerate COUNT bytes of NOPs to WHERE from PATT with the maximum
   size of a single NOP instruction MAX_SINGLE_NOP_SIZE.  Fills with as
   many maximal NOPs as fit, then one shorter NOP (padded with a
   one-byte NOP when the exact leftover size has no pattern).  */

static void
i386_output_nops (char *where, const unsigned char *const *patt,
		  int count, int max_single_nop_size)
{
  /* Place the longer NOP first.  */
  int last;
  int offset;
  const unsigned char *nops;

  if (max_single_nop_size < 1)
    {
      as_fatal (_("i386_output_nops called to generate nops of at most %d bytes!"),
		max_single_nop_size);
      return;
    }

  nops = patt[max_single_nop_size - 1];

  /* Use the smaller one if the requsted one isn't available.  */
  if (nops == NULL)
    {
      max_single_nop_size--;
      nops = patt[max_single_nop_size - 1];
    }

  last = count % max_single_nop_size;

  count -= last;
  for (offset = 0; offset < count; offset += max_single_nop_size)
    memcpy (where + offset, nops, max_single_nop_size);

  if (last)
    {
      nops = patt[last - 1];
      if (nops == NULL)
	{
	  /* Use the smaller one plus one-byte NOP if the needed one
	     isn't available.  */
	  last--;
	  nops = patt[last - 1];
	  memcpy (where + offset, nops, last);
	  where[offset + last] = *patt[0];
	}
      else
	memcpy (where + offset, nops, last);
    }
}
1520 fits_in_imm7 (offsetT num
)
1522 return (num
& 0x7f) == num
;
1526 fits_in_imm31 (offsetT num
)
1528 return (num
& 0x7fffffff) == num
;
1531 /* Genenerate COUNT bytes of NOPs to WHERE with the maximum size of a
1532 single NOP instruction LIMIT. */
1535 i386_generate_nops (fragS
*fragP
, char *where
, offsetT count
, int limit
)
1537 const unsigned char *const *patt
= NULL
;
1538 int max_single_nop_size
;
1539 /* Maximum number of NOPs before switching to jump over NOPs. */
1540 int max_number_of_nops
;
1542 switch (fragP
->fr_type
)
1547 case rs_machine_dependent
:
1548 /* Allow NOP padding for jumps and calls. */
1549 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
1550 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
1557 /* We need to decide which NOP sequence to use for 32bit and
1558 64bit. When -mtune= is used:
1560 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1561 PROCESSOR_GENERIC32, f32_patt will be used.
1562 2. For the rest, alt_patt will be used.
1564 When -mtune= isn't used, alt_patt will be used if
1565 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1568 When -march= or .arch is used, we can't use anything beyond
1569 cpu_arch_isa_flags. */
1571 if (flag_code
== CODE_16BIT
)
1574 max_single_nop_size
= sizeof (f16_patt
) / sizeof (f16_patt
[0]);
1575 /* Limit number of NOPs to 2 in 16-bit mode. */
1576 max_number_of_nops
= 2;
1580 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1582 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1583 switch (cpu_arch_tune
)
1585 case PROCESSOR_UNKNOWN
:
1586 /* We use cpu_arch_isa_flags to check if we SHOULD
1587 optimize with nops. */
1588 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1593 case PROCESSOR_PENTIUM4
:
1594 case PROCESSOR_NOCONA
:
1595 case PROCESSOR_CORE
:
1596 case PROCESSOR_CORE2
:
1597 case PROCESSOR_COREI7
:
1598 case PROCESSOR_GENERIC64
:
1600 case PROCESSOR_ATHLON
:
1602 case PROCESSOR_AMDFAM10
:
1604 case PROCESSOR_ZNVER
:
1608 case PROCESSOR_I386
:
1609 case PROCESSOR_I486
:
1610 case PROCESSOR_PENTIUM
:
1611 case PROCESSOR_PENTIUMPRO
:
1612 case PROCESSOR_IAMCU
:
1613 case PROCESSOR_GENERIC32
:
1620 switch (fragP
->tc_frag_data
.tune
)
1622 case PROCESSOR_UNKNOWN
:
1623 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1624 PROCESSOR_UNKNOWN. */
1628 case PROCESSOR_I386
:
1629 case PROCESSOR_I486
:
1630 case PROCESSOR_PENTIUM
:
1631 case PROCESSOR_IAMCU
:
1633 case PROCESSOR_ATHLON
:
1635 case PROCESSOR_AMDFAM10
:
1637 case PROCESSOR_ZNVER
:
1639 case PROCESSOR_GENERIC32
:
1640 /* We use cpu_arch_isa_flags to check if we CAN optimize
1642 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1647 case PROCESSOR_PENTIUMPRO
:
1648 case PROCESSOR_PENTIUM4
:
1649 case PROCESSOR_NOCONA
:
1650 case PROCESSOR_CORE
:
1651 case PROCESSOR_CORE2
:
1652 case PROCESSOR_COREI7
:
1653 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1658 case PROCESSOR_GENERIC64
:
1664 if (patt
== f32_patt
)
1666 max_single_nop_size
= sizeof (f32_patt
) / sizeof (f32_patt
[0]);
1667 /* Limit number of NOPs to 2 for older processors. */
1668 max_number_of_nops
= 2;
1672 max_single_nop_size
= sizeof (alt_patt
) / sizeof (alt_patt
[0]);
1673 /* Limit number of NOPs to 7 for newer processors. */
1674 max_number_of_nops
= 7;
1679 limit
= max_single_nop_size
;
1681 if (fragP
->fr_type
== rs_fill_nop
)
1683 /* Output NOPs for .nop directive. */
1684 if (limit
> max_single_nop_size
)
1686 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1687 _("invalid single nop size: %d "
1688 "(expect within [0, %d])"),
1689 limit
, max_single_nop_size
);
1693 else if (fragP
->fr_type
!= rs_machine_dependent
)
1694 fragP
->fr_var
= count
;
1696 if ((count
/ max_single_nop_size
) > max_number_of_nops
)
1698 /* Generate jump over NOPs. */
1699 offsetT disp
= count
- 2;
1700 if (fits_in_imm7 (disp
))
1702 /* Use "jmp disp8" if possible. */
1704 where
[0] = jump_disp8
[0];
1710 unsigned int size_of_jump
;
1712 if (flag_code
== CODE_16BIT
)
1714 where
[0] = jump16_disp32
[0];
1715 where
[1] = jump16_disp32
[1];
1720 where
[0] = jump32_disp32
[0];
1724 count
-= size_of_jump
+ 4;
1725 if (!fits_in_imm31 (count
))
1727 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1728 _("jump over nop padding out of range"));
1732 md_number_to_chars (where
+ size_of_jump
, count
, 4);
1733 where
+= size_of_jump
+ 4;
1737 /* Generate multiple NOPs. */
1738 i386_output_nops (where
, patt
, count
, limit
);
1742 operand_type_all_zero (const union i386_operand_type
*x
)
1744 switch (ARRAY_SIZE(x
->array
))
1755 return !x
->array
[0];
1762 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1764 switch (ARRAY_SIZE(x
->array
))
1780 x
->bitfield
.class = ClassNone
;
1781 x
->bitfield
.instance
= InstanceNone
;
1785 operand_type_equal (const union i386_operand_type
*x
,
1786 const union i386_operand_type
*y
)
1788 switch (ARRAY_SIZE(x
->array
))
1791 if (x
->array
[2] != y
->array
[2])
1795 if (x
->array
[1] != y
->array
[1])
1799 return x
->array
[0] == y
->array
[0];
1807 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1809 switch (ARRAY_SIZE(x
->array
))
1824 return !x
->array
[0];
1831 cpu_flags_equal (const union i386_cpu_flags
*x
,
1832 const union i386_cpu_flags
*y
)
1834 switch (ARRAY_SIZE(x
->array
))
1837 if (x
->array
[3] != y
->array
[3])
1841 if (x
->array
[2] != y
->array
[2])
1845 if (x
->array
[1] != y
->array
[1])
1849 return x
->array
[0] == y
->array
[0];
1857 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1859 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1860 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1863 static INLINE i386_cpu_flags
1864 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1866 switch (ARRAY_SIZE (x
.array
))
1869 x
.array
[3] &= y
.array
[3];
1872 x
.array
[2] &= y
.array
[2];
1875 x
.array
[1] &= y
.array
[1];
1878 x
.array
[0] &= y
.array
[0];
1886 static INLINE i386_cpu_flags
1887 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1889 switch (ARRAY_SIZE (x
.array
))
1892 x
.array
[3] |= y
.array
[3];
1895 x
.array
[2] |= y
.array
[2];
1898 x
.array
[1] |= y
.array
[1];
1901 x
.array
[0] |= y
.array
[0];
1909 static INLINE i386_cpu_flags
1910 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1912 switch (ARRAY_SIZE (x
.array
))
1915 x
.array
[3] &= ~y
.array
[3];
1918 x
.array
[2] &= ~y
.array
[2];
1921 x
.array
[1] &= ~y
.array
[1];
1924 x
.array
[0] &= ~y
.array
[0];
1932 static const i386_cpu_flags avx512
= CPU_ANY_AVX512F_FLAGS
;
1934 #define CPU_FLAGS_ARCH_MATCH 0x1
1935 #define CPU_FLAGS_64BIT_MATCH 0x2
1937 #define CPU_FLAGS_PERFECT_MATCH \
1938 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_64BIT_MATCH)
1940 /* Return CPU flags match bits. */
1943 cpu_flags_match (const insn_template
*t
)
1945 i386_cpu_flags x
= t
->cpu_flags
;
1946 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1948 x
.bitfield
.cpu64
= 0;
1949 x
.bitfield
.cpuno64
= 0;
1951 if (cpu_flags_all_zero (&x
))
1953 /* This instruction is available on all archs. */
1954 match
|= CPU_FLAGS_ARCH_MATCH
;
1958 /* This instruction is available only on some archs. */
1959 i386_cpu_flags cpu
= cpu_arch_flags
;
1961 /* AVX512VL is no standalone feature - match it and then strip it. */
1962 if (x
.bitfield
.cpuavx512vl
&& !cpu
.bitfield
.cpuavx512vl
)
1964 x
.bitfield
.cpuavx512vl
= 0;
1966 /* AVX and AVX2 present at the same time express an operand size
1967 dependency - strip AVX2 for the purposes here. The operand size
1968 dependent check occurs in check_vecOperands(). */
1969 if (x
.bitfield
.cpuavx
&& x
.bitfield
.cpuavx2
)
1970 x
.bitfield
.cpuavx2
= 0;
1972 cpu
= cpu_flags_and (x
, cpu
);
1973 if (!cpu_flags_all_zero (&cpu
))
1975 if (x
.bitfield
.cpuavx
)
1977 /* We need to check a few extra flags with AVX. */
1978 if (cpu
.bitfield
.cpuavx
1979 && (!t
->opcode_modifier
.sse2avx
1980 || (sse2avx
&& !i
.prefix
[DATA_PREFIX
]))
1981 && (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1982 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1983 && (!x
.bitfield
.cpupclmul
|| cpu
.bitfield
.cpupclmul
))
1984 match
|= CPU_FLAGS_ARCH_MATCH
;
1986 else if (x
.bitfield
.cpuavx512f
)
1988 /* We need to check a few extra flags with AVX512F. */
1989 if (cpu
.bitfield
.cpuavx512f
1990 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1991 && (!x
.bitfield
.cpuvaes
|| cpu
.bitfield
.cpuvaes
)
1992 && (!x
.bitfield
.cpuvpclmulqdq
|| cpu
.bitfield
.cpuvpclmulqdq
))
1993 match
|= CPU_FLAGS_ARCH_MATCH
;
1996 match
|= CPU_FLAGS_ARCH_MATCH
;
2002 static INLINE i386_operand_type
2003 operand_type_and (i386_operand_type x
, i386_operand_type y
)
2005 if (x
.bitfield
.class != y
.bitfield
.class)
2006 x
.bitfield
.class = ClassNone
;
2007 if (x
.bitfield
.instance
!= y
.bitfield
.instance
)
2008 x
.bitfield
.instance
= InstanceNone
;
2010 switch (ARRAY_SIZE (x
.array
))
2013 x
.array
[2] &= y
.array
[2];
2016 x
.array
[1] &= y
.array
[1];
2019 x
.array
[0] &= y
.array
[0];
2027 static INLINE i386_operand_type
2028 operand_type_and_not (i386_operand_type x
, i386_operand_type y
)
2030 gas_assert (y
.bitfield
.class == ClassNone
);
2031 gas_assert (y
.bitfield
.instance
== InstanceNone
);
2033 switch (ARRAY_SIZE (x
.array
))
2036 x
.array
[2] &= ~y
.array
[2];
2039 x
.array
[1] &= ~y
.array
[1];
2042 x
.array
[0] &= ~y
.array
[0];
2050 static INLINE i386_operand_type
2051 operand_type_or (i386_operand_type x
, i386_operand_type y
)
2053 gas_assert (x
.bitfield
.class == ClassNone
||
2054 y
.bitfield
.class == ClassNone
||
2055 x
.bitfield
.class == y
.bitfield
.class);
2056 gas_assert (x
.bitfield
.instance
== InstanceNone
||
2057 y
.bitfield
.instance
== InstanceNone
||
2058 x
.bitfield
.instance
== y
.bitfield
.instance
);
2060 switch (ARRAY_SIZE (x
.array
))
2063 x
.array
[2] |= y
.array
[2];
2066 x
.array
[1] |= y
.array
[1];
2069 x
.array
[0] |= y
.array
[0];
2077 static INLINE i386_operand_type
2078 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
2080 gas_assert (y
.bitfield
.class == ClassNone
);
2081 gas_assert (y
.bitfield
.instance
== InstanceNone
);
2083 switch (ARRAY_SIZE (x
.array
))
2086 x
.array
[2] ^= y
.array
[2];
2089 x
.array
[1] ^= y
.array
[1];
2092 x
.array
[0] ^= y
.array
[0];
2100 static const i386_operand_type disp16_32
= OPERAND_TYPE_DISP16_32
;
2101 static const i386_operand_type anydisp
= OPERAND_TYPE_ANYDISP
;
2102 static const i386_operand_type anyimm
= OPERAND_TYPE_ANYIMM
;
2103 static const i386_operand_type regxmm
= OPERAND_TYPE_REGXMM
;
2104 static const i386_operand_type regmask
= OPERAND_TYPE_REGMASK
;
2105 static const i386_operand_type imm8
= OPERAND_TYPE_IMM8
;
2106 static const i386_operand_type imm8s
= OPERAND_TYPE_IMM8S
;
2107 static const i386_operand_type imm16
= OPERAND_TYPE_IMM16
;
2108 static const i386_operand_type imm32
= OPERAND_TYPE_IMM32
;
2109 static const i386_operand_type imm32s
= OPERAND_TYPE_IMM32S
;
2110 static const i386_operand_type imm64
= OPERAND_TYPE_IMM64
;
2111 static const i386_operand_type imm16_32
= OPERAND_TYPE_IMM16_32
;
2112 static const i386_operand_type imm16_32s
= OPERAND_TYPE_IMM16_32S
;
2113 static const i386_operand_type imm16_32_32s
= OPERAND_TYPE_IMM16_32_32S
;
2124 operand_type_check (i386_operand_type t
, enum operand_type c
)
2129 return t
.bitfield
.class == Reg
;
2132 return (t
.bitfield
.imm8
2136 || t
.bitfield
.imm32s
2137 || t
.bitfield
.imm64
);
2140 return (t
.bitfield
.disp8
2141 || t
.bitfield
.disp16
2142 || t
.bitfield
.disp32
2143 || t
.bitfield
.disp32s
2144 || t
.bitfield
.disp64
);
2147 return (t
.bitfield
.disp8
2148 || t
.bitfield
.disp16
2149 || t
.bitfield
.disp32
2150 || t
.bitfield
.disp32s
2151 || t
.bitfield
.disp64
2152 || t
.bitfield
.baseindex
);
2161 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
2162 between operand GIVEN and opeand WANTED for instruction template T. */
2165 match_operand_size (const insn_template
*t
, unsigned int wanted
,
2168 return !((i
.types
[given
].bitfield
.byte
2169 && !t
->operand_types
[wanted
].bitfield
.byte
)
2170 || (i
.types
[given
].bitfield
.word
2171 && !t
->operand_types
[wanted
].bitfield
.word
)
2172 || (i
.types
[given
].bitfield
.dword
2173 && !t
->operand_types
[wanted
].bitfield
.dword
)
2174 || (i
.types
[given
].bitfield
.qword
2175 && !t
->operand_types
[wanted
].bitfield
.qword
)
2176 || (i
.types
[given
].bitfield
.tbyte
2177 && !t
->operand_types
[wanted
].bitfield
.tbyte
));
2180 /* Return 1 if there is no conflict in SIMD register between operand
2181 GIVEN and opeand WANTED for instruction template T. */
2184 match_simd_size (const insn_template
*t
, unsigned int wanted
,
2187 return !((i
.types
[given
].bitfield
.xmmword
2188 && !t
->operand_types
[wanted
].bitfield
.xmmword
)
2189 || (i
.types
[given
].bitfield
.ymmword
2190 && !t
->operand_types
[wanted
].bitfield
.ymmword
)
2191 || (i
.types
[given
].bitfield
.zmmword
2192 && !t
->operand_types
[wanted
].bitfield
.zmmword
)
2193 || (i
.types
[given
].bitfield
.tmmword
2194 && !t
->operand_types
[wanted
].bitfield
.tmmword
));
2197 /* Return 1 if there is no conflict in any size between operand GIVEN
2198 and opeand WANTED for instruction template T. */
2201 match_mem_size (const insn_template
*t
, unsigned int wanted
,
2204 return (match_operand_size (t
, wanted
, given
)
2205 && !((i
.types
[given
].bitfield
.unspecified
2206 && !i
.broadcast
.type
2207 && !t
->operand_types
[wanted
].bitfield
.unspecified
)
2208 || (i
.types
[given
].bitfield
.fword
2209 && !t
->operand_types
[wanted
].bitfield
.fword
)
2210 /* For scalar opcode templates to allow register and memory
2211 operands at the same time, some special casing is needed
2212 here. Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
2213 down-conversion vpmov*. */
2214 || ((t
->operand_types
[wanted
].bitfield
.class == RegSIMD
2215 && t
->operand_types
[wanted
].bitfield
.byte
2216 + t
->operand_types
[wanted
].bitfield
.word
2217 + t
->operand_types
[wanted
].bitfield
.dword
2218 + t
->operand_types
[wanted
].bitfield
.qword
2219 > !!t
->opcode_modifier
.broadcast
)
2220 ? (i
.types
[given
].bitfield
.xmmword
2221 || i
.types
[given
].bitfield
.ymmword
2222 || i
.types
[given
].bitfield
.zmmword
)
2223 : !match_simd_size(t
, wanted
, given
))));
2226 /* Return value has MATCH_STRAIGHT set if there is no size conflict on any
2227 operands for instruction template T, and it has MATCH_REVERSE set if there
2228 is no size conflict on any operands for the template with operands reversed
2229 (and the template allows for reversing in the first place). */
2231 #define MATCH_STRAIGHT 1
2232 #define MATCH_REVERSE 2
2234 static INLINE
unsigned int
2235 operand_size_match (const insn_template
*t
)
2237 unsigned int j
, match
= MATCH_STRAIGHT
;
2239 /* Don't check non-absolute jump instructions. */
2240 if (t
->opcode_modifier
.jump
2241 && t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
2244 /* Check memory and accumulator operand size. */
2245 for (j
= 0; j
< i
.operands
; j
++)
2247 if (i
.types
[j
].bitfield
.class != Reg
2248 && i
.types
[j
].bitfield
.class != RegSIMD
2249 && t
->opcode_modifier
.anysize
)
2252 if (t
->operand_types
[j
].bitfield
.class == Reg
2253 && !match_operand_size (t
, j
, j
))
2259 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2260 && !match_simd_size (t
, j
, j
))
2266 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2267 && (!match_operand_size (t
, j
, j
) || !match_simd_size (t
, j
, j
)))
2273 if ((i
.flags
[j
] & Operand_Mem
) && !match_mem_size (t
, j
, j
))
2280 if (!t
->opcode_modifier
.d
)
2284 i
.error
= operand_size_mismatch
;
2288 /* Check reverse. */
2289 gas_assert (i
.operands
>= 2 && i
.operands
<= 3);
2291 for (j
= 0; j
< i
.operands
; j
++)
2293 unsigned int given
= i
.operands
- j
- 1;
2295 if (t
->operand_types
[j
].bitfield
.class == Reg
2296 && !match_operand_size (t
, j
, given
))
2299 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2300 && !match_simd_size (t
, j
, given
))
2303 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2304 && (!match_operand_size (t
, j
, given
)
2305 || !match_simd_size (t
, j
, given
)))
2308 if ((i
.flags
[given
] & Operand_Mem
) && !match_mem_size (t
, j
, given
))
2312 return match
| MATCH_REVERSE
;
2316 operand_type_match (i386_operand_type overlap
,
2317 i386_operand_type given
)
2319 i386_operand_type temp
= overlap
;
2321 temp
.bitfield
.unspecified
= 0;
2322 temp
.bitfield
.byte
= 0;
2323 temp
.bitfield
.word
= 0;
2324 temp
.bitfield
.dword
= 0;
2325 temp
.bitfield
.fword
= 0;
2326 temp
.bitfield
.qword
= 0;
2327 temp
.bitfield
.tbyte
= 0;
2328 temp
.bitfield
.xmmword
= 0;
2329 temp
.bitfield
.ymmword
= 0;
2330 temp
.bitfield
.zmmword
= 0;
2331 temp
.bitfield
.tmmword
= 0;
2332 if (operand_type_all_zero (&temp
))
2335 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
)
2339 i
.error
= operand_type_mismatch
;
2343 /* If given types g0 and g1 are registers they must be of the same type
2344 unless the expected operand type register overlap is null.
2345 Some Intel syntax memory operand size checking also happens here. */
2348 operand_type_register_match (i386_operand_type g0
,
2349 i386_operand_type t0
,
2350 i386_operand_type g1
,
2351 i386_operand_type t1
)
2353 if (g0
.bitfield
.class != Reg
2354 && g0
.bitfield
.class != RegSIMD
2355 && (!operand_type_check (g0
, anymem
)
2356 || g0
.bitfield
.unspecified
2357 || (t0
.bitfield
.class != Reg
2358 && t0
.bitfield
.class != RegSIMD
)))
2361 if (g1
.bitfield
.class != Reg
2362 && g1
.bitfield
.class != RegSIMD
2363 && (!operand_type_check (g1
, anymem
)
2364 || g1
.bitfield
.unspecified
2365 || (t1
.bitfield
.class != Reg
2366 && t1
.bitfield
.class != RegSIMD
)))
2369 if (g0
.bitfield
.byte
== g1
.bitfield
.byte
2370 && g0
.bitfield
.word
== g1
.bitfield
.word
2371 && g0
.bitfield
.dword
== g1
.bitfield
.dword
2372 && g0
.bitfield
.qword
== g1
.bitfield
.qword
2373 && g0
.bitfield
.xmmword
== g1
.bitfield
.xmmword
2374 && g0
.bitfield
.ymmword
== g1
.bitfield
.ymmword
2375 && g0
.bitfield
.zmmword
== g1
.bitfield
.zmmword
)
2378 /* If expectations overlap in no more than a single size, all is fine. */
2379 g0
= operand_type_and (t0
, t1
);
2380 if (g0
.bitfield
.byte
2384 + g0
.bitfield
.xmmword
2385 + g0
.bitfield
.ymmword
2386 + g0
.bitfield
.zmmword
<= 1)
2389 i
.error
= register_type_mismatch
;
2394 static INLINE
unsigned int
2395 register_number (const reg_entry
*r
)
2397 unsigned int nr
= r
->reg_num
;
2399 if (r
->reg_flags
& RegRex
)
2402 if (r
->reg_flags
& RegVRex
)
2408 static INLINE
unsigned int
2409 mode_from_disp_size (i386_operand_type t
)
2411 if (t
.bitfield
.disp8
)
2413 else if (t
.bitfield
.disp16
2414 || t
.bitfield
.disp32
2415 || t
.bitfield
.disp32s
)
2422 fits_in_signed_byte (addressT num
)
2424 return num
+ 0x80 <= 0xff;
2428 fits_in_unsigned_byte (addressT num
)
2434 fits_in_unsigned_word (addressT num
)
2436 return num
<= 0xffff;
2440 fits_in_signed_word (addressT num
)
2442 return num
+ 0x8000 <= 0xffff;
2446 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED
)
2451 return num
+ 0x80000000 <= 0xffffffff;
2453 } /* fits_in_signed_long() */
2456 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED
)
2461 return num
<= 0xffffffff;
2463 } /* fits_in_unsigned_long() */
2465 static INLINE valueT
extend_to_32bit_address (addressT num
)
2468 if (fits_in_unsigned_long(num
))
2469 return (num
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
2471 if (!fits_in_signed_long (num
))
2472 return num
& 0xffffffff;
2479 fits_in_disp8 (offsetT num
)
2481 int shift
= i
.memshift
;
2487 mask
= (1 << shift
) - 1;
2489 /* Return 0 if NUM isn't properly aligned. */
2493 /* Check if NUM will fit in 8bit after shift. */
2494 return fits_in_signed_byte (num
>> shift
);
2498 fits_in_imm4 (offsetT num
)
2500 return (num
& 0xf) == num
;
2503 static i386_operand_type
2504 smallest_imm_type (offsetT num
)
2506 i386_operand_type t
;
2508 operand_type_set (&t
, 0);
2509 t
.bitfield
.imm64
= 1;
2511 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
2513 /* This code is disabled on the 486 because all the Imm1 forms
2514 in the opcode table are slower on the i486. They're the
2515 versions with the implicitly specified single-position
2516 displacement, which has another syntax if you really want to
2518 t
.bitfield
.imm1
= 1;
2519 t
.bitfield
.imm8
= 1;
2520 t
.bitfield
.imm8s
= 1;
2521 t
.bitfield
.imm16
= 1;
2522 t
.bitfield
.imm32
= 1;
2523 t
.bitfield
.imm32s
= 1;
2525 else if (fits_in_signed_byte (num
))
2527 t
.bitfield
.imm8
= 1;
2528 t
.bitfield
.imm8s
= 1;
2529 t
.bitfield
.imm16
= 1;
2530 t
.bitfield
.imm32
= 1;
2531 t
.bitfield
.imm32s
= 1;
2533 else if (fits_in_unsigned_byte (num
))
2535 t
.bitfield
.imm8
= 1;
2536 t
.bitfield
.imm16
= 1;
2537 t
.bitfield
.imm32
= 1;
2538 t
.bitfield
.imm32s
= 1;
2540 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
2542 t
.bitfield
.imm16
= 1;
2543 t
.bitfield
.imm32
= 1;
2544 t
.bitfield
.imm32s
= 1;
2546 else if (fits_in_signed_long (num
))
2548 t
.bitfield
.imm32
= 1;
2549 t
.bitfield
.imm32s
= 1;
2551 else if (fits_in_unsigned_long (num
))
2552 t
.bitfield
.imm32
= 1;
2558 offset_in_range (offsetT val
, int size
)
2564 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
2565 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
2567 case 4: mask
= ((addressT
) 1 << 32) - 1; break;
2569 case sizeof (val
): return val
;
2573 if ((val
& ~mask
) != 0 && (-val
& ~mask
) != 0)
2576 char masked_buf
[128];
2578 /* Coded this way in order to ease translation. */
2579 sprintf_vma (val_buf
, val
);
2580 sprintf_vma (masked_buf
, val
& mask
);
2581 as_warn (_("0x%s shortened to 0x%s"), val_buf
, masked_buf
);
2597 a. PREFIX_EXIST if attempting to add a prefix where one from the
2598 same class already exists.
2599 b. PREFIX_LOCK if lock prefix is added.
2600 c. PREFIX_REP if rep/repne prefix is added.
2601 d. PREFIX_DS if ds prefix is added.
2602 e. PREFIX_OTHER if other prefix is added.
2605 static enum PREFIX_GROUP
2606 add_prefix (unsigned int prefix
)
2608 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2611 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2612 && flag_code
== CODE_64BIT
)
2614 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2615 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_R
)
2616 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_X
)
2617 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_B
))
2628 case DS_PREFIX_OPCODE
:
2631 case CS_PREFIX_OPCODE
:
2632 case ES_PREFIX_OPCODE
:
2633 case FS_PREFIX_OPCODE
:
2634 case GS_PREFIX_OPCODE
:
2635 case SS_PREFIX_OPCODE
:
2639 case REPNE_PREFIX_OPCODE
:
2640 case REPE_PREFIX_OPCODE
:
2645 case LOCK_PREFIX_OPCODE
:
2654 case ADDR_PREFIX_OPCODE
:
2658 case DATA_PREFIX_OPCODE
:
2662 if (i
.prefix
[q
] != 0)
2670 i
.prefix
[q
] |= prefix
;
2673 as_bad (_("same type of prefix used twice"));
2679 update_code_flag (int value
, int check
)
2681 PRINTF_LIKE ((*as_error
));
2683 flag_code
= (enum flag_code
) value
;
2684 if (flag_code
== CODE_64BIT
)
2686 cpu_arch_flags
.bitfield
.cpu64
= 1;
2687 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2691 cpu_arch_flags
.bitfield
.cpu64
= 0;
2692 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2694 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2697 as_error
= as_fatal
;
2700 (*as_error
) (_("64bit mode not supported on `%s'."),
2701 cpu_arch_name
? cpu_arch_name
: default_arch
);
2703 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2706 as_error
= as_fatal
;
2709 (*as_error
) (_("32bit mode not supported on `%s'."),
2710 cpu_arch_name
? cpu_arch_name
: default_arch
);
2712 stackop_size
= '\0';
/* Pseudo-op handler: switch the assembler's code size (16/32/64-bit)
   without the extra consistency checking of update_code_flag.  */
static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2722 set_16bit_gcc_code_flag (int new_code_flag
)
2724 flag_code
= (enum flag_code
) new_code_flag
;
2725 if (flag_code
!= CODE_16BIT
)
2727 cpu_arch_flags
.bitfield
.cpu64
= 0;
2728 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2729 stackop_size
= LONG_MNEM_SUFFIX
;
2733 set_intel_syntax (int syntax_flag
)
2735 /* Find out if register prefixing is specified. */
2736 int ask_naked_reg
= 0;
2739 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2742 int e
= get_symbol_name (&string
);
2744 if (strcmp (string
, "prefix") == 0)
2746 else if (strcmp (string
, "noprefix") == 0)
2749 as_bad (_("bad argument to syntax directive."));
2750 (void) restore_line_pointer (e
);
2752 demand_empty_rest_of_line ();
2754 intel_syntax
= syntax_flag
;
2756 if (ask_naked_reg
== 0)
2757 allow_naked_reg
= (intel_syntax
2758 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2760 allow_naked_reg
= (ask_naked_reg
< 0);
2762 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2764 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2765 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2766 register_prefix
= allow_naked_reg
? "" : "%";
2770 set_intel_mnemonic (int mnemonic_flag
)
2772 intel_mnemonic
= mnemonic_flag
;
2776 set_allow_index_reg (int flag
)
2778 allow_index_reg
= flag
;
2782 set_check (int what
)
2784 enum check_kind
*kind
;
2789 kind
= &operand_check
;
2800 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2803 int e
= get_symbol_name (&string
);
2805 if (strcmp (string
, "none") == 0)
2807 else if (strcmp (string
, "warning") == 0)
2808 *kind
= check_warning
;
2809 else if (strcmp (string
, "error") == 0)
2810 *kind
= check_error
;
2812 as_bad (_("bad argument to %s_check directive."), str
);
2813 (void) restore_line_pointer (e
);
2816 as_bad (_("missing argument for %s_check directive"), str
);
2818 demand_empty_rest_of_line ();
2822 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2823 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2825 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2826 static const char *arch
;
2828 /* Intel MCU is only supported on ELF. */
2834 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2835 use default_arch. */
2836 arch
= cpu_arch_name
;
2838 arch
= default_arch
;
2841 /* If we are targeting Intel MCU, we must enable it. */
2842 if ((get_elf_backend_data (stdoutput
)->elf_machine_code
== EM_IAMCU
)
2843 == new_flag
.bitfield
.cpuiamcu
)
2846 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2851 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2855 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2858 int e
= get_symbol_name (&string
);
2860 i386_cpu_flags flags
;
2862 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2864 if (strcmp (string
, cpu_arch
[j
].name
) == 0)
2868 check_cpu_arch_compatible (string
, cpu_arch
[j
].flags
);
2870 cpu_arch_name
= cpu_arch
[j
].name
;
2871 cpu_sub_arch_name
= NULL
;
2872 cpu_arch_flags
= cpu_arch
[j
].flags
;
2873 if (flag_code
== CODE_64BIT
)
2875 cpu_arch_flags
.bitfield
.cpu64
= 1;
2876 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2880 cpu_arch_flags
.bitfield
.cpu64
= 0;
2881 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2883 cpu_arch_isa
= cpu_arch
[j
].type
;
2884 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
2885 if (!cpu_arch_tune_set
)
2887 cpu_arch_tune
= cpu_arch_isa
;
2888 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2893 flags
= cpu_flags_or (cpu_arch_flags
,
2896 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2898 if (cpu_sub_arch_name
)
2900 char *name
= cpu_sub_arch_name
;
2901 cpu_sub_arch_name
= concat (name
,
2903 (const char *) NULL
);
2907 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
2908 cpu_arch_flags
= flags
;
2909 cpu_arch_isa_flags
= flags
;
2913 = cpu_flags_or (cpu_arch_isa_flags
,
2915 (void) restore_line_pointer (e
);
2916 demand_empty_rest_of_line ();
2921 if (*string
== '.' && j
>= ARRAY_SIZE (cpu_arch
))
2923 /* Disable an ISA extension. */
2924 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
2925 if (strcmp (string
+ 1, cpu_noarch
[j
].name
) == 0)
2927 flags
= cpu_flags_and_not (cpu_arch_flags
,
2928 cpu_noarch
[j
].flags
);
2929 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2931 if (cpu_sub_arch_name
)
2933 char *name
= cpu_sub_arch_name
;
2934 cpu_sub_arch_name
= concat (name
, string
,
2935 (const char *) NULL
);
2939 cpu_sub_arch_name
= xstrdup (string
);
2940 cpu_arch_flags
= flags
;
2941 cpu_arch_isa_flags
= flags
;
2943 (void) restore_line_pointer (e
);
2944 demand_empty_rest_of_line ();
2948 j
= ARRAY_SIZE (cpu_arch
);
2951 if (j
>= ARRAY_SIZE (cpu_arch
))
2952 as_bad (_("no such architecture: `%s'"), string
);
2954 *input_line_pointer
= e
;
2957 as_bad (_("missing cpu architecture"));
2959 no_cond_jump_promotion
= 0;
2960 if (*input_line_pointer
== ','
2961 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2966 ++input_line_pointer
;
2967 e
= get_symbol_name (&string
);
2969 if (strcmp (string
, "nojumps") == 0)
2970 no_cond_jump_promotion
= 1;
2971 else if (strcmp (string
, "jumps") == 0)
2974 as_bad (_("no such architecture modifier: `%s'"), string
);
2976 (void) restore_line_pointer (e
);
2979 demand_empty_rest_of_line ();
2982 enum bfd_architecture
2985 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2987 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2988 || flag_code
== CODE_64BIT
)
2989 as_fatal (_("Intel MCU is 32bit ELF only"));
2990 return bfd_arch_iamcu
;
2993 return bfd_arch_i386
;
2999 if (startswith (default_arch
, "x86_64"))
3001 if (default_arch
[6] == '\0')
3002 return bfd_mach_x86_64
;
3004 return bfd_mach_x64_32
;
3006 else if (!strcmp (default_arch
, "i386")
3007 || !strcmp (default_arch
, "iamcu"))
3009 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
3011 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
)
3012 as_fatal (_("Intel MCU is 32bit ELF only"));
3013 return bfd_mach_i386_iamcu
;
3016 return bfd_mach_i386_i386
;
3019 as_fatal (_("unknown architecture"));
3025 /* Support pseudo prefixes like {disp32}. */
3026 lex_type
['{'] = LEX_BEGIN_NAME
;
3028 /* Initialize op_hash hash table. */
3029 op_hash
= str_htab_create ();
3032 const insn_template
*optab
;
3033 templates
*core_optab
;
3035 /* Setup for loop. */
3037 core_optab
= XNEW (templates
);
3038 core_optab
->start
= optab
;
3043 if (optab
->name
== NULL
3044 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
3046 /* different name --> ship out current template list;
3047 add to hash table; & begin anew. */
3048 core_optab
->end
= optab
;
3049 if (str_hash_insert (op_hash
, (optab
- 1)->name
, core_optab
, 0))
3050 as_fatal (_("duplicate %s"), (optab
- 1)->name
);
3052 if (optab
->name
== NULL
)
3054 core_optab
= XNEW (templates
);
3055 core_optab
->start
= optab
;
3060 /* Initialize reg_hash hash table. */
3061 reg_hash
= str_htab_create ();
3063 const reg_entry
*regtab
;
3064 unsigned int regtab_size
= i386_regtab_size
;
3066 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
3068 switch (regtab
->reg_type
.bitfield
.class)
3071 if (regtab
->reg_type
.bitfield
.dword
)
3073 if (regtab
->reg_type
.bitfield
.instance
== Accum
)
3076 else if (regtab
->reg_type
.bitfield
.tbyte
)
3078 /* There's no point inserting st(<N>) in the hash table, as
3079 parentheses aren't included in register_chars[] anyway. */
3080 if (regtab
->reg_type
.bitfield
.instance
!= Accum
)
3087 switch (regtab
->reg_num
)
3089 case 0: reg_es
= regtab
; break;
3090 case 2: reg_ss
= regtab
; break;
3091 case 3: reg_ds
= regtab
; break;
3096 if (!regtab
->reg_num
)
3101 if (str_hash_insert (reg_hash
, regtab
->reg_name
, regtab
, 0) != NULL
)
3102 as_fatal (_("duplicate %s"), regtab
->reg_name
);
3106 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
3111 for (c
= 0; c
< 256; c
++)
3113 if (ISDIGIT (c
) || ISLOWER (c
))
3115 mnemonic_chars
[c
] = c
;
3116 register_chars
[c
] = c
;
3117 operand_chars
[c
] = c
;
3119 else if (ISUPPER (c
))
3121 mnemonic_chars
[c
] = TOLOWER (c
);
3122 register_chars
[c
] = mnemonic_chars
[c
];
3123 operand_chars
[c
] = c
;
3125 else if (c
== '{' || c
== '}')
3127 mnemonic_chars
[c
] = c
;
3128 operand_chars
[c
] = c
;
3130 #ifdef SVR4_COMMENT_CHARS
3131 else if (c
== '\\' && strchr (i386_comment_chars
, '/'))
3132 operand_chars
[c
] = c
;
3135 if (ISALPHA (c
) || ISDIGIT (c
))
3136 identifier_chars
[c
] = c
;
3139 identifier_chars
[c
] = c
;
3140 operand_chars
[c
] = c
;
3145 identifier_chars
['@'] = '@';
3148 identifier_chars
['?'] = '?';
3149 operand_chars
['?'] = '?';
3151 mnemonic_chars
['_'] = '_';
3152 mnemonic_chars
['-'] = '-';
3153 mnemonic_chars
['.'] = '.';
3154 identifier_chars
['_'] = '_';
3155 identifier_chars
['.'] = '.';
3157 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
3158 operand_chars
[(unsigned char) *p
] = *p
;
3161 if (flag_code
== CODE_64BIT
)
3163 #if defined (OBJ_COFF) && defined (TE_PE)
3164 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
3167 x86_dwarf2_return_column
= 16;
3169 x86_cie_data_alignment
= -8;
3173 x86_dwarf2_return_column
= 8;
3174 x86_cie_data_alignment
= -4;
3177 /* NB: FUSED_JCC_PADDING frag must have sufficient room so that it
3178 can be turned into BRANCH_PREFIX frag. */
3179 if (align_branch_prefix_size
> MAX_FUSED_JCC_PADDING_SIZE
)
3184 i386_print_statistics (FILE *file
)
3186 htab_print_statistics (file
, "i386 opcode", op_hash
);
3187 htab_print_statistics (file
, "i386 register", reg_hash
);
3192 /* Debugging routines for md_assemble. */
3193 static void pte (insn_template
*);
3194 static void pt (i386_operand_type
);
3195 static void pe (expressionS
*);
3196 static void ps (symbolS
*);
3199 pi (const char *line
, i386_insn
*x
)
3203 fprintf (stdout
, "%s: template ", line
);
3205 fprintf (stdout
, " address: base %s index %s scale %x\n",
3206 x
->base_reg
? x
->base_reg
->reg_name
: "none",
3207 x
->index_reg
? x
->index_reg
->reg_name
: "none",
3208 x
->log2_scale_factor
);
3209 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
3210 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
3211 fprintf (stdout
, " sib: base %x index %x scale %x\n",
3212 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
3213 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
3214 (x
->rex
& REX_W
) != 0,
3215 (x
->rex
& REX_R
) != 0,
3216 (x
->rex
& REX_X
) != 0,
3217 (x
->rex
& REX_B
) != 0);
3218 for (j
= 0; j
< x
->operands
; j
++)
3220 fprintf (stdout
, " #%d: ", j
+ 1);
3222 fprintf (stdout
, "\n");
3223 if (x
->types
[j
].bitfield
.class == Reg
3224 || x
->types
[j
].bitfield
.class == RegMMX
3225 || x
->types
[j
].bitfield
.class == RegSIMD
3226 || x
->types
[j
].bitfield
.class == RegMask
3227 || x
->types
[j
].bitfield
.class == SReg
3228 || x
->types
[j
].bitfield
.class == RegCR
3229 || x
->types
[j
].bitfield
.class == RegDR
3230 || x
->types
[j
].bitfield
.class == RegTR
3231 || x
->types
[j
].bitfield
.class == RegBND
)
3232 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
3233 if (operand_type_check (x
->types
[j
], imm
))
3235 if (operand_type_check (x
->types
[j
], disp
))
3236 pe (x
->op
[j
].disps
);
3241 pte (insn_template
*t
)
3243 static const unsigned char opc_pfx
[] = { 0, 0x66, 0xf3, 0xf2 };
3244 static const char *const opc_spc
[] = {
3245 NULL
, "0f", "0f38", "0f3a", NULL
, "evexmap5", "evexmap6", NULL
,
3246 "XOP08", "XOP09", "XOP0A",
3250 fprintf (stdout
, " %d operands ", t
->operands
);
3251 if (opc_pfx
[t
->opcode_modifier
.opcodeprefix
])
3252 fprintf (stdout
, "pfx %x ", opc_pfx
[t
->opcode_modifier
.opcodeprefix
]);
3253 if (opc_spc
[t
->opcode_modifier
.opcodespace
])
3254 fprintf (stdout
, "space %s ", opc_spc
[t
->opcode_modifier
.opcodespace
]);
3255 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
3256 if (t
->extension_opcode
!= None
)
3257 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
3258 if (t
->opcode_modifier
.d
)
3259 fprintf (stdout
, "D");
3260 if (t
->opcode_modifier
.w
)
3261 fprintf (stdout
, "W");
3262 fprintf (stdout
, "\n");
3263 for (j
= 0; j
< t
->operands
; j
++)
3265 fprintf (stdout
, " #%d type ", j
+ 1);
3266 pt (t
->operand_types
[j
]);
3267 fprintf (stdout
, "\n");
3274 fprintf (stdout
, " operation %d\n", e
->X_op
);
3275 fprintf (stdout
, " add_number %" BFD_VMA_FMT
"d (%" BFD_VMA_FMT
"x)\n",
3276 e
->X_add_number
, e
->X_add_number
);
3277 if (e
->X_add_symbol
)
3279 fprintf (stdout
, " add_symbol ");
3280 ps (e
->X_add_symbol
);
3281 fprintf (stdout
, "\n");
3285 fprintf (stdout
, " op_symbol ");
3286 ps (e
->X_op_symbol
);
3287 fprintf (stdout
, "\n");
3294 fprintf (stdout
, "%s type %s%s",
3296 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
3297 segment_name (S_GET_SEGMENT (s
)));
3300 static struct type_name
3302 i386_operand_type mask
;
3305 const type_names
[] =
3307 { OPERAND_TYPE_REG8
, "r8" },
3308 { OPERAND_TYPE_REG16
, "r16" },
3309 { OPERAND_TYPE_REG32
, "r32" },
3310 { OPERAND_TYPE_REG64
, "r64" },
3311 { OPERAND_TYPE_ACC8
, "acc8" },
3312 { OPERAND_TYPE_ACC16
, "acc16" },
3313 { OPERAND_TYPE_ACC32
, "acc32" },
3314 { OPERAND_TYPE_ACC64
, "acc64" },
3315 { OPERAND_TYPE_IMM8
, "i8" },
3316 { OPERAND_TYPE_IMM8
, "i8s" },
3317 { OPERAND_TYPE_IMM16
, "i16" },
3318 { OPERAND_TYPE_IMM32
, "i32" },
3319 { OPERAND_TYPE_IMM32S
, "i32s" },
3320 { OPERAND_TYPE_IMM64
, "i64" },
3321 { OPERAND_TYPE_IMM1
, "i1" },
3322 { OPERAND_TYPE_BASEINDEX
, "BaseIndex" },
3323 { OPERAND_TYPE_DISP8
, "d8" },
3324 { OPERAND_TYPE_DISP16
, "d16" },
3325 { OPERAND_TYPE_DISP32
, "d32" },
3326 { OPERAND_TYPE_DISP32S
, "d32s" },
3327 { OPERAND_TYPE_DISP64
, "d64" },
3328 { OPERAND_TYPE_INOUTPORTREG
, "InOutPortReg" },
3329 { OPERAND_TYPE_SHIFTCOUNT
, "ShiftCount" },
3330 { OPERAND_TYPE_CONTROL
, "control reg" },
3331 { OPERAND_TYPE_TEST
, "test reg" },
3332 { OPERAND_TYPE_DEBUG
, "debug reg" },
3333 { OPERAND_TYPE_FLOATREG
, "FReg" },
3334 { OPERAND_TYPE_FLOATACC
, "FAcc" },
3335 { OPERAND_TYPE_SREG
, "SReg" },
3336 { OPERAND_TYPE_REGMMX
, "rMMX" },
3337 { OPERAND_TYPE_REGXMM
, "rXMM" },
3338 { OPERAND_TYPE_REGYMM
, "rYMM" },
3339 { OPERAND_TYPE_REGZMM
, "rZMM" },
3340 { OPERAND_TYPE_REGTMM
, "rTMM" },
3341 { OPERAND_TYPE_REGMASK
, "Mask reg" },
3345 pt (i386_operand_type t
)
3348 i386_operand_type a
;
3350 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
3352 a
= operand_type_and (t
, type_names
[j
].mask
);
3353 if (operand_type_equal (&a
, &type_names
[j
].mask
))
3354 fprintf (stdout
, "%s, ", type_names
[j
].name
);
3359 #endif /* DEBUG386 */
3361 static bfd_reloc_code_real_type
3362 reloc (unsigned int size
,
3365 bfd_reloc_code_real_type other
)
3367 if (other
!= NO_RELOC
)
3369 reloc_howto_type
*rel
;
3374 case BFD_RELOC_X86_64_GOT32
:
3375 return BFD_RELOC_X86_64_GOT64
;
3377 case BFD_RELOC_X86_64_GOTPLT64
:
3378 return BFD_RELOC_X86_64_GOTPLT64
;
3380 case BFD_RELOC_X86_64_PLTOFF64
:
3381 return BFD_RELOC_X86_64_PLTOFF64
;
3383 case BFD_RELOC_X86_64_GOTPC32
:
3384 other
= BFD_RELOC_X86_64_GOTPC64
;
3386 case BFD_RELOC_X86_64_GOTPCREL
:
3387 other
= BFD_RELOC_X86_64_GOTPCREL64
;
3389 case BFD_RELOC_X86_64_TPOFF32
:
3390 other
= BFD_RELOC_X86_64_TPOFF64
;
3392 case BFD_RELOC_X86_64_DTPOFF32
:
3393 other
= BFD_RELOC_X86_64_DTPOFF64
;
3399 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3400 if (other
== BFD_RELOC_SIZE32
)
3403 other
= BFD_RELOC_SIZE64
;
3406 as_bad (_("there are no pc-relative size relocations"));
3412 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
3413 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
3416 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
3418 as_bad (_("unknown relocation (%u)"), other
);
3419 else if (size
!= bfd_get_reloc_size (rel
))
3420 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3421 bfd_get_reloc_size (rel
),
3423 else if (pcrel
&& !rel
->pc_relative
)
3424 as_bad (_("non-pc-relative relocation for pc-relative field"));
3425 else if ((rel
->complain_on_overflow
== complain_overflow_signed
3427 || (rel
->complain_on_overflow
== complain_overflow_unsigned
3429 as_bad (_("relocated field and relocation type differ in signedness"));
3438 as_bad (_("there are no unsigned pc-relative relocations"));
3441 case 1: return BFD_RELOC_8_PCREL
;
3442 case 2: return BFD_RELOC_16_PCREL
;
3443 case 4: return BFD_RELOC_32_PCREL
;
3444 case 8: return BFD_RELOC_64_PCREL
;
3446 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
3453 case 4: return BFD_RELOC_X86_64_32S
;
3458 case 1: return BFD_RELOC_8
;
3459 case 2: return BFD_RELOC_16
;
3460 case 4: return BFD_RELOC_32
;
3461 case 8: return BFD_RELOC_64
;
3463 as_bad (_("cannot do %s %u byte relocation"),
3464 sign
> 0 ? "signed" : "unsigned", size
);
3470 /* Here we decide which fixups can be adjusted to make them relative to
3471 the beginning of the section instead of the symbol. Basically we need
3472 to make sure that the dynamic relocations are done correctly, so in
3473 some cases we force the original symbol to be used. */
3476 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
3478 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3482 /* Don't adjust pc-relative references to merge sections in 64-bit
3484 if (use_rela_relocations
3485 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
3489 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3490 and changed later by validate_fix. */
3491 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
3492 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
3495 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3496 for size relocations. */
3497 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
3498 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
3499 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
3500 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
3501 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32X
3502 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
3503 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
3504 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
3505 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
3506 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
3507 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
3508 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
3509 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
3510 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
3511 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
3512 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
3513 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
3514 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCRELX
3515 || fixP
->fx_r_type
== BFD_RELOC_X86_64_REX_GOTPCRELX
3516 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
3517 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
3518 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
3519 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
3520 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
3521 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
3522 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
3523 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
3524 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
3525 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
3526 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
3527 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
3534 want_disp32 (const insn_template
*t
)
3536 return flag_code
!= CODE_64BIT
3537 || i
.prefix
[ADDR_PREFIX
]
3538 || (t
->base_opcode
== 0x8d
3539 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
3540 && (!i
.types
[1].bitfield
.qword
3541 || t
->opcode_modifier
.size
== SIZE32
));
3545 intel_float_operand (const char *mnemonic
)
3547 /* Note that the value returned is meaningful only for opcodes with (memory)
3548 operands, hence the code here is free to improperly handle opcodes that
3549 have no operands (for better performance and smaller code). */
3551 if (mnemonic
[0] != 'f')
3552 return 0; /* non-math */
3554 switch (mnemonic
[1])
3556 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
3557 the fs segment override prefix not currently handled because no
3558 call path can make opcodes without operands get here */
3560 return 2 /* integer op */;
3562 if (mnemonic
[2] == 'd' && (mnemonic
[3] == 'c' || mnemonic
[3] == 'e'))
3563 return 3; /* fldcw/fldenv */
3566 if (mnemonic
[2] != 'o' /* fnop */)
3567 return 3; /* non-waiting control op */
3570 if (mnemonic
[2] == 's')
3571 return 3; /* frstor/frstpm */
3574 if (mnemonic
[2] == 'a')
3575 return 3; /* fsave */
3576 if (mnemonic
[2] == 't')
3578 switch (mnemonic
[3])
3580 case 'c': /* fstcw */
3581 case 'd': /* fstdw */
3582 case 'e': /* fstenv */
3583 case 's': /* fsts[gw] */
3589 if (mnemonic
[2] == 'r' || mnemonic
[2] == 's')
3590 return 0; /* fxsave/fxrstor are not really math ops */
3598 install_template (const insn_template
*t
)
3604 /* Note that for pseudo prefixes this produces a length of 1. But for them
3605 the length isn't interesting at all. */
3606 for (l
= 1; l
< 4; ++l
)
3607 if (!(t
->base_opcode
>> (8 * l
)))
3610 i
.opcode_length
= l
;
3613 /* Build the VEX prefix. */
3616 build_vex_prefix (const insn_template
*t
)
3618 unsigned int register_specifier
;
3619 unsigned int vector_length
;
3622 /* Check register specifier. */
3623 if (i
.vex
.register_specifier
)
3625 register_specifier
=
3626 ~register_number (i
.vex
.register_specifier
) & 0xf;
3627 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
3630 register_specifier
= 0xf;
3632 /* Use 2-byte VEX prefix by swapping destination and source operand
3633 if there are more than 1 register operand. */
3634 if (i
.reg_operands
> 1
3635 && i
.vec_encoding
!= vex_encoding_vex3
3636 && i
.dir_encoding
== dir_encoding_default
3637 && i
.operands
== i
.reg_operands
3638 && operand_type_equal (&i
.types
[0], &i
.types
[i
.operands
- 1])
3639 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
3640 && (i
.tm
.opcode_modifier
.load
|| i
.tm
.opcode_modifier
.d
)
3643 unsigned int xchg
= i
.operands
- 1;
3644 union i386_op temp_op
;
3645 i386_operand_type temp_type
;
3647 temp_type
= i
.types
[xchg
];
3648 i
.types
[xchg
] = i
.types
[0];
3649 i
.types
[0] = temp_type
;
3650 temp_op
= i
.op
[xchg
];
3651 i
.op
[xchg
] = i
.op
[0];
3654 gas_assert (i
.rm
.mode
== 3);
3658 i
.rm
.regmem
= i
.rm
.reg
;
3661 if (i
.tm
.opcode_modifier
.d
)
3662 i
.tm
.base_opcode
^= (i
.tm
.base_opcode
& 0xee) != 0x6e
3663 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
3664 else /* Use the next insn. */
3665 install_template (&t
[1]);
3668 /* Use 2-byte VEX prefix by swapping commutative source operands if there
3669 are no memory operands and at least 3 register ones. */
3670 if (i
.reg_operands
>= 3
3671 && i
.vec_encoding
!= vex_encoding_vex3
3672 && i
.reg_operands
== i
.operands
- i
.imm_operands
3673 && i
.tm
.opcode_modifier
.vex
3674 && i
.tm
.opcode_modifier
.commutative
3675 && (i
.tm
.opcode_modifier
.sse2avx
|| optimize
> 1)
3677 && i
.vex
.register_specifier
3678 && !(i
.vex
.register_specifier
->reg_flags
& RegRex
))
3680 unsigned int xchg
= i
.operands
- i
.reg_operands
;
3681 union i386_op temp_op
;
3682 i386_operand_type temp_type
;
3684 gas_assert (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
);
3685 gas_assert (!i
.tm
.opcode_modifier
.sae
);
3686 gas_assert (operand_type_equal (&i
.types
[i
.operands
- 2],
3687 &i
.types
[i
.operands
- 3]));
3688 gas_assert (i
.rm
.mode
== 3);
3690 temp_type
= i
.types
[xchg
];
3691 i
.types
[xchg
] = i
.types
[xchg
+ 1];
3692 i
.types
[xchg
+ 1] = temp_type
;
3693 temp_op
= i
.op
[xchg
];
3694 i
.op
[xchg
] = i
.op
[xchg
+ 1];
3695 i
.op
[xchg
+ 1] = temp_op
;
3698 xchg
= i
.rm
.regmem
| 8;
3699 i
.rm
.regmem
= ~register_specifier
& 0xf;
3700 gas_assert (!(i
.rm
.regmem
& 8));
3701 i
.vex
.register_specifier
+= xchg
- i
.rm
.regmem
;
3702 register_specifier
= ~xchg
& 0xf;
3705 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3706 vector_length
= avxscalar
;
3707 else if (i
.tm
.opcode_modifier
.vex
== VEX256
)
3713 /* Determine vector length from the last multi-length vector
3716 for (op
= t
->operands
; op
--;)
3717 if (t
->operand_types
[op
].bitfield
.xmmword
3718 && t
->operand_types
[op
].bitfield
.ymmword
3719 && i
.types
[op
].bitfield
.ymmword
)
3726 /* Check the REX.W bit and VEXW. */
3727 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3728 w
= (vexwig
== vexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3729 else if (i
.tm
.opcode_modifier
.vexw
)
3730 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3732 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: vexwig
== vexw1
) ? 1 : 0;
3734 /* Use 2-byte VEX prefix if possible. */
3736 && i
.vec_encoding
!= vex_encoding_vex3
3737 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
3738 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
3740 /* 2-byte VEX prefix. */
3744 i
.vex
.bytes
[0] = 0xc5;
3746 /* Check the REX.R bit. */
3747 r
= (i
.rex
& REX_R
) ? 0 : 1;
3748 i
.vex
.bytes
[1] = (r
<< 7
3749 | register_specifier
<< 3
3750 | vector_length
<< 2
3751 | i
.tm
.opcode_modifier
.opcodeprefix
);
3755 /* 3-byte VEX prefix. */
3758 switch (i
.tm
.opcode_modifier
.opcodespace
)
3763 i
.vex
.bytes
[0] = 0xc4;
3768 i
.vex
.bytes
[0] = 0x8f;
3774 /* The high 3 bits of the second VEX byte are 1's compliment
3775 of RXB bits from REX. */
3776 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | i
.tm
.opcode_modifier
.opcodespace
;
3778 i
.vex
.bytes
[2] = (w
<< 7
3779 | register_specifier
<< 3
3780 | vector_length
<< 2
3781 | i
.tm
.opcode_modifier
.opcodeprefix
);
3786 is_evex_encoding (const insn_template
*t
)
3788 return t
->opcode_modifier
.evex
|| t
->opcode_modifier
.disp8memshift
3789 || t
->opcode_modifier
.broadcast
|| t
->opcode_modifier
.masking
3790 || t
->opcode_modifier
.sae
;
3794 is_any_vex_encoding (const insn_template
*t
)
3796 return t
->opcode_modifier
.vex
|| is_evex_encoding (t
);
3799 /* Build the EVEX prefix. */
3802 build_evex_prefix (void)
3804 unsigned int register_specifier
, w
;
3805 rex_byte vrex_used
= 0;
3807 /* Check register specifier. */
3808 if (i
.vex
.register_specifier
)
3810 gas_assert ((i
.vrex
& REX_X
) == 0);
3812 register_specifier
= i
.vex
.register_specifier
->reg_num
;
3813 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
3814 register_specifier
+= 8;
3815 /* The upper 16 registers are encoded in the fourth byte of the
3817 if (!(i
.vex
.register_specifier
->reg_flags
& RegVRex
))
3818 i
.vex
.bytes
[3] = 0x8;
3819 register_specifier
= ~register_specifier
& 0xf;
3823 register_specifier
= 0xf;
3825 /* Encode upper 16 vector index register in the fourth byte of
3827 if (!(i
.vrex
& REX_X
))
3828 i
.vex
.bytes
[3] = 0x8;
3833 /* 4 byte EVEX prefix. */
3835 i
.vex
.bytes
[0] = 0x62;
3837 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3839 gas_assert (i
.tm
.opcode_modifier
.opcodespace
>= SPACE_0F
);
3840 gas_assert (i
.tm
.opcode_modifier
.opcodespace
<= SPACE_EVEXMAP6
);
3841 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | i
.tm
.opcode_modifier
.opcodespace
;
3843 /* The fifth bit of the second EVEX byte is 1's compliment of the
3844 REX_R bit in VREX. */
3845 if (!(i
.vrex
& REX_R
))
3846 i
.vex
.bytes
[1] |= 0x10;
3850 if ((i
.reg_operands
+ i
.imm_operands
) == i
.operands
)
3852 /* When all operands are registers, the REX_X bit in REX is not
3853 used. We reuse it to encode the upper 16 registers, which is
3854 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3855 as 1's compliment. */
3856 if ((i
.vrex
& REX_B
))
3859 i
.vex
.bytes
[1] &= ~0x40;
3863 /* EVEX instructions shouldn't need the REX prefix. */
3864 i
.vrex
&= ~vrex_used
;
3865 gas_assert (i
.vrex
== 0);
3867 /* Check the REX.W bit and VEXW. */
3868 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3869 w
= (evexwig
== evexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3870 else if (i
.tm
.opcode_modifier
.vexw
)
3871 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3873 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: evexwig
== evexw1
) ? 1 : 0;
3875 /* The third byte of the EVEX prefix. */
3876 i
.vex
.bytes
[2] = ((w
<< 7)
3877 | (register_specifier
<< 3)
3878 | 4 /* Encode the U bit. */
3879 | i
.tm
.opcode_modifier
.opcodeprefix
);
3881 /* The fourth byte of the EVEX prefix. */
3882 /* The zeroing-masking bit. */
3883 if (i
.mask
.reg
&& i
.mask
.zeroing
)
3884 i
.vex
.bytes
[3] |= 0x80;
3886 /* Don't always set the broadcast bit if there is no RC. */
3887 if (i
.rounding
.type
== rc_none
)
3889 /* Encode the vector length. */
3890 unsigned int vec_length
;
3892 if (!i
.tm
.opcode_modifier
.evex
3893 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
3897 /* Determine vector length from the last multi-length vector
3899 for (op
= i
.operands
; op
--;)
3900 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
3901 + i
.tm
.operand_types
[op
].bitfield
.ymmword
3902 + i
.tm
.operand_types
[op
].bitfield
.zmmword
> 1)
3904 if (i
.types
[op
].bitfield
.zmmword
)
3906 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3909 else if (i
.types
[op
].bitfield
.ymmword
)
3911 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3914 else if (i
.types
[op
].bitfield
.xmmword
)
3916 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3919 else if (i
.broadcast
.type
&& op
== i
.broadcast
.operand
)
3921 switch (i
.broadcast
.bytes
)
3924 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3927 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3930 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3939 if (op
>= MAX_OPERANDS
)
3943 switch (i
.tm
.opcode_modifier
.evex
)
3945 case EVEXLIG
: /* LL' is ignored */
3946 vec_length
= evexlig
<< 5;
3949 vec_length
= 0 << 5;
3952 vec_length
= 1 << 5;
3955 vec_length
= 2 << 5;
3961 i
.vex
.bytes
[3] |= vec_length
;
3962 /* Encode the broadcast bit. */
3963 if (i
.broadcast
.type
)
3964 i
.vex
.bytes
[3] |= 0x10;
3966 else if (i
.rounding
.type
!= saeonly
)
3967 i
.vex
.bytes
[3] |= 0x10 | (i
.rounding
.type
<< 5);
3969 i
.vex
.bytes
[3] |= 0x10 | (evexrcig
<< 5);
3972 i
.vex
.bytes
[3] |= i
.mask
.reg
->reg_num
;
3976 process_immext (void)
3980 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3981 which is coded in the same place as an 8-bit immediate field
3982 would be. Here we fake an 8-bit immediate operand from the
3983 opcode suffix stored in tm.extension_opcode.
3985 AVX instructions also use this encoding, for some of
3986 3 argument instructions. */
3988 gas_assert (i
.imm_operands
<= 1
3990 || (is_any_vex_encoding (&i
.tm
)
3991 && i
.operands
<= 4)));
3993 exp
= &im_expressions
[i
.imm_operands
++];
3994 i
.op
[i
.operands
].imms
= exp
;
3995 i
.types
[i
.operands
] = imm8
;
3997 exp
->X_op
= O_constant
;
3998 exp
->X_add_number
= i
.tm
.extension_opcode
;
3999 i
.tm
.extension_opcode
= None
;
4006 switch (i
.tm
.opcode_modifier
.prefixok
)
4014 as_bad (_("invalid instruction `%s' after `%s'"),
4015 i
.tm
.name
, i
.hle_prefix
);
4018 if (i
.prefix
[LOCK_PREFIX
])
4020 as_bad (_("missing `lock' with `%s'"), i
.hle_prefix
);
4024 case PrefixHLERelease
:
4025 if (i
.prefix
[HLE_PREFIX
] != XRELEASE_PREFIX_OPCODE
)
4027 as_bad (_("instruction `%s' after `xacquire' not allowed"),
4031 if (i
.mem_operands
== 0 || !(i
.flags
[i
.operands
- 1] & Operand_Mem
))
4033 as_bad (_("memory destination needed for instruction `%s'"
4034 " after `xrelease'"), i
.tm
.name
);
4041 /* Encode aligned vector move as unaligned vector move. */
4044 encode_with_unaligned_vector_move (void)
4046 switch (i
.tm
.base_opcode
)
4048 case 0x28: /* Load instructions. */
4049 case 0x29: /* Store instructions. */
4050 /* movaps/movapd/vmovaps/vmovapd. */
4051 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4052 && i
.tm
.opcode_modifier
.opcodeprefix
<= PREFIX_0X66
)
4053 i
.tm
.base_opcode
= 0x10 | (i
.tm
.base_opcode
& 1);
4055 case 0x6f: /* Load instructions. */
4056 case 0x7f: /* Store instructions. */
4057 /* movdqa/vmovdqa/vmovdqa64/vmovdqa32. */
4058 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4059 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0X66
)
4060 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_0XF3
;
4067 /* Try the shortest encoding by shortening operand size. */
4070 optimize_encoding (void)
4074 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4075 && i
.tm
.base_opcode
== 0x8d)
4078 lea symbol, %rN -> mov $symbol, %rN
4079 lea (%rM), %rN -> mov %rM, %rN
4080 lea (,%rM,1), %rN -> mov %rM, %rN
4082 and in 32-bit mode for 16-bit addressing
4084 lea (%rM), %rN -> movzx %rM, %rN
4086 and in 64-bit mode zap 32-bit addressing in favor of using a
4087 32-bit (or less) destination.
4089 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
4091 if (!i
.op
[1].regs
->reg_type
.bitfield
.word
)
4092 i
.tm
.opcode_modifier
.size
= SIZE32
;
4093 i
.prefix
[ADDR_PREFIX
] = 0;
4096 if (!i
.index_reg
&& !i
.base_reg
)
4099 lea symbol, %rN -> mov $symbol, %rN
4101 if (flag_code
== CODE_64BIT
)
4103 /* Don't transform a relocation to a 16-bit one. */
4105 && i
.op
[0].disps
->X_op
!= O_constant
4106 && i
.op
[1].regs
->reg_type
.bitfield
.word
)
4109 if (!i
.op
[1].regs
->reg_type
.bitfield
.qword
4110 || i
.tm
.opcode_modifier
.size
== SIZE32
)
4112 i
.tm
.base_opcode
= 0xb8;
4113 i
.tm
.opcode_modifier
.modrm
= 0;
4114 if (!i
.op
[1].regs
->reg_type
.bitfield
.word
)
4115 i
.types
[0].bitfield
.imm32
= 1;
4118 i
.tm
.opcode_modifier
.size
= SIZE16
;
4119 i
.types
[0].bitfield
.imm16
= 1;
4124 /* Subject to further optimization below. */
4125 i
.tm
.base_opcode
= 0xc7;
4126 i
.tm
.extension_opcode
= 0;
4127 i
.types
[0].bitfield
.imm32s
= 1;
4128 i
.types
[0].bitfield
.baseindex
= 0;
4131 /* Outside of 64-bit mode address and operand sizes have to match if
4132 a relocation is involved, as otherwise we wouldn't (currently) or
4133 even couldn't express the relocation correctly. */
4134 else if (i
.op
[0].disps
4135 && i
.op
[0].disps
->X_op
!= O_constant
4136 && ((!i
.prefix
[ADDR_PREFIX
])
4137 != (flag_code
== CODE_32BIT
4138 ? i
.op
[1].regs
->reg_type
.bitfield
.dword
4139 : i
.op
[1].regs
->reg_type
.bitfield
.word
)))
4141 /* In 16-bit mode converting LEA with 16-bit addressing and a 32-bit
4142 destination is going to grow encoding size. */
4143 else if (flag_code
== CODE_16BIT
4144 && (optimize
<= 1 || optimize_for_space
)
4145 && !i
.prefix
[ADDR_PREFIX
]
4146 && i
.op
[1].regs
->reg_type
.bitfield
.dword
)
4150 i
.tm
.base_opcode
= 0xb8;
4151 i
.tm
.opcode_modifier
.modrm
= 0;
4152 if (i
.op
[1].regs
->reg_type
.bitfield
.dword
)
4153 i
.types
[0].bitfield
.imm32
= 1;
4155 i
.types
[0].bitfield
.imm16
= 1;
4158 && i
.op
[0].disps
->X_op
== O_constant
4159 && i
.op
[1].regs
->reg_type
.bitfield
.dword
4160 /* NB: Add () to !i.prefix[ADDR_PREFIX] to silence
4162 && (!i
.prefix
[ADDR_PREFIX
]) != (flag_code
== CODE_32BIT
))
4163 i
.op
[0].disps
->X_add_number
&= 0xffff;
4166 i
.tm
.operand_types
[0] = i
.types
[0];
4170 i
.op
[0].imms
= &im_expressions
[0];
4171 i
.op
[0].imms
->X_op
= O_absent
;
4174 else if (i
.op
[0].disps
4175 && (i
.op
[0].disps
->X_op
!= O_constant
4176 || i
.op
[0].disps
->X_add_number
))
4181 lea (%rM), %rN -> mov %rM, %rN
4182 lea (,%rM,1), %rN -> mov %rM, %rN
4183 lea (%rM), %rN -> movzx %rM, %rN
4185 const reg_entry
*addr_reg
;
4187 if (!i
.index_reg
&& i
.base_reg
->reg_num
!= RegIP
)
4188 addr_reg
= i
.base_reg
;
4189 else if (!i
.base_reg
4190 && i
.index_reg
->reg_num
!= RegIZ
4191 && !i
.log2_scale_factor
)
4192 addr_reg
= i
.index_reg
;
4196 if (addr_reg
->reg_type
.bitfield
.word
4197 && i
.op
[1].regs
->reg_type
.bitfield
.dword
)
4199 if (flag_code
!= CODE_32BIT
)
4201 i
.tm
.opcode_modifier
.opcodespace
= SPACE_0F
;
4202 i
.tm
.base_opcode
= 0xb7;
4205 i
.tm
.base_opcode
= 0x8b;
4207 if (addr_reg
->reg_type
.bitfield
.dword
4208 && i
.op
[1].regs
->reg_type
.bitfield
.qword
)
4209 i
.tm
.opcode_modifier
.size
= SIZE32
;
4211 i
.op
[0].regs
= addr_reg
;
4216 i
.disp_operands
= 0;
4217 i
.prefix
[ADDR_PREFIX
] = 0;
4218 i
.prefix
[SEG_PREFIX
] = 0;
4222 if (optimize_for_space
4223 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4224 && i
.reg_operands
== 1
4225 && i
.imm_operands
== 1
4226 && !i
.types
[1].bitfield
.byte
4227 && i
.op
[0].imms
->X_op
== O_constant
4228 && fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4229 && (i
.tm
.base_opcode
== 0xa8
4230 || (i
.tm
.base_opcode
== 0xf6
4231 && i
.tm
.extension_opcode
== 0x0)))
4234 test $imm7, %r64/%r32/%r16 -> test $imm7, %r8
4236 unsigned int base_regnum
= i
.op
[1].regs
->reg_num
;
4237 if (flag_code
== CODE_64BIT
|| base_regnum
< 4)
4239 i
.types
[1].bitfield
.byte
= 1;
4240 /* Ignore the suffix. */
4242 /* Convert to byte registers. */
4243 if (i
.types
[1].bitfield
.word
)
4245 else if (i
.types
[1].bitfield
.dword
)
4249 if (!(i
.op
[1].regs
->reg_flags
& RegRex
) && base_regnum
< 4)
4254 else if (flag_code
== CODE_64BIT
4255 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4256 && ((i
.types
[1].bitfield
.qword
4257 && i
.reg_operands
== 1
4258 && i
.imm_operands
== 1
4259 && i
.op
[0].imms
->X_op
== O_constant
4260 && ((i
.tm
.base_opcode
== 0xb8
4261 && i
.tm
.extension_opcode
== None
4262 && fits_in_unsigned_long (i
.op
[0].imms
->X_add_number
))
4263 || (fits_in_imm31 (i
.op
[0].imms
->X_add_number
)
4264 && ((i
.tm
.base_opcode
== 0x24
4265 || i
.tm
.base_opcode
== 0xa8)
4266 || (i
.tm
.base_opcode
== 0x80
4267 && i
.tm
.extension_opcode
== 0x4)
4268 || ((i
.tm
.base_opcode
== 0xf6
4269 || (i
.tm
.base_opcode
| 1) == 0xc7)
4270 && i
.tm
.extension_opcode
== 0x0)))
4271 || (fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4272 && i
.tm
.base_opcode
== 0x83
4273 && i
.tm
.extension_opcode
== 0x4)))
4274 || (i
.types
[0].bitfield
.qword
4275 && ((i
.reg_operands
== 2
4276 && i
.op
[0].regs
== i
.op
[1].regs
4277 && (i
.tm
.base_opcode
== 0x30
4278 || i
.tm
.base_opcode
== 0x28))
4279 || (i
.reg_operands
== 1
4281 && i
.tm
.base_opcode
== 0x30)))))
4284 andq $imm31, %r64 -> andl $imm31, %r32
4285 andq $imm7, %r64 -> andl $imm7, %r32
4286 testq $imm31, %r64 -> testl $imm31, %r32
4287 xorq %r64, %r64 -> xorl %r32, %r32
4288 subq %r64, %r64 -> subl %r32, %r32
4289 movq $imm31, %r64 -> movl $imm31, %r32
4290 movq $imm32, %r64 -> movl $imm32, %r32
4292 i
.tm
.opcode_modifier
.norex64
= 1;
4293 if (i
.tm
.base_opcode
== 0xb8 || (i
.tm
.base_opcode
| 1) == 0xc7)
4296 movq $imm31, %r64 -> movl $imm31, %r32
4297 movq $imm32, %r64 -> movl $imm32, %r32
4299 i
.tm
.operand_types
[0].bitfield
.imm32
= 1;
4300 i
.tm
.operand_types
[0].bitfield
.imm32s
= 0;
4301 i
.tm
.operand_types
[0].bitfield
.imm64
= 0;
4302 i
.types
[0].bitfield
.imm32
= 1;
4303 i
.types
[0].bitfield
.imm32s
= 0;
4304 i
.types
[0].bitfield
.imm64
= 0;
4305 i
.types
[1].bitfield
.dword
= 1;
4306 i
.types
[1].bitfield
.qword
= 0;
4307 if ((i
.tm
.base_opcode
| 1) == 0xc7)
4310 movq $imm31, %r64 -> movl $imm31, %r32
4312 i
.tm
.base_opcode
= 0xb8;
4313 i
.tm
.extension_opcode
= None
;
4314 i
.tm
.opcode_modifier
.w
= 0;
4315 i
.tm
.opcode_modifier
.modrm
= 0;
4319 else if (optimize
> 1
4320 && !optimize_for_space
4321 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4322 && i
.reg_operands
== 2
4323 && i
.op
[0].regs
== i
.op
[1].regs
4324 && ((i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x8
4325 || (i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x20)
4326 && (flag_code
!= CODE_64BIT
|| !i
.types
[0].bitfield
.dword
))
4329 andb %rN, %rN -> testb %rN, %rN
4330 andw %rN, %rN -> testw %rN, %rN
4331 andq %rN, %rN -> testq %rN, %rN
4332 orb %rN, %rN -> testb %rN, %rN
4333 orw %rN, %rN -> testw %rN, %rN
4334 orq %rN, %rN -> testq %rN, %rN
4336 and outside of 64-bit mode
4338 andl %rN, %rN -> testl %rN, %rN
4339 orl %rN, %rN -> testl %rN, %rN
4341 i
.tm
.base_opcode
= 0x84 | (i
.tm
.base_opcode
& 1);
4343 else if (i
.reg_operands
== 3
4344 && i
.op
[0].regs
== i
.op
[1].regs
4345 && !i
.types
[2].bitfield
.xmmword
4346 && (i
.tm
.opcode_modifier
.vex
4347 || ((!i
.mask
.reg
|| i
.mask
.zeroing
)
4348 && i
.rounding
.type
== rc_none
4349 && is_evex_encoding (&i
.tm
)
4350 && (i
.vec_encoding
!= vex_encoding_evex
4351 || cpu_arch_isa_flags
.bitfield
.cpuavx512vl
4352 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
4353 || (i
.tm
.operand_types
[2].bitfield
.zmmword
4354 && i
.types
[2].bitfield
.ymmword
))))
4355 && ((i
.tm
.base_opcode
== 0x55
4356 || i
.tm
.base_opcode
== 0x57
4357 || i
.tm
.base_opcode
== 0xdf
4358 || i
.tm
.base_opcode
== 0xef
4359 || i
.tm
.base_opcode
== 0xf8
4360 || i
.tm
.base_opcode
== 0xf9
4361 || i
.tm
.base_opcode
== 0xfa
4362 || i
.tm
.base_opcode
== 0xfb
4363 || i
.tm
.base_opcode
== 0x42
4364 || i
.tm
.base_opcode
== 0x47)
4365 && i
.tm
.extension_opcode
== None
))
4368 VOP, one of vandnps, vandnpd, vxorps, vxorpd, vpsubb, vpsubd,
4370 EVEX VOP %zmmM, %zmmM, %zmmN
4371 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4372 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4373 EVEX VOP %ymmM, %ymmM, %ymmN
4374 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4375 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4376 VEX VOP %ymmM, %ymmM, %ymmN
4377 -> VEX VOP %xmmM, %xmmM, %xmmN
4378 VOP, one of vpandn and vpxor:
4379 VEX VOP %ymmM, %ymmM, %ymmN
4380 -> VEX VOP %xmmM, %xmmM, %xmmN
4381 VOP, one of vpandnd and vpandnq:
4382 EVEX VOP %zmmM, %zmmM, %zmmN
4383 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4384 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4385 EVEX VOP %ymmM, %ymmM, %ymmN
4386 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4387 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4388 VOP, one of vpxord and vpxorq:
4389 EVEX VOP %zmmM, %zmmM, %zmmN
4390 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4391 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4392 EVEX VOP %ymmM, %ymmM, %ymmN
4393 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4394 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4395 VOP, one of kxord and kxorq:
4396 VEX VOP %kM, %kM, %kN
4397 -> VEX kxorw %kM, %kM, %kN
4398 VOP, one of kandnd and kandnq:
4399 VEX VOP %kM, %kM, %kN
4400 -> VEX kandnw %kM, %kM, %kN
4402 if (is_evex_encoding (&i
.tm
))
4404 if (i
.vec_encoding
!= vex_encoding_evex
)
4406 i
.tm
.opcode_modifier
.vex
= VEX128
;
4407 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4408 i
.tm
.opcode_modifier
.evex
= 0;
4410 else if (optimize
> 1)
4411 i
.tm
.opcode_modifier
.evex
= EVEX128
;
4415 else if (i
.tm
.operand_types
[0].bitfield
.class == RegMask
)
4417 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_NONE
;
4418 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4421 i
.tm
.opcode_modifier
.vex
= VEX128
;
4423 if (i
.tm
.opcode_modifier
.vex
)
4424 for (j
= 0; j
< 3; j
++)
4426 i
.types
[j
].bitfield
.xmmword
= 1;
4427 i
.types
[j
].bitfield
.ymmword
= 0;
4430 else if (i
.vec_encoding
!= vex_encoding_evex
4431 && !i
.types
[0].bitfield
.zmmword
4432 && !i
.types
[1].bitfield
.zmmword
4434 && !i
.broadcast
.type
4435 && is_evex_encoding (&i
.tm
)
4436 && ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x6f
4437 || (i
.tm
.base_opcode
& ~4) == 0xdb
4438 || (i
.tm
.base_opcode
& ~4) == 0xeb)
4439 && i
.tm
.extension_opcode
== None
)
4442 VOP, one of vmovdqa32, vmovdqa64, vmovdqu8, vmovdqu16,
4443 vmovdqu32 and vmovdqu64:
4444 EVEX VOP %xmmM, %xmmN
4445 -> VEX vmovdqa|vmovdqu %xmmM, %xmmN (M and N < 16)
4446 EVEX VOP %ymmM, %ymmN
4447 -> VEX vmovdqa|vmovdqu %ymmM, %ymmN (M and N < 16)
4449 -> VEX vmovdqa|vmovdqu %xmmM, mem (M < 16)
4451 -> VEX vmovdqa|vmovdqu %ymmM, mem (M < 16)
4453 -> VEX mvmovdqa|vmovdquem, %xmmN (N < 16)
4455 -> VEX vmovdqa|vmovdqu mem, %ymmN (N < 16)
4456 VOP, one of vpand, vpandn, vpor, vpxor:
4457 EVEX VOP{d,q} %xmmL, %xmmM, %xmmN
4458 -> VEX VOP %xmmL, %xmmM, %xmmN (L, M, and N < 16)
4459 EVEX VOP{d,q} %ymmL, %ymmM, %ymmN
4460 -> VEX VOP %ymmL, %ymmM, %ymmN (L, M, and N < 16)
4461 EVEX VOP{d,q} mem, %xmmM, %xmmN
4462 -> VEX VOP mem, %xmmM, %xmmN (M and N < 16)
4463 EVEX VOP{d,q} mem, %ymmM, %ymmN
4464 -> VEX VOP mem, %ymmM, %ymmN (M and N < 16)
4466 for (j
= 0; j
< i
.operands
; j
++)
4467 if (operand_type_check (i
.types
[j
], disp
)
4468 && i
.op
[j
].disps
->X_op
== O_constant
)
4470 /* Since the VEX prefix has 2 or 3 bytes, the EVEX prefix
4471 has 4 bytes, EVEX Disp8 has 1 byte and VEX Disp32 has 4
4472 bytes, we choose EVEX Disp8 over VEX Disp32. */
4473 int evex_disp8
, vex_disp8
;
4474 unsigned int memshift
= i
.memshift
;
4475 offsetT n
= i
.op
[j
].disps
->X_add_number
;
4477 evex_disp8
= fits_in_disp8 (n
);
4479 vex_disp8
= fits_in_disp8 (n
);
4480 if (evex_disp8
!= vex_disp8
)
4482 i
.memshift
= memshift
;
4486 i
.types
[j
].bitfield
.disp8
= vex_disp8
;
4489 if ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x6f
4490 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
)
4491 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_0XF3
;
4492 i
.tm
.opcode_modifier
.vex
4493 = i
.types
[0].bitfield
.ymmword
? VEX256
: VEX128
;
4494 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4495 /* VPAND, VPOR, and VPXOR are commutative. */
4496 if (i
.reg_operands
== 3 && i
.tm
.base_opcode
!= 0xdf)
4497 i
.tm
.opcode_modifier
.commutative
= 1;
4498 i
.tm
.opcode_modifier
.evex
= 0;
4499 i
.tm
.opcode_modifier
.masking
= 0;
4500 i
.tm
.opcode_modifier
.broadcast
= 0;
4501 i
.tm
.opcode_modifier
.disp8memshift
= 0;
4504 i
.types
[j
].bitfield
.disp8
4505 = fits_in_disp8 (i
.op
[j
].disps
->X_add_number
);
4509 /* Return non-zero for load instruction. */
4515 int any_vex_p
= is_any_vex_encoding (&i
.tm
);
4516 unsigned int base_opcode
= i
.tm
.base_opcode
| 1;
4520 /* Anysize insns: lea, invlpg, clflush, prefetchnta, prefetcht0,
4521 prefetcht1, prefetcht2, prefetchtw, bndmk, bndcl, bndcu, bndcn,
4522 bndstx, bndldx, prefetchwt1, clflushopt, clwb, cldemote. */
4523 if (i
.tm
.opcode_modifier
.anysize
)
4527 if (strcmp (i
.tm
.name
, "pop") == 0)
4531 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
4534 if (i
.tm
.base_opcode
== 0x9d
4535 || i
.tm
.base_opcode
== 0x61)
4538 /* movs, cmps, lods, scas. */
4539 if ((i
.tm
.base_opcode
| 0xb) == 0xaf)
4543 if (base_opcode
== 0x6f
4544 || i
.tm
.base_opcode
== 0xd7)
4546 /* NB: For AMD-specific insns with implicit memory operands,
4547 they're intentionally not covered. */
4550 /* No memory operand. */
4551 if (!i
.mem_operands
)
4557 if (i
.tm
.base_opcode
== 0xae
4558 && i
.tm
.opcode_modifier
.vex
4559 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4560 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
4561 && i
.tm
.extension_opcode
== 2)
4564 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
4566 /* test, not, neg, mul, imul, div, idiv. */
4567 if ((i
.tm
.base_opcode
== 0xf6 || i
.tm
.base_opcode
== 0xf7)
4568 && i
.tm
.extension_opcode
!= 1)
4572 if (base_opcode
== 0xff && i
.tm
.extension_opcode
<= 1)
4575 /* add, or, adc, sbb, and, sub, xor, cmp. */
4576 if (i
.tm
.base_opcode
>= 0x80 && i
.tm
.base_opcode
<= 0x83)
4579 /* rol, ror, rcl, rcr, shl/sal, shr, sar. */
4580 if ((base_opcode
== 0xc1
4581 || (i
.tm
.base_opcode
>= 0xd0 && i
.tm
.base_opcode
<= 0xd3))
4582 && i
.tm
.extension_opcode
!= 6)
4585 /* Check for x87 instructions. */
4586 if (base_opcode
>= 0xd8 && base_opcode
<= 0xdf)
4588 /* Skip fst, fstp, fstenv, fstcw. */
4589 if (i
.tm
.base_opcode
== 0xd9
4590 && (i
.tm
.extension_opcode
== 2
4591 || i
.tm
.extension_opcode
== 3
4592 || i
.tm
.extension_opcode
== 6
4593 || i
.tm
.extension_opcode
== 7))
4596 /* Skip fisttp, fist, fistp, fstp. */
4597 if (i
.tm
.base_opcode
== 0xdb
4598 && (i
.tm
.extension_opcode
== 1
4599 || i
.tm
.extension_opcode
== 2
4600 || i
.tm
.extension_opcode
== 3
4601 || i
.tm
.extension_opcode
== 7))
4604 /* Skip fisttp, fst, fstp, fsave, fstsw. */
4605 if (i
.tm
.base_opcode
== 0xdd
4606 && (i
.tm
.extension_opcode
== 1
4607 || i
.tm
.extension_opcode
== 2
4608 || i
.tm
.extension_opcode
== 3
4609 || i
.tm
.extension_opcode
== 6
4610 || i
.tm
.extension_opcode
== 7))
4613 /* Skip fisttp, fist, fistp, fbstp, fistp. */
4614 if (i
.tm
.base_opcode
== 0xdf
4615 && (i
.tm
.extension_opcode
== 1
4616 || i
.tm
.extension_opcode
== 2
4617 || i
.tm
.extension_opcode
== 3
4618 || i
.tm
.extension_opcode
== 6
4619 || i
.tm
.extension_opcode
== 7))
4625 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
)
4627 /* bt, bts, btr, btc. */
4628 if (i
.tm
.base_opcode
== 0xba
4629 && (i
.tm
.extension_opcode
>= 4 && i
.tm
.extension_opcode
<= 7))
4632 /* cmpxchg8b, cmpxchg16b, xrstors, vmptrld. */
4633 if (i
.tm
.base_opcode
== 0xc7
4634 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
4635 && (i
.tm
.extension_opcode
== 1 || i
.tm
.extension_opcode
== 3
4636 || i
.tm
.extension_opcode
== 6))
4639 /* fxrstor, ldmxcsr, xrstor. */
4640 if (i
.tm
.base_opcode
== 0xae
4641 && (i
.tm
.extension_opcode
== 1
4642 || i
.tm
.extension_opcode
== 2
4643 || i
.tm
.extension_opcode
== 5))
4646 /* lgdt, lidt, lmsw. */
4647 if (i
.tm
.base_opcode
== 0x01
4648 && (i
.tm
.extension_opcode
== 2
4649 || i
.tm
.extension_opcode
== 3
4650 || i
.tm
.extension_opcode
== 6))
4654 dest
= i
.operands
- 1;
4656 /* Check fake imm8 operand and 3 source operands. */
4657 if ((i
.tm
.opcode_modifier
.immext
4658 || i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
4659 && i
.types
[dest
].bitfield
.imm8
)
4662 /* add, or, adc, sbb, and, sub, xor, cmp, test, xchg. */
4663 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4664 && (base_opcode
== 0x1
4665 || base_opcode
== 0x9
4666 || base_opcode
== 0x11
4667 || base_opcode
== 0x19
4668 || base_opcode
== 0x21
4669 || base_opcode
== 0x29
4670 || base_opcode
== 0x31
4671 || base_opcode
== 0x39
4672 || (base_opcode
| 2) == 0x87))
4676 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4677 && base_opcode
== 0xc1)
4680 /* Check for load instruction. */
4681 return (i
.types
[dest
].bitfield
.class != ClassNone
4682 || i
.types
[dest
].bitfield
.instance
== Accum
);
4685 /* Output lfence, 0xfaee8, after instruction. */
4688 insert_lfence_after (void)
4690 if (lfence_after_load
&& load_insn_p ())
4692 /* There are also two REP string instructions that require
4693 special treatment. Specifically, the compare string (CMPS)
4694 and scan string (SCAS) instructions set EFLAGS in a manner
4695 that depends on the data being compared/scanned. When used
4696 with a REP prefix, the number of iterations may therefore
4697 vary depending on this data. If the data is a program secret
4698 chosen by the adversary using an LVI method,
4699 then this data-dependent behavior may leak some aspect
4701 if (((i
.tm
.base_opcode
| 0x1) == 0xa7
4702 || (i
.tm
.base_opcode
| 0x1) == 0xaf)
4703 && i
.prefix
[REP_PREFIX
])
4705 as_warn (_("`%s` changes flags which would affect control flow behavior"),
4708 char *p
= frag_more (3);
4715 /* Output lfence, 0xfaee8, before instruction. */
4718 insert_lfence_before (void)
4722 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
4725 if (i
.tm
.base_opcode
== 0xff
4726 && (i
.tm
.extension_opcode
== 2 || i
.tm
.extension_opcode
== 4))
4728 /* Insert lfence before indirect branch if needed. */
4730 if (lfence_before_indirect_branch
== lfence_branch_none
)
4733 if (i
.operands
!= 1)
4736 if (i
.reg_operands
== 1)
4738 /* Indirect branch via register. Don't insert lfence with
4739 -mlfence-after-load=yes. */
4740 if (lfence_after_load
4741 || lfence_before_indirect_branch
== lfence_branch_memory
)
4744 else if (i
.mem_operands
== 1
4745 && lfence_before_indirect_branch
!= lfence_branch_register
)
4747 as_warn (_("indirect `%s` with memory operand should be avoided"),
4754 if (last_insn
.kind
!= last_insn_other
4755 && last_insn
.seg
== now_seg
)
4757 as_warn_where (last_insn
.file
, last_insn
.line
,
4758 _("`%s` skips -mlfence-before-indirect-branch on `%s`"),
4759 last_insn
.name
, i
.tm
.name
);
4770 /* Output or/not/shl and lfence before near ret. */
4771 if (lfence_before_ret
!= lfence_before_ret_none
4772 && (i
.tm
.base_opcode
== 0xc2
4773 || i
.tm
.base_opcode
== 0xc3))
4775 if (last_insn
.kind
!= last_insn_other
4776 && last_insn
.seg
== now_seg
)
4778 as_warn_where (last_insn
.file
, last_insn
.line
,
4779 _("`%s` skips -mlfence-before-ret on `%s`"),
4780 last_insn
.name
, i
.tm
.name
);
4784 /* Near ret ingore operand size override under CPU64. */
4785 char prefix
= flag_code
== CODE_64BIT
4787 : i
.prefix
[DATA_PREFIX
] ? 0x66 : 0x0;
4789 if (lfence_before_ret
== lfence_before_ret_not
)
4791 /* not: 0xf71424, may add prefix
4792 for operand size override or 64-bit code. */
4793 p
= frag_more ((prefix
? 2 : 0) + 6 + 3);
4807 p
= frag_more ((prefix
? 1 : 0) + 4 + 3);
4810 if (lfence_before_ret
== lfence_before_ret_or
)
4812 /* or: 0x830c2400, may add prefix
4813 for operand size override or 64-bit code. */
4819 /* shl: 0xc1242400, may add prefix
4820 for operand size override or 64-bit code. */
4835 /* This is the guts of the machine-dependent assembler. LINE points to a
4836 machine dependent instruction. This function is supposed to emit
4837 the frags/bytes it assembles to. */
4840 md_assemble (char *line
)
4843 char mnemonic
[MAX_MNEM_SIZE
], mnem_suffix
;
4844 const insn_template
*t
;
4846 /* Initialize globals. */
4847 memset (&i
, '\0', sizeof (i
));
4848 i
.rounding
.type
= rc_none
;
4849 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4850 i
.reloc
[j
] = NO_RELOC
;
4851 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
4852 memset (im_expressions
, '\0', sizeof (im_expressions
));
4853 save_stack_p
= save_stack
;
4855 /* First parse an instruction mnemonic & call i386_operand for the operands.
4856 We assume that the scrubber has arranged it so that line[0] is the valid
4857 start of a (possibly prefixed) mnemonic. */
4859 line
= parse_insn (line
, mnemonic
);
4862 mnem_suffix
= i
.suffix
;
4864 line
= parse_operands (line
, mnemonic
);
4866 xfree (i
.memop1_string
);
4867 i
.memop1_string
= NULL
;
4871 /* Now we've parsed the mnemonic into a set of templates, and have the
4872 operands at hand. */
4874 /* All Intel opcodes have reversed operands except for "bound", "enter",
4875 "invlpg*", "monitor*", "mwait*", "tpause", "umwait", "pvalidate",
4876 "rmpadjust", and "rmpupdate". We also don't reverse intersegment "jmp"
4877 and "call" instructions with 2 immediate operands so that the immediate
4878 segment precedes the offset consistently in Intel and AT&T modes. */
4881 && (strcmp (mnemonic
, "bound") != 0)
4882 && (strncmp (mnemonic
, "invlpg", 6) != 0)
4883 && !startswith (mnemonic
, "monitor")
4884 && !startswith (mnemonic
, "mwait")
4885 && (strcmp (mnemonic
, "pvalidate") != 0)
4886 && !startswith (mnemonic
, "rmp")
4887 && (strcmp (mnemonic
, "tpause") != 0)
4888 && (strcmp (mnemonic
, "umwait") != 0)
4889 && !(operand_type_check (i
.types
[0], imm
)
4890 && operand_type_check (i
.types
[1], imm
)))
4893 /* The order of the immediates should be reversed
4894 for 2 immediates extrq and insertq instructions */
4895 if (i
.imm_operands
== 2
4896 && (strcmp (mnemonic
, "extrq") == 0
4897 || strcmp (mnemonic
, "insertq") == 0))
4898 swap_2_operands (0, 1);
4903 if (i
.disp_operands
&& !want_disp32 (current_templates
->start
))
4905 for (j
= 0; j
< i
.operands
; ++j
)
4907 const expressionS
*exp
= i
.op
[j
].disps
;
4909 if (!operand_type_check (i
.types
[j
], disp
))
4912 if (exp
->X_op
!= O_constant
)
4915 /* Since displacement is signed extended to 64bit, don't allow
4916 disp32 and turn off disp32s if they are out of range. */
4917 i
.types
[j
].bitfield
.disp32
= 0;
4918 if (fits_in_signed_long (exp
->X_add_number
))
4921 i
.types
[j
].bitfield
.disp32s
= 0;
4922 if (i
.types
[j
].bitfield
.baseindex
)
4924 char number_buf
[128];
4926 /* Coded this way in order to allow for ease of translation. */
4927 sprintf_vma (number_buf
, exp
->X_add_number
);
4928 as_bad (_("0x%s out of range of signed 32bit displacement"),
4935 /* Don't optimize displacement for movabs since it only takes 64bit
4938 && i
.disp_encoding
!= disp_encoding_32bit
4939 && (flag_code
!= CODE_64BIT
4940 || strcmp (mnemonic
, "movabs") != 0))
4943 /* Next, we find a template that matches the given insn,
4944 making sure the overlap of the given operands types is consistent
4945 with the template operand types. */
4947 if (!(t
= match_template (mnem_suffix
)))
4950 if (sse_check
!= check_none
4951 /* The opcode space check isn't strictly needed; it's there only to
4952 bypass the logic below when easily possible. */
4953 && t
->opcode_modifier
.opcodespace
>= SPACE_0F
4954 && t
->opcode_modifier
.opcodespace
<= SPACE_0F3A
4955 && !i
.tm
.cpu_flags
.bitfield
.cpusse4a
4956 && !is_any_vex_encoding (t
))
4960 for (j
= 0; j
< t
->operands
; ++j
)
4962 if (t
->operand_types
[j
].bitfield
.class == RegMMX
)
4964 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
)
4968 if (j
>= t
->operands
&& simd
)
4969 (sse_check
== check_warning
4971 : as_bad
) (_("SSE instruction `%s' is used"), i
.tm
.name
);
4974 if (i
.tm
.opcode_modifier
.fwait
)
4975 if (!add_prefix (FWAIT_OPCODE
))
4978 /* Check if REP prefix is OK. */
4979 if (i
.rep_prefix
&& i
.tm
.opcode_modifier
.prefixok
!= PrefixRep
)
4981 as_bad (_("invalid instruction `%s' after `%s'"),
4982 i
.tm
.name
, i
.rep_prefix
);
4986 /* Check for lock without a lockable instruction. Destination operand
4987 must be memory unless it is xchg (0x86). */
4988 if (i
.prefix
[LOCK_PREFIX
]
4989 && (i
.tm
.opcode_modifier
.prefixok
< PrefixLock
4990 || i
.mem_operands
== 0
4991 || (i
.tm
.base_opcode
!= 0x86
4992 && !(i
.flags
[i
.operands
- 1] & Operand_Mem
))))
4994 as_bad (_("expecting lockable instruction after `lock'"));
4998 /* Check for data size prefix on VEX/XOP/EVEX encoded and SIMD insns. */
4999 if (i
.prefix
[DATA_PREFIX
]
5000 && (is_any_vex_encoding (&i
.tm
)
5001 || i
.tm
.operand_types
[i
.imm_operands
].bitfield
.class >= RegMMX
5002 || i
.tm
.operand_types
[i
.imm_operands
+ 1].bitfield
.class >= RegMMX
))
5004 as_bad (_("data size prefix invalid with `%s'"), i
.tm
.name
);
5008 /* Check if HLE prefix is OK. */
5009 if (i
.hle_prefix
&& !check_hle ())
5012 /* Check BND prefix. */
5013 if (i
.bnd_prefix
&& !i
.tm
.opcode_modifier
.bndprefixok
)
5014 as_bad (_("expecting valid branch instruction after `bnd'"));
5016 /* Check NOTRACK prefix. */
5017 if (i
.notrack_prefix
&& i
.tm
.opcode_modifier
.prefixok
!= PrefixNoTrack
)
5018 as_bad (_("expecting indirect branch instruction after `notrack'"));
5020 if (i
.tm
.cpu_flags
.bitfield
.cpumpx
)
5022 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
5023 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
5024 else if (flag_code
!= CODE_16BIT
5025 ? i
.prefix
[ADDR_PREFIX
]
5026 : i
.mem_operands
&& !i
.prefix
[ADDR_PREFIX
])
5027 as_bad (_("16-bit address isn't allowed in MPX instructions"));
5030 /* Insert BND prefix. */
5031 if (add_bnd_prefix
&& i
.tm
.opcode_modifier
.bndprefixok
)
5033 if (!i
.prefix
[BND_PREFIX
])
5034 add_prefix (BND_PREFIX_OPCODE
);
5035 else if (i
.prefix
[BND_PREFIX
] != BND_PREFIX_OPCODE
)
5037 as_warn (_("replacing `rep'/`repe' prefix by `bnd'"));
5038 i
.prefix
[BND_PREFIX
] = BND_PREFIX_OPCODE
;
5042 /* Check string instruction segment overrides. */
5043 if (i
.tm
.opcode_modifier
.isstring
>= IS_STRING_ES_OP0
)
5045 gas_assert (i
.mem_operands
);
5046 if (!check_string ())
5048 i
.disp_operands
= 0;
5051 if (optimize
&& !i
.no_optimize
&& i
.tm
.opcode_modifier
.optimize
)
5052 optimize_encoding ();
5054 if (use_unaligned_vector_move
)
5055 encode_with_unaligned_vector_move ();
5057 if (!process_suffix ())
5060 /* Update operand types and check extended states. */
5061 for (j
= 0; j
< i
.operands
; j
++)
5063 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
5064 switch (i
.tm
.operand_types
[j
].bitfield
.class)
5069 i
.xstate
|= xstate_mmx
;
5072 i
.xstate
|= xstate_mask
;
5075 if (i
.tm
.operand_types
[j
].bitfield
.tmmword
)
5076 i
.xstate
|= xstate_tmm
;
5077 else if (i
.tm
.operand_types
[j
].bitfield
.zmmword
)
5078 i
.xstate
|= xstate_zmm
;
5079 else if (i
.tm
.operand_types
[j
].bitfield
.ymmword
)
5080 i
.xstate
|= xstate_ymm
;
5081 else if (i
.tm
.operand_types
[j
].bitfield
.xmmword
)
5082 i
.xstate
|= xstate_xmm
;
5087 /* Make still unresolved immediate matches conform to size of immediate
5088 given in i.suffix. */
5089 if (!finalize_imm ())
5092 if (i
.types
[0].bitfield
.imm1
)
5093 i
.imm_operands
= 0; /* kludge for shift insns. */
5095 /* We only need to check those implicit registers for instructions
5096 with 3 operands or less. */
5097 if (i
.operands
<= 3)
5098 for (j
= 0; j
< i
.operands
; j
++)
5099 if (i
.types
[j
].bitfield
.instance
!= InstanceNone
5100 && !i
.types
[j
].bitfield
.xmmword
)
5103 /* For insns with operands there are more diddles to do to the opcode. */
5106 if (!process_operands ())
5109 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
5111 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
5112 as_warn (_("translating to `%sp'"), i
.tm
.name
);
5115 if (is_any_vex_encoding (&i
.tm
))
5117 if (!cpu_arch_flags
.bitfield
.cpui286
)
5119 as_bad (_("instruction `%s' isn't supported outside of protected mode."),
5124 /* Check for explicit REX prefix. */
5125 if (i
.prefix
[REX_PREFIX
] || i
.rex_encoding
)
5127 as_bad (_("REX prefix invalid with `%s'"), i
.tm
.name
);
5131 if (i
.tm
.opcode_modifier
.vex
)
5132 build_vex_prefix (t
);
5134 build_evex_prefix ();
5136 /* The individual REX.RXBW bits got consumed. */
5137 i
.rex
&= REX_OPCODE
;
5140 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
5141 instructions may define INT_OPCODE as well, so avoid this corner
5142 case for those instructions that use MODRM. */
5143 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
5144 && i
.tm
.base_opcode
== INT_OPCODE
5145 && !i
.tm
.opcode_modifier
.modrm
5146 && i
.op
[0].imms
->X_add_number
== 3)
5148 i
.tm
.base_opcode
= INT3_OPCODE
;
5152 if ((i
.tm
.opcode_modifier
.jump
== JUMP
5153 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
5154 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
5155 && i
.op
[0].disps
->X_op
== O_constant
)
5157 /* Convert "jmp constant" (and "call constant") to a jump (call) to
5158 the absolute address given by the constant. Since ix86 jumps and
5159 calls are pc relative, we need to generate a reloc. */
5160 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
5161 i
.op
[0].disps
->X_op
= O_symbol
;
5164 /* For 8 bit registers we need an empty rex prefix. Also if the
5165 instruction already has a prefix, we need to convert old
5166 registers to new ones. */
5168 if ((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
5169 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
5170 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
5171 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
5172 || (((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
)
5173 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
))
5178 i
.rex
|= REX_OPCODE
;
5179 for (x
= 0; x
< 2; x
++)
5181 /* Look for 8 bit operand that uses old registers. */
5182 if (i
.types
[x
].bitfield
.class == Reg
&& i
.types
[x
].bitfield
.byte
5183 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
5185 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
5186 /* In case it is "hi" register, give up. */
5187 if (i
.op
[x
].regs
->reg_num
> 3)
5188 as_bad (_("can't encode register '%s%s' in an "
5189 "instruction requiring REX prefix."),
5190 register_prefix
, i
.op
[x
].regs
->reg_name
);
5192 /* Otherwise it is equivalent to the extended register.
5193 Since the encoding doesn't change this is merely
5194 cosmetic cleanup for debug output. */
5196 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
5201 if (i
.rex
== 0 && i
.rex_encoding
)
5203 /* Check if we can add a REX_OPCODE byte. Look for 8 bit operand
5204 that uses legacy register. If it is "hi" register, don't add
5205 the REX_OPCODE byte. */
5207 for (x
= 0; x
< 2; x
++)
5208 if (i
.types
[x
].bitfield
.class == Reg
5209 && i
.types
[x
].bitfield
.byte
5210 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0
5211 && i
.op
[x
].regs
->reg_num
> 3)
5213 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
5214 i
.rex_encoding
= false;
5223 add_prefix (REX_OPCODE
| i
.rex
);
5225 insert_lfence_before ();
5227 /* We are ready to output the insn. */
5230 insert_lfence_after ();
5232 last_insn
.seg
= now_seg
;
5234 if (i
.tm
.opcode_modifier
.isprefix
)
5236 last_insn
.kind
= last_insn_prefix
;
5237 last_insn
.name
= i
.tm
.name
;
5238 last_insn
.file
= as_where (&last_insn
.line
);
5241 last_insn
.kind
= last_insn_other
;
5245 parse_insn (char *line
, char *mnemonic
)
5248 char *token_start
= l
;
5251 const insn_template
*t
;
5257 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
5262 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
5264 as_bad (_("no such instruction: `%s'"), token_start
);
5269 if (!is_space_char (*l
)
5270 && *l
!= END_OF_INSN
5272 || (*l
!= PREFIX_SEPARATOR
5275 as_bad (_("invalid character %s in mnemonic"),
5276 output_invalid (*l
));
5279 if (token_start
== l
)
5281 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
5282 as_bad (_("expecting prefix; got nothing"));
5284 as_bad (_("expecting mnemonic; got nothing"));
5288 /* Look up instruction (or prefix) via hash table. */
5289 current_templates
= (const templates
*) str_hash_find (op_hash
, mnemonic
);
5291 if (*l
!= END_OF_INSN
5292 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
5293 && current_templates
5294 && current_templates
->start
->opcode_modifier
.isprefix
)
5296 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
5298 as_bad ((flag_code
!= CODE_64BIT
5299 ? _("`%s' is only supported in 64-bit mode")
5300 : _("`%s' is not supported in 64-bit mode")),
5301 current_templates
->start
->name
);
5304 /* If we are in 16-bit mode, do not allow addr16 or data16.
5305 Similarly, in 32-bit mode, do not allow addr32 or data32. */
5306 if ((current_templates
->start
->opcode_modifier
.size
== SIZE16
5307 || current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5308 && flag_code
!= CODE_64BIT
5309 && ((current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5310 ^ (flag_code
== CODE_16BIT
)))
5312 as_bad (_("redundant %s prefix"),
5313 current_templates
->start
->name
);
5317 if (current_templates
->start
->base_opcode
== PSEUDO_PREFIX
)
5319 /* Handle pseudo prefixes. */
5320 switch (current_templates
->start
->extension_opcode
)
5324 i
.disp_encoding
= disp_encoding_8bit
;
5328 i
.disp_encoding
= disp_encoding_16bit
;
5332 i
.disp_encoding
= disp_encoding_32bit
;
5336 i
.dir_encoding
= dir_encoding_load
;
5340 i
.dir_encoding
= dir_encoding_store
;
5344 i
.vec_encoding
= vex_encoding_vex
;
5348 i
.vec_encoding
= vex_encoding_vex3
;
5352 i
.vec_encoding
= vex_encoding_evex
;
5356 i
.rex_encoding
= true;
5358 case Prefix_NoOptimize
:
5360 i
.no_optimize
= true;
5368 /* Add prefix, checking for repeated prefixes. */
5369 switch (add_prefix (current_templates
->start
->base_opcode
))
5374 if (current_templates
->start
->cpu_flags
.bitfield
.cpuibt
)
5375 i
.notrack_prefix
= current_templates
->start
->name
;
5378 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
5379 i
.hle_prefix
= current_templates
->start
->name
;
5380 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
5381 i
.bnd_prefix
= current_templates
->start
->name
;
5383 i
.rep_prefix
= current_templates
->start
->name
;
5389 /* Skip past PREFIX_SEPARATOR and reset token_start. */
5396 if (!current_templates
)
5398 /* Deprecated functionality (new code should use pseudo-prefixes instead):
5399 Check if we should swap operand or force 32bit displacement in
5401 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
5402 i
.dir_encoding
= dir_encoding_swap
;
5403 else if (mnem_p
- 3 == dot_p
5406 i
.disp_encoding
= disp_encoding_8bit
;
5407 else if (mnem_p
- 4 == dot_p
5411 i
.disp_encoding
= disp_encoding_32bit
;
5416 current_templates
= (const templates
*) str_hash_find (op_hash
, mnemonic
);
5419 if (!current_templates
)
5422 if (mnem_p
> mnemonic
)
5424 /* See if we can get a match by trimming off a suffix. */
5427 case WORD_MNEM_SUFFIX
:
5428 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
5429 i
.suffix
= SHORT_MNEM_SUFFIX
;
5432 case BYTE_MNEM_SUFFIX
:
5433 case QWORD_MNEM_SUFFIX
:
5434 i
.suffix
= mnem_p
[-1];
5437 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5439 case SHORT_MNEM_SUFFIX
:
5440 case LONG_MNEM_SUFFIX
:
5443 i
.suffix
= mnem_p
[-1];
5446 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5454 if (intel_float_operand (mnemonic
) == 1)
5455 i
.suffix
= SHORT_MNEM_SUFFIX
;
5457 i
.suffix
= LONG_MNEM_SUFFIX
;
5460 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5466 if (!current_templates
)
5468 as_bad (_("no such instruction: `%s'"), token_start
);
5473 if (current_templates
->start
->opcode_modifier
.jump
== JUMP
5474 || current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
)
5476 /* Check for a branch hint. We allow ",pt" and ",pn" for
5477 predict taken and predict not taken respectively.
5478 I'm not sure that branch hints actually do anything on loop
5479 and jcxz insns (JumpByte) for current Pentium4 chips. They
5480 may work in the future and it doesn't hurt to accept them
5482 if (l
[0] == ',' && l
[1] == 'p')
5486 if (!add_prefix (DS_PREFIX_OPCODE
))
5490 else if (l
[2] == 'n')
5492 if (!add_prefix (CS_PREFIX_OPCODE
))
5498 /* Any other comma loses. */
5501 as_bad (_("invalid character %s in mnemonic"),
5502 output_invalid (*l
));
5506 /* Check if instruction is supported on specified architecture. */
5508 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
5510 supported
|= cpu_flags_match (t
);
5511 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
5513 if (!cpu_arch_flags
.bitfield
.cpui386
&& (flag_code
!= CODE_16BIT
))
5514 as_warn (_("use .code16 to ensure correct addressing mode"));
5520 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
5521 as_bad (flag_code
== CODE_64BIT
5522 ? _("`%s' is not supported in 64-bit mode")
5523 : _("`%s' is only supported in 64-bit mode"),
5524 current_templates
->start
->name
);
5526 as_bad (_("`%s' is not supported on `%s%s'"),
5527 current_templates
->start
->name
,
5528 cpu_arch_name
? cpu_arch_name
: default_arch
,
5529 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
5535 parse_operands (char *l
, const char *mnemonic
)
5539 /* 1 if operand is pending after ','. */
5540 unsigned int expecting_operand
= 0;
5542 while (*l
!= END_OF_INSN
)
5544 /* Non-zero if operand parens not balanced. */
5545 unsigned int paren_not_balanced
= 0;
5546 /* True if inside double quotes. */
5547 bool in_quotes
= false;
5549 /* Skip optional white space before operand. */
5550 if (is_space_char (*l
))
5552 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
&& *l
!= '"')
5554 as_bad (_("invalid character %s before operand %d"),
5555 output_invalid (*l
),
5559 token_start
= l
; /* After white space. */
5560 while (in_quotes
|| paren_not_balanced
|| *l
!= ',')
5562 if (*l
== END_OF_INSN
)
5566 as_bad (_("unbalanced double quotes in operand %d."),
5570 if (paren_not_balanced
)
5572 know (!intel_syntax
);
5573 as_bad (_("unbalanced parenthesis in operand %d."),
5578 break; /* we are done */
5580 else if (*l
== '\\' && l
[1] == '"')
5583 in_quotes
= !in_quotes
;
5584 else if (!in_quotes
&& !is_operand_char (*l
) && !is_space_char (*l
))
5586 as_bad (_("invalid character %s in operand %d"),
5587 output_invalid (*l
),
5591 if (!intel_syntax
&& !in_quotes
)
5594 ++paren_not_balanced
;
5596 --paren_not_balanced
;
5600 if (l
!= token_start
)
5601 { /* Yes, we've read in another operand. */
5602 unsigned int operand_ok
;
5603 this_operand
= i
.operands
++;
5604 if (i
.operands
> MAX_OPERANDS
)
5606 as_bad (_("spurious operands; (%d operands/instruction max)"),
5610 i
.types
[this_operand
].bitfield
.unspecified
= 1;
5611 /* Now parse operand adding info to 'i' as we go along. */
5612 END_STRING_AND_SAVE (l
);
5614 if (i
.mem_operands
> 1)
5616 as_bad (_("too many memory references for `%s'"),
5623 i386_intel_operand (token_start
,
5624 intel_float_operand (mnemonic
));
5626 operand_ok
= i386_att_operand (token_start
);
5628 RESTORE_END_STRING (l
);
5634 if (expecting_operand
)
5636 expecting_operand_after_comma
:
5637 as_bad (_("expecting operand after ','; got nothing"));
5642 as_bad (_("expecting operand before ','; got nothing"));
5647 /* Now *l must be either ',' or END_OF_INSN. */
5650 if (*++l
== END_OF_INSN
)
5652 /* Just skip it, if it's \n complain. */
5653 goto expecting_operand_after_comma
;
5655 expecting_operand
= 1;
5662 swap_2_operands (unsigned int xchg1
, unsigned int xchg2
)
5664 union i386_op temp_op
;
5665 i386_operand_type temp_type
;
5666 unsigned int temp_flags
;
5667 enum bfd_reloc_code_real temp_reloc
;
5669 temp_type
= i
.types
[xchg2
];
5670 i
.types
[xchg2
] = i
.types
[xchg1
];
5671 i
.types
[xchg1
] = temp_type
;
5673 temp_flags
= i
.flags
[xchg2
];
5674 i
.flags
[xchg2
] = i
.flags
[xchg1
];
5675 i
.flags
[xchg1
] = temp_flags
;
5677 temp_op
= i
.op
[xchg2
];
5678 i
.op
[xchg2
] = i
.op
[xchg1
];
5679 i
.op
[xchg1
] = temp_op
;
5681 temp_reloc
= i
.reloc
[xchg2
];
5682 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
5683 i
.reloc
[xchg1
] = temp_reloc
;
5687 if (i
.mask
.operand
== xchg1
)
5688 i
.mask
.operand
= xchg2
;
5689 else if (i
.mask
.operand
== xchg2
)
5690 i
.mask
.operand
= xchg1
;
5692 if (i
.broadcast
.type
)
5694 if (i
.broadcast
.operand
== xchg1
)
5695 i
.broadcast
.operand
= xchg2
;
5696 else if (i
.broadcast
.operand
== xchg2
)
5697 i
.broadcast
.operand
= xchg1
;
5699 if (i
.rounding
.type
!= rc_none
)
5701 if (i
.rounding
.operand
== xchg1
)
5702 i
.rounding
.operand
= xchg2
;
5703 else if (i
.rounding
.operand
== xchg2
)
5704 i
.rounding
.operand
= xchg1
;
5709 swap_operands (void)
5715 swap_2_operands (1, i
.operands
- 2);
5719 swap_2_operands (0, i
.operands
- 1);
5725 if (i
.mem_operands
== 2)
5727 const reg_entry
*temp_seg
;
5728 temp_seg
= i
.seg
[0];
5729 i
.seg
[0] = i
.seg
[1];
5730 i
.seg
[1] = temp_seg
;
5734 /* Try to ensure constant immediates are represented in the smallest
5739 char guess_suffix
= 0;
5743 guess_suffix
= i
.suffix
;
5744 else if (i
.reg_operands
)
5746 /* Figure out a suffix from the last register operand specified.
5747 We can't do this properly yet, i.e. excluding special register
5748 instances, but the following works for instructions with
5749 immediates. In any case, we can't set i.suffix yet. */
5750 for (op
= i
.operands
; --op
>= 0;)
5751 if (i
.types
[op
].bitfield
.class != Reg
)
5753 else if (i
.types
[op
].bitfield
.byte
)
5755 guess_suffix
= BYTE_MNEM_SUFFIX
;
5758 else if (i
.types
[op
].bitfield
.word
)
5760 guess_suffix
= WORD_MNEM_SUFFIX
;
5763 else if (i
.types
[op
].bitfield
.dword
)
5765 guess_suffix
= LONG_MNEM_SUFFIX
;
5768 else if (i
.types
[op
].bitfield
.qword
)
5770 guess_suffix
= QWORD_MNEM_SUFFIX
;
5774 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
5775 guess_suffix
= WORD_MNEM_SUFFIX
;
5777 for (op
= i
.operands
; --op
>= 0;)
5778 if (operand_type_check (i
.types
[op
], imm
))
5780 switch (i
.op
[op
].imms
->X_op
)
5783 /* If a suffix is given, this operand may be shortened. */
5784 switch (guess_suffix
)
5786 case LONG_MNEM_SUFFIX
:
5787 i
.types
[op
].bitfield
.imm32
= 1;
5788 i
.types
[op
].bitfield
.imm64
= 1;
5790 case WORD_MNEM_SUFFIX
:
5791 i
.types
[op
].bitfield
.imm16
= 1;
5792 i
.types
[op
].bitfield
.imm32
= 1;
5793 i
.types
[op
].bitfield
.imm32s
= 1;
5794 i
.types
[op
].bitfield
.imm64
= 1;
5796 case BYTE_MNEM_SUFFIX
:
5797 i
.types
[op
].bitfield
.imm8
= 1;
5798 i
.types
[op
].bitfield
.imm8s
= 1;
5799 i
.types
[op
].bitfield
.imm16
= 1;
5800 i
.types
[op
].bitfield
.imm32
= 1;
5801 i
.types
[op
].bitfield
.imm32s
= 1;
5802 i
.types
[op
].bitfield
.imm64
= 1;
5806 /* If this operand is at most 16 bits, convert it
5807 to a signed 16 bit number before trying to see
5808 whether it will fit in an even smaller size.
5809 This allows a 16-bit operand such as $0xffe0 to
5810 be recognised as within Imm8S range. */
5811 if ((i
.types
[op
].bitfield
.imm16
)
5812 && fits_in_unsigned_word (i
.op
[op
].imms
->X_add_number
))
5814 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
5815 ^ 0x8000) - 0x8000);
5818 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
5819 if ((i
.types
[op
].bitfield
.imm32
)
5820 && fits_in_unsigned_long (i
.op
[op
].imms
->X_add_number
))
5822 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
5823 ^ ((offsetT
) 1 << 31))
5824 - ((offsetT
) 1 << 31));
5828 = operand_type_or (i
.types
[op
],
5829 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
5831 /* We must avoid matching of Imm32 templates when 64bit
5832 only immediate is available. */
5833 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
5834 i
.types
[op
].bitfield
.imm32
= 0;
5841 /* Symbols and expressions. */
5843 /* Convert symbolic operand to proper sizes for matching, but don't
5844 prevent matching a set of insns that only supports sizes other
5845 than those matching the insn suffix. */
5847 i386_operand_type mask
, allowed
;
5848 const insn_template
*t
= current_templates
->start
;
5850 operand_type_set (&mask
, 0);
5851 allowed
= t
->operand_types
[op
];
5853 while (++t
< current_templates
->end
)
5855 allowed
= operand_type_and (allowed
, anyimm
);
5856 allowed
= operand_type_or (allowed
, t
->operand_types
[op
]);
5858 switch (guess_suffix
)
5860 case QWORD_MNEM_SUFFIX
:
5861 mask
.bitfield
.imm64
= 1;
5862 mask
.bitfield
.imm32s
= 1;
5864 case LONG_MNEM_SUFFIX
:
5865 mask
.bitfield
.imm32
= 1;
5867 case WORD_MNEM_SUFFIX
:
5868 mask
.bitfield
.imm16
= 1;
5870 case BYTE_MNEM_SUFFIX
:
5871 mask
.bitfield
.imm8
= 1;
5876 allowed
= operand_type_and (mask
, allowed
);
5877 if (!operand_type_all_zero (&allowed
))
5878 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
5885 /* Try to use the smallest displacement type too. */
5887 optimize_disp (void)
5891 for (op
= i
.operands
; --op
>= 0;)
5892 if (operand_type_check (i
.types
[op
], disp
))
5894 if (i
.op
[op
].disps
->X_op
== O_constant
)
5896 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
5898 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
5900 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
5901 i
.op
[op
].disps
= NULL
;
5906 if (i
.types
[op
].bitfield
.disp16
5907 && fits_in_unsigned_word (op_disp
))
5909 /* If this operand is at most 16 bits, convert
5910 to a signed 16 bit number and don't use 64bit
5912 op_disp
= ((op_disp
^ 0x8000) - 0x8000);
5913 i
.types
[op
].bitfield
.disp64
= 0;
5917 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
5918 if ((i
.types
[op
].bitfield
.disp32
5919 || (flag_code
== CODE_64BIT
5920 && want_disp32 (current_templates
->start
)))
5921 && fits_in_unsigned_long (op_disp
))
5923 /* If this operand is at most 32 bits, convert
5924 to a signed 32 bit number and don't use 64bit
5926 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
5927 i
.types
[op
].bitfield
.disp64
= 0;
5928 i
.types
[op
].bitfield
.disp32
= 1;
5931 if (flag_code
== CODE_64BIT
&& fits_in_signed_long (op_disp
))
5933 i
.types
[op
].bitfield
.disp64
= 0;
5934 i
.types
[op
].bitfield
.disp32s
= 1;
5937 if ((i
.types
[op
].bitfield
.disp32
5938 || i
.types
[op
].bitfield
.disp32s
5939 || i
.types
[op
].bitfield
.disp16
)
5940 && fits_in_disp8 (op_disp
))
5941 i
.types
[op
].bitfield
.disp8
= 1;
5943 i
.op
[op
].disps
->X_add_number
= op_disp
;
5945 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
5946 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
5948 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
5949 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
5950 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
5953 /* We only support 64bit displacement on constants. */
5954 i
.types
[op
].bitfield
.disp64
= 0;
5958 /* Return 1 if there is a match in broadcast bytes between operand
5959 GIVEN and instruction template T. */
5962 match_broadcast_size (const insn_template
*t
, unsigned int given
)
5964 return ((t
->opcode_modifier
.broadcast
== BYTE_BROADCAST
5965 && i
.types
[given
].bitfield
.byte
)
5966 || (t
->opcode_modifier
.broadcast
== WORD_BROADCAST
5967 && i
.types
[given
].bitfield
.word
)
5968 || (t
->opcode_modifier
.broadcast
== DWORD_BROADCAST
5969 && i
.types
[given
].bitfield
.dword
)
5970 || (t
->opcode_modifier
.broadcast
== QWORD_BROADCAST
5971 && i
.types
[given
].bitfield
.qword
));
5974 /* Check if operands are valid for the instruction. */
5977 check_VecOperands (const insn_template
*t
)
5982 /* Templates allowing for ZMMword as well as YMMword and/or XMMword for
5983 any one operand are implicity requiring AVX512VL support if the actual
5984 operand size is YMMword or XMMword. Since this function runs after
5985 template matching, there's no need to check for YMMword/XMMword in
5987 cpu
= cpu_flags_and (t
->cpu_flags
, avx512
);
5988 if (!cpu_flags_all_zero (&cpu
)
5989 && !t
->cpu_flags
.bitfield
.cpuavx512vl
5990 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
5992 for (op
= 0; op
< t
->operands
; ++op
)
5994 if (t
->operand_types
[op
].bitfield
.zmmword
5995 && (i
.types
[op
].bitfield
.ymmword
5996 || i
.types
[op
].bitfield
.xmmword
))
5998 i
.error
= unsupported
;
6004 /* Somewhat similarly, templates specifying both AVX and AVX2 are
6005 requiring AVX2 support if the actual operand size is YMMword. */
6006 if (t
->cpu_flags
.bitfield
.cpuavx
6007 && t
->cpu_flags
.bitfield
.cpuavx2
6008 && !cpu_arch_flags
.bitfield
.cpuavx2
)
6010 for (op
= 0; op
< t
->operands
; ++op
)
6012 if (t
->operand_types
[op
].bitfield
.xmmword
6013 && i
.types
[op
].bitfield
.ymmword
)
6015 i
.error
= unsupported
;
6021 /* Without VSIB byte, we can't have a vector register for index. */
6022 if (!t
->opcode_modifier
.sib
6024 && (i
.index_reg
->reg_type
.bitfield
.xmmword
6025 || i
.index_reg
->reg_type
.bitfield
.ymmword
6026 || i
.index_reg
->reg_type
.bitfield
.zmmword
))
6028 i
.error
= unsupported_vector_index_register
;
6032 /* Check if default mask is allowed. */
6033 if (t
->opcode_modifier
.nodefmask
6034 && (!i
.mask
.reg
|| i
.mask
.reg
->reg_num
== 0))
6036 i
.error
= no_default_mask
;
6040 /* For VSIB byte, we need a vector register for index, and all vector
6041 registers must be distinct. */
6042 if (t
->opcode_modifier
.sib
&& t
->opcode_modifier
.sib
!= SIBMEM
)
6045 || !((t
->opcode_modifier
.sib
== VECSIB128
6046 && i
.index_reg
->reg_type
.bitfield
.xmmword
)
6047 || (t
->opcode_modifier
.sib
== VECSIB256
6048 && i
.index_reg
->reg_type
.bitfield
.ymmword
)
6049 || (t
->opcode_modifier
.sib
== VECSIB512
6050 && i
.index_reg
->reg_type
.bitfield
.zmmword
)))
6052 i
.error
= invalid_vsib_address
;
6056 gas_assert (i
.reg_operands
== 2 || i
.mask
.reg
);
6057 if (i
.reg_operands
== 2 && !i
.mask
.reg
)
6059 gas_assert (i
.types
[0].bitfield
.class == RegSIMD
);
6060 gas_assert (i
.types
[0].bitfield
.xmmword
6061 || i
.types
[0].bitfield
.ymmword
);
6062 gas_assert (i
.types
[2].bitfield
.class == RegSIMD
);
6063 gas_assert (i
.types
[2].bitfield
.xmmword
6064 || i
.types
[2].bitfield
.ymmword
);
6065 if (operand_check
== check_none
)
6067 if (register_number (i
.op
[0].regs
)
6068 != register_number (i
.index_reg
)
6069 && register_number (i
.op
[2].regs
)
6070 != register_number (i
.index_reg
)
6071 && register_number (i
.op
[0].regs
)
6072 != register_number (i
.op
[2].regs
))
6074 if (operand_check
== check_error
)
6076 i
.error
= invalid_vector_register_set
;
6079 as_warn (_("mask, index, and destination registers should be distinct"));
6081 else if (i
.reg_operands
== 1 && i
.mask
.reg
)
6083 if (i
.types
[1].bitfield
.class == RegSIMD
6084 && (i
.types
[1].bitfield
.xmmword
6085 || i
.types
[1].bitfield
.ymmword
6086 || i
.types
[1].bitfield
.zmmword
)
6087 && (register_number (i
.op
[1].regs
)
6088 == register_number (i
.index_reg
)))
6090 if (operand_check
== check_error
)
6092 i
.error
= invalid_vector_register_set
;
6095 if (operand_check
!= check_none
)
6096 as_warn (_("index and destination registers should be distinct"));
6101 /* For AMX instructions with 3 TMM register operands, all operands
6102 must be distinct. */
6103 if (i
.reg_operands
== 3
6104 && t
->operand_types
[0].bitfield
.tmmword
6105 && (i
.op
[0].regs
== i
.op
[1].regs
6106 || i
.op
[0].regs
== i
.op
[2].regs
6107 || i
.op
[1].regs
== i
.op
[2].regs
))
6109 i
.error
= invalid_tmm_register_set
;
6113 /* For some special instructions require that destination must be distinct
6114 from source registers. */
6115 if (t
->opcode_modifier
.distinctdest
)
6117 unsigned int dest_reg
= i
.operands
- 1;
6119 know (i
.operands
>= 3);
6121 /* #UD if dest_reg == src1_reg or dest_reg == src2_reg. */
6122 if (i
.op
[dest_reg
- 1].regs
== i
.op
[dest_reg
].regs
6123 || (i
.reg_operands
> 2
6124 && i
.op
[dest_reg
- 2].regs
== i
.op
[dest_reg
].regs
))
6126 i
.error
= invalid_dest_and_src_register_set
;
6131 /* Check if broadcast is supported by the instruction and is applied
6132 to the memory operand. */
6133 if (i
.broadcast
.type
)
6135 i386_operand_type type
, overlap
;
6137 /* Check if specified broadcast is supported in this instruction,
6138 and its broadcast bytes match the memory operand. */
6139 op
= i
.broadcast
.operand
;
6140 if (!t
->opcode_modifier
.broadcast
6141 || !(i
.flags
[op
] & Operand_Mem
)
6142 || (!i
.types
[op
].bitfield
.unspecified
6143 && !match_broadcast_size (t
, op
)))
6146 i
.error
= unsupported_broadcast
;
6150 i
.broadcast
.bytes
= ((1 << (t
->opcode_modifier
.broadcast
- 1))
6151 * i
.broadcast
.type
);
6152 operand_type_set (&type
, 0);
6153 switch (i
.broadcast
.bytes
)
6156 type
.bitfield
.word
= 1;
6159 type
.bitfield
.dword
= 1;
6162 type
.bitfield
.qword
= 1;
6165 type
.bitfield
.xmmword
= 1;
6168 type
.bitfield
.ymmword
= 1;
6171 type
.bitfield
.zmmword
= 1;
6177 overlap
= operand_type_and (type
, t
->operand_types
[op
]);
6178 if (t
->operand_types
[op
].bitfield
.class == RegSIMD
6179 && t
->operand_types
[op
].bitfield
.byte
6180 + t
->operand_types
[op
].bitfield
.word
6181 + t
->operand_types
[op
].bitfield
.dword
6182 + t
->operand_types
[op
].bitfield
.qword
> 1)
6184 overlap
.bitfield
.xmmword
= 0;
6185 overlap
.bitfield
.ymmword
= 0;
6186 overlap
.bitfield
.zmmword
= 0;
6188 if (operand_type_all_zero (&overlap
))
6191 if (t
->opcode_modifier
.checkregsize
)
6195 type
.bitfield
.baseindex
= 1;
6196 for (j
= 0; j
< i
.operands
; ++j
)
6199 && !operand_type_register_match(i
.types
[j
],
6200 t
->operand_types
[j
],
6202 t
->operand_types
[op
]))
6207 /* If broadcast is supported in this instruction, we need to check if
6208 operand of one-element size isn't specified without broadcast. */
6209 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
6211 /* Find memory operand. */
6212 for (op
= 0; op
< i
.operands
; op
++)
6213 if (i
.flags
[op
] & Operand_Mem
)
6215 gas_assert (op
< i
.operands
);
6216 /* Check size of the memory operand. */
6217 if (match_broadcast_size (t
, op
))
6219 i
.error
= broadcast_needed
;
6224 op
= MAX_OPERANDS
- 1; /* Avoid uninitialized variable warning. */
6226 /* Check if requested masking is supported. */
6229 switch (t
->opcode_modifier
.masking
)
6233 case MERGING_MASKING
:
6237 i
.error
= unsupported_masking
;
6241 case DYNAMIC_MASKING
:
6242 /* Memory destinations allow only merging masking. */
6243 if (i
.mask
.zeroing
&& i
.mem_operands
)
6245 /* Find memory operand. */
6246 for (op
= 0; op
< i
.operands
; op
++)
6247 if (i
.flags
[op
] & Operand_Mem
)
6249 gas_assert (op
< i
.operands
);
6250 if (op
== i
.operands
- 1)
6252 i
.error
= unsupported_masking
;
6262 /* Check if masking is applied to dest operand. */
6263 if (i
.mask
.reg
&& (i
.mask
.operand
!= i
.operands
- 1))
6265 i
.error
= mask_not_on_destination
;
6270 if (i
.rounding
.type
!= rc_none
)
6272 if (!t
->opcode_modifier
.sae
6273 || (i
.rounding
.type
!= saeonly
&& !t
->opcode_modifier
.staticrounding
))
6275 i
.error
= unsupported_rc_sae
;
6278 /* If the instruction has several immediate operands and one of
6279 them is rounding, the rounding operand should be the last
6280 immediate operand. */
6281 if (i
.imm_operands
> 1
6282 && i
.rounding
.operand
!= i
.imm_operands
- 1)
6284 i
.error
= rc_sae_operand_not_last_imm
;
6289 /* Check the special Imm4 cases; must be the first operand. */
6290 if (t
->cpu_flags
.bitfield
.cpuxop
&& t
->operands
== 5)
6292 if (i
.op
[0].imms
->X_op
!= O_constant
6293 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
6299 /* Turn off Imm<N> so that update_imm won't complain. */
6300 operand_type_set (&i
.types
[0], 0);
6303 /* Check vector Disp8 operand. */
6304 if (t
->opcode_modifier
.disp8memshift
6305 && i
.disp_encoding
!= disp_encoding_32bit
)
6307 if (i
.broadcast
.type
)
6308 i
.memshift
= t
->opcode_modifier
.broadcast
- 1;
6309 else if (t
->opcode_modifier
.disp8memshift
!= DISP8_SHIFT_VL
)
6310 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
6313 const i386_operand_type
*type
= NULL
, *fallback
= NULL
;
6316 for (op
= 0; op
< i
.operands
; op
++)
6317 if (i
.flags
[op
] & Operand_Mem
)
6319 if (t
->opcode_modifier
.evex
== EVEXLIG
)
6320 i
.memshift
= 2 + (i
.suffix
== QWORD_MNEM_SUFFIX
);
6321 else if (t
->operand_types
[op
].bitfield
.xmmword
6322 + t
->operand_types
[op
].bitfield
.ymmword
6323 + t
->operand_types
[op
].bitfield
.zmmword
<= 1)
6324 type
= &t
->operand_types
[op
];
6325 else if (!i
.types
[op
].bitfield
.unspecified
)
6326 type
= &i
.types
[op
];
6327 else /* Ambiguities get resolved elsewhere. */
6328 fallback
= &t
->operand_types
[op
];
6330 else if (i
.types
[op
].bitfield
.class == RegSIMD
6331 && t
->opcode_modifier
.evex
!= EVEXLIG
)
6333 if (i
.types
[op
].bitfield
.zmmword
)
6335 else if (i
.types
[op
].bitfield
.ymmword
&& i
.memshift
< 5)
6337 else if (i
.types
[op
].bitfield
.xmmword
&& i
.memshift
< 4)
6341 if (!type
&& !i
.memshift
)
6345 if (type
->bitfield
.zmmword
)
6347 else if (type
->bitfield
.ymmword
)
6349 else if (type
->bitfield
.xmmword
)
6353 /* For the check in fits_in_disp8(). */
6354 if (i
.memshift
== 0)
6358 for (op
= 0; op
< i
.operands
; op
++)
6359 if (operand_type_check (i
.types
[op
], disp
)
6360 && i
.op
[op
].disps
->X_op
== O_constant
)
6362 if (fits_in_disp8 (i
.op
[op
].disps
->X_add_number
))
6364 i
.types
[op
].bitfield
.disp8
= 1;
6367 i
.types
[op
].bitfield
.disp8
= 0;
6376 /* Check if encoding requirements are met by the instruction. */
6379 VEX_check_encoding (const insn_template
*t
)
6381 if (i
.vec_encoding
== vex_encoding_error
)
6383 i
.error
= unsupported
;
6387 if (i
.vec_encoding
== vex_encoding_evex
)
6389 /* This instruction must be encoded with EVEX prefix. */
6390 if (!is_evex_encoding (t
))
6392 i
.error
= unsupported
;
6398 if (!t
->opcode_modifier
.vex
)
6400 /* This instruction template doesn't have VEX prefix. */
6401 if (i
.vec_encoding
!= vex_encoding_default
)
6403 i
.error
= unsupported
;
6412 static const insn_template
*
6413 match_template (char mnem_suffix
)
6415 /* Points to template once we've found it. */
6416 const insn_template
*t
;
6417 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
6418 i386_operand_type overlap4
;
6419 unsigned int found_reverse_match
;
6420 i386_opcode_modifier suffix_check
;
6421 i386_operand_type operand_types
[MAX_OPERANDS
];
6422 int addr_prefix_disp
;
6423 unsigned int j
, size_match
, check_register
;
6424 enum i386_error specific_error
= 0;
6426 #if MAX_OPERANDS != 5
6427 # error "MAX_OPERANDS must be 5."
6430 found_reverse_match
= 0;
6431 addr_prefix_disp
= -1;
6433 /* Prepare for mnemonic suffix check. */
6434 memset (&suffix_check
, 0, sizeof (suffix_check
));
6435 switch (mnem_suffix
)
6437 case BYTE_MNEM_SUFFIX
:
6438 suffix_check
.no_bsuf
= 1;
6440 case WORD_MNEM_SUFFIX
:
6441 suffix_check
.no_wsuf
= 1;
6443 case SHORT_MNEM_SUFFIX
:
6444 suffix_check
.no_ssuf
= 1;
6446 case LONG_MNEM_SUFFIX
:
6447 suffix_check
.no_lsuf
= 1;
6449 case QWORD_MNEM_SUFFIX
:
6450 suffix_check
.no_qsuf
= 1;
6453 /* NB: In Intel syntax, normally we can check for memory operand
6454 size when there is no mnemonic suffix. But jmp and call have
6455 2 different encodings with Dword memory operand size, one with
6456 No_ldSuf and the other without. i.suffix is set to
6457 LONG_DOUBLE_MNEM_SUFFIX to skip the one with No_ldSuf. */
6458 if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
6459 suffix_check
.no_ldsuf
= 1;
6462 /* Must have right number of operands. */
6463 i
.error
= number_of_operands_mismatch
;
6465 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
6467 addr_prefix_disp
= -1;
6468 found_reverse_match
= 0;
6470 if (i
.operands
!= t
->operands
)
6473 /* Check processor support. */
6474 i
.error
= unsupported
;
6475 if (cpu_flags_match (t
) != CPU_FLAGS_PERFECT_MATCH
)
6478 /* Check Pseudo Prefix. */
6479 i
.error
= unsupported
;
6480 if (t
->opcode_modifier
.pseudovexprefix
6481 && !(i
.vec_encoding
== vex_encoding_vex
6482 || i
.vec_encoding
== vex_encoding_vex3
))
6485 /* Check AT&T mnemonic. */
6486 i
.error
= unsupported_with_intel_mnemonic
;
6487 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
6490 /* Check AT&T/Intel syntax. */
6491 i
.error
= unsupported_syntax
;
6492 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
6493 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
))
6496 /* Check Intel64/AMD64 ISA. */
6500 /* Default: Don't accept Intel64. */
6501 if (t
->opcode_modifier
.isa64
== INTEL64
)
6505 /* -mamd64: Don't accept Intel64 and Intel64 only. */
6506 if (t
->opcode_modifier
.isa64
>= INTEL64
)
6510 /* -mintel64: Don't accept AMD64. */
6511 if (t
->opcode_modifier
.isa64
== AMD64
&& flag_code
== CODE_64BIT
)
6516 /* Check the suffix. */
6517 i
.error
= invalid_instruction_suffix
;
6518 if ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
6519 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
6520 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
6521 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
6522 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
6523 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
))
6526 size_match
= operand_size_match (t
);
6530 /* This is intentionally not
6532 if (i.jumpabsolute != (t->opcode_modifier.jump == JUMP_ABSOLUTE))
6534 as the case of a missing * on the operand is accepted (perhaps with
6535 a warning, issued further down). */
6536 if (i
.jumpabsolute
&& t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
6538 i
.error
= operand_type_mismatch
;
6542 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6543 operand_types
[j
] = t
->operand_types
[j
];
6545 /* In general, don't allow
6546 - 64-bit operands outside of 64-bit mode,
6547 - 32-bit operands on pre-386. */
6548 j
= i
.imm_operands
+ (t
->operands
> i
.imm_operands
+ 1);
6549 if (((i
.suffix
== QWORD_MNEM_SUFFIX
6550 && flag_code
!= CODE_64BIT
6551 && !(t
->opcode_modifier
.opcodespace
== SPACE_0F
6552 && t
->base_opcode
== 0xc7
6553 && t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
6554 && t
->extension_opcode
== 1) /* cmpxchg8b */)
6555 || (i
.suffix
== LONG_MNEM_SUFFIX
6556 && !cpu_arch_flags
.bitfield
.cpui386
))
6558 ? (t
->opcode_modifier
.mnemonicsize
!= IGNORESIZE
6559 && !intel_float_operand (t
->name
))
6560 : intel_float_operand (t
->name
) != 2)
6561 && (t
->operands
== i
.imm_operands
6562 || (operand_types
[i
.imm_operands
].bitfield
.class != RegMMX
6563 && operand_types
[i
.imm_operands
].bitfield
.class != RegSIMD
6564 && operand_types
[i
.imm_operands
].bitfield
.class != RegMask
)
6565 || (operand_types
[j
].bitfield
.class != RegMMX
6566 && operand_types
[j
].bitfield
.class != RegSIMD
6567 && operand_types
[j
].bitfield
.class != RegMask
))
6568 && !t
->opcode_modifier
.sib
)
6571 /* Do not verify operands when there are none. */
6574 if (VEX_check_encoding (t
))
6576 specific_error
= i
.error
;
6580 /* We've found a match; break out of loop. */
6584 if (!t
->opcode_modifier
.jump
6585 || t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)
6587 /* There should be only one Disp operand. */
6588 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6589 if (operand_type_check (operand_types
[j
], disp
))
6591 if (j
< MAX_OPERANDS
)
6593 bool override
= (i
.prefix
[ADDR_PREFIX
] != 0);
6595 addr_prefix_disp
= j
;
6597 /* Address size prefix will turn Disp64/Disp32S/Disp32/Disp16
6598 operand into Disp32/Disp32/Disp16/Disp32 operand. */
6602 override
= !override
;
6605 if (operand_types
[j
].bitfield
.disp32
6606 && operand_types
[j
].bitfield
.disp16
)
6608 operand_types
[j
].bitfield
.disp16
= override
;
6609 operand_types
[j
].bitfield
.disp32
= !override
;
6611 operand_types
[j
].bitfield
.disp32s
= 0;
6612 operand_types
[j
].bitfield
.disp64
= 0;
6616 if (operand_types
[j
].bitfield
.disp32s
6617 || operand_types
[j
].bitfield
.disp64
)
6619 operand_types
[j
].bitfield
.disp64
&= !override
;
6620 operand_types
[j
].bitfield
.disp32s
&= !override
;
6621 operand_types
[j
].bitfield
.disp32
= override
;
6623 operand_types
[j
].bitfield
.disp16
= 0;
6631 case BFD_RELOC_386_GOT32
:
6632 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
6633 if (t
->base_opcode
== 0xa0
6634 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
)
6637 case BFD_RELOC_386_TLS_GOTIE
:
6638 case BFD_RELOC_386_TLS_LE_32
:
6639 case BFD_RELOC_X86_64_GOTTPOFF
:
6640 case BFD_RELOC_X86_64_TLSLD
:
6641 /* Don't allow KMOV in TLS code sequences. */
6642 if (t
->opcode_modifier
.vex
)
6649 /* We check register size if needed. */
6650 if (t
->opcode_modifier
.checkregsize
)
6652 check_register
= (1 << t
->operands
) - 1;
6653 if (i
.broadcast
.type
)
6654 check_register
&= ~(1 << i
.broadcast
.operand
);
6659 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
6660 switch (t
->operands
)
6663 if (!operand_type_match (overlap0
, i
.types
[0]))
6667 /* xchg %eax, %eax is a special case. It is an alias for nop
6668 only in 32bit mode and we can use opcode 0x90. In 64bit
6669 mode, we can't use 0x90 for xchg %eax, %eax since it should
6670 zero-extend %eax to %rax. */
6671 if (flag_code
== CODE_64BIT
6672 && t
->base_opcode
== 0x90
6673 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
6674 && i
.types
[0].bitfield
.instance
== Accum
6675 && i
.types
[0].bitfield
.dword
6676 && i
.types
[1].bitfield
.instance
== Accum
6677 && i
.types
[1].bitfield
.dword
)
6679 /* xrelease mov %eax, <disp> is another special case. It must not
6680 match the accumulator-only encoding of mov. */
6681 if (flag_code
!= CODE_64BIT
6683 && t
->base_opcode
== 0xa0
6684 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
6685 && i
.types
[0].bitfield
.instance
== Accum
6686 && (i
.flags
[1] & Operand_Mem
))
6691 if (!(size_match
& MATCH_STRAIGHT
))
6693 /* Reverse direction of operands if swapping is possible in the first
6694 place (operands need to be symmetric) and
6695 - the load form is requested, and the template is a store form,
6696 - the store form is requested, and the template is a load form,
6697 - the non-default (swapped) form is requested. */
6698 overlap1
= operand_type_and (operand_types
[0], operand_types
[1]);
6699 if (t
->opcode_modifier
.d
&& i
.reg_operands
== i
.operands
6700 && !operand_type_all_zero (&overlap1
))
6701 switch (i
.dir_encoding
)
6703 case dir_encoding_load
:
6704 if (operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6705 || t
->opcode_modifier
.regmem
)
6709 case dir_encoding_store
:
6710 if (!operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6711 && !t
->opcode_modifier
.regmem
)
6715 case dir_encoding_swap
:
6718 case dir_encoding_default
:
6721 /* If we want store form, we skip the current load. */
6722 if ((i
.dir_encoding
== dir_encoding_store
6723 || i
.dir_encoding
== dir_encoding_swap
)
6724 && i
.mem_operands
== 0
6725 && t
->opcode_modifier
.load
)
6730 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
6731 if (!operand_type_match (overlap0
, i
.types
[0])
6732 || !operand_type_match (overlap1
, i
.types
[1])
6733 || ((check_register
& 3) == 3
6734 && !operand_type_register_match (i
.types
[0],
6739 /* Check if other direction is valid ... */
6740 if (!t
->opcode_modifier
.d
)
6744 if (!(size_match
& MATCH_REVERSE
))
6746 /* Try reversing direction of operands. */
6747 overlap0
= operand_type_and (i
.types
[0], operand_types
[i
.operands
- 1]);
6748 overlap1
= operand_type_and (i
.types
[i
.operands
- 1], operand_types
[0]);
6749 if (!operand_type_match (overlap0
, i
.types
[0])
6750 || !operand_type_match (overlap1
, i
.types
[i
.operands
- 1])
6752 && !operand_type_register_match (i
.types
[0],
6753 operand_types
[i
.operands
- 1],
6754 i
.types
[i
.operands
- 1],
6757 /* Does not match either direction. */
6760 /* found_reverse_match holds which of D or FloatR
6762 if (!t
->opcode_modifier
.d
)
6763 found_reverse_match
= 0;
6764 else if (operand_types
[0].bitfield
.tbyte
)
6765 found_reverse_match
= Opcode_FloatD
;
6766 else if (operand_types
[0].bitfield
.xmmword
6767 || operand_types
[i
.operands
- 1].bitfield
.xmmword
6768 || operand_types
[0].bitfield
.class == RegMMX
6769 || operand_types
[i
.operands
- 1].bitfield
.class == RegMMX
6770 || is_any_vex_encoding(t
))
6771 found_reverse_match
= (t
->base_opcode
& 0xee) != 0x6e
6772 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
6774 found_reverse_match
= Opcode_D
;
6775 if (t
->opcode_modifier
.floatr
)
6776 found_reverse_match
|= Opcode_FloatR
;
6780 /* Found a forward 2 operand match here. */
6781 switch (t
->operands
)
6784 overlap4
= operand_type_and (i
.types
[4],
6788 overlap3
= operand_type_and (i
.types
[3],
6792 overlap2
= operand_type_and (i
.types
[2],
6797 switch (t
->operands
)
6800 if (!operand_type_match (overlap4
, i
.types
[4])
6801 || !operand_type_register_match (i
.types
[3],
6808 if (!operand_type_match (overlap3
, i
.types
[3])
6809 || ((check_register
& 0xa) == 0xa
6810 && !operand_type_register_match (i
.types
[1],
6814 || ((check_register
& 0xc) == 0xc
6815 && !operand_type_register_match (i
.types
[2],
6822 /* Here we make use of the fact that there are no
6823 reverse match 3 operand instructions. */
6824 if (!operand_type_match (overlap2
, i
.types
[2])
6825 || ((check_register
& 5) == 5
6826 && !operand_type_register_match (i
.types
[0],
6830 || ((check_register
& 6) == 6
6831 && !operand_type_register_match (i
.types
[1],
6839 /* Found either forward/reverse 2, 3 or 4 operand match here:
6840 slip through to break. */
6843 /* Check if vector operands are valid. */
6844 if (check_VecOperands (t
))
6846 specific_error
= i
.error
;
6850 /* Check if VEX/EVEX encoding requirements can be satisfied. */
6851 if (VEX_check_encoding (t
))
6853 specific_error
= i
.error
;
6857 /* We've found a match; break out of loop. */
6861 if (t
== current_templates
->end
)
6863 /* We found no match. */
6864 const char *err_msg
;
6865 switch (specific_error
? specific_error
: i
.error
)
6869 case operand_size_mismatch
:
6870 err_msg
= _("operand size mismatch");
6872 case operand_type_mismatch
:
6873 err_msg
= _("operand type mismatch");
6875 case register_type_mismatch
:
6876 err_msg
= _("register type mismatch");
6878 case number_of_operands_mismatch
:
6879 err_msg
= _("number of operands mismatch");
6881 case invalid_instruction_suffix
:
6882 err_msg
= _("invalid instruction suffix");
6885 err_msg
= _("constant doesn't fit in 4 bits");
6887 case unsupported_with_intel_mnemonic
:
6888 err_msg
= _("unsupported with Intel mnemonic");
6890 case unsupported_syntax
:
6891 err_msg
= _("unsupported syntax");
6894 as_bad (_("unsupported instruction `%s'"),
6895 current_templates
->start
->name
);
6897 case invalid_sib_address
:
6898 err_msg
= _("invalid SIB address");
6900 case invalid_vsib_address
:
6901 err_msg
= _("invalid VSIB address");
6903 case invalid_vector_register_set
:
6904 err_msg
= _("mask, index, and destination registers must be distinct");
6906 case invalid_tmm_register_set
:
6907 err_msg
= _("all tmm registers must be distinct");
6909 case invalid_dest_and_src_register_set
:
6910 err_msg
= _("destination and source registers must be distinct");
6912 case unsupported_vector_index_register
:
6913 err_msg
= _("unsupported vector index register");
6915 case unsupported_broadcast
:
6916 err_msg
= _("unsupported broadcast");
6918 case broadcast_needed
:
6919 err_msg
= _("broadcast is needed for operand of such type");
6921 case unsupported_masking
:
6922 err_msg
= _("unsupported masking");
6924 case mask_not_on_destination
:
6925 err_msg
= _("mask not on destination operand");
6927 case no_default_mask
:
6928 err_msg
= _("default mask isn't allowed");
6930 case unsupported_rc_sae
:
6931 err_msg
= _("unsupported static rounding/sae");
6933 case rc_sae_operand_not_last_imm
:
6935 err_msg
= _("RC/SAE operand must precede immediate operands");
6937 err_msg
= _("RC/SAE operand must follow immediate operands");
6939 case invalid_register_operand
:
6940 err_msg
= _("invalid register operand");
6943 as_bad (_("%s for `%s'"), err_msg
,
6944 current_templates
->start
->name
);
6948 if (!quiet_warnings
)
6951 && (i
.jumpabsolute
!= (t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)))
6952 as_warn (_("indirect %s without `*'"), t
->name
);
6954 if (t
->opcode_modifier
.isprefix
6955 && t
->opcode_modifier
.mnemonicsize
== IGNORESIZE
)
6957 /* Warn them that a data or address size prefix doesn't
6958 affect assembly of the next line of code. */
6959 as_warn (_("stand-alone `%s' prefix"), t
->name
);
6963 /* Copy the template we found. */
6964 install_template (t
);
6966 if (addr_prefix_disp
!= -1)
6967 i
.tm
.operand_types
[addr_prefix_disp
]
6968 = operand_types
[addr_prefix_disp
];
6970 if (found_reverse_match
)
6972 /* If we found a reverse match we must alter the opcode direction
6973 bit and clear/flip the regmem modifier one. found_reverse_match
6974 holds bits to change (different for int & float insns). */
6976 i
.tm
.base_opcode
^= found_reverse_match
;
6978 i
.tm
.operand_types
[0] = operand_types
[i
.operands
- 1];
6979 i
.tm
.operand_types
[i
.operands
- 1] = operand_types
[0];
6981 /* Certain SIMD insns have their load forms specified in the opcode
6982 table, and hence we need to _set_ RegMem instead of clearing it.
6983 We need to avoid setting the bit though on insns like KMOVW. */
6984 i
.tm
.opcode_modifier
.regmem
6985 = i
.tm
.opcode_modifier
.modrm
&& i
.tm
.opcode_modifier
.d
6986 && i
.tm
.operands
> 2U - i
.tm
.opcode_modifier
.sse2avx
6987 && !i
.tm
.opcode_modifier
.regmem
;
6996 unsigned int es_op
= i
.tm
.opcode_modifier
.isstring
- IS_STRING_ES_OP0
;
6997 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.baseindex
? es_op
: 0;
6999 if (i
.seg
[op
] != NULL
&& i
.seg
[op
] != reg_es
)
7001 as_bad (_("`%s' operand %u must use `%ses' segment"),
7003 intel_syntax
? i
.tm
.operands
- es_op
: es_op
+ 1,
7008 /* There's only ever one segment override allowed per instruction.
7009 This instruction possibly has a legal segment override on the
7010 second operand, so copy the segment to where non-string
7011 instructions store it, allowing common code. */
7012 i
.seg
[op
] = i
.seg
[1];
7018 process_suffix (void)
7020 bool is_crc32
= false, is_movx
= false;
7022 /* If matched instruction specifies an explicit instruction mnemonic
7024 if (i
.tm
.opcode_modifier
.size
== SIZE16
)
7025 i
.suffix
= WORD_MNEM_SUFFIX
;
7026 else if (i
.tm
.opcode_modifier
.size
== SIZE32
)
7027 i
.suffix
= LONG_MNEM_SUFFIX
;
7028 else if (i
.tm
.opcode_modifier
.size
== SIZE64
)
7029 i
.suffix
= QWORD_MNEM_SUFFIX
;
7030 else if (i
.reg_operands
7031 && (i
.operands
> 1 || i
.types
[0].bitfield
.class == Reg
)
7032 && !i
.tm
.opcode_modifier
.addrprefixopreg
)
7034 unsigned int numop
= i
.operands
;
7037 is_movx
= (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
7038 && (i
.tm
.base_opcode
| 8) == 0xbe)
7039 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
7040 && i
.tm
.base_opcode
== 0x63
7041 && i
.tm
.cpu_flags
.bitfield
.cpu64
);
7044 is_crc32
= (i
.tm
.base_opcode
== 0xf0
7045 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
7046 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
);
7048 /* movsx/movzx want only their source operand considered here, for the
7049 ambiguity checking below. The suffix will be replaced afterwards
7050 to represent the destination (register). */
7051 if (is_movx
&& (i
.tm
.opcode_modifier
.w
|| i
.tm
.base_opcode
== 0x63))
7054 /* crc32 needs REX.W set regardless of suffix / source operand size. */
7055 if (is_crc32
&& i
.tm
.operand_types
[1].bitfield
.qword
)
7058 /* If there's no instruction mnemonic suffix we try to invent one
7059 based on GPR operands. */
7062 /* We take i.suffix from the last register operand specified,
7063 Destination register type is more significant than source
7064 register type. crc32 in SSE4.2 prefers source register
7066 unsigned int op
= is_crc32
? 1 : i
.operands
;
7069 if (i
.tm
.operand_types
[op
].bitfield
.instance
== InstanceNone
7070 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7072 if (i
.types
[op
].bitfield
.class != Reg
)
7074 if (i
.types
[op
].bitfield
.byte
)
7075 i
.suffix
= BYTE_MNEM_SUFFIX
;
7076 else if (i
.types
[op
].bitfield
.word
)
7077 i
.suffix
= WORD_MNEM_SUFFIX
;
7078 else if (i
.types
[op
].bitfield
.dword
)
7079 i
.suffix
= LONG_MNEM_SUFFIX
;
7080 else if (i
.types
[op
].bitfield
.qword
)
7081 i
.suffix
= QWORD_MNEM_SUFFIX
;
7087 /* As an exception, movsx/movzx silently default to a byte source
7089 if (is_movx
&& i
.tm
.opcode_modifier
.w
&& !i
.suffix
&& !intel_syntax
)
7090 i
.suffix
= BYTE_MNEM_SUFFIX
;
7092 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
7095 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
7096 && i
.tm
.opcode_modifier
.no_bsuf
)
7098 else if (!check_byte_reg ())
7101 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
7104 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
7105 && i
.tm
.opcode_modifier
.no_lsuf
7106 && !i
.tm
.opcode_modifier
.todword
7107 && !i
.tm
.opcode_modifier
.toqword
)
7109 else if (!check_long_reg ())
7112 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
7115 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
7116 && i
.tm
.opcode_modifier
.no_qsuf
7117 && !i
.tm
.opcode_modifier
.todword
7118 && !i
.tm
.opcode_modifier
.toqword
)
7120 else if (!check_qword_reg ())
7123 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
7126 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
7127 && i
.tm
.opcode_modifier
.no_wsuf
)
7129 else if (!check_word_reg ())
7132 else if (intel_syntax
7133 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
)
7134 /* Do nothing if the instruction is going to ignore the prefix. */
7139 /* Undo the movsx/movzx change done above. */
7142 else if (i
.tm
.opcode_modifier
.mnemonicsize
== DEFAULTSIZE
7145 i
.suffix
= stackop_size
;
7146 if (stackop_size
== LONG_MNEM_SUFFIX
)
7148 /* stackop_size is set to LONG_MNEM_SUFFIX for the
7149 .code16gcc directive to support 16-bit mode with
7150 32-bit address. For IRET without a suffix, generate
7151 16-bit IRET (opcode 0xcf) to return from an interrupt
7153 if (i
.tm
.base_opcode
== 0xcf)
7155 i
.suffix
= WORD_MNEM_SUFFIX
;
7156 as_warn (_("generating 16-bit `iret' for .code16gcc directive"));
7158 /* Warn about changed behavior for segment register push/pop. */
7159 else if ((i
.tm
.base_opcode
| 1) == 0x07)
7160 as_warn (_("generating 32-bit `%s', unlike earlier gas versions"),
7165 && (i
.tm
.opcode_modifier
.jump
== JUMP_ABSOLUTE
7166 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
7167 || i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
7168 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
7169 && i
.tm
.base_opcode
== 0x01 /* [ls][gi]dt */
7170 && i
.tm
.extension_opcode
<= 3)))
7175 if (!i
.tm
.opcode_modifier
.no_qsuf
)
7177 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
7178 || i
.tm
.opcode_modifier
.no_lsuf
)
7179 i
.suffix
= QWORD_MNEM_SUFFIX
;
7184 if (!i
.tm
.opcode_modifier
.no_lsuf
)
7185 i
.suffix
= LONG_MNEM_SUFFIX
;
7188 if (!i
.tm
.opcode_modifier
.no_wsuf
)
7189 i
.suffix
= WORD_MNEM_SUFFIX
;
7195 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
7196 /* Also cover lret/retf/iret in 64-bit mode. */
7197 || (flag_code
== CODE_64BIT
7198 && !i
.tm
.opcode_modifier
.no_lsuf
7199 && !i
.tm
.opcode_modifier
.no_qsuf
))
7200 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
7201 /* Explicit sizing prefixes are assumed to disambiguate insns. */
7202 && !i
.prefix
[DATA_PREFIX
] && !(i
.prefix
[REX_PREFIX
] & REX_W
)
7203 /* Accept FLDENV et al without suffix. */
7204 && (i
.tm
.opcode_modifier
.no_ssuf
|| i
.tm
.opcode_modifier
.floatmf
))
7206 unsigned int suffixes
, evex
= 0;
7208 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
7209 if (!i
.tm
.opcode_modifier
.no_wsuf
)
7211 if (!i
.tm
.opcode_modifier
.no_lsuf
)
7213 if (!i
.tm
.opcode_modifier
.no_ldsuf
)
7215 if (!i
.tm
.opcode_modifier
.no_ssuf
)
7217 if (flag_code
== CODE_64BIT
&& !i
.tm
.opcode_modifier
.no_qsuf
)
7220 /* For [XYZ]MMWORD operands inspect operand sizes. While generally
7221 also suitable for AT&T syntax mode, it was requested that this be
7222 restricted to just Intel syntax. */
7223 if (intel_syntax
&& is_any_vex_encoding (&i
.tm
) && !i
.broadcast
.type
)
7227 for (op
= 0; op
< i
.tm
.operands
; ++op
)
7229 if (is_evex_encoding (&i
.tm
)
7230 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
7232 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
7233 i
.tm
.operand_types
[op
].bitfield
.xmmword
= 0;
7234 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
7235 i
.tm
.operand_types
[op
].bitfield
.ymmword
= 0;
7236 if (!i
.tm
.opcode_modifier
.evex
7237 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
7238 i
.tm
.opcode_modifier
.evex
= EVEX512
;
7241 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
7242 + i
.tm
.operand_types
[op
].bitfield
.ymmword
7243 + i
.tm
.operand_types
[op
].bitfield
.zmmword
< 2)
7246 /* Any properly sized operand disambiguates the insn. */
7247 if (i
.types
[op
].bitfield
.xmmword
7248 || i
.types
[op
].bitfield
.ymmword
7249 || i
.types
[op
].bitfield
.zmmword
)
7251 suffixes
&= ~(7 << 6);
7256 if ((i
.flags
[op
] & Operand_Mem
)
7257 && i
.tm
.operand_types
[op
].bitfield
.unspecified
)
7259 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
)
7261 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
7263 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
7265 if (is_evex_encoding (&i
.tm
))
7271 /* Are multiple suffixes / operand sizes allowed? */
7272 if (suffixes
& (suffixes
- 1))
7275 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
7276 || operand_check
== check_error
))
7278 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
7281 if (operand_check
== check_error
)
7283 as_bad (_("no instruction mnemonic suffix given and "
7284 "no register operands; can't size `%s'"), i
.tm
.name
);
7287 if (operand_check
== check_warning
)
7288 as_warn (_("%s; using default for `%s'"),
7290 ? _("ambiguous operand size")
7291 : _("no instruction mnemonic suffix given and "
7292 "no register operands"),
7295 if (i
.tm
.opcode_modifier
.floatmf
)
7296 i
.suffix
= SHORT_MNEM_SUFFIX
;
7298 /* handled below */;
7300 i
.tm
.opcode_modifier
.evex
= evex
;
7301 else if (flag_code
== CODE_16BIT
)
7302 i
.suffix
= WORD_MNEM_SUFFIX
;
7303 else if (!i
.tm
.opcode_modifier
.no_lsuf
)
7304 i
.suffix
= LONG_MNEM_SUFFIX
;
7306 i
.suffix
= QWORD_MNEM_SUFFIX
;
7312 /* In Intel syntax, movsx/movzx must have a "suffix" (checked above).
7313 In AT&T syntax, if there is no suffix (warned about above), the default
7314 will be byte extension. */
7315 if (i
.tm
.opcode_modifier
.w
&& i
.suffix
&& i
.suffix
!= BYTE_MNEM_SUFFIX
)
7316 i
.tm
.base_opcode
|= 1;
7318 /* For further processing, the suffix should represent the destination
7319 (register). This is already the case when one was used with
7320 mov[sz][bw]*, but we need to replace it for mov[sz]x, or if there was
7321 no suffix to begin with. */
7322 if (i
.tm
.opcode_modifier
.w
|| i
.tm
.base_opcode
== 0x63 || !i
.suffix
)
7324 if (i
.types
[1].bitfield
.word
)
7325 i
.suffix
= WORD_MNEM_SUFFIX
;
7326 else if (i
.types
[1].bitfield
.qword
)
7327 i
.suffix
= QWORD_MNEM_SUFFIX
;
7329 i
.suffix
= LONG_MNEM_SUFFIX
;
7331 i
.tm
.opcode_modifier
.w
= 0;
7335 if (!i
.tm
.opcode_modifier
.modrm
&& i
.reg_operands
&& i
.tm
.operands
< 3)
7336 i
.short_form
= (i
.tm
.operand_types
[0].bitfield
.class == Reg
)
7337 != (i
.tm
.operand_types
[1].bitfield
.class == Reg
);
7339 /* Change the opcode based on the operand size given by i.suffix. */
7342 /* Size floating point instruction. */
7343 case LONG_MNEM_SUFFIX
:
7344 if (i
.tm
.opcode_modifier
.floatmf
)
7346 i
.tm
.base_opcode
^= 4;
7350 case WORD_MNEM_SUFFIX
:
7351 case QWORD_MNEM_SUFFIX
:
7352 /* It's not a byte, select word/dword operation. */
7353 if (i
.tm
.opcode_modifier
.w
)
7356 i
.tm
.base_opcode
|= 8;
7358 i
.tm
.base_opcode
|= 1;
7361 case SHORT_MNEM_SUFFIX
:
7362 /* Now select between word & dword operations via the operand
7363 size prefix, except for instructions that will ignore this
7365 if (i
.suffix
!= QWORD_MNEM_SUFFIX
7366 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
7367 && !i
.tm
.opcode_modifier
.floatmf
7368 && !is_any_vex_encoding (&i
.tm
)
7369 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
7370 || (flag_code
== CODE_64BIT
7371 && i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)))
7373 unsigned int prefix
= DATA_PREFIX_OPCODE
;
7375 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
) /* jcxz, loop */
7376 prefix
= ADDR_PREFIX_OPCODE
;
7378 if (!add_prefix (prefix
))
7382 /* Set mode64 for an operand. */
7383 if (i
.suffix
== QWORD_MNEM_SUFFIX
7384 && flag_code
== CODE_64BIT
7385 && !i
.tm
.opcode_modifier
.norex64
7386 && !i
.tm
.opcode_modifier
.vexw
7387 /* Special case for xchg %rax,%rax. It is NOP and doesn't
7389 && ! (i
.operands
== 2
7390 && i
.tm
.base_opcode
== 0x90
7391 && i
.tm
.extension_opcode
== None
7392 && i
.types
[0].bitfield
.instance
== Accum
7393 && i
.types
[0].bitfield
.qword
7394 && i
.types
[1].bitfield
.instance
== Accum
7395 && i
.types
[1].bitfield
.qword
))
7401 /* Select word/dword/qword operation with explicit data sizing prefix
7402 when there are no suitable register operands. */
7403 if (i
.tm
.opcode_modifier
.w
7404 && (i
.prefix
[DATA_PREFIX
] || (i
.prefix
[REX_PREFIX
] & REX_W
))
7406 || (i
.reg_operands
== 1
7408 && (i
.tm
.operand_types
[0].bitfield
.instance
== RegC
7410 || i
.tm
.operand_types
[0].bitfield
.instance
== RegD
7411 || i
.tm
.operand_types
[1].bitfield
.instance
== RegD
7414 i
.tm
.base_opcode
|= 1;
7418 if (i
.tm
.opcode_modifier
.addrprefixopreg
)
7420 gas_assert (!i
.suffix
);
7421 gas_assert (i
.reg_operands
);
7423 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7426 /* The address size override prefix changes the size of the
7428 if (flag_code
== CODE_64BIT
7429 && i
.op
[0].regs
->reg_type
.bitfield
.word
)
7431 as_bad (_("16-bit addressing unavailable for `%s'"),
7436 if ((flag_code
== CODE_32BIT
7437 ? i
.op
[0].regs
->reg_type
.bitfield
.word
7438 : i
.op
[0].regs
->reg_type
.bitfield
.dword
)
7439 && !add_prefix (ADDR_PREFIX_OPCODE
))
7444 /* Check invalid register operand when the address size override
7445 prefix changes the size of register operands. */
7447 enum { need_word
, need_dword
, need_qword
} need
;
7449 /* Check the register operand for the address size prefix if
7450 the memory operand has no real registers, like symbol, DISP
7451 or bogus (x32-only) symbol(%rip) when symbol(%eip) is meant. */
7452 if (i
.mem_operands
== 1
7453 && i
.reg_operands
== 1
7455 && i
.types
[1].bitfield
.class == Reg
7456 && (flag_code
== CODE_32BIT
7457 ? i
.op
[1].regs
->reg_type
.bitfield
.word
7458 : i
.op
[1].regs
->reg_type
.bitfield
.dword
)
7459 && ((i
.base_reg
== NULL
&& i
.index_reg
== NULL
)
7460 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7461 || (x86_elf_abi
== X86_64_X32_ABI
7463 && i
.base_reg
->reg_num
== RegIP
7464 && i
.base_reg
->reg_type
.bitfield
.qword
))
7468 && !add_prefix (ADDR_PREFIX_OPCODE
))
7471 if (flag_code
== CODE_32BIT
)
7472 need
= i
.prefix
[ADDR_PREFIX
] ? need_word
: need_dword
;
7473 else if (i
.prefix
[ADDR_PREFIX
])
7476 need
= flag_code
== CODE_64BIT
? need_qword
: need_word
;
7478 for (op
= 0; op
< i
.operands
; op
++)
7480 if (i
.types
[op
].bitfield
.class != Reg
)
7486 if (i
.op
[op
].regs
->reg_type
.bitfield
.word
)
7490 if (i
.op
[op
].regs
->reg_type
.bitfield
.dword
)
7494 if (i
.op
[op
].regs
->reg_type
.bitfield
.qword
)
7499 as_bad (_("invalid register operand size for `%s'"),
7510 check_byte_reg (void)
7514 for (op
= i
.operands
; --op
>= 0;)
7516 /* Skip non-register operands. */
7517 if (i
.types
[op
].bitfield
.class != Reg
)
7520 /* If this is an eight bit register, it's OK. If it's the 16 or
7521 32 bit version of an eight bit register, we will just use the
7522 low portion, and that's OK too. */
7523 if (i
.types
[op
].bitfield
.byte
)
7526 /* I/O port address operands are OK too. */
7527 if (i
.tm
.operand_types
[op
].bitfield
.instance
== RegD
7528 && i
.tm
.operand_types
[op
].bitfield
.word
)
7531 /* crc32 only wants its source operand checked here. */
7532 if (i
.tm
.base_opcode
== 0xf0
7533 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
7534 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
7538 /* Any other register is bad. */
7539 as_bad (_("`%s%s' not allowed with `%s%c'"),
7540 register_prefix
, i
.op
[op
].regs
->reg_name
,
7541 i
.tm
.name
, i
.suffix
);
7548 check_long_reg (void)
7552 for (op
= i
.operands
; --op
>= 0;)
7553 /* Skip non-register operands. */
7554 if (i
.types
[op
].bitfield
.class != Reg
)
7556 /* Reject eight bit registers, except where the template requires
7557 them. (eg. movzb) */
7558 else if (i
.types
[op
].bitfield
.byte
7559 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7560 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7561 && (i
.tm
.operand_types
[op
].bitfield
.word
7562 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7564 as_bad (_("`%s%s' not allowed with `%s%c'"),
7566 i
.op
[op
].regs
->reg_name
,
7571 /* Error if the e prefix on a general reg is missing. */
7572 else if (i
.types
[op
].bitfield
.word
7573 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7574 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7575 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7577 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7578 register_prefix
, i
.op
[op
].regs
->reg_name
,
7582 /* Warn if the r prefix on a general reg is present. */
7583 else if (i
.types
[op
].bitfield
.qword
7584 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7585 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7586 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7589 && i
.tm
.opcode_modifier
.toqword
7590 && i
.types
[0].bitfield
.class != RegSIMD
)
7592 /* Convert to QWORD. We want REX byte. */
7593 i
.suffix
= QWORD_MNEM_SUFFIX
;
7597 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7598 register_prefix
, i
.op
[op
].regs
->reg_name
,
7607 check_qword_reg (void)
7611 for (op
= i
.operands
; --op
>= 0; )
7612 /* Skip non-register operands. */
7613 if (i
.types
[op
].bitfield
.class != Reg
)
7615 /* Reject eight bit registers, except where the template requires
7616 them. (eg. movzb) */
7617 else if (i
.types
[op
].bitfield
.byte
7618 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7619 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7620 && (i
.tm
.operand_types
[op
].bitfield
.word
7621 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7623 as_bad (_("`%s%s' not allowed with `%s%c'"),
7625 i
.op
[op
].regs
->reg_name
,
7630 /* Warn if the r prefix on a general reg is missing. */
7631 else if ((i
.types
[op
].bitfield
.word
7632 || i
.types
[op
].bitfield
.dword
)
7633 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7634 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7635 && i
.tm
.operand_types
[op
].bitfield
.qword
)
7637 /* Prohibit these changes in the 64bit mode, since the
7638 lowering is more complicated. */
7640 && i
.tm
.opcode_modifier
.todword
7641 && i
.types
[0].bitfield
.class != RegSIMD
)
7643 /* Convert to DWORD. We don't want REX byte. */
7644 i
.suffix
= LONG_MNEM_SUFFIX
;
7648 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7649 register_prefix
, i
.op
[op
].regs
->reg_name
,
7658 check_word_reg (void)
7661 for (op
= i
.operands
; --op
>= 0;)
7662 /* Skip non-register operands. */
7663 if (i
.types
[op
].bitfield
.class != Reg
)
7665 /* Reject eight bit registers, except where the template requires
7666 them. (eg. movzb) */
7667 else if (i
.types
[op
].bitfield
.byte
7668 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7669 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7670 && (i
.tm
.operand_types
[op
].bitfield
.word
7671 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7673 as_bad (_("`%s%s' not allowed with `%s%c'"),
7675 i
.op
[op
].regs
->reg_name
,
7680 /* Error if the e or r prefix on a general reg is present. */
7681 else if ((i
.types
[op
].bitfield
.dword
7682 || i
.types
[op
].bitfield
.qword
)
7683 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7684 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7685 && i
.tm
.operand_types
[op
].bitfield
.word
)
7687 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7688 register_prefix
, i
.op
[op
].regs
->reg_name
,
7692 /* For some instructions need encode as EVEX.W=1 without explicit VexW1. */
7693 else if (i
.types
[op
].bitfield
.qword
7695 && i
.tm
.opcode_modifier
.toqword
)
7697 /* Convert to QWORD. We want EVEX.W byte. */
7698 i
.suffix
= QWORD_MNEM_SUFFIX
;
7704 update_imm (unsigned int j
)
7706 i386_operand_type overlap
= i
.types
[j
];
7707 if ((overlap
.bitfield
.imm8
7708 || overlap
.bitfield
.imm8s
7709 || overlap
.bitfield
.imm16
7710 || overlap
.bitfield
.imm32
7711 || overlap
.bitfield
.imm32s
7712 || overlap
.bitfield
.imm64
)
7713 && !operand_type_equal (&overlap
, &imm8
)
7714 && !operand_type_equal (&overlap
, &imm8s
)
7715 && !operand_type_equal (&overlap
, &imm16
)
7716 && !operand_type_equal (&overlap
, &imm32
)
7717 && !operand_type_equal (&overlap
, &imm32s
)
7718 && !operand_type_equal (&overlap
, &imm64
))
7722 i386_operand_type temp
;
7724 operand_type_set (&temp
, 0);
7725 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
7727 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
7728 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
7730 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
7731 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
7732 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
7734 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
7735 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
7738 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
7741 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
7742 || operand_type_equal (&overlap
, &imm16_32
)
7743 || operand_type_equal (&overlap
, &imm16_32s
))
7745 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
7750 else if (i
.prefix
[REX_PREFIX
] & REX_W
)
7751 overlap
= operand_type_and (overlap
, imm32s
);
7752 else if (i
.prefix
[DATA_PREFIX
])
7753 overlap
= operand_type_and (overlap
,
7754 flag_code
!= CODE_16BIT
? imm16
: imm32
);
7755 if (!operand_type_equal (&overlap
, &imm8
)
7756 && !operand_type_equal (&overlap
, &imm8s
)
7757 && !operand_type_equal (&overlap
, &imm16
)
7758 && !operand_type_equal (&overlap
, &imm32
)
7759 && !operand_type_equal (&overlap
, &imm32s
)
7760 && !operand_type_equal (&overlap
, &imm64
))
7762 as_bad (_("no instruction mnemonic suffix given; "
7763 "can't determine immediate size"));
7767 i
.types
[j
] = overlap
;
7777 /* Update the first 2 immediate operands. */
7778 n
= i
.operands
> 2 ? 2 : i
.operands
;
7781 for (j
= 0; j
< n
; j
++)
7782 if (update_imm (j
) == 0)
7785 /* The 3rd operand can't be immediate operand. */
7786 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
7793 process_operands (void)
7795 /* Default segment register this instruction will use for memory
7796 accesses. 0 means unknown. This is only for optimizing out
7797 unnecessary segment overrides. */
7798 const reg_entry
*default_seg
= NULL
;
7800 if (i
.tm
.opcode_modifier
.sse2avx
)
7802 /* Legacy encoded insns allow explicit REX prefixes, so these prefixes
7804 i
.rex
|= i
.prefix
[REX_PREFIX
] & (REX_W
| REX_R
| REX_X
| REX_B
);
7805 i
.prefix
[REX_PREFIX
] = 0;
7808 /* ImmExt should be processed after SSE2AVX. */
7809 else if (i
.tm
.opcode_modifier
.immext
)
7812 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
7814 unsigned int dupl
= i
.operands
;
7815 unsigned int dest
= dupl
- 1;
7818 /* The destination must be an xmm register. */
7819 gas_assert (i
.reg_operands
7820 && MAX_OPERANDS
> dupl
7821 && operand_type_equal (&i
.types
[dest
], ®xmm
));
7823 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7824 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7826 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
7828 /* Keep xmm0 for instructions with VEX prefix and 3
7830 i
.tm
.operand_types
[0].bitfield
.instance
= InstanceNone
;
7831 i
.tm
.operand_types
[0].bitfield
.class = RegSIMD
;
7836 /* We remove the first xmm0 and keep the number of
7837 operands unchanged, which in fact duplicates the
7839 for (j
= 1; j
< i
.operands
; j
++)
7841 i
.op
[j
- 1] = i
.op
[j
];
7842 i
.types
[j
- 1] = i
.types
[j
];
7843 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7844 i
.flags
[j
- 1] = i
.flags
[j
];
7848 else if (i
.tm
.opcode_modifier
.implicit1stxmm0
)
7850 gas_assert ((MAX_OPERANDS
- 1) > dupl
7851 && (i
.tm
.opcode_modifier
.vexsources
7854 /* Add the implicit xmm0 for instructions with VEX prefix
7856 for (j
= i
.operands
; j
> 0; j
--)
7858 i
.op
[j
] = i
.op
[j
- 1];
7859 i
.types
[j
] = i
.types
[j
- 1];
7860 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
7861 i
.flags
[j
] = i
.flags
[j
- 1];
7864 = (const reg_entry
*) str_hash_find (reg_hash
, "xmm0");
7865 i
.types
[0] = regxmm
;
7866 i
.tm
.operand_types
[0] = regxmm
;
7869 i
.reg_operands
+= 2;
7874 i
.op
[dupl
] = i
.op
[dest
];
7875 i
.types
[dupl
] = i
.types
[dest
];
7876 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7877 i
.flags
[dupl
] = i
.flags
[dest
];
7886 i
.op
[dupl
] = i
.op
[dest
];
7887 i
.types
[dupl
] = i
.types
[dest
];
7888 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7889 i
.flags
[dupl
] = i
.flags
[dest
];
7892 if (i
.tm
.opcode_modifier
.immext
)
7895 else if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7896 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7900 for (j
= 1; j
< i
.operands
; j
++)
7902 i
.op
[j
- 1] = i
.op
[j
];
7903 i
.types
[j
- 1] = i
.types
[j
];
7905 /* We need to adjust fields in i.tm since they are used by
7906 build_modrm_byte. */
7907 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7909 i
.flags
[j
- 1] = i
.flags
[j
];
7916 else if (i
.tm
.opcode_modifier
.implicitquadgroup
)
7918 unsigned int regnum
, first_reg_in_group
, last_reg_in_group
;
7920 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
7921 gas_assert (i
.operands
>= 2 && i
.types
[1].bitfield
.class == RegSIMD
);
7922 regnum
= register_number (i
.op
[1].regs
);
7923 first_reg_in_group
= regnum
& ~3;
7924 last_reg_in_group
= first_reg_in_group
+ 3;
7925 if (regnum
!= first_reg_in_group
)
7926 as_warn (_("source register `%s%s' implicitly denotes"
7927 " `%s%.3s%u' to `%s%.3s%u' source group in `%s'"),
7928 register_prefix
, i
.op
[1].regs
->reg_name
,
7929 register_prefix
, i
.op
[1].regs
->reg_name
, first_reg_in_group
,
7930 register_prefix
, i
.op
[1].regs
->reg_name
, last_reg_in_group
,
7933 else if (i
.tm
.opcode_modifier
.regkludge
)
7935 /* The imul $imm, %reg instruction is converted into
7936 imul $imm, %reg, %reg, and the clr %reg instruction
7937 is converted into xor %reg, %reg. */
7939 unsigned int first_reg_op
;
7941 if (operand_type_check (i
.types
[0], reg
))
7945 /* Pretend we saw the extra register operand. */
7946 gas_assert (i
.reg_operands
== 1
7947 && i
.op
[first_reg_op
+ 1].regs
== 0);
7948 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
7949 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
7954 if (i
.tm
.opcode_modifier
.modrm
)
7956 /* The opcode is completed (modulo i.tm.extension_opcode which
7957 must be put into the modrm byte). Now, we make the modrm and
7958 index base bytes based on all the info we've collected. */
7960 default_seg
= build_modrm_byte ();
7962 else if (i
.types
[0].bitfield
.class == SReg
)
7964 if (flag_code
!= CODE_64BIT
7965 ? i
.tm
.base_opcode
== POP_SEG_SHORT
7966 && i
.op
[0].regs
->reg_num
== 1
7967 : (i
.tm
.base_opcode
| 1) == (POP_SEG386_SHORT
& 0xff)
7968 && i
.op
[0].regs
->reg_num
< 4)
7970 as_bad (_("you can't `%s %s%s'"),
7971 i
.tm
.name
, register_prefix
, i
.op
[0].regs
->reg_name
);
7974 if (i
.op
[0].regs
->reg_num
> 3
7975 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
7977 i
.tm
.base_opcode
^= (POP_SEG_SHORT
^ POP_SEG386_SHORT
) & 0xff;
7978 i
.tm
.opcode_modifier
.opcodespace
= SPACE_0F
;
7980 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
7982 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
7983 && (i
.tm
.base_opcode
& ~3) == MOV_AX_DISP32
)
7985 default_seg
= reg_ds
;
7987 else if (i
.tm
.opcode_modifier
.isstring
)
7989 /* For the string instructions that allow a segment override
7990 on one of their operands, the default segment is ds. */
7991 default_seg
= reg_ds
;
7993 else if (i
.short_form
)
7995 /* The register or float register operand is in operand
7997 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.class != Reg
;
7999 /* Register goes in low 3 bits of opcode. */
8000 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
8001 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
8003 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
8005 /* Warn about some common errors, but press on regardless.
8006 The first case can be generated by gcc (<= 2.8.1). */
8007 if (i
.operands
== 2)
8009 /* Reversed arguments on faddp, fsubp, etc. */
8010 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
8011 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
8012 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
8016 /* Extraneous `l' suffix on fp insn. */
8017 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
8018 register_prefix
, i
.op
[0].regs
->reg_name
);
8023 if ((i
.seg
[0] || i
.prefix
[SEG_PREFIX
])
8024 && i
.tm
.base_opcode
== 0x8d /* lea */
8025 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
8026 && !is_any_vex_encoding(&i
.tm
))
8028 if (!quiet_warnings
)
8029 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
8033 i
.prefix
[SEG_PREFIX
] = 0;
8037 /* If a segment was explicitly specified, and the specified segment
8038 is neither the default nor the one already recorded from a prefix,
8039 use an opcode prefix to select it. If we never figured out what
8040 the default segment is, then default_seg will be zero at this
8041 point, and the specified segment prefix will always be used. */
8043 && i
.seg
[0] != default_seg
8044 && i386_seg_prefixes
[i
.seg
[0]->reg_num
] != i
.prefix
[SEG_PREFIX
])
8046 if (!add_prefix (i386_seg_prefixes
[i
.seg
[0]->reg_num
]))
8052 static INLINE
void set_rex_vrex (const reg_entry
*r
, unsigned int rex_bit
,
8055 if (r
->reg_flags
& RegRex
)
8057 if (i
.rex
& rex_bit
)
8058 as_bad (_("same type of prefix used twice"));
8061 else if (do_sse2avx
&& (i
.rex
& rex_bit
) && i
.vex
.register_specifier
)
8063 gas_assert (i
.vex
.register_specifier
== r
);
8064 i
.vex
.register_specifier
+= 8;
8067 if (r
->reg_flags
& RegVRex
)
8071 static const reg_entry
*
8072 build_modrm_byte (void)
8074 const reg_entry
*default_seg
= NULL
;
8075 unsigned int source
, dest
;
8078 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
8081 unsigned int nds
, reg_slot
;
8084 dest
= i
.operands
- 1;
8087 /* There are 2 kinds of instructions:
8088 1. 5 operands: 4 register operands or 3 register operands
8089 plus 1 memory operand plus one Imm4 operand, VexXDS, and
8090 VexW0 or VexW1. The destination must be either XMM, YMM or
8092 2. 4 operands: 4 register operands or 3 register operands
8093 plus 1 memory operand, with VexXDS. */
8094 gas_assert ((i
.reg_operands
== 4
8095 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
8096 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
8097 && i
.tm
.opcode_modifier
.vexw
8098 && i
.tm
.operand_types
[dest
].bitfield
.class == RegSIMD
);
8100 /* If VexW1 is set, the first non-immediate operand is the source and
8101 the second non-immediate one is encoded in the immediate operand. */
8102 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
8104 source
= i
.imm_operands
;
8105 reg_slot
= i
.imm_operands
+ 1;
8109 source
= i
.imm_operands
+ 1;
8110 reg_slot
= i
.imm_operands
;
8113 if (i
.imm_operands
== 0)
8115 /* When there is no immediate operand, generate an 8bit
8116 immediate operand to encode the first operand. */
8117 exp
= &im_expressions
[i
.imm_operands
++];
8118 i
.op
[i
.operands
].imms
= exp
;
8119 i
.types
[i
.operands
] = imm8
;
8122 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
8123 exp
->X_op
= O_constant
;
8124 exp
->X_add_number
= register_number (i
.op
[reg_slot
].regs
) << 4;
8125 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
8129 gas_assert (i
.imm_operands
== 1);
8130 gas_assert (fits_in_imm4 (i
.op
[0].imms
->X_add_number
));
8131 gas_assert (!i
.tm
.opcode_modifier
.immext
);
8133 /* Turn on Imm8 again so that output_imm will generate it. */
8134 i
.types
[0].bitfield
.imm8
= 1;
8136 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
8137 i
.op
[0].imms
->X_add_number
8138 |= register_number (i
.op
[reg_slot
].regs
) << 4;
8139 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
8142 gas_assert (i
.tm
.operand_types
[nds
].bitfield
.class == RegSIMD
);
8143 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
8148 /* i.reg_operands MUST be the number of real register operands;
8149 implicit registers do not count. If there are 3 register
8150 operands, it must be a instruction with VexNDS. For a
8151 instruction with VexNDD, the destination register is encoded
8152 in VEX prefix. If there are 4 register operands, it must be
8153 a instruction with VEX prefix and 3 sources. */
8154 if (i
.mem_operands
== 0
8155 && ((i
.reg_operands
== 2
8156 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
8157 || (i
.reg_operands
== 3
8158 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8159 || (i
.reg_operands
== 4 && vex_3_sources
)))
8167 /* When there are 3 operands, one of them may be immediate,
8168 which may be the first or the last operand. Otherwise,
8169 the first operand must be shift count register (cl) or it
8170 is an instruction with VexNDS. */
8171 gas_assert (i
.imm_operands
== 1
8172 || (i
.imm_operands
== 0
8173 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
8174 || (i
.types
[0].bitfield
.instance
== RegC
8175 && i
.types
[0].bitfield
.byte
))));
8176 if (operand_type_check (i
.types
[0], imm
)
8177 || (i
.types
[0].bitfield
.instance
== RegC
8178 && i
.types
[0].bitfield
.byte
))
8184 /* When there are 4 operands, the first two must be 8bit
8185 immediate operands. The source operand will be the 3rd
8188 For instructions with VexNDS, if the first operand
8189 an imm8, the source operand is the 2nd one. If the last
8190 operand is imm8, the source operand is the first one. */
8191 gas_assert ((i
.imm_operands
== 2
8192 && i
.types
[0].bitfield
.imm8
8193 && i
.types
[1].bitfield
.imm8
)
8194 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
8195 && i
.imm_operands
== 1
8196 && (i
.types
[0].bitfield
.imm8
8197 || i
.types
[i
.operands
- 1].bitfield
.imm8
8198 || i
.rounding
.type
!= rc_none
)));
8199 if (i
.imm_operands
== 2)
8203 if (i
.types
[0].bitfield
.imm8
)
8210 if (is_evex_encoding (&i
.tm
))
8212 /* For EVEX instructions, when there are 5 operands, the
8213 first one must be immediate operand. If the second one
8214 is immediate operand, the source operand is the 3th
8215 one. If the last one is immediate operand, the source
8216 operand is the 2nd one. */
8217 gas_assert (i
.imm_operands
== 2
8218 && i
.tm
.opcode_modifier
.sae
8219 && operand_type_check (i
.types
[0], imm
));
8220 if (operand_type_check (i
.types
[1], imm
))
8222 else if (operand_type_check (i
.types
[4], imm
))
8236 /* RC/SAE operand could be between DEST and SRC. That happens
8237 when one operand is GPR and the other one is XMM/YMM/ZMM
8239 if (i
.rounding
.type
!= rc_none
&& i
.rounding
.operand
== dest
)
8242 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8244 /* For instructions with VexNDS, the register-only source
8245 operand must be a 32/64bit integer, XMM, YMM, ZMM, or mask
8246 register. It is encoded in VEX prefix. */
8248 i386_operand_type op
;
8251 /* Swap two source operands if needed. */
8252 if (i
.tm
.opcode_modifier
.swapsources
)
8260 op
= i
.tm
.operand_types
[vvvv
];
8261 if ((dest
+ 1) >= i
.operands
8262 || ((op
.bitfield
.class != Reg
8263 || (!op
.bitfield
.dword
&& !op
.bitfield
.qword
))
8264 && op
.bitfield
.class != RegSIMD
8265 && !operand_type_equal (&op
, ®mask
)))
8267 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
8273 /* One of the register operands will be encoded in the i.rm.reg
8274 field, the other in the combined i.rm.mode and i.rm.regmem
8275 fields. If no form of this instruction supports a memory
8276 destination operand, then we assume the source operand may
8277 sometimes be a memory operand and so we need to store the
8278 destination in the i.rm.reg field. */
8279 if (!i
.tm
.opcode_modifier
.regmem
8280 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
8282 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
8283 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
8284 set_rex_vrex (i
.op
[dest
].regs
, REX_R
, i
.tm
.opcode_modifier
.sse2avx
);
8285 set_rex_vrex (i
.op
[source
].regs
, REX_B
, false);
8289 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
8290 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
8291 set_rex_vrex (i
.op
[dest
].regs
, REX_B
, i
.tm
.opcode_modifier
.sse2avx
);
8292 set_rex_vrex (i
.op
[source
].regs
, REX_R
, false);
8294 if (flag_code
!= CODE_64BIT
&& (i
.rex
& REX_R
))
8296 if (i
.types
[!i
.tm
.opcode_modifier
.regmem
].bitfield
.class != RegCR
)
8299 add_prefix (LOCK_PREFIX_OPCODE
);
8303 { /* If it's not 2 reg operands... */
8308 unsigned int fake_zero_displacement
= 0;
8311 for (op
= 0; op
< i
.operands
; op
++)
8312 if (i
.flags
[op
] & Operand_Mem
)
8314 gas_assert (op
< i
.operands
);
8316 if (i
.tm
.opcode_modifier
.sib
)
8318 /* The index register of VSIB shouldn't be RegIZ. */
8319 if (i
.tm
.opcode_modifier
.sib
!= SIBMEM
8320 && i
.index_reg
->reg_num
== RegIZ
)
8323 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8326 i
.sib
.base
= NO_BASE_REGISTER
;
8327 i
.sib
.scale
= i
.log2_scale_factor
;
8328 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
8329 if (want_disp32 (&i
.tm
))
8330 i
.types
[op
].bitfield
.disp32
= 1;
8332 i
.types
[op
].bitfield
.disp32s
= 1;
8335 /* Since the mandatory SIB always has index register, so
8336 the code logic remains unchanged. The non-mandatory SIB
8337 without index register is allowed and will be handled
8341 if (i
.index_reg
->reg_num
== RegIZ
)
8342 i
.sib
.index
= NO_INDEX_REGISTER
;
8344 i
.sib
.index
= i
.index_reg
->reg_num
;
8345 set_rex_vrex (i
.index_reg
, REX_X
, false);
8349 default_seg
= reg_ds
;
8351 if (i
.base_reg
== 0)
8354 if (!i
.disp_operands
)
8355 fake_zero_displacement
= 1;
8356 if (i
.index_reg
== 0)
8358 /* Both check for VSIB and mandatory non-vector SIB. */
8359 gas_assert (!i
.tm
.opcode_modifier
.sib
8360 || i
.tm
.opcode_modifier
.sib
== SIBMEM
);
8361 /* Operand is just <disp> */
8362 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
8363 if (flag_code
== CODE_64BIT
)
8365 /* 64bit mode overwrites the 32bit absolute
8366 addressing by RIP relative addressing and
8367 absolute addressing is encoded by one of the
8368 redundant SIB forms. */
8369 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8370 i
.sib
.base
= NO_BASE_REGISTER
;
8371 i
.sib
.index
= NO_INDEX_REGISTER
;
8372 if (want_disp32 (&i
.tm
))
8373 i
.types
[op
].bitfield
.disp32
= 1;
8375 i
.types
[op
].bitfield
.disp32s
= 1;
8377 else if ((flag_code
== CODE_16BIT
)
8378 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
8380 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
8381 i
.types
[op
].bitfield
.disp16
= 1;
8385 i
.rm
.regmem
= NO_BASE_REGISTER
;
8386 i
.types
[op
].bitfield
.disp32
= 1;
8389 else if (!i
.tm
.opcode_modifier
.sib
)
8391 /* !i.base_reg && i.index_reg */
8392 if (i
.index_reg
->reg_num
== RegIZ
)
8393 i
.sib
.index
= NO_INDEX_REGISTER
;
8395 i
.sib
.index
= i
.index_reg
->reg_num
;
8396 i
.sib
.base
= NO_BASE_REGISTER
;
8397 i
.sib
.scale
= i
.log2_scale_factor
;
8398 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8399 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
8400 if (want_disp32 (&i
.tm
))
8401 i
.types
[op
].bitfield
.disp32
= 1;
8403 i
.types
[op
].bitfield
.disp32s
= 1;
8404 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
8408 /* RIP addressing for 64bit mode. */
8409 else if (i
.base_reg
->reg_num
== RegIP
)
8411 gas_assert (!i
.tm
.opcode_modifier
.sib
);
8412 i
.rm
.regmem
= NO_BASE_REGISTER
;
8413 i
.types
[op
].bitfield
.disp8
= 0;
8414 i
.types
[op
].bitfield
.disp16
= 0;
8415 i
.types
[op
].bitfield
.disp32
= 0;
8416 i
.types
[op
].bitfield
.disp32s
= 1;
8417 i
.types
[op
].bitfield
.disp64
= 0;
8418 i
.flags
[op
] |= Operand_PCrel
;
8419 if (! i
.disp_operands
)
8420 fake_zero_displacement
= 1;
8422 else if (i
.base_reg
->reg_type
.bitfield
.word
)
8424 gas_assert (!i
.tm
.opcode_modifier
.sib
);
8425 switch (i
.base_reg
->reg_num
)
8428 if (i
.index_reg
== 0)
8430 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
8431 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
8434 default_seg
= reg_ss
;
8435 if (i
.index_reg
== 0)
8438 if (operand_type_check (i
.types
[op
], disp
) == 0)
8440 /* fake (%bp) into 0(%bp) */
8441 if (i
.disp_encoding
== disp_encoding_16bit
)
8442 i
.types
[op
].bitfield
.disp16
= 1;
8444 i
.types
[op
].bitfield
.disp8
= 1;
8445 fake_zero_displacement
= 1;
8448 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
8449 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
8451 default: /* (%si) -> 4 or (%di) -> 5 */
8452 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
8454 if (!fake_zero_displacement
8458 fake_zero_displacement
= 1;
8459 if (i
.disp_encoding
== disp_encoding_8bit
)
8460 i
.types
[op
].bitfield
.disp8
= 1;
8462 i
.types
[op
].bitfield
.disp16
= 1;
8464 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8466 else /* i.base_reg and 32/64 bit mode */
8468 if (operand_type_check (i
.types
[op
], disp
))
8470 i
.types
[op
].bitfield
.disp16
= 0;
8471 i
.types
[op
].bitfield
.disp64
= 0;
8472 if (!want_disp32 (&i
.tm
))
8474 i
.types
[op
].bitfield
.disp32
= 0;
8475 i
.types
[op
].bitfield
.disp32s
= 1;
8479 i
.types
[op
].bitfield
.disp32
= 1;
8480 i
.types
[op
].bitfield
.disp32s
= 0;
8484 if (!i
.tm
.opcode_modifier
.sib
)
8485 i
.rm
.regmem
= i
.base_reg
->reg_num
;
8486 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
8488 i
.sib
.base
= i
.base_reg
->reg_num
;
8489 /* x86-64 ignores REX prefix bit here to avoid decoder
8491 if (!(i
.base_reg
->reg_flags
& RegRex
)
8492 && (i
.base_reg
->reg_num
== EBP_REG_NUM
8493 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
8494 default_seg
= reg_ss
;
8495 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
8497 fake_zero_displacement
= 1;
8498 if (i
.disp_encoding
== disp_encoding_32bit
)
8499 i
.types
[op
].bitfield
.disp32
= 1;
8501 i
.types
[op
].bitfield
.disp8
= 1;
8503 i
.sib
.scale
= i
.log2_scale_factor
;
8504 if (i
.index_reg
== 0)
8506 /* Only check for VSIB. */
8507 gas_assert (i
.tm
.opcode_modifier
.sib
!= VECSIB128
8508 && i
.tm
.opcode_modifier
.sib
!= VECSIB256
8509 && i
.tm
.opcode_modifier
.sib
!= VECSIB512
);
8511 /* <disp>(%esp) becomes two byte modrm with no index
8512 register. We've already stored the code for esp
8513 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
8514 Any base register besides %esp will not use the
8515 extra modrm byte. */
8516 i
.sib
.index
= NO_INDEX_REGISTER
;
8518 else if (!i
.tm
.opcode_modifier
.sib
)
8520 if (i
.index_reg
->reg_num
== RegIZ
)
8521 i
.sib
.index
= NO_INDEX_REGISTER
;
8523 i
.sib
.index
= i
.index_reg
->reg_num
;
8524 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8525 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
8530 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
8531 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
8535 if (!fake_zero_displacement
8539 fake_zero_displacement
= 1;
8540 if (i
.disp_encoding
== disp_encoding_8bit
)
8541 i
.types
[op
].bitfield
.disp8
= 1;
8543 i
.types
[op
].bitfield
.disp32
= 1;
8545 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8549 if (fake_zero_displacement
)
8551 /* Fakes a zero displacement assuming that i.types[op]
8552 holds the correct displacement size. */
8555 gas_assert (i
.op
[op
].disps
== 0);
8556 exp
= &disp_expressions
[i
.disp_operands
++];
8557 i
.op
[op
].disps
= exp
;
8558 exp
->X_op
= O_constant
;
8559 exp
->X_add_number
= 0;
8560 exp
->X_add_symbol
= (symbolS
*) 0;
8561 exp
->X_op_symbol
= (symbolS
*) 0;
8569 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
8571 if (operand_type_check (i
.types
[0], imm
))
8572 i
.vex
.register_specifier
= NULL
;
8575 /* VEX.vvvv encodes one of the sources when the first
8576 operand is not an immediate. */
8577 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
8578 i
.vex
.register_specifier
= i
.op
[0].regs
;
8580 i
.vex
.register_specifier
= i
.op
[1].regs
;
8583 /* Destination is a XMM register encoded in the ModRM.reg
8585 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
8586 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
8589 /* ModRM.rm and VEX.B encodes the other source. */
8590 if (!i
.mem_operands
)
8594 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
8595 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
8597 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
8599 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
8603 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
8605 i
.vex
.register_specifier
= i
.op
[2].regs
;
8606 if (!i
.mem_operands
)
8609 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
8610 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
8614 /* Fill in i.rm.reg or i.rm.regmem field with register operand
8615 (if any) based on i.tm.extension_opcode. Again, we must be
8616 careful to make sure that segment/control/debug/test/MMX
8617 registers are coded into the i.rm.reg field. */
8618 else if (i
.reg_operands
)
8621 unsigned int vex_reg
= ~0;
8623 for (op
= 0; op
< i
.operands
; op
++)
8624 if (i
.types
[op
].bitfield
.class == Reg
8625 || i
.types
[op
].bitfield
.class == RegBND
8626 || i
.types
[op
].bitfield
.class == RegMask
8627 || i
.types
[op
].bitfield
.class == SReg
8628 || i
.types
[op
].bitfield
.class == RegCR
8629 || i
.types
[op
].bitfield
.class == RegDR
8630 || i
.types
[op
].bitfield
.class == RegTR
8631 || i
.types
[op
].bitfield
.class == RegSIMD
8632 || i
.types
[op
].bitfield
.class == RegMMX
)
8637 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8639 /* For instructions with VexNDS, the register-only
8640 source operand is encoded in VEX prefix. */
8641 gas_assert (mem
!= (unsigned int) ~0);
8646 gas_assert (op
< i
.operands
);
8650 /* Check register-only source operand when two source
8651 operands are swapped. */
8652 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
8653 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
8657 gas_assert (mem
== (vex_reg
+ 1)
8658 && op
< i
.operands
);
8663 gas_assert (vex_reg
< i
.operands
);
8667 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
8669 /* For instructions with VexNDD, the register destination
8670 is encoded in VEX prefix. */
8671 if (i
.mem_operands
== 0)
8673 /* There is no memory operand. */
8674 gas_assert ((op
+ 2) == i
.operands
);
8679 /* There are only 2 non-immediate operands. */
8680 gas_assert (op
< i
.imm_operands
+ 2
8681 && i
.operands
== i
.imm_operands
+ 2);
8682 vex_reg
= i
.imm_operands
+ 1;
8686 gas_assert (op
< i
.operands
);
8688 if (vex_reg
!= (unsigned int) ~0)
8690 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
8692 if ((type
->bitfield
.class != Reg
8693 || (!type
->bitfield
.dword
&& !type
->bitfield
.qword
))
8694 && type
->bitfield
.class != RegSIMD
8695 && !operand_type_equal (type
, ®mask
))
8698 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
8701 /* Don't set OP operand twice. */
8704 /* If there is an extension opcode to put here, the
8705 register number must be put into the regmem field. */
8706 if (i
.tm
.extension_opcode
!= None
)
8708 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
8709 set_rex_vrex (i
.op
[op
].regs
, REX_B
,
8710 i
.tm
.opcode_modifier
.sse2avx
);
8714 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
8715 set_rex_vrex (i
.op
[op
].regs
, REX_R
,
8716 i
.tm
.opcode_modifier
.sse2avx
);
8720 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
8721 must set it to 3 to indicate this is a register operand
8722 in the regmem field. */
8723 if (!i
.mem_operands
)
8727 /* Fill in i.rm.reg field with extension opcode (if any). */
8728 if (i
.tm
.extension_opcode
!= None
)
8729 i
.rm
.reg
= i
.tm
.extension_opcode
;
8735 frag_opcode_byte (unsigned char byte
)
8737 if (now_seg
!= absolute_section
)
8738 FRAG_APPEND_1_CHAR (byte
);
8740 ++abs_section_offset
;
8744 flip_code16 (unsigned int code16
)
8746 gas_assert (i
.tm
.operands
== 1);
8748 return !(i
.prefix
[REX_PREFIX
] & REX_W
)
8749 && (code16
? i
.tm
.operand_types
[0].bitfield
.disp32
8750 || i
.tm
.operand_types
[0].bitfield
.disp32s
8751 : i
.tm
.operand_types
[0].bitfield
.disp16
)
/* Emit a relaxable (conditional or unconditional) branch.  The fixed
   part of the frag receives any prefixes plus one opcode byte; the
   variable part (5 bytes: possible extra opcode byte + 4-byte
   displacement) is left for md_convert_frag to resolve during
   relaxation via frag_var.
   NOTE(review): this extract is elided — several declarations, braces
   and statement lines between the visible ones are missing.  */
8756 output_branch (void)
8762 relax_substateT subtype
;
/* Relaxable branches cannot live in the absolute section: there is no
   frag to relax there.  */
8766 if (now_seg
== absolute_section
)
8768 as_bad (_("relaxable branches not supported in absolute section"));
8772 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
/* {disp32} pseudo prefix forces the BIG (32-bit displacement) form.  */
8773 size
= i
.disp_encoding
== disp_encoding_32bit
? BIG
: SMALL
;
/* A data-size prefix may flip the displacement width (see flip_code16).  */
8776 if (i
.prefix
[DATA_PREFIX
] != 0)
8780 code16
^= flip_code16(code16
);
8782 /* Pentium4 branch hints. */
8783 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8784 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8789 if (i
.prefix
[REX_PREFIX
] != 0)
8795 /* BND prefixed jump. */
8796 if (i
.prefix
[BND_PREFIX
] != 0)
/* Any prefixes not consumed above are dropped with a warning.  */
8802 if (i
.prefixes
!= 0)
8803 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8805 /* It's always a symbol; End frag & setup for relax.
8806 Make sure there is enough room in this frag for the largest
8807 instruction we may generate in md_convert_frag. This is 2
8808 bytes for the opcode and room for the prefix and largest
8810 frag_grow (prefix
+ 2 + 4);
8811 /* Prefix and 1 opcode byte go in fr_fix. */
8812 p
= frag_more (prefix
+ 1);
8813 if (i
.prefix
[DATA_PREFIX
] != 0)
8814 *p
++ = DATA_PREFIX_OPCODE
;
8815 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
8816 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
8817 *p
++ = i
.prefix
[SEG_PREFIX
];
8818 if (i
.prefix
[BND_PREFIX
] != 0)
8819 *p
++ = BND_PREFIX_OPCODE
;
8820 if (i
.prefix
[REX_PREFIX
] != 0)
8821 *p
++ = i
.prefix
[REX_PREFIX
];
8822 *p
= i
.tm
.base_opcode
;
/* Pick the relax state: unconditional jmp, or conditional jump
   (COND_JUMP86 variant when i386 insns are unavailable).  */
8824 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
8825 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
8826 else if (cpu_arch_flags
.bitfield
.cpui386
)
8827 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
8829 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
8832 sym
= i
.op
[0].disps
->X_add_symbol
;
8833 off
= i
.op
[0].disps
->X_add_number
;
8835 if (i
.op
[0].disps
->X_op
!= O_constant
8836 && i
.op
[0].disps
->X_op
!= O_symbol
)
8838 /* Handle complex expressions. */
8839 sym
= make_expr_symbol (i
.op
[0].disps
);
/* Record the code size mode so relaxation knows how to encode.  */
8843 frag_now
->tc_frag_data
.code64
= flag_code
== CODE_64BIT
;
8845 /* 1 possible extra opcode + 4 byte displacement go in var part.
8846 Pass reloc in fr_var. */
8847 frag_var (rs_machine_dependent
, 5, i
.reloc
[0], subtype
, sym
, off
, p
);
8850 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8851 /* Return TRUE iff PLT32 relocation should be used for branching to
/* Decide whether a branch to symbol S should get a PLT32 relocation
   (used as a marker for 32-bit PC-relative branches on x86-64).
   NOTE(review): this extract is elided — the actual `return` lines for
   each case are not visible; the comments below describe the intended
   outcome of each test.  */
8855 need_plt32_p (symbolS
*s
)
8857 /* PLT32 relocation is ELF only. */
8862 /* Don't emit PLT32 relocation on Solaris: neither native linker nor
8863 krtld support it. */
8867 /* Since there is no need to prepare for PLT branch on x86-64, we
8868 can generate R_X86_64_PLT32, instead of R_X86_64_PC32, which can
8869 be used as a marker for 32-bit PC-relative branches. */
8876 /* Weak or undefined symbol need PLT32 relocation. */
8877 if (S_IS_WEAK (s
) || !S_IS_DEFINED (s
))
8880 /* Non-global symbol doesn't need PLT32 relocation. */
8881 if (! S_IS_EXTERNAL (s
))
8884 /* Other global symbols need PLT32 relocation. NB: Symbol with
8885 non-default visibilities are treated as normal global symbol
8886 so that PLT32 relocation can be used as a marker for 32-bit
8887 PC-relative branches. It is useful for linker relaxation. */
/* Body of the non-relaxable jump emitter: writes prefixes, the opcode,
   and a fixed-size displacement with an explicit fixup/relocation
   (JUMP_BYTE loop/jecxz insns and dword jumps take this path).
   NOTE(review): the function signature and many interior lines are
   elided in this extract.  */
8898 bfd_reloc_code_real_type jump_reloc
= i
.reloc
[0];
8900 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)
8902 /* This is a loop or jecxz type instruction. */
8904 if (i
.prefix
[ADDR_PREFIX
] != 0)
8906 frag_opcode_byte (ADDR_PREFIX_OPCODE
);
8909 /* Pentium4 branch hints. */
8910 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8911 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8913 frag_opcode_byte (i
.prefix
[SEG_PREFIX
]);
8922 if (flag_code
== CODE_16BIT
)
8925 if (i
.prefix
[DATA_PREFIX
] != 0)
8927 frag_opcode_byte (DATA_PREFIX_OPCODE
);
/* Data-size prefix may flip the displacement width, as in
   output_branch.  */
8929 code16
^= flip_code16(code16
);
8937 /* BND prefixed jump. */
8938 if (i
.prefix
[BND_PREFIX
] != 0)
8940 frag_opcode_byte (i
.prefix
[BND_PREFIX
]);
8944 if (i
.prefix
[REX_PREFIX
] != 0)
8946 frag_opcode_byte (i
.prefix
[REX_PREFIX
]);
8950 if (i
.prefixes
!= 0)
8951 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
/* In the absolute section only account for the bytes; no fixup is
   possible there.  */
8953 if (now_seg
== absolute_section
)
8955 abs_section_offset
+= i
.opcode_length
+ size
;
8959 p
= frag_more (i
.opcode_length
+ size
);
8960 switch (i
.opcode_length
)
8963 *p
++ = i
.tm
.base_opcode
>> 8;
8966 *p
++ = i
.tm
.base_opcode
;
8972 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* Upgrade a plain 32-bit PC-relative branch to PLT32 where that helps
   the linker (see need_plt32_p).  */
8973 if (flag_code
== CODE_64BIT
&& size
== 4
8974 && jump_reloc
== NO_RELOC
&& i
.op
[0].disps
->X_add_number
== 0
8975 && need_plt32_p (i
.op
[0].disps
->X_add_symbol
))
8976 jump_reloc
= BFD_RELOC_X86_64_PLT32
;
8979 jump_reloc
= reloc (size
, 1, 1, jump_reloc
);
8981 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8982 i
.op
[0].disps
, 1, jump_reloc
);
8984 /* All jumps handled here are signed, but don't unconditionally use a
8985 signed limit check for 32 and 16 bit jumps as we want to allow wrap
8986 around at 4G (outside of 64-bit mode) and 64k (except for XBEGIN)
8991 fixP
->fx_signed
= 1;
/* 0xc7f8 is XBEGIN: its 16-bit displacement is always signed.  */
8995 if (i
.tm
.base_opcode
== 0xc7f8)
8996 fixP
->fx_signed
= 1;
9000 if (flag_code
== CODE_64BIT
)
9001 fixP
->fx_signed
= 1;
/* Emit a direct intersegment (far) jump/call: optional prefixes, one
   opcode byte, a 16- or 32-bit offset (operand 1) and a 16-bit segment
   selector (operand 0).  Constant operands are written directly;
   symbolic ones get fixups.
   NOTE(review): this extract is elided — declarations, braces and some
   statements between the visible lines are missing.  */
9007 output_interseg_jump (void)
9015 if (flag_code
== CODE_16BIT
)
9019 if (i
.prefix
[DATA_PREFIX
] != 0)
/* Far jumps never carry a REX prefix.  */
9026 gas_assert (!i
.prefix
[REX_PREFIX
]);
9032 if (i
.prefixes
!= 0)
9033 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
9035 if (now_seg
== absolute_section
)
9037 abs_section_offset
+= prefix
+ 1 + 2 + size
;
9041 /* 1 opcode; 2 segment; offset */
9042 p
= frag_more (prefix
+ 1 + 2 + size
);
9044 if (i
.prefix
[DATA_PREFIX
] != 0)
9045 *p
++ = DATA_PREFIX_OPCODE
;
9047 if (i
.prefix
[REX_PREFIX
] != 0)
9048 *p
++ = i
.prefix
[REX_PREFIX
];
9050 *p
++ = i
.tm
.base_opcode
;
9051 if (i
.op
[1].imms
->X_op
== O_constant
)
9053 offsetT n
= i
.op
[1].imms
->X_add_number
;
/* 16-bit offsets must fit in a (signed or unsigned) word.  */
9056 && !fits_in_unsigned_word (n
)
9057 && !fits_in_signed_word (n
))
9059 as_bad (_("16-bit jump out of range"));
9062 md_number_to_chars (p
, n
, size
);
9065 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
9066 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
/* Now the 2-byte segment selector.  */
9069 if (i
.op
[0].imms
->X_op
== O_constant
)
9070 md_number_to_chars (p
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
9072 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, 2,
9073 i
.op
[0].imms
, 0, reloc (2, 0, 0, i
.reloc
[0]));
9076 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* End-of-assembly hook (ELF only): emit the .note.gnu.property section
   carrying GNU_PROPERTY_X86_ISA_1_USED and
   GNU_PROPERTY_X86_FEATURE_2_USED accumulated while assembling.
   NOTE(review): the function signature line and several statements are
   elided in this extract.  */
9081 asection
*seg
= now_seg
;
9082 subsegT subseg
= now_subseg
;
9084 unsigned int alignment
, align_size_1
;
9085 unsigned int isa_1_descsz
, feature_2_descsz
, descsz
;
9086 unsigned int isa_1_descsz_raw
, feature_2_descsz_raw
;
9087 unsigned int padding
;
/* Nothing to do unless GNU property notes were requested.  */
9089 if (!IS_ELF
|| !x86_used_note
)
9092 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X86
;
9094 /* The .note.gnu.property section layout:
9096 Field Length Contents
9099 n_descsz 4 The note descriptor size
9100 n_type 4 NT_GNU_PROPERTY_TYPE_0
9102 n_desc n_descsz The program property array
9106 /* Create the .note.gnu.property section. */
9107 sec
= subseg_new (NOTE_GNU_PROPERTY_SECTION_NAME
, 0);
9108 bfd_set_section_flags (sec
,
/* Note alignment is 8 for ELFCLASS64, 4 otherwise.  */
9115 if (get_elf_backend_data (stdoutput
)->s
->elfclass
== ELFCLASS64
)
9126 bfd_set_section_alignment (sec
, alignment
);
9127 elf_section_type (sec
) = SHT_NOTE
;
9129 /* GNU_PROPERTY_X86_ISA_1_USED: 4-byte type + 4-byte data size
9131 isa_1_descsz_raw
= 4 + 4 + 4;
9132 /* Align GNU_PROPERTY_X86_ISA_1_USED. */
9133 isa_1_descsz
= (isa_1_descsz_raw
+ align_size_1
) & ~align_size_1
;
9135 feature_2_descsz_raw
= isa_1_descsz
;
9136 /* GNU_PROPERTY_X86_FEATURE_2_USED: 4-byte type + 4-byte data size
9138 feature_2_descsz_raw
+= 4 + 4 + 4;
9139 /* Align GNU_PROPERTY_X86_FEATURE_2_USED. */
9140 feature_2_descsz
= ((feature_2_descsz_raw
+ align_size_1
)
9143 descsz
= feature_2_descsz
;
9144 /* Section size: n_namsz + n_descsz + n_type + n_name + n_descsz. */
9145 p
= frag_more (4 + 4 + 4 + 4 + descsz
);
9147 /* Write n_namsz. */
9148 md_number_to_chars (p
, (valueT
) 4, 4);
9150 /* Write n_descsz. */
9151 md_number_to_chars (p
+ 4, (valueT
) descsz
, 4);
9154 md_number_to_chars (p
+ 4 * 2, (valueT
) NT_GNU_PROPERTY_TYPE_0
, 4);
9157 memcpy (p
+ 4 * 3, "GNU", 4);
9159 /* Write 4-byte type. */
9160 md_number_to_chars (p
+ 4 * 4,
9161 (valueT
) GNU_PROPERTY_X86_ISA_1_USED
, 4);
9163 /* Write 4-byte data size. */
9164 md_number_to_chars (p
+ 4 * 5, (valueT
) 4, 4);
9166 /* Write 4-byte data. */
9167 md_number_to_chars (p
+ 4 * 6, (valueT
) x86_isa_1_used
, 4);
9169 /* Zero out paddings. */
9170 padding
= isa_1_descsz
- isa_1_descsz_raw
;
9172 memset (p
+ 4 * 7, 0, padding
);
9174 /* Write 4-byte type. */
9175 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 4,
9176 (valueT
) GNU_PROPERTY_X86_FEATURE_2_USED
, 4);
9178 /* Write 4-byte data size. */
9179 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 5, (valueT
) 4, 4);
9181 /* Write 4-byte data. */
9182 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 6,
9183 (valueT
) x86_feature_2_used
, 4);
9185 /* Zero out paddings. */
9186 padding
= feature_2_descsz
- feature_2_descsz_raw
;
9188 memset (p
+ isa_1_descsz
+ 4 * 7, 0, padding
);
9190 /* We probably can't restore the current segment, for there likely
9193 subseg_set (seg
, subseg
);
/* Return the number of bytes emitted for the current instruction so
   far: the fr_fix totals of every frag from START_FRAG up to (but not
   including) frag_now, minus START_OFF, plus the bytes already in
   frag_now up to FRAG_NOW_PTR.  Used to enforce the 15-byte insn limit
   and to adjust GOTPC addends.
   NOTE(review): extract is elided — the do-loop body accumulating
   fr_fix and advancing fr is not visible here.  */
9198 encoding_length (const fragS
*start_frag
, offsetT start_off
,
9199 const char *frag_now_ptr
)
9201 unsigned int len
= 0;
9203 if (start_frag
!= frag_now
)
9205 const fragS
*fr
= start_frag
;
9210 } while (fr
&& fr
!= frag_now
);
9213 return len
- start_off
+ (frag_now_ptr
- frag_now
->fr_literal
);
9216 /* Return 1 for test, and, cmp, add, sub, inc and dec which may
9217 be macro-fused with conditional jumps.
9218 NB: If TEST/AND/CMP/ADD/SUB/INC/DEC is of RIP relative address,
9219 or is one of the following format:
/* On success *MF_CMP_P is set to the macro-fusion class of the insn
   (alu_cmp / test_and / incdec).  Insns with both a memory operand and
   an immediate (or a memory operand at all, for inc/dec) do not fuse.  */
9232 maybe_fused_with_jcc_p (enum mf_cmp_kind
* mf_cmp_p
)
9234 /* No RIP address. */
9235 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
9238 /* No opcodes outside of base encoding space. */
9239 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
9242 /* add, sub without add/sub m, imm. */
9243 if (i
.tm
.base_opcode
<= 5
9244 || (i
.tm
.base_opcode
>= 0x28 && i
.tm
.base_opcode
<= 0x2d)
9245 || ((i
.tm
.base_opcode
| 3) == 0x83
9246 && (i
.tm
.extension_opcode
== 0x5
9247 || i
.tm
.extension_opcode
== 0x0)))
9249 *mf_cmp_p
= mf_cmp_alu_cmp
;
9250 return !(i
.mem_operands
&& i
.imm_operands
);
9253 /* and without and m, imm. */
9254 if ((i
.tm
.base_opcode
>= 0x20 && i
.tm
.base_opcode
<= 0x25)
9255 || ((i
.tm
.base_opcode
| 3) == 0x83
9256 && i
.tm
.extension_opcode
== 0x4))
9258 *mf_cmp_p
= mf_cmp_test_and
;
9259 return !(i
.mem_operands
&& i
.imm_operands
);
9262 /* test without test m imm. */
9263 if ((i
.tm
.base_opcode
| 1) == 0x85
9264 || (i
.tm
.base_opcode
| 1) == 0xa9
9265 || ((i
.tm
.base_opcode
| 1) == 0xf7
9266 && i
.tm
.extension_opcode
== 0))
9268 *mf_cmp_p
= mf_cmp_test_and
;
9269 return !(i
.mem_operands
&& i
.imm_operands
);
9272 /* cmp without cmp m, imm. */
9273 if ((i
.tm
.base_opcode
>= 0x38 && i
.tm
.base_opcode
<= 0x3d)
9274 || ((i
.tm
.base_opcode
| 3) == 0x83
9275 && (i
.tm
.extension_opcode
== 0x7)))
9277 *mf_cmp_p
= mf_cmp_alu_cmp
;
9278 return !(i
.mem_operands
&& i
.imm_operands
);
9281 /* inc, dec without inc/dec m. */
/* Short-form 0x40-0x4f inc/dec only exists outside 64-bit mode
   (cpuno64); otherwise the 0xfe/0xff /0 and /1 forms are checked.  */
9282 if ((i
.tm
.cpu_flags
.bitfield
.cpuno64
9283 && (i
.tm
.base_opcode
| 0xf) == 0x4f)
9284 || ((i
.tm
.base_opcode
| 1) == 0xff
9285 && i
.tm
.extension_opcode
<= 0x1))
9287 *mf_cmp_p
= mf_cmp_incdec
;
9288 return !i
.mem_operands
;
9294 /* Return 1 if a FUSED_JCC_PADDING frag should be generated. */
/* True when -malign-branch processing is active for fused cmp+jcc
   pairs and the current insn is macro-fusible; warns when a .byte or
   similar non-insn directive in the same section prevents padding.
   NOTE(review): the extract elides the `return` lines and braces.  */
9297 add_fused_jcc_padding_frag_p (enum mf_cmp_kind
* mf_cmp_p
)
9299 /* NB: Don't work with COND_JUMP86 without i386. */
9300 if (!align_branch_power
9301 || now_seg
== absolute_section
9302 || !cpu_arch_flags
.bitfield
.cpui386
9303 || !(align_branch
& align_branch_fused_bit
))
9306 if (maybe_fused_with_jcc_p (mf_cmp_p
))
9308 if (last_insn
.kind
== last_insn_other
9309 || last_insn
.seg
!= now_seg
)
9312 as_warn_where (last_insn
.file
, last_insn
.line
,
9313 _("`%s` skips -malign-branch-boundary on `%s`"),
9314 last_insn
.name
, i
.tm
.name
);
9320 /* Return 1 if a BRANCH_PREFIX frag should be generated. */
/* True when segment-prefix padding (BRANCH_PREFIX frags) may be added
   before this insn to help align a following branch.
   NOTE(review): the extract elides the `return` lines and braces.  */
9323 add_branch_prefix_frag_p (void)
9325 /* NB: Don't work with COND_JUMP86 without i386. Don't add prefix
9326 to PadLock instructions since they include prefixes in opcode. */
9327 if (!align_branch_power
9328 || !align_branch_prefix_size
9329 || now_seg
== absolute_section
9330 || i
.tm
.cpu_flags
.bitfield
.cpupadlock
9331 || !cpu_arch_flags
.bitfield
.cpui386
)
9334 /* Don't add prefix if it is a prefix or there is no operand in case
9335 that segment prefix is special. */
9336 if (!i
.operands
|| i
.tm
.opcode_modifier
.isprefix
)
9339 if (last_insn
.kind
== last_insn_other
9340 || last_insn
.seg
!= now_seg
)
9344 as_warn_where (last_insn
.file
, last_insn
.line
,
9345 _("`%s` skips -malign-branch-boundary on `%s`"),
9346 last_insn
.name
, i
.tm
.name
);
9351 /* Return 1 if a BRANCH_PADDING frag should be generated. */
/* Classifies the current insn (jcc, direct jmp, ret, call, indirect
   jmp/call) into *BRANCH_P and, for jcc, *MF_JCC_P, and decides whether
   a BRANCH_PADDING frag is wanted per the -malign-branch= selection.
   NOTE(review): extract is elided — several braces, `return`s and
   intermediate statements are not visible.  */
9354 add_branch_padding_frag_p (enum align_branch_kind
*branch_p
,
9355 enum mf_jcc_kind
*mf_jcc_p
)
9359 /* NB: Don't work with COND_JUMP86 without i386. */
9360 if (!align_branch_power
9361 || now_seg
== absolute_section
9362 || !cpu_arch_flags
.bitfield
.cpui386
9363 || i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
9368 /* Check for jcc and direct jmp. */
9369 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
9371 if (i
.tm
.base_opcode
== JUMP_PC_RELATIVE
)
9373 *branch_p
= align_branch_jmp
;
9374 add_padding
= align_branch
& align_branch_jmp_bit
;
9378 /* Because J<cc> and JN<cc> share same group in macro-fusible table,
9379 igore the lowest bit. */
9380 *mf_jcc_p
= (i
.tm
.base_opcode
& 0x0e) >> 1;
9381 *branch_p
= align_branch_jcc
;
9382 if ((align_branch
& align_branch_jcc_bit
))
/* 0xc2/0xc3 are near ret (with/without pop count).  */
9386 else if ((i
.tm
.base_opcode
| 1) == 0xc3)
9389 *branch_p
= align_branch_ret
;
9390 if ((align_branch
& align_branch_ret_bit
))
9395 /* Check for indirect jmp, direct and indirect calls. */
9396 if (i
.tm
.base_opcode
== 0xe8)
9399 *branch_p
= align_branch_call
;
9400 if ((align_branch
& align_branch_call_bit
))
9403 else if (i
.tm
.base_opcode
== 0xff
9404 && (i
.tm
.extension_opcode
== 2
9405 || i
.tm
.extension_opcode
== 4))
9407 /* Indirect call and jmp. */
9408 *branch_p
= align_branch_indirect
;
9409 if ((align_branch
& align_branch_indirect_bit
))
9416 && (i
.op
[0].disps
->X_op
== O_symbol
9417 || (i
.op
[0].disps
->X_op
== O_subtract
9418 && i
.op
[0].disps
->X_op_symbol
== GOT_symbol
)))
9420 symbolS
*s
= i
.op
[0].disps
->X_add_symbol
;
9421 /* No padding to call to global or undefined tls_get_addr. */
9422 if ((S_IS_EXTERNAL (s
) || !S_IS_DEFINED (s
))
9423 && strcmp (S_GET_NAME (s
), tls_get_addr
) == 0)
9429 && last_insn
.kind
!= last_insn_other
9430 && last_insn
.seg
== now_seg
)
9433 as_warn_where (last_insn
.file
, last_insn
.line
,
9434 _("`%s` skips -malign-branch-boundary on `%s`"),
9435 last_insn
.name
, i
.tm
.name
);
/* Body of output_insn: emits the fully-assembled instruction in `i`
   into the current frag — GNU-property bookkeeping, branch-alignment
   padding frags, prefixes, VEX/EVEX prefix, opcode, ModRM/SIB,
   displacement and immediates — and enforces the 15-byte length limit.
   NOTE(review): the function signature and a large number of interior
   lines (braces, else-arms, conditions) are elided in this extract;
   comments below only describe what the visible lines show.  */
9445 fragS
*insn_start_frag
;
9446 offsetT insn_start_off
;
9447 fragS
*fragP
= NULL
;
9448 enum align_branch_kind branch
= align_branch_none
;
9449 /* The initializer is arbitrary just to avoid uninitialized error.
9450 it's actually either assigned in add_branch_padding_frag_p
9451 or never be used. */
9452 enum mf_jcc_kind mf_jcc
= mf_jcc_jo
;
/* Accumulate GNU property bits describing ISA/feature usage; emitted
   later by x86_cleanup into .note.gnu.property.  */
9454 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9455 if (IS_ELF
&& x86_used_note
&& now_seg
!= absolute_section
)
9457 if ((i
.xstate
& xstate_tmm
) == xstate_tmm
9458 || i
.tm
.cpu_flags
.bitfield
.cpuamx_tile
)
9459 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_TMM
;
9461 if (i
.tm
.cpu_flags
.bitfield
.cpu8087
9462 || i
.tm
.cpu_flags
.bitfield
.cpu287
9463 || i
.tm
.cpu_flags
.bitfield
.cpu387
9464 || i
.tm
.cpu_flags
.bitfield
.cpu687
9465 || i
.tm
.cpu_flags
.bitfield
.cpufisttp
)
9466 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X87
;
9468 if ((i
.xstate
& xstate_mmx
)
9469 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
9470 && !is_any_vex_encoding (&i
.tm
)
9471 && (i
.tm
.base_opcode
== 0x77 /* emms */
9472 || i
.tm
.base_opcode
== 0x0e /* femms */)))
9473 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MMX
;
/* Vector index register width determines the xstate class used.  */
9477 if (i
.index_reg
->reg_type
.bitfield
.zmmword
)
9478 i
.xstate
|= xstate_zmm
;
9479 else if (i
.index_reg
->reg_type
.bitfield
.ymmword
)
9480 i
.xstate
|= xstate_ymm
;
9481 else if (i
.index_reg
->reg_type
.bitfield
.xmmword
)
9482 i
.xstate
|= xstate_xmm
;
9485 /* vzeroall / vzeroupper */
9486 if (i
.tm
.base_opcode
== 0x77 && i
.tm
.cpu_flags
.bitfield
.cpuavx
)
9487 i
.xstate
|= xstate_ymm
;
9489 if ((i
.xstate
& xstate_xmm
)
9490 /* ldmxcsr / stmxcsr / vldmxcsr / vstmxcsr */
9491 || (i
.tm
.base_opcode
== 0xae
9492 && (i
.tm
.cpu_flags
.bitfield
.cpusse
9493 || i
.tm
.cpu_flags
.bitfield
.cpuavx
))
9494 || i
.tm
.cpu_flags
.bitfield
.cpuwidekl
9495 || i
.tm
.cpu_flags
.bitfield
.cpukl
)
9496 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XMM
;
9498 if ((i
.xstate
& xstate_ymm
) == xstate_ymm
)
9499 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_YMM
;
9500 if ((i
.xstate
& xstate_zmm
) == xstate_zmm
)
9501 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_ZMM
;
9502 if (i
.mask
.reg
|| (i
.xstate
& xstate_mask
) == xstate_mask
)
9503 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MASK
;
9504 if (i
.tm
.cpu_flags
.bitfield
.cpufxsr
)
9505 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_FXSR
;
9506 if (i
.tm
.cpu_flags
.bitfield
.cpuxsave
)
9507 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVE
;
9508 if (i
.tm
.cpu_flags
.bitfield
.cpuxsaveopt
)
9509 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
;
9510 if (i
.tm
.cpu_flags
.bitfield
.cpuxsavec
)
9511 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEC
;
/* Map CPU feature flags onto the x86-64 psABI micro-architecture
   levels (baseline / v2 / v3 / v4).  */
9513 if (x86_feature_2_used
9514 || i
.tm
.cpu_flags
.bitfield
.cpucmov
9515 || i
.tm
.cpu_flags
.bitfield
.cpusyscall
9516 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
9517 && i
.tm
.base_opcode
== 0xc7
9518 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
9519 && i
.tm
.extension_opcode
== 1) /* cmpxchg8b */)
9520 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_BASELINE
;
9521 if (i
.tm
.cpu_flags
.bitfield
.cpusse3
9522 || i
.tm
.cpu_flags
.bitfield
.cpussse3
9523 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
9524 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
9525 || i
.tm
.cpu_flags
.bitfield
.cpucx16
9526 || i
.tm
.cpu_flags
.bitfield
.cpupopcnt
9527 /* LAHF-SAHF insns in 64-bit mode. */
9528 || (flag_code
== CODE_64BIT
9529 && (i
.tm
.base_opcode
| 1) == 0x9f
9530 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
))
9531 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V2
;
9532 if (i
.tm
.cpu_flags
.bitfield
.cpuavx
9533 || i
.tm
.cpu_flags
.bitfield
.cpuavx2
9534 /* Any VEX encoded insns execpt for CpuAVX512F, CpuAVX512BW,
9535 CpuAVX512DQ, LPW, TBM and AMX. */
9536 || (i
.tm
.opcode_modifier
.vex
9537 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512f
9538 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
9539 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
9540 && !i
.tm
.cpu_flags
.bitfield
.cpulwp
9541 && !i
.tm
.cpu_flags
.bitfield
.cputbm
9542 && !(x86_feature_2_used
& GNU_PROPERTY_X86_FEATURE_2_TMM
))
9543 || i
.tm
.cpu_flags
.bitfield
.cpuf16c
9544 || i
.tm
.cpu_flags
.bitfield
.cpufma
9545 || i
.tm
.cpu_flags
.bitfield
.cpulzcnt
9546 || i
.tm
.cpu_flags
.bitfield
.cpumovbe
9547 || i
.tm
.cpu_flags
.bitfield
.cpuxsaves
9548 || (x86_feature_2_used
9549 & (GNU_PROPERTY_X86_FEATURE_2_XSAVE
9550 | GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
9551 | GNU_PROPERTY_X86_FEATURE_2_XSAVEC
)) != 0)
9552 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V3
;
9553 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512f
9554 || i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
9555 || i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
9556 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
9557 /* Any EVEX encoded insns except for AVX512ER, AVX512PF and
9559 || (i
.tm
.opcode_modifier
.evex
9560 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512er
9561 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512pf
9562 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512_4vnniw
))
9563 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V4
;
9567 /* Tie dwarf2 debug info to the address at the start of the insn.
9568 We can't do this after the insn has been output as the current
9569 frag may have been closed off. eg. by frag_var. */
9570 dwarf2_emit_insn (0);
9572 insn_start_frag
= frag_now
;
9573 insn_start_off
= frag_now_fix ();
/* Branch-alignment: optionally insert a BRANCH_PADDING frag before
   branches selected by -malign-branch.  */
9575 if (add_branch_padding_frag_p (&branch
, &mf_jcc
))
9578 /* Branch can be 8 bytes. Leave some room for prefixes. */
9579 unsigned int max_branch_padding_size
= 14;
9581 /* Align section to boundary. */
9582 record_alignment (now_seg
, align_branch_power
);
9584 /* Make room for padding. */
9585 frag_grow (max_branch_padding_size
);
9587 /* Start of the padding. */
9592 frag_var (rs_machine_dependent
, max_branch_padding_size
, 0,
9593 ENCODE_RELAX_STATE (BRANCH_PADDING
, 0),
9596 fragP
->tc_frag_data
.mf_type
= mf_jcc
;
9597 fragP
->tc_frag_data
.branch_type
= branch
;
9598 fragP
->tc_frag_data
.max_bytes
= max_branch_padding_size
;
/* Dispatch to the dedicated emitters for the various jump kinds.  */
9602 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
9604 else if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
9605 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
9607 else if (i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
)
9608 output_interseg_jump ();
9611 /* Output normal instructions here. */
9615 enum mf_cmp_kind mf_cmp
;
/* 0xaee8/0xaef0/0xaef8 are lfence/mfence/sfence forms (per the
   comment below) rewritten as `lock addl $0, (%esp)` when requested.  */
9618 && (i
.tm
.base_opcode
== 0xaee8
9619 || i
.tm
.base_opcode
== 0xaef0
9620 || i
.tm
.base_opcode
== 0xaef8))
9622 /* Encode lfence, mfence, and sfence as
9623 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
9624 if (flag_code
== CODE_16BIT
)
9625 as_bad (_("Cannot convert `%s' in 16-bit mode"), i
.tm
.name
);
9626 else if (omit_lock_prefix
)
9627 as_bad (_("Cannot convert `%s' with `-momit-lock-prefix=yes' in effect"),
9629 else if (now_seg
!= absolute_section
)
9631 offsetT val
= 0x240483f0ULL
;
9634 md_number_to_chars (p
, val
, 5);
9637 abs_section_offset
+= 5;
9641 /* Some processors fail on LOCK prefix. This options makes
9642 assembler ignore LOCK prefix and serves as a workaround. */
9643 if (omit_lock_prefix
)
9645 if (i
.tm
.base_opcode
== LOCK_PREFIX_OPCODE
9646 && i
.tm
.opcode_modifier
.isprefix
)
9648 i
.prefix
[LOCK_PREFIX
] = 0;
9652 /* Skip if this is a branch. */
9654 else if (add_fused_jcc_padding_frag_p (&mf_cmp
))
9656 /* Make room for padding. */
9657 frag_grow (MAX_FUSED_JCC_PADDING_SIZE
)
;
9662 frag_var (rs_machine_dependent
, MAX_FUSED_JCC_PADDING_SIZE
, 0,
9663 ENCODE_RELAX_STATE (FUSED_JCC_PADDING
, 0),
9666 fragP
->tc_frag_data
.mf_type
= mf_cmp
;
9667 fragP
->tc_frag_data
.branch_type
= align_branch_fused
;
9668 fragP
->tc_frag_data
.max_bytes
= MAX_FUSED_JCC_PADDING_SIZE
;
9670 else if (add_branch_prefix_frag_p ())
9672 unsigned int max_prefix_size
= align_branch_prefix_size
;
9674 /* Make room for padding. */
9675 frag_grow (max_prefix_size
);
9680 frag_var (rs_machine_dependent
, max_prefix_size
, 0,
9681 ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0),
9684 fragP
->tc_frag_data
.max_bytes
= max_prefix_size
;
9687 /* Since the VEX/EVEX prefix contains the implicit prefix, we
9688 don't need the explicit prefix. */
9689 if (!i
.tm
.opcode_modifier
.vex
&& !i
.tm
.opcode_modifier
.evex
)
9691 switch (i
.tm
.opcode_modifier
.opcodeprefix
)
9700 if (!i
.tm
.cpu_flags
.bitfield
.cpupadlock
9701 || (i
.prefix
[REP_PREFIX
] != 0xf3))
9705 switch (i
.opcode_length
)
9710 /* Check for pseudo prefixes. */
9711 if (!i
.tm
.opcode_modifier
.isprefix
|| i
.tm
.base_opcode
)
9713 as_bad_where (insn_start_frag
->fr_file
,
9714 insn_start_frag
->fr_line
,
9715 _("pseudo prefix without instruction"));
9725 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9726 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
9727 R_X86_64_GOTTPOFF relocation so that linker can safely
9728 perform IE->LE optimization. A dummy REX_OPCODE prefix
9729 is also needed for lea with R_X86_64_GOTPC32_TLSDESC
9730 relocation for GDesc -> IE/LE optimization. */
9731 if (x86_elf_abi
== X86_64_X32_ABI
9733 && (i
.reloc
[0] == BFD_RELOC_X86_64_GOTTPOFF
9734 || i
.reloc
[0] == BFD_RELOC_X86_64_GOTPC32_TLSDESC
)
9735 && i
.prefix
[REX_PREFIX
] == 0)
9736 add_prefix (REX_OPCODE
);
9739 /* The prefix bytes. */
9740 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
9742 frag_opcode_byte (*q
);
9746 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
9752 frag_opcode_byte (*q
);
9755 /* There should be no other prefixes for instructions
9760 /* For EVEX instructions i.vrex should become 0 after
9761 build_evex_prefix. For VEX instructions upper 16 registers
9762 aren't available, so VREX should be 0. */
9765 /* Now the VEX prefix. */
9766 if (now_seg
!= absolute_section
)
9768 p
= frag_more (i
.vex
.length
);
9769 for (j
= 0; j
< i
.vex
.length
; j
++)
9770 p
[j
] = i
.vex
.bytes
[j
];
9773 abs_section_offset
+= i
.vex
.length
;
9776 /* Now the opcode; be careful about word order here! */
9777 j
= i
.opcode_length
;
9779 switch (i
.tm
.opcode_modifier
.opcodespace
)
9794 if (now_seg
== absolute_section
)
9795 abs_section_offset
+= j
;
9798 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
/* Legacy-encoded insns in an extended opcode map need the 0x0f
   escape byte(s) emitted explicitly.  */
9804 && i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
9807 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_0F
)
9808 *p
++ = i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
9812 switch (i
.opcode_length
)
9815 /* Put out high byte first: can't use md_number_to_chars! */
9816 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
9819 *p
= i
.tm
.base_opcode
& 0xff;
9828 /* Now the modrm byte and sib byte (if present). */
9829 if (i
.tm
.opcode_modifier
.modrm
)
9831 frag_opcode_byte ((i
.rm
.regmem
<< 0)
9833 | (i
.rm
.mode
<< 6));
9834 /* If i.rm.regmem == ESP (4)
9835 && i.rm.mode != (Register mode)
9837 ==> need second modrm byte. */
9838 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
9840 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.word
))
9841 frag_opcode_byte ((i
.sib
.base
<< 0)
9842 | (i
.sib
.index
<< 3)
9843 | (i
.sib
.scale
<< 6));
9846 if (i
.disp_operands
)
9847 output_disp (insn_start_frag
, insn_start_off
);
9850 output_imm (insn_start_frag
, insn_start_off
);
9853 * frag_now_fix () returning plain abs_section_offset when we're in the
9854 * absolute section, and abs_section_offset not getting updated as data
9855 * gets added to the frag breaks the logic below.
9857 if (now_seg
!= absolute_section
)
/* j = total encoded length; x86 instructions may not exceed 15 bytes.  */
9859 j
= encoding_length (insn_start_frag
, insn_start_off
, frag_more (0));
9861 as_warn (_("instruction length of %u bytes exceeds the limit of 15"),
9865 /* NB: Don't add prefix with GOTPC relocation since
9866 output_disp() above depends on the fixed encoding
9867 length. Can't add prefix with TLS relocation since
9868 it breaks TLS linker optimization. */
9869 unsigned int max
= i
.has_gotpc_tls_reloc
? 0 : 15 - j
;
9870 /* Prefix count on the current instruction. */
9871 unsigned int count
= i
.vex
.length
;
9873 for (k
= 0; k
< ARRAY_SIZE (i
.prefix
); k
++)
9874 /* REX byte is encoded in VEX/EVEX prefix. */
9875 if (i
.prefix
[k
] && (k
!= REX_PREFIX
|| !i
.vex
.length
))
9878 /* Count prefixes for extended opcode maps. */
9880 switch (i
.tm
.opcode_modifier
.opcodespace
)
9895 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
9898 /* Set the maximum prefix size in BRANCH_PREFIX
9900 if (fragP
->tc_frag_data
.max_bytes
> max
)
9901 fragP
->tc_frag_data
.max_bytes
= max
;
9902 if (fragP
->tc_frag_data
.max_bytes
> count
)
9903 fragP
->tc_frag_data
.max_bytes
-= count
;
9905 fragP
->tc_frag_data
.max_bytes
= 0;
9909 /* Remember the maximum prefix size in FUSED_JCC_PADDING
9911 unsigned int max_prefix_size
;
9912 if (align_branch_prefix_size
> max
)
9913 max_prefix_size
= max
;
9915 max_prefix_size
= align_branch_prefix_size
;
9916 if (max_prefix_size
> count
)
9917 fragP
->tc_frag_data
.max_prefix_length
9918 = max_prefix_size
- count
;
9921 /* Use existing segment prefix if possible. Use CS
9922 segment prefix in 64-bit mode. In 32-bit mode, use SS
9923 segment prefix with ESP/EBP base register and use DS
9924 segment prefix without ESP/EBP base register. */
9925 if (i
.prefix
[SEG_PREFIX
])
9926 fragP
->tc_frag_data
.default_prefix
= i
.prefix
[SEG_PREFIX
];
9927 else if (flag_code
== CODE_64BIT
)
9928 fragP
->tc_frag_data
.default_prefix
= CS_PREFIX_OPCODE
;
/* reg_num 4/5 are ESP/EBP here — stack accesses default to SS.  */
9930 && (i
.base_reg
->reg_num
== 4
9931 || i
.base_reg
->reg_num
== 5))
9932 fragP
->tc_frag_data
.default_prefix
= SS_PREFIX_OPCODE
;
9934 fragP
->tc_frag_data
.default_prefix
= DS_PREFIX_OPCODE
;
9939 /* NB: Don't work with COND_JUMP86 without i386. */
9940 if (align_branch_power
9941 && now_seg
!= absolute_section
9942 && cpu_arch_flags
.bitfield
.cpui386
)
9944 /* Terminate each frag so that we can add prefix and check for
9946 frag_wane (frag_now
);
9953 pi ("" /*line*/, &i
);
9955 #endif /* DEBUG386 */
9958 /* Return the size of the displacement operand N. */
/* Size in bytes of displacement operand N, from i.types[n]'s disp
   bitfields.  NOTE(review): the extract elides the `return` lines for
   each case and the default (presumably 8/1/2 with a 4-byte default —
   confirm against the full source).  */
9961 disp_size (unsigned int n
)
9965 if (i
.types
[n
].bitfield
.disp64
)
9967 else if (i
.types
[n
].bitfield
.disp8
)
9969 else if (i
.types
[n
].bitfield
.disp16
)
9974 /* Return the size of the immediate operand N. */
/* Size in bytes of immediate operand N, from i.types[n]'s imm
   bitfields.  NOTE(review): the extract elides the `return` lines for
   each case and the default.  */
9977 imm_size (unsigned int n
)
9980 if (i
.types
[n
].bitfield
.imm64
)
9982 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
9984 else if (i
.types
[n
].bitfield
.imm16
)
/* Emit the displacement bytes for every displacement operand of the
   current insn: constants are written directly (scaled by i.memshift
   for compressed disp8), symbolic displacements get fixups, with
   special handling for _GLOBAL_OFFSET_TABLE_ (GOTPC conversion) and
   TLS relocations (which forbid prefix padding).
   NOTE(review): many interior lines (braces, conditions, the fixup's
   reloc argument) are elided in this extract.  */
9990 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
9995 for (n
= 0; n
< i
.operands
; n
++)
9997 if (operand_type_check (i
.types
[n
], disp
))
9999 int size
= disp_size (n
);
10001 if (now_seg
== absolute_section
)
10002 abs_section_offset
+= size
;
10003 else if (i
.op
[n
].disps
->X_op
== O_constant
)
10005 offsetT val
= i
.op
[n
].disps
->X_add_number
;
/* Compressed EVEX disp8 is stored scaled down by i.memshift.  */
10007 val
= offset_in_range (val
>> (size
== 1 ? i
.memshift
: 0),
10009 p
= frag_more (size
);
10010 md_number_to_chars (p
, val
, size
);
10014 enum bfd_reloc_code_real reloc_type
;
10015 int sign
= i
.types
[n
].bitfield
.disp32s
;
10016 int pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
10019 /* We can't have 8 bit displacement here. */
10020 gas_assert (!i
.types
[n
].bitfield
.disp8
);
10022 /* The PC relative address is computed relative
10023 to the instruction boundary, so in case immediate
10024 fields follows, we need to adjust the value. */
10025 if (pcrel
&& i
.imm_operands
)
10030 for (n1
= 0; n1
< i
.operands
; n1
++)
10031 if (operand_type_check (i
.types
[n1
], imm
))
10033 /* Only one immediate is allowed for PC
10034 relative address. */
10035 gas_assert (sz
== 0);
10036 sz
= imm_size (n1
);
10037 i
.op
[n
].disps
->X_add_number
-= sz
;
10039 /* We should find the immediate. */
10040 gas_assert (sz
!= 0);
10043 p
= frag_more (size
);
10044 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
/* _GLOBAL_OFFSET_TABLE_ operand: convert to a GOTPC relocation and
   bias the addend by the bytes already emitted for this insn.  */
10046 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
10047 && (((reloc_type
== BFD_RELOC_32
10048 || reloc_type
== BFD_RELOC_X86_64_32S
10049 || (reloc_type
== BFD_RELOC_64
10051 && (i
.op
[n
].disps
->X_op
== O_symbol
10052 || (i
.op
[n
].disps
->X_op
== O_add
10053 && ((symbol_get_value_expression
10054 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
10056 || reloc_type
== BFD_RELOC_32_PCREL
))
10060 reloc_type
= BFD_RELOC_386_GOTPC
;
10061 i
.has_gotpc_tls_reloc
= true;
10062 i
.op
[n
].disps
->X_add_number
+=
10063 encoding_length (insn_start_frag
, insn_start_off
, p
);
10065 else if (reloc_type
== BFD_RELOC_64
)
10066 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
10068 /* Don't do the adjustment for x86-64, as there
10069 the pcrel addressing is relative to the _next_
10070 insn, and that is taken care of in other code. */
10071 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
10073 else if (align_branch_power
)
10075 switch (reloc_type
)
10077 case BFD_RELOC_386_TLS_GD
:
10078 case BFD_RELOC_386_TLS_LDM
:
10079 case BFD_RELOC_386_TLS_IE
:
10080 case BFD_RELOC_386_TLS_IE_32
:
10081 case BFD_RELOC_386_TLS_GOTIE
:
10082 case BFD_RELOC_386_TLS_GOTDESC
:
10083 case BFD_RELOC_386_TLS_DESC_CALL
:
10084 case BFD_RELOC_X86_64_TLSGD
:
10085 case BFD_RELOC_X86_64_TLSLD
:
10086 case BFD_RELOC_X86_64_GOTTPOFF
:
10087 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
10088 case BFD_RELOC_X86_64_TLSDESC_CALL
:
10089 i
.has_gotpc_tls_reloc
= true;
10094 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
,
10095 size
, i
.op
[n
].disps
, pcrel
,
10098 if (flag_code
== CODE_64BIT
&& size
== 4 && pcrel
10099 && !i
.prefix
[ADDR_PREFIX
])
10100 fixP
->fx_signed
= 1;
10102 /* Check for "call/jmp *mem", "mov mem, %reg",
10103 "test %reg, mem" and "binop mem, %reg" where binop
10104 is one of adc, add, and, cmp, or, sbb, sub, xor
10105 instructions without data prefix. Always generate
10106 R_386_GOT32X for "sym*GOT" operand in 32-bit mode. */
10107 if (i
.prefix
[DATA_PREFIX
] == 0
10108 && (generate_relax_relocations
10111 && i
.rm
.regmem
== 5))
10113 || (i
.rm
.mode
== 0 && i
.rm
.regmem
== 5))
10114 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
10115 && ((i
.operands
== 1
10116 && i
.tm
.base_opcode
== 0xff
10117 && (i
.rm
.reg
== 2 || i
.rm
.reg
== 4))
10118 || (i
.operands
== 2
10119 && (i
.tm
.base_opcode
== 0x8b
10120 || i
.tm
.base_opcode
== 0x85
10121 || (i
.tm
.base_opcode
& ~0x38) == 0x03))))
/* fx_tcbit/fx_tcbit2 tell the reloc machinery whether a relaxable
   GOT32X-style relocation may be used (REX / RIP-relative cases).  */
10125 fixP
->fx_tcbit
= i
.rex
!= 0;
10127 && (i
.base_reg
->reg_num
== RegIP
))
10128 fixP
->fx_tcbit2
= 1;
10131 fixP
->fx_tcbit2
= 1;
10139 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
10144 for (n
= 0; n
< i
.operands
; n
++)
10146 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
10147 if (i
.rounding
.type
!= rc_none
&& n
== i
.rounding
.operand
)
10150 if (operand_type_check (i
.types
[n
], imm
))
10152 int size
= imm_size (n
);
10154 if (now_seg
== absolute_section
)
10155 abs_section_offset
+= size
;
10156 else if (i
.op
[n
].imms
->X_op
== O_constant
)
10160 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
10162 p
= frag_more (size
);
10163 md_number_to_chars (p
, val
, size
);
10167 /* Not absolute_section.
10168 Need a 32-bit fixup (don't support 8bit
10169 non-absolute imms). Try to support other
10171 enum bfd_reloc_code_real reloc_type
;
10174 if (i
.types
[n
].bitfield
.imm32s
10175 && (i
.suffix
== QWORD_MNEM_SUFFIX
10176 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)))
10181 p
= frag_more (size
);
10182 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
10184 /* This is tough to explain. We end up with this one if we
10185 * have operands that look like
10186 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
10187 * obtain the absolute address of the GOT, and it is strongly
10188 * preferable from a performance point of view to avoid using
10189 * a runtime relocation for this. The actual sequence of
10190 * instructions often look something like:
10195 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
10197 * The call and pop essentially return the absolute address
10198 * of the label .L66 and store it in %ebx. The linker itself
10199 * will ultimately change the first operand of the addl so
10200 * that %ebx points to the GOT, but to keep things simple, the
10201 * .o file must have this operand set so that it generates not
10202 * the absolute address of .L66, but the absolute address of
10203 * itself. This allows the linker itself simply treat a GOTPC
10204 * relocation as asking for a pcrel offset to the GOT to be
10205 * added in, and the addend of the relocation is stored in the
10206 * operand field for the instruction itself.
10208 * Our job here is to fix the operand so that it would add
10209 * the correct offset so that %ebx would point to itself. The
10210 * thing that is tricky is that .-.L66 will point to the
10211 * beginning of the instruction, so we need to further modify
10212 * the operand so that it will point to itself. There are
10213 * other cases where you have something like:
10215 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
10217 * and here no correction would be required. Internally in
10218 * the assembler we treat operands of this form as not being
10219 * pcrel since the '.' is explicitly mentioned, and I wonder
10220 * whether it would simplify matters to do it this way. Who
10221 * knows. In earlier versions of the PIC patches, the
10222 * pcrel_adjust field was used to store the correction, but
10223 * since the expression is not pcrel, I felt it would be
10224 * confusing to do it this way. */
10226 if ((reloc_type
== BFD_RELOC_32
10227 || reloc_type
== BFD_RELOC_X86_64_32S
10228 || reloc_type
== BFD_RELOC_64
)
10230 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
10231 && (i
.op
[n
].imms
->X_op
== O_symbol
10232 || (i
.op
[n
].imms
->X_op
== O_add
10233 && ((symbol_get_value_expression
10234 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
10238 reloc_type
= BFD_RELOC_386_GOTPC
;
10239 else if (size
== 4)
10240 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
10241 else if (size
== 8)
10242 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
10243 i
.has_gotpc_tls_reloc
= true;
10244 i
.op
[n
].imms
->X_add_number
+=
10245 encoding_length (insn_start_frag
, insn_start_off
, p
);
10247 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
10248 i
.op
[n
].imms
, 0, reloc_type
);
10254 /* x86_cons_fix_new is called via the expression parsing code when a
10255 reloc is needed. We use this hook to get the correct .got reloc. */
10256 static int cons_sign
= -1;
10259 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
10260 expressionS
*exp
, bfd_reloc_code_real_type r
)
10262 r
= reloc (len
, 0, cons_sign
, r
);
10265 if (exp
->X_op
== O_secrel
)
10267 exp
->X_op
= O_symbol
;
10268 r
= BFD_RELOC_32_SECREL
;
10272 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
10275 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
10276 purpose of the `.dc.a' internal pseudo-op. */
10279 x86_address_bytes (void)
10281 if ((stdoutput
->arch_info
->mach
& bfd_mach_x64_32
))
10283 return stdoutput
->arch_info
->bits_per_address
/ 8;
10286 #if (!(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
10287 || defined (LEX_AT)) && !defined (TE_PE)
10288 # define lex_got(reloc, adjust, types) NULL
10290 /* Parse operands of the form
10291 <symbol>@GOTOFF+<nnn>
10292 and similar .plt or .got references.
10294 If we find one, set up the correct relocation in RELOC and copy the
10295 input string, minus the `@GOTOFF' into a malloc'd buffer for
10296 parsing by the calling routine. Return this buffer, and if ADJUST
10297 is non-null set it to the length of the string we removed from the
10298 input line. Otherwise return NULL. */
10300 lex_got (enum bfd_reloc_code_real
*rel
,
10302 i386_operand_type
*types
)
10304 /* Some of the relocations depend on the size of what field is to
10305 be relocated. But in our callers i386_immediate and i386_displacement
10306 we don't yet know the operand size (this will be set by insn
10307 matching). Hence we record the word32 relocation here,
10308 and adjust the reloc according to the real size in reloc(). */
10309 static const struct {
10312 const enum bfd_reloc_code_real rel
[2];
10313 const i386_operand_type types64
;
10314 bool need_GOT_symbol
;
10317 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10318 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32
,
10319 BFD_RELOC_SIZE32
},
10320 OPERAND_TYPE_IMM32_64
, false },
10322 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real
,
10323 BFD_RELOC_X86_64_PLTOFF64
},
10324 OPERAND_TYPE_IMM64
, true },
10325 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32
,
10326 BFD_RELOC_X86_64_PLT32
},
10327 OPERAND_TYPE_IMM32_32S_DISP32
, false },
10328 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real
,
10329 BFD_RELOC_X86_64_GOTPLT64
},
10330 OPERAND_TYPE_IMM64_DISP64
, true },
10331 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF
,
10332 BFD_RELOC_X86_64_GOTOFF64
},
10333 OPERAND_TYPE_IMM64_DISP64
, true },
10334 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real
,
10335 BFD_RELOC_X86_64_GOTPCREL
},
10336 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10337 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD
,
10338 BFD_RELOC_X86_64_TLSGD
},
10339 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10340 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM
,
10341 _dummy_first_bfd_reloc_code_real
},
10342 OPERAND_TYPE_NONE
, true },
10343 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real
,
10344 BFD_RELOC_X86_64_TLSLD
},
10345 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10346 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32
,
10347 BFD_RELOC_X86_64_GOTTPOFF
},
10348 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10349 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32
,
10350 BFD_RELOC_X86_64_TPOFF32
},
10351 OPERAND_TYPE_IMM32_32S_64_DISP32_64
, true },
10352 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE
,
10353 _dummy_first_bfd_reloc_code_real
},
10354 OPERAND_TYPE_NONE
, true },
10355 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32
,
10356 BFD_RELOC_X86_64_DTPOFF32
},
10357 OPERAND_TYPE_IMM32_32S_64_DISP32_64
, true },
10358 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE
,
10359 _dummy_first_bfd_reloc_code_real
},
10360 OPERAND_TYPE_NONE
, true },
10361 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE
,
10362 _dummy_first_bfd_reloc_code_real
},
10363 OPERAND_TYPE_NONE
, true },
10364 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32
,
10365 BFD_RELOC_X86_64_GOT32
},
10366 OPERAND_TYPE_IMM32_32S_64_DISP32
, true },
10367 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC
,
10368 BFD_RELOC_X86_64_GOTPC32_TLSDESC
},
10369 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10370 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL
,
10371 BFD_RELOC_X86_64_TLSDESC_CALL
},
10372 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10374 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL
,
10375 BFD_RELOC_32_SECREL
},
10376 OPERAND_TYPE_IMM32_32S_64_DISP32_64
, false },
10382 #if defined (OBJ_MAYBE_ELF) && !defined (TE_PE)
10387 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
10388 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
10391 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
10393 int len
= gotrel
[j
].len
;
10394 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
10396 if (gotrel
[j
].rel
[object_64bit
] != 0)
10399 char *tmpbuf
, *past_reloc
;
10401 *rel
= gotrel
[j
].rel
[object_64bit
];
10405 if (flag_code
!= CODE_64BIT
)
10407 types
->bitfield
.imm32
= 1;
10408 types
->bitfield
.disp32
= 1;
10411 *types
= gotrel
[j
].types64
;
10414 if (gotrel
[j
].need_GOT_symbol
&& GOT_symbol
== NULL
)
10415 GOT_symbol
= symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME
);
10417 /* The length of the first part of our input line. */
10418 first
= cp
- input_line_pointer
;
10420 /* The second part goes from after the reloc token until
10421 (and including) an end_of_line char or comma. */
10422 past_reloc
= cp
+ 1 + len
;
10424 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
10426 second
= cp
+ 1 - past_reloc
;
10428 /* Allocate and copy string. The trailing NUL shouldn't
10429 be necessary, but be safe. */
10430 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
10431 memcpy (tmpbuf
, input_line_pointer
, first
);
10432 if (second
!= 0 && *past_reloc
!= ' ')
10433 /* Replace the relocation token with ' ', so that
10434 errors like foo@GOTOFF1 will be detected. */
10435 tmpbuf
[first
++] = ' ';
10437 /* Increment length by 1 if the relocation token is
10442 memcpy (tmpbuf
+ first
, past_reloc
, second
);
10443 tmpbuf
[first
+ second
] = '\0';
10447 as_bad (_("@%s reloc is not supported with %d-bit output format"),
10448 gotrel
[j
].str
, 1 << (5 + object_64bit
));
10453 /* Might be a symbol version string. Don't as_bad here. */
10458 bfd_reloc_code_real_type
10459 x86_cons (expressionS
*exp
, int size
)
10461 bfd_reloc_code_real_type got_reloc
= NO_RELOC
;
10463 #if ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
10464 && !defined (LEX_AT)) \
10466 intel_syntax
= -intel_syntax
;
10469 if (size
== 4 || (object_64bit
&& size
== 8))
10471 /* Handle @GOTOFF and the like in an expression. */
10473 char *gotfree_input_line
;
10476 save
= input_line_pointer
;
10477 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
);
10478 if (gotfree_input_line
)
10479 input_line_pointer
= gotfree_input_line
;
10483 if (gotfree_input_line
)
10485 /* expression () has merrily parsed up to the end of line,
10486 or a comma - in the wrong buffer. Transfer how far
10487 input_line_pointer has moved to the right buffer. */
10488 input_line_pointer
= (save
10489 + (input_line_pointer
- gotfree_input_line
)
10491 free (gotfree_input_line
);
10492 if (exp
->X_op
== O_constant
10493 || exp
->X_op
== O_absent
10494 || exp
->X_op
== O_illegal
10495 || exp
->X_op
== O_register
10496 || exp
->X_op
== O_big
)
10498 char c
= *input_line_pointer
;
10499 *input_line_pointer
= 0;
10500 as_bad (_("missing or invalid expression `%s'"), save
);
10501 *input_line_pointer
= c
;
10503 else if ((got_reloc
== BFD_RELOC_386_PLT32
10504 || got_reloc
== BFD_RELOC_X86_64_PLT32
)
10505 && exp
->X_op
!= O_symbol
)
10507 char c
= *input_line_pointer
;
10508 *input_line_pointer
= 0;
10509 as_bad (_("invalid PLT expression `%s'"), save
);
10510 *input_line_pointer
= c
;
10517 intel_syntax
= -intel_syntax
;
10520 i386_intel_simplify (exp
);
10525 /* If not 64bit, massage value, to account for wraparound when !BFD64. */
10526 if (size
== 4 && exp
->X_op
== O_constant
&& !object_64bit
)
10527 exp
->X_add_number
= extend_to_32bit_address (exp
->X_add_number
);
10533 signed_cons (int size
)
10543 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
10550 if (exp
.X_op
== O_symbol
)
10551 exp
.X_op
= O_secrel
;
10553 emit_expr (&exp
, 4);
10555 while (*input_line_pointer
++ == ',');
10557 input_line_pointer
--;
10558 demand_empty_rest_of_line ();
10562 /* Handle Vector operations. */
10565 check_VecOperations (char *op_string
)
10567 const reg_entry
*mask
;
10574 if (*op_string
== '{')
10578 /* Check broadcasts. */
10579 if (startswith (op_string
, "1to"))
10581 unsigned int bcst_type
;
10583 if (i
.broadcast
.type
)
10584 goto duplicated_vec_op
;
10587 if (*op_string
== '8')
10589 else if (*op_string
== '4')
10591 else if (*op_string
== '2')
10593 else if (*op_string
== '1'
10594 && *(op_string
+1) == '6')
10599 else if (*op_string
== '3'
10600 && *(op_string
+1) == '2')
10607 as_bad (_("Unsupported broadcast: `%s'"), saved
);
10612 i
.broadcast
.type
= bcst_type
;
10613 i
.broadcast
.operand
= this_operand
;
10615 /* Check masking operation. */
10616 else if ((mask
= parse_register (op_string
, &end_op
)) != NULL
)
10618 if (mask
== &bad_reg
)
10621 /* k0 can't be used for write mask. */
10622 if (mask
->reg_type
.bitfield
.class != RegMask
|| !mask
->reg_num
)
10624 as_bad (_("`%s%s' can't be used for write mask"),
10625 register_prefix
, mask
->reg_name
);
10632 i
.mask
.operand
= this_operand
;
10634 else if (i
.mask
.reg
->reg_num
)
10635 goto duplicated_vec_op
;
10640 /* Only "{z}" is allowed here. No need to check
10641 zeroing mask explicitly. */
10642 if (i
.mask
.operand
!= (unsigned int) this_operand
)
10644 as_bad (_("invalid write mask `%s'"), saved
);
10649 op_string
= end_op
;
10651 /* Check zeroing-flag for masking operation. */
10652 else if (*op_string
== 'z')
10656 i
.mask
.reg
= reg_k0
;
10657 i
.mask
.zeroing
= 1;
10658 i
.mask
.operand
= this_operand
;
10662 if (i
.mask
.zeroing
)
10665 as_bad (_("duplicated `%s'"), saved
);
10669 i
.mask
.zeroing
= 1;
10671 /* Only "{%k}" is allowed here. No need to check mask
10672 register explicitly. */
10673 if (i
.mask
.operand
!= (unsigned int) this_operand
)
10675 as_bad (_("invalid zeroing-masking `%s'"),
10684 goto unknown_vec_op
;
10686 if (*op_string
!= '}')
10688 as_bad (_("missing `}' in `%s'"), saved
);
10693 /* Strip whitespace since the addition of pseudo prefixes
10694 changed how the scrubber treats '{'. */
10695 if (is_space_char (*op_string
))
10701 /* We don't know this one. */
10702 as_bad (_("unknown vector operation: `%s'"), saved
);
10706 if (i
.mask
.reg
&& i
.mask
.zeroing
&& !i
.mask
.reg
->reg_num
)
10708 as_bad (_("zeroing-masking only allowed with write mask"));
10716 i386_immediate (char *imm_start
)
10718 char *save_input_line_pointer
;
10719 char *gotfree_input_line
;
10722 i386_operand_type types
;
10724 operand_type_set (&types
, ~0);
10726 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
10728 as_bad (_("at most %d immediate operands are allowed"),
10729 MAX_IMMEDIATE_OPERANDS
);
10733 exp
= &im_expressions
[i
.imm_operands
++];
10734 i
.op
[this_operand
].imms
= exp
;
10736 if (is_space_char (*imm_start
))
10739 save_input_line_pointer
= input_line_pointer
;
10740 input_line_pointer
= imm_start
;
10742 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
10743 if (gotfree_input_line
)
10744 input_line_pointer
= gotfree_input_line
;
10746 exp_seg
= expression (exp
);
10748 SKIP_WHITESPACE ();
10749 if (*input_line_pointer
)
10750 as_bad (_("junk `%s' after expression"), input_line_pointer
);
10752 input_line_pointer
= save_input_line_pointer
;
10753 if (gotfree_input_line
)
10755 free (gotfree_input_line
);
10757 if (exp
->X_op
== O_constant
)
10758 exp
->X_op
= O_illegal
;
10761 if (exp_seg
== reg_section
)
10763 as_bad (_("illegal immediate register operand %s"), imm_start
);
10767 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
10771 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
10772 i386_operand_type types
, const char *imm_start
)
10774 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
10777 as_bad (_("missing or invalid immediate expression `%s'"),
10781 else if (exp
->X_op
== O_constant
)
10783 /* Size it properly later. */
10784 i
.types
[this_operand
].bitfield
.imm64
= 1;
10786 /* If not 64bit, sign/zero extend val, to account for wraparound
10788 if (flag_code
!= CODE_64BIT
)
10789 exp
->X_add_number
= extend_to_32bit_address (exp
->X_add_number
);
10791 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10792 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
10793 && exp_seg
!= absolute_section
10794 && exp_seg
!= text_section
10795 && exp_seg
!= data_section
10796 && exp_seg
!= bss_section
10797 && exp_seg
!= undefined_section
10798 && !bfd_is_com_section (exp_seg
))
10800 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
10806 /* This is an address. The size of the address will be
10807 determined later, depending on destination register,
10808 suffix, or the default for the section. */
10809 i
.types
[this_operand
].bitfield
.imm8
= 1;
10810 i
.types
[this_operand
].bitfield
.imm16
= 1;
10811 i
.types
[this_operand
].bitfield
.imm32
= 1;
10812 i
.types
[this_operand
].bitfield
.imm32s
= 1;
10813 i
.types
[this_operand
].bitfield
.imm64
= 1;
10814 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
10822 i386_scale (char *scale
)
10825 char *save
= input_line_pointer
;
10827 input_line_pointer
= scale
;
10828 val
= get_absolute_expression ();
10833 i
.log2_scale_factor
= 0;
10836 i
.log2_scale_factor
= 1;
10839 i
.log2_scale_factor
= 2;
10842 i
.log2_scale_factor
= 3;
10846 char sep
= *input_line_pointer
;
10848 *input_line_pointer
= '\0';
10849 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
10851 *input_line_pointer
= sep
;
10852 input_line_pointer
= save
;
10856 if (i
.log2_scale_factor
!= 0 && i
.index_reg
== 0)
10858 as_warn (_("scale factor of %d without an index register"),
10859 1 << i
.log2_scale_factor
);
10860 i
.log2_scale_factor
= 0;
10862 scale
= input_line_pointer
;
10863 input_line_pointer
= save
;
10868 i386_displacement (char *disp_start
, char *disp_end
)
10872 char *save_input_line_pointer
;
10873 char *gotfree_input_line
;
10875 i386_operand_type bigdisp
, types
= anydisp
;
10878 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
10880 as_bad (_("at most %d displacement operands are allowed"),
10881 MAX_MEMORY_OPERANDS
);
10885 operand_type_set (&bigdisp
, 0);
10887 || i
.types
[this_operand
].bitfield
.baseindex
10888 || (current_templates
->start
->opcode_modifier
.jump
!= JUMP
10889 && current_templates
->start
->opcode_modifier
.jump
!= JUMP_DWORD
))
10891 i386_addressing_mode ();
10892 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
10893 if (flag_code
== CODE_64BIT
)
10897 bigdisp
.bitfield
.disp32s
= 1;
10898 bigdisp
.bitfield
.disp64
= 1;
10901 bigdisp
.bitfield
.disp32
= 1;
10903 else if ((flag_code
== CODE_16BIT
) ^ override
)
10904 bigdisp
.bitfield
.disp16
= 1;
10906 bigdisp
.bitfield
.disp32
= 1;
10910 /* For PC-relative branches, the width of the displacement may be
10911 dependent upon data size, but is never dependent upon address size.
10912 Also make sure to not unintentionally match against a non-PC-relative
10913 branch template. */
10914 static templates aux_templates
;
10915 const insn_template
*t
= current_templates
->start
;
10916 bool has_intel64
= false;
10918 aux_templates
.start
= t
;
10919 while (++t
< current_templates
->end
)
10921 if (t
->opcode_modifier
.jump
10922 != current_templates
->start
->opcode_modifier
.jump
)
10924 if ((t
->opcode_modifier
.isa64
>= INTEL64
))
10925 has_intel64
= true;
10927 if (t
< current_templates
->end
)
10929 aux_templates
.end
= t
;
10930 current_templates
= &aux_templates
;
10933 override
= (i
.prefix
[DATA_PREFIX
] != 0);
10934 if (flag_code
== CODE_64BIT
)
10936 if ((override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
10937 && (!intel64
|| !has_intel64
))
10938 bigdisp
.bitfield
.disp16
= 1;
10940 bigdisp
.bitfield
.disp32s
= 1;
10945 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
10947 : LONG_MNEM_SUFFIX
));
10948 bigdisp
.bitfield
.disp32
= 1;
10949 if ((flag_code
== CODE_16BIT
) ^ override
)
10951 bigdisp
.bitfield
.disp32
= 0;
10952 bigdisp
.bitfield
.disp16
= 1;
10956 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
10959 exp
= &disp_expressions
[i
.disp_operands
];
10960 i
.op
[this_operand
].disps
= exp
;
10962 save_input_line_pointer
= input_line_pointer
;
10963 input_line_pointer
= disp_start
;
10964 END_STRING_AND_SAVE (disp_end
);
10966 #ifndef GCC_ASM_O_HACK
10967 #define GCC_ASM_O_HACK 0
10970 END_STRING_AND_SAVE (disp_end
+ 1);
10971 if (i
.types
[this_operand
].bitfield
.baseIndex
10972 && displacement_string_end
[-1] == '+')
10974 /* This hack is to avoid a warning when using the "o"
10975 constraint within gcc asm statements.
10978 #define _set_tssldt_desc(n,addr,limit,type) \
10979 __asm__ __volatile__ ( \
10980 "movw %w2,%0\n\t" \
10981 "movw %w1,2+%0\n\t" \
10982 "rorl $16,%1\n\t" \
10983 "movb %b1,4+%0\n\t" \
10984 "movb %4,5+%0\n\t" \
10985 "movb $0,6+%0\n\t" \
10986 "movb %h1,7+%0\n\t" \
10988 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
10990 This works great except that the output assembler ends
10991 up looking a bit weird if it turns out that there is
10992 no offset. You end up producing code that looks like:
11005 So here we provide the missing zero. */
11007 *displacement_string_end
= '0';
11010 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
11011 if (gotfree_input_line
)
11012 input_line_pointer
= gotfree_input_line
;
11014 exp_seg
= expression (exp
);
11016 SKIP_WHITESPACE ();
11017 if (*input_line_pointer
)
11018 as_bad (_("junk `%s' after expression"), input_line_pointer
);
11020 RESTORE_END_STRING (disp_end
+ 1);
11022 input_line_pointer
= save_input_line_pointer
;
11023 if (gotfree_input_line
)
11025 free (gotfree_input_line
);
11027 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
11028 exp
->X_op
= O_illegal
;
11031 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
11033 RESTORE_END_STRING (disp_end
);
11039 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
11040 i386_operand_type types
, const char *disp_start
)
11042 i386_operand_type bigdisp
;
11045 /* We do this to make sure that the section symbol is in
11046 the symbol table. We will ultimately change the relocation
11047 to be relative to the beginning of the section. */
11048 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
11049 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
11050 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
11052 if (exp
->X_op
!= O_symbol
)
11055 if (S_IS_LOCAL (exp
->X_add_symbol
)
11056 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
11057 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
11058 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
11059 exp
->X_op
= O_subtract
;
11060 exp
->X_op_symbol
= GOT_symbol
;
11061 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
11062 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
11063 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
11064 i
.reloc
[this_operand
] = BFD_RELOC_64
;
11066 i
.reloc
[this_operand
] = BFD_RELOC_32
;
11069 else if (exp
->X_op
== O_absent
11070 || exp
->X_op
== O_illegal
11071 || exp
->X_op
== O_big
)
11074 as_bad (_("missing or invalid displacement expression `%s'"),
11079 else if (exp
->X_op
== O_constant
)
11081 /* Sizing gets taken care of by optimize_disp().
11083 If not 64bit, sign/zero extend val, to account for wraparound
11085 if (flag_code
!= CODE_64BIT
)
11086 exp
->X_add_number
= extend_to_32bit_address (exp
->X_add_number
);
11089 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
11090 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
11091 && exp_seg
!= absolute_section
11092 && exp_seg
!= text_section
11093 && exp_seg
!= data_section
11094 && exp_seg
!= bss_section
11095 && exp_seg
!= undefined_section
11096 && !bfd_is_com_section (exp_seg
))
11098 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
11103 else if (current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
)
11104 i
.types
[this_operand
].bitfield
.disp8
= 1;
11106 /* Check if this is a displacement only operand. */
11107 bigdisp
= operand_type_and_not (i
.types
[this_operand
], anydisp
);
11108 if (operand_type_all_zero (&bigdisp
))
11109 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
11115 /* Return the active addressing mode, taking address override and
11116 registers forming the address into consideration. Update the
11117 address override prefix if necessary. */
11119 static enum flag_code
11120 i386_addressing_mode (void)
11122 enum flag_code addr_mode
;
11124 if (i
.prefix
[ADDR_PREFIX
])
11125 addr_mode
= flag_code
== CODE_32BIT
? CODE_16BIT
: CODE_32BIT
;
11126 else if (flag_code
== CODE_16BIT
11127 && current_templates
->start
->cpu_flags
.bitfield
.cpumpx
11128 /* Avoid replacing the "16-bit addressing not allowed" diagnostic
11129 from md_assemble() by "is not a valid base/index expression"
11130 when there is a base and/or index. */
11131 && !i
.types
[this_operand
].bitfield
.baseindex
)
11133 /* MPX insn memory operands with neither base nor index must be forced
11134 to use 32-bit addressing in 16-bit mode. */
11135 addr_mode
= CODE_32BIT
;
11136 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
11138 gas_assert (!i
.types
[this_operand
].bitfield
.disp16
);
11139 gas_assert (!i
.types
[this_operand
].bitfield
.disp32
);
11143 addr_mode
= flag_code
;
11145 #if INFER_ADDR_PREFIX
11146 if (i
.mem_operands
== 0)
11148 /* Infer address prefix from the first memory operand. */
11149 const reg_entry
*addr_reg
= i
.base_reg
;
11151 if (addr_reg
== NULL
)
11152 addr_reg
= i
.index_reg
;
11156 if (addr_reg
->reg_type
.bitfield
.dword
)
11157 addr_mode
= CODE_32BIT
;
11158 else if (flag_code
!= CODE_64BIT
11159 && addr_reg
->reg_type
.bitfield
.word
)
11160 addr_mode
= CODE_16BIT
;
11162 if (addr_mode
!= flag_code
)
11164 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
11166 /* Change the size of any displacement too. At most one
11167 of Disp16 or Disp32 is set.
11168 FIXME. There doesn't seem to be any real need for
11169 separate Disp16 and Disp32 flags. The same goes for
11170 Imm16 and Imm32. Removing them would probably clean
11171 up the code quite a lot. */
11172 if (flag_code
!= CODE_64BIT
11173 && (i
.types
[this_operand
].bitfield
.disp16
11174 || i
.types
[this_operand
].bitfield
.disp32
))
11175 i
.types
[this_operand
]
11176 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
11186 /* Make sure the memory operand we've been dealt is valid.
11187 Return 1 on success, 0 on a failure. */
11190 i386_index_check (const char *operand_string
)
11192 const char *kind
= "base/index";
11193 enum flag_code addr_mode
= i386_addressing_mode ();
11194 const insn_template
*t
= current_templates
->start
;
11196 if (t
->opcode_modifier
.isstring
11197 && !t
->cpu_flags
.bitfield
.cpupadlock
11198 && (current_templates
->end
[-1].opcode_modifier
.isstring
11199 || i
.mem_operands
))
11201 /* Memory operands of string insns are special in that they only allow
11202 a single register (rDI, rSI, or rBX) as their memory address. */
11203 const reg_entry
*expected_reg
;
11204 static const char *di_si
[][2] =
11210 static const char *bx
[] = { "ebx", "bx", "rbx" };
11212 kind
= "string address";
11214 if (t
->opcode_modifier
.prefixok
== PrefixRep
)
11216 int es_op
= current_templates
->end
[-1].opcode_modifier
.isstring
11217 - IS_STRING_ES_OP0
;
11220 if (!current_templates
->end
[-1].operand_types
[0].bitfield
.baseindex
11221 || ((!i
.mem_operands
!= !intel_syntax
)
11222 && current_templates
->end
[-1].operand_types
[1]
11223 .bitfield
.baseindex
))
11226 = (const reg_entry
*) str_hash_find (reg_hash
,
11227 di_si
[addr_mode
][op
== es_op
]);
11231 = (const reg_entry
*)str_hash_find (reg_hash
, bx
[addr_mode
]);
11233 if (i
.base_reg
!= expected_reg
11235 || operand_type_check (i
.types
[this_operand
], disp
))
11237 /* The second memory operand must have the same size as
11241 && !((addr_mode
== CODE_64BIT
11242 && i
.base_reg
->reg_type
.bitfield
.qword
)
11243 || (addr_mode
== CODE_32BIT
11244 ? i
.base_reg
->reg_type
.bitfield
.dword
11245 : i
.base_reg
->reg_type
.bitfield
.word
)))
11248 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
11250 intel_syntax
? '[' : '(',
11252 expected_reg
->reg_name
,
11253 intel_syntax
? ']' : ')');
11260 as_bad (_("`%s' is not a valid %s expression"),
11261 operand_string
, kind
);
11266 if (addr_mode
!= CODE_16BIT
)
11268 /* 32-bit/64-bit checks. */
11269 if (i
.disp_encoding
== disp_encoding_16bit
)
11272 as_bad (_("invalid `%s' prefix"),
11273 addr_mode
== CODE_16BIT
? "{disp32}" : "{disp16}");
11278 && ((addr_mode
== CODE_64BIT
11279 ? !i
.base_reg
->reg_type
.bitfield
.qword
11280 : !i
.base_reg
->reg_type
.bitfield
.dword
)
11281 || (i
.index_reg
&& i
.base_reg
->reg_num
== RegIP
)
11282 || i
.base_reg
->reg_num
== RegIZ
))
11284 && !i
.index_reg
->reg_type
.bitfield
.xmmword
11285 && !i
.index_reg
->reg_type
.bitfield
.ymmword
11286 && !i
.index_reg
->reg_type
.bitfield
.zmmword
11287 && ((addr_mode
== CODE_64BIT
11288 ? !i
.index_reg
->reg_type
.bitfield
.qword
11289 : !i
.index_reg
->reg_type
.bitfield
.dword
)
11290 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
11293 /* bndmk, bndldx, bndstx and mandatory non-vector SIB have special restrictions. */
11294 if ((t
->opcode_modifier
.opcodeprefix
== PREFIX_0XF3
11295 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11296 && t
->base_opcode
== 0x1b)
11297 || (t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
11298 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11299 && (t
->base_opcode
& ~1) == 0x1a)
11300 || t
->opcode_modifier
.sib
== SIBMEM
)
11302 /* They cannot use RIP-relative addressing. */
11303 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
11305 as_bad (_("`%s' cannot be used here"), operand_string
);
11309 /* bndldx and bndstx ignore their scale factor. */
11310 if (t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
11311 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11312 && (t
->base_opcode
& ~1) == 0x1a
11313 && i
.log2_scale_factor
)
11314 as_warn (_("register scaling is being ignored here"));
11319 /* 16-bit checks. */
11320 if (i
.disp_encoding
== disp_encoding_32bit
)
11324 && (!i
.base_reg
->reg_type
.bitfield
.word
11325 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
11327 && (!i
.index_reg
->reg_type
.bitfield
.word
11328 || !i
.index_reg
->reg_type
.bitfield
.baseindex
11330 && i
.base_reg
->reg_num
< 6
11331 && i
.index_reg
->reg_num
>= 6
11332 && i
.log2_scale_factor
== 0))))
11339 /* Handle vector immediates. */
11342 RC_SAE_immediate (const char *imm_start
)
11344 unsigned int match_found
, j
;
11345 const char *pstr
= imm_start
;
11353 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); j
++)
11355 if (!strncmp (pstr
, RC_NamesTable
[j
].name
, RC_NamesTable
[j
].len
))
11357 if (i
.rounding
.type
!= rc_none
)
11359 as_bad (_("duplicated `%s'"), imm_start
);
11363 i
.rounding
.type
= RC_NamesTable
[j
].type
;
11364 i
.rounding
.operand
= this_operand
;
11366 pstr
+= RC_NamesTable
[j
].len
;
11374 if (*pstr
++ != '}')
11376 as_bad (_("Missing '}': '%s'"), imm_start
);
11379 /* RC/SAE immediate string should contain nothing more. */;
11382 as_bad (_("Junk after '}': '%s'"), imm_start
);
11386 exp
= &im_expressions
[i
.imm_operands
++];
11387 i
.op
[this_operand
].imms
= exp
;
11389 exp
->X_op
= O_constant
;
11390 exp
->X_add_number
= 0;
11391 exp
->X_add_symbol
= (symbolS
*) 0;
11392 exp
->X_op_symbol
= (symbolS
*) 0;
11394 i
.types
[this_operand
].bitfield
.imm8
= 1;
11398 /* Only string instructions can have a second memory operand, so
11399 reduce current_templates to just those if it contains any. */
11401 maybe_adjust_templates (void)
11403 const insn_template
*t
;
11405 gas_assert (i
.mem_operands
== 1);
11407 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
11408 if (t
->opcode_modifier
.isstring
)
11411 if (t
< current_templates
->end
)
11413 static templates aux_templates
;
11416 aux_templates
.start
= t
;
11417 for (; t
< current_templates
->end
; ++t
)
11418 if (!t
->opcode_modifier
.isstring
)
11420 aux_templates
.end
= t
;
11422 /* Determine whether to re-check the first memory operand. */
11423 recheck
= (aux_templates
.start
!= current_templates
->start
11424 || t
!= current_templates
->end
);
11426 current_templates
= &aux_templates
;
11430 i
.mem_operands
= 0;
11431 if (i
.memop1_string
!= NULL
11432 && i386_index_check (i
.memop1_string
) == 0)
11434 i
.mem_operands
= 1;
11441 static INLINE
bool starts_memory_operand (char c
)
11444 || is_identifier_char (c
)
11445 || strchr ("([\"+-!~", c
);
11448 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
11452 i386_att_operand (char *operand_string
)
11454 const reg_entry
*r
;
11456 char *op_string
= operand_string
;
11458 if (is_space_char (*op_string
))
11461 /* We check for an absolute prefix (differentiating,
11462 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
11463 if (*op_string
== ABSOLUTE_PREFIX
)
11466 if (is_space_char (*op_string
))
11468 i
.jumpabsolute
= true;
11471 /* Check if operand is a register. */
11472 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
11474 i386_operand_type temp
;
11479 /* Check for a segment override by searching for ':' after a
11480 segment register. */
11481 op_string
= end_op
;
11482 if (is_space_char (*op_string
))
11484 if (*op_string
== ':' && r
->reg_type
.bitfield
.class == SReg
)
11486 i
.seg
[i
.mem_operands
] = r
;
11488 /* Skip the ':' and whitespace. */
11490 if (is_space_char (*op_string
))
11493 /* Handle case of %es:*foo. */
11494 if (!i
.jumpabsolute
&& *op_string
== ABSOLUTE_PREFIX
)
11497 if (is_space_char (*op_string
))
11499 i
.jumpabsolute
= true;
11502 if (!starts_memory_operand (*op_string
))
11504 as_bad (_("bad memory operand `%s'"), op_string
);
11507 goto do_memory_reference
;
11510 /* Handle vector operations. */
11511 if (*op_string
== '{')
11513 op_string
= check_VecOperations (op_string
);
11514 if (op_string
== NULL
)
11520 as_bad (_("junk `%s' after register"), op_string
);
11523 temp
= r
->reg_type
;
11524 temp
.bitfield
.baseindex
= 0;
11525 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
11527 i
.types
[this_operand
].bitfield
.unspecified
= 0;
11528 i
.op
[this_operand
].regs
= r
;
11531 else if (*op_string
== REGISTER_PREFIX
)
11533 as_bad (_("bad register name `%s'"), op_string
);
11536 else if (*op_string
== IMMEDIATE_PREFIX
)
11539 if (i
.jumpabsolute
)
11541 as_bad (_("immediate operand illegal with absolute jump"));
11544 if (!i386_immediate (op_string
))
11547 else if (RC_SAE_immediate (operand_string
))
11549 /* If it is a RC or SAE immediate, do nothing. */
11552 else if (starts_memory_operand (*op_string
))
11554 /* This is a memory reference of some sort. */
11557 /* Start and end of displacement string expression (if found). */
11558 char *displacement_string_start
;
11559 char *displacement_string_end
;
11561 do_memory_reference
:
11562 if (i
.mem_operands
== 1 && !maybe_adjust_templates ())
11564 if ((i
.mem_operands
== 1
11565 && !current_templates
->start
->opcode_modifier
.isstring
)
11566 || i
.mem_operands
== 2)
11568 as_bad (_("too many memory references for `%s'"),
11569 current_templates
->start
->name
);
11573 /* Check for base index form. We detect the base index form by
11574 looking for an ')' at the end of the operand, searching
11575 for the '(' matching it, and finding a REGISTER_PREFIX or ','
11577 base_string
= op_string
+ strlen (op_string
);
11579 /* Handle vector operations. */
11581 if (is_space_char (*base_string
))
11584 if (*base_string
== '}')
11586 char *vop_start
= NULL
;
11588 while (base_string
-- > op_string
)
11590 if (*base_string
== '"')
11592 if (*base_string
!= '{')
11595 vop_start
= base_string
;
11598 if (is_space_char (*base_string
))
11601 if (*base_string
!= '}')
11609 as_bad (_("unbalanced figure braces"));
11613 if (check_VecOperations (vop_start
) == NULL
)
11617 /* If we only have a displacement, set-up for it to be parsed later. */
11618 displacement_string_start
= op_string
;
11619 displacement_string_end
= base_string
+ 1;
11621 if (*base_string
== ')')
11624 unsigned int parens_not_balanced
= 1;
11626 /* We've already checked that the number of left & right ()'s are
11627 equal, so this loop will not be infinite. */
11631 if (*base_string
== ')')
11632 parens_not_balanced
++;
11633 if (*base_string
== '(')
11634 parens_not_balanced
--;
11636 while (parens_not_balanced
&& *base_string
!= '"');
11638 temp_string
= base_string
;
11640 /* Skip past '(' and whitespace. */
11641 if (*base_string
== '(')
11643 if (is_space_char (*base_string
))
11646 if (*base_string
== ','
11647 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
11650 displacement_string_end
= temp_string
;
11652 i
.types
[this_operand
].bitfield
.baseindex
= 1;
11656 if (i
.base_reg
== &bad_reg
)
11658 base_string
= end_op
;
11659 if (is_space_char (*base_string
))
11663 /* There may be an index reg or scale factor here. */
11664 if (*base_string
== ',')
11667 if (is_space_char (*base_string
))
11670 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
11673 if (i
.index_reg
== &bad_reg
)
11675 base_string
= end_op
;
11676 if (is_space_char (*base_string
))
11678 if (*base_string
== ',')
11681 if (is_space_char (*base_string
))
11684 else if (*base_string
!= ')')
11686 as_bad (_("expecting `,' or `)' "
11687 "after index register in `%s'"),
11692 else if (*base_string
== REGISTER_PREFIX
)
11694 end_op
= strchr (base_string
, ',');
11697 as_bad (_("bad register name `%s'"), base_string
);
11701 /* Check for scale factor. */
11702 if (*base_string
!= ')')
11704 char *end_scale
= i386_scale (base_string
);
11709 base_string
= end_scale
;
11710 if (is_space_char (*base_string
))
11712 if (*base_string
!= ')')
11714 as_bad (_("expecting `)' "
11715 "after scale factor in `%s'"),
11720 else if (!i
.index_reg
)
11722 as_bad (_("expecting index register or scale factor "
11723 "after `,'; got '%c'"),
11728 else if (*base_string
!= ')')
11730 as_bad (_("expecting `,' or `)' "
11731 "after base register in `%s'"),
11736 else if (*base_string
== REGISTER_PREFIX
)
11738 end_op
= strchr (base_string
, ',');
11741 as_bad (_("bad register name `%s'"), base_string
);
11746 /* If there's an expression beginning the operand, parse it,
11747 assuming displacement_string_start and
11748 displacement_string_end are meaningful. */
11749 if (displacement_string_start
!= displacement_string_end
)
11751 if (!i386_displacement (displacement_string_start
,
11752 displacement_string_end
))
11756 /* Special case for (%dx) while doing input/output op. */
11758 && i
.base_reg
->reg_type
.bitfield
.instance
== RegD
11759 && i
.base_reg
->reg_type
.bitfield
.word
11760 && i
.index_reg
== 0
11761 && i
.log2_scale_factor
== 0
11762 && i
.seg
[i
.mem_operands
] == 0
11763 && !operand_type_check (i
.types
[this_operand
], disp
))
11765 i
.types
[this_operand
] = i
.base_reg
->reg_type
;
11769 if (i386_index_check (operand_string
) == 0)
11771 i
.flags
[this_operand
] |= Operand_Mem
;
11772 if (i
.mem_operands
== 0)
11773 i
.memop1_string
= xstrdup (operand_string
);
11778 /* It's not a memory operand; argh! */
11779 as_bad (_("invalid char %s beginning operand %d `%s'"),
11780 output_invalid (*op_string
),
11785 return 1; /* Normal return. */
11788 /* Calculate the maximum variable size (i.e., excluding fr_fix)
11789 that an rs_machine_dependent frag may reach. */
11792 i386_frag_max_var (fragS
*frag
)
11794 /* The only relaxable frags are for jumps.
11795 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
11796 gas_assert (frag
->fr_type
== rs_machine_dependent
);
11797 return TYPE_FROM_RELAX_STATE (frag
->fr_subtype
) == UNCOND_JUMP
? 4 : 5;
11800 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11802 elf_symbol_resolved_in_segment_p (symbolS
*fr_symbol
, offsetT fr_var
)
11804 /* STT_GNU_IFUNC symbol must go through PLT. */
11805 if ((symbol_get_bfdsym (fr_symbol
)->flags
11806 & BSF_GNU_INDIRECT_FUNCTION
) != 0)
11809 if (!S_IS_EXTERNAL (fr_symbol
))
11810 /* Symbol may be weak or local. */
11811 return !S_IS_WEAK (fr_symbol
);
11813 /* Global symbols with non-default visibility can't be preempted. */
11814 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol
)) != STV_DEFAULT
)
11817 if (fr_var
!= NO_RELOC
)
11818 switch ((enum bfd_reloc_code_real
) fr_var
)
11820 case BFD_RELOC_386_PLT32
:
11821 case BFD_RELOC_X86_64_PLT32
:
11822 /* Symbol with PLT relocation may be preempted. */
11828 /* Global symbols with default visibility in a shared library may be
11829 preempted by another definition. */
11834 /* Table 3-2. Macro-Fusible Instructions in Haswell Microarchitecture
11835 Note also work for Skylake and Cascadelake.
11836 ---------------------------------------------------------------------
11837 | JCC | ADD/SUB/CMP | INC/DEC | TEST/AND |
11838 | ------ | ----------- | ------- | -------- |
11840 | Jno | N | N | Y |
11841 | Jc/Jb | Y | N | Y |
11842 | Jae/Jnb | Y | N | Y |
11843 | Je/Jz | Y | Y | Y |
11844 | Jne/Jnz | Y | Y | Y |
11845 | Jna/Jbe | Y | N | Y |
11846 | Ja/Jnbe | Y | N | Y |
11848 | Jns | N | N | Y |
11849 | Jp/Jpe | N | N | Y |
11850 | Jnp/Jpo | N | N | Y |
11851 | Jl/Jnge | Y | Y | Y |
11852 | Jge/Jnl | Y | Y | Y |
11853 | Jle/Jng | Y | Y | Y |
11854 | Jg/Jnle | Y | Y | Y |
11855 --------------------------------------------------------------------- */
11857 i386_macro_fusible_p (enum mf_cmp_kind mf_cmp
, enum mf_jcc_kind mf_jcc
)
11859 if (mf_cmp
== mf_cmp_alu_cmp
)
11860 return ((mf_jcc
>= mf_jcc_jc
&& mf_jcc
<= mf_jcc_jna
)
11861 || mf_jcc
== mf_jcc_jl
|| mf_jcc
== mf_jcc_jle
);
11862 if (mf_cmp
== mf_cmp_incdec
)
11863 return (mf_jcc
== mf_jcc_je
|| mf_jcc
== mf_jcc_jl
11864 || mf_jcc
== mf_jcc_jle
);
11865 if (mf_cmp
== mf_cmp_test_and
)
11870 /* Return the next non-empty frag. */
11873 i386_next_non_empty_frag (fragS
*fragP
)
11875 /* There may be a frag with a ".fill 0" when there is no room in
11876 the current frag for frag_grow in output_insn. */
11877 for (fragP
= fragP
->fr_next
;
11879 && fragP
->fr_type
== rs_fill
11880 && fragP
->fr_fix
== 0);
11881 fragP
= fragP
->fr_next
)
11886 /* Return the next jcc frag after BRANCH_PADDING. */
11889 i386_next_fusible_jcc_frag (fragS
*maybe_cmp_fragP
, fragS
*pad_fragP
)
11891 fragS
*branch_fragP
;
11895 if (pad_fragP
->fr_type
== rs_machine_dependent
11896 && (TYPE_FROM_RELAX_STATE (pad_fragP
->fr_subtype
)
11897 == BRANCH_PADDING
))
11899 branch_fragP
= i386_next_non_empty_frag (pad_fragP
);
11900 if (branch_fragP
->fr_type
!= rs_machine_dependent
)
11902 if (TYPE_FROM_RELAX_STATE (branch_fragP
->fr_subtype
) == COND_JUMP
11903 && i386_macro_fusible_p (maybe_cmp_fragP
->tc_frag_data
.mf_type
,
11904 pad_fragP
->tc_frag_data
.mf_type
))
11905 return branch_fragP
;
11911 /* Classify BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags. */
11914 i386_classify_machine_dependent_frag (fragS
*fragP
)
11918 fragS
*branch_fragP
;
11920 unsigned int max_prefix_length
;
11922 if (fragP
->tc_frag_data
.classified
)
11925 /* First scan for BRANCH_PADDING and FUSED_JCC_PADDING. Convert
11926 FUSED_JCC_PADDING and merge BRANCH_PADDING. */
11927 for (next_fragP
= fragP
;
11928 next_fragP
!= NULL
;
11929 next_fragP
= next_fragP
->fr_next
)
11931 next_fragP
->tc_frag_data
.classified
= 1;
11932 if (next_fragP
->fr_type
== rs_machine_dependent
)
11933 switch (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
))
11935 case BRANCH_PADDING
:
11936 /* The BRANCH_PADDING frag must be followed by a branch
11938 branch_fragP
= i386_next_non_empty_frag (next_fragP
);
11939 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
11941 case FUSED_JCC_PADDING
:
11942 /* Check if this is a fused jcc:
11944 CMP like instruction
11948 cmp_fragP
= i386_next_non_empty_frag (next_fragP
);
11949 pad_fragP
= i386_next_non_empty_frag (cmp_fragP
);
11950 branch_fragP
= i386_next_fusible_jcc_frag (next_fragP
, pad_fragP
);
11953 /* The BRANCH_PADDING frag is merged with the
11954 FUSED_JCC_PADDING frag. */
11955 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
11956 /* CMP like instruction size. */
11957 next_fragP
->tc_frag_data
.cmp_size
= cmp_fragP
->fr_fix
;
11958 frag_wane (pad_fragP
);
11959 /* Skip to branch_fragP. */
11960 next_fragP
= branch_fragP
;
11962 else if (next_fragP
->tc_frag_data
.max_prefix_length
)
11964 /* Turn FUSED_JCC_PADDING into BRANCH_PREFIX if it isn't
11966 next_fragP
->fr_subtype
11967 = ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0);
11968 next_fragP
->tc_frag_data
.max_bytes
11969 = next_fragP
->tc_frag_data
.max_prefix_length
;
11970 /* This will be updated in the BRANCH_PREFIX scan. */
11971 next_fragP
->tc_frag_data
.max_prefix_length
= 0;
11974 frag_wane (next_fragP
);
11979 /* Stop if there is no BRANCH_PREFIX. */
11980 if (!align_branch_prefix_size
)
11983 /* Scan for BRANCH_PREFIX. */
11984 for (; fragP
!= NULL
; fragP
= fragP
->fr_next
)
11986 if (fragP
->fr_type
!= rs_machine_dependent
11987 || (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
11991 /* Count all BRANCH_PREFIX frags before BRANCH_PADDING and
11992 COND_JUMP_PREFIX. */
11993 max_prefix_length
= 0;
11994 for (next_fragP
= fragP
;
11995 next_fragP
!= NULL
;
11996 next_fragP
= next_fragP
->fr_next
)
11998 if (next_fragP
->fr_type
== rs_fill
)
11999 /* Skip rs_fill frags. */
12001 else if (next_fragP
->fr_type
!= rs_machine_dependent
)
12002 /* Stop for all other frags. */
12005 /* rs_machine_dependent frags. */
12006 if (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12009 /* Count BRANCH_PREFIX frags. */
12010 if (max_prefix_length
>= MAX_FUSED_JCC_PADDING_SIZE
)
12012 max_prefix_length
= MAX_FUSED_JCC_PADDING_SIZE
;
12013 frag_wane (next_fragP
);
12017 += next_fragP
->tc_frag_data
.max_bytes
;
12019 else if ((TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12021 || (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12022 == FUSED_JCC_PADDING
))
12024 /* Stop at BRANCH_PADDING and FUSED_JCC_PADDING. */
12025 fragP
->tc_frag_data
.u
.padding_fragP
= next_fragP
;
12029 /* Stop for other rs_machine_dependent frags. */
12033 fragP
->tc_frag_data
.max_prefix_length
= max_prefix_length
;
12035 /* Skip to the next frag. */
12036 fragP
= next_fragP
;
12040 /* Compute padding size for
12043 CMP like instruction
12045 COND_JUMP/UNCOND_JUMP
12050 COND_JUMP/UNCOND_JUMP
12054 i386_branch_padding_size (fragS
*fragP
, offsetT address
)
12056 unsigned int offset
, size
, padding_size
;
12057 fragS
*branch_fragP
= fragP
->tc_frag_data
.u
.branch_fragP
;
12059 /* The start address of the BRANCH_PADDING or FUSED_JCC_PADDING frag. */
12061 address
= fragP
->fr_address
;
12062 address
+= fragP
->fr_fix
;
12064 /* CMP like instrunction size. */
12065 size
= fragP
->tc_frag_data
.cmp_size
;
12067 /* The base size of the branch frag. */
12068 size
+= branch_fragP
->fr_fix
;
12070 /* Add opcode and displacement bytes for the rs_machine_dependent
12072 if (branch_fragP
->fr_type
== rs_machine_dependent
)
12073 size
+= md_relax_table
[branch_fragP
->fr_subtype
].rlx_length
;
12075 /* Check if branch is within boundary and doesn't end at the last
12077 offset
= address
& ((1U << align_branch_power
) - 1);
12078 if ((offset
+ size
) >= (1U << align_branch_power
))
12079 /* Padding needed to avoid crossing boundary. */
12080 padding_size
= (1U << align_branch_power
) - offset
;
12082 /* No padding needed. */
12085 /* The return value may be saved in tc_frag_data.length which is
12087 if (!fits_in_unsigned_byte (padding_size
))
12090 return padding_size
;
12093 /* i386_generic_table_relax_frag()
12095 Handle BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags to
12096 grow/shrink padding to align branch frags. Hand others to
12100 i386_generic_table_relax_frag (segT segment
, fragS
*fragP
, long stretch
)
12102 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
12103 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
12105 long padding_size
= i386_branch_padding_size (fragP
, 0);
12106 long grow
= padding_size
- fragP
->tc_frag_data
.length
;
12108 /* When the BRANCH_PREFIX frag is used, the computed address
12109 must match the actual address and there should be no padding. */
12110 if (fragP
->tc_frag_data
.padding_address
12111 && (fragP
->tc_frag_data
.padding_address
!= fragP
->fr_address
12115 /* Update the padding size. */
12117 fragP
->tc_frag_data
.length
= padding_size
;
12121 else if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12123 fragS
*padding_fragP
, *next_fragP
;
12124 long padding_size
, left_size
, last_size
;
12126 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
12127 if (!padding_fragP
)
12128 /* Use the padding set by the leading BRANCH_PREFIX frag. */
12129 return (fragP
->tc_frag_data
.length
12130 - fragP
->tc_frag_data
.last_length
);
12132 /* Compute the relative address of the padding frag in the very
12133 first time where the BRANCH_PREFIX frag sizes are zero. */
12134 if (!fragP
->tc_frag_data
.padding_address
)
12135 fragP
->tc_frag_data
.padding_address
12136 = padding_fragP
->fr_address
- (fragP
->fr_address
- stretch
);
12138 /* First update the last length from the previous interation. */
12139 left_size
= fragP
->tc_frag_data
.prefix_length
;
12140 for (next_fragP
= fragP
;
12141 next_fragP
!= padding_fragP
;
12142 next_fragP
= next_fragP
->fr_next
)
12143 if (next_fragP
->fr_type
== rs_machine_dependent
12144 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12149 int max
= next_fragP
->tc_frag_data
.max_bytes
;
12153 if (max
> left_size
)
12158 next_fragP
->tc_frag_data
.last_length
= size
;
12162 next_fragP
->tc_frag_data
.last_length
= 0;
12165 /* Check the padding size for the padding frag. */
12166 padding_size
= i386_branch_padding_size
12167 (padding_fragP
, (fragP
->fr_address
12168 + fragP
->tc_frag_data
.padding_address
));
12170 last_size
= fragP
->tc_frag_data
.prefix_length
;
12171 /* Check if there is change from the last interation. */
12172 if (padding_size
== last_size
)
12174 /* Update the expected address of the padding frag. */
12175 padding_fragP
->tc_frag_data
.padding_address
12176 = (fragP
->fr_address
+ padding_size
12177 + fragP
->tc_frag_data
.padding_address
);
12181 if (padding_size
> fragP
->tc_frag_data
.max_prefix_length
)
12183 /* No padding if there is no sufficient room. Clear the
12184 expected address of the padding frag. */
12185 padding_fragP
->tc_frag_data
.padding_address
= 0;
12189 /* Store the expected address of the padding frag. */
12190 padding_fragP
->tc_frag_data
.padding_address
12191 = (fragP
->fr_address
+ padding_size
12192 + fragP
->tc_frag_data
.padding_address
);
12194 fragP
->tc_frag_data
.prefix_length
= padding_size
;
12196 /* Update the length for the current interation. */
12197 left_size
= padding_size
;
12198 for (next_fragP
= fragP
;
12199 next_fragP
!= padding_fragP
;
12200 next_fragP
= next_fragP
->fr_next
)
12201 if (next_fragP
->fr_type
== rs_machine_dependent
12202 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12207 int max
= next_fragP
->tc_frag_data
.max_bytes
;
12211 if (max
> left_size
)
12216 next_fragP
->tc_frag_data
.length
= size
;
12220 next_fragP
->tc_frag_data
.length
= 0;
12223 return (fragP
->tc_frag_data
.length
12224 - fragP
->tc_frag_data
.last_length
);
12226 return relax_frag (segment
, fragP
, stretch
);
12229 /* md_estimate_size_before_relax()
12231 Called just before relax() for rs_machine_dependent frags. The x86
12232 assembler uses these frags to handle variable size jump
12235 Any symbol that is now undefined will not become defined.
12236 Return the correct fr_subtype in the frag.
12237 Return the initial "guess for variable size of frag" to caller.
12238 The guess is actually the growth beyond the fixed part. Whatever
12239 we do to grow the fixed or variable part contributes to our
12243 md_estimate_size_before_relax (fragS
*fragP
, segT segment
)
12245 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
12246 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
12247 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
12249 i386_classify_machine_dependent_frag (fragP
);
12250 return fragP
->tc_frag_data
.length
;
12253 /* We've already got fragP->fr_subtype right; all we have to do is
12254 check for un-relaxable symbols. On an ELF system, we can't relax
12255 an externally visible symbol, because it may be overridden by a
12257 if (S_GET_SEGMENT (fragP
->fr_symbol
) != segment
12258 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12260 && !elf_symbol_resolved_in_segment_p (fragP
->fr_symbol
,
12263 #if defined (OBJ_COFF) && defined (TE_PE)
12264 || (OUTPUT_FLAVOR
== bfd_target_coff_flavour
12265 && S_IS_WEAK (fragP
->fr_symbol
))
12269 /* Symbol is undefined in this segment, or we need to keep a
12270 reloc so that weak symbols can be overridden. */
12271 int size
= (fragP
->fr_subtype
& CODE16
) ? 2 : 4;
12272 enum bfd_reloc_code_real reloc_type
;
12273 unsigned char *opcode
;
12277 if (fragP
->fr_var
!= NO_RELOC
)
12278 reloc_type
= (enum bfd_reloc_code_real
) fragP
->fr_var
;
12279 else if (size
== 2)
12280 reloc_type
= BFD_RELOC_16_PCREL
;
12281 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12282 else if (fragP
->tc_frag_data
.code64
&& fragP
->fr_offset
== 0
12283 && need_plt32_p (fragP
->fr_symbol
))
12284 reloc_type
= BFD_RELOC_X86_64_PLT32
;
12287 reloc_type
= BFD_RELOC_32_PCREL
;
12289 old_fr_fix
= fragP
->fr_fix
;
12290 opcode
= (unsigned char *) fragP
->fr_opcode
;
12292 switch (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
))
12295 /* Make jmp (0xeb) a (d)word displacement jump. */
12297 fragP
->fr_fix
+= size
;
12298 fixP
= fix_new (fragP
, old_fr_fix
, size
,
12300 fragP
->fr_offset
, 1,
12306 && (!no_cond_jump_promotion
|| fragP
->fr_var
!= NO_RELOC
))
12308 /* Negate the condition, and branch past an
12309 unconditional jump. */
12312 /* Insert an unconditional jump. */
12314 /* We added two extra opcode bytes, and have a two byte
12316 fragP
->fr_fix
+= 2 + 2;
12317 fix_new (fragP
, old_fr_fix
+ 2, 2,
12319 fragP
->fr_offset
, 1,
12323 /* Fall through. */
12326 if (no_cond_jump_promotion
&& fragP
->fr_var
== NO_RELOC
)
12328 fragP
->fr_fix
+= 1;
12329 fixP
= fix_new (fragP
, old_fr_fix
, 1,
12331 fragP
->fr_offset
, 1,
12332 BFD_RELOC_8_PCREL
);
12333 fixP
->fx_signed
= 1;
12337 /* This changes the byte-displacement jump 0x7N
12338 to the (d)word-displacement jump 0x0f,0x8N. */
12339 opcode
[1] = opcode
[0] + 0x10;
12340 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12341 /* We've added an opcode byte. */
12342 fragP
->fr_fix
+= 1 + size
;
12343 fixP
= fix_new (fragP
, old_fr_fix
+ 1, size
,
12345 fragP
->fr_offset
, 1,
12350 BAD_CASE (fragP
->fr_subtype
);
12354 /* All jumps handled here are signed, but don't unconditionally use a
12355 signed limit check for 32 and 16 bit jumps as we want to allow wrap
12356 around at 4G (outside of 64-bit mode) and 64k. */
12357 if (size
== 4 && flag_code
== CODE_64BIT
)
12358 fixP
->fx_signed
= 1;
12361 return fragP
->fr_fix
- old_fr_fix
;
12364 /* Guess size depending on current relax state. Initially the relax
12365 state will correspond to a short jump and we return 1, because
12366 the variable part of the frag (the branch offset) is one byte
12367 long. However, we can relax a section more than once and in that
12368 case we must either set fr_subtype back to the unrelaxed state,
12369 or return the value for the appropriate branch. */
12370 return md_relax_table
[fragP
->fr_subtype
].rlx_length
;
12373 /* Called after relax() is finished.
12375 In: Address of frag.
12376 fr_type == rs_machine_dependent.
12377 fr_subtype is what the address relaxed to.
12379 Out: Any fixSs and constants are set up.
12380 Caller will turn frag into a ".space 0". */
12383 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
12386 unsigned char *opcode
;
12387 unsigned char *where_to_put_displacement
= NULL
;
12388 offsetT target_address
;
12389 offsetT opcode_address
;
12390 unsigned int extension
= 0;
12391 offsetT displacement_from_opcode_start
;
12393 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
12394 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
12395 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12397 /* Generate nop padding. */
12398 unsigned int size
= fragP
->tc_frag_data
.length
;
12401 if (size
> fragP
->tc_frag_data
.max_bytes
)
12407 const char *branch
= "branch";
12408 const char *prefix
= "";
12409 fragS
*padding_fragP
;
12410 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
12413 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
12414 switch (fragP
->tc_frag_data
.default_prefix
)
12419 case CS_PREFIX_OPCODE
:
12422 case DS_PREFIX_OPCODE
:
12425 case ES_PREFIX_OPCODE
:
12428 case FS_PREFIX_OPCODE
:
12431 case GS_PREFIX_OPCODE
:
12434 case SS_PREFIX_OPCODE
:
12439 msg
= _("%s:%u: add %d%s at 0x%llx to align "
12440 "%s within %d-byte boundary\n");
12442 msg
= _("%s:%u: add additional %d%s at 0x%llx to "
12443 "align %s within %d-byte boundary\n");
12447 padding_fragP
= fragP
;
12448 msg
= _("%s:%u: add %d%s-byte nop at 0x%llx to align "
12449 "%s within %d-byte boundary\n");
12453 switch (padding_fragP
->tc_frag_data
.branch_type
)
12455 case align_branch_jcc
:
12458 case align_branch_fused
:
12459 branch
= "fused jcc";
12461 case align_branch_jmp
:
12464 case align_branch_call
:
12467 case align_branch_indirect
:
12468 branch
= "indiret branch";
12470 case align_branch_ret
:
12477 fprintf (stdout
, msg
,
12478 fragP
->fr_file
, fragP
->fr_line
, size
, prefix
,
12479 (long long) fragP
->fr_address
, branch
,
12480 1 << align_branch_power
);
12482 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12483 memset (fragP
->fr_opcode
,
12484 fragP
->tc_frag_data
.default_prefix
, size
);
12486 i386_generate_nops (fragP
, (char *) fragP
->fr_opcode
,
12488 fragP
->fr_fix
+= size
;
12493 opcode
= (unsigned char *) fragP
->fr_opcode
;
12495 /* Address we want to reach in file space. */
12496 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
12498 /* Address opcode resides at in file space. */
12499 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
12501 /* Displacement from opcode start to fill into instruction. */
12502 displacement_from_opcode_start
= target_address
- opcode_address
;
12504 if ((fragP
->fr_subtype
& BIG
) == 0)
12506 /* Don't have to change opcode. */
12507 extension
= 1; /* 1 opcode + 1 displacement */
12508 where_to_put_displacement
= &opcode
[1];
12512 if (no_cond_jump_promotion
12513 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
12514 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
12515 _("long jump required"));
12517 switch (fragP
->fr_subtype
)
12519 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
12520 extension
= 4; /* 1 opcode + 4 displacement */
12522 where_to_put_displacement
= &opcode
[1];
12525 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
12526 extension
= 2; /* 1 opcode + 2 displacement */
12528 where_to_put_displacement
= &opcode
[1];
12531 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
12532 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
12533 extension
= 5; /* 2 opcode + 4 displacement */
12534 opcode
[1] = opcode
[0] + 0x10;
12535 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12536 where_to_put_displacement
= &opcode
[2];
12539 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
12540 extension
= 3; /* 2 opcode + 2 displacement */
12541 opcode
[1] = opcode
[0] + 0x10;
12542 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12543 where_to_put_displacement
= &opcode
[2];
12546 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
12551 where_to_put_displacement
= &opcode
[3];
12555 BAD_CASE (fragP
->fr_subtype
);
12560 /* If size if less then four we are sure that the operand fits,
12561 but if it's 4, then it could be that the displacement is larger
12563 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
12565 && ((addressT
) (displacement_from_opcode_start
- extension
12566 + ((addressT
) 1 << 31))
12567 > (((addressT
) 2 << 31) - 1)))
12569 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
12570 _("jump target out of range"));
12571 /* Make us emit 0. */
12572 displacement_from_opcode_start
= extension
;
12574 /* Now put displacement after opcode. */
12575 md_number_to_chars ((char *) where_to_put_displacement
,
12576 (valueT
) (displacement_from_opcode_start
- extension
),
12577 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
12578 fragP
->fr_fix
+= extension
;
12581 /* Apply a fixup (fixP) to segment data, once it has been determined
12582 by our caller that we have all the info we need to fix it up.
12584 Parameter valP is the pointer to the value of the bits.
12586 On the 386, immediates, displacements, and data pointers are all in
12587 the same (little-endian) format, so we don't need to care about which
12588 we are handling. */
12591 md_apply_fix (fixS
*fixP
, valueT
*valP
, segT seg ATTRIBUTE_UNUSED
)
12593 char *p
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
12594 valueT value
= *valP
;
12596 #if !defined (TE_Mach)
12597 if (fixP
->fx_pcrel
)
12599 switch (fixP
->fx_r_type
)
12605 fixP
->fx_r_type
= BFD_RELOC_64_PCREL
;
12608 case BFD_RELOC_X86_64_32S
:
12609 fixP
->fx_r_type
= BFD_RELOC_32_PCREL
;
12612 fixP
->fx_r_type
= BFD_RELOC_16_PCREL
;
12615 fixP
->fx_r_type
= BFD_RELOC_8_PCREL
;
12620 if (fixP
->fx_addsy
!= NULL
12621 && (fixP
->fx_r_type
== BFD_RELOC_32_PCREL
12622 || fixP
->fx_r_type
== BFD_RELOC_64_PCREL
12623 || fixP
->fx_r_type
== BFD_RELOC_16_PCREL
12624 || fixP
->fx_r_type
== BFD_RELOC_8_PCREL
)
12625 && !use_rela_relocations
)
12627 /* This is a hack. There should be a better way to handle this.
12628 This covers for the fact that bfd_install_relocation will
12629 subtract the current location (for partial_inplace, PC relative
12630 relocations); see more below. */
12634 || OUTPUT_FLAVOR
== bfd_target_coff_flavour
12637 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
12639 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12642 segT sym_seg
= S_GET_SEGMENT (fixP
->fx_addsy
);
12644 if ((sym_seg
== seg
12645 || (symbol_section_p (fixP
->fx_addsy
)
12646 && sym_seg
!= absolute_section
))
12647 && !generic_force_reloc (fixP
))
12649 /* Yes, we add the values in twice. This is because
12650 bfd_install_relocation subtracts them out again. I think
12651 bfd_install_relocation is broken, but I don't dare change
12653 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
12657 #if defined (OBJ_COFF) && defined (TE_PE)
12658 /* For some reason, the PE format does not store a
12659 section address offset for a PC relative symbol. */
12660 if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
12661 || S_IS_WEAK (fixP
->fx_addsy
))
12662 value
+= md_pcrel_from (fixP
);
12665 #if defined (OBJ_COFF) && defined (TE_PE)
12666 if (fixP
->fx_addsy
!= NULL
12667 && S_IS_WEAK (fixP
->fx_addsy
)
12668 /* PR 16858: Do not modify weak function references. */
12669 && ! fixP
->fx_pcrel
)
12671 #if !defined (TE_PEP)
12672 /* For x86 PE weak function symbols are neither PC-relative
12673 nor do they set S_IS_FUNCTION. So the only reliable way
12674 to detect them is to check the flags of their containing
12676 if (S_GET_SEGMENT (fixP
->fx_addsy
) != NULL
12677 && S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_CODE
)
12681 value
-= S_GET_VALUE (fixP
->fx_addsy
);
12685 /* Fix a few things - the dynamic linker expects certain values here,
12686 and we must not disappoint it. */
12687 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12688 if (IS_ELF
&& fixP
->fx_addsy
)
12689 switch (fixP
->fx_r_type
)
12691 case BFD_RELOC_386_PLT32
:
12692 case BFD_RELOC_X86_64_PLT32
:
12693 /* Make the jump instruction point to the address of the operand.
12694 At runtime we merely add the offset to the actual PLT entry.
12695 NB: Subtract the offset size only for jump instructions. */
12696 if (fixP
->fx_pcrel
)
12700 case BFD_RELOC_386_TLS_GD
:
12701 case BFD_RELOC_386_TLS_LDM
:
12702 case BFD_RELOC_386_TLS_IE_32
:
12703 case BFD_RELOC_386_TLS_IE
:
12704 case BFD_RELOC_386_TLS_GOTIE
:
12705 case BFD_RELOC_386_TLS_GOTDESC
:
12706 case BFD_RELOC_X86_64_TLSGD
:
12707 case BFD_RELOC_X86_64_TLSLD
:
12708 case BFD_RELOC_X86_64_GOTTPOFF
:
12709 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
12710 value
= 0; /* Fully resolved at runtime. No addend. */
12712 case BFD_RELOC_386_TLS_LE
:
12713 case BFD_RELOC_386_TLS_LDO_32
:
12714 case BFD_RELOC_386_TLS_LE_32
:
12715 case BFD_RELOC_X86_64_DTPOFF32
:
12716 case BFD_RELOC_X86_64_DTPOFF64
:
12717 case BFD_RELOC_X86_64_TPOFF32
:
12718 case BFD_RELOC_X86_64_TPOFF64
:
12719 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
12722 case BFD_RELOC_386_TLS_DESC_CALL
:
12723 case BFD_RELOC_X86_64_TLSDESC_CALL
:
12724 value
= 0; /* Fully resolved at runtime. No addend. */
12725 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
12729 case BFD_RELOC_VTABLE_INHERIT
:
12730 case BFD_RELOC_VTABLE_ENTRY
:
12737 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
12739 /* If not 64bit, massage value, to account for wraparound when !BFD64. */
12741 value
= extend_to_32bit_address (value
);
12744 #endif /* !defined (TE_Mach) */
12746 /* Are we finished with this relocation now? */
12747 if (fixP
->fx_addsy
== NULL
)
12750 switch (fixP
->fx_r_type
)
12752 case BFD_RELOC_X86_64_32S
:
12753 fixP
->fx_signed
= 1;
12760 #if defined (OBJ_COFF) && defined (TE_PE)
12761 else if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
12764 /* Remember value for tc_gen_reloc. */
12765 fixP
->fx_addnumber
= value
;
12766 /* Clear out the frag for now. */
12770 else if (use_rela_relocations
)
12772 if (!disallow_64bit_reloc
|| fixP
->fx_r_type
== NO_RELOC
)
12773 fixP
->fx_no_overflow
= 1;
12774 /* Remember value for tc_gen_reloc. */
12775 fixP
->fx_addnumber
= value
;
12779 md_number_to_chars (p
, value
, fixP
->fx_size
);
12783 md_atof (int type
, char *litP
, int *sizeP
)
12785 /* This outputs the LITTLENUMs in REVERSE order;
12786 in accord with the bigendian 386. */
12787 return ieee_md_atof (type
, litP
, sizeP
, false);
12790 static char output_invalid_buf
[sizeof (unsigned char) * 2 + 6];
12793 output_invalid (int c
)
12796 snprintf (output_invalid_buf
, sizeof (output_invalid_buf
),
12799 snprintf (output_invalid_buf
, sizeof (output_invalid_buf
),
12800 "(0x%x)", (unsigned char) c
);
12801 return output_invalid_buf
;
12804 /* Verify that @r can be used in the current context. */
12806 static bool check_register (const reg_entry
*r
)
12808 if (allow_pseudo_reg
)
12811 if (operand_type_all_zero (&r
->reg_type
))
12814 if ((r
->reg_type
.bitfield
.dword
12815 || (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
> 3)
12816 || r
->reg_type
.bitfield
.class == RegCR
12817 || r
->reg_type
.bitfield
.class == RegDR
)
12818 && !cpu_arch_flags
.bitfield
.cpui386
)
12821 if (r
->reg_type
.bitfield
.class == RegTR
12822 && (flag_code
== CODE_64BIT
12823 || !cpu_arch_flags
.bitfield
.cpui386
12824 || cpu_arch_isa_flags
.bitfield
.cpui586
12825 || cpu_arch_isa_flags
.bitfield
.cpui686
))
12828 if (r
->reg_type
.bitfield
.class == RegMMX
&& !cpu_arch_flags
.bitfield
.cpummx
)
12831 if (!cpu_arch_flags
.bitfield
.cpuavx512f
)
12833 if (r
->reg_type
.bitfield
.zmmword
12834 || r
->reg_type
.bitfield
.class == RegMask
)
12837 if (!cpu_arch_flags
.bitfield
.cpuavx
)
12839 if (r
->reg_type
.bitfield
.ymmword
)
12842 if (!cpu_arch_flags
.bitfield
.cpusse
&& r
->reg_type
.bitfield
.xmmword
)
12847 if (r
->reg_type
.bitfield
.tmmword
12848 && (!cpu_arch_flags
.bitfield
.cpuamx_tile
12849 || flag_code
!= CODE_64BIT
))
12852 if (r
->reg_type
.bitfield
.class == RegBND
&& !cpu_arch_flags
.bitfield
.cpumpx
)
12855 /* Don't allow fake index register unless allow_index_reg isn't 0. */
12856 if (!allow_index_reg
&& r
->reg_num
== RegIZ
)
12859 /* Upper 16 vector registers are only available with VREX in 64bit
12860 mode, and require EVEX encoding. */
12861 if (r
->reg_flags
& RegVRex
)
12863 if (!cpu_arch_flags
.bitfield
.cpuavx512f
12864 || flag_code
!= CODE_64BIT
)
12867 if (i
.vec_encoding
== vex_encoding_default
)
12868 i
.vec_encoding
= vex_encoding_evex
;
12869 else if (i
.vec_encoding
!= vex_encoding_evex
)
12870 i
.vec_encoding
= vex_encoding_error
;
12873 if (((r
->reg_flags
& (RegRex64
| RegRex
)) || r
->reg_type
.bitfield
.qword
)
12874 && (!cpu_arch_flags
.bitfield
.cpulm
|| r
->reg_type
.bitfield
.class != RegCR
)
12875 && flag_code
!= CODE_64BIT
)
12878 if (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
== RegFlat
12885 /* REG_STRING starts *before* REGISTER_PREFIX. */
12887 static const reg_entry
*
12888 parse_real_register (char *reg_string
, char **end_op
)
12890 char *s
= reg_string
;
12892 char reg_name_given
[MAX_REG_NAME_SIZE
+ 1];
12893 const reg_entry
*r
;
12895 /* Skip possible REGISTER_PREFIX and possible whitespace. */
12896 if (*s
== REGISTER_PREFIX
)
12899 if (is_space_char (*s
))
12902 p
= reg_name_given
;
12903 while ((*p
++ = register_chars
[(unsigned char) *s
]) != '\0')
12905 if (p
>= reg_name_given
+ MAX_REG_NAME_SIZE
)
12906 return (const reg_entry
*) NULL
;
12910 /* For naked regs, make sure that we are not dealing with an identifier.
12911 This prevents confusing an identifier like `eax_var' with register
12913 if (allow_naked_reg
&& identifier_chars
[(unsigned char) *s
])
12914 return (const reg_entry
*) NULL
;
12918 r
= (const reg_entry
*) str_hash_find (reg_hash
, reg_name_given
);
12920 /* Handle floating point regs, allowing spaces in the (i) part. */
12923 if (!cpu_arch_flags
.bitfield
.cpu8087
12924 && !cpu_arch_flags
.bitfield
.cpu287
12925 && !cpu_arch_flags
.bitfield
.cpu387
12926 && !allow_pseudo_reg
)
12927 return (const reg_entry
*) NULL
;
12929 if (is_space_char (*s
))
12934 if (is_space_char (*s
))
12936 if (*s
>= '0' && *s
<= '7')
12938 int fpr
= *s
- '0';
12940 if (is_space_char (*s
))
12945 know (r
[fpr
].reg_num
== fpr
);
12949 /* We have "%st(" then garbage. */
12950 return (const reg_entry
*) NULL
;
12954 return r
&& check_register (r
) ? r
: NULL
;
12957 /* REG_STRING starts *before* REGISTER_PREFIX. */
12959 static const reg_entry
*
12960 parse_register (char *reg_string
, char **end_op
)
12962 const reg_entry
*r
;
12964 if (*reg_string
== REGISTER_PREFIX
|| allow_naked_reg
)
12965 r
= parse_real_register (reg_string
, end_op
);
12970 char *save
= input_line_pointer
;
12974 input_line_pointer
= reg_string
;
12975 c
= get_symbol_name (®_string
);
12976 symbolP
= symbol_find (reg_string
);
12977 if (symbolP
&& S_GET_SEGMENT (symbolP
) == reg_section
)
12979 const expressionS
*e
= symbol_get_value_expression (symbolP
);
12981 if (e
->X_op
== O_register
12982 && (valueT
) e
->X_add_number
< i386_regtab_size
)
12984 r
= i386_regtab
+ e
->X_add_number
;
12985 if (!check_register (r
))
12987 as_bad (_("register '%s%s' cannot be used here"),
12988 register_prefix
, r
->reg_name
);
12991 *end_op
= input_line_pointer
;
12994 *input_line_pointer
= c
;
12995 input_line_pointer
= save
;
13001 i386_parse_name (char *name
, expressionS
*e
, char *nextcharP
)
13003 const reg_entry
*r
;
13004 char *end
= input_line_pointer
;
13007 r
= parse_register (name
, &input_line_pointer
);
13008 if (r
&& end
<= input_line_pointer
)
13010 *nextcharP
= *input_line_pointer
;
13011 *input_line_pointer
= 0;
13014 e
->X_op
= O_register
;
13015 e
->X_add_number
= r
- i386_regtab
;
13018 e
->X_op
= O_illegal
;
13021 input_line_pointer
= end
;
13023 return intel_syntax
? i386_intel_parse_name (name
, e
) : 0;
13027 md_operand (expressionS
*e
)
13030 const reg_entry
*r
;
13032 switch (*input_line_pointer
)
13034 case REGISTER_PREFIX
:
13035 r
= parse_real_register (input_line_pointer
, &end
);
13038 e
->X_op
= O_register
;
13039 e
->X_add_number
= r
- i386_regtab
;
13040 input_line_pointer
= end
;
13045 gas_assert (intel_syntax
);
13046 end
= input_line_pointer
++;
13048 if (*input_line_pointer
== ']')
13050 ++input_line_pointer
;
13051 e
->X_op_symbol
= make_expr_symbol (e
);
13052 e
->X_add_symbol
= NULL
;
13053 e
->X_add_number
= 0;
13058 e
->X_op
= O_absent
;
13059 input_line_pointer
= end
;
13066 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13067 const char *md_shortopts
= "kVQ:sqnO::";
13069 const char *md_shortopts
= "qnO::";
13072 #define OPTION_32 (OPTION_MD_BASE + 0)
13073 #define OPTION_64 (OPTION_MD_BASE + 1)
13074 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
13075 #define OPTION_MARCH (OPTION_MD_BASE + 3)
13076 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
13077 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
13078 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
13079 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
13080 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
13081 #define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 9)
13082 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
13083 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
13084 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
13085 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
13086 #define OPTION_X32 (OPTION_MD_BASE + 14)
13087 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
13088 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
13089 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
13090 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
13091 #define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
13092 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
13093 #define OPTION_MSHARED (OPTION_MD_BASE + 21)
13094 #define OPTION_MAMD64 (OPTION_MD_BASE + 22)
13095 #define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
13096 #define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
13097 #define OPTION_X86_USED_NOTE (OPTION_MD_BASE + 25)
13098 #define OPTION_MVEXWIG (OPTION_MD_BASE + 26)
13099 #define OPTION_MALIGN_BRANCH_BOUNDARY (OPTION_MD_BASE + 27)
13100 #define OPTION_MALIGN_BRANCH_PREFIX_SIZE (OPTION_MD_BASE + 28)
13101 #define OPTION_MALIGN_BRANCH (OPTION_MD_BASE + 29)
13102 #define OPTION_MBRANCHES_WITH_32B_BOUNDARIES (OPTION_MD_BASE + 30)
13103 #define OPTION_MLFENCE_AFTER_LOAD (OPTION_MD_BASE + 31)
13104 #define OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH (OPTION_MD_BASE + 32)
13105 #define OPTION_MLFENCE_BEFORE_RET (OPTION_MD_BASE + 33)
13106 #define OPTION_MUSE_UNALIGNED_VECTOR_MOVE (OPTION_MD_BASE + 34)
13108 struct option md_longopts
[] =
13110 {"32", no_argument
, NULL
, OPTION_32
},
13111 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13112 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
13113 {"64", no_argument
, NULL
, OPTION_64
},
13115 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13116 {"x32", no_argument
, NULL
, OPTION_X32
},
13117 {"mshared", no_argument
, NULL
, OPTION_MSHARED
},
13118 {"mx86-used-note", required_argument
, NULL
, OPTION_X86_USED_NOTE
},
13120 {"divide", no_argument
, NULL
, OPTION_DIVIDE
},
13121 {"march", required_argument
, NULL
, OPTION_MARCH
},
13122 {"mtune", required_argument
, NULL
, OPTION_MTUNE
},
13123 {"mmnemonic", required_argument
, NULL
, OPTION_MMNEMONIC
},
13124 {"msyntax", required_argument
, NULL
, OPTION_MSYNTAX
},
13125 {"mindex-reg", no_argument
, NULL
, OPTION_MINDEX_REG
},
13126 {"mnaked-reg", no_argument
, NULL
, OPTION_MNAKED_REG
},
13127 {"msse2avx", no_argument
, NULL
, OPTION_MSSE2AVX
},
13128 {"muse-unaligned-vector-move", no_argument
, NULL
, OPTION_MUSE_UNALIGNED_VECTOR_MOVE
},
13129 {"msse-check", required_argument
, NULL
, OPTION_MSSE_CHECK
},
13130 {"moperand-check", required_argument
, NULL
, OPTION_MOPERAND_CHECK
},
13131 {"mavxscalar", required_argument
, NULL
, OPTION_MAVXSCALAR
},
13132 {"mvexwig", required_argument
, NULL
, OPTION_MVEXWIG
},
13133 {"madd-bnd-prefix", no_argument
, NULL
, OPTION_MADD_BND_PREFIX
},
13134 {"mevexlig", required_argument
, NULL
, OPTION_MEVEXLIG
},
13135 {"mevexwig", required_argument
, NULL
, OPTION_MEVEXWIG
},
13136 # if defined (TE_PE) || defined (TE_PEP)
13137 {"mbig-obj", no_argument
, NULL
, OPTION_MBIG_OBJ
},
13139 {"momit-lock-prefix", required_argument
, NULL
, OPTION_MOMIT_LOCK_PREFIX
},
13140 {"mfence-as-lock-add", required_argument
, NULL
, OPTION_MFENCE_AS_LOCK_ADD
},
13141 {"mrelax-relocations", required_argument
, NULL
, OPTION_MRELAX_RELOCATIONS
},
13142 {"mevexrcig", required_argument
, NULL
, OPTION_MEVEXRCIG
},
13143 {"malign-branch-boundary", required_argument
, NULL
, OPTION_MALIGN_BRANCH_BOUNDARY
},
13144 {"malign-branch-prefix-size", required_argument
, NULL
, OPTION_MALIGN_BRANCH_PREFIX_SIZE
},
13145 {"malign-branch", required_argument
, NULL
, OPTION_MALIGN_BRANCH
},
13146 {"mbranches-within-32B-boundaries", no_argument
, NULL
, OPTION_MBRANCHES_WITH_32B_BOUNDARIES
},
13147 {"mlfence-after-load", required_argument
, NULL
, OPTION_MLFENCE_AFTER_LOAD
},
13148 {"mlfence-before-indirect-branch", required_argument
, NULL
,
13149 OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
},
13150 {"mlfence-before-ret", required_argument
, NULL
, OPTION_MLFENCE_BEFORE_RET
},
13151 {"mamd64", no_argument
, NULL
, OPTION_MAMD64
},
13152 {"mintel64", no_argument
, NULL
, OPTION_MINTEL64
},
13153 {NULL
, no_argument
, NULL
, 0}
13155 size_t md_longopts_size
= sizeof (md_longopts
);
13158 md_parse_option (int c
, const char *arg
)
13161 char *arch
, *next
, *saved
, *type
;
13166 optimize_align_code
= 0;
13170 quiet_warnings
= 1;
13173 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13174 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
13175 should be emitted or not. FIXME: Not implemented. */
13177 if ((arg
[0] != 'y' && arg
[0] != 'n') || arg
[1])
13181 /* -V: SVR4 argument to print version ID. */
13183 print_version_id ();
13186 /* -k: Ignore for FreeBSD compatibility. */
13191 /* -s: On i386 Solaris, this tells the native assembler to use
13192 .stab instead of .stab.excl. We always use .stab anyhow. */
13195 case OPTION_MSHARED
:
13199 case OPTION_X86_USED_NOTE
:
13200 if (strcasecmp (arg
, "yes") == 0)
13202 else if (strcasecmp (arg
, "no") == 0)
13205 as_fatal (_("invalid -mx86-used-note= option: `%s'"), arg
);
13210 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13211 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
13214 const char **list
, **l
;
13216 list
= bfd_target_list ();
13217 for (l
= list
; *l
!= NULL
; l
++)
13218 if (startswith (*l
, "elf64-x86-64")
13219 || strcmp (*l
, "coff-x86-64") == 0
13220 || strcmp (*l
, "pe-x86-64") == 0
13221 || strcmp (*l
, "pei-x86-64") == 0
13222 || strcmp (*l
, "mach-o-x86-64") == 0)
13224 default_arch
= "x86_64";
13228 as_fatal (_("no compiled in support for x86_64"));
13234 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13238 const char **list
, **l
;
13240 list
= bfd_target_list ();
13241 for (l
= list
; *l
!= NULL
; l
++)
13242 if (startswith (*l
, "elf32-x86-64"))
13244 default_arch
= "x86_64:32";
13248 as_fatal (_("no compiled in support for 32bit x86_64"));
13252 as_fatal (_("32bit x86_64 is only supported for ELF"));
13257 default_arch
= "i386";
13260 case OPTION_DIVIDE
:
13261 #ifdef SVR4_COMMENT_CHARS
13266 n
= XNEWVEC (char, strlen (i386_comment_chars
) + 1);
13268 for (s
= i386_comment_chars
; *s
!= '\0'; s
++)
13272 i386_comment_chars
= n
;
13278 saved
= xstrdup (arg
);
13280 /* Allow -march=+nosse. */
13286 as_fatal (_("invalid -march= option: `%s'"), arg
);
13287 next
= strchr (arch
, '+');
13290 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13292 if (arch
== saved
&& strcmp (arch
, cpu_arch
[j
].name
) == 0)
13295 if (! cpu_arch
[j
].flags
.bitfield
.cpui386
)
13298 cpu_arch_name
= cpu_arch
[j
].name
;
13299 cpu_sub_arch_name
= NULL
;
13300 cpu_arch_flags
= cpu_arch
[j
].flags
;
13301 cpu_arch_isa
= cpu_arch
[j
].type
;
13302 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
13303 if (!cpu_arch_tune_set
)
13305 cpu_arch_tune
= cpu_arch_isa
;
13306 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
13310 else if (*cpu_arch
[j
].name
== '.'
13311 && strcmp (arch
, cpu_arch
[j
].name
+ 1) == 0)
13313 /* ISA extension. */
13314 i386_cpu_flags flags
;
13316 flags
= cpu_flags_or (cpu_arch_flags
,
13317 cpu_arch
[j
].flags
);
13319 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
13321 if (cpu_sub_arch_name
)
13323 char *name
= cpu_sub_arch_name
;
13324 cpu_sub_arch_name
= concat (name
,
13326 (const char *) NULL
);
13330 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
13331 cpu_arch_flags
= flags
;
13332 cpu_arch_isa_flags
= flags
;
13336 = cpu_flags_or (cpu_arch_isa_flags
,
13337 cpu_arch
[j
].flags
);
13342 if (j
>= ARRAY_SIZE (cpu_arch
))
13344 /* Disable an ISA extension. */
13345 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
13346 if (strcmp (arch
, cpu_noarch
[j
].name
) == 0)
13348 i386_cpu_flags flags
;
13350 flags
= cpu_flags_and_not (cpu_arch_flags
,
13351 cpu_noarch
[j
].flags
);
13352 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
13354 if (cpu_sub_arch_name
)
13356 char *name
= cpu_sub_arch_name
;
13357 cpu_sub_arch_name
= concat (arch
,
13358 (const char *) NULL
);
13362 cpu_sub_arch_name
= xstrdup (arch
);
13363 cpu_arch_flags
= flags
;
13364 cpu_arch_isa_flags
= flags
;
13369 if (j
>= ARRAY_SIZE (cpu_noarch
))
13370 j
= ARRAY_SIZE (cpu_arch
);
13373 if (j
>= ARRAY_SIZE (cpu_arch
))
13374 as_fatal (_("invalid -march= option: `%s'"), arg
);
13378 while (next
!= NULL
);
13384 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
13385 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13387 if (strcmp (arg
, cpu_arch
[j
].name
) == 0)
13389 cpu_arch_tune_set
= 1;
13390 cpu_arch_tune
= cpu_arch
[j
].type
;
13391 cpu_arch_tune_flags
= cpu_arch
[j
].flags
;
13395 if (j
>= ARRAY_SIZE (cpu_arch
))
13396 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
13399 case OPTION_MMNEMONIC
:
13400 if (strcasecmp (arg
, "att") == 0)
13401 intel_mnemonic
= 0;
13402 else if (strcasecmp (arg
, "intel") == 0)
13403 intel_mnemonic
= 1;
13405 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg
);
13408 case OPTION_MSYNTAX
:
13409 if (strcasecmp (arg
, "att") == 0)
13411 else if (strcasecmp (arg
, "intel") == 0)
13414 as_fatal (_("invalid -msyntax= option: `%s'"), arg
);
13417 case OPTION_MINDEX_REG
:
13418 allow_index_reg
= 1;
13421 case OPTION_MNAKED_REG
:
13422 allow_naked_reg
= 1;
13425 case OPTION_MSSE2AVX
:
13429 case OPTION_MUSE_UNALIGNED_VECTOR_MOVE
:
13430 use_unaligned_vector_move
= 1;
13433 case OPTION_MSSE_CHECK
:
13434 if (strcasecmp (arg
, "error") == 0)
13435 sse_check
= check_error
;
13436 else if (strcasecmp (arg
, "warning") == 0)
13437 sse_check
= check_warning
;
13438 else if (strcasecmp (arg
, "none") == 0)
13439 sse_check
= check_none
;
13441 as_fatal (_("invalid -msse-check= option: `%s'"), arg
);
13444 case OPTION_MOPERAND_CHECK
:
13445 if (strcasecmp (arg
, "error") == 0)
13446 operand_check
= check_error
;
13447 else if (strcasecmp (arg
, "warning") == 0)
13448 operand_check
= check_warning
;
13449 else if (strcasecmp (arg
, "none") == 0)
13450 operand_check
= check_none
;
13452 as_fatal (_("invalid -moperand-check= option: `%s'"), arg
);
13455 case OPTION_MAVXSCALAR
:
13456 if (strcasecmp (arg
, "128") == 0)
13457 avxscalar
= vex128
;
13458 else if (strcasecmp (arg
, "256") == 0)
13459 avxscalar
= vex256
;
13461 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg
);
13464 case OPTION_MVEXWIG
:
13465 if (strcmp (arg
, "0") == 0)
13467 else if (strcmp (arg
, "1") == 0)
13470 as_fatal (_("invalid -mvexwig= option: `%s'"), arg
);
13473 case OPTION_MADD_BND_PREFIX
:
13474 add_bnd_prefix
= 1;
13477 case OPTION_MEVEXLIG
:
13478 if (strcmp (arg
, "128") == 0)
13479 evexlig
= evexl128
;
13480 else if (strcmp (arg
, "256") == 0)
13481 evexlig
= evexl256
;
13482 else if (strcmp (arg
, "512") == 0)
13483 evexlig
= evexl512
;
13485 as_fatal (_("invalid -mevexlig= option: `%s'"), arg
);
13488 case OPTION_MEVEXRCIG
:
13489 if (strcmp (arg
, "rne") == 0)
13491 else if (strcmp (arg
, "rd") == 0)
13493 else if (strcmp (arg
, "ru") == 0)
13495 else if (strcmp (arg
, "rz") == 0)
13498 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg
);
13501 case OPTION_MEVEXWIG
:
13502 if (strcmp (arg
, "0") == 0)
13504 else if (strcmp (arg
, "1") == 0)
13507 as_fatal (_("invalid -mevexwig= option: `%s'"), arg
);
13510 # if defined (TE_PE) || defined (TE_PEP)
13511 case OPTION_MBIG_OBJ
:
13516 case OPTION_MOMIT_LOCK_PREFIX
:
13517 if (strcasecmp (arg
, "yes") == 0)
13518 omit_lock_prefix
= 1;
13519 else if (strcasecmp (arg
, "no") == 0)
13520 omit_lock_prefix
= 0;
13522 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg
);
13525 case OPTION_MFENCE_AS_LOCK_ADD
:
13526 if (strcasecmp (arg
, "yes") == 0)
13528 else if (strcasecmp (arg
, "no") == 0)
13531 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg
);
13534 case OPTION_MLFENCE_AFTER_LOAD
:
13535 if (strcasecmp (arg
, "yes") == 0)
13536 lfence_after_load
= 1;
13537 else if (strcasecmp (arg
, "no") == 0)
13538 lfence_after_load
= 0;
13540 as_fatal (_("invalid -mlfence-after-load= option: `%s'"), arg
);
13543 case OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
:
13544 if (strcasecmp (arg
, "all") == 0)
13546 lfence_before_indirect_branch
= lfence_branch_all
;
13547 if (lfence_before_ret
== lfence_before_ret_none
)
13548 lfence_before_ret
= lfence_before_ret_shl
;
13550 else if (strcasecmp (arg
, "memory") == 0)
13551 lfence_before_indirect_branch
= lfence_branch_memory
;
13552 else if (strcasecmp (arg
, "register") == 0)
13553 lfence_before_indirect_branch
= lfence_branch_register
;
13554 else if (strcasecmp (arg
, "none") == 0)
13555 lfence_before_indirect_branch
= lfence_branch_none
;
13557 as_fatal (_("invalid -mlfence-before-indirect-branch= option: `%s'"),
13561 case OPTION_MLFENCE_BEFORE_RET
:
13562 if (strcasecmp (arg
, "or") == 0)
13563 lfence_before_ret
= lfence_before_ret_or
;
13564 else if (strcasecmp (arg
, "not") == 0)
13565 lfence_before_ret
= lfence_before_ret_not
;
13566 else if (strcasecmp (arg
, "shl") == 0 || strcasecmp (arg
, "yes") == 0)
13567 lfence_before_ret
= lfence_before_ret_shl
;
13568 else if (strcasecmp (arg
, "none") == 0)
13569 lfence_before_ret
= lfence_before_ret_none
;
13571 as_fatal (_("invalid -mlfence-before-ret= option: `%s'"),
13575 case OPTION_MRELAX_RELOCATIONS
:
13576 if (strcasecmp (arg
, "yes") == 0)
13577 generate_relax_relocations
= 1;
13578 else if (strcasecmp (arg
, "no") == 0)
13579 generate_relax_relocations
= 0;
13581 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg
);
13584 case OPTION_MALIGN_BRANCH_BOUNDARY
:
13587 long int align
= strtoul (arg
, &end
, 0);
13592 align_branch_power
= 0;
13595 else if (align
>= 16)
13598 for (align_power
= 0;
13600 align
>>= 1, align_power
++)
13602 /* Limit alignment power to 31. */
13603 if (align
== 1 && align_power
< 32)
13605 align_branch_power
= align_power
;
13610 as_fatal (_("invalid -malign-branch-boundary= value: %s"), arg
);
13614 case OPTION_MALIGN_BRANCH_PREFIX_SIZE
:
13617 int align
= strtoul (arg
, &end
, 0);
13618 /* Some processors only support 5 prefixes. */
13619 if (*end
== '\0' && align
>= 0 && align
< 6)
13621 align_branch_prefix_size
= align
;
13624 as_fatal (_("invalid -malign-branch-prefix-size= value: %s"),
13629 case OPTION_MALIGN_BRANCH
:
13631 saved
= xstrdup (arg
);
13635 next
= strchr (type
, '+');
13638 if (strcasecmp (type
, "jcc") == 0)
13639 align_branch
|= align_branch_jcc_bit
;
13640 else if (strcasecmp (type
, "fused") == 0)
13641 align_branch
|= align_branch_fused_bit
;
13642 else if (strcasecmp (type
, "jmp") == 0)
13643 align_branch
|= align_branch_jmp_bit
;
13644 else if (strcasecmp (type
, "call") == 0)
13645 align_branch
|= align_branch_call_bit
;
13646 else if (strcasecmp (type
, "ret") == 0)
13647 align_branch
|= align_branch_ret_bit
;
13648 else if (strcasecmp (type
, "indirect") == 0)
13649 align_branch
|= align_branch_indirect_bit
;
13651 as_fatal (_("invalid -malign-branch= option: `%s'"), arg
);
13654 while (next
!= NULL
);
13658 case OPTION_MBRANCHES_WITH_32B_BOUNDARIES
:
13659 align_branch_power
= 5;
13660 align_branch_prefix_size
= 5;
13661 align_branch
= (align_branch_jcc_bit
13662 | align_branch_fused_bit
13663 | align_branch_jmp_bit
);
13666 case OPTION_MAMD64
:
13670 case OPTION_MINTEL64
:
13678 /* Turn off -Os. */
13679 optimize_for_space
= 0;
13681 else if (*arg
== 's')
13683 optimize_for_space
= 1;
13684 /* Turn on all encoding optimizations. */
13685 optimize
= INT_MAX
;
13689 optimize
= atoi (arg
);
13690 /* Turn off -Os. */
13691 optimize_for_space
= 0;
13701 #define MESSAGE_TEMPLATE \
13705 output_message (FILE *stream
, char *p
, char *message
, char *start
,
13706 int *left_p
, const char *name
, int len
)
13708 int size
= sizeof (MESSAGE_TEMPLATE
);
13709 int left
= *left_p
;
13711 /* Reserve 2 spaces for ", " or ",\0" */
13714 /* Check if there is any room. */
13722 p
= mempcpy (p
, name
, len
);
13726 /* Output the current message now and start a new one. */
13729 fprintf (stream
, "%s\n", message
);
13731 left
= size
- (start
- message
) - len
- 2;
13733 gas_assert (left
>= 0);
13735 p
= mempcpy (p
, name
, len
);
13743 show_arch (FILE *stream
, int ext
, int check
)
13745 static char message
[] = MESSAGE_TEMPLATE
;
13746 char *start
= message
+ 27;
13748 int size
= sizeof (MESSAGE_TEMPLATE
);
13755 left
= size
- (start
- message
);
13756 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13758 /* Should it be skipped? */
13759 if (cpu_arch
[j
].skip
)
13762 name
= cpu_arch
[j
].name
;
13763 len
= cpu_arch
[j
].len
;
13766 /* It is an extension. Skip if we aren't asked to show it. */
13777 /* It is an processor. Skip if we show only extension. */
13780 else if (check
&& ! cpu_arch
[j
].flags
.bitfield
.cpui386
)
13782 /* It is an impossible processor - skip. */
13786 p
= output_message (stream
, p
, message
, start
, &left
, name
, len
);
13789 /* Display disabled extensions. */
13791 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
13793 name
= cpu_noarch
[j
].name
;
13794 len
= cpu_noarch
[j
].len
;
13795 p
= output_message (stream
, p
, message
, start
, &left
, name
,
13800 fprintf (stream
, "%s\n", message
);
13804 md_show_usage (FILE *stream
)
13806 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13807 fprintf (stream
, _("\
13808 -Qy, -Qn ignored\n\
13809 -V print assembler version number\n\
13812 fprintf (stream
, _("\
13813 -n Do not optimize code alignment\n\
13814 -q quieten some warnings\n"));
13815 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13816 fprintf (stream
, _("\
13820 # if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13821 fprintf (stream
, _("\
13822 --32/--64/--x32 generate 32bit/64bit/x32 object\n"));
13823 # elif defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O)
13824 fprintf (stream
, _("\
13825 --32/--64 generate 32bit/64bit object\n"));
13828 #ifdef SVR4_COMMENT_CHARS
13829 fprintf (stream
, _("\
13830 --divide do not treat `/' as a comment character\n"));
13832 fprintf (stream
, _("\
13833 --divide ignored\n"));
13835 fprintf (stream
, _("\
13836 -march=CPU[,+EXTENSION...]\n\
13837 generate code for CPU and EXTENSION, CPU is one of:\n"));
13838 show_arch (stream
, 0, 1);
13839 fprintf (stream
, _("\
13840 EXTENSION is combination of:\n"));
13841 show_arch (stream
, 1, 0);
13842 fprintf (stream
, _("\
13843 -mtune=CPU optimize for CPU, CPU is one of:\n"));
13844 show_arch (stream
, 0, 0);
13845 fprintf (stream
, _("\
13846 -msse2avx encode SSE instructions with VEX prefix\n"));
13847 fprintf (stream
, _("\
13848 -muse-unaligned-vector-move\n\
13849 encode aligned vector move as unaligned vector move\n"));
13850 fprintf (stream
, _("\
13851 -msse-check=[none|error|warning] (default: warning)\n\
13852 check SSE instructions\n"));
13853 fprintf (stream
, _("\
13854 -moperand-check=[none|error|warning] (default: warning)\n\
13855 check operand combinations for validity\n"));
13856 fprintf (stream
, _("\
13857 -mavxscalar=[128|256] (default: 128)\n\
13858 encode scalar AVX instructions with specific vector\n\
13860 fprintf (stream
, _("\
13861 -mvexwig=[0|1] (default: 0)\n\
13862 encode VEX instructions with specific VEX.W value\n\
13863 for VEX.W bit ignored instructions\n"));
13864 fprintf (stream
, _("\
13865 -mevexlig=[128|256|512] (default: 128)\n\
13866 encode scalar EVEX instructions with specific vector\n\
13868 fprintf (stream
, _("\
13869 -mevexwig=[0|1] (default: 0)\n\
13870 encode EVEX instructions with specific EVEX.W value\n\
13871 for EVEX.W bit ignored instructions\n"));
13872 fprintf (stream
, _("\
13873 -mevexrcig=[rne|rd|ru|rz] (default: rne)\n\
13874 encode EVEX instructions with specific EVEX.RC value\n\
13875 for SAE-only ignored instructions\n"));
13876 fprintf (stream
, _("\
13877 -mmnemonic=[att|intel] "));
13878 if (SYSV386_COMPAT
)
13879 fprintf (stream
, _("(default: att)\n"));
13881 fprintf (stream
, _("(default: intel)\n"));
13882 fprintf (stream
, _("\
13883 use AT&T/Intel mnemonic\n"));
13884 fprintf (stream
, _("\
13885 -msyntax=[att|intel] (default: att)\n\
13886 use AT&T/Intel syntax\n"));
13887 fprintf (stream
, _("\
13888 -mindex-reg support pseudo index registers\n"));
13889 fprintf (stream
, _("\
13890 -mnaked-reg don't require `%%' prefix for registers\n"));
13891 fprintf (stream
, _("\
13892 -madd-bnd-prefix add BND prefix for all valid branches\n"));
13893 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13894 fprintf (stream
, _("\
13895 -mshared disable branch optimization for shared code\n"));
13896 fprintf (stream
, _("\
13897 -mx86-used-note=[no|yes] "));
13898 if (DEFAULT_X86_USED_NOTE
)
13899 fprintf (stream
, _("(default: yes)\n"));
13901 fprintf (stream
, _("(default: no)\n"));
13902 fprintf (stream
, _("\
13903 generate x86 used ISA and feature properties\n"));
13905 #if defined (TE_PE) || defined (TE_PEP)
13906 fprintf (stream
, _("\
13907 -mbig-obj generate big object files\n"));
13909 fprintf (stream
, _("\
13910 -momit-lock-prefix=[no|yes] (default: no)\n\
13911 strip all lock prefixes\n"));
13912 fprintf (stream
, _("\
13913 -mfence-as-lock-add=[no|yes] (default: no)\n\
13914 encode lfence, mfence and sfence as\n\
13915 lock addl $0x0, (%%{re}sp)\n"));
13916 fprintf (stream
, _("\
13917 -mrelax-relocations=[no|yes] "));
13918 if (DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
)
13919 fprintf (stream
, _("(default: yes)\n"));
13921 fprintf (stream
, _("(default: no)\n"));
13922 fprintf (stream
, _("\
13923 generate relax relocations\n"));
13924 fprintf (stream
, _("\
13925 -malign-branch-boundary=NUM (default: 0)\n\
13926 align branches within NUM byte boundary\n"));
13927 fprintf (stream
, _("\
13928 -malign-branch=TYPE[+TYPE...] (default: jcc+fused+jmp)\n\
13929 TYPE is combination of jcc, fused, jmp, call, ret,\n\
13931 specify types of branches to align\n"));
13932 fprintf (stream
, _("\
13933 -malign-branch-prefix-size=NUM (default: 5)\n\
13934 align branches with NUM prefixes per instruction\n"));
13935 fprintf (stream
, _("\
13936 -mbranches-within-32B-boundaries\n\
13937 align branches within 32 byte boundary\n"));
13938 fprintf (stream
, _("\
13939 -mlfence-after-load=[no|yes] (default: no)\n\
13940 generate lfence after load\n"));
13941 fprintf (stream
, _("\
13942 -mlfence-before-indirect-branch=[none|all|register|memory] (default: none)\n\
13943 generate lfence before indirect near branch\n"));
13944 fprintf (stream
, _("\
13945 -mlfence-before-ret=[none|or|not|shl|yes] (default: none)\n\
13946 generate lfence before ret\n"));
13947 fprintf (stream
, _("\
13948 -mamd64 accept only AMD64 ISA [default]\n"));
13949 fprintf (stream
, _("\
13950 -mintel64 accept only Intel64 ISA\n"));
#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
     || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))

/* Pick the target format to use.  Returns the BFD target name for the
   configured output flavour, after resolving the default architecture
   ("x86_64", "i386" or "iamcu") into code-size flag, ELF ABI and
   CPU-architecture settings.  Fatal on unknown/inconsistent arch.  */

const char *
i386_target_format (void)
{
  if (startswith (default_arch, "x86_64"))
    {
      update_code_flag (CODE_64BIT, 1);
      /* Plain "x86_64" is the LP64 ABI; a longer string (e.g. from
	 --x32) selects the ILP32 x32 ABI.  */
      if (default_arch[6] == '\0')
	x86_elf_abi = X86_64_ABI;
      else
	x86_elf_abi = X86_64_X32_ABI;
    }
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else if (!strcmp (default_arch, "iamcu"))
    {
      update_code_flag (CODE_32BIT, 1);
      if (cpu_arch_isa == PROCESSOR_UNKNOWN)
	{
	  /* No explicit .arch seen yet: force the Intel MCU defaults.  */
	  static const i386_cpu_flags iamcu_flags = CPU_IAMCU_FLAGS;
	  cpu_arch_name = "iamcu";
	  cpu_sub_arch_name = NULL;
	  cpu_arch_flags = iamcu_flags;
	  cpu_arch_isa = PROCESSOR_IAMCU;
	  cpu_arch_isa_flags = iamcu_flags;
	  if (!cpu_arch_tune_set)
	    {
	      cpu_arch_tune = cpu_arch_isa;
	      cpu_arch_tune_flags = cpu_arch_isa_flags;
	    }
	}
      else if (cpu_arch_isa != PROCESSOR_IAMCU)
	as_fatal (_("Intel MCU doesn't support `%s' architecture"),
		  cpu_arch_name);
    }
  else
    as_fatal (_("unknown architecture"));

  /* Fill in ISA/tuning flags that were never set explicitly from the
     default per-code-size architecture table.  */
  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;

  switch (OUTPUT_FLAVOR)
    {
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#endif
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      if (flag_code == CODE_64BIT)
	{
	  object_64bit = 1;
	  return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
	}
      return use_big_obj ? "pe-bigobj-i386" : "pe-i386";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
# else
    case bfd_target_coff_flavour:
      return "coff-i386";
# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
      {
	const char *format;

	switch (x86_elf_abi)
	  {
	  default:
	    format = ELF_TARGET_FORMAT;
#ifndef TE_SOLARIS
	    tls_get_addr = "___tls_get_addr";
#endif
	    break;
	  case X86_64_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
#ifndef TE_SOLARIS
	    tls_get_addr = "__tls_get_addr";
#endif
	    format = ELF_TARGET_FORMAT64;
	    break;
	  case X86_64_X32_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
#ifndef TE_SOLARIS
	    tls_get_addr = "__tls_get_addr";
#endif
	    /* x32 objects are 32-bit ELF: 64-bit-only relocations must
	       be rejected later in tc_gen_reloc.  */
	    disallow_64bit_reloc = 1;
	    format = ELF_TARGET_FORMAT32;
	    break;
	  }
	if (cpu_arch_isa == PROCESSOR_IAMCU)
	  {
	    if (x86_elf_abi != I386_ABI)
	      as_fatal (_("Intel MCU is 32bit only"));
	    return ELF_TARGET_IAMCU_FORMAT;
	  }
	else
	  return format;
      }
#endif
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      if (flag_code == CODE_64BIT)
	{
	  use_rela_relocations = 1;
	  object_64bit = 1;
	  return "mach-o-x86-64";
	}
      else
	return "mach-o-i386";
#endif
    default:
      abort ();
      return NULL;
    }
}

#endif /* OBJ_MAYBE_ more than one */
14085 md_undefined_symbol (char *name
)
14087 if (name
[0] == GLOBAL_OFFSET_TABLE_NAME
[0]
14088 && name
[1] == GLOBAL_OFFSET_TABLE_NAME
[1]
14089 && name
[2] == GLOBAL_OFFSET_TABLE_NAME
[2]
14090 && strcmp (name
, GLOBAL_OFFSET_TABLE_NAME
) == 0)
14094 if (symbol_find (name
))
14095 as_bad (_("GOT already in symbol table"));
14096 GOT_symbol
= symbol_new (name
, undefined_section
,
14097 &zero_address_frag
, 0);
14104 /* Round up a section size to the appropriate boundary. */
14107 md_section_align (segT segment ATTRIBUTE_UNUSED
, valueT size
)
14109 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
14110 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
14112 /* For a.out, force the section size to be aligned. If we don't do
14113 this, BFD will align it for us, but it will not write out the
14114 final bytes of the section. This may be a bug in BFD, but it is
14115 easier to fix it here since that is how the other a.out targets
14119 align
= bfd_section_alignment (segment
);
14120 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
14127 /* On the i386, PC-relative offsets are relative to the start of the
14128 next instruction. That is, the address of the offset, plus its
14129 size, since the offset is always the last part of the insn. */
14132 md_pcrel_from (fixS
*fixP
)
14134 return fixP
->fx_size
+ fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
14140 s_bss (int ignore ATTRIBUTE_UNUSED
)
14144 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14146 obj_elf_section_change_hook ();
14148 temp
= get_absolute_expression ();
14149 subseg_set (bss_section
, (subsegT
) temp
);
14150 demand_empty_rest_of_line ();
14155 /* Remember constant directive. */
14158 i386_cons_align (int ignore ATTRIBUTE_UNUSED
)
14160 if (last_insn
.kind
!= last_insn_directive
14161 && (bfd_section_flags (now_seg
) & SEC_CODE
))
14163 last_insn
.seg
= now_seg
;
14164 last_insn
.kind
= last_insn_directive
;
14165 last_insn
.name
= "constant directive";
14166 last_insn
.file
= as_where (&last_insn
.line
);
14167 if (lfence_before_ret
!= lfence_before_ret_none
)
14169 if (lfence_before_indirect_branch
!= lfence_branch_none
)
14170 as_warn (_("constant directive skips -mlfence-before-ret "
14171 "and -mlfence-before-indirect-branch"));
14173 as_warn (_("constant directive skips -mlfence-before-ret"));
14175 else if (lfence_before_indirect_branch
!= lfence_branch_none
)
14176 as_warn (_("constant directive skips -mlfence-before-indirect-branch"));
14181 i386_validate_fix (fixS
*fixp
)
14183 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14184 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
14185 || fixp
->fx_r_type
== BFD_RELOC_SIZE64
)
14186 return IS_ELF
&& fixp
->fx_addsy
14187 && (!S_IS_DEFINED (fixp
->fx_addsy
)
14188 || S_IS_EXTERNAL (fixp
->fx_addsy
));
14191 if (fixp
->fx_subsy
)
14193 if (fixp
->fx_subsy
== GOT_symbol
)
14195 if (fixp
->fx_r_type
== BFD_RELOC_32_PCREL
)
14199 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14200 if (fixp
->fx_tcbit2
)
14201 fixp
->fx_r_type
= (fixp
->fx_tcbit
14202 ? BFD_RELOC_X86_64_REX_GOTPCRELX
14203 : BFD_RELOC_X86_64_GOTPCRELX
);
14206 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTPCREL
;
14211 fixp
->fx_r_type
= BFD_RELOC_386_GOTOFF
;
14213 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTOFF64
;
14215 fixp
->fx_subsy
= 0;
14218 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14221 /* NB: Commit 292676c1 resolved PLT32 reloc aganst local symbol
14222 to section. Since PLT32 relocation must be against symbols,
14223 turn such PLT32 relocation into PC32 relocation. */
14225 && (fixp
->fx_r_type
== BFD_RELOC_386_PLT32
14226 || fixp
->fx_r_type
== BFD_RELOC_X86_64_PLT32
)
14227 && symbol_section_p (fixp
->fx_addsy
))
14228 fixp
->fx_r_type
= BFD_RELOC_32_PCREL
;
14231 if (fixp
->fx_r_type
== BFD_RELOC_386_GOT32
14232 && fixp
->fx_tcbit2
)
14233 fixp
->fx_r_type
= BFD_RELOC_386_GOT32X
;
14242 tc_gen_reloc (asection
*section ATTRIBUTE_UNUSED
, fixS
*fixp
)
14245 bfd_reloc_code_real_type code
;
14247 switch (fixp
->fx_r_type
)
14249 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14252 case BFD_RELOC_SIZE32
:
14253 case BFD_RELOC_SIZE64
:
14255 && !bfd_is_abs_section (S_GET_SEGMENT (fixp
->fx_addsy
))
14256 && (!fixp
->fx_subsy
14257 || bfd_is_abs_section (S_GET_SEGMENT (fixp
->fx_subsy
))))
14258 sym
= fixp
->fx_addsy
;
14259 else if (fixp
->fx_subsy
14260 && !bfd_is_abs_section (S_GET_SEGMENT (fixp
->fx_subsy
))
14261 && (!fixp
->fx_addsy
14262 || bfd_is_abs_section (S_GET_SEGMENT (fixp
->fx_addsy
))))
14263 sym
= fixp
->fx_subsy
;
14266 if (IS_ELF
&& sym
&& S_IS_DEFINED (sym
) && !S_IS_EXTERNAL (sym
))
14268 /* Resolve size relocation against local symbol to size of
14269 the symbol plus addend. */
14270 valueT value
= S_GET_SIZE (sym
);
14272 if (symbol_get_bfdsym (sym
)->flags
& BSF_SECTION_SYM
)
14273 value
= bfd_section_size (S_GET_SEGMENT (sym
));
14274 if (sym
== fixp
->fx_subsy
)
14277 if (fixp
->fx_addsy
)
14278 value
+= S_GET_VALUE (fixp
->fx_addsy
);
14280 else if (fixp
->fx_subsy
)
14281 value
-= S_GET_VALUE (fixp
->fx_subsy
);
14282 value
+= fixp
->fx_offset
;
14283 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
14285 && !fits_in_unsigned_long (value
))
14286 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14287 _("symbol size computation overflow"));
14288 fixp
->fx_addsy
= NULL
;
14289 fixp
->fx_subsy
= NULL
;
14290 md_apply_fix (fixp
, (valueT
*) &value
, NULL
);
14293 if (!fixp
->fx_addsy
|| fixp
->fx_subsy
)
14295 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14296 "unsupported expression involving @size");
14300 /* Fall through. */
14302 case BFD_RELOC_X86_64_PLT32
:
14303 case BFD_RELOC_X86_64_GOT32
:
14304 case BFD_RELOC_X86_64_GOTPCREL
:
14305 case BFD_RELOC_X86_64_GOTPCRELX
:
14306 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
14307 case BFD_RELOC_386_PLT32
:
14308 case BFD_RELOC_386_GOT32
:
14309 case BFD_RELOC_386_GOT32X
:
14310 case BFD_RELOC_386_GOTOFF
:
14311 case BFD_RELOC_386_GOTPC
:
14312 case BFD_RELOC_386_TLS_GD
:
14313 case BFD_RELOC_386_TLS_LDM
:
14314 case BFD_RELOC_386_TLS_LDO_32
:
14315 case BFD_RELOC_386_TLS_IE_32
:
14316 case BFD_RELOC_386_TLS_IE
:
14317 case BFD_RELOC_386_TLS_GOTIE
:
14318 case BFD_RELOC_386_TLS_LE_32
:
14319 case BFD_RELOC_386_TLS_LE
:
14320 case BFD_RELOC_386_TLS_GOTDESC
:
14321 case BFD_RELOC_386_TLS_DESC_CALL
:
14322 case BFD_RELOC_X86_64_TLSGD
:
14323 case BFD_RELOC_X86_64_TLSLD
:
14324 case BFD_RELOC_X86_64_DTPOFF32
:
14325 case BFD_RELOC_X86_64_DTPOFF64
:
14326 case BFD_RELOC_X86_64_GOTTPOFF
:
14327 case BFD_RELOC_X86_64_TPOFF32
:
14328 case BFD_RELOC_X86_64_TPOFF64
:
14329 case BFD_RELOC_X86_64_GOTOFF64
:
14330 case BFD_RELOC_X86_64_GOTPC32
:
14331 case BFD_RELOC_X86_64_GOT64
:
14332 case BFD_RELOC_X86_64_GOTPCREL64
:
14333 case BFD_RELOC_X86_64_GOTPC64
:
14334 case BFD_RELOC_X86_64_GOTPLT64
:
14335 case BFD_RELOC_X86_64_PLTOFF64
:
14336 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
14337 case BFD_RELOC_X86_64_TLSDESC_CALL
:
14338 case BFD_RELOC_RVA
:
14339 case BFD_RELOC_VTABLE_ENTRY
:
14340 case BFD_RELOC_VTABLE_INHERIT
:
14342 case BFD_RELOC_32_SECREL
:
14344 code
= fixp
->fx_r_type
;
14346 case BFD_RELOC_X86_64_32S
:
14347 if (!fixp
->fx_pcrel
)
14349 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
14350 code
= fixp
->fx_r_type
;
14353 /* Fall through. */
14355 if (fixp
->fx_pcrel
)
14357 switch (fixp
->fx_size
)
14360 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14361 _("can not do %d byte pc-relative relocation"),
14363 code
= BFD_RELOC_32_PCREL
;
14365 case 1: code
= BFD_RELOC_8_PCREL
; break;
14366 case 2: code
= BFD_RELOC_16_PCREL
; break;
14367 case 4: code
= BFD_RELOC_32_PCREL
; break;
14369 case 8: code
= BFD_RELOC_64_PCREL
; break;
14375 switch (fixp
->fx_size
)
14378 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14379 _("can not do %d byte relocation"),
14381 code
= BFD_RELOC_32
;
14383 case 1: code
= BFD_RELOC_8
; break;
14384 case 2: code
= BFD_RELOC_16
; break;
14385 case 4: code
= BFD_RELOC_32
; break;
14387 case 8: code
= BFD_RELOC_64
; break;
14394 if ((code
== BFD_RELOC_32
14395 || code
== BFD_RELOC_32_PCREL
14396 || code
== BFD_RELOC_X86_64_32S
)
14398 && fixp
->fx_addsy
== GOT_symbol
)
14401 code
= BFD_RELOC_386_GOTPC
;
14403 code
= BFD_RELOC_X86_64_GOTPC32
;
14405 if ((code
== BFD_RELOC_64
|| code
== BFD_RELOC_64_PCREL
)
14407 && fixp
->fx_addsy
== GOT_symbol
)
14409 code
= BFD_RELOC_X86_64_GOTPC64
;
14412 rel
= XNEW (arelent
);
14413 rel
->sym_ptr_ptr
= XNEW (asymbol
*);
14414 *rel
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
14416 rel
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
14418 if (!use_rela_relocations
)
14420 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
14421 vtable entry to be used in the relocation's section offset. */
14422 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
14423 rel
->address
= fixp
->fx_offset
;
14424 #if defined (OBJ_COFF) && defined (TE_PE)
14425 else if (fixp
->fx_addsy
&& S_IS_WEAK (fixp
->fx_addsy
))
14426 rel
->addend
= fixp
->fx_addnumber
- (S_GET_VALUE (fixp
->fx_addsy
) * 2);
14431 /* Use the rela in 64bit mode. */
14434 if (disallow_64bit_reloc
)
14437 case BFD_RELOC_X86_64_DTPOFF64
:
14438 case BFD_RELOC_X86_64_TPOFF64
:
14439 case BFD_RELOC_64_PCREL
:
14440 case BFD_RELOC_X86_64_GOTOFF64
:
14441 case BFD_RELOC_X86_64_GOT64
:
14442 case BFD_RELOC_X86_64_GOTPCREL64
:
14443 case BFD_RELOC_X86_64_GOTPC64
:
14444 case BFD_RELOC_X86_64_GOTPLT64
:
14445 case BFD_RELOC_X86_64_PLTOFF64
:
14446 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14447 _("cannot represent relocation type %s in x32 mode"),
14448 bfd_get_reloc_code_name (code
));
14454 if (!fixp
->fx_pcrel
)
14455 rel
->addend
= fixp
->fx_offset
;
14459 case BFD_RELOC_X86_64_PLT32
:
14460 case BFD_RELOC_X86_64_GOT32
:
14461 case BFD_RELOC_X86_64_GOTPCREL
:
14462 case BFD_RELOC_X86_64_GOTPCRELX
:
14463 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
14464 case BFD_RELOC_X86_64_TLSGD
:
14465 case BFD_RELOC_X86_64_TLSLD
:
14466 case BFD_RELOC_X86_64_GOTTPOFF
:
14467 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
14468 case BFD_RELOC_X86_64_TLSDESC_CALL
:
14469 rel
->addend
= fixp
->fx_offset
- fixp
->fx_size
;
14472 rel
->addend
= (section
->vma
14474 + fixp
->fx_addnumber
14475 + md_pcrel_from (fixp
));
14480 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
14481 if (rel
->howto
== NULL
)
14483 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14484 _("cannot represent relocation type %s"),
14485 bfd_get_reloc_code_name (code
));
14486 /* Set howto to a garbage value so that we can keep going. */
14487 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, BFD_RELOC_32
);
14488 gas_assert (rel
->howto
!= NULL
);
14494 #include "tc-i386-intel.c"
14497 tc_x86_parse_to_dw2regnum (expressionS
*exp
)
14499 int saved_naked_reg
;
14500 char saved_register_dot
;
14502 saved_naked_reg
= allow_naked_reg
;
14503 allow_naked_reg
= 1;
14504 saved_register_dot
= register_chars
['.'];
14505 register_chars
['.'] = '.';
14506 allow_pseudo_reg
= 1;
14507 expression_and_evaluate (exp
);
14508 allow_pseudo_reg
= 0;
14509 register_chars
['.'] = saved_register_dot
;
14510 allow_naked_reg
= saved_naked_reg
;
14512 if (exp
->X_op
== O_register
&& exp
->X_add_number
>= 0)
14514 if ((addressT
) exp
->X_add_number
< i386_regtab_size
)
14516 exp
->X_op
= O_constant
;
14517 exp
->X_add_number
= i386_regtab
[exp
->X_add_number
]
14518 .dw2_regnum
[flag_code
>> 1];
14521 exp
->X_op
= O_illegal
;
14526 tc_x86_frame_initial_instructions (void)
14528 static unsigned int sp_regno
[2];
14530 if (!sp_regno
[flag_code
>> 1])
14532 char *saved_input
= input_line_pointer
;
14533 char sp
[][4] = {"esp", "rsp"};
14536 input_line_pointer
= sp
[flag_code
>> 1];
14537 tc_x86_parse_to_dw2regnum (&exp
);
14538 gas_assert (exp
.X_op
== O_constant
);
14539 sp_regno
[flag_code
>> 1] = exp
.X_add_number
;
14540 input_line_pointer
= saved_input
;
14543 cfi_add_CFA_def_cfa (sp_regno
[flag_code
>> 1], -x86_cie_data_alignment
);
14544 cfi_add_CFA_offset (x86_dwarf2_return_column
, x86_cie_data_alignment
);
14548 x86_dwarf2_addr_size (void)
14550 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
14551 if (x86_elf_abi
== X86_64_X32_ABI
)
14554 return bfd_arch_bits_per_address (stdoutput
) / 8;
14558 i386_elf_section_type (const char *str
, size_t len
)
14560 if (flag_code
== CODE_64BIT
14561 && len
== sizeof ("unwind") - 1
14562 && startswith (str
, "unwind"))
14563 return SHT_X86_64_UNWIND
;
14570 i386_solaris_fix_up_eh_frame (segT sec
)
14572 if (flag_code
== CODE_64BIT
)
14573 elf_section_type (sec
) = SHT_X86_64_UNWIND
;
14579 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
14583 exp
.X_op
= O_secrel
;
14584 exp
.X_add_symbol
= symbol
;
14585 exp
.X_add_number
= 0;
14586 emit_expr (&exp
, size
);
14590 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14591 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
14594 x86_64_section_letter (int letter
, const char **ptr_msg
)
14596 if (flag_code
== CODE_64BIT
)
14599 return SHF_X86_64_LARGE
;
14601 *ptr_msg
= _("bad .section directive: want a,l,w,x,M,S,G,T in string");
14604 *ptr_msg
= _("bad .section directive: want a,w,x,M,S,G,T in string");
14609 x86_64_section_word (char *str
, size_t len
)
14611 if (len
== 5 && flag_code
== CODE_64BIT
&& startswith (str
, "large"))
14612 return SHF_X86_64_LARGE
;
14618 handle_large_common (int small ATTRIBUTE_UNUSED
)
14620 if (flag_code
!= CODE_64BIT
)
14622 s_comm_internal (0, elf_common_parse
);
14623 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
14627 static segT lbss_section
;
14628 asection
*saved_com_section_ptr
= elf_com_section_ptr
;
14629 asection
*saved_bss_section
= bss_section
;
14631 if (lbss_section
== NULL
)
14633 flagword applicable
;
14634 segT seg
= now_seg
;
14635 subsegT subseg
= now_subseg
;
14637 /* The .lbss section is for local .largecomm symbols. */
14638 lbss_section
= subseg_new (".lbss", 0);
14639 applicable
= bfd_applicable_section_flags (stdoutput
);
14640 bfd_set_section_flags (lbss_section
, applicable
& SEC_ALLOC
);
14641 seg_info (lbss_section
)->bss
= 1;
14643 subseg_set (seg
, subseg
);
14646 elf_com_section_ptr
= &_bfd_elf_large_com_section
;
14647 bss_section
= lbss_section
;
14649 s_comm_internal (0, elf_common_parse
);
14651 elf_com_section_ptr
= saved_com_section_ptr
;
14652 bss_section
= saved_bss_section
;
14655 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */