1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2020 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
39 #ifdef HAVE_SYS_PARAM_H
40 #include <sys/param.h>
43 #define INT_MAX (int) (((unsigned) (-1)) >> 1)
47 #ifndef INFER_ADDR_PREFIX
48 #define INFER_ADDR_PREFIX 1
52 #define DEFAULT_ARCH "i386"
57 #define INLINE __inline__
63 /* Prefixes will be emitted in the order defined below.
64 WAIT_PREFIX must be the first prefix since FWAIT really is an
65 instruction, and so must come before any prefixes.
66 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
67 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
73 #define HLE_PREFIX REP_PREFIX
74 #define BND_PREFIX REP_PREFIX
76 #define REX_PREFIX 6 /* must come last. */
77 #define MAX_PREFIXES 7 /* max prefixes per opcode */
/* We define the syntax here (modulo base,index,scale syntax).  */
#define REGISTER_PREFIX '%'
#define IMMEDIATE_PREFIX '$'
#define ABSOLUTE_PREFIX '*'
/* These are the instruction mnemonic suffixes in AT&T syntax or
   memory operand size in Intel syntax.  */
#define WORD_MNEM_SUFFIX  'w'
#define BYTE_MNEM_SUFFIX  'b'
#define SHORT_MNEM_SUFFIX 's'
#define LONG_MNEM_SUFFIX  'l'
#define QWORD_MNEM_SUFFIX 'q'
91 /* Intel Syntax. Use a non-ascii letter since it never appears
93 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
95 #define END_OF_INSN '\0'
97 /* This matches the C -> StaticRounding alias in the opcode table. */
98 #define commutative staticrounding
101 'templates' is for grouping together 'template' structures for opcodes
102 of the same name. This is only used for storing the insns in the grand
103 ole hash table of insns.
104 The templates themselves start at START and range up to (but not including)
109 const insn_template
*start
;
110 const insn_template
*end
;
114 /* 386 operand encoding bytes: see 386 book for details of this. */
117 unsigned int regmem
; /* codes register or memory operand */
118 unsigned int reg
; /* codes register operand (or extended opcode) */
119 unsigned int mode
; /* how to interpret regmem & reg */
123 /* x86-64 extension prefix. */
124 typedef int rex_byte
;
126 /* 386 opcode byte to code indirect addressing. */
135 /* x86 arch names, types and features */
138 const char *name
; /* arch name */
139 unsigned int len
; /* arch string length */
140 enum processor_type type
; /* arch type */
141 i386_cpu_flags flags
; /* cpu feature flags */
142 unsigned int skip
; /* show_arch should skip this. */
146 /* Used to turn off indicated flags. */
149 const char *name
; /* arch name */
150 unsigned int len
; /* arch string length */
151 i386_cpu_flags flags
; /* cpu feature flags */
155 static void update_code_flag (int, int);
156 static void set_code_flag (int);
157 static void set_16bit_gcc_code_flag (int);
158 static void set_intel_syntax (int);
159 static void set_intel_mnemonic (int);
160 static void set_allow_index_reg (int);
161 static void set_check (int);
162 static void set_cpu_arch (int);
164 static void pe_directive_secrel (int);
166 static void signed_cons (int);
167 static char *output_invalid (int c
);
168 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
170 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
172 static int i386_att_operand (char *);
173 static int i386_intel_operand (char *, int);
174 static int i386_intel_simplify (expressionS
*);
175 static int i386_intel_parse_name (const char *, expressionS
*);
176 static const reg_entry
*parse_register (char *, char **);
177 static char *parse_insn (char *, char *);
178 static char *parse_operands (char *, const char *);
179 static void swap_operands (void);
180 static void swap_2_operands (int, int);
181 static enum flag_code
i386_addressing_mode (void);
182 static void optimize_imm (void);
183 static void optimize_disp (void);
184 static const insn_template
*match_template (char);
185 static int check_string (void);
186 static int process_suffix (void);
187 static int check_byte_reg (void);
188 static int check_long_reg (void);
189 static int check_qword_reg (void);
190 static int check_word_reg (void);
191 static int finalize_imm (void);
192 static int process_operands (void);
193 static const seg_entry
*build_modrm_byte (void);
194 static void output_insn (void);
195 static void output_imm (fragS
*, offsetT
);
196 static void output_disp (fragS
*, offsetT
);
198 static void s_bss (int);
200 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
201 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
203 /* GNU_PROPERTY_X86_ISA_1_USED. */
204 static unsigned int x86_isa_1_used
;
205 /* GNU_PROPERTY_X86_FEATURE_2_USED. */
206 static unsigned int x86_feature_2_used
;
207 /* Generate x86 used ISA and feature properties. */
208 static unsigned int x86_used_note
= DEFAULT_X86_USED_NOTE
;
211 static const char *default_arch
= DEFAULT_ARCH
;
213 /* This struct describes rounding control and SAE in the instruction. */
227 static struct RC_Operation rc_op
;
229 /* The struct describes masking, applied to OPERAND in the instruction.
230 MASK is a pointer to the corresponding mask register. ZEROING tells
231 whether merging or zeroing mask is used. */
232 struct Mask_Operation
234 const reg_entry
*mask
;
235 unsigned int zeroing
;
236 /* The operand where this operation is associated. */
240 static struct Mask_Operation mask_op
;
242 /* The struct describes broadcasting, applied to OPERAND. FACTOR is
244 struct Broadcast_Operation
246 /* Type of broadcast: {1to2}, {1to4}, {1to8}, or {1to16}. */
249 /* Index of broadcasted operand. */
252 /* Number of bytes to broadcast. */
256 static struct Broadcast_Operation broadcast_op
;
261 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
262 unsigned char bytes
[4];
264 /* Destination or source register specifier. */
265 const reg_entry
*register_specifier
;
268 /* 'md_assemble ()' gathers together information and puts it into a
275 const reg_entry
*regs
;
280 operand_size_mismatch
,
281 operand_type_mismatch
,
282 register_type_mismatch
,
283 number_of_operands_mismatch
,
284 invalid_instruction_suffix
,
286 unsupported_with_intel_mnemonic
,
289 invalid_vsib_address
,
290 invalid_vector_register_set
,
291 unsupported_vector_index_register
,
292 unsupported_broadcast
,
295 mask_not_on_destination
,
298 rc_sae_operand_not_last_imm
,
299 invalid_register_operand
,
304 /* TM holds the template for the insn were currently assembling. */
307 /* SUFFIX holds the instruction size suffix for byte, word, dword
308 or qword, if given. */
311 /* OPERANDS gives the number of given operands. */
312 unsigned int operands
;
314 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
315 of given register, displacement, memory operands and immediate
317 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
319 /* TYPES [i] is the type (see above #defines) which tells us how to
320 use OP[i] for the corresponding operand. */
321 i386_operand_type types
[MAX_OPERANDS
];
323 /* Displacement expression, immediate expression, or register for each
325 union i386_op op
[MAX_OPERANDS
];
327 /* Flags for operands. */
328 unsigned int flags
[MAX_OPERANDS
];
329 #define Operand_PCrel 1
330 #define Operand_Mem 2
332 /* Relocation type for operand */
333 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
335 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
336 the base index byte below. */
337 const reg_entry
*base_reg
;
338 const reg_entry
*index_reg
;
339 unsigned int log2_scale_factor
;
341 /* SEG gives the seg_entries of this insn. They are zero unless
342 explicit segment overrides are given. */
343 const seg_entry
*seg
[2];
345 /* Copied first memory operand string, for re-checking. */
348 /* PREFIX holds all the given prefix opcodes (usually null).
349 PREFIXES is the number of prefix opcodes. */
350 unsigned int prefixes
;
351 unsigned char prefix
[MAX_PREFIXES
];
353 /* Register is in low 3 bits of opcode. */
354 bfd_boolean short_form
;
356 /* The operand to a branch insn indicates an absolute branch. */
357 bfd_boolean jumpabsolute
;
359 /* Has MMX register operands. */
360 bfd_boolean has_regmmx
;
362 /* Has XMM register operands. */
363 bfd_boolean has_regxmm
;
365 /* Has YMM register operands. */
366 bfd_boolean has_regymm
;
368 /* Has ZMM register operands. */
369 bfd_boolean has_regzmm
;
371 /* Has GOTPC or TLS relocation. */
372 bfd_boolean has_gotpc_tls_reloc
;
374 /* RM and SIB are the modrm byte and the sib byte where the
375 addressing modes of this insn are encoded. */
382 /* Masking attributes. */
383 struct Mask_Operation
*mask
;
385 /* Rounding control and SAE attributes. */
386 struct RC_Operation
*rounding
;
388 /* Broadcasting attributes. */
389 struct Broadcast_Operation
*broadcast
;
391 /* Compressed disp8*N attribute. */
392 unsigned int memshift
;
394 /* Prefer load or store in encoding. */
397 dir_encoding_default
= 0,
403 /* Prefer 8bit or 32bit displacement in encoding. */
406 disp_encoding_default
= 0,
411 /* Prefer the REX byte in encoding. */
412 bfd_boolean rex_encoding
;
414 /* Disable instruction size optimization. */
415 bfd_boolean no_optimize
;
417 /* How to encode vector instructions. */
420 vex_encoding_default
= 0,
427 const char *rep_prefix
;
430 const char *hle_prefix
;
432 /* Have BND prefix. */
433 const char *bnd_prefix
;
435 /* Have NOTRACK prefix. */
436 const char *notrack_prefix
;
439 enum i386_error error
;
442 typedef struct _i386_insn i386_insn
;
444 /* Link RC type with corresponding string, that'll be looked for in
453 static const struct RC_name RC_NamesTable
[] =
455 { rne
, STRING_COMMA_LEN ("rn-sae") },
456 { rd
, STRING_COMMA_LEN ("rd-sae") },
457 { ru
, STRING_COMMA_LEN ("ru-sae") },
458 { rz
, STRING_COMMA_LEN ("rz-sae") },
459 { saeonly
, STRING_COMMA_LEN ("sae") },
462 /* List of chars besides those in app.c:symbol_chars that can start an
463 operand. Used to prevent the scrubber eating vital white-space. */
464 const char extra_symbol_chars
[] = "*%-([{}"
473 #if (defined (TE_I386AIX) \
474 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
475 && !defined (TE_GNU) \
476 && !defined (TE_LINUX) \
477 && !defined (TE_NACL) \
478 && !defined (TE_FreeBSD) \
479 && !defined (TE_DragonFly) \
480 && !defined (TE_NetBSD)))
481 /* This array holds the chars that always start a comment. If the
482 pre-processor is disabled, these aren't very useful. The option
483 --divide will remove '/' from this list. */
484 const char *i386_comment_chars
= "#/";
485 #define SVR4_COMMENT_CHARS 1
486 #define PREFIX_SEPARATOR '\\'
489 const char *i386_comment_chars
= "#";
490 #define PREFIX_SEPARATOR '/'
493 /* This array holds the chars that only start a comment at the beginning of
494 a line. If the line seems to have the form '# 123 filename'
495 .line and .file directives will appear in the pre-processed output.
496 Note that input_file.c hand checks for '#' at the beginning of the
497 first line of the input file. This is because the compiler outputs
498 #NO_APP at the beginning of its output.
499 Also note that comments started like this one will always work if
500 '/' isn't otherwise defined. */
501 const char line_comment_chars
[] = "#/";
503 const char line_separator_chars
[] = ";";
505 /* Chars that can be used to separate mant from exp in floating point
507 const char EXP_CHARS
[] = "eE";
509 /* Chars that mean this number is a floating point constant
512 const char FLT_CHARS
[] = "fFdDxX";
/* Tables for lexical analysis.  Each table is a 256-entry membership
   map indexed by character value; entries are filled in elsewhere
   during initialization.  */
static char mnemonic_chars[256];
static char register_chars[256];
static char operand_chars[256];
static char identifier_chars[256];
static char digit_chars[256];

/* Lexical macros.  Each classifies a character via the corresponding
   lookup table above.  The argument is parenthesized before the cast
   so that compound expressions (e.g. a conditional expression) are
   evaluated as a whole before being truncated to an index.  */
#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) (x)])
#define is_operand_char(x) (operand_chars[(unsigned char) (x)])
#define is_register_char(x) (register_chars[(unsigned char) (x)])
#define is_space_char(x) ((x) == ' ')
#define is_identifier_char(x) (identifier_chars[(unsigned char) (x)])
#define is_digit_char(x) (digit_chars[(unsigned char) (x)])
/* All non-digit non-letter characters that may occur in an operand.  */
static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
/* md_assemble() always leaves the strings it's passed unaltered.  To
   effect this we maintain a stack of saved characters that we've smashed
   with '\0's (indicating end of strings for various sub-fields of the
   assembler instruction).  */
static char save_stack[32];
static char *save_stack_p;

/* Push the character at S onto the save stack and terminate the string
   there; RESTORE_END_STRING undoes the most recent such smash.  */
#define END_STRING_AND_SAVE(s) \
	do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
#define RESTORE_END_STRING(s) \
	do { *(s) = *--save_stack_p; } while (0)
543 /* The instruction we're assembling. */
546 /* Possible templates for current insn. */
547 static const templates
*current_templates
;
549 /* Per instruction expressionS buffers: max displacements & immediates. */
550 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
551 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
553 /* Current operand we are working on. */
554 static int this_operand
= -1;
556 /* We support four different modes. FLAG_CODE variable is used to distinguish
564 static enum flag_code flag_code
;
565 static unsigned int object_64bit
;
566 static unsigned int disallow_64bit_reloc
;
567 static int use_rela_relocations
= 0;
568 /* __tls_get_addr/___tls_get_addr symbol for TLS. */
569 static const char *tls_get_addr
;
571 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
572 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
573 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
575 /* The ELF ABI to use. */
583 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
586 #if defined (TE_PE) || defined (TE_PEP)
587 /* Use big object file format. */
588 static int use_big_obj
= 0;
591 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
592 /* 1 if generating code for a shared library. */
593 static int shared
= 0;
596 /* 1 for intel syntax,
598 static int intel_syntax
= 0;
600 static enum x86_64_isa
602 amd64
= 1, /* AMD64 ISA. */
603 intel64
/* Intel64 ISA. */
606 /* 1 for intel mnemonic,
607 0 if att mnemonic. */
608 static int intel_mnemonic
= !SYSV386_COMPAT
;
610 /* 1 if pseudo registers are permitted. */
611 static int allow_pseudo_reg
= 0;
613 /* 1 if register prefix % not required. */
614 static int allow_naked_reg
= 0;
616 /* 1 if the assembler should add BND prefix for all control-transferring
617 instructions supporting it, even if this prefix wasn't specified
619 static int add_bnd_prefix
= 0;
621 /* 1 if pseudo index register, eiz/riz, is allowed. */
622 static int allow_index_reg
= 0;
624 /* 1 if the assembler should ignore LOCK prefix, even if it was
625 specified explicitly. */
626 static int omit_lock_prefix
= 0;
628 /* 1 if the assembler should encode lfence, mfence, and sfence as
629 "lock addl $0, (%{re}sp)". */
630 static int avoid_fence
= 0;
632 /* 1 if lfence should be inserted after every load. */
633 static int lfence_after_load
= 0;
635 /* Non-zero if lfence should be inserted before indirect branch. */
636 static enum lfence_before_indirect_branch_kind
638 lfence_branch_none
= 0,
639 lfence_branch_register
,
640 lfence_branch_memory
,
643 lfence_before_indirect_branch
;
645 /* Non-zero if lfence should be inserted before ret. */
646 static enum lfence_before_ret_kind
648 lfence_before_ret_none
= 0,
649 lfence_before_ret_not
,
650 lfence_before_ret_or
,
651 lfence_before_ret_shl
655 /* Types of previous instruction is .byte or prefix. */
670 /* 1 if the assembler should generate relax relocations. */
672 static int generate_relax_relocations
673 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
;
675 static enum check_kind
681 sse_check
, operand_check
= check_warning
;
683 /* Non-zero if branches should be aligned within power of 2 boundary. */
684 static int align_branch_power
= 0;
686 /* Types of branches to align. */
687 enum align_branch_kind
689 align_branch_none
= 0,
690 align_branch_jcc
= 1,
691 align_branch_fused
= 2,
692 align_branch_jmp
= 3,
693 align_branch_call
= 4,
694 align_branch_indirect
= 5,
698 /* Type bits of branches to align. */
699 enum align_branch_bit
701 align_branch_jcc_bit
= 1 << align_branch_jcc
,
702 align_branch_fused_bit
= 1 << align_branch_fused
,
703 align_branch_jmp_bit
= 1 << align_branch_jmp
,
704 align_branch_call_bit
= 1 << align_branch_call
,
705 align_branch_indirect_bit
= 1 << align_branch_indirect
,
706 align_branch_ret_bit
= 1 << align_branch_ret
709 static unsigned int align_branch
= (align_branch_jcc_bit
710 | align_branch_fused_bit
711 | align_branch_jmp_bit
);
713 /* Types of condition jump used by macro-fusion. */
716 mf_jcc_jo
= 0, /* base opcode 0x70 */
717 mf_jcc_jc
, /* base opcode 0x72 */
718 mf_jcc_je
, /* base opcode 0x74 */
719 mf_jcc_jna
, /* base opcode 0x76 */
720 mf_jcc_js
, /* base opcode 0x78 */
721 mf_jcc_jp
, /* base opcode 0x7a */
722 mf_jcc_jl
, /* base opcode 0x7c */
723 mf_jcc_jle
, /* base opcode 0x7e */
726 /* Types of compare flag-modifying instructions used by macro-fusion. */
729 mf_cmp_test_and
, /* test/cmp */
730 mf_cmp_alu_cmp
, /* add/sub/cmp */
731 mf_cmp_incdec
/* inc/dec */
734 /* The maximum padding size for fused jcc. CMP like instruction can
735 be 9 bytes and jcc can be 6 bytes. Leave room just in case for
737 #define MAX_FUSED_JCC_PADDING_SIZE 20
739 /* The maximum number of prefixes added for an instruction. */
740 static unsigned int align_branch_prefix_size
= 5;
743 1. Clear the REX_W bit with register operand if possible.
744 2. Above plus use 128bit vector instruction to clear the full vector
747 static int optimize
= 0;
750 1. Clear the REX_W bit with register operand if possible.
751 2. Above plus use 128bit vector instruction to clear the full vector
753 3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to
756 static int optimize_for_space
= 0;
758 /* Register prefix used for error message. */
759 static const char *register_prefix
= "%";
761 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
762 leave, push, and pop instructions so that gcc has the same stack
763 frame as in 32 bit mode. */
764 static char stackop_size
= '\0';
766 /* Non-zero to optimize code alignment. */
767 int optimize_align_code
= 1;
769 /* Non-zero to quieten some warnings. */
770 static int quiet_warnings
= 0;
773 static const char *cpu_arch_name
= NULL
;
774 static char *cpu_sub_arch_name
= NULL
;
776 /* CPU feature flags. */
777 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
779 /* If we have selected a cpu we are generating instructions for. */
780 static int cpu_arch_tune_set
= 0;
782 /* Cpu we are generating instructions for. */
783 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
785 /* CPU feature flags of cpu we are generating instructions for. */
786 static i386_cpu_flags cpu_arch_tune_flags
;
788 /* CPU instruction set architecture used. */
789 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
791 /* CPU feature flags of instruction set architecture used. */
792 i386_cpu_flags cpu_arch_isa_flags
;
794 /* If set, conditional jumps are not automatically promoted to handle
795 larger than a byte offset. */
796 static unsigned int no_cond_jump_promotion
= 0;
798 /* Encode SSE instructions with VEX prefix. */
799 static unsigned int sse2avx
;
801 /* Encode scalar AVX instructions with specific vector length. */
808 /* Encode VEX WIG instructions with specific vex.w. */
815 /* Encode scalar EVEX LIG instructions with specific vector length. */
823 /* Encode EVEX WIG instructions with specific evex.w. */
830 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
831 static enum rc_type evexrcig
= rne
;
833 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
834 static symbolS
*GOT_symbol
;
836 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
837 unsigned int x86_dwarf2_return_column
;
839 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
840 int x86_cie_data_alignment
;
842 /* Interface to relax_segment.
843 There are 3 major relax states for 386 jump insns because the
844 different types of jumps add different sizes to frags when we're
845 figuring out what sort of jump to choose to reach a given label.
847 BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING are used to align
848 branches which are handled by md_estimate_size_before_relax() and
849 i386_generic_table_relax_frag(). */
852 #define UNCOND_JUMP 0
854 #define COND_JUMP86 2
855 #define BRANCH_PADDING 3
856 #define BRANCH_PREFIX 4
857 #define FUSED_JCC_PADDING 5
862 #define SMALL16 (SMALL | CODE16)
864 #define BIG16 (BIG | CODE16)
868 #define INLINE __inline__
874 #define ENCODE_RELAX_STATE(type, size) \
875 ((relax_substateT) (((type) << 2) | (size)))
876 #define TYPE_FROM_RELAX_STATE(s) \
878 #define DISP_SIZE_FROM_RELAX_STATE(s) \
879 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
881 /* This table is used by relax_frag to promote short jumps to long
882 ones where necessary. SMALL (short) jumps may be promoted to BIG
883 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
884 don't allow a short jump in a 32 bit code segment to be promoted to
885 a 16 bit offset jump because it's slower (requires data size
886 prefix), and doesn't work, unless the destination is in the bottom
887 64k of the code segment (The top 16 bits of eip are zeroed). */
889 const relax_typeS md_relax_table
[] =
892 1) most positive reach of this state,
893 2) most negative reach of this state,
894 3) how many bytes this mode will have in the variable part of the frag
895 4) which index into the table to try if we can't fit into this one. */
897 /* UNCOND_JUMP states. */
898 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
899 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
900 /* dword jmp adds 4 bytes to frag:
901 0 extra opcode bytes, 4 displacement bytes. */
903 /* word jmp adds 2 bytes to frag:
904 0 extra opcode bytes, 2 displacement bytes. */
907 /* COND_JUMP states. */
908 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
909 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
910 /* dword conditionals adds 5 bytes to frag:
911 1 extra opcode byte, 4 displacement bytes. */
913 /* word conditionals add 3 bytes to frag:
914 1 extra opcode byte, 2 displacement bytes. */
917 /* COND_JUMP86 states. */
918 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
919 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
920 /* dword conditionals adds 5 bytes to frag:
921 1 extra opcode byte, 4 displacement bytes. */
923 /* word conditionals add 4 bytes to frag:
924 1 displacement byte and a 3 byte long branch insn. */
928 static const arch_entry cpu_arch
[] =
930 /* Do not replace the first two entries - i386_target_format()
931 relies on them being there in this order. */
932 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32
,
933 CPU_GENERIC32_FLAGS
, 0 },
934 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64
,
935 CPU_GENERIC64_FLAGS
, 0 },
936 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN
,
938 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN
,
940 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN
,
942 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386
,
944 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486
,
946 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM
,
948 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO
,
950 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM
,
952 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO
,
953 CPU_PENTIUMPRO_FLAGS
, 0 },
954 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO
,
956 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO
,
958 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4
,
960 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA
,
962 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA
,
963 CPU_NOCONA_FLAGS
, 0 },
964 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE
,
966 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE
,
968 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2
,
969 CPU_CORE2_FLAGS
, 1 },
970 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2
,
971 CPU_CORE2_FLAGS
, 0 },
972 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7
,
973 CPU_COREI7_FLAGS
, 0 },
974 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM
,
976 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM
,
978 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU
,
979 CPU_IAMCU_FLAGS
, 0 },
980 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6
,
982 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6
,
984 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON
,
985 CPU_ATHLON_FLAGS
, 0 },
986 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8
,
988 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8
,
990 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8
,
992 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10
,
993 CPU_AMDFAM10_FLAGS
, 0 },
994 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD
,
995 CPU_BDVER1_FLAGS
, 0 },
996 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD
,
997 CPU_BDVER2_FLAGS
, 0 },
998 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD
,
999 CPU_BDVER3_FLAGS
, 0 },
1000 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD
,
1001 CPU_BDVER4_FLAGS
, 0 },
1002 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER
,
1003 CPU_ZNVER1_FLAGS
, 0 },
1004 { STRING_COMMA_LEN ("znver2"), PROCESSOR_ZNVER
,
1005 CPU_ZNVER2_FLAGS
, 0 },
1006 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT
,
1007 CPU_BTVER1_FLAGS
, 0 },
1008 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT
,
1009 CPU_BTVER2_FLAGS
, 0 },
1010 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN
,
1011 CPU_8087_FLAGS
, 0 },
1012 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN
,
1014 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN
,
1016 { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN
,
1018 { STRING_COMMA_LEN (".cmov"), PROCESSOR_UNKNOWN
,
1019 CPU_CMOV_FLAGS
, 0 },
1020 { STRING_COMMA_LEN (".fxsr"), PROCESSOR_UNKNOWN
,
1021 CPU_FXSR_FLAGS
, 0 },
1022 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN
,
1024 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN
,
1026 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN
,
1027 CPU_SSE2_FLAGS
, 0 },
1028 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN
,
1029 CPU_SSE3_FLAGS
, 0 },
1030 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1031 CPU_SSE4A_FLAGS
, 0 },
1032 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN
,
1033 CPU_SSSE3_FLAGS
, 0 },
1034 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN
,
1035 CPU_SSE4_1_FLAGS
, 0 },
1036 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN
,
1037 CPU_SSE4_2_FLAGS
, 0 },
1038 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN
,
1039 CPU_SSE4_2_FLAGS
, 0 },
1040 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN
,
1042 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN
,
1043 CPU_AVX2_FLAGS
, 0 },
1044 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN
,
1045 CPU_AVX512F_FLAGS
, 0 },
1046 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN
,
1047 CPU_AVX512CD_FLAGS
, 0 },
1048 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN
,
1049 CPU_AVX512ER_FLAGS
, 0 },
1050 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN
,
1051 CPU_AVX512PF_FLAGS
, 0 },
1052 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN
,
1053 CPU_AVX512DQ_FLAGS
, 0 },
1054 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN
,
1055 CPU_AVX512BW_FLAGS
, 0 },
1056 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN
,
1057 CPU_AVX512VL_FLAGS
, 0 },
1058 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN
,
1060 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN
,
1061 CPU_VMFUNC_FLAGS
, 0 },
1062 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN
,
1064 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN
,
1065 CPU_XSAVE_FLAGS
, 0 },
1066 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN
,
1067 CPU_XSAVEOPT_FLAGS
, 0 },
1068 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN
,
1069 CPU_XSAVEC_FLAGS
, 0 },
1070 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN
,
1071 CPU_XSAVES_FLAGS
, 0 },
1072 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN
,
1074 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN
,
1075 CPU_PCLMUL_FLAGS
, 0 },
1076 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN
,
1077 CPU_PCLMUL_FLAGS
, 1 },
1078 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN
,
1079 CPU_FSGSBASE_FLAGS
, 0 },
1080 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN
,
1081 CPU_RDRND_FLAGS
, 0 },
1082 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN
,
1083 CPU_F16C_FLAGS
, 0 },
1084 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN
,
1085 CPU_BMI2_FLAGS
, 0 },
1086 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN
,
1088 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN
,
1089 CPU_FMA4_FLAGS
, 0 },
1090 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN
,
1092 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN
,
1094 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN
,
1095 CPU_MOVBE_FLAGS
, 0 },
1096 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN
,
1097 CPU_CX16_FLAGS
, 0 },
1098 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN
,
1100 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN
,
1101 CPU_LZCNT_FLAGS
, 0 },
1102 { STRING_COMMA_LEN (".popcnt"), PROCESSOR_UNKNOWN
,
1103 CPU_POPCNT_FLAGS
, 0 },
1104 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN
,
1106 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN
,
1108 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN
,
1109 CPU_INVPCID_FLAGS
, 0 },
1110 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN
,
1111 CPU_CLFLUSH_FLAGS
, 0 },
1112 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN
,
1114 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN
,
1115 CPU_SYSCALL_FLAGS
, 0 },
1116 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN
,
1117 CPU_RDTSCP_FLAGS
, 0 },
1118 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN
,
1119 CPU_3DNOW_FLAGS
, 0 },
1120 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN
,
1121 CPU_3DNOWA_FLAGS
, 0 },
1122 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN
,
1123 CPU_PADLOCK_FLAGS
, 0 },
1124 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN
,
1125 CPU_SVME_FLAGS
, 1 },
1126 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN
,
1127 CPU_SVME_FLAGS
, 0 },
1128 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1129 CPU_SSE4A_FLAGS
, 0 },
1130 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN
,
1132 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN
,
1134 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN
,
1136 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN
,
1138 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN
,
1139 CPU_RDSEED_FLAGS
, 0 },
1140 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN
,
1141 CPU_PRFCHW_FLAGS
, 0 },
1142 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN
,
1143 CPU_SMAP_FLAGS
, 0 },
1144 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN
,
1146 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN
,
1148 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN
,
1149 CPU_CLFLUSHOPT_FLAGS
, 0 },
1150 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN
,
1151 CPU_PREFETCHWT1_FLAGS
, 0 },
1152 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN
,
1154 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN
,
1155 CPU_CLWB_FLAGS
, 0 },
1156 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN
,
1157 CPU_AVX512IFMA_FLAGS
, 0 },
1158 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN
,
1159 CPU_AVX512VBMI_FLAGS
, 0 },
1160 { STRING_COMMA_LEN (".avx512_4fmaps"), PROCESSOR_UNKNOWN
,
1161 CPU_AVX512_4FMAPS_FLAGS
, 0 },
1162 { STRING_COMMA_LEN (".avx512_4vnniw"), PROCESSOR_UNKNOWN
,
1163 CPU_AVX512_4VNNIW_FLAGS
, 0 },
1164 { STRING_COMMA_LEN (".avx512_vpopcntdq"), PROCESSOR_UNKNOWN
,
1165 CPU_AVX512_VPOPCNTDQ_FLAGS
, 0 },
1166 { STRING_COMMA_LEN (".avx512_vbmi2"), PROCESSOR_UNKNOWN
,
1167 CPU_AVX512_VBMI2_FLAGS
, 0 },
1168 { STRING_COMMA_LEN (".avx512_vnni"), PROCESSOR_UNKNOWN
,
1169 CPU_AVX512_VNNI_FLAGS
, 0 },
1170 { STRING_COMMA_LEN (".avx512_bitalg"), PROCESSOR_UNKNOWN
,
1171 CPU_AVX512_BITALG_FLAGS
, 0 },
1172 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN
,
1173 CPU_CLZERO_FLAGS
, 0 },
1174 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN
,
1175 CPU_MWAITX_FLAGS
, 0 },
1176 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN
,
1177 CPU_OSPKE_FLAGS
, 0 },
1178 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN
,
1179 CPU_RDPID_FLAGS
, 0 },
1180 { STRING_COMMA_LEN (".ptwrite"), PROCESSOR_UNKNOWN
,
1181 CPU_PTWRITE_FLAGS
, 0 },
1182 { STRING_COMMA_LEN (".ibt"), PROCESSOR_UNKNOWN
,
1184 { STRING_COMMA_LEN (".shstk"), PROCESSOR_UNKNOWN
,
1185 CPU_SHSTK_FLAGS
, 0 },
1186 { STRING_COMMA_LEN (".gfni"), PROCESSOR_UNKNOWN
,
1187 CPU_GFNI_FLAGS
, 0 },
1188 { STRING_COMMA_LEN (".vaes"), PROCESSOR_UNKNOWN
,
1189 CPU_VAES_FLAGS
, 0 },
1190 { STRING_COMMA_LEN (".vpclmulqdq"), PROCESSOR_UNKNOWN
,
1191 CPU_VPCLMULQDQ_FLAGS
, 0 },
1192 { STRING_COMMA_LEN (".wbnoinvd"), PROCESSOR_UNKNOWN
,
1193 CPU_WBNOINVD_FLAGS
, 0 },
1194 { STRING_COMMA_LEN (".pconfig"), PROCESSOR_UNKNOWN
,
1195 CPU_PCONFIG_FLAGS
, 0 },
1196 { STRING_COMMA_LEN (".waitpkg"), PROCESSOR_UNKNOWN
,
1197 CPU_WAITPKG_FLAGS
, 0 },
1198 { STRING_COMMA_LEN (".cldemote"), PROCESSOR_UNKNOWN
,
1199 CPU_CLDEMOTE_FLAGS
, 0 },
1200 { STRING_COMMA_LEN (".movdiri"), PROCESSOR_UNKNOWN
,
1201 CPU_MOVDIRI_FLAGS
, 0 },
1202 { STRING_COMMA_LEN (".movdir64b"), PROCESSOR_UNKNOWN
,
1203 CPU_MOVDIR64B_FLAGS
, 0 },
1204 { STRING_COMMA_LEN (".avx512_bf16"), PROCESSOR_UNKNOWN
,
1205 CPU_AVX512_BF16_FLAGS
, 0 },
1206 { STRING_COMMA_LEN (".avx512_vp2intersect"), PROCESSOR_UNKNOWN
,
1207 CPU_AVX512_VP2INTERSECT_FLAGS
, 0 },
1208 { STRING_COMMA_LEN (".enqcmd"), PROCESSOR_UNKNOWN
,
1209 CPU_ENQCMD_FLAGS
, 0 },
1210 { STRING_COMMA_LEN (".serialize"), PROCESSOR_UNKNOWN
,
1211 CPU_SERIALIZE_FLAGS
, 0 },
1212 { STRING_COMMA_LEN (".rdpru"), PROCESSOR_UNKNOWN
,
1213 CPU_RDPRU_FLAGS
, 0 },
1214 { STRING_COMMA_LEN (".mcommit"), PROCESSOR_UNKNOWN
,
1215 CPU_MCOMMIT_FLAGS
, 0 },
1216 { STRING_COMMA_LEN (".sev_es"), PROCESSOR_UNKNOWN
,
1217 CPU_SEV_ES_FLAGS
, 0 },
1218 { STRING_COMMA_LEN (".tsxldtrk"), PROCESSOR_UNKNOWN
,
1219 CPU_TSXLDTRK_FLAGS
, 0 },
1222 static const noarch_entry cpu_noarch
[] =
1224 { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS
},
1225 { STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS
},
1226 { STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS
},
1227 { STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS
},
1228 { STRING_COMMA_LEN ("nocmov"), CPU_ANY_CMOV_FLAGS
},
1229 { STRING_COMMA_LEN ("nofxsr"), CPU_ANY_FXSR_FLAGS
},
1230 { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS
},
1231 { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS
},
1232 { STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS
},
1233 { STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS
},
1234 { STRING_COMMA_LEN ("nosse4a"), CPU_ANY_SSE4A_FLAGS
},
1235 { STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS
},
1236 { STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS
},
1237 { STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS
},
1238 { STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS
},
1239 { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS
},
1240 { STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS
},
1241 { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS
},
1242 { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS
},
1243 { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS
},
1244 { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS
},
1245 { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS
},
1246 { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS
},
1247 { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS
},
1248 { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS
},
1249 { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS
},
1250 { STRING_COMMA_LEN ("noavx512_4fmaps"), CPU_ANY_AVX512_4FMAPS_FLAGS
},
1251 { STRING_COMMA_LEN ("noavx512_4vnniw"), CPU_ANY_AVX512_4VNNIW_FLAGS
},
1252 { STRING_COMMA_LEN ("noavx512_vpopcntdq"), CPU_ANY_AVX512_VPOPCNTDQ_FLAGS
},
1253 { STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS
},
1254 { STRING_COMMA_LEN ("noavx512_vnni"), CPU_ANY_AVX512_VNNI_FLAGS
},
1255 { STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS
},
1256 { STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS
},
1257 { STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS
},
1258 { STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS
},
1259 { STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS
},
1260 { STRING_COMMA_LEN ("noavx512_bf16"), CPU_ANY_AVX512_BF16_FLAGS
},
1261 { STRING_COMMA_LEN ("noavx512_vp2intersect"), CPU_ANY_SHSTK_FLAGS
},
1262 { STRING_COMMA_LEN ("noenqcmd"), CPU_ANY_ENQCMD_FLAGS
},
1263 { STRING_COMMA_LEN ("noserialize"), CPU_ANY_SERIALIZE_FLAGS
},
1264 { STRING_COMMA_LEN ("notsxldtrk"), CPU_ANY_TSXLDTRK_FLAGS
},
1268 /* Like s_lcomm_internal in gas/read.c but the alignment string
1269 is allowed to be optional. */
1272 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
1279 && *input_line_pointer
== ',')
1281 align
= parse_align (needs_align
- 1);
1283 if (align
== (addressT
) -1)
1298 bss_alloc (symbolP
, size
, align
);
1303 pe_lcomm (int needs_align
)
1305 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
1309 const pseudo_typeS md_pseudo_table
[] =
1311 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1312 {"align", s_align_bytes
, 0},
1314 {"align", s_align_ptwo
, 0},
1316 {"arch", set_cpu_arch
, 0},
1320 {"lcomm", pe_lcomm
, 1},
1322 {"ffloat", float_cons
, 'f'},
1323 {"dfloat", float_cons
, 'd'},
1324 {"tfloat", float_cons
, 'x'},
1326 {"slong", signed_cons
, 4},
1327 {"noopt", s_ignore
, 0},
1328 {"optim", s_ignore
, 0},
1329 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
1330 {"code16", set_code_flag
, CODE_16BIT
},
1331 {"code32", set_code_flag
, CODE_32BIT
},
1333 {"code64", set_code_flag
, CODE_64BIT
},
1335 {"intel_syntax", set_intel_syntax
, 1},
1336 {"att_syntax", set_intel_syntax
, 0},
1337 {"intel_mnemonic", set_intel_mnemonic
, 1},
1338 {"att_mnemonic", set_intel_mnemonic
, 0},
1339 {"allow_index_reg", set_allow_index_reg
, 1},
1340 {"disallow_index_reg", set_allow_index_reg
, 0},
1341 {"sse_check", set_check
, 0},
1342 {"operand_check", set_check
, 1},
1343 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1344 {"largecomm", handle_large_common
, 0},
1346 {"file", dwarf2_directive_file
, 0},
1347 {"loc", dwarf2_directive_loc
, 0},
1348 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
1351 {"secrel32", pe_directive_secrel
, 0},
/* For interface with expression ().  */
extern char *input_line_pointer;

/* Hash table for instruction mnemonic lookup.  */
static struct hash_control *op_hash;

/* Hash table for register lookup.  */
static struct hash_control *reg_hash;
/* Various efficient no-op patterns for aligning code labels.
   Note: Don't try to assemble the instructions in the comments.
   0L and 0w are not legal.  */
static const unsigned char f32_1[] =
  {0x90};				/* nop			*/
static const unsigned char f32_2[] =
  {0x66,0x90};				/* xchg %ax,%ax		*/
static const unsigned char f32_3[] =
  {0x8d,0x76,0x00};			/* leal 0(%esi),%esi	*/
static const unsigned char f32_4[] =
  {0x8d,0x74,0x26,0x00};		/* leal 0(%esi,1),%esi	*/
static const unsigned char f32_6[] =
  {0x8d,0xb6,0x00,0x00,0x00,0x00};	/* leal 0L(%esi),%esi	*/
static const unsigned char f32_7[] =
  {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};	/* leal 0L(%esi,1),%esi */
static const unsigned char f16_3[] =
  {0x8d,0x74,0x00};			/* lea 0(%si),%si	*/
static const unsigned char f16_4[] =
  {0x8d,0xb4,0x00,0x00};		/* lea 0W(%si),%si	*/
static const unsigned char jump_disp8[] =
  {0xeb};				/* jmp disp8		*/
static const unsigned char jump32_disp32[] =
  {0xe9};				/* jmp disp32		*/
static const unsigned char jump16_disp32[] =
  {0x66,0xe9};				/* jmp disp32		*/
/* 32-bit NOPs patterns, indexed by size - 1.  A NULL slot means no
   single pattern of that size exists.  */
static const unsigned char *const f32_patt[] = {
  f32_1, f32_2, f32_3, f32_4, NULL, f32_6, f32_7
};
/* 16-bit NOPs patterns.  */
static const unsigned char *const f16_patt[] = {
  f32_1, f32_2, f16_3, f16_4
};
/* nopl (%[re]ax) */
static const unsigned char alt_3[] =
  {0x0f,0x1f,0x00};
/* nopl 0(%[re]ax) */
static const unsigned char alt_4[] =
  {0x0f,0x1f,0x40,0x00};
/* nopl 0(%[re]ax,%[re]ax,1) */
static const unsigned char alt_5[] =
  {0x0f,0x1f,0x44,0x00,0x00};
/* nopw 0(%[re]ax,%[re]ax,1) */
static const unsigned char alt_6[] =
  {0x66,0x0f,0x1f,0x44,0x00,0x00};
/* nopl 0L(%[re]ax) */
static const unsigned char alt_7[] =
  {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
/* nopl 0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_8[] =
  {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* nopw 0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_9[] =
  {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* nopw %cs:0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_10[] =
  {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* data16 nopw %cs:0L(%eax,%eax,1) */
static const unsigned char alt_11[] =
  {0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* 32-bit and 64-bit NOPs patterns.  */
static const unsigned char *const alt_patt[] = {
  f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
  alt_9, alt_10, alt_11
};
/* Generate COUNT bytes of NOPs to WHERE from PATT with the maximum
   size of a single NOP instruction MAX_SINGLE_NOP_SIZE.  */

static void
i386_output_nops (char *where, const unsigned char *const *patt,
		  int count, int max_single_nop_size)
{
  /* Place the longer NOP first.  */
  int last;
  int offset;
  const unsigned char *nops;

  if (max_single_nop_size < 1)
    {
      as_fatal (_("i386_output_nops called to generate nops of at most %d bytes!"),
		max_single_nop_size);
      return;
    }

  nops = patt[max_single_nop_size - 1];

  /* Use the smaller one if the requested one isn't available.  */
  if (nops == NULL)
    {
      max_single_nop_size--;
      nops = patt[max_single_nop_size - 1];
    }

  last = count % max_single_nop_size;

  count -= last;
  for (offset = 0; offset < count; offset += max_single_nop_size)
    memcpy (where + offset, nops, max_single_nop_size);

  if (last)
    {
      nops = patt[last - 1];
      if (nops == NULL)
	{
	  /* Use the smaller one plus one-byte NOP if the needed one
	     isn't available.  */
	  last--;
	  nops = patt[last - 1];
	  memcpy (where + offset, nops, last);
	  where[offset + last] = *patt[0];
	}
      else
	memcpy (where + offset, nops, last);
    }
}
1484 fits_in_imm7 (offsetT num
)
1486 return (num
& 0x7f) == num
;
1490 fits_in_imm31 (offsetT num
)
1492 return (num
& 0x7fffffff) == num
;
1495 /* Genenerate COUNT bytes of NOPs to WHERE with the maximum size of a
1496 single NOP instruction LIMIT. */
1499 i386_generate_nops (fragS
*fragP
, char *where
, offsetT count
, int limit
)
1501 const unsigned char *const *patt
= NULL
;
1502 int max_single_nop_size
;
1503 /* Maximum number of NOPs before switching to jump over NOPs. */
1504 int max_number_of_nops
;
1506 switch (fragP
->fr_type
)
1511 case rs_machine_dependent
:
1512 /* Allow NOP padding for jumps and calls. */
1513 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
1514 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
1521 /* We need to decide which NOP sequence to use for 32bit and
1522 64bit. When -mtune= is used:
1524 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1525 PROCESSOR_GENERIC32, f32_patt will be used.
1526 2. For the rest, alt_patt will be used.
1528 When -mtune= isn't used, alt_patt will be used if
1529 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1532 When -march= or .arch is used, we can't use anything beyond
1533 cpu_arch_isa_flags. */
1535 if (flag_code
== CODE_16BIT
)
1538 max_single_nop_size
= sizeof (f16_patt
) / sizeof (f16_patt
[0]);
1539 /* Limit number of NOPs to 2 in 16-bit mode. */
1540 max_number_of_nops
= 2;
1544 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1546 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1547 switch (cpu_arch_tune
)
1549 case PROCESSOR_UNKNOWN
:
1550 /* We use cpu_arch_isa_flags to check if we SHOULD
1551 optimize with nops. */
1552 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1557 case PROCESSOR_PENTIUM4
:
1558 case PROCESSOR_NOCONA
:
1559 case PROCESSOR_CORE
:
1560 case PROCESSOR_CORE2
:
1561 case PROCESSOR_COREI7
:
1562 case PROCESSOR_L1OM
:
1563 case PROCESSOR_K1OM
:
1564 case PROCESSOR_GENERIC64
:
1566 case PROCESSOR_ATHLON
:
1568 case PROCESSOR_AMDFAM10
:
1570 case PROCESSOR_ZNVER
:
1574 case PROCESSOR_I386
:
1575 case PROCESSOR_I486
:
1576 case PROCESSOR_PENTIUM
:
1577 case PROCESSOR_PENTIUMPRO
:
1578 case PROCESSOR_IAMCU
:
1579 case PROCESSOR_GENERIC32
:
1586 switch (fragP
->tc_frag_data
.tune
)
1588 case PROCESSOR_UNKNOWN
:
1589 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1590 PROCESSOR_UNKNOWN. */
1594 case PROCESSOR_I386
:
1595 case PROCESSOR_I486
:
1596 case PROCESSOR_PENTIUM
:
1597 case PROCESSOR_IAMCU
:
1599 case PROCESSOR_ATHLON
:
1601 case PROCESSOR_AMDFAM10
:
1603 case PROCESSOR_ZNVER
:
1605 case PROCESSOR_GENERIC32
:
1606 /* We use cpu_arch_isa_flags to check if we CAN optimize
1608 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1613 case PROCESSOR_PENTIUMPRO
:
1614 case PROCESSOR_PENTIUM4
:
1615 case PROCESSOR_NOCONA
:
1616 case PROCESSOR_CORE
:
1617 case PROCESSOR_CORE2
:
1618 case PROCESSOR_COREI7
:
1619 case PROCESSOR_L1OM
:
1620 case PROCESSOR_K1OM
:
1621 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1626 case PROCESSOR_GENERIC64
:
1632 if (patt
== f32_patt
)
1634 max_single_nop_size
= sizeof (f32_patt
) / sizeof (f32_patt
[0]);
1635 /* Limit number of NOPs to 2 for older processors. */
1636 max_number_of_nops
= 2;
1640 max_single_nop_size
= sizeof (alt_patt
) / sizeof (alt_patt
[0]);
1641 /* Limit number of NOPs to 7 for newer processors. */
1642 max_number_of_nops
= 7;
1647 limit
= max_single_nop_size
;
1649 if (fragP
->fr_type
== rs_fill_nop
)
1651 /* Output NOPs for .nop directive. */
1652 if (limit
> max_single_nop_size
)
1654 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1655 _("invalid single nop size: %d "
1656 "(expect within [0, %d])"),
1657 limit
, max_single_nop_size
);
1661 else if (fragP
->fr_type
!= rs_machine_dependent
)
1662 fragP
->fr_var
= count
;
1664 if ((count
/ max_single_nop_size
) > max_number_of_nops
)
1666 /* Generate jump over NOPs. */
1667 offsetT disp
= count
- 2;
1668 if (fits_in_imm7 (disp
))
1670 /* Use "jmp disp8" if possible. */
1672 where
[0] = jump_disp8
[0];
1678 unsigned int size_of_jump
;
1680 if (flag_code
== CODE_16BIT
)
1682 where
[0] = jump16_disp32
[0];
1683 where
[1] = jump16_disp32
[1];
1688 where
[0] = jump32_disp32
[0];
1692 count
-= size_of_jump
+ 4;
1693 if (!fits_in_imm31 (count
))
1695 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1696 _("jump over nop padding out of range"));
1700 md_number_to_chars (where
+ size_of_jump
, count
, 4);
1701 where
+= size_of_jump
+ 4;
1705 /* Generate multiple NOPs. */
1706 i386_output_nops (where
, patt
, count
, limit
);
1710 operand_type_all_zero (const union i386_operand_type
*x
)
1712 switch (ARRAY_SIZE(x
->array
))
1723 return !x
->array
[0];
1730 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1732 switch (ARRAY_SIZE(x
->array
))
1748 x
->bitfield
.class = ClassNone
;
1749 x
->bitfield
.instance
= InstanceNone
;
1753 operand_type_equal (const union i386_operand_type
*x
,
1754 const union i386_operand_type
*y
)
1756 switch (ARRAY_SIZE(x
->array
))
1759 if (x
->array
[2] != y
->array
[2])
1763 if (x
->array
[1] != y
->array
[1])
1767 return x
->array
[0] == y
->array
[0];
1775 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1777 switch (ARRAY_SIZE(x
->array
))
1792 return !x
->array
[0];
1799 cpu_flags_equal (const union i386_cpu_flags
*x
,
1800 const union i386_cpu_flags
*y
)
1802 switch (ARRAY_SIZE(x
->array
))
1805 if (x
->array
[3] != y
->array
[3])
1809 if (x
->array
[2] != y
->array
[2])
1813 if (x
->array
[1] != y
->array
[1])
1817 return x
->array
[0] == y
->array
[0];
1825 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1827 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1828 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1831 static INLINE i386_cpu_flags
1832 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1834 switch (ARRAY_SIZE (x
.array
))
1837 x
.array
[3] &= y
.array
[3];
1840 x
.array
[2] &= y
.array
[2];
1843 x
.array
[1] &= y
.array
[1];
1846 x
.array
[0] &= y
.array
[0];
1854 static INLINE i386_cpu_flags
1855 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1857 switch (ARRAY_SIZE (x
.array
))
1860 x
.array
[3] |= y
.array
[3];
1863 x
.array
[2] |= y
.array
[2];
1866 x
.array
[1] |= y
.array
[1];
1869 x
.array
[0] |= y
.array
[0];
1877 static INLINE i386_cpu_flags
1878 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1880 switch (ARRAY_SIZE (x
.array
))
1883 x
.array
[3] &= ~y
.array
[3];
1886 x
.array
[2] &= ~y
.array
[2];
1889 x
.array
[1] &= ~y
.array
[1];
1892 x
.array
[0] &= ~y
.array
[0];
1900 static const i386_cpu_flags avx512
= CPU_ANY_AVX512F_FLAGS
;
1902 #define CPU_FLAGS_ARCH_MATCH 0x1
1903 #define CPU_FLAGS_64BIT_MATCH 0x2
1905 #define CPU_FLAGS_PERFECT_MATCH \
1906 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_64BIT_MATCH)
1908 /* Return CPU flags match bits. */
1911 cpu_flags_match (const insn_template
*t
)
1913 i386_cpu_flags x
= t
->cpu_flags
;
1914 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1916 x
.bitfield
.cpu64
= 0;
1917 x
.bitfield
.cpuno64
= 0;
1919 if (cpu_flags_all_zero (&x
))
1921 /* This instruction is available on all archs. */
1922 match
|= CPU_FLAGS_ARCH_MATCH
;
1926 /* This instruction is available only on some archs. */
1927 i386_cpu_flags cpu
= cpu_arch_flags
;
1929 /* AVX512VL is no standalone feature - match it and then strip it. */
1930 if (x
.bitfield
.cpuavx512vl
&& !cpu
.bitfield
.cpuavx512vl
)
1932 x
.bitfield
.cpuavx512vl
= 0;
1934 cpu
= cpu_flags_and (x
, cpu
);
1935 if (!cpu_flags_all_zero (&cpu
))
1937 if (x
.bitfield
.cpuavx
)
1939 /* We need to check a few extra flags with AVX. */
1940 if (cpu
.bitfield
.cpuavx
1941 && (!t
->opcode_modifier
.sse2avx
|| sse2avx
)
1942 && (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1943 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1944 && (!x
.bitfield
.cpupclmul
|| cpu
.bitfield
.cpupclmul
))
1945 match
|= CPU_FLAGS_ARCH_MATCH
;
1947 else if (x
.bitfield
.cpuavx512f
)
1949 /* We need to check a few extra flags with AVX512F. */
1950 if (cpu
.bitfield
.cpuavx512f
1951 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1952 && (!x
.bitfield
.cpuvaes
|| cpu
.bitfield
.cpuvaes
)
1953 && (!x
.bitfield
.cpuvpclmulqdq
|| cpu
.bitfield
.cpuvpclmulqdq
))
1954 match
|= CPU_FLAGS_ARCH_MATCH
;
1957 match
|= CPU_FLAGS_ARCH_MATCH
;
1963 static INLINE i386_operand_type
1964 operand_type_and (i386_operand_type x
, i386_operand_type y
)
1966 if (x
.bitfield
.class != y
.bitfield
.class)
1967 x
.bitfield
.class = ClassNone
;
1968 if (x
.bitfield
.instance
!= y
.bitfield
.instance
)
1969 x
.bitfield
.instance
= InstanceNone
;
1971 switch (ARRAY_SIZE (x
.array
))
1974 x
.array
[2] &= y
.array
[2];
1977 x
.array
[1] &= y
.array
[1];
1980 x
.array
[0] &= y
.array
[0];
1988 static INLINE i386_operand_type
1989 operand_type_and_not (i386_operand_type x
, i386_operand_type y
)
1991 gas_assert (y
.bitfield
.class == ClassNone
);
1992 gas_assert (y
.bitfield
.instance
== InstanceNone
);
1994 switch (ARRAY_SIZE (x
.array
))
1997 x
.array
[2] &= ~y
.array
[2];
2000 x
.array
[1] &= ~y
.array
[1];
2003 x
.array
[0] &= ~y
.array
[0];
2011 static INLINE i386_operand_type
2012 operand_type_or (i386_operand_type x
, i386_operand_type y
)
2014 gas_assert (x
.bitfield
.class == ClassNone
||
2015 y
.bitfield
.class == ClassNone
||
2016 x
.bitfield
.class == y
.bitfield
.class);
2017 gas_assert (x
.bitfield
.instance
== InstanceNone
||
2018 y
.bitfield
.instance
== InstanceNone
||
2019 x
.bitfield
.instance
== y
.bitfield
.instance
);
2021 switch (ARRAY_SIZE (x
.array
))
2024 x
.array
[2] |= y
.array
[2];
2027 x
.array
[1] |= y
.array
[1];
2030 x
.array
[0] |= y
.array
[0];
2038 static INLINE i386_operand_type
2039 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
2041 gas_assert (y
.bitfield
.class == ClassNone
);
2042 gas_assert (y
.bitfield
.instance
== InstanceNone
);
2044 switch (ARRAY_SIZE (x
.array
))
2047 x
.array
[2] ^= y
.array
[2];
2050 x
.array
[1] ^= y
.array
[1];
2053 x
.array
[0] ^= y
.array
[0];
2061 static const i386_operand_type disp16
= OPERAND_TYPE_DISP16
;
2062 static const i386_operand_type disp32
= OPERAND_TYPE_DISP32
;
2063 static const i386_operand_type disp32s
= OPERAND_TYPE_DISP32S
;
2064 static const i386_operand_type disp16_32
= OPERAND_TYPE_DISP16_32
;
2065 static const i386_operand_type anydisp
= OPERAND_TYPE_ANYDISP
;
2066 static const i386_operand_type anyimm
= OPERAND_TYPE_ANYIMM
;
2067 static const i386_operand_type regxmm
= OPERAND_TYPE_REGXMM
;
2068 static const i386_operand_type regmask
= OPERAND_TYPE_REGMASK
;
2069 static const i386_operand_type imm8
= OPERAND_TYPE_IMM8
;
2070 static const i386_operand_type imm8s
= OPERAND_TYPE_IMM8S
;
2071 static const i386_operand_type imm16
= OPERAND_TYPE_IMM16
;
2072 static const i386_operand_type imm32
= OPERAND_TYPE_IMM32
;
2073 static const i386_operand_type imm32s
= OPERAND_TYPE_IMM32S
;
2074 static const i386_operand_type imm64
= OPERAND_TYPE_IMM64
;
2075 static const i386_operand_type imm16_32
= OPERAND_TYPE_IMM16_32
;
2076 static const i386_operand_type imm16_32s
= OPERAND_TYPE_IMM16_32S
;
2077 static const i386_operand_type imm16_32_32s
= OPERAND_TYPE_IMM16_32_32S
;
2088 operand_type_check (i386_operand_type t
, enum operand_type c
)
2093 return t
.bitfield
.class == Reg
;
2096 return (t
.bitfield
.imm8
2100 || t
.bitfield
.imm32s
2101 || t
.bitfield
.imm64
);
2104 return (t
.bitfield
.disp8
2105 || t
.bitfield
.disp16
2106 || t
.bitfield
.disp32
2107 || t
.bitfield
.disp32s
2108 || t
.bitfield
.disp64
);
2111 return (t
.bitfield
.disp8
2112 || t
.bitfield
.disp16
2113 || t
.bitfield
.disp32
2114 || t
.bitfield
.disp32s
2115 || t
.bitfield
.disp64
2116 || t
.bitfield
.baseindex
);
2125 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
2126 between operand GIVEN and opeand WANTED for instruction template T. */
2129 match_operand_size (const insn_template
*t
, unsigned int wanted
,
2132 return !((i
.types
[given
].bitfield
.byte
2133 && !t
->operand_types
[wanted
].bitfield
.byte
)
2134 || (i
.types
[given
].bitfield
.word
2135 && !t
->operand_types
[wanted
].bitfield
.word
)
2136 || (i
.types
[given
].bitfield
.dword
2137 && !t
->operand_types
[wanted
].bitfield
.dword
)
2138 || (i
.types
[given
].bitfield
.qword
2139 && !t
->operand_types
[wanted
].bitfield
.qword
)
2140 || (i
.types
[given
].bitfield
.tbyte
2141 && !t
->operand_types
[wanted
].bitfield
.tbyte
));
2144 /* Return 1 if there is no conflict in SIMD register between operand
2145 GIVEN and opeand WANTED for instruction template T. */
2148 match_simd_size (const insn_template
*t
, unsigned int wanted
,
2151 return !((i
.types
[given
].bitfield
.xmmword
2152 && !t
->operand_types
[wanted
].bitfield
.xmmword
)
2153 || (i
.types
[given
].bitfield
.ymmword
2154 && !t
->operand_types
[wanted
].bitfield
.ymmword
)
2155 || (i
.types
[given
].bitfield
.zmmword
2156 && !t
->operand_types
[wanted
].bitfield
.zmmword
));
2159 /* Return 1 if there is no conflict in any size between operand GIVEN
2160 and opeand WANTED for instruction template T. */
2163 match_mem_size (const insn_template
*t
, unsigned int wanted
,
2166 return (match_operand_size (t
, wanted
, given
)
2167 && !((i
.types
[given
].bitfield
.unspecified
2169 && !t
->operand_types
[wanted
].bitfield
.unspecified
)
2170 || (i
.types
[given
].bitfield
.fword
2171 && !t
->operand_types
[wanted
].bitfield
.fword
)
2172 /* For scalar opcode templates to allow register and memory
2173 operands at the same time, some special casing is needed
2174 here. Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
2175 down-conversion vpmov*. */
2176 || ((t
->operand_types
[wanted
].bitfield
.class == RegSIMD
2177 && t
->operand_types
[wanted
].bitfield
.byte
2178 + t
->operand_types
[wanted
].bitfield
.word
2179 + t
->operand_types
[wanted
].bitfield
.dword
2180 + t
->operand_types
[wanted
].bitfield
.qword
2181 > !!t
->opcode_modifier
.broadcast
)
2182 ? (i
.types
[given
].bitfield
.xmmword
2183 || i
.types
[given
].bitfield
.ymmword
2184 || i
.types
[given
].bitfield
.zmmword
)
2185 : !match_simd_size(t
, wanted
, given
))));
2188 /* Return value has MATCH_STRAIGHT set if there is no size conflict on any
2189 operands for instruction template T, and it has MATCH_REVERSE set if there
2190 is no size conflict on any operands for the template with operands reversed
2191 (and the template allows for reversing in the first place). */
2193 #define MATCH_STRAIGHT 1
2194 #define MATCH_REVERSE 2
2196 static INLINE
unsigned int
2197 operand_size_match (const insn_template
*t
)
2199 unsigned int j
, match
= MATCH_STRAIGHT
;
2201 /* Don't check non-absolute jump instructions. */
2202 if (t
->opcode_modifier
.jump
2203 && t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
2206 /* Check memory and accumulator operand size. */
2207 for (j
= 0; j
< i
.operands
; j
++)
2209 if (i
.types
[j
].bitfield
.class != Reg
2210 && i
.types
[j
].bitfield
.class != RegSIMD
2211 && t
->opcode_modifier
.anysize
)
2214 if (t
->operand_types
[j
].bitfield
.class == Reg
2215 && !match_operand_size (t
, j
, j
))
2221 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2222 && !match_simd_size (t
, j
, j
))
2228 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2229 && (!match_operand_size (t
, j
, j
) || !match_simd_size (t
, j
, j
)))
2235 if ((i
.flags
[j
] & Operand_Mem
) && !match_mem_size (t
, j
, j
))
2242 if (!t
->opcode_modifier
.d
)
2246 i
.error
= operand_size_mismatch
;
2250 /* Check reverse. */
2251 gas_assert (i
.operands
>= 2 && i
.operands
<= 3);
2253 for (j
= 0; j
< i
.operands
; j
++)
2255 unsigned int given
= i
.operands
- j
- 1;
2257 if (t
->operand_types
[j
].bitfield
.class == Reg
2258 && !match_operand_size (t
, j
, given
))
2261 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2262 && !match_simd_size (t
, j
, given
))
2265 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2266 && (!match_operand_size (t
, j
, given
)
2267 || !match_simd_size (t
, j
, given
)))
2270 if ((i
.flags
[given
] & Operand_Mem
) && !match_mem_size (t
, j
, given
))
2274 return match
| MATCH_REVERSE
;
2278 operand_type_match (i386_operand_type overlap
,
2279 i386_operand_type given
)
2281 i386_operand_type temp
= overlap
;
2283 temp
.bitfield
.unspecified
= 0;
2284 temp
.bitfield
.byte
= 0;
2285 temp
.bitfield
.word
= 0;
2286 temp
.bitfield
.dword
= 0;
2287 temp
.bitfield
.fword
= 0;
2288 temp
.bitfield
.qword
= 0;
2289 temp
.bitfield
.tbyte
= 0;
2290 temp
.bitfield
.xmmword
= 0;
2291 temp
.bitfield
.ymmword
= 0;
2292 temp
.bitfield
.zmmword
= 0;
2293 if (operand_type_all_zero (&temp
))
2296 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
)
2300 i
.error
= operand_type_mismatch
;
2304 /* If given types g0 and g1 are registers they must be of the same type
2305 unless the expected operand type register overlap is null.
2306 Some Intel syntax memory operand size checking also happens here. */
2309 operand_type_register_match (i386_operand_type g0
,
2310 i386_operand_type t0
,
2311 i386_operand_type g1
,
2312 i386_operand_type t1
)
2314 if (g0
.bitfield
.class != Reg
2315 && g0
.bitfield
.class != RegSIMD
2316 && (!operand_type_check (g0
, anymem
)
2317 || g0
.bitfield
.unspecified
2318 || (t0
.bitfield
.class != Reg
2319 && t0
.bitfield
.class != RegSIMD
)))
2322 if (g1
.bitfield
.class != Reg
2323 && g1
.bitfield
.class != RegSIMD
2324 && (!operand_type_check (g1
, anymem
)
2325 || g1
.bitfield
.unspecified
2326 || (t1
.bitfield
.class != Reg
2327 && t1
.bitfield
.class != RegSIMD
)))
2330 if (g0
.bitfield
.byte
== g1
.bitfield
.byte
2331 && g0
.bitfield
.word
== g1
.bitfield
.word
2332 && g0
.bitfield
.dword
== g1
.bitfield
.dword
2333 && g0
.bitfield
.qword
== g1
.bitfield
.qword
2334 && g0
.bitfield
.xmmword
== g1
.bitfield
.xmmword
2335 && g0
.bitfield
.ymmword
== g1
.bitfield
.ymmword
2336 && g0
.bitfield
.zmmword
== g1
.bitfield
.zmmword
)
2339 if (!(t0
.bitfield
.byte
& t1
.bitfield
.byte
)
2340 && !(t0
.bitfield
.word
& t1
.bitfield
.word
)
2341 && !(t0
.bitfield
.dword
& t1
.bitfield
.dword
)
2342 && !(t0
.bitfield
.qword
& t1
.bitfield
.qword
)
2343 && !(t0
.bitfield
.xmmword
& t1
.bitfield
.xmmword
)
2344 && !(t0
.bitfield
.ymmword
& t1
.bitfield
.ymmword
)
2345 && !(t0
.bitfield
.zmmword
& t1
.bitfield
.zmmword
))
2348 i
.error
= register_type_mismatch
;
2353 static INLINE
unsigned int
2354 register_number (const reg_entry
*r
)
2356 unsigned int nr
= r
->reg_num
;
2358 if (r
->reg_flags
& RegRex
)
2361 if (r
->reg_flags
& RegVRex
)
2367 static INLINE
unsigned int
2368 mode_from_disp_size (i386_operand_type t
)
2370 if (t
.bitfield
.disp8
)
2372 else if (t
.bitfield
.disp16
2373 || t
.bitfield
.disp32
2374 || t
.bitfield
.disp32s
)
2381 fits_in_signed_byte (addressT num
)
2383 return num
+ 0x80 <= 0xff;
2387 fits_in_unsigned_byte (addressT num
)
2393 fits_in_unsigned_word (addressT num
)
2395 return num
<= 0xffff;
2399 fits_in_signed_word (addressT num
)
2401 return num
+ 0x8000 <= 0xffff;
2405 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED
)
2410 return num
+ 0x80000000 <= 0xffffffff;
2412 } /* fits_in_signed_long() */
2415 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED
)
2420 return num
<= 0xffffffff;
2422 } /* fits_in_unsigned_long() */
2425 fits_in_disp8 (offsetT num
)
2427 int shift
= i
.memshift
;
2433 mask
= (1 << shift
) - 1;
2435 /* Return 0 if NUM isn't properly aligned. */
2439 /* Check if NUM will fit in 8bit after shift. */
2440 return fits_in_signed_byte (num
>> shift
);
2444 fits_in_imm4 (offsetT num
)
2446 return (num
& 0xf) == num
;
2449 static i386_operand_type
2450 smallest_imm_type (offsetT num
)
2452 i386_operand_type t
;
2454 operand_type_set (&t
, 0);
2455 t
.bitfield
.imm64
= 1;
2457 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
2459 /* This code is disabled on the 486 because all the Imm1 forms
2460 in the opcode table are slower on the i486. They're the
2461 versions with the implicitly specified single-position
2462 displacement, which has another syntax if you really want to
2464 t
.bitfield
.imm1
= 1;
2465 t
.bitfield
.imm8
= 1;
2466 t
.bitfield
.imm8s
= 1;
2467 t
.bitfield
.imm16
= 1;
2468 t
.bitfield
.imm32
= 1;
2469 t
.bitfield
.imm32s
= 1;
2471 else if (fits_in_signed_byte (num
))
2473 t
.bitfield
.imm8
= 1;
2474 t
.bitfield
.imm8s
= 1;
2475 t
.bitfield
.imm16
= 1;
2476 t
.bitfield
.imm32
= 1;
2477 t
.bitfield
.imm32s
= 1;
2479 else if (fits_in_unsigned_byte (num
))
2481 t
.bitfield
.imm8
= 1;
2482 t
.bitfield
.imm16
= 1;
2483 t
.bitfield
.imm32
= 1;
2484 t
.bitfield
.imm32s
= 1;
2486 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
2488 t
.bitfield
.imm16
= 1;
2489 t
.bitfield
.imm32
= 1;
2490 t
.bitfield
.imm32s
= 1;
2492 else if (fits_in_signed_long (num
))
2494 t
.bitfield
.imm32
= 1;
2495 t
.bitfield
.imm32s
= 1;
2497 else if (fits_in_unsigned_long (num
))
2498 t
.bitfield
.imm32
= 1;
2504 offset_in_range (offsetT val
, int size
)
2510 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
2511 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
2512 case 4: mask
= ((addressT
) 2 << 31) - 1; break;
2514 case 8: mask
= ((addressT
) 2 << 63) - 1; break;
2520 /* If BFD64, sign extend val for 32bit address mode. */
2521 if (flag_code
!= CODE_64BIT
2522 || i
.prefix
[ADDR_PREFIX
])
2523 if ((val
& ~(((addressT
) 2 << 31) - 1)) == 0)
2524 val
= (val
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
2527 if ((val
& ~mask
) != 0 && (val
& ~mask
) != ~mask
)
2529 char buf1
[40], buf2
[40];
2531 sprint_value (buf1
, val
);
2532 sprint_value (buf2
, val
& mask
);
2533 as_warn (_("%s shortened to %s"), buf1
, buf2
);
2548 a. PREFIX_EXIST if attempting to add a prefix where one from the
2549 same class already exists.
2550 b. PREFIX_LOCK if lock prefix is added.
2551 c. PREFIX_REP if rep/repne prefix is added.
2552 d. PREFIX_DS if ds prefix is added.
2553 e. PREFIX_OTHER if other prefix is added.
2556 static enum PREFIX_GROUP
2557 add_prefix (unsigned int prefix
)
2559 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2562 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2563 && flag_code
== CODE_64BIT
)
2565 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2566 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_R
)
2567 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_X
)
2568 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_B
))
2579 case DS_PREFIX_OPCODE
:
2582 case CS_PREFIX_OPCODE
:
2583 case ES_PREFIX_OPCODE
:
2584 case FS_PREFIX_OPCODE
:
2585 case GS_PREFIX_OPCODE
:
2586 case SS_PREFIX_OPCODE
:
2590 case REPNE_PREFIX_OPCODE
:
2591 case REPE_PREFIX_OPCODE
:
2596 case LOCK_PREFIX_OPCODE
:
2605 case ADDR_PREFIX_OPCODE
:
2609 case DATA_PREFIX_OPCODE
:
2613 if (i
.prefix
[q
] != 0)
2621 i
.prefix
[q
] |= prefix
;
2624 as_bad (_("same type of prefix used twice"));
2630 update_code_flag (int value
, int check
)
2632 PRINTF_LIKE ((*as_error
));
2634 flag_code
= (enum flag_code
) value
;
2635 if (flag_code
== CODE_64BIT
)
2637 cpu_arch_flags
.bitfield
.cpu64
= 1;
2638 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2642 cpu_arch_flags
.bitfield
.cpu64
= 0;
2643 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2645 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2648 as_error
= as_fatal
;
2651 (*as_error
) (_("64bit mode not supported on `%s'."),
2652 cpu_arch_name
? cpu_arch_name
: default_arch
);
2654 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2657 as_error
= as_fatal
;
2660 (*as_error
) (_("32bit mode not supported on `%s'."),
2661 cpu_arch_name
? cpu_arch_name
: default_arch
);
2663 stackop_size
= '\0';
/* Handler for the .code16/.code32/.code64 directives: update the code
   flag without treating an unsupported mode as fatal.  */

static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2673 set_16bit_gcc_code_flag (int new_code_flag
)
2675 flag_code
= (enum flag_code
) new_code_flag
;
2676 if (flag_code
!= CODE_16BIT
)
2678 cpu_arch_flags
.bitfield
.cpu64
= 0;
2679 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2680 stackop_size
= LONG_MNEM_SUFFIX
;
2684 set_intel_syntax (int syntax_flag
)
2686 /* Find out if register prefixing is specified. */
2687 int ask_naked_reg
= 0;
2690 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2693 int e
= get_symbol_name (&string
);
2695 if (strcmp (string
, "prefix") == 0)
2697 else if (strcmp (string
, "noprefix") == 0)
2700 as_bad (_("bad argument to syntax directive."));
2701 (void) restore_line_pointer (e
);
2703 demand_empty_rest_of_line ();
2705 intel_syntax
= syntax_flag
;
2707 if (ask_naked_reg
== 0)
2708 allow_naked_reg
= (intel_syntax
2709 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2711 allow_naked_reg
= (ask_naked_reg
< 0);
2713 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2715 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2716 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2717 register_prefix
= allow_naked_reg
? "" : "%";
2721 set_intel_mnemonic (int mnemonic_flag
)
2723 intel_mnemonic
= mnemonic_flag
;
2727 set_allow_index_reg (int flag
)
2729 allow_index_reg
= flag
;
2733 set_check (int what
)
2735 enum check_kind
*kind
;
2740 kind
= &operand_check
;
2751 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2754 int e
= get_symbol_name (&string
);
2756 if (strcmp (string
, "none") == 0)
2758 else if (strcmp (string
, "warning") == 0)
2759 *kind
= check_warning
;
2760 else if (strcmp (string
, "error") == 0)
2761 *kind
= check_error
;
2763 as_bad (_("bad argument to %s_check directive."), str
);
2764 (void) restore_line_pointer (e
);
2767 as_bad (_("missing argument for %s_check directive"), str
);
2769 demand_empty_rest_of_line ();
2773 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2774 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2776 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2777 static const char *arch
;
2779 /* Intel LIOM is only supported on ELF. */
2785 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2786 use default_arch. */
2787 arch
= cpu_arch_name
;
2789 arch
= default_arch
;
2792 /* If we are targeting Intel MCU, we must enable it. */
2793 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_IAMCU
2794 || new_flag
.bitfield
.cpuiamcu
)
2797 /* If we are targeting Intel L1OM, we must enable it. */
2798 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_L1OM
2799 || new_flag
.bitfield
.cpul1om
)
2802 /* If we are targeting Intel K1OM, we must enable it. */
2803 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_K1OM
2804 || new_flag
.bitfield
.cpuk1om
)
2807 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2812 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2816 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2819 int e
= get_symbol_name (&string
);
2821 i386_cpu_flags flags
;
2823 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2825 if (strcmp (string
, cpu_arch
[j
].name
) == 0)
2827 check_cpu_arch_compatible (string
, cpu_arch
[j
].flags
);
2831 cpu_arch_name
= cpu_arch
[j
].name
;
2832 cpu_sub_arch_name
= NULL
;
2833 cpu_arch_flags
= cpu_arch
[j
].flags
;
2834 if (flag_code
== CODE_64BIT
)
2836 cpu_arch_flags
.bitfield
.cpu64
= 1;
2837 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2841 cpu_arch_flags
.bitfield
.cpu64
= 0;
2842 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2844 cpu_arch_isa
= cpu_arch
[j
].type
;
2845 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
2846 if (!cpu_arch_tune_set
)
2848 cpu_arch_tune
= cpu_arch_isa
;
2849 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2854 flags
= cpu_flags_or (cpu_arch_flags
,
2857 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2859 if (cpu_sub_arch_name
)
2861 char *name
= cpu_sub_arch_name
;
2862 cpu_sub_arch_name
= concat (name
,
2864 (const char *) NULL
);
2868 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
2869 cpu_arch_flags
= flags
;
2870 cpu_arch_isa_flags
= flags
;
2874 = cpu_flags_or (cpu_arch_isa_flags
,
2876 (void) restore_line_pointer (e
);
2877 demand_empty_rest_of_line ();
2882 if (*string
== '.' && j
>= ARRAY_SIZE (cpu_arch
))
2884 /* Disable an ISA extension. */
2885 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
2886 if (strcmp (string
+ 1, cpu_noarch
[j
].name
) == 0)
2888 flags
= cpu_flags_and_not (cpu_arch_flags
,
2889 cpu_noarch
[j
].flags
);
2890 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2892 if (cpu_sub_arch_name
)
2894 char *name
= cpu_sub_arch_name
;
2895 cpu_sub_arch_name
= concat (name
, string
,
2896 (const char *) NULL
);
2900 cpu_sub_arch_name
= xstrdup (string
);
2901 cpu_arch_flags
= flags
;
2902 cpu_arch_isa_flags
= flags
;
2904 (void) restore_line_pointer (e
);
2905 demand_empty_rest_of_line ();
2909 j
= ARRAY_SIZE (cpu_arch
);
2912 if (j
>= ARRAY_SIZE (cpu_arch
))
2913 as_bad (_("no such architecture: `%s'"), string
);
2915 *input_line_pointer
= e
;
2918 as_bad (_("missing cpu architecture"));
2920 no_cond_jump_promotion
= 0;
2921 if (*input_line_pointer
== ','
2922 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2927 ++input_line_pointer
;
2928 e
= get_symbol_name (&string
);
2930 if (strcmp (string
, "nojumps") == 0)
2931 no_cond_jump_promotion
= 1;
2932 else if (strcmp (string
, "jumps") == 0)
2935 as_bad (_("no such architecture modifier: `%s'"), string
);
2937 (void) restore_line_pointer (e
);
2940 demand_empty_rest_of_line ();
2943 enum bfd_architecture
2946 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2948 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2949 || flag_code
!= CODE_64BIT
)
2950 as_fatal (_("Intel L1OM is 64bit ELF only"));
2951 return bfd_arch_l1om
;
2953 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2955 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2956 || flag_code
!= CODE_64BIT
)
2957 as_fatal (_("Intel K1OM is 64bit ELF only"));
2958 return bfd_arch_k1om
;
2960 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2962 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2963 || flag_code
== CODE_64BIT
)
2964 as_fatal (_("Intel MCU is 32bit ELF only"));
2965 return bfd_arch_iamcu
;
2968 return bfd_arch_i386
;
2974 if (!strncmp (default_arch
, "x86_64", 6))
2976 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2978 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2979 || default_arch
[6] != '\0')
2980 as_fatal (_("Intel L1OM is 64bit ELF only"));
2981 return bfd_mach_l1om
;
2983 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2985 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2986 || default_arch
[6] != '\0')
2987 as_fatal (_("Intel K1OM is 64bit ELF only"));
2988 return bfd_mach_k1om
;
2990 else if (default_arch
[6] == '\0')
2991 return bfd_mach_x86_64
;
2993 return bfd_mach_x64_32
;
2995 else if (!strcmp (default_arch
, "i386")
2996 || !strcmp (default_arch
, "iamcu"))
2998 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
3000 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
)
3001 as_fatal (_("Intel MCU is 32bit ELF only"));
3002 return bfd_mach_i386_iamcu
;
3005 return bfd_mach_i386_i386
;
3008 as_fatal (_("unknown architecture"));
3014 const char *hash_err
;
3016 /* Support pseudo prefixes like {disp32}. */
3017 lex_type
['{'] = LEX_BEGIN_NAME
;
3019 /* Initialize op_hash hash table. */
3020 op_hash
= hash_new ();
3023 const insn_template
*optab
;
3024 templates
*core_optab
;
3026 /* Setup for loop. */
3028 core_optab
= XNEW (templates
);
3029 core_optab
->start
= optab
;
3034 if (optab
->name
== NULL
3035 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
3037 /* different name --> ship out current template list;
3038 add to hash table; & begin anew. */
3039 core_optab
->end
= optab
;
3040 hash_err
= hash_insert (op_hash
,
3042 (void *) core_optab
);
3045 as_fatal (_("can't hash %s: %s"),
3049 if (optab
->name
== NULL
)
3051 core_optab
= XNEW (templates
);
3052 core_optab
->start
= optab
;
3057 /* Initialize reg_hash hash table. */
3058 reg_hash
= hash_new ();
3060 const reg_entry
*regtab
;
3061 unsigned int regtab_size
= i386_regtab_size
;
3063 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
3065 hash_err
= hash_insert (reg_hash
, regtab
->reg_name
, (void *) regtab
);
3067 as_fatal (_("can't hash %s: %s"),
3073 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
3078 for (c
= 0; c
< 256; c
++)
3083 mnemonic_chars
[c
] = c
;
3084 register_chars
[c
] = c
;
3085 operand_chars
[c
] = c
;
3087 else if (ISLOWER (c
))
3089 mnemonic_chars
[c
] = c
;
3090 register_chars
[c
] = c
;
3091 operand_chars
[c
] = c
;
3093 else if (ISUPPER (c
))
3095 mnemonic_chars
[c
] = TOLOWER (c
);
3096 register_chars
[c
] = mnemonic_chars
[c
];
3097 operand_chars
[c
] = c
;
3099 else if (c
== '{' || c
== '}')
3101 mnemonic_chars
[c
] = c
;
3102 operand_chars
[c
] = c
;
3105 if (ISALPHA (c
) || ISDIGIT (c
))
3106 identifier_chars
[c
] = c
;
3109 identifier_chars
[c
] = c
;
3110 operand_chars
[c
] = c
;
3115 identifier_chars
['@'] = '@';
3118 identifier_chars
['?'] = '?';
3119 operand_chars
['?'] = '?';
3121 digit_chars
['-'] = '-';
3122 mnemonic_chars
['_'] = '_';
3123 mnemonic_chars
['-'] = '-';
3124 mnemonic_chars
['.'] = '.';
3125 identifier_chars
['_'] = '_';
3126 identifier_chars
['.'] = '.';
3128 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
3129 operand_chars
[(unsigned char) *p
] = *p
;
3132 if (flag_code
== CODE_64BIT
)
3134 #if defined (OBJ_COFF) && defined (TE_PE)
3135 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
3138 x86_dwarf2_return_column
= 16;
3140 x86_cie_data_alignment
= -8;
3144 x86_dwarf2_return_column
= 8;
3145 x86_cie_data_alignment
= -4;
3148 /* NB: FUSED_JCC_PADDING frag must have sufficient room so that it
3149 can be turned into BRANCH_PREFIX frag. */
3150 if (align_branch_prefix_size
> MAX_FUSED_JCC_PADDING_SIZE
)
3155 i386_print_statistics (FILE *file
)
3157 hash_print_statistics (file
, "i386 opcode", op_hash
);
3158 hash_print_statistics (file
, "i386 register", reg_hash
);
3163 /* Debugging routines for md_assemble. */
3164 static void pte (insn_template
*);
3165 static void pt (i386_operand_type
);
3166 static void pe (expressionS
*);
3167 static void ps (symbolS
*);
3170 pi (const char *line
, i386_insn
*x
)
3174 fprintf (stdout
, "%s: template ", line
);
3176 fprintf (stdout
, " address: base %s index %s scale %x\n",
3177 x
->base_reg
? x
->base_reg
->reg_name
: "none",
3178 x
->index_reg
? x
->index_reg
->reg_name
: "none",
3179 x
->log2_scale_factor
);
3180 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
3181 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
3182 fprintf (stdout
, " sib: base %x index %x scale %x\n",
3183 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
3184 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
3185 (x
->rex
& REX_W
) != 0,
3186 (x
->rex
& REX_R
) != 0,
3187 (x
->rex
& REX_X
) != 0,
3188 (x
->rex
& REX_B
) != 0);
3189 for (j
= 0; j
< x
->operands
; j
++)
3191 fprintf (stdout
, " #%d: ", j
+ 1);
3193 fprintf (stdout
, "\n");
3194 if (x
->types
[j
].bitfield
.class == Reg
3195 || x
->types
[j
].bitfield
.class == RegMMX
3196 || x
->types
[j
].bitfield
.class == RegSIMD
3197 || x
->types
[j
].bitfield
.class == SReg
3198 || x
->types
[j
].bitfield
.class == RegCR
3199 || x
->types
[j
].bitfield
.class == RegDR
3200 || x
->types
[j
].bitfield
.class == RegTR
)
3201 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
3202 if (operand_type_check (x
->types
[j
], imm
))
3204 if (operand_type_check (x
->types
[j
], disp
))
3205 pe (x
->op
[j
].disps
);
3210 pte (insn_template
*t
)
3213 fprintf (stdout
, " %d operands ", t
->operands
);
3214 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
3215 if (t
->extension_opcode
!= None
)
3216 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
3217 if (t
->opcode_modifier
.d
)
3218 fprintf (stdout
, "D");
3219 if (t
->opcode_modifier
.w
)
3220 fprintf (stdout
, "W");
3221 fprintf (stdout
, "\n");
3222 for (j
= 0; j
< t
->operands
; j
++)
3224 fprintf (stdout
, " #%d type ", j
+ 1);
3225 pt (t
->operand_types
[j
]);
3226 fprintf (stdout
, "\n");
3233 fprintf (stdout
, " operation %d\n", e
->X_op
);
3234 fprintf (stdout
, " add_number %ld (%lx)\n",
3235 (long) e
->X_add_number
, (long) e
->X_add_number
);
3236 if (e
->X_add_symbol
)
3238 fprintf (stdout
, " add_symbol ");
3239 ps (e
->X_add_symbol
);
3240 fprintf (stdout
, "\n");
3244 fprintf (stdout
, " op_symbol ");
3245 ps (e
->X_op_symbol
);
3246 fprintf (stdout
, "\n");
3253 fprintf (stdout
, "%s type %s%s",
3255 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
3256 segment_name (S_GET_SEGMENT (s
)));
3259 static struct type_name
3261 i386_operand_type mask
;
3264 const type_names
[] =
3266 { OPERAND_TYPE_REG8
, "r8" },
3267 { OPERAND_TYPE_REG16
, "r16" },
3268 { OPERAND_TYPE_REG32
, "r32" },
3269 { OPERAND_TYPE_REG64
, "r64" },
3270 { OPERAND_TYPE_ACC8
, "acc8" },
3271 { OPERAND_TYPE_ACC16
, "acc16" },
3272 { OPERAND_TYPE_ACC32
, "acc32" },
3273 { OPERAND_TYPE_ACC64
, "acc64" },
3274 { OPERAND_TYPE_IMM8
, "i8" },
3275 { OPERAND_TYPE_IMM8
, "i8s" },
3276 { OPERAND_TYPE_IMM16
, "i16" },
3277 { OPERAND_TYPE_IMM32
, "i32" },
3278 { OPERAND_TYPE_IMM32S
, "i32s" },
3279 { OPERAND_TYPE_IMM64
, "i64" },
3280 { OPERAND_TYPE_IMM1
, "i1" },
3281 { OPERAND_TYPE_BASEINDEX
, "BaseIndex" },
3282 { OPERAND_TYPE_DISP8
, "d8" },
3283 { OPERAND_TYPE_DISP16
, "d16" },
3284 { OPERAND_TYPE_DISP32
, "d32" },
3285 { OPERAND_TYPE_DISP32S
, "d32s" },
3286 { OPERAND_TYPE_DISP64
, "d64" },
3287 { OPERAND_TYPE_INOUTPORTREG
, "InOutPortReg" },
3288 { OPERAND_TYPE_SHIFTCOUNT
, "ShiftCount" },
3289 { OPERAND_TYPE_CONTROL
, "control reg" },
3290 { OPERAND_TYPE_TEST
, "test reg" },
3291 { OPERAND_TYPE_DEBUG
, "debug reg" },
3292 { OPERAND_TYPE_FLOATREG
, "FReg" },
3293 { OPERAND_TYPE_FLOATACC
, "FAcc" },
3294 { OPERAND_TYPE_SREG
, "SReg" },
3295 { OPERAND_TYPE_REGMMX
, "rMMX" },
3296 { OPERAND_TYPE_REGXMM
, "rXMM" },
3297 { OPERAND_TYPE_REGYMM
, "rYMM" },
3298 { OPERAND_TYPE_REGZMM
, "rZMM" },
3299 { OPERAND_TYPE_REGMASK
, "Mask reg" },
3303 pt (i386_operand_type t
)
3306 i386_operand_type a
;
3308 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
3310 a
= operand_type_and (t
, type_names
[j
].mask
);
3311 if (operand_type_equal (&a
, &type_names
[j
].mask
))
3312 fprintf (stdout
, "%s, ", type_names
[j
].name
);
3317 #endif /* DEBUG386 */
3319 static bfd_reloc_code_real_type
3320 reloc (unsigned int size
,
3323 bfd_reloc_code_real_type other
)
3325 if (other
!= NO_RELOC
)
3327 reloc_howto_type
*rel
;
3332 case BFD_RELOC_X86_64_GOT32
:
3333 return BFD_RELOC_X86_64_GOT64
;
3335 case BFD_RELOC_X86_64_GOTPLT64
:
3336 return BFD_RELOC_X86_64_GOTPLT64
;
3338 case BFD_RELOC_X86_64_PLTOFF64
:
3339 return BFD_RELOC_X86_64_PLTOFF64
;
3341 case BFD_RELOC_X86_64_GOTPC32
:
3342 other
= BFD_RELOC_X86_64_GOTPC64
;
3344 case BFD_RELOC_X86_64_GOTPCREL
:
3345 other
= BFD_RELOC_X86_64_GOTPCREL64
;
3347 case BFD_RELOC_X86_64_TPOFF32
:
3348 other
= BFD_RELOC_X86_64_TPOFF64
;
3350 case BFD_RELOC_X86_64_DTPOFF32
:
3351 other
= BFD_RELOC_X86_64_DTPOFF64
;
3357 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3358 if (other
== BFD_RELOC_SIZE32
)
3361 other
= BFD_RELOC_SIZE64
;
3364 as_bad (_("there are no pc-relative size relocations"));
3370 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
3371 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
3374 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
3376 as_bad (_("unknown relocation (%u)"), other
);
3377 else if (size
!= bfd_get_reloc_size (rel
))
3378 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3379 bfd_get_reloc_size (rel
),
3381 else if (pcrel
&& !rel
->pc_relative
)
3382 as_bad (_("non-pc-relative relocation for pc-relative field"));
3383 else if ((rel
->complain_on_overflow
== complain_overflow_signed
3385 || (rel
->complain_on_overflow
== complain_overflow_unsigned
3387 as_bad (_("relocated field and relocation type differ in signedness"));
3396 as_bad (_("there are no unsigned pc-relative relocations"));
3399 case 1: return BFD_RELOC_8_PCREL
;
3400 case 2: return BFD_RELOC_16_PCREL
;
3401 case 4: return BFD_RELOC_32_PCREL
;
3402 case 8: return BFD_RELOC_64_PCREL
;
3404 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
3411 case 4: return BFD_RELOC_X86_64_32S
;
3416 case 1: return BFD_RELOC_8
;
3417 case 2: return BFD_RELOC_16
;
3418 case 4: return BFD_RELOC_32
;
3419 case 8: return BFD_RELOC_64
;
3421 as_bad (_("cannot do %s %u byte relocation"),
3422 sign
> 0 ? "signed" : "unsigned", size
);
3428 /* Here we decide which fixups can be adjusted to make them relative to
3429 the beginning of the section instead of the symbol. Basically we need
3430 to make sure that the dynamic relocations are done correctly, so in
3431 some cases we force the original symbol to be used. */
3434 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
3436 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3440 /* Don't adjust pc-relative references to merge sections in 64-bit
3442 if (use_rela_relocations
3443 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
3447 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3448 and changed later by validate_fix. */
3449 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
3450 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
3453 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3454 for size relocations. */
3455 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
3456 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
3457 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
3458 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
3459 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32X
3460 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
3461 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
3462 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
3463 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
3464 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
3465 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
3466 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
3467 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
3468 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
3469 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
3470 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
3471 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
3472 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCRELX
3473 || fixP
->fx_r_type
== BFD_RELOC_X86_64_REX_GOTPCRELX
3474 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
3475 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
3476 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
3477 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
3478 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
3479 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
3480 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
3481 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
3482 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
3483 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
3484 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
3485 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
/* Classify x87 mnemonic MNEMONIC for Intel syntax memory operand size
   checking: 0 = not a math op, 1 = FP math op, 2 = integer op (fi*),
   3 = control/state op (fldcw, fnstenv, fsave, fstcw, ...).  */

static int
intel_float_operand (const char *mnemonic)
{
  /* Note that the value returned is meaningful only for opcodes with (memory)
     operands, hence the code here is free to improperly handle opcodes that
     have no operands (for better performance and smaller code). */

  if (mnemonic[0] != 'f')
    return 0; /* non-math */

  switch (mnemonic[1])
    {
    /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
       the fs segment override prefix not currently handled because no
       call path can make opcodes without operands get here */
    case 'i':
      return 2 /* integer op */;
    case 'l':
      if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
	return 3; /* fldcw/fldenv */
      break;
    case 'n':
      if (mnemonic[2] != 'o' /* fnop */)
	return 3; /* non-waiting control op */
      break;
    case 'r':
      if (mnemonic[2] == 's')
	return 3; /* frstor/frstpm */
      break;
    case 's':
      if (mnemonic[2] == 'a')
	return 3; /* fsave */
      if (mnemonic[2] == 't')
	{
	  switch (mnemonic[3])
	    {
	    case 'c': /* fstcw */
	    case 'd': /* fstdw */
	    case 'e': /* fstenv */
	    case 's': /* fsts[gw] */
	      return 3;
	    }
	}
      break;
    case 'x':
      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
	return 0; /* fxsave/fxrstor are not really math ops */
      break;
    }

  return 1;
}
3544 /* Build the VEX prefix. */
3547 build_vex_prefix (const insn_template
*t
)
3549 unsigned int register_specifier
;
3550 unsigned int implied_prefix
;
3551 unsigned int vector_length
;
3554 /* Check register specifier. */
3555 if (i
.vex
.register_specifier
)
3557 register_specifier
=
3558 ~register_number (i
.vex
.register_specifier
) & 0xf;
3559 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
3562 register_specifier
= 0xf;
3564 /* Use 2-byte VEX prefix by swapping destination and source operand
3565 if there are more than 1 register operand. */
3566 if (i
.reg_operands
> 1
3567 && i
.vec_encoding
!= vex_encoding_vex3
3568 && i
.dir_encoding
== dir_encoding_default
3569 && i
.operands
== i
.reg_operands
3570 && operand_type_equal (&i
.types
[0], &i
.types
[i
.operands
- 1])
3571 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3572 && (i
.tm
.opcode_modifier
.load
|| i
.tm
.opcode_modifier
.d
)
3575 unsigned int xchg
= i
.operands
- 1;
3576 union i386_op temp_op
;
3577 i386_operand_type temp_type
;
3579 temp_type
= i
.types
[xchg
];
3580 i
.types
[xchg
] = i
.types
[0];
3581 i
.types
[0] = temp_type
;
3582 temp_op
= i
.op
[xchg
];
3583 i
.op
[xchg
] = i
.op
[0];
3586 gas_assert (i
.rm
.mode
== 3);
3590 i
.rm
.regmem
= i
.rm
.reg
;
3593 if (i
.tm
.opcode_modifier
.d
)
3594 i
.tm
.base_opcode
^= (i
.tm
.base_opcode
& 0xee) != 0x6e
3595 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
3596 else /* Use the next insn. */
3600 /* Use 2-byte VEX prefix by swapping commutative source operands if there
3601 are no memory operands and at least 3 register ones. */
3602 if (i
.reg_operands
>= 3
3603 && i
.vec_encoding
!= vex_encoding_vex3
3604 && i
.reg_operands
== i
.operands
- i
.imm_operands
3605 && i
.tm
.opcode_modifier
.vex
3606 && i
.tm
.opcode_modifier
.commutative
3607 && (i
.tm
.opcode_modifier
.sse2avx
|| optimize
> 1)
3609 && i
.vex
.register_specifier
3610 && !(i
.vex
.register_specifier
->reg_flags
& RegRex
))
3612 unsigned int xchg
= i
.operands
- i
.reg_operands
;
3613 union i386_op temp_op
;
3614 i386_operand_type temp_type
;
3616 gas_assert (i
.tm
.opcode_modifier
.vexopcode
== VEX0F
);
3617 gas_assert (!i
.tm
.opcode_modifier
.sae
);
3618 gas_assert (operand_type_equal (&i
.types
[i
.operands
- 2],
3619 &i
.types
[i
.operands
- 3]));
3620 gas_assert (i
.rm
.mode
== 3);
3622 temp_type
= i
.types
[xchg
];
3623 i
.types
[xchg
] = i
.types
[xchg
+ 1];
3624 i
.types
[xchg
+ 1] = temp_type
;
3625 temp_op
= i
.op
[xchg
];
3626 i
.op
[xchg
] = i
.op
[xchg
+ 1];
3627 i
.op
[xchg
+ 1] = temp_op
;
3630 xchg
= i
.rm
.regmem
| 8;
3631 i
.rm
.regmem
= ~register_specifier
& 0xf;
3632 gas_assert (!(i
.rm
.regmem
& 8));
3633 i
.vex
.register_specifier
+= xchg
- i
.rm
.regmem
;
3634 register_specifier
= ~xchg
& 0xf;
3637 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3638 vector_length
= avxscalar
;
3639 else if (i
.tm
.opcode_modifier
.vex
== VEX256
)
3645 /* Determine vector length from the last multi-length vector
3648 for (op
= t
->operands
; op
--;)
3649 if (t
->operand_types
[op
].bitfield
.xmmword
3650 && t
->operand_types
[op
].bitfield
.ymmword
3651 && i
.types
[op
].bitfield
.ymmword
)
3658 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3663 case DATA_PREFIX_OPCODE
:
3666 case REPE_PREFIX_OPCODE
:
3669 case REPNE_PREFIX_OPCODE
:
3676 /* Check the REX.W bit and VEXW. */
3677 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3678 w
= (vexwig
== vexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3679 else if (i
.tm
.opcode_modifier
.vexw
)
3680 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3682 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: vexwig
== vexw1
) ? 1 : 0;
3684 /* Use 2-byte VEX prefix if possible. */
3686 && i
.vec_encoding
!= vex_encoding_vex3
3687 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3688 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
3690 /* 2-byte VEX prefix. */
3694 i
.vex
.bytes
[0] = 0xc5;
3696 /* Check the REX.R bit. */
3697 r
= (i
.rex
& REX_R
) ? 0 : 1;
3698 i
.vex
.bytes
[1] = (r
<< 7
3699 | register_specifier
<< 3
3700 | vector_length
<< 2
3705 /* 3-byte VEX prefix. */
3710 switch (i
.tm
.opcode_modifier
.vexopcode
)
3714 i
.vex
.bytes
[0] = 0xc4;
3718 i
.vex
.bytes
[0] = 0xc4;
3722 i
.vex
.bytes
[0] = 0xc4;
3726 i
.vex
.bytes
[0] = 0x8f;
3730 i
.vex
.bytes
[0] = 0x8f;
3734 i
.vex
.bytes
[0] = 0x8f;
3740 /* The high 3 bits of the second VEX byte are 1's compliment
3741 of RXB bits from REX. */
3742 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3744 i
.vex
.bytes
[2] = (w
<< 7
3745 | register_specifier
<< 3
3746 | vector_length
<< 2
3751 static INLINE bfd_boolean
3752 is_evex_encoding (const insn_template
*t
)
3754 return t
->opcode_modifier
.evex
|| t
->opcode_modifier
.disp8memshift
3755 || t
->opcode_modifier
.broadcast
|| t
->opcode_modifier
.masking
3756 || t
->opcode_modifier
.sae
;
3759 static INLINE bfd_boolean
3760 is_any_vex_encoding (const insn_template
*t
)
3762 return t
->opcode_modifier
.vex
|| t
->opcode_modifier
.vexopcode
3763 || is_evex_encoding (t
);
3766 /* Build the EVEX prefix. */
3769 build_evex_prefix (void)
3771 unsigned int register_specifier
;
3772 unsigned int implied_prefix
;
3774 rex_byte vrex_used
= 0;
3776 /* Check register specifier. */
3777 if (i
.vex
.register_specifier
)
3779 gas_assert ((i
.vrex
& REX_X
) == 0);
3781 register_specifier
= i
.vex
.register_specifier
->reg_num
;
3782 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
3783 register_specifier
+= 8;
3784 /* The upper 16 registers are encoded in the fourth byte of the
3786 if (!(i
.vex
.register_specifier
->reg_flags
& RegVRex
))
3787 i
.vex
.bytes
[3] = 0x8;
3788 register_specifier
= ~register_specifier
& 0xf;
3792 register_specifier
= 0xf;
3794 /* Encode upper 16 vector index register in the fourth byte of
3796 if (!(i
.vrex
& REX_X
))
3797 i
.vex
.bytes
[3] = 0x8;
3802 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3807 case DATA_PREFIX_OPCODE
:
3810 case REPE_PREFIX_OPCODE
:
3813 case REPNE_PREFIX_OPCODE
:
3820 /* 4 byte EVEX prefix. */
3822 i
.vex
.bytes
[0] = 0x62;
3825 switch (i
.tm
.opcode_modifier
.vexopcode
)
3841 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3843 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3845 /* The fifth bit of the second EVEX byte is 1's compliment of the
3846 REX_R bit in VREX. */
3847 if (!(i
.vrex
& REX_R
))
3848 i
.vex
.bytes
[1] |= 0x10;
3852 if ((i
.reg_operands
+ i
.imm_operands
) == i
.operands
)
3854 /* When all operands are registers, the REX_X bit in REX is not
3855 used. We reuse it to encode the upper 16 registers, which is
3856 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3857 as 1's compliment. */
3858 if ((i
.vrex
& REX_B
))
3861 i
.vex
.bytes
[1] &= ~0x40;
3865 /* EVEX instructions shouldn't need the REX prefix. */
3866 i
.vrex
&= ~vrex_used
;
3867 gas_assert (i
.vrex
== 0);
3869 /* Check the REX.W bit and VEXW. */
3870 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3871 w
= (evexwig
== evexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3872 else if (i
.tm
.opcode_modifier
.vexw
)
3873 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3875 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: evexwig
== evexw1
) ? 1 : 0;
3877 /* Encode the U bit. */
3878 implied_prefix
|= 0x4;
3880 /* The third byte of the EVEX prefix. */
3881 i
.vex
.bytes
[2] = (w
<< 7 | register_specifier
<< 3 | implied_prefix
);
3883 /* The fourth byte of the EVEX prefix. */
3884 /* The zeroing-masking bit. */
3885 if (i
.mask
&& i
.mask
->zeroing
)
3886 i
.vex
.bytes
[3] |= 0x80;
3888 /* Don't always set the broadcast bit if there is no RC. */
3891 /* Encode the vector length. */
3892 unsigned int vec_length
;
3894 if (!i
.tm
.opcode_modifier
.evex
3895 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
3899 /* Determine vector length from the last multi-length vector
3902 for (op
= i
.operands
; op
--;)
3903 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
3904 + i
.tm
.operand_types
[op
].bitfield
.ymmword
3905 + i
.tm
.operand_types
[op
].bitfield
.zmmword
> 1)
3907 if (i
.types
[op
].bitfield
.zmmword
)
3909 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3912 else if (i
.types
[op
].bitfield
.ymmword
)
3914 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3917 else if (i
.types
[op
].bitfield
.xmmword
)
3919 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3922 else if (i
.broadcast
&& (int) op
== i
.broadcast
->operand
)
3924 switch (i
.broadcast
->bytes
)
3927 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3930 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3933 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3942 if (op
>= MAX_OPERANDS
)
3946 switch (i
.tm
.opcode_modifier
.evex
)
3948 case EVEXLIG
: /* LL' is ignored */
3949 vec_length
= evexlig
<< 5;
3952 vec_length
= 0 << 5;
3955 vec_length
= 1 << 5;
3958 vec_length
= 2 << 5;
3964 i
.vex
.bytes
[3] |= vec_length
;
3965 /* Encode the broadcast bit. */
3967 i
.vex
.bytes
[3] |= 0x10;
3971 if (i
.rounding
->type
!= saeonly
)
3972 i
.vex
.bytes
[3] |= 0x10 | (i
.rounding
->type
<< 5);
3974 i
.vex
.bytes
[3] |= 0x10 | (evexrcig
<< 5);
3977 if (i
.mask
&& i
.mask
->mask
)
3978 i
.vex
.bytes
[3] |= i
.mask
->mask
->reg_num
;
3982 process_immext (void)
3986 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3987 which is coded in the same place as an 8-bit immediate field
3988 would be. Here we fake an 8-bit immediate operand from the
3989 opcode suffix stored in tm.extension_opcode.
3991 AVX instructions also use this encoding, for some of
3992 3 argument instructions. */
3994 gas_assert (i
.imm_operands
<= 1
3996 || (is_any_vex_encoding (&i
.tm
)
3997 && i
.operands
<= 4)));
3999 exp
= &im_expressions
[i
.imm_operands
++];
4000 i
.op
[i
.operands
].imms
= exp
;
4001 i
.types
[i
.operands
] = imm8
;
4003 exp
->X_op
= O_constant
;
4004 exp
->X_add_number
= i
.tm
.extension_opcode
;
4005 i
.tm
.extension_opcode
= None
;
4012 switch (i
.tm
.opcode_modifier
.hleprefixok
)
4017 as_bad (_("invalid instruction `%s' after `%s'"),
4018 i
.tm
.name
, i
.hle_prefix
);
4021 if (i
.prefix
[LOCK_PREFIX
])
4023 as_bad (_("missing `lock' with `%s'"), i
.hle_prefix
);
4027 case HLEPrefixRelease
:
4028 if (i
.prefix
[HLE_PREFIX
] != XRELEASE_PREFIX_OPCODE
)
4030 as_bad (_("instruction `%s' after `xacquire' not allowed"),
4034 if (i
.mem_operands
== 0 || !(i
.flags
[i
.operands
- 1] & Operand_Mem
))
4036 as_bad (_("memory destination needed for instruction `%s'"
4037 " after `xrelease'"), i
.tm
.name
);
4044 /* Try the shortest encoding by shortening operand size. */
4047 optimize_encoding (void)
4051 if (optimize_for_space
4052 && !is_any_vex_encoding (&i
.tm
)
4053 && i
.reg_operands
== 1
4054 && i
.imm_operands
== 1
4055 && !i
.types
[1].bitfield
.byte
4056 && i
.op
[0].imms
->X_op
== O_constant
4057 && fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4058 && (i
.tm
.base_opcode
== 0xa8
4059 || (i
.tm
.base_opcode
== 0xf6
4060 && i
.tm
.extension_opcode
== 0x0)))
4063 test $imm7, %r64/%r32/%r16 -> test $imm7, %r8
4065 unsigned int base_regnum
= i
.op
[1].regs
->reg_num
;
4066 if (flag_code
== CODE_64BIT
|| base_regnum
< 4)
4068 i
.types
[1].bitfield
.byte
= 1;
4069 /* Ignore the suffix. */
4071 /* Convert to byte registers. */
4072 if (i
.types
[1].bitfield
.word
)
4074 else if (i
.types
[1].bitfield
.dword
)
4078 if (!(i
.op
[1].regs
->reg_flags
& RegRex
) && base_regnum
< 4)
4083 else if (flag_code
== CODE_64BIT
4084 && !is_any_vex_encoding (&i
.tm
)
4085 && ((i
.types
[1].bitfield
.qword
4086 && i
.reg_operands
== 1
4087 && i
.imm_operands
== 1
4088 && i
.op
[0].imms
->X_op
== O_constant
4089 && ((i
.tm
.base_opcode
== 0xb8
4090 && i
.tm
.extension_opcode
== None
4091 && fits_in_unsigned_long (i
.op
[0].imms
->X_add_number
))
4092 || (fits_in_imm31 (i
.op
[0].imms
->X_add_number
)
4093 && ((i
.tm
.base_opcode
== 0x24
4094 || i
.tm
.base_opcode
== 0xa8)
4095 || (i
.tm
.base_opcode
== 0x80
4096 && i
.tm
.extension_opcode
== 0x4)
4097 || ((i
.tm
.base_opcode
== 0xf6
4098 || (i
.tm
.base_opcode
| 1) == 0xc7)
4099 && i
.tm
.extension_opcode
== 0x0)))
4100 || (fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4101 && i
.tm
.base_opcode
== 0x83
4102 && i
.tm
.extension_opcode
== 0x4)))
4103 || (i
.types
[0].bitfield
.qword
4104 && ((i
.reg_operands
== 2
4105 && i
.op
[0].regs
== i
.op
[1].regs
4106 && (i
.tm
.base_opcode
== 0x30
4107 || i
.tm
.base_opcode
== 0x28))
4108 || (i
.reg_operands
== 1
4110 && i
.tm
.base_opcode
== 0x30)))))
4113 andq $imm31, %r64 -> andl $imm31, %r32
4114 andq $imm7, %r64 -> andl $imm7, %r32
4115 testq $imm31, %r64 -> testl $imm31, %r32
4116 xorq %r64, %r64 -> xorl %r32, %r32
4117 subq %r64, %r64 -> subl %r32, %r32
4118 movq $imm31, %r64 -> movl $imm31, %r32
4119 movq $imm32, %r64 -> movl $imm32, %r32
4121 i
.tm
.opcode_modifier
.norex64
= 1;
4122 if (i
.tm
.base_opcode
== 0xb8 || (i
.tm
.base_opcode
| 1) == 0xc7)
4125 movq $imm31, %r64 -> movl $imm31, %r32
4126 movq $imm32, %r64 -> movl $imm32, %r32
4128 i
.tm
.operand_types
[0].bitfield
.imm32
= 1;
4129 i
.tm
.operand_types
[0].bitfield
.imm32s
= 0;
4130 i
.tm
.operand_types
[0].bitfield
.imm64
= 0;
4131 i
.types
[0].bitfield
.imm32
= 1;
4132 i
.types
[0].bitfield
.imm32s
= 0;
4133 i
.types
[0].bitfield
.imm64
= 0;
4134 i
.types
[1].bitfield
.dword
= 1;
4135 i
.types
[1].bitfield
.qword
= 0;
4136 if ((i
.tm
.base_opcode
| 1) == 0xc7)
4139 movq $imm31, %r64 -> movl $imm31, %r32
4141 i
.tm
.base_opcode
= 0xb8;
4142 i
.tm
.extension_opcode
= None
;
4143 i
.tm
.opcode_modifier
.w
= 0;
4144 i
.tm
.opcode_modifier
.modrm
= 0;
4148 else if (optimize
> 1
4149 && !optimize_for_space
4150 && !is_any_vex_encoding (&i
.tm
)
4151 && i
.reg_operands
== 2
4152 && i
.op
[0].regs
== i
.op
[1].regs
4153 && ((i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x8
4154 || (i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x20)
4155 && (flag_code
!= CODE_64BIT
|| !i
.types
[0].bitfield
.dword
))
4158 andb %rN, %rN -> testb %rN, %rN
4159 andw %rN, %rN -> testw %rN, %rN
4160 andq %rN, %rN -> testq %rN, %rN
4161 orb %rN, %rN -> testb %rN, %rN
4162 orw %rN, %rN -> testw %rN, %rN
4163 orq %rN, %rN -> testq %rN, %rN
4165 and outside of 64-bit mode
4167 andl %rN, %rN -> testl %rN, %rN
4168 orl %rN, %rN -> testl %rN, %rN
4170 i
.tm
.base_opcode
= 0x84 | (i
.tm
.base_opcode
& 1);
4172 else if (i
.reg_operands
== 3
4173 && i
.op
[0].regs
== i
.op
[1].regs
4174 && !i
.types
[2].bitfield
.xmmword
4175 && (i
.tm
.opcode_modifier
.vex
4176 || ((!i
.mask
|| i
.mask
->zeroing
)
4178 && is_evex_encoding (&i
.tm
)
4179 && (i
.vec_encoding
!= vex_encoding_evex
4180 || cpu_arch_isa_flags
.bitfield
.cpuavx512vl
4181 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
4182 || (i
.tm
.operand_types
[2].bitfield
.zmmword
4183 && i
.types
[2].bitfield
.ymmword
))))
4184 && ((i
.tm
.base_opcode
== 0x55
4185 || i
.tm
.base_opcode
== 0x6655
4186 || i
.tm
.base_opcode
== 0x66df
4187 || i
.tm
.base_opcode
== 0x57
4188 || i
.tm
.base_opcode
== 0x6657
4189 || i
.tm
.base_opcode
== 0x66ef
4190 || i
.tm
.base_opcode
== 0x66f8
4191 || i
.tm
.base_opcode
== 0x66f9
4192 || i
.tm
.base_opcode
== 0x66fa
4193 || i
.tm
.base_opcode
== 0x66fb
4194 || i
.tm
.base_opcode
== 0x42
4195 || i
.tm
.base_opcode
== 0x6642
4196 || i
.tm
.base_opcode
== 0x47
4197 || i
.tm
.base_opcode
== 0x6647)
4198 && i
.tm
.extension_opcode
== None
))
4201 VOP, one of vandnps, vandnpd, vxorps, vxorpd, vpsubb, vpsubd,
4203 EVEX VOP %zmmM, %zmmM, %zmmN
4204 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4205 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4206 EVEX VOP %ymmM, %ymmM, %ymmN
4207 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4208 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4209 VEX VOP %ymmM, %ymmM, %ymmN
4210 -> VEX VOP %xmmM, %xmmM, %xmmN
4211 VOP, one of vpandn and vpxor:
4212 VEX VOP %ymmM, %ymmM, %ymmN
4213 -> VEX VOP %xmmM, %xmmM, %xmmN
4214 VOP, one of vpandnd and vpandnq:
4215 EVEX VOP %zmmM, %zmmM, %zmmN
4216 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4217 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4218 EVEX VOP %ymmM, %ymmM, %ymmN
4219 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4220 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4221 VOP, one of vpxord and vpxorq:
4222 EVEX VOP %zmmM, %zmmM, %zmmN
4223 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4224 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4225 EVEX VOP %ymmM, %ymmM, %ymmN
4226 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4227 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4228 VOP, one of kxord and kxorq:
4229 VEX VOP %kM, %kM, %kN
4230 -> VEX kxorw %kM, %kM, %kN
4231 VOP, one of kandnd and kandnq:
4232 VEX VOP %kM, %kM, %kN
4233 -> VEX kandnw %kM, %kM, %kN
4235 if (is_evex_encoding (&i
.tm
))
4237 if (i
.vec_encoding
!= vex_encoding_evex
)
4239 i
.tm
.opcode_modifier
.vex
= VEX128
;
4240 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4241 i
.tm
.opcode_modifier
.evex
= 0;
4243 else if (optimize
> 1)
4244 i
.tm
.opcode_modifier
.evex
= EVEX128
;
4248 else if (i
.tm
.operand_types
[0].bitfield
.class == RegMask
)
4250 i
.tm
.base_opcode
&= 0xff;
4251 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4254 i
.tm
.opcode_modifier
.vex
= VEX128
;
4256 if (i
.tm
.opcode_modifier
.vex
)
4257 for (j
= 0; j
< 3; j
++)
4259 i
.types
[j
].bitfield
.xmmword
= 1;
4260 i
.types
[j
].bitfield
.ymmword
= 0;
4263 else if (i
.vec_encoding
!= vex_encoding_evex
4264 && !i
.types
[0].bitfield
.zmmword
4265 && !i
.types
[1].bitfield
.zmmword
4268 && is_evex_encoding (&i
.tm
)
4269 && ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x666f
4270 || (i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0xf36f
4271 || (i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0xf26f
4272 || (i
.tm
.base_opcode
& ~4) == 0x66db
4273 || (i
.tm
.base_opcode
& ~4) == 0x66eb)
4274 && i
.tm
.extension_opcode
== None
)
4277 VOP, one of vmovdqa32, vmovdqa64, vmovdqu8, vmovdqu16,
4278 vmovdqu32 and vmovdqu64:
4279 EVEX VOP %xmmM, %xmmN
4280 -> VEX vmovdqa|vmovdqu %xmmM, %xmmN (M and N < 16)
4281 EVEX VOP %ymmM, %ymmN
4282 -> VEX vmovdqa|vmovdqu %ymmM, %ymmN (M and N < 16)
4284 -> VEX vmovdqa|vmovdqu %xmmM, mem (M < 16)
4286 -> VEX vmovdqa|vmovdqu %ymmM, mem (M < 16)
4288 -> VEX mvmovdqa|vmovdquem, %xmmN (N < 16)
4290 -> VEX vmovdqa|vmovdqu mem, %ymmN (N < 16)
4291 VOP, one of vpand, vpandn, vpor, vpxor:
4292 EVEX VOP{d,q} %xmmL, %xmmM, %xmmN
4293 -> VEX VOP %xmmL, %xmmM, %xmmN (L, M, and N < 16)
4294 EVEX VOP{d,q} %ymmL, %ymmM, %ymmN
4295 -> VEX VOP %ymmL, %ymmM, %ymmN (L, M, and N < 16)
4296 EVEX VOP{d,q} mem, %xmmM, %xmmN
4297 -> VEX VOP mem, %xmmM, %xmmN (M and N < 16)
4298 EVEX VOP{d,q} mem, %ymmM, %ymmN
4299 -> VEX VOP mem, %ymmM, %ymmN (M and N < 16)
4301 for (j
= 0; j
< i
.operands
; j
++)
4302 if (operand_type_check (i
.types
[j
], disp
)
4303 && i
.op
[j
].disps
->X_op
== O_constant
)
4305 /* Since the VEX prefix has 2 or 3 bytes, the EVEX prefix
4306 has 4 bytes, EVEX Disp8 has 1 byte and VEX Disp32 has 4
4307 bytes, we choose EVEX Disp8 over VEX Disp32. */
4308 int evex_disp8
, vex_disp8
;
4309 unsigned int memshift
= i
.memshift
;
4310 offsetT n
= i
.op
[j
].disps
->X_add_number
;
4312 evex_disp8
= fits_in_disp8 (n
);
4314 vex_disp8
= fits_in_disp8 (n
);
4315 if (evex_disp8
!= vex_disp8
)
4317 i
.memshift
= memshift
;
4321 i
.types
[j
].bitfield
.disp8
= vex_disp8
;
4324 if ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0xf26f)
4325 i
.tm
.base_opcode
^= 0xf36f ^ 0xf26f;
4326 i
.tm
.opcode_modifier
.vex
4327 = i
.types
[0].bitfield
.ymmword
? VEX256
: VEX128
;
4328 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4329 /* VPAND, VPOR, and VPXOR are commutative. */
4330 if (i
.reg_operands
== 3 && i
.tm
.base_opcode
!= 0x66df)
4331 i
.tm
.opcode_modifier
.commutative
= 1;
4332 i
.tm
.opcode_modifier
.evex
= 0;
4333 i
.tm
.opcode_modifier
.masking
= 0;
4334 i
.tm
.opcode_modifier
.broadcast
= 0;
4335 i
.tm
.opcode_modifier
.disp8memshift
= 0;
4338 i
.types
[j
].bitfield
.disp8
4339 = fits_in_disp8 (i
.op
[j
].disps
->X_add_number
);
4343 /* Return non-zero for load instruction. */
4349 int any_vex_p
= is_any_vex_encoding (&i
.tm
);
4350 unsigned int base_opcode
= i
.tm
.base_opcode
| 1;
4354 /* Anysize insns: lea, invlpg, clflush, prefetchnta, prefetcht0,
4355 prefetcht1, prefetcht2, prefetchtw, bndmk, bndcl, bndcu, bndcn,
4356 bndstx, bndldx, prefetchwt1, clflushopt, clwb, cldemote. */
4357 if (i
.tm
.opcode_modifier
.anysize
)
4360 /* pop, popf, popa. */
4361 if (strcmp (i
.tm
.name
, "pop") == 0
4362 || i
.tm
.base_opcode
== 0x9d
4363 || i
.tm
.base_opcode
== 0x61)
4366 /* movs, cmps, lods, scas. */
4367 if ((i
.tm
.base_opcode
| 0xb) == 0xaf)
4371 if (base_opcode
== 0x6f
4372 || i
.tm
.base_opcode
== 0xd7)
4374 /* NB: For AMD-specific insns with implicit memory operands,
4375 they're intentionally not covered. */
4378 /* No memory operand. */
4379 if (!i
.mem_operands
)
4385 if (i
.tm
.base_opcode
== 0xae
4386 && i
.tm
.opcode_modifier
.vex
4387 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
4388 && i
.tm
.extension_opcode
== 2)
4393 /* test, not, neg, mul, imul, div, idiv. */
4394 if ((i
.tm
.base_opcode
== 0xf6 || i
.tm
.base_opcode
== 0xf7)
4395 && i
.tm
.extension_opcode
!= 1)
4399 if (base_opcode
== 0xff && i
.tm
.extension_opcode
<= 1)
4402 /* add, or, adc, sbb, and, sub, xor, cmp. */
4403 if (i
.tm
.base_opcode
>= 0x80 && i
.tm
.base_opcode
<= 0x83)
4406 /* bt, bts, btr, btc. */
4407 if (i
.tm
.base_opcode
== 0xfba
4408 && (i
.tm
.extension_opcode
>= 4 && i
.tm
.extension_opcode
<= 7))
4411 /* rol, ror, rcl, rcr, shl/sal, shr, sar. */
4412 if ((base_opcode
== 0xc1
4413 || (i
.tm
.base_opcode
>= 0xd0 && i
.tm
.base_opcode
<= 0xd3))
4414 && i
.tm
.extension_opcode
!= 6)
4417 /* cmpxchg8b, cmpxchg16b, xrstors. */
4418 if (i
.tm
.base_opcode
== 0xfc7
4419 && (i
.tm
.extension_opcode
== 1 || i
.tm
.extension_opcode
== 3))
4422 /* fxrstor, ldmxcsr, xrstor. */
4423 if (i
.tm
.base_opcode
== 0xfae
4424 && (i
.tm
.extension_opcode
== 1
4425 || i
.tm
.extension_opcode
== 2
4426 || i
.tm
.extension_opcode
== 5))
4429 /* lgdt, lidt, lmsw. */
4430 if (i
.tm
.base_opcode
== 0xf01
4431 && (i
.tm
.extension_opcode
== 2
4432 || i
.tm
.extension_opcode
== 3
4433 || i
.tm
.extension_opcode
== 6))
4437 if (i
.tm
.base_opcode
== 0xfc7
4438 && i
.tm
.extension_opcode
== 6)
4441 /* Check for x87 instructions. */
4442 if (i
.tm
.base_opcode
>= 0xd8 && i
.tm
.base_opcode
<= 0xdf)
4444 /* Skip fst, fstp, fstenv, fstcw. */
4445 if (i
.tm
.base_opcode
== 0xd9
4446 && (i
.tm
.extension_opcode
== 2
4447 || i
.tm
.extension_opcode
== 3
4448 || i
.tm
.extension_opcode
== 6
4449 || i
.tm
.extension_opcode
== 7))
4452 /* Skip fisttp, fist, fistp, fstp. */
4453 if (i
.tm
.base_opcode
== 0xdb
4454 && (i
.tm
.extension_opcode
== 1
4455 || i
.tm
.extension_opcode
== 2
4456 || i
.tm
.extension_opcode
== 3
4457 || i
.tm
.extension_opcode
== 7))
4460 /* Skip fisttp, fst, fstp, fsave, fstsw. */
4461 if (i
.tm
.base_opcode
== 0xdd
4462 && (i
.tm
.extension_opcode
== 1
4463 || i
.tm
.extension_opcode
== 2
4464 || i
.tm
.extension_opcode
== 3
4465 || i
.tm
.extension_opcode
== 6
4466 || i
.tm
.extension_opcode
== 7))
4469 /* Skip fisttp, fist, fistp, fbstp, fistp. */
4470 if (i
.tm
.base_opcode
== 0xdf
4471 && (i
.tm
.extension_opcode
== 1
4472 || i
.tm
.extension_opcode
== 2
4473 || i
.tm
.extension_opcode
== 3
4474 || i
.tm
.extension_opcode
== 6
4475 || i
.tm
.extension_opcode
== 7))
4482 dest
= i
.operands
- 1;
4484 /* Check fake imm8 operand and 3 source operands. */
4485 if ((i
.tm
.opcode_modifier
.immext
4486 || i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
4487 && i
.types
[dest
].bitfield
.imm8
)
4490 /* add, or, adc, sbb, and, sub, xor, cmp, test, xchg, xadd */
4492 && (base_opcode
== 0x1
4493 || base_opcode
== 0x9
4494 || base_opcode
== 0x11
4495 || base_opcode
== 0x19
4496 || base_opcode
== 0x21
4497 || base_opcode
== 0x29
4498 || base_opcode
== 0x31
4499 || base_opcode
== 0x39
4500 || (i
.tm
.base_opcode
>= 0x84 && i
.tm
.base_opcode
<= 0x87)
4501 || base_opcode
== 0xfc1))
4504 /* Check for load instruction. */
4505 return (i
.types
[dest
].bitfield
.class != ClassNone
4506 || i
.types
[dest
].bitfield
.instance
== Accum
);
4509 /* Output lfence, 0xfaee8, after instruction. */
4512 insert_lfence_after (void)
4514 if (lfence_after_load
&& load_insn_p ())
4516 /* There are also two REP string instructions that require
4517 special treatment. Specifically, the compare string (CMPS)
4518 and scan string (SCAS) instructions set EFLAGS in a manner
4519 that depends on the data being compared/scanned. When used
4520 with a REP prefix, the number of iterations may therefore
4521 vary depending on this data. If the data is a program secret
4522 chosen by the adversary using an LVI method,
4523 then this data-dependent behavior may leak some aspect
4525 if (((i
.tm
.base_opcode
| 0x1) == 0xa7
4526 || (i
.tm
.base_opcode
| 0x1) == 0xaf)
4527 && i
.prefix
[REP_PREFIX
])
4529 as_warn (_("`%s` changes flags which would affect control flow behavior"),
4532 char *p
= frag_more (3);
4539 /* Output lfence, 0xfaee8, before instruction. */
4542 insert_lfence_before (void)
4546 if (is_any_vex_encoding (&i
.tm
))
4549 if (i
.tm
.base_opcode
== 0xff
4550 && (i
.tm
.extension_opcode
== 2 || i
.tm
.extension_opcode
== 4))
4552 /* Insert lfence before indirect branch if needed. */
4554 if (lfence_before_indirect_branch
== lfence_branch_none
)
4557 if (i
.operands
!= 1)
4560 if (i
.reg_operands
== 1)
4562 /* Indirect branch via register. Don't insert lfence with
4563 -mlfence-after-load=yes. */
4564 if (lfence_after_load
4565 || lfence_before_indirect_branch
== lfence_branch_memory
)
4568 else if (i
.mem_operands
== 1
4569 && lfence_before_indirect_branch
!= lfence_branch_register
)
4571 as_warn (_("indirect `%s` with memory operand should be avoided"),
4578 if (last_insn
.kind
!= last_insn_other
4579 && last_insn
.seg
== now_seg
)
4581 as_warn_where (last_insn
.file
, last_insn
.line
,
4582 _("`%s` skips -mlfence-before-indirect-branch on `%s`"),
4583 last_insn
.name
, i
.tm
.name
);
4594 /* Output or/not/shl and lfence before near ret. */
4595 if (lfence_before_ret
!= lfence_before_ret_none
4596 && (i
.tm
.base_opcode
== 0xc2
4597 || i
.tm
.base_opcode
== 0xc3))
4599 if (last_insn
.kind
!= last_insn_other
4600 && last_insn
.seg
== now_seg
)
4602 as_warn_where (last_insn
.file
, last_insn
.line
,
4603 _("`%s` skips -mlfence-before-ret on `%s`"),
4604 last_insn
.name
, i
.tm
.name
);
4608 /* Near ret ingore operand size override under CPU64. */
4609 char prefix
= flag_code
== CODE_64BIT
4611 : i
.prefix
[DATA_PREFIX
] ? 0x66 : 0x0;
4613 if (lfence_before_ret
== lfence_before_ret_not
)
4615 /* not: 0xf71424, may add prefix
4616 for operand size override or 64-bit code. */
4617 p
= frag_more ((prefix
? 2 : 0) + 6 + 3);
4631 p
= frag_more ((prefix
? 1 : 0) + 4 + 3);
4634 if (lfence_before_ret
== lfence_before_ret_or
)
4636 /* or: 0x830c2400, may add prefix
4637 for operand size override or 64-bit code. */
4643 /* shl: 0xc1242400, may add prefix
4644 for operand size override or 64-bit code. */
4659 /* This is the guts of the machine-dependent assembler. LINE points to a
4660 machine dependent instruction. This function is supposed to emit
4661 the frags/bytes it assembles to. */
4664 md_assemble (char *line
)
4667 char mnemonic
[MAX_MNEM_SIZE
], mnem_suffix
;
4668 const insn_template
*t
;
4670 /* Initialize globals. */
4671 memset (&i
, '\0', sizeof (i
));
4672 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4673 i
.reloc
[j
] = NO_RELOC
;
4674 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
4675 memset (im_expressions
, '\0', sizeof (im_expressions
));
4676 save_stack_p
= save_stack
;
4678 /* First parse an instruction mnemonic & call i386_operand for the operands.
4679 We assume that the scrubber has arranged it so that line[0] is the valid
4680 start of a (possibly prefixed) mnemonic. */
4682 line
= parse_insn (line
, mnemonic
);
4685 mnem_suffix
= i
.suffix
;
4687 line
= parse_operands (line
, mnemonic
);
4689 xfree (i
.memop1_string
);
4690 i
.memop1_string
= NULL
;
4694 /* Now we've parsed the mnemonic into a set of templates, and have the
4695 operands at hand. */
4697 /* All Intel opcodes have reversed operands except for "bound", "enter",
4698 "monitor*", "mwait*", "tpause", and "umwait". We also don't reverse
4699 intersegment "jmp" and "call" instructions with 2 immediate operands so
4700 that the immediate segment precedes the offset, as it does when in AT&T
4704 && (strcmp (mnemonic
, "bound") != 0)
4705 && (strcmp (mnemonic
, "invlpga") != 0)
4706 && (strncmp (mnemonic
, "monitor", 7) != 0)
4707 && (strncmp (mnemonic
, "mwait", 5) != 0)
4708 && (strcmp (mnemonic
, "tpause") != 0)
4709 && (strcmp (mnemonic
, "umwait") != 0)
4710 && !(operand_type_check (i
.types
[0], imm
)
4711 && operand_type_check (i
.types
[1], imm
)))
4714 /* The order of the immediates should be reversed
4715 for 2 immediates extrq and insertq instructions */
4716 if (i
.imm_operands
== 2
4717 && (strcmp (mnemonic
, "extrq") == 0
4718 || strcmp (mnemonic
, "insertq") == 0))
4719 swap_2_operands (0, 1);
4724 /* Don't optimize displacement for movabs since it only takes 64bit
4727 && i
.disp_encoding
!= disp_encoding_32bit
4728 && (flag_code
!= CODE_64BIT
4729 || strcmp (mnemonic
, "movabs") != 0))
4732 /* Next, we find a template that matches the given insn,
4733 making sure the overlap of the given operands types is consistent
4734 with the template operand types. */
4736 if (!(t
= match_template (mnem_suffix
)))
4739 if (sse_check
!= check_none
4740 && !i
.tm
.opcode_modifier
.noavx
4741 && !i
.tm
.cpu_flags
.bitfield
.cpuavx
4742 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512f
4743 && (i
.tm
.cpu_flags
.bitfield
.cpusse
4744 || i
.tm
.cpu_flags
.bitfield
.cpusse2
4745 || i
.tm
.cpu_flags
.bitfield
.cpusse3
4746 || i
.tm
.cpu_flags
.bitfield
.cpussse3
4747 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
4748 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
4749 || i
.tm
.cpu_flags
.bitfield
.cpusse4a
4750 || i
.tm
.cpu_flags
.bitfield
.cpupclmul
4751 || i
.tm
.cpu_flags
.bitfield
.cpuaes
4752 || i
.tm
.cpu_flags
.bitfield
.cpusha
4753 || i
.tm
.cpu_flags
.bitfield
.cpugfni
))
4755 (sse_check
== check_warning
4757 : as_bad
) (_("SSE instruction `%s' is used"), i
.tm
.name
);
4760 if (i
.tm
.opcode_modifier
.fwait
)
4761 if (!add_prefix (FWAIT_OPCODE
))
4764 /* Check if REP prefix is OK. */
4765 if (i
.rep_prefix
&& !i
.tm
.opcode_modifier
.repprefixok
)
4767 as_bad (_("invalid instruction `%s' after `%s'"),
4768 i
.tm
.name
, i
.rep_prefix
);
4772 /* Check for lock without a lockable instruction. Destination operand
4773 must be memory unless it is xchg (0x86). */
4774 if (i
.prefix
[LOCK_PREFIX
]
4775 && (!i
.tm
.opcode_modifier
.islockable
4776 || i
.mem_operands
== 0
4777 || (i
.tm
.base_opcode
!= 0x86
4778 && !(i
.flags
[i
.operands
- 1] & Operand_Mem
))))
4780 as_bad (_("expecting lockable instruction after `lock'"));
4784 /* Check for data size prefix on VEX/XOP/EVEX encoded insns. */
4785 if (i
.prefix
[DATA_PREFIX
] && is_any_vex_encoding (&i
.tm
))
4787 as_bad (_("data size prefix invalid with `%s'"), i
.tm
.name
);
4791 /* Check if HLE prefix is OK. */
4792 if (i
.hle_prefix
&& !check_hle ())
4795 /* Check BND prefix. */
4796 if (i
.bnd_prefix
&& !i
.tm
.opcode_modifier
.bndprefixok
)
4797 as_bad (_("expecting valid branch instruction after `bnd'"));
4799 /* Check NOTRACK prefix. */
4800 if (i
.notrack_prefix
&& !i
.tm
.opcode_modifier
.notrackprefixok
)
4801 as_bad (_("expecting indirect branch instruction after `notrack'"));
4803 if (i
.tm
.cpu_flags
.bitfield
.cpumpx
)
4805 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
4806 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
4807 else if (flag_code
!= CODE_16BIT
4808 ? i
.prefix
[ADDR_PREFIX
]
4809 : i
.mem_operands
&& !i
.prefix
[ADDR_PREFIX
])
4810 as_bad (_("16-bit address isn't allowed in MPX instructions"));
4813 /* Insert BND prefix. */
4814 if (add_bnd_prefix
&& i
.tm
.opcode_modifier
.bndprefixok
)
4816 if (!i
.prefix
[BND_PREFIX
])
4817 add_prefix (BND_PREFIX_OPCODE
);
4818 else if (i
.prefix
[BND_PREFIX
] != BND_PREFIX_OPCODE
)
4820 as_warn (_("replacing `rep'/`repe' prefix by `bnd'"));
4821 i
.prefix
[BND_PREFIX
] = BND_PREFIX_OPCODE
;
4825 /* Check string instruction segment overrides. */
4826 if (i
.tm
.opcode_modifier
.isstring
>= IS_STRING_ES_OP0
)
4828 gas_assert (i
.mem_operands
);
4829 if (!check_string ())
4831 i
.disp_operands
= 0;
4834 if (optimize
&& !i
.no_optimize
&& i
.tm
.opcode_modifier
.optimize
)
4835 optimize_encoding ();
4837 if (!process_suffix ())
4840 /* Update operand types. */
4841 for (j
= 0; j
< i
.operands
; j
++)
4842 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
4844 /* Make still unresolved immediate matches conform to size of immediate
4845 given in i.suffix. */
4846 if (!finalize_imm ())
4849 if (i
.types
[0].bitfield
.imm1
)
4850 i
.imm_operands
= 0; /* kludge for shift insns. */
4852 /* We only need to check those implicit registers for instructions
4853 with 3 operands or less. */
4854 if (i
.operands
<= 3)
4855 for (j
= 0; j
< i
.operands
; j
++)
4856 if (i
.types
[j
].bitfield
.instance
!= InstanceNone
4857 && !i
.types
[j
].bitfield
.xmmword
)
4860 /* ImmExt should be processed after SSE2AVX. */
4861 if (!i
.tm
.opcode_modifier
.sse2avx
4862 && i
.tm
.opcode_modifier
.immext
)
4865 /* For insns with operands there are more diddles to do to the opcode. */
4868 if (!process_operands ())
4871 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
4873 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
4874 as_warn (_("translating to `%sp'"), i
.tm
.name
);
4877 if (is_any_vex_encoding (&i
.tm
))
4879 if (!cpu_arch_flags
.bitfield
.cpui286
)
4881 as_bad (_("instruction `%s' isn't supported outside of protected mode."),
4886 if (i
.tm
.opcode_modifier
.vex
)
4887 build_vex_prefix (t
);
4889 build_evex_prefix ();
4892 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
4893 instructions may define INT_OPCODE as well, so avoid this corner
4894 case for those instructions that use MODRM. */
4895 if (i
.tm
.base_opcode
== INT_OPCODE
4896 && !i
.tm
.opcode_modifier
.modrm
4897 && i
.op
[0].imms
->X_add_number
== 3)
4899 i
.tm
.base_opcode
= INT3_OPCODE
;
4903 if ((i
.tm
.opcode_modifier
.jump
== JUMP
4904 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
4905 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
4906 && i
.op
[0].disps
->X_op
== O_constant
)
4908 /* Convert "jmp constant" (and "call constant") to a jump (call) to
4909 the absolute address given by the constant. Since ix86 jumps and
4910 calls are pc relative, we need to generate a reloc. */
4911 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
4912 i
.op
[0].disps
->X_op
= O_symbol
;
4915 /* For 8 bit registers we need an empty rex prefix. Also if the
4916 instruction already has a prefix, we need to convert old
4917 registers to new ones. */
4919 if ((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
4920 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
4921 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
4922 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
4923 || (((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
)
4924 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
))
4929 i
.rex
|= REX_OPCODE
;
4930 for (x
= 0; x
< 2; x
++)
4932 /* Look for 8 bit operand that uses old registers. */
4933 if (i
.types
[x
].bitfield
.class == Reg
&& i
.types
[x
].bitfield
.byte
4934 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
4936 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
4937 /* In case it is "hi" register, give up. */
4938 if (i
.op
[x
].regs
->reg_num
> 3)
4939 as_bad (_("can't encode register '%s%s' in an "
4940 "instruction requiring REX prefix."),
4941 register_prefix
, i
.op
[x
].regs
->reg_name
);
4943 /* Otherwise it is equivalent to the extended register.
4944 Since the encoding doesn't change this is merely
4945 cosmetic cleanup for debug output. */
4947 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
4952 if (i
.rex
== 0 && i
.rex_encoding
)
4954 /* Check if we can add a REX_OPCODE byte. Look for 8 bit operand
4955 that uses legacy register. If it is "hi" register, don't add
4956 the REX_OPCODE byte. */
4958 for (x
= 0; x
< 2; x
++)
4959 if (i
.types
[x
].bitfield
.class == Reg
4960 && i
.types
[x
].bitfield
.byte
4961 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0
4962 && i
.op
[x
].regs
->reg_num
> 3)
4964 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
4965 i
.rex_encoding
= FALSE
;
4974 add_prefix (REX_OPCODE
| i
.rex
);
4976 insert_lfence_before ();
4978 /* We are ready to output the insn. */
4981 insert_lfence_after ();
4983 last_insn
.seg
= now_seg
;
4985 if (i
.tm
.opcode_modifier
.isprefix
)
4987 last_insn
.kind
= last_insn_prefix
;
4988 last_insn
.name
= i
.tm
.name
;
4989 last_insn
.file
= as_where (&last_insn
.line
);
4992 last_insn
.kind
= last_insn_other
;
4996 parse_insn (char *line
, char *mnemonic
)
4999 char *token_start
= l
;
5002 const insn_template
*t
;
5008 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
5013 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
5015 as_bad (_("no such instruction: `%s'"), token_start
);
5020 if (!is_space_char (*l
)
5021 && *l
!= END_OF_INSN
5023 || (*l
!= PREFIX_SEPARATOR
5026 as_bad (_("invalid character %s in mnemonic"),
5027 output_invalid (*l
));
5030 if (token_start
== l
)
5032 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
5033 as_bad (_("expecting prefix; got nothing"));
5035 as_bad (_("expecting mnemonic; got nothing"));
5039 /* Look up instruction (or prefix) via hash table. */
5040 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
5042 if (*l
!= END_OF_INSN
5043 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
5044 && current_templates
5045 && current_templates
->start
->opcode_modifier
.isprefix
)
5047 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
5049 as_bad ((flag_code
!= CODE_64BIT
5050 ? _("`%s' is only supported in 64-bit mode")
5051 : _("`%s' is not supported in 64-bit mode")),
5052 current_templates
->start
->name
);
5055 /* If we are in 16-bit mode, do not allow addr16 or data16.
5056 Similarly, in 32-bit mode, do not allow addr32 or data32. */
5057 if ((current_templates
->start
->opcode_modifier
.size
== SIZE16
5058 || current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5059 && flag_code
!= CODE_64BIT
5060 && ((current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5061 ^ (flag_code
== CODE_16BIT
)))
5063 as_bad (_("redundant %s prefix"),
5064 current_templates
->start
->name
);
5067 if (current_templates
->start
->opcode_length
== 0)
5069 /* Handle pseudo prefixes. */
5070 switch (current_templates
->start
->base_opcode
)
5074 i
.disp_encoding
= disp_encoding_8bit
;
5078 i
.disp_encoding
= disp_encoding_32bit
;
5082 i
.dir_encoding
= dir_encoding_load
;
5086 i
.dir_encoding
= dir_encoding_store
;
5090 i
.vec_encoding
= vex_encoding_vex
;
5094 i
.vec_encoding
= vex_encoding_vex3
;
5098 i
.vec_encoding
= vex_encoding_evex
;
5102 i
.rex_encoding
= TRUE
;
5106 i
.no_optimize
= TRUE
;
5114 /* Add prefix, checking for repeated prefixes. */
5115 switch (add_prefix (current_templates
->start
->base_opcode
))
5120 if (current_templates
->start
->cpu_flags
.bitfield
.cpuibt
)
5121 i
.notrack_prefix
= current_templates
->start
->name
;
5124 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
5125 i
.hle_prefix
= current_templates
->start
->name
;
5126 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
5127 i
.bnd_prefix
= current_templates
->start
->name
;
5129 i
.rep_prefix
= current_templates
->start
->name
;
5135 /* Skip past PREFIX_SEPARATOR and reset token_start. */
5142 if (!current_templates
)
5144 /* Deprecated functionality (new code should use pseudo-prefixes instead):
5145 Check if we should swap operand or force 32bit displacement in
5147 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
5148 i
.dir_encoding
= dir_encoding_swap
;
5149 else if (mnem_p
- 3 == dot_p
5152 i
.disp_encoding
= disp_encoding_8bit
;
5153 else if (mnem_p
- 4 == dot_p
5157 i
.disp_encoding
= disp_encoding_32bit
;
5162 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
5165 if (!current_templates
)
5168 if (mnem_p
> mnemonic
)
5170 /* See if we can get a match by trimming off a suffix. */
5173 case WORD_MNEM_SUFFIX
:
5174 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
5175 i
.suffix
= SHORT_MNEM_SUFFIX
;
5178 case BYTE_MNEM_SUFFIX
:
5179 case QWORD_MNEM_SUFFIX
:
5180 i
.suffix
= mnem_p
[-1];
5182 current_templates
= (const templates
*) hash_find (op_hash
,
5185 case SHORT_MNEM_SUFFIX
:
5186 case LONG_MNEM_SUFFIX
:
5189 i
.suffix
= mnem_p
[-1];
5191 current_templates
= (const templates
*) hash_find (op_hash
,
5200 if (intel_float_operand (mnemonic
) == 1)
5201 i
.suffix
= SHORT_MNEM_SUFFIX
;
5203 i
.suffix
= LONG_MNEM_SUFFIX
;
5205 current_templates
= (const templates
*) hash_find (op_hash
,
5212 if (!current_templates
)
5214 as_bad (_("no such instruction: `%s'"), token_start
);
5219 if (current_templates
->start
->opcode_modifier
.jump
== JUMP
5220 || current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
)
5222 /* Check for a branch hint. We allow ",pt" and ",pn" for
5223 predict taken and predict not taken respectively.
5224 I'm not sure that branch hints actually do anything on loop
5225 and jcxz insns (JumpByte) for current Pentium4 chips. They
5226 may work in the future and it doesn't hurt to accept them
5228 if (l
[0] == ',' && l
[1] == 'p')
5232 if (!add_prefix (DS_PREFIX_OPCODE
))
5236 else if (l
[2] == 'n')
5238 if (!add_prefix (CS_PREFIX_OPCODE
))
5244 /* Any other comma loses. */
5247 as_bad (_("invalid character %s in mnemonic"),
5248 output_invalid (*l
));
5252 /* Check if instruction is supported on specified architecture. */
5254 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
5256 supported
|= cpu_flags_match (t
);
5257 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
5259 if (!cpu_arch_flags
.bitfield
.cpui386
&& (flag_code
!= CODE_16BIT
))
5260 as_warn (_("use .code16 to ensure correct addressing mode"));
5266 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
5267 as_bad (flag_code
== CODE_64BIT
5268 ? _("`%s' is not supported in 64-bit mode")
5269 : _("`%s' is only supported in 64-bit mode"),
5270 current_templates
->start
->name
);
5272 as_bad (_("`%s' is not supported on `%s%s'"),
5273 current_templates
->start
->name
,
5274 cpu_arch_name
? cpu_arch_name
: default_arch
,
5275 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
/* NOTE(review): fragments of parse_operands from a line-mangled extraction;
   interior lines are elided.  Splits the comma-separated operand list in L,
   tracking paren/bracket balance, and hands each operand text to the
   Intel- or AT&T-syntax operand parser, accumulating results in `i'.  */
5281 parse_operands (char *l
, const char *mnemonic
)
5285 /* 1 if operand is pending after ','. */
5286 unsigned int expecting_operand
= 0;
5288 /* Non-zero if operand parens not balanced. */
5289 unsigned int paren_not_balanced
;
5291 while (*l
!= END_OF_INSN
)
5293 /* Skip optional white space before operand. */
5294 if (is_space_char (*l
))
5296 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
&& *l
!= '"')
5298 as_bad (_("invalid character %s before operand %d"),
5299 output_invalid (*l
),
5303 token_start
= l
; /* After white space. */
5304 paren_not_balanced
= 0;
/* Scan to the next top-level comma; a comma inside parens/brackets does not
   terminate the operand.  */
5305 while (paren_not_balanced
|| *l
!= ',')
5307 if (*l
== END_OF_INSN
)
5309 if (paren_not_balanced
)
5312 as_bad (_("unbalanced parenthesis in operand %d."),
5315 as_bad (_("unbalanced brackets in operand %d."),
5320 break; /* we are done */
5322 else if (!is_operand_char (*l
) && !is_space_char (*l
) && *l
!= '"')
5324 as_bad (_("invalid character %s in operand %d"),
5325 output_invalid (*l
),
5332 ++paren_not_balanced
;
5334 --paren_not_balanced
;
5339 ++paren_not_balanced
;
5341 --paren_not_balanced
;
5345 if (l
!= token_start
)
5346 { /* Yes, we've read in another operand. */
5347 unsigned int operand_ok
;
5348 this_operand
= i
.operands
++;
5349 if (i
.operands
> MAX_OPERANDS
)
5351 as_bad (_("spurious operands; (%d operands/instruction max)"),
5355 i
.types
[this_operand
].bitfield
.unspecified
= 1;
5356 /* Now parse operand adding info to 'i' as we go along. */
5357 END_STRING_AND_SAVE (l
);
5359 if (i
.mem_operands
> 1)
5361 as_bad (_("too many memory references for `%s'"),
5368 i386_intel_operand (token_start
,
5369 intel_float_operand (mnemonic
));
5371 operand_ok
= i386_att_operand (token_start
);
5373 RESTORE_END_STRING (l
);
5379 if (expecting_operand
)
5381 expecting_operand_after_comma
:
5382 as_bad (_("expecting operand after ','; got nothing"));
5387 as_bad (_("expecting operand before ','; got nothing"));
5392 /* Now *l must be either ',' or END_OF_INSN. */
5395 if (*++l
== END_OF_INSN
)
5397 /* Just skip it, if it's \n complain. */
5398 goto expecting_operand_after_comma
;
5400 expecting_operand
= 1;
5407 swap_2_operands (int xchg1
, int xchg2
)
5409 union i386_op temp_op
;
5410 i386_operand_type temp_type
;
5411 unsigned int temp_flags
;
5412 enum bfd_reloc_code_real temp_reloc
;
5414 temp_type
= i
.types
[xchg2
];
5415 i
.types
[xchg2
] = i
.types
[xchg1
];
5416 i
.types
[xchg1
] = temp_type
;
5418 temp_flags
= i
.flags
[xchg2
];
5419 i
.flags
[xchg2
] = i
.flags
[xchg1
];
5420 i
.flags
[xchg1
] = temp_flags
;
5422 temp_op
= i
.op
[xchg2
];
5423 i
.op
[xchg2
] = i
.op
[xchg1
];
5424 i
.op
[xchg1
] = temp_op
;
5426 temp_reloc
= i
.reloc
[xchg2
];
5427 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
5428 i
.reloc
[xchg1
] = temp_reloc
;
5432 if (i
.mask
->operand
== xchg1
)
5433 i
.mask
->operand
= xchg2
;
5434 else if (i
.mask
->operand
== xchg2
)
5435 i
.mask
->operand
= xchg1
;
5439 if (i
.broadcast
->operand
== xchg1
)
5440 i
.broadcast
->operand
= xchg2
;
5441 else if (i
.broadcast
->operand
== xchg2
)
5442 i
.broadcast
->operand
= xchg1
;
5446 if (i
.rounding
->operand
== xchg1
)
5447 i
.rounding
->operand
= xchg2
;
5448 else if (i
.rounding
->operand
== xchg2
)
5449 i
.rounding
->operand
= xchg1
;
5454 swap_operands (void)
5460 swap_2_operands (1, i
.operands
- 2);
5464 swap_2_operands (0, i
.operands
- 1);
5470 if (i
.mem_operands
== 2)
5472 const seg_entry
*temp_seg
;
5473 temp_seg
= i
.seg
[0];
5474 i
.seg
[0] = i
.seg
[1];
5475 i
.seg
[1] = temp_seg
;
/* NOTE(review): fragments of optimize_imm from a line-mangled extraction;
   the function header and several interior lines are elided.  Shrinks
   constant immediates to the smallest representation the templates allow,
   guided by an explicit or guessed operand-size suffix.  */
5479 /* Try to ensure constant immediates are represented in the smallest
5484 char guess_suffix
= 0;
5488 guess_suffix
= i
.suffix
;
5489 else if (i
.reg_operands
)
5491 /* Figure out a suffix from the last register operand specified.
5492 We can't do this properly yet, i.e. excluding special register
5493 instances, but the following works for instructions with
5494 immediates. In any case, we can't set i.suffix yet. */
5495 for (op
= i
.operands
; --op
>= 0;)
5496 if (i
.types
[op
].bitfield
.class != Reg
)
5498 else if (i
.types
[op
].bitfield
.byte
)
5500 guess_suffix
= BYTE_MNEM_SUFFIX
;
5503 else if (i
.types
[op
].bitfield
.word
)
5505 guess_suffix
= WORD_MNEM_SUFFIX
;
5508 else if (i
.types
[op
].bitfield
.dword
)
5510 guess_suffix
= LONG_MNEM_SUFFIX
;
5513 else if (i
.types
[op
].bitfield
.qword
)
5515 guess_suffix
= QWORD_MNEM_SUFFIX
;
5519 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
5520 guess_suffix
= WORD_MNEM_SUFFIX
;
5522 for (op
= i
.operands
; --op
>= 0;)
5523 if (operand_type_check (i
.types
[op
], imm
))
5525 switch (i
.op
[op
].imms
->X_op
)
5528 /* If a suffix is given, this operand may be shortened. */
5529 switch (guess_suffix
)
/* The suffix cases widen the set of permitted Imm sizes; the visible
   pattern suggests fall-through from larger to smaller suffixes.  */
5531 case LONG_MNEM_SUFFIX
:
5532 i
.types
[op
].bitfield
.imm32
= 1;
5533 i
.types
[op
].bitfield
.imm64
= 1;
5535 case WORD_MNEM_SUFFIX
:
5536 i
.types
[op
].bitfield
.imm16
= 1;
5537 i
.types
[op
].bitfield
.imm32
= 1;
5538 i
.types
[op
].bitfield
.imm32s
= 1;
5539 i
.types
[op
].bitfield
.imm64
= 1;
5541 case BYTE_MNEM_SUFFIX
:
5542 i
.types
[op
].bitfield
.imm8
= 1;
5543 i
.types
[op
].bitfield
.imm8s
= 1;
5544 i
.types
[op
].bitfield
.imm16
= 1;
5545 i
.types
[op
].bitfield
.imm32
= 1;
5546 i
.types
[op
].bitfield
.imm32s
= 1;
5547 i
.types
[op
].bitfield
.imm64
= 1;
5551 /* If this operand is at most 16 bits, convert it
5552 to a signed 16 bit number before trying to see
5553 whether it will fit in an even smaller size.
5554 This allows a 16-bit operand such as $0xffe0 to
5555 be recognised as within Imm8S range. */
5556 if ((i
.types
[op
].bitfield
.imm16
)
5557 && (i
.op
[op
].imms
->X_add_number
& ~(offsetT
) 0xffff) == 0)
5559 i
.op
[op
].imms
->X_add_number
=
5560 (((i
.op
[op
].imms
->X_add_number
& 0xffff) ^ 0x8000) - 0x8000);
5563 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
5564 if ((i
.types
[op
].bitfield
.imm32
)
5565 && ((i
.op
[op
].imms
->X_add_number
& ~(((offsetT
) 2 << 31) - 1))
5568 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
5569 ^ ((offsetT
) 1 << 31))
5570 - ((offsetT
) 1 << 31));
5574 = operand_type_or (i
.types
[op
],
5575 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
5577 /* We must avoid matching of Imm32 templates when 64bit
5578 only immediate is available. */
5579 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
5580 i
.types
[op
].bitfield
.imm32
= 0;
5587 /* Symbols and expressions. */
5589 /* Convert symbolic operand to proper sizes for matching, but don't
5590 prevent matching a set of insns that only supports sizes other
5591 than those matching the insn suffix. */
5593 i386_operand_type mask
, allowed
;
5594 const insn_template
*t
;
5596 operand_type_set (&mask
, 0);
5597 operand_type_set (&allowed
, 0);
5599 for (t
= current_templates
->start
;
5600 t
< current_templates
->end
;
5603 allowed
= operand_type_or (allowed
, t
->operand_types
[op
]);
5604 allowed
= operand_type_and (allowed
, anyimm
);
5606 switch (guess_suffix
)
5608 case QWORD_MNEM_SUFFIX
:
5609 mask
.bitfield
.imm64
= 1;
5610 mask
.bitfield
.imm32s
= 1;
5612 case LONG_MNEM_SUFFIX
:
5613 mask
.bitfield
.imm32
= 1;
5615 case WORD_MNEM_SUFFIX
:
5616 mask
.bitfield
.imm16
= 1;
5618 case BYTE_MNEM_SUFFIX
:
5619 mask
.bitfield
.imm8
= 1;
/* Only narrow the operand's types when some template actually accepts an
   immediate of a size within the suffix-derived mask.  */
5624 allowed
= operand_type_and (mask
, allowed
);
5625 if (!operand_type_all_zero (&allowed
))
5626 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
/* NOTE(review): fragments of optimize_disp from a line-mangled extraction;
   interior lines are elided.  Narrows constant displacements to the
   smallest usable disp size, and clears disp bits entirely for a zero
   displacement with a base/index.  */
5633 /* Try to use the smallest displacement type too. */
5635 optimize_disp (void)
5639 for (op
= i
.operands
; --op
>= 0;)
5640 if (operand_type_check (i
.types
[op
], disp
))
5642 if (i
.op
[op
].disps
->X_op
== O_constant
)
5644 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
5646 if (i
.types
[op
].bitfield
.disp16
5647 && (op_disp
& ~(offsetT
) 0xffff) == 0)
5649 /* If this operand is at most 16 bits, convert
5650 to a signed 16 bit number and don't use 64bit
5652 op_disp
= (((op_disp
& 0xffff) ^ 0x8000) - 0x8000);
5653 i
.types
[op
].bitfield
.disp64
= 0;
5656 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
5657 if (i
.types
[op
].bitfield
.disp32
5658 && (op_disp
& ~(((offsetT
) 2 << 31) - 1)) == 0)
5660 /* If this operand is at most 32 bits, convert
5661 to a signed 32 bit number and don't use 64bit
5663 op_disp
&= (((offsetT
) 2 << 31) - 1);
5664 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
5665 i
.types
[op
].bitfield
.disp64
= 0;
/* A zero displacement with a base/index register needs no displacement
   byte at all — drop every disp size bit.  */
5668 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
5670 i
.types
[op
].bitfield
.disp8
= 0;
5671 i
.types
[op
].bitfield
.disp16
= 0;
5672 i
.types
[op
].bitfield
.disp32
= 0;
5673 i
.types
[op
].bitfield
.disp32s
= 0;
5674 i
.types
[op
].bitfield
.disp64
= 0;
5678 else if (flag_code
== CODE_64BIT
)
5680 if (fits_in_signed_long (op_disp
))
5682 i
.types
[op
].bitfield
.disp64
= 0;
5683 i
.types
[op
].bitfield
.disp32s
= 1;
5685 if (i
.prefix
[ADDR_PREFIX
]
5686 && fits_in_unsigned_long (op_disp
))
5687 i
.types
[op
].bitfield
.disp32
= 1;
5689 if ((i
.types
[op
].bitfield
.disp32
5690 || i
.types
[op
].bitfield
.disp32s
5691 || i
.types
[op
].bitfield
.disp16
)
5692 && fits_in_disp8 (op_disp
))
5693 i
.types
[op
].bitfield
.disp8
= 1;
5695 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
5696 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
5698 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
5699 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
5700 i
.types
[op
].bitfield
.disp8
= 0;
5701 i
.types
[op
].bitfield
.disp16
= 0;
5702 i
.types
[op
].bitfield
.disp32
= 0;
5703 i
.types
[op
].bitfield
.disp32s
= 0;
5704 i
.types
[op
].bitfield
.disp64
= 0;
5707 /* We only support 64bit displacement on constants. */
5708 i
.types
[op
].bitfield
.disp64
= 0;
5712 /* Return 1 if there is a match in broadcast bytes between operand
5713 GIVEN and instruction template T. */
5716 match_broadcast_size (const insn_template
*t
, unsigned int given
)
5718 return ((t
->opcode_modifier
.broadcast
== BYTE_BROADCAST
5719 && i
.types
[given
].bitfield
.byte
)
5720 || (t
->opcode_modifier
.broadcast
== WORD_BROADCAST
5721 && i
.types
[given
].bitfield
.word
)
5722 || (t
->opcode_modifier
.broadcast
== DWORD_BROADCAST
5723 && i
.types
[given
].bitfield
.dword
)
5724 || (t
->opcode_modifier
.broadcast
== QWORD_BROADCAST
5725 && i
.types
[given
].bitfield
.qword
));
/* NOTE(review): fragments of check_VecOperands from a line-mangled
   extraction; interior lines are elided.  Validates vector-specific
   operand constraints against template T (AVX512VL, VSIB, masking,
   broadcast, RC/SAE, vector Disp8 scaling), setting i.error and, for
   Disp8, i.memshift on the way.  */
5728 /* Check if operands are valid for the instruction. */
5731 check_VecOperands (const insn_template
*t
)
5736 /* Templates allowing for ZMMword as well as YMMword and/or XMMword for
5737 any one operand are implicity requiring AVX512VL support if the actual
5738 operand size is YMMword or XMMword. Since this function runs after
5739 template matching, there's no need to check for YMMword/XMMword in
5741 cpu
= cpu_flags_and (t
->cpu_flags
, avx512
);
5742 if (!cpu_flags_all_zero (&cpu
)
5743 && !t
->cpu_flags
.bitfield
.cpuavx512vl
5744 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
5746 for (op
= 0; op
< t
->operands
; ++op
)
5748 if (t
->operand_types
[op
].bitfield
.zmmword
5749 && (i
.types
[op
].bitfield
.ymmword
5750 || i
.types
[op
].bitfield
.xmmword
))
5752 i
.error
= unsupported
;
5758 /* Without VSIB byte, we can't have a vector register for index. */
5759 if (!t
->opcode_modifier
.vecsib
5761 && (i
.index_reg
->reg_type
.bitfield
.xmmword
5762 || i
.index_reg
->reg_type
.bitfield
.ymmword
5763 || i
.index_reg
->reg_type
.bitfield
.zmmword
))
5765 i
.error
= unsupported_vector_index_register
;
5769 /* Check if default mask is allowed. */
5770 if (t
->opcode_modifier
.nodefmask
5771 && (!i
.mask
|| i
.mask
->mask
->reg_num
== 0))
5773 i
.error
= no_default_mask
;
5777 /* For VSIB byte, we need a vector register for index, and all vector
5778 registers must be distinct. */
5779 if (t
->opcode_modifier
.vecsib
)
5782 || !((t
->opcode_modifier
.vecsib
== VecSIB128
5783 && i
.index_reg
->reg_type
.bitfield
.xmmword
)
5784 || (t
->opcode_modifier
.vecsib
== VecSIB256
5785 && i
.index_reg
->reg_type
.bitfield
.ymmword
)
5786 || (t
->opcode_modifier
.vecsib
== VecSIB512
5787 && i
.index_reg
->reg_type
.bitfield
.zmmword
)))
5789 i
.error
= invalid_vsib_address
;
/* Gather/scatter distinctness checks: destination, mask and index
   registers should not alias.  */
5793 gas_assert (i
.reg_operands
== 2 || i
.mask
);
5794 if (i
.reg_operands
== 2 && !i
.mask
)
5796 gas_assert (i
.types
[0].bitfield
.class == RegSIMD
);
5797 gas_assert (i
.types
[0].bitfield
.xmmword
5798 || i
.types
[0].bitfield
.ymmword
);
5799 gas_assert (i
.types
[2].bitfield
.class == RegSIMD
);
5800 gas_assert (i
.types
[2].bitfield
.xmmword
5801 || i
.types
[2].bitfield
.ymmword
);
5802 if (operand_check
== check_none
)
5804 if (register_number (i
.op
[0].regs
)
5805 != register_number (i
.index_reg
)
5806 && register_number (i
.op
[2].regs
)
5807 != register_number (i
.index_reg
)
5808 && register_number (i
.op
[0].regs
)
5809 != register_number (i
.op
[2].regs
))
5811 if (operand_check
== check_error
)
5813 i
.error
= invalid_vector_register_set
;
5816 as_warn (_("mask, index, and destination registers should be distinct"));
5818 else if (i
.reg_operands
== 1 && i
.mask
)
5820 if (i
.types
[1].bitfield
.class == RegSIMD
5821 && (i
.types
[1].bitfield
.xmmword
5822 || i
.types
[1].bitfield
.ymmword
5823 || i
.types
[1].bitfield
.zmmword
)
5824 && (register_number (i
.op
[1].regs
)
5825 == register_number (i
.index_reg
)))
5827 if (operand_check
== check_error
)
5829 i
.error
= invalid_vector_register_set
;
5832 if (operand_check
!= check_none
)
5833 as_warn (_("index and destination registers should be distinct"));
5838 /* Check if broadcast is supported by the instruction and is applied
5839 to the memory operand. */
5842 i386_operand_type type
, overlap
;
5844 /* Check if specified broadcast is supported in this instruction,
5845 and its broadcast bytes match the memory operand. */
5846 op
= i
.broadcast
->operand
;
5847 if (!t
->opcode_modifier
.broadcast
5848 || !(i
.flags
[op
] & Operand_Mem
)
5849 || (!i
.types
[op
].bitfield
.unspecified
5850 && !match_broadcast_size (t
, op
)))
5853 i
.error
= unsupported_broadcast
;
5857 i
.broadcast
->bytes
= ((1 << (t
->opcode_modifier
.broadcast
- 1))
5858 * i
.broadcast
->type
);
5859 operand_type_set (&type
, 0);
5860 switch (i
.broadcast
->bytes
)
5863 type
.bitfield
.word
= 1;
5866 type
.bitfield
.dword
= 1;
5869 type
.bitfield
.qword
= 1;
5872 type
.bitfield
.xmmword
= 1;
5875 type
.bitfield
.ymmword
= 1;
5878 type
.bitfield
.zmmword
= 1;
5884 overlap
= operand_type_and (type
, t
->operand_types
[op
]);
5885 if (t
->operand_types
[op
].bitfield
.class == RegSIMD
5886 && t
->operand_types
[op
].bitfield
.byte
5887 + t
->operand_types
[op
].bitfield
.word
5888 + t
->operand_types
[op
].bitfield
.dword
5889 + t
->operand_types
[op
].bitfield
.qword
> 1)
5891 overlap
.bitfield
.xmmword
= 0;
5892 overlap
.bitfield
.ymmword
= 0;
5893 overlap
.bitfield
.zmmword
= 0;
5895 if (operand_type_all_zero (&overlap
))
5898 if (t
->opcode_modifier
.checkregsize
)
5902 type
.bitfield
.baseindex
= 1;
5903 for (j
= 0; j
< i
.operands
; ++j
)
5906 && !operand_type_register_match(i
.types
[j
],
5907 t
->operand_types
[j
],
5909 t
->operand_types
[op
]))
5914 /* If broadcast is supported in this instruction, we need to check if
5915 operand of one-element size isn't specified without broadcast. */
5916 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
5918 /* Find memory operand. */
5919 for (op
= 0; op
< i
.operands
; op
++)
5920 if (i
.flags
[op
] & Operand_Mem
)
5922 gas_assert (op
< i
.operands
);
5923 /* Check size of the memory operand. */
5924 if (match_broadcast_size (t
, op
))
5926 i
.error
= broadcast_needed
;
5931 op
= MAX_OPERANDS
- 1; /* Avoid uninitialized variable warning. */
5933 /* Check if requested masking is supported. */
5936 switch (t
->opcode_modifier
.masking
)
5940 case MERGING_MASKING
:
5941 if (i
.mask
->zeroing
)
5944 i
.error
= unsupported_masking
;
5948 case DYNAMIC_MASKING
:
5949 /* Memory destinations allow only merging masking. */
5950 if (i
.mask
->zeroing
&& i
.mem_operands
)
5952 /* Find memory operand. */
5953 for (op
= 0; op
< i
.operands
; op
++)
5954 if (i
.flags
[op
] & Operand_Mem
)
5956 gas_assert (op
< i
.operands
);
5957 if (op
== i
.operands
- 1)
5959 i
.error
= unsupported_masking
;
5969 /* Check if masking is applied to dest operand. */
5970 if (i
.mask
&& (i
.mask
->operand
!= (int) (i
.operands
- 1)))
5972 i
.error
= mask_not_on_destination
;
5979 if (!t
->opcode_modifier
.sae
5980 || (i
.rounding
->type
!= saeonly
&& !t
->opcode_modifier
.staticrounding
))
5982 i
.error
= unsupported_rc_sae
;
5985 /* If the instruction has several immediate operands and one of
5986 them is rounding, the rounding operand should be the last
5987 immediate operand. */
5988 if (i
.imm_operands
> 1
5989 && i
.rounding
->operand
!= (int) (i
.imm_operands
- 1))
5991 i
.error
= rc_sae_operand_not_last_imm
;
5996 /* Check vector Disp8 operand. */
5997 if (t
->opcode_modifier
.disp8memshift
5998 && i
.disp_encoding
!= disp_encoding_32bit
)
6001 i
.memshift
= t
->opcode_modifier
.broadcast
- 1;
6002 else if (t
->opcode_modifier
.disp8memshift
!= DISP8_SHIFT_VL
)
6003 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
6006 const i386_operand_type
*type
= NULL
;
/* DISP8_SHIFT_VL: derive the shift from the vector length of the memory
   operand (or of a SIMD register operand when the template is LIG).  */
6009 for (op
= 0; op
< i
.operands
; op
++)
6010 if (i
.flags
[op
] & Operand_Mem
)
6012 if (t
->opcode_modifier
.evex
== EVEXLIG
)
6013 i
.memshift
= 2 + (i
.suffix
== QWORD_MNEM_SUFFIX
);
6014 else if (t
->operand_types
[op
].bitfield
.xmmword
6015 + t
->operand_types
[op
].bitfield
.ymmword
6016 + t
->operand_types
[op
].bitfield
.zmmword
<= 1)
6017 type
= &t
->operand_types
[op
];
6018 else if (!i
.types
[op
].bitfield
.unspecified
)
6019 type
= &i
.types
[op
];
6021 else if (i
.types
[op
].bitfield
.class == RegSIMD
6022 && t
->opcode_modifier
.evex
!= EVEXLIG
)
6024 if (i
.types
[op
].bitfield
.zmmword
)
6026 else if (i
.types
[op
].bitfield
.ymmword
&& i
.memshift
< 5)
6028 else if (i
.types
[op
].bitfield
.xmmword
&& i
.memshift
< 4)
6034 if (type
->bitfield
.zmmword
)
6036 else if (type
->bitfield
.ymmword
)
6038 else if (type
->bitfield
.xmmword
)
6042 /* For the check in fits_in_disp8(). */
6043 if (i
.memshift
== 0)
6047 for (op
= 0; op
< i
.operands
; op
++)
6048 if (operand_type_check (i
.types
[op
], disp
)
6049 && i
.op
[op
].disps
->X_op
== O_constant
)
6051 if (fits_in_disp8 (i
.op
[op
].disps
->X_add_number
))
6053 i
.types
[op
].bitfield
.disp8
= 1;
6056 i
.types
[op
].bitfield
.disp8
= 0;
/* NOTE(review): fragments of VEX_check_operands from a line-mangled
   extraction; interior lines are elided.  Validates VEX/EVEX encoding
   requests against template T and checks the XOP Imm4 special case.  */
6065 /* Check if operands are valid for the instruction. Update VEX
6069 VEX_check_operands (const insn_template
*t
)
6071 if (i
.vec_encoding
== vex_encoding_evex
)
6073 /* This instruction must be encoded with EVEX prefix. */
6074 if (!is_evex_encoding (t
))
6076 i
.error
= unsupported
;
6082 if (!t
->opcode_modifier
.vex
)
6084 /* This instruction template doesn't have VEX prefix. */
6085 if (i
.vec_encoding
!= vex_encoding_default
)
6087 i
.error
= unsupported
;
6093 /* Check the special Imm4 cases; must be the first operand. */
6094 if (t
->cpu_flags
.bitfield
.cpuxop
&& t
->operands
== 5)
6096 if (i
.op
[0].imms
->X_op
!= O_constant
6097 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
6103 /* Turn off Imm<N> so that update_imm won't complain. */
6104 operand_type_set (&i
.types
[0], 0);
6110 static const insn_template
*
6111 match_template (char mnem_suffix
)
6113 /* Points to template once we've found it. */
6114 const insn_template
*t
;
6115 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
6116 i386_operand_type overlap4
;
6117 unsigned int found_reverse_match
;
6118 i386_opcode_modifier suffix_check
;
6119 i386_operand_type operand_types
[MAX_OPERANDS
];
6120 int addr_prefix_disp
;
6121 unsigned int j
, size_match
, check_register
;
6122 enum i386_error specific_error
= 0;
6124 #if MAX_OPERANDS != 5
6125 # error "MAX_OPERANDS must be 5."
6128 found_reverse_match
= 0;
6129 addr_prefix_disp
= -1;
6131 /* Prepare for mnemonic suffix check. */
6132 memset (&suffix_check
, 0, sizeof (suffix_check
));
6133 switch (mnem_suffix
)
6135 case BYTE_MNEM_SUFFIX
:
6136 suffix_check
.no_bsuf
= 1;
6138 case WORD_MNEM_SUFFIX
:
6139 suffix_check
.no_wsuf
= 1;
6141 case SHORT_MNEM_SUFFIX
:
6142 suffix_check
.no_ssuf
= 1;
6144 case LONG_MNEM_SUFFIX
:
6145 suffix_check
.no_lsuf
= 1;
6147 case QWORD_MNEM_SUFFIX
:
6148 suffix_check
.no_qsuf
= 1;
6151 /* NB: In Intel syntax, normally we can check for memory operand
6152 size when there is no mnemonic suffix. But jmp and call have
6153 2 different encodings with Dword memory operand size, one with
6154 No_ldSuf and the other without. i.suffix is set to
6155 LONG_DOUBLE_MNEM_SUFFIX to skip the one with No_ldSuf. */
6156 if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
6157 suffix_check
.no_ldsuf
= 1;
6160 /* Must have right number of operands. */
6161 i
.error
= number_of_operands_mismatch
;
6163 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
6165 addr_prefix_disp
= -1;
6166 found_reverse_match
= 0;
6168 if (i
.operands
!= t
->operands
)
6171 /* Check processor support. */
6172 i
.error
= unsupported
;
6173 if (cpu_flags_match (t
) != CPU_FLAGS_PERFECT_MATCH
)
6176 /* Check AT&T mnemonic. */
6177 i
.error
= unsupported_with_intel_mnemonic
;
6178 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
6181 /* Check AT&T/Intel syntax. */
6182 i
.error
= unsupported_syntax
;
6183 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
6184 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
))
6187 /* Check Intel64/AMD64 ISA. */
6191 /* Default: Don't accept Intel64. */
6192 if (t
->opcode_modifier
.isa64
== INTEL64
)
6196 /* -mamd64: Don't accept Intel64 and Intel64 only. */
6197 if (t
->opcode_modifier
.isa64
>= INTEL64
)
6201 /* -mintel64: Don't accept AMD64. */
6202 if (t
->opcode_modifier
.isa64
== AMD64
&& flag_code
== CODE_64BIT
)
6207 /* Check the suffix. */
6208 i
.error
= invalid_instruction_suffix
;
6209 if ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
6210 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
6211 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
6212 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
6213 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
6214 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
))
6217 size_match
= operand_size_match (t
);
6221 /* This is intentionally not
6223 if (i.jumpabsolute != (t->opcode_modifier.jump == JUMP_ABSOLUTE))
6225 as the case of a missing * on the operand is accepted (perhaps with
6226 a warning, issued further down). */
6227 if (i
.jumpabsolute
&& t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
6229 i
.error
= operand_type_mismatch
;
6233 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6234 operand_types
[j
] = t
->operand_types
[j
];
6236 /* In general, don't allow
6237 - 64-bit operands outside of 64-bit mode,
6238 - 32-bit operands on pre-386. */
6239 j
= i
.imm_operands
+ (t
->operands
> i
.imm_operands
+ 1);
6240 if (((i
.suffix
== QWORD_MNEM_SUFFIX
6241 && flag_code
!= CODE_64BIT
6242 && (t
->base_opcode
!= 0x0fc7
6243 || t
->extension_opcode
!= 1 /* cmpxchg8b */))
6244 || (i
.suffix
== LONG_MNEM_SUFFIX
6245 && !cpu_arch_flags
.bitfield
.cpui386
))
6247 ? (t
->opcode_modifier
.mnemonicsize
!= IGNORESIZE
6248 && !intel_float_operand (t
->name
))
6249 : intel_float_operand (t
->name
) != 2)
6250 && (t
->operands
== i
.imm_operands
6251 || (operand_types
[i
.imm_operands
].bitfield
.class != RegMMX
6252 && operand_types
[i
.imm_operands
].bitfield
.class != RegSIMD
6253 && operand_types
[i
.imm_operands
].bitfield
.class != RegMask
)
6254 || (operand_types
[j
].bitfield
.class != RegMMX
6255 && operand_types
[j
].bitfield
.class != RegSIMD
6256 && operand_types
[j
].bitfield
.class != RegMask
))
6257 && !t
->opcode_modifier
.vecsib
)
6260 /* Do not verify operands when there are none. */
6262 /* We've found a match; break out of loop. */
6265 if (!t
->opcode_modifier
.jump
6266 || t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)
6268 /* There should be only one Disp operand. */
6269 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6270 if (operand_type_check (operand_types
[j
], disp
))
6272 if (j
< MAX_OPERANDS
)
6274 bfd_boolean override
= (i
.prefix
[ADDR_PREFIX
] != 0);
6276 addr_prefix_disp
= j
;
6278 /* Address size prefix will turn Disp64/Disp32S/Disp32/Disp16
6279 operand into Disp32/Disp32/Disp16/Disp32 operand. */
6283 override
= !override
;
6286 if (operand_types
[j
].bitfield
.disp32
6287 && operand_types
[j
].bitfield
.disp16
)
6289 operand_types
[j
].bitfield
.disp16
= override
;
6290 operand_types
[j
].bitfield
.disp32
= !override
;
6292 operand_types
[j
].bitfield
.disp32s
= 0;
6293 operand_types
[j
].bitfield
.disp64
= 0;
6297 if (operand_types
[j
].bitfield
.disp32s
6298 || operand_types
[j
].bitfield
.disp64
)
6300 operand_types
[j
].bitfield
.disp64
&= !override
;
6301 operand_types
[j
].bitfield
.disp32s
&= !override
;
6302 operand_types
[j
].bitfield
.disp32
= override
;
6304 operand_types
[j
].bitfield
.disp16
= 0;
6310 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
6311 if (i
.reloc
[0] == BFD_RELOC_386_GOT32
&& t
->base_opcode
== 0xa0)
6314 /* We check register size if needed. */
6315 if (t
->opcode_modifier
.checkregsize
)
6317 check_register
= (1 << t
->operands
) - 1;
6319 check_register
&= ~(1 << i
.broadcast
->operand
);
6324 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
6325 switch (t
->operands
)
6328 if (!operand_type_match (overlap0
, i
.types
[0]))
6332 /* xchg %eax, %eax is a special case. It is an alias for nop
6333 only in 32bit mode and we can use opcode 0x90. In 64bit
6334 mode, we can't use 0x90 for xchg %eax, %eax since it should
6335 zero-extend %eax to %rax. */
6336 if (flag_code
== CODE_64BIT
6337 && t
->base_opcode
== 0x90
6338 && i
.types
[0].bitfield
.instance
== Accum
6339 && i
.types
[0].bitfield
.dword
6340 && i
.types
[1].bitfield
.instance
== Accum
6341 && i
.types
[1].bitfield
.dword
)
6343 /* xrelease mov %eax, <disp> is another special case. It must not
6344 match the accumulator-only encoding of mov. */
6345 if (flag_code
!= CODE_64BIT
6347 && t
->base_opcode
== 0xa0
6348 && i
.types
[0].bitfield
.instance
== Accum
6349 && (i
.flags
[1] & Operand_Mem
))
6354 if (!(size_match
& MATCH_STRAIGHT
))
6356 /* Reverse direction of operands if swapping is possible in the first
6357 place (operands need to be symmetric) and
6358 - the load form is requested, and the template is a store form,
6359 - the store form is requested, and the template is a load form,
6360 - the non-default (swapped) form is requested. */
6361 overlap1
= operand_type_and (operand_types
[0], operand_types
[1]);
6362 if (t
->opcode_modifier
.d
&& i
.reg_operands
== i
.operands
6363 && !operand_type_all_zero (&overlap1
))
6364 switch (i
.dir_encoding
)
6366 case dir_encoding_load
:
6367 if (operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6368 || t
->opcode_modifier
.regmem
)
6372 case dir_encoding_store
:
6373 if (!operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6374 && !t
->opcode_modifier
.regmem
)
6378 case dir_encoding_swap
:
6381 case dir_encoding_default
:
6384 /* If we want store form, we skip the current load. */
6385 if ((i
.dir_encoding
== dir_encoding_store
6386 || i
.dir_encoding
== dir_encoding_swap
)
6387 && i
.mem_operands
== 0
6388 && t
->opcode_modifier
.load
)
6393 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
6394 if (!operand_type_match (overlap0
, i
.types
[0])
6395 || !operand_type_match (overlap1
, i
.types
[1])
6396 || ((check_register
& 3) == 3
6397 && !operand_type_register_match (i
.types
[0],
6402 /* Check if other direction is valid ... */
6403 if (!t
->opcode_modifier
.d
)
6407 if (!(size_match
& MATCH_REVERSE
))
6409 /* Try reversing direction of operands. */
6410 overlap0
= operand_type_and (i
.types
[0], operand_types
[i
.operands
- 1]);
6411 overlap1
= operand_type_and (i
.types
[i
.operands
- 1], operand_types
[0]);
6412 if (!operand_type_match (overlap0
, i
.types
[0])
6413 || !operand_type_match (overlap1
, i
.types
[i
.operands
- 1])
6415 && !operand_type_register_match (i
.types
[0],
6416 operand_types
[i
.operands
- 1],
6417 i
.types
[i
.operands
- 1],
6420 /* Does not match either direction. */
6423 /* found_reverse_match holds which of D or FloatR
6425 if (!t
->opcode_modifier
.d
)
6426 found_reverse_match
= 0;
6427 else if (operand_types
[0].bitfield
.tbyte
)
6428 found_reverse_match
= Opcode_FloatD
;
6429 else if (operand_types
[0].bitfield
.xmmword
6430 || operand_types
[i
.operands
- 1].bitfield
.xmmword
6431 || operand_types
[0].bitfield
.class == RegMMX
6432 || operand_types
[i
.operands
- 1].bitfield
.class == RegMMX
6433 || is_any_vex_encoding(t
))
6434 found_reverse_match
= (t
->base_opcode
& 0xee) != 0x6e
6435 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
6437 found_reverse_match
= Opcode_D
;
6438 if (t
->opcode_modifier
.floatr
)
6439 found_reverse_match
|= Opcode_FloatR
;
6443 /* Found a forward 2 operand match here. */
6444 switch (t
->operands
)
6447 overlap4
= operand_type_and (i
.types
[4],
6451 overlap3
= operand_type_and (i
.types
[3],
6455 overlap2
= operand_type_and (i
.types
[2],
6460 switch (t
->operands
)
6463 if (!operand_type_match (overlap4
, i
.types
[4])
6464 || !operand_type_register_match (i
.types
[3],
6471 if (!operand_type_match (overlap3
, i
.types
[3])
6472 || ((check_register
& 0xa) == 0xa
6473 && !operand_type_register_match (i
.types
[1],
6477 || ((check_register
& 0xc) == 0xc
6478 && !operand_type_register_match (i
.types
[2],
6485 /* Here we make use of the fact that there are no
6486 reverse match 3 operand instructions. */
6487 if (!operand_type_match (overlap2
, i
.types
[2])
6488 || ((check_register
& 5) == 5
6489 && !operand_type_register_match (i
.types
[0],
6493 || ((check_register
& 6) == 6
6494 && !operand_type_register_match (i
.types
[1],
6502 /* Found either forward/reverse 2, 3 or 4 operand match here:
6503 slip through to break. */
6506 /* Check if vector and VEX operands are valid. */
6507 if (check_VecOperands (t
) || VEX_check_operands (t
))
6509 specific_error
= i
.error
;
6513 /* We've found a match; break out of loop. */
6517 if (t
== current_templates
->end
)
6519 /* We found no match. */
6520 const char *err_msg
;
6521 switch (specific_error
? specific_error
: i
.error
)
6525 case operand_size_mismatch
:
6526 err_msg
= _("operand size mismatch");
6528 case operand_type_mismatch
:
6529 err_msg
= _("operand type mismatch");
6531 case register_type_mismatch
:
6532 err_msg
= _("register type mismatch");
6534 case number_of_operands_mismatch
:
6535 err_msg
= _("number of operands mismatch");
6537 case invalid_instruction_suffix
:
6538 err_msg
= _("invalid instruction suffix");
6541 err_msg
= _("constant doesn't fit in 4 bits");
6543 case unsupported_with_intel_mnemonic
:
6544 err_msg
= _("unsupported with Intel mnemonic");
6546 case unsupported_syntax
:
6547 err_msg
= _("unsupported syntax");
6550 as_bad (_("unsupported instruction `%s'"),
6551 current_templates
->start
->name
);
6553 case invalid_vsib_address
:
6554 err_msg
= _("invalid VSIB address");
6556 case invalid_vector_register_set
:
6557 err_msg
= _("mask, index, and destination registers must be distinct");
6559 case unsupported_vector_index_register
:
6560 err_msg
= _("unsupported vector index register");
6562 case unsupported_broadcast
:
6563 err_msg
= _("unsupported broadcast");
6565 case broadcast_needed
:
6566 err_msg
= _("broadcast is needed for operand of such type");
6568 case unsupported_masking
:
6569 err_msg
= _("unsupported masking");
6571 case mask_not_on_destination
:
6572 err_msg
= _("mask not on destination operand");
6574 case no_default_mask
:
6575 err_msg
= _("default mask isn't allowed");
6577 case unsupported_rc_sae
:
6578 err_msg
= _("unsupported static rounding/sae");
6580 case rc_sae_operand_not_last_imm
:
6582 err_msg
= _("RC/SAE operand must precede immediate operands");
6584 err_msg
= _("RC/SAE operand must follow immediate operands");
6586 case invalid_register_operand
:
6587 err_msg
= _("invalid register operand");
6590 as_bad (_("%s for `%s'"), err_msg
,
6591 current_templates
->start
->name
);
6595 if (!quiet_warnings
)
6598 && (i
.jumpabsolute
!= (t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)))
6599 as_warn (_("indirect %s without `*'"), t
->name
);
6601 if (t
->opcode_modifier
.isprefix
6602 && t
->opcode_modifier
.mnemonicsize
== IGNORESIZE
)
6604 /* Warn them that a data or address size prefix doesn't
6605 affect assembly of the next line of code. */
6606 as_warn (_("stand-alone `%s' prefix"), t
->name
);
6610 /* Copy the template we found. */
6613 if (addr_prefix_disp
!= -1)
6614 i
.tm
.operand_types
[addr_prefix_disp
]
6615 = operand_types
[addr_prefix_disp
];
6617 if (found_reverse_match
)
6619 /* If we found a reverse match we must alter the opcode direction
6620 bit and clear/flip the regmem modifier one. found_reverse_match
6621 holds bits to change (different for int & float insns). */
6623 i
.tm
.base_opcode
^= found_reverse_match
;
6625 i
.tm
.operand_types
[0] = operand_types
[i
.operands
- 1];
6626 i
.tm
.operand_types
[i
.operands
- 1] = operand_types
[0];
6628 /* Certain SIMD insns have their load forms specified in the opcode
6629 table, and hence we need to _set_ RegMem instead of clearing it.
6630 We need to avoid setting the bit though on insns like KMOVW. */
6631 i
.tm
.opcode_modifier
.regmem
6632 = i
.tm
.opcode_modifier
.modrm
&& i
.tm
.opcode_modifier
.d
6633 && i
.tm
.operands
> 2U - i
.tm
.opcode_modifier
.sse2avx
6634 && !i
.tm
.opcode_modifier
.regmem
;
6643 unsigned int es_op
= i
.tm
.opcode_modifier
.isstring
- IS_STRING_ES_OP0
;
6644 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.baseindex
? es_op
: 0;
6646 if (i
.seg
[op
] != NULL
&& i
.seg
[op
] != &es
)
6648 as_bad (_("`%s' operand %u must use `%ses' segment"),
6650 intel_syntax
? i
.tm
.operands
- es_op
: es_op
+ 1,
6655 /* There's only ever one segment override allowed per instruction.
6656 This instruction possibly has a legal segment override on the
6657 second operand, so copy the segment to where non-string
6658 instructions store it, allowing common code. */
6659 i
.seg
[op
] = i
.seg
[1];
6665 process_suffix (void)
6667 /* If matched instruction specifies an explicit instruction mnemonic
6669 if (i
.tm
.opcode_modifier
.size
== SIZE16
)
6670 i
.suffix
= WORD_MNEM_SUFFIX
;
6671 else if (i
.tm
.opcode_modifier
.size
== SIZE32
)
6672 i
.suffix
= LONG_MNEM_SUFFIX
;
6673 else if (i
.tm
.opcode_modifier
.size
== SIZE64
)
6674 i
.suffix
= QWORD_MNEM_SUFFIX
;
6675 else if (i
.reg_operands
6676 && (i
.operands
> 1 || i
.types
[0].bitfield
.class == Reg
)
6677 && !i
.tm
.opcode_modifier
.addrprefixopreg
)
6679 unsigned int numop
= i
.operands
;
6681 /* movsx/movzx want only their source operand considered here, for the
6682 ambiguity checking below. The suffix will be replaced afterwards
6683 to represent the destination (register). */
6684 if (((i
.tm
.base_opcode
| 8) == 0xfbe && i
.tm
.opcode_modifier
.w
)
6685 || (i
.tm
.base_opcode
== 0x63 && i
.tm
.cpu_flags
.bitfield
.cpu64
))
6688 /* crc32 needs REX.W set regardless of suffix / source operand size. */
6689 if (i
.tm
.base_opcode
== 0xf20f38f0
6690 && i
.tm
.operand_types
[1].bitfield
.qword
)
6693 /* If there's no instruction mnemonic suffix we try to invent one
6694 based on GPR operands. */
6697 /* We take i.suffix from the last register operand specified,
6698 Destination register type is more significant than source
6699 register type. crc32 in SSE4.2 prefers source register
6701 unsigned int op
= i
.tm
.base_opcode
!= 0xf20f38f0 ? i
.operands
: 1;
6704 if (i
.tm
.operand_types
[op
].bitfield
.instance
== InstanceNone
6705 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6707 if (i
.types
[op
].bitfield
.class != Reg
)
6709 if (i
.types
[op
].bitfield
.byte
)
6710 i
.suffix
= BYTE_MNEM_SUFFIX
;
6711 else if (i
.types
[op
].bitfield
.word
)
6712 i
.suffix
= WORD_MNEM_SUFFIX
;
6713 else if (i
.types
[op
].bitfield
.dword
)
6714 i
.suffix
= LONG_MNEM_SUFFIX
;
6715 else if (i
.types
[op
].bitfield
.qword
)
6716 i
.suffix
= QWORD_MNEM_SUFFIX
;
6722 /* As an exception, movsx/movzx silently default to a byte source
6724 if ((i
.tm
.base_opcode
| 8) == 0xfbe && i
.tm
.opcode_modifier
.w
6725 && !i
.suffix
&& !intel_syntax
)
6726 i
.suffix
= BYTE_MNEM_SUFFIX
;
6728 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
6731 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
6732 && i
.tm
.opcode_modifier
.no_bsuf
)
6734 else if (!check_byte_reg ())
6737 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
6740 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
6741 && i
.tm
.opcode_modifier
.no_lsuf
6742 && !i
.tm
.opcode_modifier
.todword
6743 && !i
.tm
.opcode_modifier
.toqword
)
6745 else if (!check_long_reg ())
6748 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
6751 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
6752 && i
.tm
.opcode_modifier
.no_qsuf
6753 && !i
.tm
.opcode_modifier
.todword
6754 && !i
.tm
.opcode_modifier
.toqword
)
6756 else if (!check_qword_reg ())
6759 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
6762 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
6763 && i
.tm
.opcode_modifier
.no_wsuf
)
6765 else if (!check_word_reg ())
6768 else if (intel_syntax
6769 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
)
6770 /* Do nothing if the instruction is going to ignore the prefix. */
6775 /* Undo the movsx/movzx change done above. */
6778 else if (i
.tm
.opcode_modifier
.mnemonicsize
== DEFAULTSIZE
6781 i
.suffix
= stackop_size
;
6782 if (stackop_size
== LONG_MNEM_SUFFIX
)
6784 /* stackop_size is set to LONG_MNEM_SUFFIX for the
6785 .code16gcc directive to support 16-bit mode with
6786 32-bit address. For IRET without a suffix, generate
6787 16-bit IRET (opcode 0xcf) to return from an interrupt
6789 if (i
.tm
.base_opcode
== 0xcf)
6791 i
.suffix
= WORD_MNEM_SUFFIX
;
6792 as_warn (_("generating 16-bit `iret' for .code16gcc directive"));
6794 /* Warn about changed behavior for segment register push/pop. */
6795 else if ((i
.tm
.base_opcode
| 1) == 0x07)
6796 as_warn (_("generating 32-bit `%s', unlike earlier gas versions"),
6801 && (i
.tm
.opcode_modifier
.jump
== JUMP_ABSOLUTE
6802 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
6803 || i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
6804 || (i
.tm
.base_opcode
== 0x0f01 /* [ls][gi]dt */
6805 && i
.tm
.extension_opcode
<= 3)))
6810 if (!i
.tm
.opcode_modifier
.no_qsuf
)
6812 i
.suffix
= QWORD_MNEM_SUFFIX
;
6817 if (!i
.tm
.opcode_modifier
.no_lsuf
)
6818 i
.suffix
= LONG_MNEM_SUFFIX
;
6821 if (!i
.tm
.opcode_modifier
.no_wsuf
)
6822 i
.suffix
= WORD_MNEM_SUFFIX
;
6828 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
6829 /* Also cover lret/retf/iret in 64-bit mode. */
6830 || (flag_code
== CODE_64BIT
6831 && !i
.tm
.opcode_modifier
.no_lsuf
6832 && !i
.tm
.opcode_modifier
.no_qsuf
))
6833 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
6834 /* Accept FLDENV et al without suffix. */
6835 && (i
.tm
.opcode_modifier
.no_ssuf
|| i
.tm
.opcode_modifier
.floatmf
))
6837 unsigned int suffixes
, evex
= 0;
6839 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
6840 if (!i
.tm
.opcode_modifier
.no_wsuf
)
6842 if (!i
.tm
.opcode_modifier
.no_lsuf
)
6844 if (!i
.tm
.opcode_modifier
.no_ldsuf
)
6846 if (!i
.tm
.opcode_modifier
.no_ssuf
)
6848 if (flag_code
== CODE_64BIT
&& !i
.tm
.opcode_modifier
.no_qsuf
)
6851 /* For [XYZ]MMWORD operands inspect operand sizes. While generally
6852 also suitable for AT&T syntax mode, it was requested that this be
6853 restricted to just Intel syntax. */
6854 if (intel_syntax
&& is_any_vex_encoding (&i
.tm
) && !i
.broadcast
)
6858 for (op
= 0; op
< i
.tm
.operands
; ++op
)
6860 if (is_evex_encoding (&i
.tm
)
6861 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
6863 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
6864 i
.tm
.operand_types
[op
].bitfield
.xmmword
= 0;
6865 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
6866 i
.tm
.operand_types
[op
].bitfield
.ymmword
= 0;
6867 if (!i
.tm
.opcode_modifier
.evex
6868 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
6869 i
.tm
.opcode_modifier
.evex
= EVEX512
;
6872 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
6873 + i
.tm
.operand_types
[op
].bitfield
.ymmword
6874 + i
.tm
.operand_types
[op
].bitfield
.zmmword
< 2)
6877 /* Any properly sized operand disambiguates the insn. */
6878 if (i
.types
[op
].bitfield
.xmmword
6879 || i
.types
[op
].bitfield
.ymmword
6880 || i
.types
[op
].bitfield
.zmmword
)
6882 suffixes
&= ~(7 << 6);
6887 if ((i
.flags
[op
] & Operand_Mem
)
6888 && i
.tm
.operand_types
[op
].bitfield
.unspecified
)
6890 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
)
6892 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
6894 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
6896 if (is_evex_encoding (&i
.tm
))
6902 /* Are multiple suffixes / operand sizes allowed? */
6903 if (suffixes
& (suffixes
- 1))
6906 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
6907 || operand_check
== check_error
))
6909 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
6912 if (operand_check
== check_error
)
6914 as_bad (_("no instruction mnemonic suffix given and "
6915 "no register operands; can't size `%s'"), i
.tm
.name
);
6918 if (operand_check
== check_warning
)
6919 as_warn (_("%s; using default for `%s'"),
6921 ? _("ambiguous operand size")
6922 : _("no instruction mnemonic suffix given and "
6923 "no register operands"),
6926 if (i
.tm
.opcode_modifier
.floatmf
)
6927 i
.suffix
= SHORT_MNEM_SUFFIX
;
6928 else if ((i
.tm
.base_opcode
| 8) == 0xfbe
6929 || (i
.tm
.base_opcode
== 0x63
6930 && i
.tm
.cpu_flags
.bitfield
.cpu64
))
6931 /* handled below */;
6933 i
.tm
.opcode_modifier
.evex
= evex
;
6934 else if (flag_code
== CODE_16BIT
)
6935 i
.suffix
= WORD_MNEM_SUFFIX
;
6936 else if (!i
.tm
.opcode_modifier
.no_lsuf
)
6937 i
.suffix
= LONG_MNEM_SUFFIX
;
6939 i
.suffix
= QWORD_MNEM_SUFFIX
;
6943 if ((i
.tm
.base_opcode
| 8) == 0xfbe
6944 || (i
.tm
.base_opcode
== 0x63 && i
.tm
.cpu_flags
.bitfield
.cpu64
))
6946 /* In Intel syntax, movsx/movzx must have a "suffix" (checked above).
6947 In AT&T syntax, if there is no suffix (warned about above), the default
6948 will be byte extension. */
6949 if (i
.tm
.opcode_modifier
.w
&& i
.suffix
&& i
.suffix
!= BYTE_MNEM_SUFFIX
)
6950 i
.tm
.base_opcode
|= 1;
6952 /* For further processing, the suffix should represent the destination
6953 (register). This is already the case when one was used with
6954 mov[sz][bw]*, but we need to replace it for mov[sz]x, or if there was
6955 no suffix to begin with. */
6956 if (i
.tm
.opcode_modifier
.w
|| i
.tm
.base_opcode
== 0x63 || !i
.suffix
)
6958 if (i
.types
[1].bitfield
.word
)
6959 i
.suffix
= WORD_MNEM_SUFFIX
;
6960 else if (i
.types
[1].bitfield
.qword
)
6961 i
.suffix
= QWORD_MNEM_SUFFIX
;
6963 i
.suffix
= LONG_MNEM_SUFFIX
;
6965 i
.tm
.opcode_modifier
.w
= 0;
6969 if (!i
.tm
.opcode_modifier
.modrm
&& i
.reg_operands
&& i
.tm
.operands
< 3)
6970 i
.short_form
= (i
.tm
.operand_types
[0].bitfield
.class == Reg
)
6971 != (i
.tm
.operand_types
[1].bitfield
.class == Reg
);
6973 /* Change the opcode based on the operand size given by i.suffix. */
6976 /* Size floating point instruction. */
6977 case LONG_MNEM_SUFFIX
:
6978 if (i
.tm
.opcode_modifier
.floatmf
)
6980 i
.tm
.base_opcode
^= 4;
6984 case WORD_MNEM_SUFFIX
:
6985 case QWORD_MNEM_SUFFIX
:
6986 /* It's not a byte, select word/dword operation. */
6987 if (i
.tm
.opcode_modifier
.w
)
6990 i
.tm
.base_opcode
|= 8;
6992 i
.tm
.base_opcode
|= 1;
6995 case SHORT_MNEM_SUFFIX
:
6996 /* Now select between word & dword operations via the operand
6997 size prefix, except for instructions that will ignore this
6999 if (i
.suffix
!= QWORD_MNEM_SUFFIX
7000 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
7001 && !i
.tm
.opcode_modifier
.floatmf
7002 && !is_any_vex_encoding (&i
.tm
)
7003 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
7004 || (flag_code
== CODE_64BIT
7005 && i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)))
7007 unsigned int prefix
= DATA_PREFIX_OPCODE
;
7009 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
) /* jcxz, loop */
7010 prefix
= ADDR_PREFIX_OPCODE
;
7012 if (!add_prefix (prefix
))
7016 /* Set mode64 for an operand. */
7017 if (i
.suffix
== QWORD_MNEM_SUFFIX
7018 && flag_code
== CODE_64BIT
7019 && !i
.tm
.opcode_modifier
.norex64
7020 && !i
.tm
.opcode_modifier
.vexw
7021 /* Special case for xchg %rax,%rax. It is NOP and doesn't
7023 && ! (i
.operands
== 2
7024 && i
.tm
.base_opcode
== 0x90
7025 && i
.tm
.extension_opcode
== None
7026 && i
.types
[0].bitfield
.instance
== Accum
7027 && i
.types
[0].bitfield
.qword
7028 && i
.types
[1].bitfield
.instance
== Accum
7029 && i
.types
[1].bitfield
.qword
))
7035 if (i
.tm
.opcode_modifier
.addrprefixopreg
)
7037 gas_assert (!i
.suffix
);
7038 gas_assert (i
.reg_operands
);
7040 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7043 /* The address size override prefix changes the size of the
7045 if (flag_code
== CODE_64BIT
7046 && i
.op
[0].regs
->reg_type
.bitfield
.word
)
7048 as_bad (_("16-bit addressing unavailable for `%s'"),
7053 if ((flag_code
== CODE_32BIT
7054 ? i
.op
[0].regs
->reg_type
.bitfield
.word
7055 : i
.op
[0].regs
->reg_type
.bitfield
.dword
)
7056 && !add_prefix (ADDR_PREFIX_OPCODE
))
7061 /* Check invalid register operand when the address size override
7062 prefix changes the size of register operands. */
7064 enum { need_word
, need_dword
, need_qword
} need
;
7066 if (flag_code
== CODE_32BIT
)
7067 need
= i
.prefix
[ADDR_PREFIX
] ? need_word
: need_dword
;
7068 else if (i
.prefix
[ADDR_PREFIX
])
7071 need
= flag_code
== CODE_64BIT
? need_qword
: need_word
;
7073 for (op
= 0; op
< i
.operands
; op
++)
7075 if (i
.types
[op
].bitfield
.class != Reg
)
7081 if (i
.op
[op
].regs
->reg_type
.bitfield
.word
)
7085 if (i
.op
[op
].regs
->reg_type
.bitfield
.dword
)
7089 if (i
.op
[op
].regs
->reg_type
.bitfield
.qword
)
7094 as_bad (_("invalid register operand size for `%s'"),
7105 check_byte_reg (void)
7109 for (op
= i
.operands
; --op
>= 0;)
7111 /* Skip non-register operands. */
7112 if (i
.types
[op
].bitfield
.class != Reg
)
7115 /* If this is an eight bit register, it's OK. If it's the 16 or
7116 32 bit version of an eight bit register, we will just use the
7117 low portion, and that's OK too. */
7118 if (i
.types
[op
].bitfield
.byte
)
7121 /* I/O port address operands are OK too. */
7122 if (i
.tm
.operand_types
[op
].bitfield
.instance
== RegD
7123 && i
.tm
.operand_types
[op
].bitfield
.word
)
7126 /* crc32 only wants its source operand checked here. */
7127 if (i
.tm
.base_opcode
== 0xf20f38f0 && op
)
7130 /* Any other register is bad. */
7131 if (i
.types
[op
].bitfield
.class == Reg
7132 || i
.types
[op
].bitfield
.class == RegMMX
7133 || i
.types
[op
].bitfield
.class == RegSIMD
7134 || i
.types
[op
].bitfield
.class == SReg
7135 || i
.types
[op
].bitfield
.class == RegCR
7136 || i
.types
[op
].bitfield
.class == RegDR
7137 || i
.types
[op
].bitfield
.class == RegTR
)
7139 as_bad (_("`%s%s' not allowed with `%s%c'"),
7141 i
.op
[op
].regs
->reg_name
,
7151 check_long_reg (void)
7155 for (op
= i
.operands
; --op
>= 0;)
7156 /* Skip non-register operands. */
7157 if (i
.types
[op
].bitfield
.class != Reg
)
7159 /* Reject eight bit registers, except where the template requires
7160 them. (eg. movzb) */
7161 else if (i
.types
[op
].bitfield
.byte
7162 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7163 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7164 && (i
.tm
.operand_types
[op
].bitfield
.word
7165 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7167 as_bad (_("`%s%s' not allowed with `%s%c'"),
7169 i
.op
[op
].regs
->reg_name
,
7174 /* Error if the e prefix on a general reg is missing. */
7175 else if (i
.types
[op
].bitfield
.word
7176 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7177 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7178 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7180 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7181 register_prefix
, i
.op
[op
].regs
->reg_name
,
7185 /* Warn if the r prefix on a general reg is present. */
7186 else if (i
.types
[op
].bitfield
.qword
7187 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7188 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7189 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7192 && i
.tm
.opcode_modifier
.toqword
7193 && i
.types
[0].bitfield
.class != RegSIMD
)
7195 /* Convert to QWORD. We want REX byte. */
7196 i
.suffix
= QWORD_MNEM_SUFFIX
;
7200 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7201 register_prefix
, i
.op
[op
].regs
->reg_name
,
7210 check_qword_reg (void)
7214 for (op
= i
.operands
; --op
>= 0; )
7215 /* Skip non-register operands. */
7216 if (i
.types
[op
].bitfield
.class != Reg
)
7218 /* Reject eight bit registers, except where the template requires
7219 them. (eg. movzb) */
7220 else if (i
.types
[op
].bitfield
.byte
7221 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7222 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7223 && (i
.tm
.operand_types
[op
].bitfield
.word
7224 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7226 as_bad (_("`%s%s' not allowed with `%s%c'"),
7228 i
.op
[op
].regs
->reg_name
,
7233 /* Warn if the r prefix on a general reg is missing. */
7234 else if ((i
.types
[op
].bitfield
.word
7235 || i
.types
[op
].bitfield
.dword
)
7236 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7237 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7238 && i
.tm
.operand_types
[op
].bitfield
.qword
)
7240 /* Prohibit these changes in the 64bit mode, since the
7241 lowering is more complicated. */
7243 && i
.tm
.opcode_modifier
.todword
7244 && i
.types
[0].bitfield
.class != RegSIMD
)
7246 /* Convert to DWORD. We don't want REX byte. */
7247 i
.suffix
= LONG_MNEM_SUFFIX
;
7251 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7252 register_prefix
, i
.op
[op
].regs
->reg_name
,
7261 check_word_reg (void)
7264 for (op
= i
.operands
; --op
>= 0;)
7265 /* Skip non-register operands. */
7266 if (i
.types
[op
].bitfield
.class != Reg
)
7268 /* Reject eight bit registers, except where the template requires
7269 them. (eg. movzb) */
7270 else if (i
.types
[op
].bitfield
.byte
7271 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7272 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7273 && (i
.tm
.operand_types
[op
].bitfield
.word
7274 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7276 as_bad (_("`%s%s' not allowed with `%s%c'"),
7278 i
.op
[op
].regs
->reg_name
,
7283 /* Error if the e or r prefix on a general reg is present. */
7284 else if ((i
.types
[op
].bitfield
.dword
7285 || i
.types
[op
].bitfield
.qword
)
7286 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7287 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7288 && i
.tm
.operand_types
[op
].bitfield
.word
)
7290 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7291 register_prefix
, i
.op
[op
].regs
->reg_name
,
7299 update_imm (unsigned int j
)
7301 i386_operand_type overlap
= i
.types
[j
];
7302 if ((overlap
.bitfield
.imm8
7303 || overlap
.bitfield
.imm8s
7304 || overlap
.bitfield
.imm16
7305 || overlap
.bitfield
.imm32
7306 || overlap
.bitfield
.imm32s
7307 || overlap
.bitfield
.imm64
)
7308 && !operand_type_equal (&overlap
, &imm8
)
7309 && !operand_type_equal (&overlap
, &imm8s
)
7310 && !operand_type_equal (&overlap
, &imm16
)
7311 && !operand_type_equal (&overlap
, &imm32
)
7312 && !operand_type_equal (&overlap
, &imm32s
)
7313 && !operand_type_equal (&overlap
, &imm64
))
7317 i386_operand_type temp
;
7319 operand_type_set (&temp
, 0);
7320 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
7322 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
7323 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
7325 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
7326 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
7327 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
7329 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
7330 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
7333 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
7336 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
7337 || operand_type_equal (&overlap
, &imm16_32
)
7338 || operand_type_equal (&overlap
, &imm16_32s
))
7340 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
7345 if (!operand_type_equal (&overlap
, &imm8
)
7346 && !operand_type_equal (&overlap
, &imm8s
)
7347 && !operand_type_equal (&overlap
, &imm16
)
7348 && !operand_type_equal (&overlap
, &imm32
)
7349 && !operand_type_equal (&overlap
, &imm32s
)
7350 && !operand_type_equal (&overlap
, &imm64
))
7352 as_bad (_("no instruction mnemonic suffix given; "
7353 "can't determine immediate size"));
7357 i
.types
[j
] = overlap
;
7367 /* Update the first 2 immediate operands. */
7368 n
= i
.operands
> 2 ? 2 : i
.operands
;
7371 for (j
= 0; j
< n
; j
++)
7372 if (update_imm (j
) == 0)
7375 /* The 3rd operand can't be immediate operand. */
7376 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
7383 process_operands (void)
7385 /* Default segment register this instruction will use for memory
7386 accesses. 0 means unknown. This is only for optimizing out
7387 unnecessary segment overrides. */
7388 const seg_entry
*default_seg
= 0;
7390 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
7392 unsigned int dupl
= i
.operands
;
7393 unsigned int dest
= dupl
- 1;
7396 /* The destination must be an xmm register. */
7397 gas_assert (i
.reg_operands
7398 && MAX_OPERANDS
> dupl
7399 && operand_type_equal (&i
.types
[dest
], ®xmm
));
7401 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7402 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7404 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
7406 /* Keep xmm0 for instructions with VEX prefix and 3
7408 i
.tm
.operand_types
[0].bitfield
.instance
= InstanceNone
;
7409 i
.tm
.operand_types
[0].bitfield
.class = RegSIMD
;
7414 /* We remove the first xmm0 and keep the number of
7415 operands unchanged, which in fact duplicates the
7417 for (j
= 1; j
< i
.operands
; j
++)
7419 i
.op
[j
- 1] = i
.op
[j
];
7420 i
.types
[j
- 1] = i
.types
[j
];
7421 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7422 i
.flags
[j
- 1] = i
.flags
[j
];
7426 else if (i
.tm
.opcode_modifier
.implicit1stxmm0
)
7428 gas_assert ((MAX_OPERANDS
- 1) > dupl
7429 && (i
.tm
.opcode_modifier
.vexsources
7432 /* Add the implicit xmm0 for instructions with VEX prefix
7434 for (j
= i
.operands
; j
> 0; j
--)
7436 i
.op
[j
] = i
.op
[j
- 1];
7437 i
.types
[j
] = i
.types
[j
- 1];
7438 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
7439 i
.flags
[j
] = i
.flags
[j
- 1];
7442 = (const reg_entry
*) hash_find (reg_hash
, "xmm0");
7443 i
.types
[0] = regxmm
;
7444 i
.tm
.operand_types
[0] = regxmm
;
7447 i
.reg_operands
+= 2;
7452 i
.op
[dupl
] = i
.op
[dest
];
7453 i
.types
[dupl
] = i
.types
[dest
];
7454 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7455 i
.flags
[dupl
] = i
.flags
[dest
];
7464 i
.op
[dupl
] = i
.op
[dest
];
7465 i
.types
[dupl
] = i
.types
[dest
];
7466 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7467 i
.flags
[dupl
] = i
.flags
[dest
];
7470 if (i
.tm
.opcode_modifier
.immext
)
7473 else if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7474 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7478 for (j
= 1; j
< i
.operands
; j
++)
7480 i
.op
[j
- 1] = i
.op
[j
];
7481 i
.types
[j
- 1] = i
.types
[j
];
7483 /* We need to adjust fields in i.tm since they are used by
7484 build_modrm_byte. */
7485 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7487 i
.flags
[j
- 1] = i
.flags
[j
];
7494 else if (i
.tm
.opcode_modifier
.implicitquadgroup
)
7496 unsigned int regnum
, first_reg_in_group
, last_reg_in_group
;
7498 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
7499 gas_assert (i
.operands
>= 2 && i
.types
[1].bitfield
.class == RegSIMD
);
7500 regnum
= register_number (i
.op
[1].regs
);
7501 first_reg_in_group
= regnum
& ~3;
7502 last_reg_in_group
= first_reg_in_group
+ 3;
7503 if (regnum
!= first_reg_in_group
)
7504 as_warn (_("source register `%s%s' implicitly denotes"
7505 " `%s%.3s%u' to `%s%.3s%u' source group in `%s'"),
7506 register_prefix
, i
.op
[1].regs
->reg_name
,
7507 register_prefix
, i
.op
[1].regs
->reg_name
, first_reg_in_group
,
7508 register_prefix
, i
.op
[1].regs
->reg_name
, last_reg_in_group
,
7511 else if (i
.tm
.opcode_modifier
.regkludge
)
7513 /* The imul $imm, %reg instruction is converted into
7514 imul $imm, %reg, %reg, and the clr %reg instruction
7515 is converted into xor %reg, %reg. */
7517 unsigned int first_reg_op
;
7519 if (operand_type_check (i
.types
[0], reg
))
7523 /* Pretend we saw the extra register operand. */
7524 gas_assert (i
.reg_operands
== 1
7525 && i
.op
[first_reg_op
+ 1].regs
== 0);
7526 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
7527 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
7532 if (i
.tm
.opcode_modifier
.modrm
)
7534 /* The opcode is completed (modulo i.tm.extension_opcode which
7535 must be put into the modrm byte). Now, we make the modrm and
7536 index base bytes based on all the info we've collected. */
7538 default_seg
= build_modrm_byte ();
7540 else if (i
.types
[0].bitfield
.class == SReg
)
7542 if (flag_code
!= CODE_64BIT
7543 ? i
.tm
.base_opcode
== POP_SEG_SHORT
7544 && i
.op
[0].regs
->reg_num
== 1
7545 : (i
.tm
.base_opcode
| 1) == POP_SEG386_SHORT
7546 && i
.op
[0].regs
->reg_num
< 4)
7548 as_bad (_("you can't `%s %s%s'"),
7549 i
.tm
.name
, register_prefix
, i
.op
[0].regs
->reg_name
);
7552 if ( i
.op
[0].regs
->reg_num
> 3 && i
.tm
.opcode_length
== 1 )
7554 i
.tm
.base_opcode
^= POP_SEG_SHORT
^ POP_SEG386_SHORT
;
7555 i
.tm
.opcode_length
= 2;
7557 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
7559 else if ((i
.tm
.base_opcode
& ~0x3) == MOV_AX_DISP32
)
7563 else if (i
.tm
.opcode_modifier
.isstring
)
7565 /* For the string instructions that allow a segment override
7566 on one of their operands, the default segment is ds. */
7569 else if (i
.short_form
)
7571 /* The register or float register operand is in operand
7573 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.class != Reg
;
7575 /* Register goes in low 3 bits of opcode. */
7576 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
7577 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7579 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
7581 /* Warn about some common errors, but press on regardless.
7582 The first case can be generated by gcc (<= 2.8.1). */
7583 if (i
.operands
== 2)
7585 /* Reversed arguments on faddp, fsubp, etc. */
7586 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
7587 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
7588 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
7592 /* Extraneous `l' suffix on fp insn. */
7593 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
7594 register_prefix
, i
.op
[0].regs
->reg_name
);
7599 if ((i
.seg
[0] || i
.prefix
[SEG_PREFIX
])
7600 && i
.tm
.base_opcode
== 0x8d /* lea */
7601 && !is_any_vex_encoding(&i
.tm
))
7603 if (!quiet_warnings
)
7604 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
7608 i
.prefix
[SEG_PREFIX
] = 0;
7612 /* If a segment was explicitly specified, and the specified segment
7613 is neither the default nor the one already recorded from a prefix,
7614 use an opcode prefix to select it. If we never figured out what
7615 the default segment is, then default_seg will be zero at this
7616 point, and the specified segment prefix will always be used. */
7618 && i
.seg
[0] != default_seg
7619 && i
.seg
[0]->seg_prefix
!= i
.prefix
[SEG_PREFIX
])
7621 if (!add_prefix (i
.seg
[0]->seg_prefix
))
7627 static const seg_entry
*
7628 build_modrm_byte (void)
7630 const seg_entry
*default_seg
= 0;
7631 unsigned int source
, dest
;
7634 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
7637 unsigned int nds
, reg_slot
;
7640 dest
= i
.operands
- 1;
7643 /* There are 2 kinds of instructions:
7644 1. 5 operands: 4 register operands or 3 register operands
7645 plus 1 memory operand plus one Imm4 operand, VexXDS, and
7646 VexW0 or VexW1. The destination must be either XMM, YMM or
7648 2. 4 operands: 4 register operands or 3 register operands
7649 plus 1 memory operand, with VexXDS. */
7650 gas_assert ((i
.reg_operands
== 4
7651 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
7652 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7653 && i
.tm
.opcode_modifier
.vexw
7654 && i
.tm
.operand_types
[dest
].bitfield
.class == RegSIMD
);
7656 /* If VexW1 is set, the first non-immediate operand is the source and
7657 the second non-immediate one is encoded in the immediate operand. */
7658 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
7660 source
= i
.imm_operands
;
7661 reg_slot
= i
.imm_operands
+ 1;
7665 source
= i
.imm_operands
+ 1;
7666 reg_slot
= i
.imm_operands
;
7669 if (i
.imm_operands
== 0)
7671 /* When there is no immediate operand, generate an 8bit
7672 immediate operand to encode the first operand. */
7673 exp
= &im_expressions
[i
.imm_operands
++];
7674 i
.op
[i
.operands
].imms
= exp
;
7675 i
.types
[i
.operands
] = imm8
;
7678 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
7679 exp
->X_op
= O_constant
;
7680 exp
->X_add_number
= register_number (i
.op
[reg_slot
].regs
) << 4;
7681 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
7685 gas_assert (i
.imm_operands
== 1);
7686 gas_assert (fits_in_imm4 (i
.op
[0].imms
->X_add_number
));
7687 gas_assert (!i
.tm
.opcode_modifier
.immext
);
7689 /* Turn on Imm8 again so that output_imm will generate it. */
7690 i
.types
[0].bitfield
.imm8
= 1;
7692 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
7693 i
.op
[0].imms
->X_add_number
7694 |= register_number (i
.op
[reg_slot
].regs
) << 4;
7695 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
7698 gas_assert (i
.tm
.operand_types
[nds
].bitfield
.class == RegSIMD
);
7699 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
7704 /* i.reg_operands MUST be the number of real register operands;
7705 implicit registers do not count. If there are 3 register
7706 operands, it must be an instruction with VexNDS. For an
7707 instruction with VexNDD, the destination register is encoded
7708 in VEX prefix. If there are 4 register operands, it must be
7709 an instruction with VEX prefix and 3 sources. */
7710 if (i
.mem_operands
== 0
7711 && ((i
.reg_operands
== 2
7712 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
7713 || (i
.reg_operands
== 3
7714 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7715 || (i
.reg_operands
== 4 && vex_3_sources
)))
7723 /* When there are 3 operands, one of them may be immediate,
7724 which may be the first or the last operand. Otherwise,
7725 the first operand must be shift count register (cl) or it
7726 is an instruction with VexNDS. */
7727 gas_assert (i
.imm_operands
== 1
7728 || (i
.imm_operands
== 0
7729 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7730 || (i
.types
[0].bitfield
.instance
== RegC
7731 && i
.types
[0].bitfield
.byte
))));
7732 if (operand_type_check (i
.types
[0], imm
)
7733 || (i
.types
[0].bitfield
.instance
== RegC
7734 && i
.types
[0].bitfield
.byte
))
7740 /* When there are 4 operands, the first two must be 8bit
7741 immediate operands. The source operand will be the 3rd
7744 For instructions with VexNDS, if the first operand is
7745 an imm8, the source operand is the 2nd one. If the last
7746 operand is imm8, the source operand is the first one. */
7747 gas_assert ((i
.imm_operands
== 2
7748 && i
.types
[0].bitfield
.imm8
7749 && i
.types
[1].bitfield
.imm8
)
7750 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7751 && i
.imm_operands
== 1
7752 && (i
.types
[0].bitfield
.imm8
7753 || i
.types
[i
.operands
- 1].bitfield
.imm8
7755 if (i
.imm_operands
== 2)
7759 if (i
.types
[0].bitfield
.imm8
)
7766 if (is_evex_encoding (&i
.tm
))
7768 /* For EVEX instructions, when there are 5 operands, the
7769 first one must be immediate operand. If the second one
7770 is immediate operand, the source operand is the 3rd
7771 one. If the last one is immediate operand, the source
7772 operand is the 2nd one. */
7773 gas_assert (i
.imm_operands
== 2
7774 && i
.tm
.opcode_modifier
.sae
7775 && operand_type_check (i
.types
[0], imm
));
7776 if (operand_type_check (i
.types
[1], imm
))
7778 else if (operand_type_check (i
.types
[4], imm
))
7792 /* RC/SAE operand could be between DEST and SRC. That happens
7793 when one operand is GPR and the other one is XMM/YMM/ZMM
7795 if (i
.rounding
&& i
.rounding
->operand
== (int) dest
)
7798 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7800 /* For instructions with VexNDS, the register-only source
7801 operand must be a 32/64bit integer, XMM, YMM, ZMM, or mask
7802 register. It is encoded in VEX prefix. */
7804 i386_operand_type op
;
7807 /* Check register-only source operand when two source
7808 operands are swapped. */
7809 if (!i
.tm
.operand_types
[source
].bitfield
.baseindex
7810 && i
.tm
.operand_types
[dest
].bitfield
.baseindex
)
7818 op
= i
.tm
.operand_types
[vvvv
];
7819 if ((dest
+ 1) >= i
.operands
7820 || ((op
.bitfield
.class != Reg
7821 || (!op
.bitfield
.dword
&& !op
.bitfield
.qword
))
7822 && op
.bitfield
.class != RegSIMD
7823 && !operand_type_equal (&op
, ®mask
)))
7825 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
7831 /* One of the register operands will be encoded in the i.rm.reg
7832 field, the other in the combined i.rm.mode and i.rm.regmem
7833 fields. If no form of this instruction supports a memory
7834 destination operand, then we assume the source operand may
7835 sometimes be a memory operand and so we need to store the
7836 destination in the i.rm.reg field. */
7837 if (!i
.tm
.opcode_modifier
.regmem
7838 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
7840 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
7841 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
7842 if (i
.op
[dest
].regs
->reg_type
.bitfield
.class == RegMMX
7843 || i
.op
[source
].regs
->reg_type
.bitfield
.class == RegMMX
)
7844 i
.has_regmmx
= TRUE
;
7845 else if (i
.op
[dest
].regs
->reg_type
.bitfield
.class == RegSIMD
7846 || i
.op
[source
].regs
->reg_type
.bitfield
.class == RegSIMD
)
7848 if (i
.types
[dest
].bitfield
.zmmword
7849 || i
.types
[source
].bitfield
.zmmword
)
7850 i
.has_regzmm
= TRUE
;
7851 else if (i
.types
[dest
].bitfield
.ymmword
7852 || i
.types
[source
].bitfield
.ymmword
)
7853 i
.has_regymm
= TRUE
;
7855 i
.has_regxmm
= TRUE
;
7857 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
7859 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
7861 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
7863 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
7868 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
7869 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
7870 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
7872 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
7874 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
7876 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
7879 if (flag_code
!= CODE_64BIT
&& (i
.rex
& REX_R
))
7881 if (i
.types
[!i
.tm
.opcode_modifier
.regmem
].bitfield
.class != RegCR
)
7884 add_prefix (LOCK_PREFIX_OPCODE
);
7888 { /* If it's not 2 reg operands... */
7893 unsigned int fake_zero_displacement
= 0;
7896 for (op
= 0; op
< i
.operands
; op
++)
7897 if (i
.flags
[op
] & Operand_Mem
)
7899 gas_assert (op
< i
.operands
);
7901 if (i
.tm
.opcode_modifier
.vecsib
)
7903 if (i
.index_reg
->reg_num
== RegIZ
)
7906 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7909 i
.sib
.base
= NO_BASE_REGISTER
;
7910 i
.sib
.scale
= i
.log2_scale_factor
;
7911 i
.types
[op
].bitfield
.disp8
= 0;
7912 i
.types
[op
].bitfield
.disp16
= 0;
7913 i
.types
[op
].bitfield
.disp64
= 0;
7914 if (flag_code
!= CODE_64BIT
|| i
.prefix
[ADDR_PREFIX
])
7916 /* Must be 32 bit */
7917 i
.types
[op
].bitfield
.disp32
= 1;
7918 i
.types
[op
].bitfield
.disp32s
= 0;
7922 i
.types
[op
].bitfield
.disp32
= 0;
7923 i
.types
[op
].bitfield
.disp32s
= 1;
7926 i
.sib
.index
= i
.index_reg
->reg_num
;
7927 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
7929 if ((i
.index_reg
->reg_flags
& RegVRex
) != 0)
7935 if (i
.base_reg
== 0)
7938 if (!i
.disp_operands
)
7939 fake_zero_displacement
= 1;
7940 if (i
.index_reg
== 0)
7942 i386_operand_type newdisp
;
7944 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7945 /* Operand is just <disp> */
7946 if (flag_code
== CODE_64BIT
)
7948 /* 64bit mode overwrites the 32bit absolute
7949 addressing by RIP relative addressing and
7950 absolute addressing is encoded by one of the
7951 redundant SIB forms. */
7952 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7953 i
.sib
.base
= NO_BASE_REGISTER
;
7954 i
.sib
.index
= NO_INDEX_REGISTER
;
7955 newdisp
= (!i
.prefix
[ADDR_PREFIX
] ? disp32s
: disp32
);
7957 else if ((flag_code
== CODE_16BIT
)
7958 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
7960 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
7965 i
.rm
.regmem
= NO_BASE_REGISTER
;
7968 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
7969 i
.types
[op
] = operand_type_or (i
.types
[op
], newdisp
);
7971 else if (!i
.tm
.opcode_modifier
.vecsib
)
7973 /* !i.base_reg && i.index_reg */
7974 if (i
.index_reg
->reg_num
== RegIZ
)
7975 i
.sib
.index
= NO_INDEX_REGISTER
;
7977 i
.sib
.index
= i
.index_reg
->reg_num
;
7978 i
.sib
.base
= NO_BASE_REGISTER
;
7979 i
.sib
.scale
= i
.log2_scale_factor
;
7980 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7981 i
.types
[op
].bitfield
.disp8
= 0;
7982 i
.types
[op
].bitfield
.disp16
= 0;
7983 i
.types
[op
].bitfield
.disp64
= 0;
7984 if (flag_code
!= CODE_64BIT
|| i
.prefix
[ADDR_PREFIX
])
7986 /* Must be 32 bit */
7987 i
.types
[op
].bitfield
.disp32
= 1;
7988 i
.types
[op
].bitfield
.disp32s
= 0;
7992 i
.types
[op
].bitfield
.disp32
= 0;
7993 i
.types
[op
].bitfield
.disp32s
= 1;
7995 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
7999 /* RIP addressing for 64bit mode. */
8000 else if (i
.base_reg
->reg_num
== RegIP
)
8002 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
8003 i
.rm
.regmem
= NO_BASE_REGISTER
;
8004 i
.types
[op
].bitfield
.disp8
= 0;
8005 i
.types
[op
].bitfield
.disp16
= 0;
8006 i
.types
[op
].bitfield
.disp32
= 0;
8007 i
.types
[op
].bitfield
.disp32s
= 1;
8008 i
.types
[op
].bitfield
.disp64
= 0;
8009 i
.flags
[op
] |= Operand_PCrel
;
8010 if (! i
.disp_operands
)
8011 fake_zero_displacement
= 1;
8013 else if (i
.base_reg
->reg_type
.bitfield
.word
)
8015 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
8016 switch (i
.base_reg
->reg_num
)
8019 if (i
.index_reg
== 0)
8021 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
8022 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
8026 if (i
.index_reg
== 0)
8029 if (operand_type_check (i
.types
[op
], disp
) == 0)
8031 /* fake (%bp) into 0(%bp) */
8032 i
.types
[op
].bitfield
.disp8
= 1;
8033 fake_zero_displacement
= 1;
8036 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
8037 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
8039 default: /* (%si) -> 4 or (%di) -> 5 */
8040 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
8042 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8044 else /* i.base_reg and 32/64 bit mode */
8046 if (flag_code
== CODE_64BIT
8047 && operand_type_check (i
.types
[op
], disp
))
8049 i
.types
[op
].bitfield
.disp16
= 0;
8050 i
.types
[op
].bitfield
.disp64
= 0;
8051 if (i
.prefix
[ADDR_PREFIX
] == 0)
8053 i
.types
[op
].bitfield
.disp32
= 0;
8054 i
.types
[op
].bitfield
.disp32s
= 1;
8058 i
.types
[op
].bitfield
.disp32
= 1;
8059 i
.types
[op
].bitfield
.disp32s
= 0;
8063 if (!i
.tm
.opcode_modifier
.vecsib
)
8064 i
.rm
.regmem
= i
.base_reg
->reg_num
;
8065 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
8067 i
.sib
.base
= i
.base_reg
->reg_num
;
8068 /* x86-64 ignores REX prefix bit here to avoid decoder
8070 if (!(i
.base_reg
->reg_flags
& RegRex
)
8071 && (i
.base_reg
->reg_num
== EBP_REG_NUM
8072 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
8074 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
8076 fake_zero_displacement
= 1;
8077 i
.types
[op
].bitfield
.disp8
= 1;
8079 i
.sib
.scale
= i
.log2_scale_factor
;
8080 if (i
.index_reg
== 0)
8082 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
8083 /* <disp>(%esp) becomes two byte modrm with no index
8084 register. We've already stored the code for esp
8085 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
8086 Any base register besides %esp will not use the
8087 extra modrm byte. */
8088 i
.sib
.index
= NO_INDEX_REGISTER
;
8090 else if (!i
.tm
.opcode_modifier
.vecsib
)
8092 if (i
.index_reg
->reg_num
== RegIZ
)
8093 i
.sib
.index
= NO_INDEX_REGISTER
;
8095 i
.sib
.index
= i
.index_reg
->reg_num
;
8096 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8097 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
8102 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
8103 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
8107 if (!fake_zero_displacement
8111 fake_zero_displacement
= 1;
8112 if (i
.disp_encoding
== disp_encoding_8bit
)
8113 i
.types
[op
].bitfield
.disp8
= 1;
8115 i
.types
[op
].bitfield
.disp32
= 1;
8117 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8121 if (fake_zero_displacement
)
8123 /* Fakes a zero displacement assuming that i.types[op]
8124 holds the correct displacement size. */
8127 gas_assert (i
.op
[op
].disps
== 0);
8128 exp
= &disp_expressions
[i
.disp_operands
++];
8129 i
.op
[op
].disps
= exp
;
8130 exp
->X_op
= O_constant
;
8131 exp
->X_add_number
= 0;
8132 exp
->X_add_symbol
= (symbolS
*) 0;
8133 exp
->X_op_symbol
= (symbolS
*) 0;
8141 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
8143 if (operand_type_check (i
.types
[0], imm
))
8144 i
.vex
.register_specifier
= NULL
;
8147 /* VEX.vvvv encodes one of the sources when the first
8148 operand is not an immediate. */
8149 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
8150 i
.vex
.register_specifier
= i
.op
[0].regs
;
8152 i
.vex
.register_specifier
= i
.op
[1].regs
;
8155 /* Destination is a XMM register encoded in the ModRM.reg
8157 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
8158 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
8161 /* ModRM.rm and VEX.B encodes the other source. */
8162 if (!i
.mem_operands
)
8166 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
8167 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
8169 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
8171 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
8175 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
8177 i
.vex
.register_specifier
= i
.op
[2].regs
;
8178 if (!i
.mem_operands
)
8181 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
8182 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
8186 /* Fill in i.rm.reg or i.rm.regmem field with register operand
8187 (if any) based on i.tm.extension_opcode. Again, we must be
8188 careful to make sure that segment/control/debug/test/MMX
8189 registers are coded into the i.rm.reg field. */
8190 else if (i
.reg_operands
)
8193 unsigned int vex_reg
= ~0;
8195 for (op
= 0; op
< i
.operands
; op
++)
8197 if (i
.types
[op
].bitfield
.class == Reg
8198 || i
.types
[op
].bitfield
.class == RegBND
8199 || i
.types
[op
].bitfield
.class == RegMask
8200 || i
.types
[op
].bitfield
.class == SReg
8201 || i
.types
[op
].bitfield
.class == RegCR
8202 || i
.types
[op
].bitfield
.class == RegDR
8203 || i
.types
[op
].bitfield
.class == RegTR
)
8205 if (i
.types
[op
].bitfield
.class == RegSIMD
)
8207 if (i
.types
[op
].bitfield
.zmmword
)
8208 i
.has_regzmm
= TRUE
;
8209 else if (i
.types
[op
].bitfield
.ymmword
)
8210 i
.has_regymm
= TRUE
;
8212 i
.has_regxmm
= TRUE
;
8215 if (i
.types
[op
].bitfield
.class == RegMMX
)
8217 i
.has_regmmx
= TRUE
;
8224 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8226 /* For instructions with VexNDS, the register-only
8227 source operand is encoded in VEX prefix. */
8228 gas_assert (mem
!= (unsigned int) ~0);
8233 gas_assert (op
< i
.operands
);
8237 /* Check register-only source operand when two source
8238 operands are swapped. */
8239 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
8240 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
8244 gas_assert (mem
== (vex_reg
+ 1)
8245 && op
< i
.operands
);
8250 gas_assert (vex_reg
< i
.operands
);
8254 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
8256 /* For instructions with VexNDD, the register destination
8257 is encoded in VEX prefix. */
8258 if (i
.mem_operands
== 0)
8260 /* There is no memory operand. */
8261 gas_assert ((op
+ 2) == i
.operands
);
8266 /* There are only 2 non-immediate operands. */
8267 gas_assert (op
< i
.imm_operands
+ 2
8268 && i
.operands
== i
.imm_operands
+ 2);
8269 vex_reg
= i
.imm_operands
+ 1;
8273 gas_assert (op
< i
.operands
);
8275 if (vex_reg
!= (unsigned int) ~0)
8277 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
8279 if ((type
->bitfield
.class != Reg
8280 || (!type
->bitfield
.dword
&& !type
->bitfield
.qword
))
8281 && type
->bitfield
.class != RegSIMD
8282 && !operand_type_equal (type
, ®mask
))
8285 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
8288 /* Don't set OP operand twice. */
8291 /* If there is an extension opcode to put here, the
8292 register number must be put into the regmem field. */
8293 if (i
.tm
.extension_opcode
!= None
)
8295 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
8296 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
8298 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
8303 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
8304 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
8306 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
8311 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
8312 must set it to 3 to indicate this is a register operand
8313 in the regmem field. */
8314 if (!i
.mem_operands
)
8318 /* Fill in i.rm.reg field with extension opcode (if any). */
8319 if (i
.tm
.extension_opcode
!= None
)
8320 i
.rm
.reg
= i
.tm
.extension_opcode
;
8326 flip_code16 (unsigned int code16
)
8328 gas_assert (i
.tm
.operands
== 1);
8330 return !(i
.prefix
[REX_PREFIX
] & REX_W
)
8331 && (code16
? i
.tm
.operand_types
[0].bitfield
.disp32
8332 || i
.tm
.operand_types
[0].bitfield
.disp32s
8333 : i
.tm
.operand_types
[0].bitfield
.disp16
)
8338 output_branch (void)
8344 relax_substateT subtype
;
8348 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
8349 size
= i
.disp_encoding
== disp_encoding_32bit
? BIG
: SMALL
;
8352 if (i
.prefix
[DATA_PREFIX
] != 0)
8356 code16
^= flip_code16(code16
);
8358 /* Pentium4 branch hints. */
8359 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8360 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8365 if (i
.prefix
[REX_PREFIX
] != 0)
8371 /* BND prefixed jump. */
8372 if (i
.prefix
[BND_PREFIX
] != 0)
8378 if (i
.prefixes
!= 0)
8379 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8381 /* It's always a symbol; End frag & setup for relax.
8382 Make sure there is enough room in this frag for the largest
8383 instruction we may generate in md_convert_frag. This is 2
8384 bytes for the opcode and room for the prefix and largest
8386 frag_grow (prefix
+ 2 + 4);
8387 /* Prefix and 1 opcode byte go in fr_fix. */
8388 p
= frag_more (prefix
+ 1);
8389 if (i
.prefix
[DATA_PREFIX
] != 0)
8390 *p
++ = DATA_PREFIX_OPCODE
;
8391 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
8392 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
8393 *p
++ = i
.prefix
[SEG_PREFIX
];
8394 if (i
.prefix
[BND_PREFIX
] != 0)
8395 *p
++ = BND_PREFIX_OPCODE
;
8396 if (i
.prefix
[REX_PREFIX
] != 0)
8397 *p
++ = i
.prefix
[REX_PREFIX
];
8398 *p
= i
.tm
.base_opcode
;
8400 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
8401 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
8402 else if (cpu_arch_flags
.bitfield
.cpui386
)
8403 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
8405 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
8408 sym
= i
.op
[0].disps
->X_add_symbol
;
8409 off
= i
.op
[0].disps
->X_add_number
;
8411 if (i
.op
[0].disps
->X_op
!= O_constant
8412 && i
.op
[0].disps
->X_op
!= O_symbol
)
8414 /* Handle complex expressions. */
8415 sym
= make_expr_symbol (i
.op
[0].disps
);
8419 /* 1 possible extra opcode + 4 byte displacement go in var part.
8420 Pass reloc in fr_var. */
8421 frag_var (rs_machine_dependent
, 5, i
.reloc
[0], subtype
, sym
, off
, p
);
8424 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8425 /* Return TRUE iff PLT32 relocation should be used for branching to
8429 need_plt32_p (symbolS
*s
)
8431 /* PLT32 relocation is ELF only. */
8436 /* Don't emit PLT32 relocation on Solaris: neither native linker nor
8437 krtld support it. */
8441 /* Since there is no need to prepare for PLT branch on x86-64, we
8442 can generate R_X86_64_PLT32, instead of R_X86_64_PC32, which can
8443 be used as a marker for 32-bit PC-relative branches. */
8447 /* Weak or undefined symbol need PLT32 relocation. */
8448 if (S_IS_WEAK (s
) || !S_IS_DEFINED (s
))
8451 /* Non-global symbol doesn't need PLT32 relocation. */
8452 if (! S_IS_EXTERNAL (s
))
8455 /* Other global symbols need PLT32 relocation. NB: Symbol with
8456 non-default visibilities are treated as normal global symbol
8457 so that PLT32 relocation can be used as a marker for 32-bit
8458 PC-relative branches. It is useful for linker relaxation. */
8469 bfd_reloc_code_real_type jump_reloc
= i
.reloc
[0];
8471 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)
8473 /* This is a loop or jecxz type instruction. */
8475 if (i
.prefix
[ADDR_PREFIX
] != 0)
8477 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE
);
8480 /* Pentium4 branch hints. */
8481 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8482 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8484 FRAG_APPEND_1_CHAR (i
.prefix
[SEG_PREFIX
]);
8493 if (flag_code
== CODE_16BIT
)
8496 if (i
.prefix
[DATA_PREFIX
] != 0)
8498 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE
);
8500 code16
^= flip_code16(code16
);
8508 /* BND prefixed jump. */
8509 if (i
.prefix
[BND_PREFIX
] != 0)
8511 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
8515 if (i
.prefix
[REX_PREFIX
] != 0)
8517 FRAG_APPEND_1_CHAR (i
.prefix
[REX_PREFIX
]);
8521 if (i
.prefixes
!= 0)
8522 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8524 p
= frag_more (i
.tm
.opcode_length
+ size
);
8525 switch (i
.tm
.opcode_length
)
8528 *p
++ = i
.tm
.base_opcode
>> 8;
8531 *p
++ = i
.tm
.base_opcode
;
8537 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8539 && jump_reloc
== NO_RELOC
8540 && need_plt32_p (i
.op
[0].disps
->X_add_symbol
))
8541 jump_reloc
= BFD_RELOC_X86_64_PLT32
;
8544 jump_reloc
= reloc (size
, 1, 1, jump_reloc
);
8546 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8547 i
.op
[0].disps
, 1, jump_reloc
);
8549 /* All jumps handled here are signed, but don't use a signed limit
8550 check for 32 and 16 bit jumps as we want to allow wrap around at
8551 4G and 64k respectively. */
8553 fixP
->fx_signed
= 1;
8557 output_interseg_jump (void)
8565 if (flag_code
== CODE_16BIT
)
8569 if (i
.prefix
[DATA_PREFIX
] != 0)
8576 gas_assert (!i
.prefix
[REX_PREFIX
]);
8582 if (i
.prefixes
!= 0)
8583 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8585 /* 1 opcode; 2 segment; offset */
8586 p
= frag_more (prefix
+ 1 + 2 + size
);
8588 if (i
.prefix
[DATA_PREFIX
] != 0)
8589 *p
++ = DATA_PREFIX_OPCODE
;
8591 if (i
.prefix
[REX_PREFIX
] != 0)
8592 *p
++ = i
.prefix
[REX_PREFIX
];
8594 *p
++ = i
.tm
.base_opcode
;
8595 if (i
.op
[1].imms
->X_op
== O_constant
)
8597 offsetT n
= i
.op
[1].imms
->X_add_number
;
8600 && !fits_in_unsigned_word (n
)
8601 && !fits_in_signed_word (n
))
8603 as_bad (_("16-bit jump out of range"));
8606 md_number_to_chars (p
, n
, size
);
8609 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8610 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
8611 if (i
.op
[0].imms
->X_op
!= O_constant
)
8612 as_bad (_("can't handle non absolute segment in `%s'"),
8614 md_number_to_chars (p
+ size
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
8617 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8622 asection
*seg
= now_seg
;
8623 subsegT subseg
= now_subseg
;
8625 unsigned int alignment
, align_size_1
;
8626 unsigned int isa_1_descsz
, feature_2_descsz
, descsz
;
8627 unsigned int isa_1_descsz_raw
, feature_2_descsz_raw
;
8628 unsigned int padding
;
8630 if (!IS_ELF
|| !x86_used_note
)
8633 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X86
;
8635 /* The .note.gnu.property section layout:
8637 Field Length Contents
8640 n_descsz 4 The note descriptor size
8641 n_type 4 NT_GNU_PROPERTY_TYPE_0
8643 n_desc n_descsz The program property array
8647 /* Create the .note.gnu.property section. */
8648 sec
= subseg_new (NOTE_GNU_PROPERTY_SECTION_NAME
, 0);
8649 bfd_set_section_flags (sec
,
8656 if (get_elf_backend_data (stdoutput
)->s
->elfclass
== ELFCLASS64
)
8667 bfd_set_section_alignment (sec
, alignment
);
8668 elf_section_type (sec
) = SHT_NOTE
;
8670 /* GNU_PROPERTY_X86_ISA_1_USED: 4-byte type + 4-byte data size
8672 isa_1_descsz_raw
= 4 + 4 + 4;
8673 /* Align GNU_PROPERTY_X86_ISA_1_USED. */
8674 isa_1_descsz
= (isa_1_descsz_raw
+ align_size_1
) & ~align_size_1
;
8676 feature_2_descsz_raw
= isa_1_descsz
;
8677 /* GNU_PROPERTY_X86_FEATURE_2_USED: 4-byte type + 4-byte data size
8679 feature_2_descsz_raw
+= 4 + 4 + 4;
8680 /* Align GNU_PROPERTY_X86_FEATURE_2_USED. */
8681 feature_2_descsz
= ((feature_2_descsz_raw
+ align_size_1
)
8684 descsz
= feature_2_descsz
;
8685 /* Section size: n_namsz + n_descsz + n_type + n_name + n_descsz. */
8686 p
= frag_more (4 + 4 + 4 + 4 + descsz
);
8688 /* Write n_namsz. */
8689 md_number_to_chars (p
, (valueT
) 4, 4);
8691 /* Write n_descsz. */
8692 md_number_to_chars (p
+ 4, (valueT
) descsz
, 4);
8695 md_number_to_chars (p
+ 4 * 2, (valueT
) NT_GNU_PROPERTY_TYPE_0
, 4);
8698 memcpy (p
+ 4 * 3, "GNU", 4);
8700 /* Write 4-byte type. */
8701 md_number_to_chars (p
+ 4 * 4,
8702 (valueT
) GNU_PROPERTY_X86_ISA_1_USED
, 4);
8704 /* Write 4-byte data size. */
8705 md_number_to_chars (p
+ 4 * 5, (valueT
) 4, 4);
8707 /* Write 4-byte data. */
8708 md_number_to_chars (p
+ 4 * 6, (valueT
) x86_isa_1_used
, 4);
8710 /* Zero out paddings. */
8711 padding
= isa_1_descsz
- isa_1_descsz_raw
;
8713 memset (p
+ 4 * 7, 0, padding
);
8715 /* Write 4-byte type. */
8716 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 4,
8717 (valueT
) GNU_PROPERTY_X86_FEATURE_2_USED
, 4);
8719 /* Write 4-byte data size. */
8720 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 5, (valueT
) 4, 4);
8722 /* Write 4-byte data. */
8723 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 6,
8724 (valueT
) x86_feature_2_used
, 4);
8726 /* Zero out paddings. */
8727 padding
= feature_2_descsz
- feature_2_descsz_raw
;
8729 memset (p
+ isa_1_descsz
+ 4 * 7, 0, padding
);
8731 /* We probably can't restore the current segment, for there likely
8734 subseg_set (seg
, subseg
);
8739 encoding_length (const fragS
*start_frag
, offsetT start_off
,
8740 const char *frag_now_ptr
)
8742 unsigned int len
= 0;
8744 if (start_frag
!= frag_now
)
8746 const fragS
*fr
= start_frag
;
8751 } while (fr
&& fr
!= frag_now
);
8754 return len
- start_off
+ (frag_now_ptr
- frag_now
->fr_literal
);
8757 /* Return 1 for test, and, cmp, add, sub, inc and dec which may
8758 be macro-fused with conditional jumps.
8759 NB: If TEST/AND/CMP/ADD/SUB/INC/DEC is of RIP relative address,
8760 or is one of the following format:
8773 maybe_fused_with_jcc_p (enum mf_cmp_kind
* mf_cmp_p
)
8775 /* No RIP address. */
8776 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
8779 /* No VEX/EVEX encoding. */
8780 if (is_any_vex_encoding (&i
.tm
))
8783 /* add, sub without add/sub m, imm. */
8784 if (i
.tm
.base_opcode
<= 5
8785 || (i
.tm
.base_opcode
>= 0x28 && i
.tm
.base_opcode
<= 0x2d)
8786 || ((i
.tm
.base_opcode
| 3) == 0x83
8787 && (i
.tm
.extension_opcode
== 0x5
8788 || i
.tm
.extension_opcode
== 0x0)))
8790 *mf_cmp_p
= mf_cmp_alu_cmp
;
8791 return !(i
.mem_operands
&& i
.imm_operands
);
8794 /* and without and m, imm. */
8795 if ((i
.tm
.base_opcode
>= 0x20 && i
.tm
.base_opcode
<= 0x25)
8796 || ((i
.tm
.base_opcode
| 3) == 0x83
8797 && i
.tm
.extension_opcode
== 0x4))
8799 *mf_cmp_p
= mf_cmp_test_and
;
8800 return !(i
.mem_operands
&& i
.imm_operands
);
8803 /* test without test m imm. */
8804 if ((i
.tm
.base_opcode
| 1) == 0x85
8805 || (i
.tm
.base_opcode
| 1) == 0xa9
8806 || ((i
.tm
.base_opcode
| 1) == 0xf7
8807 && i
.tm
.extension_opcode
== 0))
8809 *mf_cmp_p
= mf_cmp_test_and
;
8810 return !(i
.mem_operands
&& i
.imm_operands
);
8813 /* cmp without cmp m, imm. */
8814 if ((i
.tm
.base_opcode
>= 0x38 && i
.tm
.base_opcode
<= 0x3d)
8815 || ((i
.tm
.base_opcode
| 3) == 0x83
8816 && (i
.tm
.extension_opcode
== 0x7)))
8818 *mf_cmp_p
= mf_cmp_alu_cmp
;
8819 return !(i
.mem_operands
&& i
.imm_operands
);
8822 /* inc, dec without inc/dec m. */
8823 if ((i
.tm
.cpu_flags
.bitfield
.cpuno64
8824 && (i
.tm
.base_opcode
| 0xf) == 0x4f)
8825 || ((i
.tm
.base_opcode
| 1) == 0xff
8826 && i
.tm
.extension_opcode
<= 0x1))
8828 *mf_cmp_p
= mf_cmp_incdec
;
8829 return !i
.mem_operands
;
8835 /* Return 1 if a FUSED_JCC_PADDING frag should be generated. */
8838 add_fused_jcc_padding_frag_p (enum mf_cmp_kind
* mf_cmp_p
)
8840 /* NB: Don't work with COND_JUMP86 without i386. */
8841 if (!align_branch_power
8842 || now_seg
== absolute_section
8843 || !cpu_arch_flags
.bitfield
.cpui386
8844 || !(align_branch
& align_branch_fused_bit
))
8847 if (maybe_fused_with_jcc_p (mf_cmp_p
))
8849 if (last_insn
.kind
== last_insn_other
8850 || last_insn
.seg
!= now_seg
)
8853 as_warn_where (last_insn
.file
, last_insn
.line
,
8854 _("`%s` skips -malign-branch-boundary on `%s`"),
8855 last_insn
.name
, i
.tm
.name
);
8861 /* Return 1 if a BRANCH_PREFIX frag should be generated. */
8864 add_branch_prefix_frag_p (void)
8866 /* NB: Don't work with COND_JUMP86 without i386. Don't add prefix
8867 to PadLock instructions since they include prefixes in opcode. */
8868 if (!align_branch_power
8869 || !align_branch_prefix_size
8870 || now_seg
== absolute_section
8871 || i
.tm
.cpu_flags
.bitfield
.cpupadlock
8872 || !cpu_arch_flags
.bitfield
.cpui386
)
8875 /* Don't add prefix if it is a prefix or there is no operand in case
8876 that segment prefix is special. */
8877 if (!i
.operands
|| i
.tm
.opcode_modifier
.isprefix
)
8880 if (last_insn
.kind
== last_insn_other
8881 || last_insn
.seg
!= now_seg
)
8885 as_warn_where (last_insn
.file
, last_insn
.line
,
8886 _("`%s` skips -malign-branch-boundary on `%s`"),
8887 last_insn
.name
, i
.tm
.name
);
8892 /* Return 1 if a BRANCH_PADDING frag should be generated. */
8895 add_branch_padding_frag_p (enum align_branch_kind
*branch_p
,
8896 enum mf_jcc_kind
*mf_jcc_p
)
8900 /* NB: Don't work with COND_JUMP86 without i386. */
8901 if (!align_branch_power
8902 || now_seg
== absolute_section
8903 || !cpu_arch_flags
.bitfield
.cpui386
)
8908 /* Check for jcc and direct jmp. */
8909 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
8911 if (i
.tm
.base_opcode
== JUMP_PC_RELATIVE
)
8913 *branch_p
= align_branch_jmp
;
8914 add_padding
= align_branch
& align_branch_jmp_bit
;
8918 /* Because J<cc> and JN<cc> share same group in macro-fusible table,
8919 ignore the lowest bit. */
8920 *mf_jcc_p
= (i
.tm
.base_opcode
& 0x0e) >> 1;
8921 *branch_p
= align_branch_jcc
;
8922 if ((align_branch
& align_branch_jcc_bit
))
8926 else if (is_any_vex_encoding (&i
.tm
))
8928 else if ((i
.tm
.base_opcode
| 1) == 0xc3)
8931 *branch_p
= align_branch_ret
;
8932 if ((align_branch
& align_branch_ret_bit
))
8937 /* Check for indirect jmp, direct and indirect calls. */
8938 if (i
.tm
.base_opcode
== 0xe8)
8941 *branch_p
= align_branch_call
;
8942 if ((align_branch
& align_branch_call_bit
))
8945 else if (i
.tm
.base_opcode
== 0xff
8946 && (i
.tm
.extension_opcode
== 2
8947 || i
.tm
.extension_opcode
== 4))
8949 /* Indirect call and jmp. */
8950 *branch_p
= align_branch_indirect
;
8951 if ((align_branch
& align_branch_indirect_bit
))
8958 && (i
.op
[0].disps
->X_op
== O_symbol
8959 || (i
.op
[0].disps
->X_op
== O_subtract
8960 && i
.op
[0].disps
->X_op_symbol
== GOT_symbol
)))
8962 symbolS
*s
= i
.op
[0].disps
->X_add_symbol
;
8963 /* No padding to call to global or undefined tls_get_addr. */
8964 if ((S_IS_EXTERNAL (s
) || !S_IS_DEFINED (s
))
8965 && strcmp (S_GET_NAME (s
), tls_get_addr
) == 0)
8971 && last_insn
.kind
!= last_insn_other
8972 && last_insn
.seg
== now_seg
)
8975 as_warn_where (last_insn
.file
, last_insn
.line
,
8976 _("`%s` skips -malign-branch-boundary on `%s`"),
8977 last_insn
.name
, i
.tm
.name
);
8987 fragS
*insn_start_frag
;
8988 offsetT insn_start_off
;
8989 fragS
*fragP
= NULL
;
8990 enum align_branch_kind branch
= align_branch_none
;
8991 /* The initializer is arbitrary just to avoid uninitialized error.
8992 it's actually either assigned in add_branch_padding_frag_p
8993 or never be used. */
8994 enum mf_jcc_kind mf_jcc
= mf_jcc_jo
;
8996 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8997 if (IS_ELF
&& x86_used_note
)
8999 if (i
.tm
.cpu_flags
.bitfield
.cpucmov
)
9000 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_CMOV
;
9001 if (i
.tm
.cpu_flags
.bitfield
.cpusse
)
9002 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE
;
9003 if (i
.tm
.cpu_flags
.bitfield
.cpusse2
)
9004 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE2
;
9005 if (i
.tm
.cpu_flags
.bitfield
.cpusse3
)
9006 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE3
;
9007 if (i
.tm
.cpu_flags
.bitfield
.cpussse3
)
9008 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSSE3
;
9009 if (i
.tm
.cpu_flags
.bitfield
.cpusse4_1
)
9010 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE4_1
;
9011 if (i
.tm
.cpu_flags
.bitfield
.cpusse4_2
)
9012 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE4_2
;
9013 if (i
.tm
.cpu_flags
.bitfield
.cpuavx
)
9014 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX
;
9015 if (i
.tm
.cpu_flags
.bitfield
.cpuavx2
)
9016 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX2
;
9017 if (i
.tm
.cpu_flags
.bitfield
.cpufma
)
9018 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_FMA
;
9019 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512f
)
9020 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512F
;
9021 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512cd
)
9022 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512CD
;
9023 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512er
)
9024 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512ER
;
9025 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512pf
)
9026 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512PF
;
9027 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
)
9028 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512VL
;
9029 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
)
9030 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512DQ
;
9031 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
)
9032 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512BW
;
9033 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_4fmaps
)
9034 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_4FMAPS
;
9035 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_4vnniw
)
9036 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_4VNNIW
;
9037 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_bitalg
)
9038 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_BITALG
;
9039 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512ifma
)
9040 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_IFMA
;
9041 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512vbmi
)
9042 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI
;
9043 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_vbmi2
)
9044 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI2
;
9045 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_vnni
)
9046 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VNNI
;
9047 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_bf16
)
9048 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_BF16
;
9050 if (i
.tm
.cpu_flags
.bitfield
.cpu8087
9051 || i
.tm
.cpu_flags
.bitfield
.cpu287
9052 || i
.tm
.cpu_flags
.bitfield
.cpu387
9053 || i
.tm
.cpu_flags
.bitfield
.cpu687
9054 || i
.tm
.cpu_flags
.bitfield
.cpufisttp
)
9055 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X87
;
9057 || i
.tm
.base_opcode
== 0xf77 /* emms */
9058 || i
.tm
.base_opcode
== 0xf0e /* femms */
9059 || i
.tm
.base_opcode
== 0xf2a /* cvtpi2ps */
9060 || i
.tm
.base_opcode
== 0x660f2a /* cvtpi2pd */)
9061 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MMX
;
9063 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XMM
;
9065 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_YMM
;
9067 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_ZMM
;
9068 if (i
.tm
.cpu_flags
.bitfield
.cpufxsr
)
9069 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_FXSR
;
9070 if (i
.tm
.cpu_flags
.bitfield
.cpuxsave
)
9071 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVE
;
9072 if (i
.tm
.cpu_flags
.bitfield
.cpuxsaveopt
)
9073 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
;
9074 if (i
.tm
.cpu_flags
.bitfield
.cpuxsavec
)
9075 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEC
;
9079 /* Tie dwarf2 debug info to the address at the start of the insn.
9080 We can't do this after the insn has been output as the current
9081 frag may have been closed off. eg. by frag_var. */
9082 dwarf2_emit_insn (0);
9084 insn_start_frag
= frag_now
;
9085 insn_start_off
= frag_now_fix ();
9087 if (add_branch_padding_frag_p (&branch
, &mf_jcc
))
9090 /* Branch can be 8 bytes. Leave some room for prefixes. */
9091 unsigned int max_branch_padding_size
= 14;
9093 /* Align section to boundary. */
9094 record_alignment (now_seg
, align_branch_power
);
9096 /* Make room for padding. */
9097 frag_grow (max_branch_padding_size
);
9099 /* Start of the padding. */
9104 frag_var (rs_machine_dependent
, max_branch_padding_size
, 0,
9105 ENCODE_RELAX_STATE (BRANCH_PADDING
, 0),
9108 fragP
->tc_frag_data
.mf_type
= mf_jcc
;
9109 fragP
->tc_frag_data
.branch_type
= branch
;
9110 fragP
->tc_frag_data
.max_bytes
= max_branch_padding_size
;
9114 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
9116 else if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
9117 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
9119 else if (i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
)
9120 output_interseg_jump ();
9123 /* Output normal instructions here. */
9127 unsigned int prefix
;
9128 enum mf_cmp_kind mf_cmp
;
9131 && (i
.tm
.base_opcode
== 0xfaee8
9132 || i
.tm
.base_opcode
== 0xfaef0
9133 || i
.tm
.base_opcode
== 0xfaef8))
9135 /* Encode lfence, mfence, and sfence as
9136 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
9137 offsetT val
= 0x240483f0ULL
;
9139 md_number_to_chars (p
, val
, 5);
9143 /* Some processors fail on LOCK prefix. This options makes
9144 assembler ignore LOCK prefix and serves as a workaround. */
9145 if (omit_lock_prefix
)
9147 if (i
.tm
.base_opcode
== LOCK_PREFIX_OPCODE
)
9149 i
.prefix
[LOCK_PREFIX
] = 0;
9153 /* Skip if this is a branch. */
9155 else if (add_fused_jcc_padding_frag_p (&mf_cmp
))
9157 /* Make room for padding. */
9158 frag_grow (MAX_FUSED_JCC_PADDING_SIZE
);
9163 frag_var (rs_machine_dependent
, MAX_FUSED_JCC_PADDING_SIZE
, 0,
9164 ENCODE_RELAX_STATE (FUSED_JCC_PADDING
, 0),
9167 fragP
->tc_frag_data
.mf_type
= mf_cmp
;
9168 fragP
->tc_frag_data
.branch_type
= align_branch_fused
;
9169 fragP
->tc_frag_data
.max_bytes
= MAX_FUSED_JCC_PADDING_SIZE
;
9171 else if (add_branch_prefix_frag_p ())
9173 unsigned int max_prefix_size
= align_branch_prefix_size
;
9175 /* Make room for padding. */
9176 frag_grow (max_prefix_size
);
9181 frag_var (rs_machine_dependent
, max_prefix_size
, 0,
9182 ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0),
9185 fragP
->tc_frag_data
.max_bytes
= max_prefix_size
;
9188 /* Since the VEX/EVEX prefix contains the implicit prefix, we
9189 don't need the explicit prefix. */
9190 if (!i
.tm
.opcode_modifier
.vex
&& !i
.tm
.opcode_modifier
.evex
)
9192 switch (i
.tm
.opcode_length
)
9195 if (i
.tm
.base_opcode
& 0xff000000)
9197 prefix
= (i
.tm
.base_opcode
>> 24) & 0xff;
9198 if (!i
.tm
.cpu_flags
.bitfield
.cpupadlock
9199 || prefix
!= REPE_PREFIX_OPCODE
9200 || (i
.prefix
[REP_PREFIX
] != REPE_PREFIX_OPCODE
))
9201 add_prefix (prefix
);
9205 if ((i
.tm
.base_opcode
& 0xff0000) != 0)
9207 prefix
= (i
.tm
.base_opcode
>> 16) & 0xff;
9208 add_prefix (prefix
);
9214 /* Check for pseudo prefixes. */
9215 as_bad_where (insn_start_frag
->fr_file
,
9216 insn_start_frag
->fr_line
,
9217 _("pseudo prefix without instruction"));
9223 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9224 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
9225 R_X86_64_GOTTPOFF relocation so that linker can safely
9226 perform IE->LE optimization. A dummy REX_OPCODE prefix
9227 is also needed for lea with R_X86_64_GOTPC32_TLSDESC
9228 relocation for GDesc -> IE/LE optimization. */
9229 if (x86_elf_abi
== X86_64_X32_ABI
9231 && (i
.reloc
[0] == BFD_RELOC_X86_64_GOTTPOFF
9232 || i
.reloc
[0] == BFD_RELOC_X86_64_GOTPC32_TLSDESC
)
9233 && i
.prefix
[REX_PREFIX
] == 0)
9234 add_prefix (REX_OPCODE
);
9237 /* The prefix bytes. */
9238 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
9240 FRAG_APPEND_1_CHAR (*q
);
9244 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
9249 /* REX byte is encoded in VEX prefix. */
9253 FRAG_APPEND_1_CHAR (*q
);
9256 /* There should be no other prefixes for instructions
9261 /* For EVEX instructions i.vrex should become 0 after
9262 build_evex_prefix. For VEX instructions upper 16 registers
9263 aren't available, so VREX should be 0. */
9266 /* Now the VEX prefix. */
9267 p
= frag_more (i
.vex
.length
);
9268 for (j
= 0; j
< i
.vex
.length
; j
++)
9269 p
[j
] = i
.vex
.bytes
[j
];
9272 /* Now the opcode; be careful about word order here! */
9273 if (i
.tm
.opcode_length
== 1)
9275 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
9279 switch (i
.tm
.opcode_length
)
9283 *p
++ = (i
.tm
.base_opcode
>> 24) & 0xff;
9284 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
9288 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
9298 /* Put out high byte first: can't use md_number_to_chars! */
9299 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
9300 *p
= i
.tm
.base_opcode
& 0xff;
9303 /* Now the modrm byte and sib byte (if present). */
9304 if (i
.tm
.opcode_modifier
.modrm
)
9306 FRAG_APPEND_1_CHAR ((i
.rm
.regmem
<< 0
9309 /* If i.rm.regmem == ESP (4)
9310 && i.rm.mode != (Register mode)
9312 ==> need second modrm byte. */
9313 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
9315 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.word
))
9316 FRAG_APPEND_1_CHAR ((i
.sib
.base
<< 0
9318 | i
.sib
.scale
<< 6));
9321 if (i
.disp_operands
)
9322 output_disp (insn_start_frag
, insn_start_off
);
9325 output_imm (insn_start_frag
, insn_start_off
);
9328 * frag_now_fix () returning plain abs_section_offset when we're in the
9329 * absolute section, and abs_section_offset not getting updated as data
9330 * gets added to the frag breaks the logic below.
9332 if (now_seg
!= absolute_section
)
9334 j
= encoding_length (insn_start_frag
, insn_start_off
, frag_more (0));
9336 as_warn (_("instruction length of %u bytes exceeds the limit of 15"),
9340 /* NB: Don't add prefix with GOTPC relocation since
9341 output_disp() above depends on the fixed encoding
9342 length. Can't add prefix with TLS relocation since
9343 it breaks TLS linker optimization. */
9344 unsigned int max
= i
.has_gotpc_tls_reloc
? 0 : 15 - j
;
9345 /* Prefix count on the current instruction. */
9346 unsigned int count
= i
.vex
.length
;
9348 for (k
= 0; k
< ARRAY_SIZE (i
.prefix
); k
++)
9349 /* REX byte is encoded in VEX/EVEX prefix. */
9350 if (i
.prefix
[k
] && (k
!= REX_PREFIX
|| !i
.vex
.length
))
9353 /* Count prefixes for extended opcode maps. */
9355 switch (i
.tm
.opcode_length
)
9358 if (((i
.tm
.base_opcode
>> 16) & 0xff) == 0xf)
9361 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
9373 if (((i
.tm
.base_opcode
>> 8) & 0xff) == 0xf)
9382 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
9385 /* Set the maximum prefix size in BRANCH_PREFIX
9387 if (fragP
->tc_frag_data
.max_bytes
> max
)
9388 fragP
->tc_frag_data
.max_bytes
= max
;
9389 if (fragP
->tc_frag_data
.max_bytes
> count
)
9390 fragP
->tc_frag_data
.max_bytes
-= count
;
9392 fragP
->tc_frag_data
.max_bytes
= 0;
9396 /* Remember the maximum prefix size in FUSED_JCC_PADDING
9398 unsigned int max_prefix_size
;
9399 if (align_branch_prefix_size
> max
)
9400 max_prefix_size
= max
;
9402 max_prefix_size
= align_branch_prefix_size
;
9403 if (max_prefix_size
> count
)
9404 fragP
->tc_frag_data
.max_prefix_length
9405 = max_prefix_size
- count
;
9408 /* Use existing segment prefix if possible. Use CS
9409 segment prefix in 64-bit mode. In 32-bit mode, use SS
9410 segment prefix with ESP/EBP base register and use DS
9411 segment prefix without ESP/EBP base register. */
9412 if (i
.prefix
[SEG_PREFIX
])
9413 fragP
->tc_frag_data
.default_prefix
= i
.prefix
[SEG_PREFIX
];
9414 else if (flag_code
== CODE_64BIT
)
9415 fragP
->tc_frag_data
.default_prefix
= CS_PREFIX_OPCODE
;
9417 && (i
.base_reg
->reg_num
== 4
9418 || i
.base_reg
->reg_num
== 5))
9419 fragP
->tc_frag_data
.default_prefix
= SS_PREFIX_OPCODE
;
9421 fragP
->tc_frag_data
.default_prefix
= DS_PREFIX_OPCODE
;
9426 /* NB: Don't work with COND_JUMP86 without i386. */
9427 if (align_branch_power
9428 && now_seg
!= absolute_section
9429 && cpu_arch_flags
.bitfield
.cpui386
)
9431 /* Terminate each frag so that we can add prefix and check for
9433 frag_wane (frag_now
);
9440 pi ("" /*line*/, &i
);
9442 #endif /* DEBUG386 */
9445 /* Return the size of the displacement operand N. */
9448 disp_size (unsigned int n
)
9452 if (i
.types
[n
].bitfield
.disp64
)
9454 else if (i
.types
[n
].bitfield
.disp8
)
9456 else if (i
.types
[n
].bitfield
.disp16
)
9461 /* Return the size of the immediate operand N. */
9464 imm_size (unsigned int n
)
9467 if (i
.types
[n
].bitfield
.imm64
)
9469 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
9471 else if (i
.types
[n
].bitfield
.imm16
)
9477 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
9482 for (n
= 0; n
< i
.operands
; n
++)
9484 if (operand_type_check (i
.types
[n
], disp
))
9486 if (i
.op
[n
].disps
->X_op
== O_constant
)
9488 int size
= disp_size (n
);
9489 offsetT val
= i
.op
[n
].disps
->X_add_number
;
9491 val
= offset_in_range (val
>> (size
== 1 ? i
.memshift
: 0),
9493 p
= frag_more (size
);
9494 md_number_to_chars (p
, val
, size
);
9498 enum bfd_reloc_code_real reloc_type
;
9499 int size
= disp_size (n
);
9500 int sign
= i
.types
[n
].bitfield
.disp32s
;
9501 int pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
9504 /* We can't have 8 bit displacement here. */
9505 gas_assert (!i
.types
[n
].bitfield
.disp8
);
9507 /* The PC relative address is computed relative
9508 to the instruction boundary, so in case immediate
9509 fields follows, we need to adjust the value. */
9510 if (pcrel
&& i
.imm_operands
)
9515 for (n1
= 0; n1
< i
.operands
; n1
++)
9516 if (operand_type_check (i
.types
[n1
], imm
))
9518 /* Only one immediate is allowed for PC
9519 relative address. */
9520 gas_assert (sz
== 0);
9522 i
.op
[n
].disps
->X_add_number
-= sz
;
9524 /* We should find the immediate. */
9525 gas_assert (sz
!= 0);
9528 p
= frag_more (size
);
9529 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
9531 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
9532 && (((reloc_type
== BFD_RELOC_32
9533 || reloc_type
== BFD_RELOC_X86_64_32S
9534 || (reloc_type
== BFD_RELOC_64
9536 && (i
.op
[n
].disps
->X_op
== O_symbol
9537 || (i
.op
[n
].disps
->X_op
== O_add
9538 && ((symbol_get_value_expression
9539 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
9541 || reloc_type
== BFD_RELOC_32_PCREL
))
9545 reloc_type
= BFD_RELOC_386_GOTPC
;
9546 i
.has_gotpc_tls_reloc
= TRUE
;
9547 i
.op
[n
].imms
->X_add_number
+=
9548 encoding_length (insn_start_frag
, insn_start_off
, p
);
9550 else if (reloc_type
== BFD_RELOC_64
)
9551 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
9553 /* Don't do the adjustment for x86-64, as there
9554 the pcrel addressing is relative to the _next_
9555 insn, and that is taken care of in other code. */
9556 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
9558 else if (align_branch_power
)
9562 case BFD_RELOC_386_TLS_GD
:
9563 case BFD_RELOC_386_TLS_LDM
:
9564 case BFD_RELOC_386_TLS_IE
:
9565 case BFD_RELOC_386_TLS_IE_32
:
9566 case BFD_RELOC_386_TLS_GOTIE
:
9567 case BFD_RELOC_386_TLS_GOTDESC
:
9568 case BFD_RELOC_386_TLS_DESC_CALL
:
9569 case BFD_RELOC_X86_64_TLSGD
:
9570 case BFD_RELOC_X86_64_TLSLD
:
9571 case BFD_RELOC_X86_64_GOTTPOFF
:
9572 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
9573 case BFD_RELOC_X86_64_TLSDESC_CALL
:
9574 i
.has_gotpc_tls_reloc
= TRUE
;
9579 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
,
9580 size
, i
.op
[n
].disps
, pcrel
,
9582 /* Check for "call/jmp *mem", "mov mem, %reg",
9583 "test %reg, mem" and "binop mem, %reg" where binop
9584 is one of adc, add, and, cmp, or, sbb, sub, xor
9585 instructions without data prefix. Always generate
9586 R_386_GOT32X for "sym*GOT" operand in 32-bit mode. */
9587 if (i
.prefix
[DATA_PREFIX
] == 0
9588 && (generate_relax_relocations
9591 && i
.rm
.regmem
== 5))
9593 || (i
.rm
.mode
== 0 && i
.rm
.regmem
== 5))
9594 && !is_any_vex_encoding(&i
.tm
)
9595 && ((i
.operands
== 1
9596 && i
.tm
.base_opcode
== 0xff
9597 && (i
.rm
.reg
== 2 || i
.rm
.reg
== 4))
9599 && (i
.tm
.base_opcode
== 0x8b
9600 || i
.tm
.base_opcode
== 0x85
9601 || (i
.tm
.base_opcode
& ~0x38) == 0x03))))
9605 fixP
->fx_tcbit
= i
.rex
!= 0;
9607 && (i
.base_reg
->reg_num
== RegIP
))
9608 fixP
->fx_tcbit2
= 1;
9611 fixP
->fx_tcbit2
= 1;
9619 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
9624 for (n
= 0; n
< i
.operands
; n
++)
9626 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
9627 if (i
.rounding
&& (int) n
== i
.rounding
->operand
)
9630 if (operand_type_check (i
.types
[n
], imm
))
9632 if (i
.op
[n
].imms
->X_op
== O_constant
)
9634 int size
= imm_size (n
);
9637 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
9639 p
= frag_more (size
);
9640 md_number_to_chars (p
, val
, size
);
9644 /* Not absolute_section.
9645 Need a 32-bit fixup (don't support 8bit
9646 non-absolute imms). Try to support other
9648 enum bfd_reloc_code_real reloc_type
;
9649 int size
= imm_size (n
);
9652 if (i
.types
[n
].bitfield
.imm32s
9653 && (i
.suffix
== QWORD_MNEM_SUFFIX
9654 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)))
9659 p
= frag_more (size
);
9660 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
9662 /* This is tough to explain. We end up with this one if we
9663 * have operands that look like
9664 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
9665 * obtain the absolute address of the GOT, and it is strongly
9666 * preferable from a performance point of view to avoid using
9667 * a runtime relocation for this. The actual sequence of
9668 * instructions often look something like:
9673 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
9675 * The call and pop essentially return the absolute address
9676 * of the label .L66 and store it in %ebx. The linker itself
9677 * will ultimately change the first operand of the addl so
9678 * that %ebx points to the GOT, but to keep things simple, the
9679 * .o file must have this operand set so that it generates not
9680 * the absolute address of .L66, but the absolute address of
9681 * itself. This allows the linker itself simply treat a GOTPC
9682 * relocation as asking for a pcrel offset to the GOT to be
9683 * added in, and the addend of the relocation is stored in the
9684 * operand field for the instruction itself.
9686 * Our job here is to fix the operand so that it would add
9687 * the correct offset so that %ebx would point to itself. The
9688 * thing that is tricky is that .-.L66 will point to the
9689 * beginning of the instruction, so we need to further modify
9690 * the operand so that it will point to itself. There are
9691 * other cases where you have something like:
9693 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
9695 * and here no correction would be required. Internally in
9696 * the assembler we treat operands of this form as not being
9697 * pcrel since the '.' is explicitly mentioned, and I wonder
9698 * whether it would simplify matters to do it this way. Who
9699 * knows. In earlier versions of the PIC patches, the
9700 * pcrel_adjust field was used to store the correction, but
9701 * since the expression is not pcrel, I felt it would be
9702 * confusing to do it this way. */
9704 if ((reloc_type
== BFD_RELOC_32
9705 || reloc_type
== BFD_RELOC_X86_64_32S
9706 || reloc_type
== BFD_RELOC_64
)
9708 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
9709 && (i
.op
[n
].imms
->X_op
== O_symbol
9710 || (i
.op
[n
].imms
->X_op
== O_add
9711 && ((symbol_get_value_expression
9712 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
9716 reloc_type
= BFD_RELOC_386_GOTPC
;
9718 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
9720 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
9721 i
.has_gotpc_tls_reloc
= TRUE
;
9722 i
.op
[n
].imms
->X_add_number
+=
9723 encoding_length (insn_start_frag
, insn_start_off
, p
);
9725 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
9726 i
.op
[n
].imms
, 0, reloc_type
);
9732 /* x86_cons_fix_new is called via the expression parsing code when a
9733 reloc is needed. We use this hook to get the correct .got reloc. */
9734 static int cons_sign
= -1;
9737 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
9738 expressionS
*exp
, bfd_reloc_code_real_type r
)
9740 r
= reloc (len
, 0, cons_sign
, r
);
9743 if (exp
->X_op
== O_secrel
)
9745 exp
->X_op
= O_symbol
;
9746 r
= BFD_RELOC_32_SECREL
;
9750 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
9753 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
9754 purpose of the `.dc.a' internal pseudo-op. */
9757 x86_address_bytes (void)
9759 if ((stdoutput
->arch_info
->mach
& bfd_mach_x64_32
))
9761 return stdoutput
->arch_info
->bits_per_address
/ 8;
9764 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
9766 # define lex_got(reloc, adjust, types) NULL
9768 /* Parse operands of the form
9769 <symbol>@GOTOFF+<nnn>
9770 and similar .plt or .got references.
9772 If we find one, set up the correct relocation in RELOC and copy the
9773 input string, minus the `@GOTOFF' into a malloc'd buffer for
9774 parsing by the calling routine. Return this buffer, and if ADJUST
9775 is non-null set it to the length of the string we removed from the
9776 input line. Otherwise return NULL. */
9778 lex_got (enum bfd_reloc_code_real
*rel
,
9780 i386_operand_type
*types
)
9782 /* Some of the relocations depend on the size of what field is to
9783 be relocated. But in our callers i386_immediate and i386_displacement
9784 we don't yet know the operand size (this will be set by insn
9785 matching). Hence we record the word32 relocation here,
9786 and adjust the reloc according to the real size in reloc(). */
9787 static const struct {
9790 const enum bfd_reloc_code_real rel
[2];
9791 const i386_operand_type types64
;
9793 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9794 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32
,
9796 OPERAND_TYPE_IMM32_64
},
9798 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real
,
9799 BFD_RELOC_X86_64_PLTOFF64
},
9800 OPERAND_TYPE_IMM64
},
9801 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32
,
9802 BFD_RELOC_X86_64_PLT32
},
9803 OPERAND_TYPE_IMM32_32S_DISP32
},
9804 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real
,
9805 BFD_RELOC_X86_64_GOTPLT64
},
9806 OPERAND_TYPE_IMM64_DISP64
},
9807 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF
,
9808 BFD_RELOC_X86_64_GOTOFF64
},
9809 OPERAND_TYPE_IMM64_DISP64
},
9810 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real
,
9811 BFD_RELOC_X86_64_GOTPCREL
},
9812 OPERAND_TYPE_IMM32_32S_DISP32
},
9813 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD
,
9814 BFD_RELOC_X86_64_TLSGD
},
9815 OPERAND_TYPE_IMM32_32S_DISP32
},
9816 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM
,
9817 _dummy_first_bfd_reloc_code_real
},
9818 OPERAND_TYPE_NONE
},
9819 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real
,
9820 BFD_RELOC_X86_64_TLSLD
},
9821 OPERAND_TYPE_IMM32_32S_DISP32
},
9822 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32
,
9823 BFD_RELOC_X86_64_GOTTPOFF
},
9824 OPERAND_TYPE_IMM32_32S_DISP32
},
9825 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32
,
9826 BFD_RELOC_X86_64_TPOFF32
},
9827 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
9828 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE
,
9829 _dummy_first_bfd_reloc_code_real
},
9830 OPERAND_TYPE_NONE
},
9831 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32
,
9832 BFD_RELOC_X86_64_DTPOFF32
},
9833 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
9834 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE
,
9835 _dummy_first_bfd_reloc_code_real
},
9836 OPERAND_TYPE_NONE
},
9837 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE
,
9838 _dummy_first_bfd_reloc_code_real
},
9839 OPERAND_TYPE_NONE
},
9840 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32
,
9841 BFD_RELOC_X86_64_GOT32
},
9842 OPERAND_TYPE_IMM32_32S_64_DISP32
},
9843 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC
,
9844 BFD_RELOC_X86_64_GOTPC32_TLSDESC
},
9845 OPERAND_TYPE_IMM32_32S_DISP32
},
9846 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL
,
9847 BFD_RELOC_X86_64_TLSDESC_CALL
},
9848 OPERAND_TYPE_IMM32_32S_DISP32
},
9853 #if defined (OBJ_MAYBE_ELF)
9858 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
9859 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
9862 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
9864 int len
= gotrel
[j
].len
;
9865 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
9867 if (gotrel
[j
].rel
[object_64bit
] != 0)
9870 char *tmpbuf
, *past_reloc
;
9872 *rel
= gotrel
[j
].rel
[object_64bit
];
9876 if (flag_code
!= CODE_64BIT
)
9878 types
->bitfield
.imm32
= 1;
9879 types
->bitfield
.disp32
= 1;
9882 *types
= gotrel
[j
].types64
;
9885 if (j
!= 0 && GOT_symbol
== NULL
)
9886 GOT_symbol
= symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME
);
9888 /* The length of the first part of our input line. */
9889 first
= cp
- input_line_pointer
;
9891 /* The second part goes from after the reloc token until
9892 (and including) an end_of_line char or comma. */
9893 past_reloc
= cp
+ 1 + len
;
9895 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
9897 second
= cp
+ 1 - past_reloc
;
9899 /* Allocate and copy string. The trailing NUL shouldn't
9900 be necessary, but be safe. */
9901 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
9902 memcpy (tmpbuf
, input_line_pointer
, first
);
9903 if (second
!= 0 && *past_reloc
!= ' ')
9904 /* Replace the relocation token with ' ', so that
9905 errors like foo@GOTOFF1 will be detected. */
9906 tmpbuf
[first
++] = ' ';
9908 /* Increment length by 1 if the relocation token is
9913 memcpy (tmpbuf
+ first
, past_reloc
, second
);
9914 tmpbuf
[first
+ second
] = '\0';
9918 as_bad (_("@%s reloc is not supported with %d-bit output format"),
9919 gotrel
[j
].str
, 1 << (5 + object_64bit
));
9924 /* Might be a symbol version string. Don't as_bad here. */
9933 /* Parse operands of the form
9934 <symbol>@SECREL32+<nnn>
9936 If we find one, set up the correct relocation in RELOC and copy the
9937 input string, minus the `@SECREL32' into a malloc'd buffer for
9938 parsing by the calling routine. Return this buffer, and if ADJUST
9939 is non-null set it to the length of the string we removed from the
9940 input line. Otherwise return NULL.
9942 This function is copied from the ELF version above adjusted for PE targets. */
9945 lex_got (enum bfd_reloc_code_real
*rel ATTRIBUTE_UNUSED
,
9946 int *adjust ATTRIBUTE_UNUSED
,
9947 i386_operand_type
*types
)
9953 const enum bfd_reloc_code_real rel
[2];
9954 const i386_operand_type types64
;
9958 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL
,
9959 BFD_RELOC_32_SECREL
},
9960 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
9966 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
9967 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
9970 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
9972 int len
= gotrel
[j
].len
;
9974 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
9976 if (gotrel
[j
].rel
[object_64bit
] != 0)
9979 char *tmpbuf
, *past_reloc
;
9981 *rel
= gotrel
[j
].rel
[object_64bit
];
9987 if (flag_code
!= CODE_64BIT
)
9989 types
->bitfield
.imm32
= 1;
9990 types
->bitfield
.disp32
= 1;
9993 *types
= gotrel
[j
].types64
;
9996 /* The length of the first part of our input line. */
9997 first
= cp
- input_line_pointer
;
9999 /* The second part goes from after the reloc token until
10000 (and including) an end_of_line char or comma. */
10001 past_reloc
= cp
+ 1 + len
;
10003 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
10005 second
= cp
+ 1 - past_reloc
;
10007 /* Allocate and copy string. The trailing NUL shouldn't
10008 be necessary, but be safe. */
10009 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
10010 memcpy (tmpbuf
, input_line_pointer
, first
);
10011 if (second
!= 0 && *past_reloc
!= ' ')
10012 /* Replace the relocation token with ' ', so that
10013 errors like foo@SECLREL321 will be detected. */
10014 tmpbuf
[first
++] = ' ';
10015 memcpy (tmpbuf
+ first
, past_reloc
, second
);
10016 tmpbuf
[first
+ second
] = '\0';
10020 as_bad (_("@%s reloc is not supported with %d-bit output format"),
10021 gotrel
[j
].str
, 1 << (5 + object_64bit
));
10026 /* Might be a symbol version string. Don't as_bad here. */
10032 bfd_reloc_code_real_type
10033 x86_cons (expressionS
*exp
, int size
)
10035 bfd_reloc_code_real_type got_reloc
= NO_RELOC
;
10037 intel_syntax
= -intel_syntax
;
10040 if (size
== 4 || (object_64bit
&& size
== 8))
10042 /* Handle @GOTOFF and the like in an expression. */
10044 char *gotfree_input_line
;
10047 save
= input_line_pointer
;
10048 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
);
10049 if (gotfree_input_line
)
10050 input_line_pointer
= gotfree_input_line
;
10054 if (gotfree_input_line
)
10056 /* expression () has merrily parsed up to the end of line,
10057 or a comma - in the wrong buffer. Transfer how far
10058 input_line_pointer has moved to the right buffer. */
10059 input_line_pointer
= (save
10060 + (input_line_pointer
- gotfree_input_line
)
10062 free (gotfree_input_line
);
10063 if (exp
->X_op
== O_constant
10064 || exp
->X_op
== O_absent
10065 || exp
->X_op
== O_illegal
10066 || exp
->X_op
== O_register
10067 || exp
->X_op
== O_big
)
10069 char c
= *input_line_pointer
;
10070 *input_line_pointer
= 0;
10071 as_bad (_("missing or invalid expression `%s'"), save
);
10072 *input_line_pointer
= c
;
10074 else if ((got_reloc
== BFD_RELOC_386_PLT32
10075 || got_reloc
== BFD_RELOC_X86_64_PLT32
)
10076 && exp
->X_op
!= O_symbol
)
10078 char c
= *input_line_pointer
;
10079 *input_line_pointer
= 0;
10080 as_bad (_("invalid PLT expression `%s'"), save
);
10081 *input_line_pointer
= c
;
10088 intel_syntax
= -intel_syntax
;
10091 i386_intel_simplify (exp
);
10097 signed_cons (int size
)
10099 if (flag_code
== CODE_64BIT
)
10107 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
10114 if (exp
.X_op
== O_symbol
)
10115 exp
.X_op
= O_secrel
;
10117 emit_expr (&exp
, 4);
10119 while (*input_line_pointer
++ == ',');
10121 input_line_pointer
--;
10122 demand_empty_rest_of_line ();
10126 /* Handle Vector operations. */
10129 check_VecOperations (char *op_string
, char *op_end
)
10131 const reg_entry
*mask
;
10136 && (op_end
== NULL
|| op_string
< op_end
))
10139 if (*op_string
== '{')
10143 /* Check broadcasts. */
10144 if (strncmp (op_string
, "1to", 3) == 0)
10149 goto duplicated_vec_op
;
10152 if (*op_string
== '8')
10154 else if (*op_string
== '4')
10156 else if (*op_string
== '2')
10158 else if (*op_string
== '1'
10159 && *(op_string
+1) == '6')
10166 as_bad (_("Unsupported broadcast: `%s'"), saved
);
10171 broadcast_op
.type
= bcst_type
;
10172 broadcast_op
.operand
= this_operand
;
10173 broadcast_op
.bytes
= 0;
10174 i
.broadcast
= &broadcast_op
;
10176 /* Check masking operation. */
10177 else if ((mask
= parse_register (op_string
, &end_op
)) != NULL
)
10179 /* k0 can't be used for write mask. */
10180 if (mask
->reg_type
.bitfield
.class != RegMask
|| !mask
->reg_num
)
10182 as_bad (_("`%s%s' can't be used for write mask"),
10183 register_prefix
, mask
->reg_name
);
10189 mask_op
.mask
= mask
;
10190 mask_op
.zeroing
= 0;
10191 mask_op
.operand
= this_operand
;
10197 goto duplicated_vec_op
;
10199 i
.mask
->mask
= mask
;
10201 /* Only "{z}" is allowed here. No need to check
10202 zeroing mask explicitly. */
10203 if (i
.mask
->operand
!= this_operand
)
10205 as_bad (_("invalid write mask `%s'"), saved
);
10210 op_string
= end_op
;
10212 /* Check zeroing-flag for masking operation. */
10213 else if (*op_string
== 'z')
10217 mask_op
.mask
= NULL
;
10218 mask_op
.zeroing
= 1;
10219 mask_op
.operand
= this_operand
;
10224 if (i
.mask
->zeroing
)
10227 as_bad (_("duplicated `%s'"), saved
);
10231 i
.mask
->zeroing
= 1;
10233 /* Only "{%k}" is allowed here. No need to check mask
10234 register explicitly. */
10235 if (i
.mask
->operand
!= this_operand
)
10237 as_bad (_("invalid zeroing-masking `%s'"),
10246 goto unknown_vec_op
;
10248 if (*op_string
!= '}')
10250 as_bad (_("missing `}' in `%s'"), saved
);
10255 /* Strip whitespace since the addition of pseudo prefixes
10256 changed how the scrubber treats '{'. */
10257 if (is_space_char (*op_string
))
10263 /* We don't know this one. */
10264 as_bad (_("unknown vector operation: `%s'"), saved
);
10268 if (i
.mask
&& i
.mask
->zeroing
&& !i
.mask
->mask
)
10270 as_bad (_("zeroing-masking only allowed with write mask"));
10278 i386_immediate (char *imm_start
)
10280 char *save_input_line_pointer
;
10281 char *gotfree_input_line
;
10284 i386_operand_type types
;
10286 operand_type_set (&types
, ~0);
10288 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
10290 as_bad (_("at most %d immediate operands are allowed"),
10291 MAX_IMMEDIATE_OPERANDS
);
10295 exp
= &im_expressions
[i
.imm_operands
++];
10296 i
.op
[this_operand
].imms
= exp
;
10298 if (is_space_char (*imm_start
))
10301 save_input_line_pointer
= input_line_pointer
;
10302 input_line_pointer
= imm_start
;
10304 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
10305 if (gotfree_input_line
)
10306 input_line_pointer
= gotfree_input_line
;
10308 exp_seg
= expression (exp
);
10310 SKIP_WHITESPACE ();
10312 /* Handle vector operations. */
10313 if (*input_line_pointer
== '{')
10315 input_line_pointer
= check_VecOperations (input_line_pointer
,
10317 if (input_line_pointer
== NULL
)
10321 if (*input_line_pointer
)
10322 as_bad (_("junk `%s' after expression"), input_line_pointer
);
10324 input_line_pointer
= save_input_line_pointer
;
10325 if (gotfree_input_line
)
10327 free (gotfree_input_line
);
10329 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
10330 exp
->X_op
= O_illegal
;
10333 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
10337 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
10338 i386_operand_type types
, const char *imm_start
)
10340 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
10343 as_bad (_("missing or invalid immediate expression `%s'"),
10347 else if (exp
->X_op
== O_constant
)
10349 /* Size it properly later. */
10350 i
.types
[this_operand
].bitfield
.imm64
= 1;
10351 /* If not 64bit, sign extend val. */
10352 if (flag_code
!= CODE_64BIT
10353 && (exp
->X_add_number
& ~(((addressT
) 2 << 31) - 1)) == 0)
10355 = (exp
->X_add_number
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
10357 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10358 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
10359 && exp_seg
!= absolute_section
10360 && exp_seg
!= text_section
10361 && exp_seg
!= data_section
10362 && exp_seg
!= bss_section
10363 && exp_seg
!= undefined_section
10364 && !bfd_is_com_section (exp_seg
))
10366 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
10370 else if (!intel_syntax
&& exp_seg
== reg_section
)
10373 as_bad (_("illegal immediate register operand %s"), imm_start
);
10378 /* This is an address. The size of the address will be
10379 determined later, depending on destination register,
10380 suffix, or the default for the section. */
10381 i
.types
[this_operand
].bitfield
.imm8
= 1;
10382 i
.types
[this_operand
].bitfield
.imm16
= 1;
10383 i
.types
[this_operand
].bitfield
.imm32
= 1;
10384 i
.types
[this_operand
].bitfield
.imm32s
= 1;
10385 i
.types
[this_operand
].bitfield
.imm64
= 1;
10386 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
10394 i386_scale (char *scale
)
10397 char *save
= input_line_pointer
;
10399 input_line_pointer
= scale
;
10400 val
= get_absolute_expression ();
10405 i
.log2_scale_factor
= 0;
10408 i
.log2_scale_factor
= 1;
10411 i
.log2_scale_factor
= 2;
10414 i
.log2_scale_factor
= 3;
10418 char sep
= *input_line_pointer
;
10420 *input_line_pointer
= '\0';
10421 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
10423 *input_line_pointer
= sep
;
10424 input_line_pointer
= save
;
10428 if (i
.log2_scale_factor
!= 0 && i
.index_reg
== 0)
10430 as_warn (_("scale factor of %d without an index register"),
10431 1 << i
.log2_scale_factor
);
10432 i
.log2_scale_factor
= 0;
10434 scale
= input_line_pointer
;
10435 input_line_pointer
= save
;
10440 i386_displacement (char *disp_start
, char *disp_end
)
10444 char *save_input_line_pointer
;
10445 char *gotfree_input_line
;
10447 i386_operand_type bigdisp
, types
= anydisp
;
10450 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
10452 as_bad (_("at most %d displacement operands are allowed"),
10453 MAX_MEMORY_OPERANDS
);
10457 operand_type_set (&bigdisp
, 0);
10459 || i
.types
[this_operand
].bitfield
.baseindex
10460 || (current_templates
->start
->opcode_modifier
.jump
!= JUMP
10461 && current_templates
->start
->opcode_modifier
.jump
!= JUMP_DWORD
))
10463 i386_addressing_mode ();
10464 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
10465 if (flag_code
== CODE_64BIT
)
10469 bigdisp
.bitfield
.disp32s
= 1;
10470 bigdisp
.bitfield
.disp64
= 1;
10473 bigdisp
.bitfield
.disp32
= 1;
10475 else if ((flag_code
== CODE_16BIT
) ^ override
)
10476 bigdisp
.bitfield
.disp16
= 1;
10478 bigdisp
.bitfield
.disp32
= 1;
10482 /* For PC-relative branches, the width of the displacement may be
10483 dependent upon data size, but is never dependent upon address size.
10484 Also make sure to not unintentionally match against a non-PC-relative
10485 branch template. */
10486 static templates aux_templates
;
10487 const insn_template
*t
= current_templates
->start
;
10488 bfd_boolean has_intel64
= FALSE
;
10490 aux_templates
.start
= t
;
10491 while (++t
< current_templates
->end
)
10493 if (t
->opcode_modifier
.jump
10494 != current_templates
->start
->opcode_modifier
.jump
)
10496 if ((t
->opcode_modifier
.isa64
>= INTEL64
))
10497 has_intel64
= TRUE
;
10499 if (t
< current_templates
->end
)
10501 aux_templates
.end
= t
;
10502 current_templates
= &aux_templates
;
10505 override
= (i
.prefix
[DATA_PREFIX
] != 0);
10506 if (flag_code
== CODE_64BIT
)
10508 if ((override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
10509 && (!intel64
|| !has_intel64
))
10510 bigdisp
.bitfield
.disp16
= 1;
10512 bigdisp
.bitfield
.disp32s
= 1;
10517 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
10519 : LONG_MNEM_SUFFIX
));
10520 bigdisp
.bitfield
.disp32
= 1;
10521 if ((flag_code
== CODE_16BIT
) ^ override
)
10523 bigdisp
.bitfield
.disp32
= 0;
10524 bigdisp
.bitfield
.disp16
= 1;
10528 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
10531 exp
= &disp_expressions
[i
.disp_operands
];
10532 i
.op
[this_operand
].disps
= exp
;
10534 save_input_line_pointer
= input_line_pointer
;
10535 input_line_pointer
= disp_start
;
10536 END_STRING_AND_SAVE (disp_end
);
10538 #ifndef GCC_ASM_O_HACK
10539 #define GCC_ASM_O_HACK 0
10542 END_STRING_AND_SAVE (disp_end
+ 1);
10543 if (i
.types
[this_operand
].bitfield
.baseIndex
10544 && displacement_string_end
[-1] == '+')
10546 /* This hack is to avoid a warning when using the "o"
10547 constraint within gcc asm statements.
10550 #define _set_tssldt_desc(n,addr,limit,type) \
10551 __asm__ __volatile__ ( \
10552 "movw %w2,%0\n\t" \
10553 "movw %w1,2+%0\n\t" \
10554 "rorl $16,%1\n\t" \
10555 "movb %b1,4+%0\n\t" \
10556 "movb %4,5+%0\n\t" \
10557 "movb $0,6+%0\n\t" \
10558 "movb %h1,7+%0\n\t" \
10560 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
10562 This works great except that the output assembler ends
10563 up looking a bit weird if it turns out that there is
10564 no offset. You end up producing code that looks like:
10577 So here we provide the missing zero. */
10579 *displacement_string_end
= '0';
10582 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
10583 if (gotfree_input_line
)
10584 input_line_pointer
= gotfree_input_line
;
10586 exp_seg
= expression (exp
);
10588 SKIP_WHITESPACE ();
10589 if (*input_line_pointer
)
10590 as_bad (_("junk `%s' after expression"), input_line_pointer
);
10592 RESTORE_END_STRING (disp_end
+ 1);
10594 input_line_pointer
= save_input_line_pointer
;
10595 if (gotfree_input_line
)
10597 free (gotfree_input_line
);
10599 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
10600 exp
->X_op
= O_illegal
;
10603 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
10605 RESTORE_END_STRING (disp_end
);
10611 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
10612 i386_operand_type types
, const char *disp_start
)
10614 i386_operand_type bigdisp
;
10617 /* We do this to make sure that the section symbol is in
10618 the symbol table. We will ultimately change the relocation
10619 to be relative to the beginning of the section. */
10620 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
10621 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
10622 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
10624 if (exp
->X_op
!= O_symbol
)
10627 if (S_IS_LOCAL (exp
->X_add_symbol
)
10628 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
10629 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
10630 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
10631 exp
->X_op
= O_subtract
;
10632 exp
->X_op_symbol
= GOT_symbol
;
10633 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
10634 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
10635 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
10636 i
.reloc
[this_operand
] = BFD_RELOC_64
;
10638 i
.reloc
[this_operand
] = BFD_RELOC_32
;
10641 else if (exp
->X_op
== O_absent
10642 || exp
->X_op
== O_illegal
10643 || exp
->X_op
== O_big
)
10646 as_bad (_("missing or invalid displacement expression `%s'"),
10651 else if (flag_code
== CODE_64BIT
10652 && !i
.prefix
[ADDR_PREFIX
]
10653 && exp
->X_op
== O_constant
)
10655 /* Since displacement is signed extended to 64bit, don't allow
10656 disp32 and turn off disp32s if they are out of range. */
10657 i
.types
[this_operand
].bitfield
.disp32
= 0;
10658 if (!fits_in_signed_long (exp
->X_add_number
))
10660 i
.types
[this_operand
].bitfield
.disp32s
= 0;
10661 if (i
.types
[this_operand
].bitfield
.baseindex
)
10663 as_bad (_("0x%lx out range of signed 32bit displacement"),
10664 (long) exp
->X_add_number
);
10670 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10671 else if (exp
->X_op
!= O_constant
10672 && OUTPUT_FLAVOR
== bfd_target_aout_flavour
10673 && exp_seg
!= absolute_section
10674 && exp_seg
!= text_section
10675 && exp_seg
!= data_section
10676 && exp_seg
!= bss_section
10677 && exp_seg
!= undefined_section
10678 && !bfd_is_com_section (exp_seg
))
10680 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
10685 if (current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
10686 /* Constants get taken care of by optimize_disp(). */
10687 && exp
->X_op
!= O_constant
)
10688 i
.types
[this_operand
].bitfield
.disp8
= 1;
10690 /* Check if this is a displacement only operand. */
10691 bigdisp
= i
.types
[this_operand
];
10692 bigdisp
.bitfield
.disp8
= 0;
10693 bigdisp
.bitfield
.disp16
= 0;
10694 bigdisp
.bitfield
.disp32
= 0;
10695 bigdisp
.bitfield
.disp32s
= 0;
10696 bigdisp
.bitfield
.disp64
= 0;
10697 if (operand_type_all_zero (&bigdisp
))
10698 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
10704 /* Return the active addressing mode, taking address override and
10705 registers forming the address into consideration. Update the
10706 address override prefix if necessary. */
10708 static enum flag_code
10709 i386_addressing_mode (void)
10711 enum flag_code addr_mode
;
10713 if (i
.prefix
[ADDR_PREFIX
])
10714 addr_mode
= flag_code
== CODE_32BIT
? CODE_16BIT
: CODE_32BIT
;
10715 else if (flag_code
== CODE_16BIT
10716 && current_templates
->start
->cpu_flags
.bitfield
.cpumpx
10717 /* Avoid replacing the "16-bit addressing not allowed" diagnostic
10718 from md_assemble() by "is not a valid base/index expression"
10719 when there is a base and/or index. */
10720 && !i
.types
[this_operand
].bitfield
.baseindex
)
10722 /* MPX insn memory operands with neither base nor index must be forced
10723 to use 32-bit addressing in 16-bit mode. */
10724 addr_mode
= CODE_32BIT
;
10725 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
10727 gas_assert (!i
.types
[this_operand
].bitfield
.disp16
);
10728 gas_assert (!i
.types
[this_operand
].bitfield
.disp32
);
10732 addr_mode
= flag_code
;
10734 #if INFER_ADDR_PREFIX
10735 if (i
.mem_operands
== 0)
10737 /* Infer address prefix from the first memory operand. */
10738 const reg_entry
*addr_reg
= i
.base_reg
;
10740 if (addr_reg
== NULL
)
10741 addr_reg
= i
.index_reg
;
10745 if (addr_reg
->reg_type
.bitfield
.dword
)
10746 addr_mode
= CODE_32BIT
;
10747 else if (flag_code
!= CODE_64BIT
10748 && addr_reg
->reg_type
.bitfield
.word
)
10749 addr_mode
= CODE_16BIT
;
10751 if (addr_mode
!= flag_code
)
10753 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
10755 /* Change the size of any displacement too. At most one
10756 of Disp16 or Disp32 is set.
10757 FIXME. There doesn't seem to be any real need for
10758 separate Disp16 and Disp32 flags. The same goes for
10759 Imm16 and Imm32. Removing them would probably clean
10760 up the code quite a lot. */
10761 if (flag_code
!= CODE_64BIT
10762 && (i
.types
[this_operand
].bitfield
.disp16
10763 || i
.types
[this_operand
].bitfield
.disp32
))
10764 i
.types
[this_operand
]
10765 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
10775 /* Make sure the memory operand we've been dealt is valid.
10776 Return 1 on success, 0 on a failure. */
10779 i386_index_check (const char *operand_string
)
10781 const char *kind
= "base/index";
10782 enum flag_code addr_mode
= i386_addressing_mode ();
10784 if (current_templates
->start
->opcode_modifier
.isstring
10785 && !current_templates
->start
->cpu_flags
.bitfield
.cpupadlock
10786 && (current_templates
->end
[-1].opcode_modifier
.isstring
10787 || i
.mem_operands
))
10789 /* Memory operands of string insns are special in that they only allow
10790 a single register (rDI, rSI, or rBX) as their memory address. */
10791 const reg_entry
*expected_reg
;
10792 static const char *di_si
[][2] =
10798 static const char *bx
[] = { "ebx", "bx", "rbx" };
10800 kind
= "string address";
10802 if (current_templates
->start
->opcode_modifier
.repprefixok
)
10804 int es_op
= current_templates
->end
[-1].opcode_modifier
.isstring
10805 - IS_STRING_ES_OP0
;
10808 if (!current_templates
->end
[-1].operand_types
[0].bitfield
.baseindex
10809 || ((!i
.mem_operands
!= !intel_syntax
)
10810 && current_templates
->end
[-1].operand_types
[1]
10811 .bitfield
.baseindex
))
10813 expected_reg
= hash_find (reg_hash
, di_si
[addr_mode
][op
== es_op
]);
10816 expected_reg
= hash_find (reg_hash
, bx
[addr_mode
]);
10818 if (i
.base_reg
!= expected_reg
10820 || operand_type_check (i
.types
[this_operand
], disp
))
10822 /* The second memory operand must have the same size as
10826 && !((addr_mode
== CODE_64BIT
10827 && i
.base_reg
->reg_type
.bitfield
.qword
)
10828 || (addr_mode
== CODE_32BIT
10829 ? i
.base_reg
->reg_type
.bitfield
.dword
10830 : i
.base_reg
->reg_type
.bitfield
.word
)))
10833 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
10835 intel_syntax
? '[' : '(',
10837 expected_reg
->reg_name
,
10838 intel_syntax
? ']' : ')');
10845 as_bad (_("`%s' is not a valid %s expression"),
10846 operand_string
, kind
);
10851 if (addr_mode
!= CODE_16BIT
)
10853 /* 32-bit/64-bit checks. */
10855 && ((addr_mode
== CODE_64BIT
10856 ? !i
.base_reg
->reg_type
.bitfield
.qword
10857 : !i
.base_reg
->reg_type
.bitfield
.dword
)
10858 || (i
.index_reg
&& i
.base_reg
->reg_num
== RegIP
)
10859 || i
.base_reg
->reg_num
== RegIZ
))
10861 && !i
.index_reg
->reg_type
.bitfield
.xmmword
10862 && !i
.index_reg
->reg_type
.bitfield
.ymmword
10863 && !i
.index_reg
->reg_type
.bitfield
.zmmword
10864 && ((addr_mode
== CODE_64BIT
10865 ? !i
.index_reg
->reg_type
.bitfield
.qword
10866 : !i
.index_reg
->reg_type
.bitfield
.dword
)
10867 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
10870 /* bndmk, bndldx, and bndstx have special restrictions. */
10871 if (current_templates
->start
->base_opcode
== 0xf30f1b
10872 || (current_templates
->start
->base_opcode
& ~1) == 0x0f1a)
10874 /* They cannot use RIP-relative addressing. */
10875 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
10877 as_bad (_("`%s' cannot be used here"), operand_string
);
10881 /* bndldx and bndstx ignore their scale factor. */
10882 if (current_templates
->start
->base_opcode
!= 0xf30f1b
10883 && i
.log2_scale_factor
)
10884 as_warn (_("register scaling is being ignored here"));
10889 /* 16-bit checks. */
10891 && (!i
.base_reg
->reg_type
.bitfield
.word
10892 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
10894 && (!i
.index_reg
->reg_type
.bitfield
.word
10895 || !i
.index_reg
->reg_type
.bitfield
.baseindex
10897 && i
.base_reg
->reg_num
< 6
10898 && i
.index_reg
->reg_num
>= 6
10899 && i
.log2_scale_factor
== 0))))
10906 /* Handle vector immediates. */
10909 RC_SAE_immediate (const char *imm_start
)
10911 unsigned int match_found
, j
;
10912 const char *pstr
= imm_start
;
10920 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); j
++)
10922 if (!strncmp (pstr
, RC_NamesTable
[j
].name
, RC_NamesTable
[j
].len
))
10926 rc_op
.type
= RC_NamesTable
[j
].type
;
10927 rc_op
.operand
= this_operand
;
10928 i
.rounding
= &rc_op
;
10932 as_bad (_("duplicated `%s'"), imm_start
);
10935 pstr
+= RC_NamesTable
[j
].len
;
10943 if (*pstr
++ != '}')
10945 as_bad (_("Missing '}': '%s'"), imm_start
);
10948 /* RC/SAE immediate string should contain nothing more. */;
10951 as_bad (_("Junk after '}': '%s'"), imm_start
);
10955 exp
= &im_expressions
[i
.imm_operands
++];
10956 i
.op
[this_operand
].imms
= exp
;
10958 exp
->X_op
= O_constant
;
10959 exp
->X_add_number
= 0;
10960 exp
->X_add_symbol
= (symbolS
*) 0;
10961 exp
->X_op_symbol
= (symbolS
*) 0;
10963 i
.types
[this_operand
].bitfield
.imm8
= 1;
10967 /* Only string instructions can have a second memory operand, so
10968 reduce current_templates to just those if it contains any. */
10970 maybe_adjust_templates (void)
10972 const insn_template
*t
;
10974 gas_assert (i
.mem_operands
== 1);
10976 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
10977 if (t
->opcode_modifier
.isstring
)
10980 if (t
< current_templates
->end
)
10982 static templates aux_templates
;
10983 bfd_boolean recheck
;
10985 aux_templates
.start
= t
;
10986 for (; t
< current_templates
->end
; ++t
)
10987 if (!t
->opcode_modifier
.isstring
)
10989 aux_templates
.end
= t
;
10991 /* Determine whether to re-check the first memory operand. */
10992 recheck
= (aux_templates
.start
!= current_templates
->start
10993 || t
!= current_templates
->end
);
10995 current_templates
= &aux_templates
;
10999 i
.mem_operands
= 0;
11000 if (i
.memop1_string
!= NULL
11001 && i386_index_check (i
.memop1_string
) == 0)
11003 i
.mem_operands
= 1;
11010 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
11014 i386_att_operand (char *operand_string
)
11016 const reg_entry
*r
;
11018 char *op_string
= operand_string
;
11020 if (is_space_char (*op_string
))
11023 /* We check for an absolute prefix (differentiating,
11024 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
11025 if (*op_string
== ABSOLUTE_PREFIX
)
11028 if (is_space_char (*op_string
))
11030 i
.jumpabsolute
= TRUE
;
11033 /* Check if operand is a register. */
11034 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
11036 i386_operand_type temp
;
11038 /* Check for a segment override by searching for ':' after a
11039 segment register. */
11040 op_string
= end_op
;
11041 if (is_space_char (*op_string
))
11043 if (*op_string
== ':' && r
->reg_type
.bitfield
.class == SReg
)
11045 switch (r
->reg_num
)
11048 i
.seg
[i
.mem_operands
] = &es
;
11051 i
.seg
[i
.mem_operands
] = &cs
;
11054 i
.seg
[i
.mem_operands
] = &ss
;
11057 i
.seg
[i
.mem_operands
] = &ds
;
11060 i
.seg
[i
.mem_operands
] = &fs
;
11063 i
.seg
[i
.mem_operands
] = &gs
;
11067 /* Skip the ':' and whitespace. */
11069 if (is_space_char (*op_string
))
11072 if (!is_digit_char (*op_string
)
11073 && !is_identifier_char (*op_string
)
11074 && *op_string
!= '('
11075 && *op_string
!= ABSOLUTE_PREFIX
)
11077 as_bad (_("bad memory operand `%s'"), op_string
);
11080 /* Handle case of %es:*foo. */
11081 if (*op_string
== ABSOLUTE_PREFIX
)
11084 if (is_space_char (*op_string
))
11086 i
.jumpabsolute
= TRUE
;
11088 goto do_memory_reference
;
11091 /* Handle vector operations. */
11092 if (*op_string
== '{')
11094 op_string
= check_VecOperations (op_string
, NULL
);
11095 if (op_string
== NULL
)
11101 as_bad (_("junk `%s' after register"), op_string
);
11104 temp
= r
->reg_type
;
11105 temp
.bitfield
.baseindex
= 0;
11106 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
11108 i
.types
[this_operand
].bitfield
.unspecified
= 0;
11109 i
.op
[this_operand
].regs
= r
;
11112 else if (*op_string
== REGISTER_PREFIX
)
11114 as_bad (_("bad register name `%s'"), op_string
);
11117 else if (*op_string
== IMMEDIATE_PREFIX
)
11120 if (i
.jumpabsolute
)
11122 as_bad (_("immediate operand illegal with absolute jump"));
11125 if (!i386_immediate (op_string
))
11128 else if (RC_SAE_immediate (operand_string
))
11130 /* If it is a RC or SAE immediate, do nothing. */
11133 else if (is_digit_char (*op_string
)
11134 || is_identifier_char (*op_string
)
11135 || *op_string
== '"'
11136 || *op_string
== '(')
11138 /* This is a memory reference of some sort. */
11141 /* Start and end of displacement string expression (if found). */
11142 char *displacement_string_start
;
11143 char *displacement_string_end
;
11146 do_memory_reference
:
11147 if (i
.mem_operands
== 1 && !maybe_adjust_templates ())
11149 if ((i
.mem_operands
== 1
11150 && !current_templates
->start
->opcode_modifier
.isstring
)
11151 || i
.mem_operands
== 2)
11153 as_bad (_("too many memory references for `%s'"),
11154 current_templates
->start
->name
);
11158 /* Check for base index form. We detect the base index form by
11159 looking for an ')' at the end of the operand, searching
11160 for the '(' matching it, and finding a REGISTER_PREFIX or ','
11162 base_string
= op_string
+ strlen (op_string
);
11164 /* Handle vector operations. */
11165 vop_start
= strchr (op_string
, '{');
11166 if (vop_start
&& vop_start
< base_string
)
11168 if (check_VecOperations (vop_start
, base_string
) == NULL
)
11170 base_string
= vop_start
;
11174 if (is_space_char (*base_string
))
11177 /* If we only have a displacement, set-up for it to be parsed later. */
11178 displacement_string_start
= op_string
;
11179 displacement_string_end
= base_string
+ 1;
11181 if (*base_string
== ')')
11184 unsigned int parens_balanced
= 1;
11185 /* We've already checked that the number of left & right ()'s are
11186 equal, so this loop will not be infinite. */
11190 if (*base_string
== ')')
11192 if (*base_string
== '(')
11195 while (parens_balanced
);
11197 temp_string
= base_string
;
11199 /* Skip past '(' and whitespace. */
11201 if (is_space_char (*base_string
))
11204 if (*base_string
== ','
11205 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
11208 displacement_string_end
= temp_string
;
11210 i
.types
[this_operand
].bitfield
.baseindex
= 1;
11214 base_string
= end_op
;
11215 if (is_space_char (*base_string
))
11219 /* There may be an index reg or scale factor here. */
11220 if (*base_string
== ',')
11223 if (is_space_char (*base_string
))
11226 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
11229 base_string
= end_op
;
11230 if (is_space_char (*base_string
))
11232 if (*base_string
== ',')
11235 if (is_space_char (*base_string
))
11238 else if (*base_string
!= ')')
11240 as_bad (_("expecting `,' or `)' "
11241 "after index register in `%s'"),
11246 else if (*base_string
== REGISTER_PREFIX
)
11248 end_op
= strchr (base_string
, ',');
11251 as_bad (_("bad register name `%s'"), base_string
);
11255 /* Check for scale factor. */
11256 if (*base_string
!= ')')
11258 char *end_scale
= i386_scale (base_string
);
11263 base_string
= end_scale
;
11264 if (is_space_char (*base_string
))
11266 if (*base_string
!= ')')
11268 as_bad (_("expecting `)' "
11269 "after scale factor in `%s'"),
11274 else if (!i
.index_reg
)
11276 as_bad (_("expecting index register or scale factor "
11277 "after `,'; got '%c'"),
11282 else if (*base_string
!= ')')
11284 as_bad (_("expecting `,' or `)' "
11285 "after base register in `%s'"),
11290 else if (*base_string
== REGISTER_PREFIX
)
11292 end_op
= strchr (base_string
, ',');
11295 as_bad (_("bad register name `%s'"), base_string
);
11300 /* If there's an expression beginning the operand, parse it,
11301 assuming displacement_string_start and
11302 displacement_string_end are meaningful. */
11303 if (displacement_string_start
!= displacement_string_end
)
11305 if (!i386_displacement (displacement_string_start
,
11306 displacement_string_end
))
11310 /* Special case for (%dx) while doing input/output op. */
11312 && i
.base_reg
->reg_type
.bitfield
.instance
== RegD
11313 && i
.base_reg
->reg_type
.bitfield
.word
11314 && i
.index_reg
== 0
11315 && i
.log2_scale_factor
== 0
11316 && i
.seg
[i
.mem_operands
] == 0
11317 && !operand_type_check (i
.types
[this_operand
], disp
))
11319 i
.types
[this_operand
] = i
.base_reg
->reg_type
;
11323 if (i386_index_check (operand_string
) == 0)
11325 i
.flags
[this_operand
] |= Operand_Mem
;
11326 if (i
.mem_operands
== 0)
11327 i
.memop1_string
= xstrdup (operand_string
);
11332 /* It's not a memory operand; argh! */
11333 as_bad (_("invalid char %s beginning operand %d `%s'"),
11334 output_invalid (*op_string
),
11339 return 1; /* Normal return. */
11342 /* Calculate the maximum variable size (i.e., excluding fr_fix)
11343 that an rs_machine_dependent frag may reach. */
11346 i386_frag_max_var (fragS
*frag
)
11348 /* The only relaxable frags are for jumps.
11349 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
11350 gas_assert (frag
->fr_type
== rs_machine_dependent
);
11351 return TYPE_FROM_RELAX_STATE (frag
->fr_subtype
) == UNCOND_JUMP
? 4 : 5;
11354 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11356 elf_symbol_resolved_in_segment_p (symbolS
*fr_symbol
, offsetT fr_var
)
11358 /* STT_GNU_IFUNC symbol must go through PLT. */
11359 if ((symbol_get_bfdsym (fr_symbol
)->flags
11360 & BSF_GNU_INDIRECT_FUNCTION
) != 0)
11363 if (!S_IS_EXTERNAL (fr_symbol
))
11364 /* Symbol may be weak or local. */
11365 return !S_IS_WEAK (fr_symbol
);
11367 /* Global symbols with non-default visibility can't be preempted. */
11368 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol
)) != STV_DEFAULT
)
11371 if (fr_var
!= NO_RELOC
)
11372 switch ((enum bfd_reloc_code_real
) fr_var
)
11374 case BFD_RELOC_386_PLT32
:
11375 case BFD_RELOC_X86_64_PLT32
:
11376 /* Symbol with PLT relocation may be preempted. */
11382 /* Global symbols with default visibility in a shared library may be
11383 preempted by another definition. */
11388 /* Table 3-2. Macro-Fusible Instructions in Haswell Microarchitecture
11389 Note also work for Skylake and Cascadelake.
11390 ---------------------------------------------------------------------
11391 | JCC | ADD/SUB/CMP | INC/DEC | TEST/AND |
11392 | ------ | ----------- | ------- | -------- |
11394 | Jno | N | N | Y |
11395 | Jc/Jb | Y | N | Y |
11396 | Jae/Jnb | Y | N | Y |
11397 | Je/Jz | Y | Y | Y |
11398 | Jne/Jnz | Y | Y | Y |
11399 | Jna/Jbe | Y | N | Y |
11400 | Ja/Jnbe | Y | N | Y |
11402 | Jns | N | N | Y |
11403 | Jp/Jpe | N | N | Y |
11404 | Jnp/Jpo | N | N | Y |
11405 | Jl/Jnge | Y | Y | Y |
11406 | Jge/Jnl | Y | Y | Y |
11407 | Jle/Jng | Y | Y | Y |
11408 | Jg/Jnle | Y | Y | Y |
11409 --------------------------------------------------------------------- */
11411 i386_macro_fusible_p (enum mf_cmp_kind mf_cmp
, enum mf_jcc_kind mf_jcc
)
11413 if (mf_cmp
== mf_cmp_alu_cmp
)
11414 return ((mf_jcc
>= mf_jcc_jc
&& mf_jcc
<= mf_jcc_jna
)
11415 || mf_jcc
== mf_jcc_jl
|| mf_jcc
== mf_jcc_jle
);
11416 if (mf_cmp
== mf_cmp_incdec
)
11417 return (mf_jcc
== mf_jcc_je
|| mf_jcc
== mf_jcc_jl
11418 || mf_jcc
== mf_jcc_jle
);
11419 if (mf_cmp
== mf_cmp_test_and
)
11424 /* Return the next non-empty frag. */
11427 i386_next_non_empty_frag (fragS
*fragP
)
11429 /* There may be a frag with a ".fill 0" when there is no room in
11430 the current frag for frag_grow in output_insn. */
11431 for (fragP
= fragP
->fr_next
;
11433 && fragP
->fr_type
== rs_fill
11434 && fragP
->fr_fix
== 0);
11435 fragP
= fragP
->fr_next
)
11440 /* Return the next jcc frag after BRANCH_PADDING. */
11443 i386_next_fusible_jcc_frag (fragS
*maybe_cmp_fragP
, fragS
*pad_fragP
)
11445 fragS
*branch_fragP
;
11449 if (pad_fragP
->fr_type
== rs_machine_dependent
11450 && (TYPE_FROM_RELAX_STATE (pad_fragP
->fr_subtype
)
11451 == BRANCH_PADDING
))
11453 branch_fragP
= i386_next_non_empty_frag (pad_fragP
);
11454 if (branch_fragP
->fr_type
!= rs_machine_dependent
)
11456 if (TYPE_FROM_RELAX_STATE (branch_fragP
->fr_subtype
) == COND_JUMP
11457 && i386_macro_fusible_p (maybe_cmp_fragP
->tc_frag_data
.mf_type
,
11458 pad_fragP
->tc_frag_data
.mf_type
))
11459 return branch_fragP
;
11465 /* Classify BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags. */
11468 i386_classify_machine_dependent_frag (fragS
*fragP
)
11472 fragS
*branch_fragP
;
11474 unsigned int max_prefix_length
;
11476 if (fragP
->tc_frag_data
.classified
)
11479 /* First scan for BRANCH_PADDING and FUSED_JCC_PADDING. Convert
11480 FUSED_JCC_PADDING and merge BRANCH_PADDING. */
11481 for (next_fragP
= fragP
;
11482 next_fragP
!= NULL
;
11483 next_fragP
= next_fragP
->fr_next
)
11485 next_fragP
->tc_frag_data
.classified
= 1;
11486 if (next_fragP
->fr_type
== rs_machine_dependent
)
11487 switch (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
))
11489 case BRANCH_PADDING
:
11490 /* The BRANCH_PADDING frag must be followed by a branch
11492 branch_fragP
= i386_next_non_empty_frag (next_fragP
);
11493 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
11495 case FUSED_JCC_PADDING
:
11496 /* Check if this is a fused jcc:
11498 CMP like instruction
11502 cmp_fragP
= i386_next_non_empty_frag (next_fragP
);
11503 pad_fragP
= i386_next_non_empty_frag (cmp_fragP
);
11504 branch_fragP
= i386_next_fusible_jcc_frag (next_fragP
, pad_fragP
);
11507 /* The BRANCH_PADDING frag is merged with the
11508 FUSED_JCC_PADDING frag. */
11509 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
11510 /* CMP like instruction size. */
11511 next_fragP
->tc_frag_data
.cmp_size
= cmp_fragP
->fr_fix
;
11512 frag_wane (pad_fragP
);
11513 /* Skip to branch_fragP. */
11514 next_fragP
= branch_fragP
;
11516 else if (next_fragP
->tc_frag_data
.max_prefix_length
)
11518 /* Turn FUSED_JCC_PADDING into BRANCH_PREFIX if it isn't
11520 next_fragP
->fr_subtype
11521 = ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0);
11522 next_fragP
->tc_frag_data
.max_bytes
11523 = next_fragP
->tc_frag_data
.max_prefix_length
;
11524 /* This will be updated in the BRANCH_PREFIX scan. */
11525 next_fragP
->tc_frag_data
.max_prefix_length
= 0;
11528 frag_wane (next_fragP
);
11533 /* Stop if there is no BRANCH_PREFIX. */
11534 if (!align_branch_prefix_size
)
11537 /* Scan for BRANCH_PREFIX. */
11538 for (; fragP
!= NULL
; fragP
= fragP
->fr_next
)
11540 if (fragP
->fr_type
!= rs_machine_dependent
11541 || (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
11545 /* Count all BRANCH_PREFIX frags before BRANCH_PADDING and
11546 COND_JUMP_PREFIX. */
11547 max_prefix_length
= 0;
11548 for (next_fragP
= fragP
;
11549 next_fragP
!= NULL
;
11550 next_fragP
= next_fragP
->fr_next
)
11552 if (next_fragP
->fr_type
== rs_fill
)
11553 /* Skip rs_fill frags. */
11555 else if (next_fragP
->fr_type
!= rs_machine_dependent
)
11556 /* Stop for all other frags. */
11559 /* rs_machine_dependent frags. */
11560 if (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11563 /* Count BRANCH_PREFIX frags. */
11564 if (max_prefix_length
>= MAX_FUSED_JCC_PADDING_SIZE
)
11566 max_prefix_length
= MAX_FUSED_JCC_PADDING_SIZE
;
11567 frag_wane (next_fragP
);
11571 += next_fragP
->tc_frag_data
.max_bytes
;
11573 else if ((TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11575 || (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11576 == FUSED_JCC_PADDING
))
11578 /* Stop at BRANCH_PADDING and FUSED_JCC_PADDING. */
11579 fragP
->tc_frag_data
.u
.padding_fragP
= next_fragP
;
11583 /* Stop for other rs_machine_dependent frags. */
11587 fragP
->tc_frag_data
.max_prefix_length
= max_prefix_length
;
11589 /* Skip to the next frag. */
11590 fragP
= next_fragP
;
11594 /* Compute padding size for
11597 CMP like instruction
11599 COND_JUMP/UNCOND_JUMP
11604 COND_JUMP/UNCOND_JUMP
11608 i386_branch_padding_size (fragS
*fragP
, offsetT address
)
11610 unsigned int offset
, size
, padding_size
;
11611 fragS
*branch_fragP
= fragP
->tc_frag_data
.u
.branch_fragP
;
11613 /* The start address of the BRANCH_PADDING or FUSED_JCC_PADDING frag. */
11615 address
= fragP
->fr_address
;
11616 address
+= fragP
->fr_fix
;
11618 /* CMP like instrunction size. */
11619 size
= fragP
->tc_frag_data
.cmp_size
;
11621 /* The base size of the branch frag. */
11622 size
+= branch_fragP
->fr_fix
;
11624 /* Add opcode and displacement bytes for the rs_machine_dependent
11626 if (branch_fragP
->fr_type
== rs_machine_dependent
)
11627 size
+= md_relax_table
[branch_fragP
->fr_subtype
].rlx_length
;
11629 /* Check if branch is within boundary and doesn't end at the last
11631 offset
= address
& ((1U << align_branch_power
) - 1);
11632 if ((offset
+ size
) >= (1U << align_branch_power
))
11633 /* Padding needed to avoid crossing boundary. */
11634 padding_size
= (1U << align_branch_power
) - offset
;
11636 /* No padding needed. */
11639 /* The return value may be saved in tc_frag_data.length which is
11641 if (!fits_in_unsigned_byte (padding_size
))
11644 return padding_size
;
11647 /* i386_generic_table_relax_frag()
11649 Handle BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags to
11650 grow/shrink padding to align branch frags. Hand others to
11654 i386_generic_table_relax_frag (segT segment
, fragS
*fragP
, long stretch
)
11656 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
11657 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
11659 long padding_size
= i386_branch_padding_size (fragP
, 0);
11660 long grow
= padding_size
- fragP
->tc_frag_data
.length
;
11662 /* When the BRANCH_PREFIX frag is used, the computed address
11663 must match the actual address and there should be no padding. */
11664 if (fragP
->tc_frag_data
.padding_address
11665 && (fragP
->tc_frag_data
.padding_address
!= fragP
->fr_address
11669 /* Update the padding size. */
11671 fragP
->tc_frag_data
.length
= padding_size
;
11675 else if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
11677 fragS
*padding_fragP
, *next_fragP
;
11678 long padding_size
, left_size
, last_size
;
11680 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
11681 if (!padding_fragP
)
11682 /* Use the padding set by the leading BRANCH_PREFIX frag. */
11683 return (fragP
->tc_frag_data
.length
11684 - fragP
->tc_frag_data
.last_length
);
11686 /* Compute the relative address of the padding frag in the very
11687 first time where the BRANCH_PREFIX frag sizes are zero. */
11688 if (!fragP
->tc_frag_data
.padding_address
)
11689 fragP
->tc_frag_data
.padding_address
11690 = padding_fragP
->fr_address
- (fragP
->fr_address
- stretch
);
11692 /* First update the last length from the previous interation. */
11693 left_size
= fragP
->tc_frag_data
.prefix_length
;
11694 for (next_fragP
= fragP
;
11695 next_fragP
!= padding_fragP
;
11696 next_fragP
= next_fragP
->fr_next
)
11697 if (next_fragP
->fr_type
== rs_machine_dependent
11698 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11703 int max
= next_fragP
->tc_frag_data
.max_bytes
;
11707 if (max
> left_size
)
11712 next_fragP
->tc_frag_data
.last_length
= size
;
11716 next_fragP
->tc_frag_data
.last_length
= 0;
11719 /* Check the padding size for the padding frag. */
11720 padding_size
= i386_branch_padding_size
11721 (padding_fragP
, (fragP
->fr_address
11722 + fragP
->tc_frag_data
.padding_address
));
11724 last_size
= fragP
->tc_frag_data
.prefix_length
;
11725 /* Check if there is change from the last interation. */
11726 if (padding_size
== last_size
)
11728 /* Update the expected address of the padding frag. */
11729 padding_fragP
->tc_frag_data
.padding_address
11730 = (fragP
->fr_address
+ padding_size
11731 + fragP
->tc_frag_data
.padding_address
);
11735 if (padding_size
> fragP
->tc_frag_data
.max_prefix_length
)
11737 /* No padding if there is no sufficient room. Clear the
11738 expected address of the padding frag. */
11739 padding_fragP
->tc_frag_data
.padding_address
= 0;
11743 /* Store the expected address of the padding frag. */
11744 padding_fragP
->tc_frag_data
.padding_address
11745 = (fragP
->fr_address
+ padding_size
11746 + fragP
->tc_frag_data
.padding_address
);
11748 fragP
->tc_frag_data
.prefix_length
= padding_size
;
11750 /* Update the length for the current interation. */
11751 left_size
= padding_size
;
11752 for (next_fragP
= fragP
;
11753 next_fragP
!= padding_fragP
;
11754 next_fragP
= next_fragP
->fr_next
)
11755 if (next_fragP
->fr_type
== rs_machine_dependent
11756 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11761 int max
= next_fragP
->tc_frag_data
.max_bytes
;
11765 if (max
> left_size
)
11770 next_fragP
->tc_frag_data
.length
= size
;
11774 next_fragP
->tc_frag_data
.length
= 0;
11777 return (fragP
->tc_frag_data
.length
11778 - fragP
->tc_frag_data
.last_length
);
11780 return relax_frag (segment
, fragP
, stretch
);
11783 /* md_estimate_size_before_relax()
11785 Called just before relax() for rs_machine_dependent frags. The x86
11786 assembler uses these frags to handle variable size jump
11789 Any symbol that is now undefined will not become defined.
11790 Return the correct fr_subtype in the frag.
11791 Return the initial "guess for variable size of frag" to caller.
11792 The guess is actually the growth beyond the fixed part. Whatever
11793 we do to grow the fixed or variable part contributes to our
11797 md_estimate_size_before_relax (fragS
*fragP
, segT segment
)
11799 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
11800 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
11801 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
11803 i386_classify_machine_dependent_frag (fragP
);
11804 return fragP
->tc_frag_data
.length
;
11807 /* We've already got fragP->fr_subtype right; all we have to do is
11808 check for un-relaxable symbols. On an ELF system, we can't relax
11809 an externally visible symbol, because it may be overridden by a
11811 if (S_GET_SEGMENT (fragP
->fr_symbol
) != segment
11812 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11814 && !elf_symbol_resolved_in_segment_p (fragP
->fr_symbol
,
11817 #if defined (OBJ_COFF) && defined (TE_PE)
11818 || (OUTPUT_FLAVOR
== bfd_target_coff_flavour
11819 && S_IS_WEAK (fragP
->fr_symbol
))
11823 /* Symbol is undefined in this segment, or we need to keep a
11824 reloc so that weak symbols can be overridden. */
11825 int size
= (fragP
->fr_subtype
& CODE16
) ? 2 : 4;
11826 enum bfd_reloc_code_real reloc_type
;
11827 unsigned char *opcode
;
11830 if (fragP
->fr_var
!= NO_RELOC
)
11831 reloc_type
= (enum bfd_reloc_code_real
) fragP
->fr_var
;
11832 else if (size
== 2)
11833 reloc_type
= BFD_RELOC_16_PCREL
;
11834 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11835 else if (need_plt32_p (fragP
->fr_symbol
))
11836 reloc_type
= BFD_RELOC_X86_64_PLT32
;
11839 reloc_type
= BFD_RELOC_32_PCREL
;
11841 old_fr_fix
= fragP
->fr_fix
;
11842 opcode
= (unsigned char *) fragP
->fr_opcode
;
11844 switch (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
))
11847 /* Make jmp (0xeb) a (d)word displacement jump. */
11849 fragP
->fr_fix
+= size
;
11850 fix_new (fragP
, old_fr_fix
, size
,
11852 fragP
->fr_offset
, 1,
11858 && (!no_cond_jump_promotion
|| fragP
->fr_var
!= NO_RELOC
))
11860 /* Negate the condition, and branch past an
11861 unconditional jump. */
11864 /* Insert an unconditional jump. */
11866 /* We added two extra opcode bytes, and have a two byte
11868 fragP
->fr_fix
+= 2 + 2;
11869 fix_new (fragP
, old_fr_fix
+ 2, 2,
11871 fragP
->fr_offset
, 1,
11875 /* Fall through. */
11878 if (no_cond_jump_promotion
&& fragP
->fr_var
== NO_RELOC
)
11882 fragP
->fr_fix
+= 1;
11883 fixP
= fix_new (fragP
, old_fr_fix
, 1,
11885 fragP
->fr_offset
, 1,
11886 BFD_RELOC_8_PCREL
);
11887 fixP
->fx_signed
= 1;
11891 /* This changes the byte-displacement jump 0x7N
11892 to the (d)word-displacement jump 0x0f,0x8N. */
11893 opcode
[1] = opcode
[0] + 0x10;
11894 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
11895 /* We've added an opcode byte. */
11896 fragP
->fr_fix
+= 1 + size
;
11897 fix_new (fragP
, old_fr_fix
+ 1, size
,
11899 fragP
->fr_offset
, 1,
11904 BAD_CASE (fragP
->fr_subtype
);
11908 return fragP
->fr_fix
- old_fr_fix
;
11911 /* Guess size depending on current relax state. Initially the relax
11912 state will correspond to a short jump and we return 1, because
11913 the variable part of the frag (the branch offset) is one byte
11914 long. However, we can relax a section more than once and in that
11915 case we must either set fr_subtype back to the unrelaxed state,
11916 or return the value for the appropriate branch. */
11917 return md_relax_table
[fragP
->fr_subtype
].rlx_length
;
11920 /* Called after relax() is finished.
11922 In: Address of frag.
11923 fr_type == rs_machine_dependent.
11924 fr_subtype is what the address relaxed to.
11926 Out: Any fixSs and constants are set up.
11927 Caller will turn frag into a ".space 0". */
11930 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
11933 unsigned char *opcode
;
11934 unsigned char *where_to_put_displacement
= NULL
;
11935 offsetT target_address
;
11936 offsetT opcode_address
;
11937 unsigned int extension
= 0;
11938 offsetT displacement_from_opcode_start
;
11940 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
11941 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
11942 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
11944 /* Generate nop padding. */
11945 unsigned int size
= fragP
->tc_frag_data
.length
;
11948 if (size
> fragP
->tc_frag_data
.max_bytes
)
11954 const char *branch
= "branch";
11955 const char *prefix
= "";
11956 fragS
*padding_fragP
;
11957 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
11960 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
11961 switch (fragP
->tc_frag_data
.default_prefix
)
11966 case CS_PREFIX_OPCODE
:
11969 case DS_PREFIX_OPCODE
:
11972 case ES_PREFIX_OPCODE
:
11975 case FS_PREFIX_OPCODE
:
11978 case GS_PREFIX_OPCODE
:
11981 case SS_PREFIX_OPCODE
:
11986 msg
= _("%s:%u: add %d%s at 0x%llx to align "
11987 "%s within %d-byte boundary\n");
11989 msg
= _("%s:%u: add additional %d%s at 0x%llx to "
11990 "align %s within %d-byte boundary\n");
11994 padding_fragP
= fragP
;
11995 msg
= _("%s:%u: add %d%s-byte nop at 0x%llx to align "
11996 "%s within %d-byte boundary\n");
12000 switch (padding_fragP
->tc_frag_data
.branch_type
)
12002 case align_branch_jcc
:
12005 case align_branch_fused
:
12006 branch
= "fused jcc";
12008 case align_branch_jmp
:
12011 case align_branch_call
:
12014 case align_branch_indirect
:
12015 branch
= "indiret branch";
12017 case align_branch_ret
:
12024 fprintf (stdout
, msg
,
12025 fragP
->fr_file
, fragP
->fr_line
, size
, prefix
,
12026 (long long) fragP
->fr_address
, branch
,
12027 1 << align_branch_power
);
12029 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12030 memset (fragP
->fr_opcode
,
12031 fragP
->tc_frag_data
.default_prefix
, size
);
12033 i386_generate_nops (fragP
, (char *) fragP
->fr_opcode
,
12035 fragP
->fr_fix
+= size
;
12040 opcode
= (unsigned char *) fragP
->fr_opcode
;
12042 /* Address we want to reach in file space. */
12043 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
12045 /* Address opcode resides at in file space. */
12046 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
12048 /* Displacement from opcode start to fill into instruction. */
12049 displacement_from_opcode_start
= target_address
- opcode_address
;
12051 if ((fragP
->fr_subtype
& BIG
) == 0)
12053 /* Don't have to change opcode. */
12054 extension
= 1; /* 1 opcode + 1 displacement */
12055 where_to_put_displacement
= &opcode
[1];
12059 if (no_cond_jump_promotion
12060 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
12061 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
12062 _("long jump required"));
12064 switch (fragP
->fr_subtype
)
12066 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
12067 extension
= 4; /* 1 opcode + 4 displacement */
12069 where_to_put_displacement
= &opcode
[1];
12072 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
12073 extension
= 2; /* 1 opcode + 2 displacement */
12075 where_to_put_displacement
= &opcode
[1];
12078 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
12079 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
12080 extension
= 5; /* 2 opcode + 4 displacement */
12081 opcode
[1] = opcode
[0] + 0x10;
12082 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12083 where_to_put_displacement
= &opcode
[2];
12086 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
12087 extension
= 3; /* 2 opcode + 2 displacement */
12088 opcode
[1] = opcode
[0] + 0x10;
12089 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12090 where_to_put_displacement
= &opcode
[2];
12093 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
12098 where_to_put_displacement
= &opcode
[3];
12102 BAD_CASE (fragP
->fr_subtype
);
12107 /* If size if less then four we are sure that the operand fits,
12108 but if it's 4, then it could be that the displacement is larger
12110 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
12112 && ((addressT
) (displacement_from_opcode_start
- extension
12113 + ((addressT
) 1 << 31))
12114 > (((addressT
) 2 << 31) - 1)))
12116 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
12117 _("jump target out of range"));
12118 /* Make us emit 0. */
12119 displacement_from_opcode_start
= extension
;
12121 /* Now put displacement after opcode. */
12122 md_number_to_chars ((char *) where_to_put_displacement
,
12123 (valueT
) (displacement_from_opcode_start
- extension
),
12124 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
12125 fragP
->fr_fix
+= extension
;
12128 /* Apply a fixup (fixP) to segment data, once it has been determined
12129 by our caller that we have all the info we need to fix it up.
12131 Parameter valP is the pointer to the value of the bits.
12133 On the 386, immediates, displacements, and data pointers are all in
12134 the same (little-endian) format, so we don't need to care about which
12135 we are handling. */
12138 md_apply_fix (fixS
*fixP
, valueT
*valP
, segT seg ATTRIBUTE_UNUSED
)
12140 char *p
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
12141 valueT value
= *valP
;
12143 #if !defined (TE_Mach)
12144 if (fixP
->fx_pcrel
)
12146 switch (fixP
->fx_r_type
)
12152 fixP
->fx_r_type
= BFD_RELOC_64_PCREL
;
12155 case BFD_RELOC_X86_64_32S
:
12156 fixP
->fx_r_type
= BFD_RELOC_32_PCREL
;
12159 fixP
->fx_r_type
= BFD_RELOC_16_PCREL
;
12162 fixP
->fx_r_type
= BFD_RELOC_8_PCREL
;
12167 if (fixP
->fx_addsy
!= NULL
12168 && (fixP
->fx_r_type
== BFD_RELOC_32_PCREL
12169 || fixP
->fx_r_type
== BFD_RELOC_64_PCREL
12170 || fixP
->fx_r_type
== BFD_RELOC_16_PCREL
12171 || fixP
->fx_r_type
== BFD_RELOC_8_PCREL
)
12172 && !use_rela_relocations
)
12174 /* This is a hack. There should be a better way to handle this.
12175 This covers for the fact that bfd_install_relocation will
12176 subtract the current location (for partial_inplace, PC relative
12177 relocations); see more below. */
12181 || OUTPUT_FLAVOR
== bfd_target_coff_flavour
12184 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
12186 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12189 segT sym_seg
= S_GET_SEGMENT (fixP
->fx_addsy
);
12191 if ((sym_seg
== seg
12192 || (symbol_section_p (fixP
->fx_addsy
)
12193 && sym_seg
!= absolute_section
))
12194 && !generic_force_reloc (fixP
))
12196 /* Yes, we add the values in twice. This is because
12197 bfd_install_relocation subtracts them out again. I think
12198 bfd_install_relocation is broken, but I don't dare change
12200 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
12204 #if defined (OBJ_COFF) && defined (TE_PE)
12205 /* For some reason, the PE format does not store a
12206 section address offset for a PC relative symbol. */
12207 if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
12208 || S_IS_WEAK (fixP
->fx_addsy
))
12209 value
+= md_pcrel_from (fixP
);
12212 #if defined (OBJ_COFF) && defined (TE_PE)
12213 if (fixP
->fx_addsy
!= NULL
12214 && S_IS_WEAK (fixP
->fx_addsy
)
12215 /* PR 16858: Do not modify weak function references. */
12216 && ! fixP
->fx_pcrel
)
12218 #if !defined (TE_PEP)
12219 /* For x86 PE weak function symbols are neither PC-relative
12220 nor do they set S_IS_FUNCTION. So the only reliable way
12221 to detect them is to check the flags of their containing
12223 if (S_GET_SEGMENT (fixP
->fx_addsy
) != NULL
12224 && S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_CODE
)
12228 value
-= S_GET_VALUE (fixP
->fx_addsy
);
12232 /* Fix a few things - the dynamic linker expects certain values here,
12233 and we must not disappoint it. */
12234 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12235 if (IS_ELF
&& fixP
->fx_addsy
)
12236 switch (fixP
->fx_r_type
)
12238 case BFD_RELOC_386_PLT32
:
12239 case BFD_RELOC_X86_64_PLT32
:
12240 /* Make the jump instruction point to the address of the operand.
12241 At runtime we merely add the offset to the actual PLT entry.
12242 NB: Subtract the offset size only for jump instructions. */
12243 if (fixP
->fx_pcrel
)
12247 case BFD_RELOC_386_TLS_GD
:
12248 case BFD_RELOC_386_TLS_LDM
:
12249 case BFD_RELOC_386_TLS_IE_32
:
12250 case BFD_RELOC_386_TLS_IE
:
12251 case BFD_RELOC_386_TLS_GOTIE
:
12252 case BFD_RELOC_386_TLS_GOTDESC
:
12253 case BFD_RELOC_X86_64_TLSGD
:
12254 case BFD_RELOC_X86_64_TLSLD
:
12255 case BFD_RELOC_X86_64_GOTTPOFF
:
12256 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
12257 value
= 0; /* Fully resolved at runtime. No addend. */
12259 case BFD_RELOC_386_TLS_LE
:
12260 case BFD_RELOC_386_TLS_LDO_32
:
12261 case BFD_RELOC_386_TLS_LE_32
:
12262 case BFD_RELOC_X86_64_DTPOFF32
:
12263 case BFD_RELOC_X86_64_DTPOFF64
:
12264 case BFD_RELOC_X86_64_TPOFF32
:
12265 case BFD_RELOC_X86_64_TPOFF64
:
12266 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
12269 case BFD_RELOC_386_TLS_DESC_CALL
:
12270 case BFD_RELOC_X86_64_TLSDESC_CALL
:
12271 value
= 0; /* Fully resolved at runtime. No addend. */
12272 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
12276 case BFD_RELOC_VTABLE_INHERIT
:
12277 case BFD_RELOC_VTABLE_ENTRY
:
12284 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
12286 #endif /* !defined (TE_Mach) */
12288 /* Are we finished with this relocation now? */
12289 if (fixP
->fx_addsy
== NULL
)
12291 #if defined (OBJ_COFF) && defined (TE_PE)
12292 else if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
12295 /* Remember value for tc_gen_reloc. */
12296 fixP
->fx_addnumber
= value
;
12297 /* Clear out the frag for now. */
12301 else if (use_rela_relocations
)
12303 fixP
->fx_no_overflow
= 1;
12304 /* Remember value for tc_gen_reloc. */
12305 fixP
->fx_addnumber
= value
;
12309 md_number_to_chars (p
, value
, fixP
->fx_size
);
12313 md_atof (int type
, char *litP
, int *sizeP
)
12315 /* This outputs the LITTLENUMs in REVERSE order;
12316 in accord with the bigendian 386. */
12317 return ieee_md_atof (type
, litP
, sizeP
, FALSE
);
12320 static char output_invalid_buf
[sizeof (unsigned char) * 2 + 6];
12323 output_invalid (int c
)
12326 snprintf (output_invalid_buf
, sizeof (output_invalid_buf
),
12329 snprintf (output_invalid_buf
, sizeof (output_invalid_buf
),
12330 "(0x%x)", (unsigned char) c
);
12331 return output_invalid_buf
;
12334 /* REG_STRING starts *before* REGISTER_PREFIX. */
12336 static const reg_entry
*
12337 parse_real_register (char *reg_string
, char **end_op
)
12339 char *s
= reg_string
;
12341 char reg_name_given
[MAX_REG_NAME_SIZE
+ 1];
12342 const reg_entry
*r
;
12344 /* Skip possible REGISTER_PREFIX and possible whitespace. */
12345 if (*s
== REGISTER_PREFIX
)
12348 if (is_space_char (*s
))
12351 p
= reg_name_given
;
12352 while ((*p
++ = register_chars
[(unsigned char) *s
]) != '\0')
12354 if (p
>= reg_name_given
+ MAX_REG_NAME_SIZE
)
12355 return (const reg_entry
*) NULL
;
12359 /* For naked regs, make sure that we are not dealing with an identifier.
12360 This prevents confusing an identifier like `eax_var' with register
12362 if (allow_naked_reg
&& identifier_chars
[(unsigned char) *s
])
12363 return (const reg_entry
*) NULL
;
12367 r
= (const reg_entry
*) hash_find (reg_hash
, reg_name_given
);
12369 /* Handle floating point regs, allowing spaces in the (i) part. */
12370 if (r
== i386_regtab
/* %st is first entry of table */)
12372 if (!cpu_arch_flags
.bitfield
.cpu8087
12373 && !cpu_arch_flags
.bitfield
.cpu287
12374 && !cpu_arch_flags
.bitfield
.cpu387
)
12375 return (const reg_entry
*) NULL
;
12377 if (is_space_char (*s
))
12382 if (is_space_char (*s
))
12384 if (*s
>= '0' && *s
<= '7')
12386 int fpr
= *s
- '0';
12388 if (is_space_char (*s
))
12393 r
= (const reg_entry
*) hash_find (reg_hash
, "st(0)");
12398 /* We have "%st(" then garbage. */
12399 return (const reg_entry
*) NULL
;
12403 if (r
== NULL
|| allow_pseudo_reg
)
12406 if (operand_type_all_zero (&r
->reg_type
))
12407 return (const reg_entry
*) NULL
;
12409 if ((r
->reg_type
.bitfield
.dword
12410 || (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
> 3)
12411 || r
->reg_type
.bitfield
.class == RegCR
12412 || r
->reg_type
.bitfield
.class == RegDR
12413 || r
->reg_type
.bitfield
.class == RegTR
)
12414 && !cpu_arch_flags
.bitfield
.cpui386
)
12415 return (const reg_entry
*) NULL
;
12417 if (r
->reg_type
.bitfield
.class == RegMMX
&& !cpu_arch_flags
.bitfield
.cpummx
)
12418 return (const reg_entry
*) NULL
;
12420 if (!cpu_arch_flags
.bitfield
.cpuavx512f
)
12422 if (r
->reg_type
.bitfield
.zmmword
12423 || r
->reg_type
.bitfield
.class == RegMask
)
12424 return (const reg_entry
*) NULL
;
12426 if (!cpu_arch_flags
.bitfield
.cpuavx
)
12428 if (r
->reg_type
.bitfield
.ymmword
)
12429 return (const reg_entry
*) NULL
;
12431 if (!cpu_arch_flags
.bitfield
.cpusse
&& r
->reg_type
.bitfield
.xmmword
)
12432 return (const reg_entry
*) NULL
;
12436 if (r
->reg_type
.bitfield
.class == RegBND
&& !cpu_arch_flags
.bitfield
.cpumpx
)
12437 return (const reg_entry
*) NULL
;
12439 /* Don't allow fake index register unless allow_index_reg isn't 0. */
12440 if (!allow_index_reg
&& r
->reg_num
== RegIZ
)
12441 return (const reg_entry
*) NULL
;
12443 /* Upper 16 vector registers are only available with VREX in 64bit
12444 mode, and require EVEX encoding. */
12445 if (r
->reg_flags
& RegVRex
)
12447 if (!cpu_arch_flags
.bitfield
.cpuavx512f
12448 || flag_code
!= CODE_64BIT
)
12449 return (const reg_entry
*) NULL
;
12451 i
.vec_encoding
= vex_encoding_evex
;
12454 if (((r
->reg_flags
& (RegRex64
| RegRex
)) || r
->reg_type
.bitfield
.qword
)
12455 && (!cpu_arch_flags
.bitfield
.cpulm
|| r
->reg_type
.bitfield
.class != RegCR
)
12456 && flag_code
!= CODE_64BIT
)
12457 return (const reg_entry
*) NULL
;
12459 if (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
== RegFlat
12461 return (const reg_entry
*) NULL
;
12466 /* REG_STRING starts *before* REGISTER_PREFIX. */
12468 static const reg_entry
*
12469 parse_register (char *reg_string
, char **end_op
)
12471 const reg_entry
*r
;
12473 if (*reg_string
== REGISTER_PREFIX
|| allow_naked_reg
)
12474 r
= parse_real_register (reg_string
, end_op
);
12479 char *save
= input_line_pointer
;
12483 input_line_pointer
= reg_string
;
12484 c
= get_symbol_name (®_string
);
12485 symbolP
= symbol_find (reg_string
);
12486 if (symbolP
&& S_GET_SEGMENT (symbolP
) == reg_section
)
12488 const expressionS
*e
= symbol_get_value_expression (symbolP
);
12490 know (e
->X_op
== O_register
);
12491 know (e
->X_add_number
>= 0
12492 && (valueT
) e
->X_add_number
< i386_regtab_size
);
12493 r
= i386_regtab
+ e
->X_add_number
;
12494 if ((r
->reg_flags
& RegVRex
))
12495 i
.vec_encoding
= vex_encoding_evex
;
12496 *end_op
= input_line_pointer
;
12498 *input_line_pointer
= c
;
12499 input_line_pointer
= save
;
12505 i386_parse_name (char *name
, expressionS
*e
, char *nextcharP
)
12507 const reg_entry
*r
;
12508 char *end
= input_line_pointer
;
12511 r
= parse_register (name
, &input_line_pointer
);
12512 if (r
&& end
<= input_line_pointer
)
12514 *nextcharP
= *input_line_pointer
;
12515 *input_line_pointer
= 0;
12516 e
->X_op
= O_register
;
12517 e
->X_add_number
= r
- i386_regtab
;
12520 input_line_pointer
= end
;
12522 return intel_syntax
? i386_intel_parse_name (name
, e
) : 0;
12526 md_operand (expressionS
*e
)
12529 const reg_entry
*r
;
12531 switch (*input_line_pointer
)
12533 case REGISTER_PREFIX
:
12534 r
= parse_real_register (input_line_pointer
, &end
);
12537 e
->X_op
= O_register
;
12538 e
->X_add_number
= r
- i386_regtab
;
12539 input_line_pointer
= end
;
12544 gas_assert (intel_syntax
);
12545 end
= input_line_pointer
++;
12547 if (*input_line_pointer
== ']')
12549 ++input_line_pointer
;
12550 e
->X_op_symbol
= make_expr_symbol (e
);
12551 e
->X_add_symbol
= NULL
;
12552 e
->X_add_number
= 0;
12557 e
->X_op
= O_absent
;
12558 input_line_pointer
= end
;
12565 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12566 const char *md_shortopts
= "kVQ:sqnO::";
12568 const char *md_shortopts
= "qnO::";
12571 #define OPTION_32 (OPTION_MD_BASE + 0)
12572 #define OPTION_64 (OPTION_MD_BASE + 1)
12573 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
12574 #define OPTION_MARCH (OPTION_MD_BASE + 3)
12575 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
12576 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
12577 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
12578 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
12579 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
12580 #define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 9)
12581 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
12582 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
12583 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
12584 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
12585 #define OPTION_X32 (OPTION_MD_BASE + 14)
12586 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
12587 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
12588 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
12589 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
12590 #define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
12591 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
12592 #define OPTION_MSHARED (OPTION_MD_BASE + 21)
12593 #define OPTION_MAMD64 (OPTION_MD_BASE + 22)
12594 #define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
12595 #define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
12596 #define OPTION_X86_USED_NOTE (OPTION_MD_BASE + 25)
12597 #define OPTION_MVEXWIG (OPTION_MD_BASE + 26)
12598 #define OPTION_MALIGN_BRANCH_BOUNDARY (OPTION_MD_BASE + 27)
12599 #define OPTION_MALIGN_BRANCH_PREFIX_SIZE (OPTION_MD_BASE + 28)
12600 #define OPTION_MALIGN_BRANCH (OPTION_MD_BASE + 29)
12601 #define OPTION_MBRANCHES_WITH_32B_BOUNDARIES (OPTION_MD_BASE + 30)
12602 #define OPTION_MLFENCE_AFTER_LOAD (OPTION_MD_BASE + 31)
12603 #define OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH (OPTION_MD_BASE + 32)
12604 #define OPTION_MLFENCE_BEFORE_RET (OPTION_MD_BASE + 33)
12606 struct option md_longopts
[] =
12608 {"32", no_argument
, NULL
, OPTION_32
},
12609 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
12610 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
12611 {"64", no_argument
, NULL
, OPTION_64
},
12613 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12614 {"x32", no_argument
, NULL
, OPTION_X32
},
12615 {"mshared", no_argument
, NULL
, OPTION_MSHARED
},
12616 {"mx86-used-note", required_argument
, NULL
, OPTION_X86_USED_NOTE
},
12618 {"divide", no_argument
, NULL
, OPTION_DIVIDE
},
12619 {"march", required_argument
, NULL
, OPTION_MARCH
},
12620 {"mtune", required_argument
, NULL
, OPTION_MTUNE
},
12621 {"mmnemonic", required_argument
, NULL
, OPTION_MMNEMONIC
},
12622 {"msyntax", required_argument
, NULL
, OPTION_MSYNTAX
},
12623 {"mindex-reg", no_argument
, NULL
, OPTION_MINDEX_REG
},
12624 {"mnaked-reg", no_argument
, NULL
, OPTION_MNAKED_REG
},
12625 {"msse2avx", no_argument
, NULL
, OPTION_MSSE2AVX
},
12626 {"msse-check", required_argument
, NULL
, OPTION_MSSE_CHECK
},
12627 {"moperand-check", required_argument
, NULL
, OPTION_MOPERAND_CHECK
},
12628 {"mavxscalar", required_argument
, NULL
, OPTION_MAVXSCALAR
},
12629 {"mvexwig", required_argument
, NULL
, OPTION_MVEXWIG
},
12630 {"madd-bnd-prefix", no_argument
, NULL
, OPTION_MADD_BND_PREFIX
},
12631 {"mevexlig", required_argument
, NULL
, OPTION_MEVEXLIG
},
12632 {"mevexwig", required_argument
, NULL
, OPTION_MEVEXWIG
},
12633 # if defined (TE_PE) || defined (TE_PEP)
12634 {"mbig-obj", no_argument
, NULL
, OPTION_MBIG_OBJ
},
12636 {"momit-lock-prefix", required_argument
, NULL
, OPTION_MOMIT_LOCK_PREFIX
},
12637 {"mfence-as-lock-add", required_argument
, NULL
, OPTION_MFENCE_AS_LOCK_ADD
},
12638 {"mrelax-relocations", required_argument
, NULL
, OPTION_MRELAX_RELOCATIONS
},
12639 {"mevexrcig", required_argument
, NULL
, OPTION_MEVEXRCIG
},
12640 {"malign-branch-boundary", required_argument
, NULL
, OPTION_MALIGN_BRANCH_BOUNDARY
},
12641 {"malign-branch-prefix-size", required_argument
, NULL
, OPTION_MALIGN_BRANCH_PREFIX_SIZE
},
12642 {"malign-branch", required_argument
, NULL
, OPTION_MALIGN_BRANCH
},
12643 {"mbranches-within-32B-boundaries", no_argument
, NULL
, OPTION_MBRANCHES_WITH_32B_BOUNDARIES
},
12644 {"mlfence-after-load", required_argument
, NULL
, OPTION_MLFENCE_AFTER_LOAD
},
12645 {"mlfence-before-indirect-branch", required_argument
, NULL
,
12646 OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
},
12647 {"mlfence-before-ret", required_argument
, NULL
, OPTION_MLFENCE_BEFORE_RET
},
12648 {"mamd64", no_argument
, NULL
, OPTION_MAMD64
},
12649 {"mintel64", no_argument
, NULL
, OPTION_MINTEL64
},
12650 {NULL
, no_argument
, NULL
, 0}
12652 size_t md_longopts_size
= sizeof (md_longopts
);
12655 md_parse_option (int c
, const char *arg
)
12658 char *arch
, *next
, *saved
, *type
;
12663 optimize_align_code
= 0;
12667 quiet_warnings
= 1;
12670 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12671 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
12672 should be emitted or not. FIXME: Not implemented. */
12674 if ((arg
[0] != 'y' && arg
[0] != 'n') || arg
[1])
12678 /* -V: SVR4 argument to print version ID. */
12680 print_version_id ();
12683 /* -k: Ignore for FreeBSD compatibility. */
12688 /* -s: On i386 Solaris, this tells the native assembler to use
12689 .stab instead of .stab.excl. We always use .stab anyhow. */
12692 case OPTION_MSHARED
:
12696 case OPTION_X86_USED_NOTE
:
12697 if (strcasecmp (arg
, "yes") == 0)
12699 else if (strcasecmp (arg
, "no") == 0)
12702 as_fatal (_("invalid -mx86-used-note= option: `%s'"), arg
);
12707 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
12708 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
12711 const char **list
, **l
;
12713 list
= bfd_target_list ();
12714 for (l
= list
; *l
!= NULL
; l
++)
12715 if (CONST_STRNEQ (*l
, "elf64-x86-64")
12716 || strcmp (*l
, "coff-x86-64") == 0
12717 || strcmp (*l
, "pe-x86-64") == 0
12718 || strcmp (*l
, "pei-x86-64") == 0
12719 || strcmp (*l
, "mach-o-x86-64") == 0)
12721 default_arch
= "x86_64";
12725 as_fatal (_("no compiled in support for x86_64"));
12731 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12735 const char **list
, **l
;
12737 list
= bfd_target_list ();
12738 for (l
= list
; *l
!= NULL
; l
++)
12739 if (CONST_STRNEQ (*l
, "elf32-x86-64"))
12741 default_arch
= "x86_64:32";
12745 as_fatal (_("no compiled in support for 32bit x86_64"));
12749 as_fatal (_("32bit x86_64 is only supported for ELF"));
12754 default_arch
= "i386";
12757 case OPTION_DIVIDE
:
12758 #ifdef SVR4_COMMENT_CHARS
12763 n
= XNEWVEC (char, strlen (i386_comment_chars
) + 1);
12765 for (s
= i386_comment_chars
; *s
!= '\0'; s
++)
12769 i386_comment_chars
= n
;
12775 saved
= xstrdup (arg
);
12777 /* Allow -march=+nosse. */
12783 as_fatal (_("invalid -march= option: `%s'"), arg
);
12784 next
= strchr (arch
, '+');
12787 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
12789 if (strcmp (arch
, cpu_arch
[j
].name
) == 0)
12792 if (! cpu_arch
[j
].flags
.bitfield
.cpui386
)
12795 cpu_arch_name
= cpu_arch
[j
].name
;
12796 cpu_sub_arch_name
= NULL
;
12797 cpu_arch_flags
= cpu_arch
[j
].flags
;
12798 cpu_arch_isa
= cpu_arch
[j
].type
;
12799 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
12800 if (!cpu_arch_tune_set
)
12802 cpu_arch_tune
= cpu_arch_isa
;
12803 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
12807 else if (*cpu_arch
[j
].name
== '.'
12808 && strcmp (arch
, cpu_arch
[j
].name
+ 1) == 0)
12810 /* ISA extension. */
12811 i386_cpu_flags flags
;
12813 flags
= cpu_flags_or (cpu_arch_flags
,
12814 cpu_arch
[j
].flags
);
12816 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
12818 if (cpu_sub_arch_name
)
12820 char *name
= cpu_sub_arch_name
;
12821 cpu_sub_arch_name
= concat (name
,
12823 (const char *) NULL
);
12827 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
12828 cpu_arch_flags
= flags
;
12829 cpu_arch_isa_flags
= flags
;
12833 = cpu_flags_or (cpu_arch_isa_flags
,
12834 cpu_arch
[j
].flags
);
12839 if (j
>= ARRAY_SIZE (cpu_arch
))
12841 /* Disable an ISA extension. */
12842 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
12843 if (strcmp (arch
, cpu_noarch
[j
].name
) == 0)
12845 i386_cpu_flags flags
;
12847 flags
= cpu_flags_and_not (cpu_arch_flags
,
12848 cpu_noarch
[j
].flags
);
12849 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
12851 if (cpu_sub_arch_name
)
12853 char *name
= cpu_sub_arch_name
;
12854 cpu_sub_arch_name
= concat (arch
,
12855 (const char *) NULL
);
12859 cpu_sub_arch_name
= xstrdup (arch
);
12860 cpu_arch_flags
= flags
;
12861 cpu_arch_isa_flags
= flags
;
12866 if (j
>= ARRAY_SIZE (cpu_noarch
))
12867 j
= ARRAY_SIZE (cpu_arch
);
12870 if (j
>= ARRAY_SIZE (cpu_arch
))
12871 as_fatal (_("invalid -march= option: `%s'"), arg
);
12875 while (next
!= NULL
);
12881 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
12882 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
12884 if (strcmp (arg
, cpu_arch
[j
].name
) == 0)
12886 cpu_arch_tune_set
= 1;
12887 cpu_arch_tune
= cpu_arch
[j
].type
;
12888 cpu_arch_tune_flags
= cpu_arch
[j
].flags
;
12892 if (j
>= ARRAY_SIZE (cpu_arch
))
12893 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
12896 case OPTION_MMNEMONIC
:
12897 if (strcasecmp (arg
, "att") == 0)
12898 intel_mnemonic
= 0;
12899 else if (strcasecmp (arg
, "intel") == 0)
12900 intel_mnemonic
= 1;
12902 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg
);
12905 case OPTION_MSYNTAX
:
12906 if (strcasecmp (arg
, "att") == 0)
12908 else if (strcasecmp (arg
, "intel") == 0)
12911 as_fatal (_("invalid -msyntax= option: `%s'"), arg
);
12914 case OPTION_MINDEX_REG
:
12915 allow_index_reg
= 1;
12918 case OPTION_MNAKED_REG
:
12919 allow_naked_reg
= 1;
12922 case OPTION_MSSE2AVX
:
12926 case OPTION_MSSE_CHECK
:
12927 if (strcasecmp (arg
, "error") == 0)
12928 sse_check
= check_error
;
12929 else if (strcasecmp (arg
, "warning") == 0)
12930 sse_check
= check_warning
;
12931 else if (strcasecmp (arg
, "none") == 0)
12932 sse_check
= check_none
;
12934 as_fatal (_("invalid -msse-check= option: `%s'"), arg
);
12937 case OPTION_MOPERAND_CHECK
:
12938 if (strcasecmp (arg
, "error") == 0)
12939 operand_check
= check_error
;
12940 else if (strcasecmp (arg
, "warning") == 0)
12941 operand_check
= check_warning
;
12942 else if (strcasecmp (arg
, "none") == 0)
12943 operand_check
= check_none
;
12945 as_fatal (_("invalid -moperand-check= option: `%s'"), arg
);
12948 case OPTION_MAVXSCALAR
:
12949 if (strcasecmp (arg
, "128") == 0)
12950 avxscalar
= vex128
;
12951 else if (strcasecmp (arg
, "256") == 0)
12952 avxscalar
= vex256
;
12954 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg
);
12957 case OPTION_MVEXWIG
:
12958 if (strcmp (arg
, "0") == 0)
12960 else if (strcmp (arg
, "1") == 0)
12963 as_fatal (_("invalid -mvexwig= option: `%s'"), arg
);
12966 case OPTION_MADD_BND_PREFIX
:
12967 add_bnd_prefix
= 1;
12970 case OPTION_MEVEXLIG
:
12971 if (strcmp (arg
, "128") == 0)
12972 evexlig
= evexl128
;
12973 else if (strcmp (arg
, "256") == 0)
12974 evexlig
= evexl256
;
12975 else if (strcmp (arg
, "512") == 0)
12976 evexlig
= evexl512
;
12978 as_fatal (_("invalid -mevexlig= option: `%s'"), arg
);
12981 case OPTION_MEVEXRCIG
:
12982 if (strcmp (arg
, "rne") == 0)
12984 else if (strcmp (arg
, "rd") == 0)
12986 else if (strcmp (arg
, "ru") == 0)
12988 else if (strcmp (arg
, "rz") == 0)
12991 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg
);
12994 case OPTION_MEVEXWIG
:
12995 if (strcmp (arg
, "0") == 0)
12997 else if (strcmp (arg
, "1") == 0)
13000 as_fatal (_("invalid -mevexwig= option: `%s'"), arg
);
13003 # if defined (TE_PE) || defined (TE_PEP)
13004 case OPTION_MBIG_OBJ
:
13009 case OPTION_MOMIT_LOCK_PREFIX
:
13010 if (strcasecmp (arg
, "yes") == 0)
13011 omit_lock_prefix
= 1;
13012 else if (strcasecmp (arg
, "no") == 0)
13013 omit_lock_prefix
= 0;
13015 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg
);
13018 case OPTION_MFENCE_AS_LOCK_ADD
:
13019 if (strcasecmp (arg
, "yes") == 0)
13021 else if (strcasecmp (arg
, "no") == 0)
13024 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg
);
13027 case OPTION_MLFENCE_AFTER_LOAD
:
13028 if (strcasecmp (arg
, "yes") == 0)
13029 lfence_after_load
= 1;
13030 else if (strcasecmp (arg
, "no") == 0)
13031 lfence_after_load
= 0;
13033 as_fatal (_("invalid -mlfence-after-load= option: `%s'"), arg
);
13036 case OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
:
13037 if (strcasecmp (arg
, "all") == 0)
13039 lfence_before_indirect_branch
= lfence_branch_all
;
13040 if (lfence_before_ret
== lfence_before_ret_none
)
13041 lfence_before_ret
= lfence_before_ret_shl
;
13043 else if (strcasecmp (arg
, "memory") == 0)
13044 lfence_before_indirect_branch
= lfence_branch_memory
;
13045 else if (strcasecmp (arg
, "register") == 0)
13046 lfence_before_indirect_branch
= lfence_branch_register
;
13047 else if (strcasecmp (arg
, "none") == 0)
13048 lfence_before_indirect_branch
= lfence_branch_none
;
13050 as_fatal (_("invalid -mlfence-before-indirect-branch= option: `%s'"),
13054 case OPTION_MLFENCE_BEFORE_RET
:
13055 if (strcasecmp (arg
, "or") == 0)
13056 lfence_before_ret
= lfence_before_ret_or
;
13057 else if (strcasecmp (arg
, "not") == 0)
13058 lfence_before_ret
= lfence_before_ret_not
;
13059 else if (strcasecmp (arg
, "shl") == 0 || strcasecmp (arg
, "yes") == 0)
13060 lfence_before_ret
= lfence_before_ret_shl
;
13061 else if (strcasecmp (arg
, "none") == 0)
13062 lfence_before_ret
= lfence_before_ret_none
;
13064 as_fatal (_("invalid -mlfence-before-ret= option: `%s'"),
13068 case OPTION_MRELAX_RELOCATIONS
:
13069 if (strcasecmp (arg
, "yes") == 0)
13070 generate_relax_relocations
= 1;
13071 else if (strcasecmp (arg
, "no") == 0)
13072 generate_relax_relocations
= 0;
13074 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg
);
13077 case OPTION_MALIGN_BRANCH_BOUNDARY
:
13080 long int align
= strtoul (arg
, &end
, 0);
13085 align_branch_power
= 0;
13088 else if (align
>= 16)
13091 for (align_power
= 0;
13093 align
>>= 1, align_power
++)
13095 /* Limit alignment power to 31. */
13096 if (align
== 1 && align_power
< 32)
13098 align_branch_power
= align_power
;
13103 as_fatal (_("invalid -malign-branch-boundary= value: %s"), arg
);
13107 case OPTION_MALIGN_BRANCH_PREFIX_SIZE
:
13110 int align
= strtoul (arg
, &end
, 0);
13111 /* Some processors only support 5 prefixes. */
13112 if (*end
== '\0' && align
>= 0 && align
< 6)
13114 align_branch_prefix_size
= align
;
13117 as_fatal (_("invalid -malign-branch-prefix-size= value: %s"),
13122 case OPTION_MALIGN_BRANCH
:
13124 saved
= xstrdup (arg
);
13128 next
= strchr (type
, '+');
13131 if (strcasecmp (type
, "jcc") == 0)
13132 align_branch
|= align_branch_jcc_bit
;
13133 else if (strcasecmp (type
, "fused") == 0)
13134 align_branch
|= align_branch_fused_bit
;
13135 else if (strcasecmp (type
, "jmp") == 0)
13136 align_branch
|= align_branch_jmp_bit
;
13137 else if (strcasecmp (type
, "call") == 0)
13138 align_branch
|= align_branch_call_bit
;
13139 else if (strcasecmp (type
, "ret") == 0)
13140 align_branch
|= align_branch_ret_bit
;
13141 else if (strcasecmp (type
, "indirect") == 0)
13142 align_branch
|= align_branch_indirect_bit
;
13144 as_fatal (_("invalid -malign-branch= option: `%s'"), arg
);
13147 while (next
!= NULL
);
13151 case OPTION_MBRANCHES_WITH_32B_BOUNDARIES
:
13152 align_branch_power
= 5;
13153 align_branch_prefix_size
= 5;
13154 align_branch
= (align_branch_jcc_bit
13155 | align_branch_fused_bit
13156 | align_branch_jmp_bit
);
13159 case OPTION_MAMD64
:
13163 case OPTION_MINTEL64
:
13171 /* Turn off -Os. */
13172 optimize_for_space
= 0;
13174 else if (*arg
== 's')
13176 optimize_for_space
= 1;
13177 /* Turn on all encoding optimizations. */
13178 optimize
= INT_MAX
;
13182 optimize
= atoi (arg
);
13183 /* Turn off -Os. */
13184 optimize_for_space
= 0;
13194 #define MESSAGE_TEMPLATE \
13198 output_message (FILE *stream
, char *p
, char *message
, char *start
,
13199 int *left_p
, const char *name
, int len
)
13201 int size
= sizeof (MESSAGE_TEMPLATE
);
13202 int left
= *left_p
;
13204 /* Reserve 2 spaces for ", " or ",\0" */
13207 /* Check if there is any room. */
13215 p
= mempcpy (p
, name
, len
);
13219 /* Output the current message now and start a new one. */
13222 fprintf (stream
, "%s\n", message
);
13224 left
= size
- (start
- message
) - len
- 2;
13226 gas_assert (left
>= 0);
13228 p
= mempcpy (p
, name
, len
);
13236 show_arch (FILE *stream
, int ext
, int check
)
13238 static char message
[] = MESSAGE_TEMPLATE
;
13239 char *start
= message
+ 27;
13241 int size
= sizeof (MESSAGE_TEMPLATE
);
13248 left
= size
- (start
- message
);
13249 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13251 /* Should it be skipped? */
13252 if (cpu_arch
[j
].skip
)
13255 name
= cpu_arch
[j
].name
;
13256 len
= cpu_arch
[j
].len
;
13259 /* It is an extension. Skip if we aren't asked to show it. */
13270 /* It is an processor. Skip if we show only extension. */
13273 else if (check
&& ! cpu_arch
[j
].flags
.bitfield
.cpui386
)
13275 /* It is an impossible processor - skip. */
13279 p
= output_message (stream
, p
, message
, start
, &left
, name
, len
);
13282 /* Display disabled extensions. */
13284 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
13286 name
= cpu_noarch
[j
].name
;
13287 len
= cpu_noarch
[j
].len
;
13288 p
= output_message (stream
, p
, message
, start
, &left
, name
,
13293 fprintf (stream
, "%s\n", message
);
13297 md_show_usage (FILE *stream
)
13299 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13300 fprintf (stream
, _("\
13301 -Qy, -Qn ignored\n\
13302 -V print assembler version number\n\
13305 fprintf (stream
, _("\
13306 -n Do not optimize code alignment\n\
13307 -q quieten some warnings\n"));
13308 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13309 fprintf (stream
, _("\
13312 #if defined BFD64 && (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13313 || defined (TE_PE) || defined (TE_PEP))
13314 fprintf (stream
, _("\
13315 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
13317 #ifdef SVR4_COMMENT_CHARS
13318 fprintf (stream
, _("\
13319 --divide do not treat `/' as a comment character\n"));
13321 fprintf (stream
, _("\
13322 --divide ignored\n"));
13324 fprintf (stream
, _("\
13325 -march=CPU[,+EXTENSION...]\n\
13326 generate code for CPU and EXTENSION, CPU is one of:\n"));
13327 show_arch (stream
, 0, 1);
13328 fprintf (stream
, _("\
13329 EXTENSION is combination of:\n"));
13330 show_arch (stream
, 1, 0);
13331 fprintf (stream
, _("\
13332 -mtune=CPU optimize for CPU, CPU is one of:\n"));
13333 show_arch (stream
, 0, 0);
13334 fprintf (stream
, _("\
13335 -msse2avx encode SSE instructions with VEX prefix\n"));
13336 fprintf (stream
, _("\
13337 -msse-check=[none|error|warning] (default: warning)\n\
13338 check SSE instructions\n"));
13339 fprintf (stream
, _("\
13340 -moperand-check=[none|error|warning] (default: warning)\n\
13341 check operand combinations for validity\n"));
13342 fprintf (stream
, _("\
13343 -mavxscalar=[128|256] (default: 128)\n\
13344 encode scalar AVX instructions with specific vector\n\
13346 fprintf (stream
, _("\
13347 -mvexwig=[0|1] (default: 0)\n\
13348 encode VEX instructions with specific VEX.W value\n\
13349 for VEX.W bit ignored instructions\n"));
13350 fprintf (stream
, _("\
13351 -mevexlig=[128|256|512] (default: 128)\n\
13352 encode scalar EVEX instructions with specific vector\n\
13354 fprintf (stream
, _("\
13355 -mevexwig=[0|1] (default: 0)\n\
13356 encode EVEX instructions with specific EVEX.W value\n\
13357 for EVEX.W bit ignored instructions\n"));
13358 fprintf (stream
, _("\
13359 -mevexrcig=[rne|rd|ru|rz] (default: rne)\n\
13360 encode EVEX instructions with specific EVEX.RC value\n\
13361 for SAE-only ignored instructions\n"));
13362 fprintf (stream
, _("\
13363 -mmnemonic=[att|intel] "));
13364 if (SYSV386_COMPAT
)
13365 fprintf (stream
, _("(default: att)\n"));
13367 fprintf (stream
, _("(default: intel)\n"));
13368 fprintf (stream
, _("\
13369 use AT&T/Intel mnemonic\n"));
13370 fprintf (stream
, _("\
13371 -msyntax=[att|intel] (default: att)\n\
13372 use AT&T/Intel syntax\n"));
13373 fprintf (stream
, _("\
13374 -mindex-reg support pseudo index registers\n"));
13375 fprintf (stream
, _("\
13376 -mnaked-reg don't require `%%' prefix for registers\n"));
13377 fprintf (stream
, _("\
13378 -madd-bnd-prefix add BND prefix for all valid branches\n"));
13379 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13380 fprintf (stream
, _("\
13381 -mshared disable branch optimization for shared code\n"));
13382 fprintf (stream
, _("\
13383 -mx86-used-note=[no|yes] "));
13384 if (DEFAULT_X86_USED_NOTE
)
13385 fprintf (stream
, _("(default: yes)\n"));
13387 fprintf (stream
, _("(default: no)\n"));
13388 fprintf (stream
, _("\
13389 generate x86 used ISA and feature properties\n"));
13391 #if defined (TE_PE) || defined (TE_PEP)
13392 fprintf (stream
, _("\
13393 -mbig-obj generate big object files\n"));
13395 fprintf (stream
, _("\
13396 -momit-lock-prefix=[no|yes] (default: no)\n\
13397 strip all lock prefixes\n"));
13398 fprintf (stream
, _("\
13399 -mfence-as-lock-add=[no|yes] (default: no)\n\
13400 encode lfence, mfence and sfence as\n\
13401 lock addl $0x0, (%%{re}sp)\n"));
13402 fprintf (stream
, _("\
13403 -mrelax-relocations=[no|yes] "));
13404 if (DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
)
13405 fprintf (stream
, _("(default: yes)\n"));
13407 fprintf (stream
, _("(default: no)\n"));
13408 fprintf (stream
, _("\
13409 generate relax relocations\n"));
13410 fprintf (stream
, _("\
13411 -malign-branch-boundary=NUM (default: 0)\n\
13412 align branches within NUM byte boundary\n"));
13413 fprintf (stream
, _("\
13414 -malign-branch=TYPE[+TYPE...] (default: jcc+fused+jmp)\n\
13415 TYPE is combination of jcc, fused, jmp, call, ret,\n\
13417 specify types of branches to align\n"));
13418 fprintf (stream
, _("\
13419 -malign-branch-prefix-size=NUM (default: 5)\n\
13420 align branches with NUM prefixes per instruction\n"));
13421 fprintf (stream
, _("\
13422 -mbranches-within-32B-boundaries\n\
13423 align branches within 32 byte boundary\n"));
13424 fprintf (stream
, _("\
13425 -mlfence-after-load=[no|yes] (default: no)\n\
13426 generate lfence after load\n"));
13427 fprintf (stream
, _("\
13428 -mlfence-before-indirect-branch=[none|all|register|memory] (default: none)\n\
13429 generate lfence before indirect near branch\n"));
13430 fprintf (stream
, _("\
13431 -mlfence-before-ret=[none|or|not|shl|yes] (default: none)\n\
13432 generate lfence before ret\n"));
13433 fprintf (stream
, _("\
13434 -mamd64 accept only AMD64 ISA [default]\n"));
13435 fprintf (stream
, _("\
13436 -mintel64 accept only Intel64 ISA\n"));
13439 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
13440 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13441 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
13443 /* Pick the target format to use. */
13446 i386_target_format (void)
13448 if (!strncmp (default_arch
, "x86_64", 6))
13450 update_code_flag (CODE_64BIT
, 1);
13451 if (default_arch
[6] == '\0')
13452 x86_elf_abi
= X86_64_ABI
;
13454 x86_elf_abi
= X86_64_X32_ABI
;
13456 else if (!strcmp (default_arch
, "i386"))
13457 update_code_flag (CODE_32BIT
, 1);
13458 else if (!strcmp (default_arch
, "iamcu"))
13460 update_code_flag (CODE_32BIT
, 1);
13461 if (cpu_arch_isa
== PROCESSOR_UNKNOWN
)
13463 static const i386_cpu_flags iamcu_flags
= CPU_IAMCU_FLAGS
;
13464 cpu_arch_name
= "iamcu";
13465 cpu_sub_arch_name
= NULL
;
13466 cpu_arch_flags
= iamcu_flags
;
13467 cpu_arch_isa
= PROCESSOR_IAMCU
;
13468 cpu_arch_isa_flags
= iamcu_flags
;
13469 if (!cpu_arch_tune_set
)
13471 cpu_arch_tune
= cpu_arch_isa
;
13472 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
13475 else if (cpu_arch_isa
!= PROCESSOR_IAMCU
)
13476 as_fatal (_("Intel MCU doesn't support `%s' architecture"),
13480 as_fatal (_("unknown architecture"));
13482 if (cpu_flags_all_zero (&cpu_arch_isa_flags
))
13483 cpu_arch_isa_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
13484 if (cpu_flags_all_zero (&cpu_arch_tune_flags
))
13485 cpu_arch_tune_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
13487 switch (OUTPUT_FLAVOR
)
13489 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
13490 case bfd_target_aout_flavour
:
13491 return AOUT_TARGET_FORMAT
;
13493 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
13494 # if defined (TE_PE) || defined (TE_PEP)
13495 case bfd_target_coff_flavour
:
13496 if (flag_code
== CODE_64BIT
)
13497 return use_big_obj
? "pe-bigobj-x86-64" : "pe-x86-64";
13499 return use_big_obj
? "pe-bigobj-i386" : "pe-i386";
13500 # elif defined (TE_GO32)
13501 case bfd_target_coff_flavour
:
13502 return "coff-go32";
13504 case bfd_target_coff_flavour
:
13505 return "coff-i386";
13508 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
13509 case bfd_target_elf_flavour
:
13511 const char *format
;
13513 switch (x86_elf_abi
)
13516 format
= ELF_TARGET_FORMAT
;
13518 tls_get_addr
= "___tls_get_addr";
13522 use_rela_relocations
= 1;
13525 tls_get_addr
= "__tls_get_addr";
13527 format
= ELF_TARGET_FORMAT64
;
13529 case X86_64_X32_ABI
:
13530 use_rela_relocations
= 1;
13533 tls_get_addr
= "__tls_get_addr";
13535 disallow_64bit_reloc
= 1;
13536 format
= ELF_TARGET_FORMAT32
;
13539 if (cpu_arch_isa
== PROCESSOR_L1OM
)
13541 if (x86_elf_abi
!= X86_64_ABI
)
13542 as_fatal (_("Intel L1OM is 64bit only"));
13543 return ELF_TARGET_L1OM_FORMAT
;
13545 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
13547 if (x86_elf_abi
!= X86_64_ABI
)
13548 as_fatal (_("Intel K1OM is 64bit only"));
13549 return ELF_TARGET_K1OM_FORMAT
;
13551 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
13553 if (x86_elf_abi
!= I386_ABI
)
13554 as_fatal (_("Intel MCU is 32bit only"));
13555 return ELF_TARGET_IAMCU_FORMAT
;
13561 #if defined (OBJ_MACH_O)
13562 case bfd_target_mach_o_flavour
:
13563 if (flag_code
== CODE_64BIT
)
13565 use_rela_relocations
= 1;
13567 return "mach-o-x86-64";
13570 return "mach-o-i386";
13578 #endif /* OBJ_MAYBE_ more than one */
/* Back-end hook for symbols gas could not resolve: lazily create the
   _GLOBAL_OFFSET_TABLE_ symbol (GOT_symbol) on first reference.  The
   cheap first-three-character compare short-circuits before the full
   strcmp.  NOTE(review): extraction dropped interior lines here
   (braces/returns); code text reproduced verbatim -- confirm against
   upstream tc-i386.c.  */
13581 md_undefined_symbol (char *name
)
13583 if (name
[0] == GLOBAL_OFFSET_TABLE_NAME
[0]
13584 && name
[1] == GLOBAL_OFFSET_TABLE_NAME
[1]
13585 && name
[2] == GLOBAL_OFFSET_TABLE_NAME
[2]
13586 && strcmp (name
, GLOBAL_OFFSET_TABLE_NAME
) == 0)
/* A second definition of the GOT symbol is an error.  */
13590 if (symbol_find (name
))
13591 as_bad (_("GOT already in symbol table"));
13592 GOT_symbol
= symbol_new (name
, undefined_section
,
13593 (valueT
) 0, &zero_address_frag
);
13600 /* Round up a section size to the appropriate boundary. */
/* Round up SIZE to the section's alignment boundary -- only needed for
   a.out output (see the in-body comment); other flavours return SIZE
   unchanged.  NOTE(review): some interior lines were dropped by
   extraction; text reproduced verbatim.  */
13603 md_section_align (segT segment ATTRIBUTE_UNUSED
, valueT size
)
13605 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
13606 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
13608 /* For a.out, force the section size to be aligned. If we don't do
13609 this, BFD will align it for us, but it will not write out the
13610 final bytes of the section. This may be a bug in BFD, but it is
13611 easier to fix it here since that is how the other a.out targets
13615 align
= bfd_section_alignment (segment
);
/* Classic round-up-to-power-of-two: add (2^align - 1) then mask.  */
13616 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
13623 /* On the i386, PC-relative offsets are relative to the start of the
13624 next instruction. That is, the address of the offset, plus its
13625 size, since the offset is always the last part of the insn. */
/* See the comment just above: the PC-relative base is the address of
   the byte following the fixup field (frag address + offset within
   frag + field size).  NOTE(review): the return-type line and braces
   were dropped by extraction; code reproduced verbatim.  */
13628 md_pcrel_from (fixS
*fixP
)
13630 return fixP
->fx_size
+ fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
/* Handler for the .bss pseudo-op: read the subsection number from the
   input line and switch output to that subsection of bss_section.  On
   ELF, notify the object back end of the section change first.  */
13636 s_bss (int ignore ATTRIBUTE_UNUSED
)
13640 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13642 obj_elf_section_change_hook ();
13644 temp
= get_absolute_expression ();
13645 subseg_set (bss_section
, (subsegT
) temp
);
13646 demand_empty_rest_of_line ();
13651 /* Remember constant directive. */
/* Called when a data ("constant") directive is emitted.  If it lands
   in a code section, record it in last_insn so branch-alignment
   machinery knows the last thing emitted was not an instruction, and
   warn that the -mlfence-before-* insertion options cannot be applied
   across a data directive.  */
13654 i386_cons_align (int ignore ATTRIBUTE_UNUSED
)
13656 if (last_insn
.kind
!= last_insn_directive
13657 && (bfd_section_flags (now_seg
) & SEC_CODE
))
13659 last_insn
.seg
= now_seg
;
13660 last_insn
.kind
= last_insn_directive
;
13661 last_insn
.name
= "constant directive";
13662 last_insn
.file
= as_where (&last_insn
.line
);
/* Diagnose which lfence-insertion options are being skipped.  */
13663 if (lfence_before_ret
!= lfence_before_ret_none
)
13665 if (lfence_before_indirect_branch
!= lfence_branch_none
)
13666 as_warn (_("constant directive skips -mlfence-before-ret "
13667 "and -mlfence-before-indirect-branch"));
13669 as_warn (_("constant directive skips -mlfence-before-ret"));
13671 else if (lfence_before_indirect_branch
!= lfence_branch_none
)
13672 as_warn (_("constant directive skips -mlfence-before-indirect-branch"));
/* Post-process a fixup before relocation generation.  When the fixup
   subtracts GOT_symbol (sym - _GLOBAL_OFFSET_TABLE_), rewrite it to
   the matching GOT-relative relocation type and clear fx_subsy; on
   32-bit ELF, also relax GOT32 to GOT32X when the tc bit allows it.
   NOTE(review): several branch conditions/braces were dropped by
   extraction; code reproduced verbatim -- confirm control flow against
   upstream tc-i386.c before trusting this summary in detail.  */
13677 i386_validate_fix (fixS
*fixp
)
13679 if (fixp
->fx_subsy
)
13681 if (fixp
->fx_subsy
== GOT_symbol
)
13683 if (fixp
->fx_r_type
== BFD_RELOC_32_PCREL
)
13687 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* fx_tcbit2/fx_tcbit select the relaxable GOTPCRELX forms.  */
13688 if (fixp
->fx_tcbit2
)
13689 fixp
->fx_r_type
= (fixp
->fx_tcbit
13690 ? BFD_RELOC_X86_64_REX_GOTPCRELX
13691 : BFD_RELOC_X86_64_GOTPCRELX
);
13694 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTPCREL
;
13699 fixp
->fx_r_type
= BFD_RELOC_386_GOTOFF
;
13701 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTOFF64
;
/* The GOT symbol has been folded into the reloc type; drop it.  */
13703 fixp
->fx_subsy
= 0;
13706 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13707 else if (!object_64bit
)
13709 if (fixp
->fx_r_type
== BFD_RELOC_386_GOT32
13710 && fixp
->fx_tcbit2
)
13711 fixp
->fx_r_type
= BFD_RELOC_386_GOT32X
;
13717 tc_gen_reloc (asection
*section ATTRIBUTE_UNUSED
, fixS
*fixp
)
13720 bfd_reloc_code_real_type code
;
13722 switch (fixp
->fx_r_type
)
13724 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13725 case BFD_RELOC_SIZE32
:
13726 case BFD_RELOC_SIZE64
:
13727 if (S_IS_DEFINED (fixp
->fx_addsy
)
13728 && !S_IS_EXTERNAL (fixp
->fx_addsy
))
13730 /* Resolve size relocation against local symbol to size of
13731 the symbol plus addend. */
13732 valueT value
= S_GET_SIZE (fixp
->fx_addsy
) + fixp
->fx_offset
;
13733 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
13734 && !fits_in_unsigned_long (value
))
13735 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
13736 _("symbol size computation overflow"));
13737 fixp
->fx_addsy
= NULL
;
13738 fixp
->fx_subsy
= NULL
;
13739 md_apply_fix (fixp
, (valueT
*) &value
, NULL
);
13743 /* Fall through. */
13745 case BFD_RELOC_X86_64_PLT32
:
13746 case BFD_RELOC_X86_64_GOT32
:
13747 case BFD_RELOC_X86_64_GOTPCREL
:
13748 case BFD_RELOC_X86_64_GOTPCRELX
:
13749 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
13750 case BFD_RELOC_386_PLT32
:
13751 case BFD_RELOC_386_GOT32
:
13752 case BFD_RELOC_386_GOT32X
:
13753 case BFD_RELOC_386_GOTOFF
:
13754 case BFD_RELOC_386_GOTPC
:
13755 case BFD_RELOC_386_TLS_GD
:
13756 case BFD_RELOC_386_TLS_LDM
:
13757 case BFD_RELOC_386_TLS_LDO_32
:
13758 case BFD_RELOC_386_TLS_IE_32
:
13759 case BFD_RELOC_386_TLS_IE
:
13760 case BFD_RELOC_386_TLS_GOTIE
:
13761 case BFD_RELOC_386_TLS_LE_32
:
13762 case BFD_RELOC_386_TLS_LE
:
13763 case BFD_RELOC_386_TLS_GOTDESC
:
13764 case BFD_RELOC_386_TLS_DESC_CALL
:
13765 case BFD_RELOC_X86_64_TLSGD
:
13766 case BFD_RELOC_X86_64_TLSLD
:
13767 case BFD_RELOC_X86_64_DTPOFF32
:
13768 case BFD_RELOC_X86_64_DTPOFF64
:
13769 case BFD_RELOC_X86_64_GOTTPOFF
:
13770 case BFD_RELOC_X86_64_TPOFF32
:
13771 case BFD_RELOC_X86_64_TPOFF64
:
13772 case BFD_RELOC_X86_64_GOTOFF64
:
13773 case BFD_RELOC_X86_64_GOTPC32
:
13774 case BFD_RELOC_X86_64_GOT64
:
13775 case BFD_RELOC_X86_64_GOTPCREL64
:
13776 case BFD_RELOC_X86_64_GOTPC64
:
13777 case BFD_RELOC_X86_64_GOTPLT64
:
13778 case BFD_RELOC_X86_64_PLTOFF64
:
13779 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
13780 case BFD_RELOC_X86_64_TLSDESC_CALL
:
13781 case BFD_RELOC_RVA
:
13782 case BFD_RELOC_VTABLE_ENTRY
:
13783 case BFD_RELOC_VTABLE_INHERIT
:
13785 case BFD_RELOC_32_SECREL
:
13787 code
= fixp
->fx_r_type
;
13789 case BFD_RELOC_X86_64_32S
:
13790 if (!fixp
->fx_pcrel
)
13792 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
13793 code
= fixp
->fx_r_type
;
13796 /* Fall through. */
13798 if (fixp
->fx_pcrel
)
13800 switch (fixp
->fx_size
)
13803 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
13804 _("can not do %d byte pc-relative relocation"),
13806 code
= BFD_RELOC_32_PCREL
;
13808 case 1: code
= BFD_RELOC_8_PCREL
; break;
13809 case 2: code
= BFD_RELOC_16_PCREL
; break;
13810 case 4: code
= BFD_RELOC_32_PCREL
; break;
13812 case 8: code
= BFD_RELOC_64_PCREL
; break;
13818 switch (fixp
->fx_size
)
13821 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
13822 _("can not do %d byte relocation"),
13824 code
= BFD_RELOC_32
;
13826 case 1: code
= BFD_RELOC_8
; break;
13827 case 2: code
= BFD_RELOC_16
; break;
13828 case 4: code
= BFD_RELOC_32
; break;
13830 case 8: code
= BFD_RELOC_64
; break;
13837 if ((code
== BFD_RELOC_32
13838 || code
== BFD_RELOC_32_PCREL
13839 || code
== BFD_RELOC_X86_64_32S
)
13841 && fixp
->fx_addsy
== GOT_symbol
)
13844 code
= BFD_RELOC_386_GOTPC
;
13846 code
= BFD_RELOC_X86_64_GOTPC32
;
13848 if ((code
== BFD_RELOC_64
|| code
== BFD_RELOC_64_PCREL
)
13850 && fixp
->fx_addsy
== GOT_symbol
)
13852 code
= BFD_RELOC_X86_64_GOTPC64
;
13855 rel
= XNEW (arelent
);
13856 rel
->sym_ptr_ptr
= XNEW (asymbol
*);
13857 *rel
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
13859 rel
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
13861 if (!use_rela_relocations
)
13863 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
13864 vtable entry to be used in the relocation's section offset. */
13865 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
13866 rel
->address
= fixp
->fx_offset
;
13867 #if defined (OBJ_COFF) && defined (TE_PE)
13868 else if (fixp
->fx_addsy
&& S_IS_WEAK (fixp
->fx_addsy
))
13869 rel
->addend
= fixp
->fx_addnumber
- (S_GET_VALUE (fixp
->fx_addsy
) * 2);
13874 /* Use the rela in 64bit mode. */
13877 if (disallow_64bit_reloc
)
13880 case BFD_RELOC_X86_64_DTPOFF64
:
13881 case BFD_RELOC_X86_64_TPOFF64
:
13882 case BFD_RELOC_64_PCREL
:
13883 case BFD_RELOC_X86_64_GOTOFF64
:
13884 case BFD_RELOC_X86_64_GOT64
:
13885 case BFD_RELOC_X86_64_GOTPCREL64
:
13886 case BFD_RELOC_X86_64_GOTPC64
:
13887 case BFD_RELOC_X86_64_GOTPLT64
:
13888 case BFD_RELOC_X86_64_PLTOFF64
:
13889 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
13890 _("cannot represent relocation type %s in x32 mode"),
13891 bfd_get_reloc_code_name (code
));
13897 if (!fixp
->fx_pcrel
)
13898 rel
->addend
= fixp
->fx_offset
;
13902 case BFD_RELOC_X86_64_PLT32
:
13903 case BFD_RELOC_X86_64_GOT32
:
13904 case BFD_RELOC_X86_64_GOTPCREL
:
13905 case BFD_RELOC_X86_64_GOTPCRELX
:
13906 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
13907 case BFD_RELOC_X86_64_TLSGD
:
13908 case BFD_RELOC_X86_64_TLSLD
:
13909 case BFD_RELOC_X86_64_GOTTPOFF
:
13910 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
13911 case BFD_RELOC_X86_64_TLSDESC_CALL
:
13912 rel
->addend
= fixp
->fx_offset
- fixp
->fx_size
;
13915 rel
->addend
= (section
->vma
13917 + fixp
->fx_addnumber
13918 + md_pcrel_from (fixp
));
13923 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
13924 if (rel
->howto
== NULL
)
13926 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
13927 _("cannot represent relocation type %s"),
13928 bfd_get_reloc_code_name (code
));
13929 /* Set howto to a garbage value so that we can keep going. */
13930 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, BFD_RELOC_32
);
13931 gas_assert (rel
->howto
!= NULL
);
13937 #include "tc-i386-intel.c"
/* Parse a register name from the input stream (for CFI directives)
   and convert it to a DWARF2 register number in EXP.  Temporarily
   permits naked registers (no '%' prefix), '.' inside register names,
   and pseudo registers, restoring the previous parser state after the
   expression is evaluated.  On success EXP becomes O_constant holding
   dw2_regnum for the current flag_code; otherwise O_illegal.  */
13940 tc_x86_parse_to_dw2regnum (expressionS
*exp
)
13942 int saved_naked_reg
;
13943 char saved_register_dot
;
13945 saved_naked_reg
= allow_naked_reg
;
13946 allow_naked_reg
= 1;
13947 saved_register_dot
= register_chars
['.'];
13948 register_chars
['.'] = '.';
13949 allow_pseudo_reg
= 1;
13950 expression_and_evaluate (exp
);
13951 allow_pseudo_reg
= 0;
/* Restore parser state before inspecting the result.  */
13952 register_chars
['.'] = saved_register_dot
;
13953 allow_naked_reg
= saved_naked_reg
;
13955 if (exp
->X_op
== O_register
&& exp
->X_add_number
>= 0)
13957 if ((addressT
) exp
->X_add_number
< i386_regtab_size
)
13959 exp
->X_op
= O_constant
;
/* dw2_regnum is indexed by flag_code >> 1: 32- vs 64-bit column.  */
13960 exp
->X_add_number
= i386_regtab
[exp
->X_add_number
]
13961 .dw2_regnum
[flag_code
>> 1];
13964 exp
->X_op
= O_illegal
;
/* Emit the initial CFI for a frame: CFA = stack pointer with the
   default data alignment offset, and the return address saved in the
   DWARF return column.  The stack-pointer DWARF register number
   ("esp" or "rsp" depending on flag_code) is resolved once and cached
   in sp_regno[].  */
13969 tc_x86_frame_initial_instructions (void)
13971 static unsigned int sp_regno
[2];
13973 if (!sp_regno
[flag_code
>> 1])
13975 char *saved_input
= input_line_pointer
;
13976 char sp
[][4] = {"esp", "rsp"};
/* Reuse the register parser by pointing the input at the name.  */
13979 input_line_pointer
= sp
[flag_code
>> 1];
13980 tc_x86_parse_to_dw2regnum (&exp
);
13981 gas_assert (exp
.X_op
== O_constant
);
13982 sp_regno
[flag_code
>> 1] = exp
.X_add_number
;
13983 input_line_pointer
= saved_input
;
13986 cfi_add_CFA_def_cfa (sp_regno
[flag_code
>> 1], -x86_cie_data_alignment
);
13987 cfi_add_CFA_offset (x86_dwarf2_return_column
, x86_cie_data_alignment
);
/* Address size in bytes for DWARF2 output.  X32 gets special-cased
   (presumably returning 4 -- the line was dropped by extraction;
   confirm upstream); otherwise derive from the BFD target's bits per
   address.  */
13991 x86_dwarf2_addr_size (void)
13993 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
13994 if (x86_elf_abi
== X86_64_X32_ABI
)
13997 return bfd_arch_bits_per_address (stdoutput
) / 8;
/* Map a section type name to an ELF section type: in 64-bit mode the
   name "unwind" selects SHT_X86_64_UNWIND.  */
14001 i386_elf_section_type (const char *str
, size_t len
)
14003 if (flag_code
== CODE_64BIT
14004 && len
== sizeof ("unwind") - 1
14005 && strncmp (str
, "unwind", 6) == 0)
14006 return SHT_X86_64_UNWIND
;
/* Solaris hook: mark the .eh_frame section with the x86-64 unwind
   section type when assembling 64-bit code.  */
14013 i386_solaris_fix_up_eh_frame (segT sec
)
14015 if (flag_code
== CODE_64BIT
)
14016 elf_section_type (sec
) = SHT_X86_64_UNWIND
;
/* PE-specific DWARF2 offset emission: output SYMBOL as a SIZE-byte
   section-relative (secrel) expression.  */
14022 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
14026 exp
.X_op
= O_secrel
;
14027 exp
.X_add_symbol
= symbol
;
14028 exp
.X_add_number
= 0;
14029 emit_expr (&exp
, size
);
14033 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14034 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
/* .section flag-letter hook: in 64-bit mode the 'l' letter maps to
   SHF_X86_64_LARGE; otherwise set *PTR_MSG to the appropriate
   diagnostic listing the accepted letters.  NOTE(review): the letter
   test itself was dropped by extraction; reproduced verbatim.  */
14037 x86_64_section_letter (int letter
, const char **ptr_msg
)
14039 if (flag_code
== CODE_64BIT
)
14042 return SHF_X86_64_LARGE
;
14044 *ptr_msg
= _("bad .section directive: want a,l,w,x,M,S,G,T in string");
14047 *ptr_msg
= _("bad .section directive: want a,w,x,M,S,G,T in string");
/* .section flag-word hook: the word "large" maps to SHF_X86_64_LARGE
   in 64-bit mode.  */
14052 x86_64_section_word (char *str
, size_t len
)
14054 if (len
== 5 && flag_code
== CODE_64BIT
&& CONST_STRNEQ (str
, "large"))
14055 return SHF_X86_64_LARGE
;
/* Handler for the .largecomm pseudo-op.  In 32-bit mode fall back to
   ordinary .comm with a warning.  In 64-bit mode, temporarily swap in
   the large-common section (and a lazily-created .lbss section for
   local symbols) around s_comm_internal, restoring the originals
   afterwards.  */
14061 handle_large_common (int small ATTRIBUTE_UNUSED
)
14063 if (flag_code
!= CODE_64BIT
)
14065 s_comm_internal (0, elf_common_parse
);
14066 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
14070 static segT lbss_section
;
/* Remember the sections we are about to override.  */
14071 asection
*saved_com_section_ptr
= elf_com_section_ptr
;
14072 asection
*saved_bss_section
= bss_section
;
14074 if (lbss_section
== NULL
)
14076 flagword applicable
;
14077 segT seg
= now_seg
;
14078 subsegT subseg
= now_subseg
;
14080 /* The .lbss section is for local .largecomm symbols. */
14081 lbss_section
= subseg_new (".lbss", 0);
14082 applicable
= bfd_applicable_section_flags (stdoutput
);
14083 bfd_set_section_flags (lbss_section
, applicable
& SEC_ALLOC
);
14084 seg_info (lbss_section
)->bss
= 1;
/* subseg_new switched sections; switch back to where we were.  */
14086 subseg_set (seg
, subseg
);
14089 elf_com_section_ptr
= &_bfd_elf_large_com_section
;
14090 bss_section
= lbss_section
;
14092 s_comm_internal (0, elf_common_parse
);
14094 elf_com_section_ptr
= saved_com_section_ptr
;
14095 bss_section
= saved_bss_section
;
14098 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */