1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2018 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
36 #ifndef REGISTER_WARNINGS
37 #define REGISTER_WARNINGS 1
40 #ifndef INFER_ADDR_PREFIX
41 #define INFER_ADDR_PREFIX 1
45 #define DEFAULT_ARCH "i386"
50 #define INLINE __inline__
56 /* Prefixes will be emitted in the order defined below.
57 WAIT_PREFIX must be the first prefix since FWAIT is really is an
58 instruction, and so must come before any prefixes.
59 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
60 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
/* HLE and BND prefixes are emitted in the REP prefix slot.  */
#define HLE_PREFIX	REP_PREFIX
#define BND_PREFIX	REP_PREFIX
#define REX_PREFIX	6	/* must come last.  */
#define MAX_PREFIXES	7	/* max prefixes per opcode */

/* we define the syntax here (modulo base,index,scale syntax) */
#define REGISTER_PREFIX		'%'
#define IMMEDIATE_PREFIX	'$'
#define ABSOLUTE_PREFIX		'*'

/* These are the instruction mnemonic suffixes in AT&T syntax or
   memory operand size in Intel syntax.  */
#define WORD_MNEM_SUFFIX	'w'
#define BYTE_MNEM_SUFFIX	'b'
#define SHORT_MNEM_SUFFIX	's'
#define LONG_MNEM_SUFFIX	'l'
#define QWORD_MNEM_SUFFIX	'q'
#define XMMWORD_MNEM_SUFFIX	'x'
#define YMMWORD_MNEM_SUFFIX	'y'
#define ZMMWORD_MNEM_SUFFIX	'z'
/* Intel Syntax.  Use a non-ascii letter since it never appears in
   instructions, so it cannot collide with a real suffix.  */
#define LONG_DOUBLE_MNEM_SUFFIX	'\1'

#define END_OF_INSN '\0'
94 'templates' is for grouping together 'template' structures for opcodes
95 of the same name. This is only used for storing the insns in the grand
96 ole hash table of insns.
97 The templates themselves start at START and range up to (but not including)
102 const insn_template
*start
;
103 const insn_template
*end
;
107 /* 386 operand encoding bytes: see 386 book for details of this. */
110 unsigned int regmem
; /* codes register or memory operand */
111 unsigned int reg
; /* codes register operand (or extended opcode) */
112 unsigned int mode
; /* how to interpret regmem & reg */
116 /* x86-64 extension prefix. */
117 typedef int rex_byte
;
119 /* 386 opcode byte to code indirect addressing. */
128 /* x86 arch names, types and features */
131 const char *name
; /* arch name */
132 unsigned int len
; /* arch string length */
133 enum processor_type type
; /* arch type */
134 i386_cpu_flags flags
; /* cpu feature flags */
135 unsigned int skip
; /* show_arch should skip this. */
139 /* Used to turn off indicated flags. */
142 const char *name
; /* arch name */
143 unsigned int len
; /* arch string length */
144 i386_cpu_flags flags
; /* cpu feature flags */
148 static void update_code_flag (int, int);
149 static void set_code_flag (int);
150 static void set_16bit_gcc_code_flag (int);
151 static void set_intel_syntax (int);
152 static void set_intel_mnemonic (int);
153 static void set_allow_index_reg (int);
154 static void set_check (int);
155 static void set_cpu_arch (int);
157 static void pe_directive_secrel (int);
159 static void signed_cons (int);
160 static char *output_invalid (int c
);
161 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
163 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
165 static int i386_att_operand (char *);
166 static int i386_intel_operand (char *, int);
167 static int i386_intel_simplify (expressionS
*);
168 static int i386_intel_parse_name (const char *, expressionS
*);
169 static const reg_entry
*parse_register (char *, char **);
170 static char *parse_insn (char *, char *);
171 static char *parse_operands (char *, const char *);
172 static void swap_operands (void);
173 static void swap_2_operands (int, int);
174 static void optimize_imm (void);
175 static void optimize_disp (void);
176 static const insn_template
*match_template (char);
177 static int check_string (void);
178 static int process_suffix (void);
179 static int check_byte_reg (void);
180 static int check_long_reg (void);
181 static int check_qword_reg (void);
182 static int check_word_reg (void);
183 static int finalize_imm (void);
184 static int process_operands (void);
185 static const seg_entry
*build_modrm_byte (void);
186 static void output_insn (void);
187 static void output_imm (fragS
*, offsetT
);
188 static void output_disp (fragS
*, offsetT
);
190 static void s_bss (int);
192 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
193 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
196 static const char *default_arch
= DEFAULT_ARCH
;
198 /* This struct describes rounding control and SAE in the instruction. */
212 static struct RC_Operation rc_op
;
214 /* The struct describes masking, applied to OPERAND in the instruction.
215 MASK is a pointer to the corresponding mask register. ZEROING tells
216 whether merging or zeroing mask is used. */
217 struct Mask_Operation
219 const reg_entry
*mask
;
220 unsigned int zeroing
;
221 /* The operand where this operation is associated. */
225 static struct Mask_Operation mask_op
;
227 /* The struct describes broadcasting, applied to OPERAND. FACTOR is
229 struct Broadcast_Operation
231 /* Type of broadcast: no broadcast, {1to8}, or {1to16}. */
234 /* Index of broadcasted operand. */
238 static struct Broadcast_Operation broadcast_op
;
243 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
244 unsigned char bytes
[4];
246 /* Destination or source register specifier. */
247 const reg_entry
*register_specifier
;
250 /* 'md_assemble ()' gathers together information and puts it into a
257 const reg_entry
*regs
;
262 operand_size_mismatch
,
263 operand_type_mismatch
,
264 register_type_mismatch
,
265 number_of_operands_mismatch
,
266 invalid_instruction_suffix
,
269 unsupported_with_intel_mnemonic
,
272 invalid_vsib_address
,
273 invalid_vector_register_set
,
274 unsupported_vector_index_register
,
275 unsupported_broadcast
,
276 broadcast_not_on_src_operand
,
279 mask_not_on_destination
,
282 rc_sae_operand_not_last_imm
,
283 invalid_register_operand
,
288 /* TM holds the template for the insn were currently assembling. */
291 /* SUFFIX holds the instruction size suffix for byte, word, dword
292 or qword, if given. */
295 /* OPERANDS gives the number of given operands. */
296 unsigned int operands
;
298 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
299 of given register, displacement, memory operands and immediate
301 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
303 /* TYPES [i] is the type (see above #defines) which tells us how to
304 use OP[i] for the corresponding operand. */
305 i386_operand_type types
[MAX_OPERANDS
];
307 /* Displacement expression, immediate expression, or register for each
309 union i386_op op
[MAX_OPERANDS
];
311 /* Flags for operands. */
312 unsigned int flags
[MAX_OPERANDS
];
313 #define Operand_PCrel 1
315 /* Relocation type for operand */
316 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
318 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
319 the base index byte below. */
320 const reg_entry
*base_reg
;
321 const reg_entry
*index_reg
;
322 unsigned int log2_scale_factor
;
324 /* SEG gives the seg_entries of this insn. They are zero unless
325 explicit segment overrides are given. */
326 const seg_entry
*seg
[2];
328 /* Copied first memory operand string, for re-checking. */
331 /* PREFIX holds all the given prefix opcodes (usually null).
332 PREFIXES is the number of prefix opcodes. */
333 unsigned int prefixes
;
334 unsigned char prefix
[MAX_PREFIXES
];
336 /* RM and SIB are the modrm byte and the sib byte where the
337 addressing modes of this insn are encoded. */
344 /* Masking attributes. */
345 struct Mask_Operation
*mask
;
347 /* Rounding control and SAE attributes. */
348 struct RC_Operation
*rounding
;
350 /* Broadcasting attributes. */
351 struct Broadcast_Operation
*broadcast
;
353 /* Compressed disp8*N attribute. */
354 unsigned int memshift
;
356 /* Prefer load or store in encoding. */
359 dir_encoding_default
= 0,
364 /* Prefer 8bit or 32bit displacement in encoding. */
367 disp_encoding_default
= 0,
372 /* Prefer the REX byte in encoding. */
373 bfd_boolean rex_encoding
;
375 /* Disable instruction size optimization. */
376 bfd_boolean no_optimize
;
378 /* How to encode vector instructions. */
381 vex_encoding_default
= 0,
388 const char *rep_prefix
;
391 const char *hle_prefix
;
393 /* Have BND prefix. */
394 const char *bnd_prefix
;
396 /* Have NOTRACK prefix. */
397 const char *notrack_prefix
;
400 enum i386_error error
;
403 typedef struct _i386_insn i386_insn
;
405 /* Link RC type with corresponding string, that'll be looked for in
414 static const struct RC_name RC_NamesTable
[] =
416 { rne
, STRING_COMMA_LEN ("rn-sae") },
417 { rd
, STRING_COMMA_LEN ("rd-sae") },
418 { ru
, STRING_COMMA_LEN ("ru-sae") },
419 { rz
, STRING_COMMA_LEN ("rz-sae") },
420 { saeonly
, STRING_COMMA_LEN ("sae") },
423 /* List of chars besides those in app.c:symbol_chars that can start an
424 operand. Used to prevent the scrubber eating vital white-space. */
425 const char extra_symbol_chars
[] = "*%-([{}"
434 #if (defined (TE_I386AIX) \
435 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
436 && !defined (TE_GNU) \
437 && !defined (TE_LINUX) \
438 && !defined (TE_NACL) \
439 && !defined (TE_NETWARE) \
440 && !defined (TE_FreeBSD) \
441 && !defined (TE_DragonFly) \
442 && !defined (TE_NetBSD)))
443 /* This array holds the chars that always start a comment. If the
444 pre-processor is disabled, these aren't very useful. The option
445 --divide will remove '/' from this list. */
446 const char *i386_comment_chars
= "#/";
447 #define SVR4_COMMENT_CHARS 1
448 #define PREFIX_SEPARATOR '\\'
451 const char *i386_comment_chars
= "#";
452 #define PREFIX_SEPARATOR '/'
/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename' .line and
   .file directives will appear in the pre-processed output.  Note that
   input_file.c hand checks for '#' at the beginning of the first line of
   the input file.  This is because the compiler outputs #NO_APP at the
   beginning of its output.  Also note that comments started like this one
   will always work if '/' isn't otherwise defined.  */
const char line_comment_chars[] = "#/";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant from exp in floating point
   numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant,
   e.g. 0f12.456 or 0d1.2345e12.  */
const char FLT_CHARS[] = "fFdDxX";
/* Tables for lexical analysis.  A nonzero entry means the character is
   a member of the corresponding class; the tables are filled in at
   initialization time.  */
static char mnemonic_chars[256];
static char register_chars[256];
static char operand_chars[256];
static char identifier_chars[256];
static char digit_chars[256];

/* Lexical macros.  The argument is parenthesized before the cast so
   that a compound argument (e.g. 'c + 1') is converted as a whole;
   casting only the first token of the expression could yield an index
   outside the 256-entry tables.  */
#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) (x)])
#define is_operand_char(x) (operand_chars[(unsigned char) (x)])
#define is_register_char(x) (register_chars[(unsigned char) (x)])
#define is_space_char(x) ((x) == ' ')
#define is_identifier_char(x) (identifier_chars[(unsigned char) (x)])
#define is_digit_char(x) (digit_chars[(unsigned char) (x)])

/* All non-digit non-letter characters that may occur in an operand.  */
static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
/* md_assemble() always leaves the strings it's passed unaltered.  To
   effect this we maintain a stack of saved characters that we've smashed
   with '\0's (indicating end of strings for various sub-fields of the
   assembler instruction).
   NOTE(review): capacity is 32 saved characters and there is no overflow
   check; callers are expected to keep SAVE/RESTORE calls balanced.  */
static char save_stack[32];
static char *save_stack_p;
#define END_STRING_AND_SAVE(s) \
	do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
#define RESTORE_END_STRING(s) \
	do { *(s) = *--save_stack_p; } while (0)
505 /* The instruction we're assembling. */
508 /* Possible templates for current insn. */
509 static const templates
*current_templates
;
511 /* Per instruction expressionS buffers: max displacements & immediates. */
512 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
513 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
515 /* Current operand we are working on. */
516 static int this_operand
= -1;
518 /* We support four different modes. FLAG_CODE variable is used to distinguish
526 static enum flag_code flag_code
;
527 static unsigned int object_64bit
;
528 static unsigned int disallow_64bit_reloc
;
529 static int use_rela_relocations
= 0;
531 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
532 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
533 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
535 /* The ELF ABI to use. */
543 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
546 #if defined (TE_PE) || defined (TE_PEP)
547 /* Use big object file format. */
548 static int use_big_obj
= 0;
551 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
552 /* 1 if generating code for a shared library. */
553 static int shared
= 0;
556 /* 1 for intel syntax,
558 static int intel_syntax
= 0;
560 /* 1 for Intel64 ISA,
564 /* 1 for intel mnemonic,
565 0 if att mnemonic. */
566 static int intel_mnemonic
= !SYSV386_COMPAT
;
568 /* 1 if support old (<= 2.8.1) versions of gcc. */
569 static int old_gcc
= OLDGCC_COMPAT
;
571 /* 1 if pseudo registers are permitted. */
572 static int allow_pseudo_reg
= 0;
574 /* 1 if register prefix % not required. */
575 static int allow_naked_reg
= 0;
577 /* 1 if the assembler should add BND prefix for all control-transferring
578 instructions supporting it, even if this prefix wasn't specified
580 static int add_bnd_prefix
= 0;
582 /* 1 if pseudo index register, eiz/riz, is allowed . */
583 static int allow_index_reg
= 0;
585 /* 1 if the assembler should ignore LOCK prefix, even if it was
586 specified explicitly. */
587 static int omit_lock_prefix
= 0;
589 /* 1 if the assembler should encode lfence, mfence, and sfence as
590 "lock addl $0, (%{re}sp)". */
591 static int avoid_fence
= 0;
593 /* 1 if the assembler should generate relax relocations. */
595 static int generate_relax_relocations
596 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
;
598 static enum check_kind
604 sse_check
, operand_check
= check_warning
;
607 1. Clear the REX_W bit with register operand if possible.
608 2. Above plus use 128bit vector instruction to clear the full vector
611 static int optimize
= 0;
614 1. Clear the REX_W bit with register operand if possible.
615 2. Above plus use 128bit vector instruction to clear the full vector
617 3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to
620 static int optimize_for_space
= 0;
622 /* Register prefix used for error message. */
623 static const char *register_prefix
= "%";
625 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
626 leave, push, and pop instructions so that gcc has the same stack
627 frame as in 32 bit mode. */
628 static char stackop_size
= '\0';
630 /* Non-zero to optimize code alignment. */
631 int optimize_align_code
= 1;
633 /* Non-zero to quieten some warnings. */
634 static int quiet_warnings
= 0;
637 static const char *cpu_arch_name
= NULL
;
638 static char *cpu_sub_arch_name
= NULL
;
640 /* CPU feature flags. */
641 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
643 /* If we have selected a cpu we are generating instructions for. */
644 static int cpu_arch_tune_set
= 0;
646 /* Cpu we are generating instructions for. */
647 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
649 /* CPU feature flags of cpu we are generating instructions for. */
650 static i386_cpu_flags cpu_arch_tune_flags
;
652 /* CPU instruction set architecture used. */
653 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
655 /* CPU feature flags of instruction set architecture used. */
656 i386_cpu_flags cpu_arch_isa_flags
;
658 /* If set, conditional jumps are not automatically promoted to handle
659 larger than a byte offset. */
660 static unsigned int no_cond_jump_promotion
= 0;
662 /* Encode SSE instructions with VEX prefix. */
663 static unsigned int sse2avx
;
665 /* Encode scalar AVX instructions with specific vector length. */
672 /* Encode scalar EVEX LIG instructions with specific vector length. */
680 /* Encode EVEX WIG instructions with specific evex.w. */
687 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
688 static enum rc_type evexrcig
= rne
;
690 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
691 static symbolS
*GOT_symbol
;
693 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
694 unsigned int x86_dwarf2_return_column
;
696 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
697 int x86_cie_data_alignment
;
699 /* Interface to relax_segment.
700 There are 3 major relax states for 386 jump insns because the
701 different types of jumps add different sizes to frags when we're
702 figuring out what sort of jump to choose to reach a given label. */
705 #define UNCOND_JUMP 0
707 #define COND_JUMP86 2
712 #define SMALL16 (SMALL | CODE16)
714 #define BIG16 (BIG | CODE16)
718 #define INLINE __inline__
724 #define ENCODE_RELAX_STATE(type, size) \
725 ((relax_substateT) (((type) << 2) | (size)))
726 #define TYPE_FROM_RELAX_STATE(s) \
728 #define DISP_SIZE_FROM_RELAX_STATE(s) \
729 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
731 /* This table is used by relax_frag to promote short jumps to long
732 ones where necessary. SMALL (short) jumps may be promoted to BIG
733 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
734 don't allow a short jump in a 32 bit code segment to be promoted to
735 a 16 bit offset jump because it's slower (requires data size
736 prefix), and doesn't work, unless the destination is in the bottom
737 64k of the code segment (The top 16 bits of eip are zeroed). */
739 const relax_typeS md_relax_table
[] =
742 1) most positive reach of this state,
743 2) most negative reach of this state,
744 3) how many bytes this mode will have in the variable part of the frag
745 4) which index into the table to try if we can't fit into this one. */
747 /* UNCOND_JUMP states. */
748 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
749 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
750 /* dword jmp adds 4 bytes to frag:
751 0 extra opcode bytes, 4 displacement bytes. */
753 /* word jmp adds 2 byte2 to frag:
754 0 extra opcode bytes, 2 displacement bytes. */
757 /* COND_JUMP states. */
758 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
759 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
760 /* dword conditionals adds 5 bytes to frag:
761 1 extra opcode byte, 4 displacement bytes. */
763 /* word conditionals add 3 bytes to frag:
764 1 extra opcode byte, 2 displacement bytes. */
767 /* COND_JUMP86 states. */
768 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
769 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
770 /* dword conditionals adds 5 bytes to frag:
771 1 extra opcode byte, 4 displacement bytes. */
773 /* word conditionals add 4 bytes to frag:
774 1 displacement byte and a 3 byte long branch insn. */
778 static const arch_entry cpu_arch
[] =
780 /* Do not replace the first two entries - i386_target_format()
781 relies on them being there in this order. */
782 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32
,
783 CPU_GENERIC32_FLAGS
, 0 },
784 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64
,
785 CPU_GENERIC64_FLAGS
, 0 },
786 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN
,
788 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN
,
790 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN
,
792 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386
,
794 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486
,
796 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM
,
798 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO
,
800 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM
,
802 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO
,
803 CPU_PENTIUMPRO_FLAGS
, 0 },
804 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO
,
806 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO
,
808 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4
,
810 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA
,
812 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA
,
813 CPU_NOCONA_FLAGS
, 0 },
814 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE
,
816 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE
,
818 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2
,
819 CPU_CORE2_FLAGS
, 1 },
820 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2
,
821 CPU_CORE2_FLAGS
, 0 },
822 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7
,
823 CPU_COREI7_FLAGS
, 0 },
824 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM
,
826 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM
,
828 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU
,
829 CPU_IAMCU_FLAGS
, 0 },
830 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6
,
832 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6
,
834 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON
,
835 CPU_ATHLON_FLAGS
, 0 },
836 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8
,
838 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8
,
840 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8
,
842 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10
,
843 CPU_AMDFAM10_FLAGS
, 0 },
844 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD
,
845 CPU_BDVER1_FLAGS
, 0 },
846 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD
,
847 CPU_BDVER2_FLAGS
, 0 },
848 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD
,
849 CPU_BDVER3_FLAGS
, 0 },
850 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD
,
851 CPU_BDVER4_FLAGS
, 0 },
852 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER
,
853 CPU_ZNVER1_FLAGS
, 0 },
854 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT
,
855 CPU_BTVER1_FLAGS
, 0 },
856 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT
,
857 CPU_BTVER2_FLAGS
, 0 },
858 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN
,
860 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN
,
862 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN
,
864 { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN
,
866 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN
,
868 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN
,
870 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN
,
872 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN
,
874 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN
,
875 CPU_SSSE3_FLAGS
, 0 },
876 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN
,
877 CPU_SSE4_1_FLAGS
, 0 },
878 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN
,
879 CPU_SSE4_2_FLAGS
, 0 },
880 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN
,
881 CPU_SSE4_2_FLAGS
, 0 },
882 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN
,
884 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN
,
886 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN
,
887 CPU_AVX512F_FLAGS
, 0 },
888 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN
,
889 CPU_AVX512CD_FLAGS
, 0 },
890 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN
,
891 CPU_AVX512ER_FLAGS
, 0 },
892 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN
,
893 CPU_AVX512PF_FLAGS
, 0 },
894 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN
,
895 CPU_AVX512DQ_FLAGS
, 0 },
896 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN
,
897 CPU_AVX512BW_FLAGS
, 0 },
898 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN
,
899 CPU_AVX512VL_FLAGS
, 0 },
900 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN
,
902 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN
,
903 CPU_VMFUNC_FLAGS
, 0 },
904 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN
,
906 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN
,
907 CPU_XSAVE_FLAGS
, 0 },
908 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN
,
909 CPU_XSAVEOPT_FLAGS
, 0 },
910 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN
,
911 CPU_XSAVEC_FLAGS
, 0 },
912 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN
,
913 CPU_XSAVES_FLAGS
, 0 },
914 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN
,
916 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN
,
917 CPU_PCLMUL_FLAGS
, 0 },
918 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN
,
919 CPU_PCLMUL_FLAGS
, 1 },
920 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN
,
921 CPU_FSGSBASE_FLAGS
, 0 },
922 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN
,
923 CPU_RDRND_FLAGS
, 0 },
924 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN
,
926 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN
,
928 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN
,
930 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN
,
932 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN
,
934 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN
,
936 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN
,
937 CPU_MOVBE_FLAGS
, 0 },
938 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN
,
940 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN
,
942 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN
,
943 CPU_LZCNT_FLAGS
, 0 },
944 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN
,
946 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN
,
948 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN
,
949 CPU_INVPCID_FLAGS
, 0 },
950 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN
,
951 CPU_CLFLUSH_FLAGS
, 0 },
952 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN
,
954 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN
,
955 CPU_SYSCALL_FLAGS
, 0 },
956 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN
,
957 CPU_RDTSCP_FLAGS
, 0 },
958 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN
,
959 CPU_3DNOW_FLAGS
, 0 },
960 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN
,
961 CPU_3DNOWA_FLAGS
, 0 },
962 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN
,
963 CPU_PADLOCK_FLAGS
, 0 },
964 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN
,
966 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN
,
968 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
969 CPU_SSE4A_FLAGS
, 0 },
970 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN
,
972 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN
,
974 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN
,
976 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN
,
978 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN
,
979 CPU_RDSEED_FLAGS
, 0 },
980 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN
,
981 CPU_PRFCHW_FLAGS
, 0 },
982 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN
,
984 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN
,
986 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN
,
988 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN
,
989 CPU_CLFLUSHOPT_FLAGS
, 0 },
990 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN
,
991 CPU_PREFETCHWT1_FLAGS
, 0 },
992 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN
,
994 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN
,
996 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN
,
997 CPU_AVX512IFMA_FLAGS
, 0 },
998 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN
,
999 CPU_AVX512VBMI_FLAGS
, 0 },
1000 { STRING_COMMA_LEN (".avx512_4fmaps"), PROCESSOR_UNKNOWN
,
1001 CPU_AVX512_4FMAPS_FLAGS
, 0 },
1002 { STRING_COMMA_LEN (".avx512_4vnniw"), PROCESSOR_UNKNOWN
,
1003 CPU_AVX512_4VNNIW_FLAGS
, 0 },
1004 { STRING_COMMA_LEN (".avx512_vpopcntdq"), PROCESSOR_UNKNOWN
,
1005 CPU_AVX512_VPOPCNTDQ_FLAGS
, 0 },
1006 { STRING_COMMA_LEN (".avx512_vbmi2"), PROCESSOR_UNKNOWN
,
1007 CPU_AVX512_VBMI2_FLAGS
, 0 },
1008 { STRING_COMMA_LEN (".avx512_vnni"), PROCESSOR_UNKNOWN
,
1009 CPU_AVX512_VNNI_FLAGS
, 0 },
1010 { STRING_COMMA_LEN (".avx512_bitalg"), PROCESSOR_UNKNOWN
,
1011 CPU_AVX512_BITALG_FLAGS
, 0 },
1012 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN
,
1013 CPU_CLZERO_FLAGS
, 0 },
1014 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN
,
1015 CPU_MWAITX_FLAGS
, 0 },
1016 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN
,
1017 CPU_OSPKE_FLAGS
, 0 },
1018 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN
,
1019 CPU_RDPID_FLAGS
, 0 },
1020 { STRING_COMMA_LEN (".ptwrite"), PROCESSOR_UNKNOWN
,
1021 CPU_PTWRITE_FLAGS
, 0 },
1022 { STRING_COMMA_LEN (".ibt"), PROCESSOR_UNKNOWN
,
1024 { STRING_COMMA_LEN (".shstk"), PROCESSOR_UNKNOWN
,
1025 CPU_SHSTK_FLAGS
, 0 },
1026 { STRING_COMMA_LEN (".gfni"), PROCESSOR_UNKNOWN
,
1027 CPU_GFNI_FLAGS
, 0 },
1028 { STRING_COMMA_LEN (".vaes"), PROCESSOR_UNKNOWN
,
1029 CPU_VAES_FLAGS
, 0 },
1030 { STRING_COMMA_LEN (".vpclmulqdq"), PROCESSOR_UNKNOWN
,
1031 CPU_VPCLMULQDQ_FLAGS
, 0 },
1032 { STRING_COMMA_LEN (".wbnoinvd"), PROCESSOR_UNKNOWN
,
1033 CPU_WBNOINVD_FLAGS
, 0 },
1034 { STRING_COMMA_LEN (".pconfig"), PROCESSOR_UNKNOWN
,
1035 CPU_PCONFIG_FLAGS
, 0 },
1038 static const noarch_entry cpu_noarch
[] =
1040 { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS
},
1041 { STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS
},
1042 { STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS
},
1043 { STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS
},
1044 { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS
},
1045 { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS
},
1046 { STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS
},
1047 { STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS
},
1048 { STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS
},
1049 { STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS
},
1050 { STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS
},
1051 { STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS
},
1052 { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS
},
1053 { STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS
},
1054 { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS
},
1055 { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS
},
1056 { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS
},
1057 { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS
},
1058 { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS
},
1059 { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS
},
1060 { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS
},
1061 { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS
},
1062 { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS
},
1063 { STRING_COMMA_LEN ("noavx512_4fmaps"), CPU_ANY_AVX512_4FMAPS_FLAGS
},
1064 { STRING_COMMA_LEN ("noavx512_4vnniw"), CPU_ANY_AVX512_4VNNIW_FLAGS
},
1065 { STRING_COMMA_LEN ("noavx512_vpopcntdq"), CPU_ANY_AVX512_VPOPCNTDQ_FLAGS
},
1066 { STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS
},
1067 { STRING_COMMA_LEN ("noavx512_vnni"), CPU_ANY_AVX512_VNNI_FLAGS
},
1068 { STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS
},
1069 { STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS
},
1070 { STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS
},
1074 /* Like s_lcomm_internal in gas/read.c but the alignment string
1075 is allowed to be optional. */
1078 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
1085 && *input_line_pointer
== ',')
1087 align
= parse_align (needs_align
- 1);
1089 if (align
== (addressT
) -1)
1104 bss_alloc (symbolP
, size
, align
);
1109 pe_lcomm (int needs_align
)
1111 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
1115 const pseudo_typeS md_pseudo_table
[] =
1117 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1118 {"align", s_align_bytes
, 0},
1120 {"align", s_align_ptwo
, 0},
1122 {"arch", set_cpu_arch
, 0},
1126 {"lcomm", pe_lcomm
, 1},
1128 {"ffloat", float_cons
, 'f'},
1129 {"dfloat", float_cons
, 'd'},
1130 {"tfloat", float_cons
, 'x'},
1132 {"slong", signed_cons
, 4},
1133 {"noopt", s_ignore
, 0},
1134 {"optim", s_ignore
, 0},
1135 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
1136 {"code16", set_code_flag
, CODE_16BIT
},
1137 {"code32", set_code_flag
, CODE_32BIT
},
1139 {"code64", set_code_flag
, CODE_64BIT
},
1141 {"intel_syntax", set_intel_syntax
, 1},
1142 {"att_syntax", set_intel_syntax
, 0},
1143 {"intel_mnemonic", set_intel_mnemonic
, 1},
1144 {"att_mnemonic", set_intel_mnemonic
, 0},
1145 {"allow_index_reg", set_allow_index_reg
, 1},
1146 {"disallow_index_reg", set_allow_index_reg
, 0},
1147 {"sse_check", set_check
, 0},
1148 {"operand_check", set_check
, 1},
1149 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1150 {"largecomm", handle_large_common
, 0},
1152 {"file", dwarf2_directive_file
, 0},
1153 {"loc", dwarf2_directive_loc
, 0},
1154 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
1157 {"secrel32", pe_directive_secrel
, 0},
1162 /* For interface with expression (). */
1163 extern char *input_line_pointer
;
1165 /* Hash table for instruction mnemonic lookup. */
1166 static struct hash_control
*op_hash
;
1168 /* Hash table for register lookup. */
1169 static struct hash_control
*reg_hash
;
1171 /* Various efficient no-op patterns for aligning code labels.
1172 Note: Don't try to assemble the instructions in the comments.
1173 0L and 0w are not legal. */
1174 static const unsigned char f32_1
[] =
1176 static const unsigned char f32_2
[] =
1177 {0x66,0x90}; /* xchg %ax,%ax */
1178 static const unsigned char f32_3
[] =
1179 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
1180 static const unsigned char f32_4
[] =
1181 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1182 static const unsigned char f32_6
[] =
1183 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
1184 static const unsigned char f32_7
[] =
1185 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1186 static const unsigned char f16_3
[] =
1187 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
1188 static const unsigned char f16_4
[] =
1189 {0x8d,0xb4,0x00,0x00}; /* lea 0W(%si),%si */
1190 static const unsigned char jump_disp8
[] =
1191 {0xeb}; /* jmp disp8 */
1192 static const unsigned char jump32_disp32
[] =
1193 {0xe9}; /* jmp disp32 */
1194 static const unsigned char jump16_disp32
[] =
1195 {0x66,0xe9}; /* jmp disp32 */
1196 /* 32-bit NOPs patterns. */
1197 static const unsigned char *const f32_patt
[] = {
1198 f32_1
, f32_2
, f32_3
, f32_4
, NULL
, f32_6
, f32_7
1200 /* 16-bit NOPs patterns. */
1201 static const unsigned char *const f16_patt
[] = {
1202 f32_1
, f32_2
, f16_3
, f16_4
1204 /* nopl (%[re]ax) */
1205 static const unsigned char alt_3
[] =
1207 /* nopl 0(%[re]ax) */
1208 static const unsigned char alt_4
[] =
1209 {0x0f,0x1f,0x40,0x00};
1210 /* nopl 0(%[re]ax,%[re]ax,1) */
1211 static const unsigned char alt_5
[] =
1212 {0x0f,0x1f,0x44,0x00,0x00};
1213 /* nopw 0(%[re]ax,%[re]ax,1) */
1214 static const unsigned char alt_6
[] =
1215 {0x66,0x0f,0x1f,0x44,0x00,0x00};
1216 /* nopl 0L(%[re]ax) */
1217 static const unsigned char alt_7
[] =
1218 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1219 /* nopl 0L(%[re]ax,%[re]ax,1) */
1220 static const unsigned char alt_8
[] =
1221 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1222 /* nopw 0L(%[re]ax,%[re]ax,1) */
1223 static const unsigned char alt_9
[] =
1224 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1225 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1226 static const unsigned char alt_10
[] =
1227 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1228 /* data16 nopw %cs:0L(%eax,%eax,1) */
1229 static const unsigned char alt_11
[] =
1230 {0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1231 /* 32-bit and 64-bit NOPs patterns. */
1232 static const unsigned char *const alt_patt
[] = {
1233 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1234 alt_9
, alt_10
, alt_11
1237 /* Generate COUNT bytes of NOPs to WHERE from PATT with the maximum
1238 size of a single NOP instruction MAX_SINGLE_NOP_SIZE. */
1241 i386_output_nops (char *where
, const unsigned char *const *patt
,
1242 int count
, int max_single_nop_size
)
1245 /* Place the longer NOP first. */
1248 const unsigned char *nops
= patt
[max_single_nop_size
- 1];
1250 /* Use the smaller one if the requested one isn't available. */
1253 max_single_nop_size
--;
1254 nops
= patt
[max_single_nop_size
- 1];
1257 last
= count
% max_single_nop_size
;
1260 for (offset
= 0; offset
< count
; offset
+= max_single_nop_size
)
1261 memcpy (where
+ offset
, nops
, max_single_nop_size
);
1265 nops
= patt
[last
- 1];
1268 /* Use the smaller one plus one-byte NOP if the needed one
1271 nops
= patt
[last
- 1];
1272 memcpy (where
+ offset
, nops
, last
);
1273 where
[offset
+ last
] = *patt
[0];
1276 memcpy (where
+ offset
, nops
, last
);
1281 fits_in_imm7 (offsetT num
)
1283 return (num
& 0x7f) == num
;
1287 fits_in_imm31 (offsetT num
)
1289 return (num
& 0x7fffffff) == num
;
1292 /* Generate COUNT bytes of NOPs to WHERE with the maximum size of a
1293 single NOP instruction LIMIT. */
1296 i386_generate_nops (fragS
*fragP
, char *where
, offsetT count
, int limit
)
1298 const unsigned char *const *patt
= NULL
;
1299 int max_single_nop_size
;
1300 /* Maximum number of NOPs before switching to jump over NOPs. */
1301 int max_number_of_nops
;
1303 switch (fragP
->fr_type
)
1312 /* We need to decide which NOP sequence to use for 32bit and
1313 64bit. When -mtune= is used:
1315 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1316 PROCESSOR_GENERIC32, f32_patt will be used.
1317 2. For the rest, alt_patt will be used.
1319 When -mtune= isn't used, alt_patt will be used if
1320 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1323 When -march= or .arch is used, we can't use anything beyond
1324 cpu_arch_isa_flags. */
1326 if (flag_code
== CODE_16BIT
)
1329 max_single_nop_size
= sizeof (f16_patt
) / sizeof (f16_patt
[0]);
1330 /* Limit number of NOPs to 2 in 16-bit mode. */
1331 max_number_of_nops
= 2;
1335 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1337 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1338 switch (cpu_arch_tune
)
1340 case PROCESSOR_UNKNOWN
:
1341 /* We use cpu_arch_isa_flags to check if we SHOULD
1342 optimize with nops. */
1343 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1348 case PROCESSOR_PENTIUM4
:
1349 case PROCESSOR_NOCONA
:
1350 case PROCESSOR_CORE
:
1351 case PROCESSOR_CORE2
:
1352 case PROCESSOR_COREI7
:
1353 case PROCESSOR_L1OM
:
1354 case PROCESSOR_K1OM
:
1355 case PROCESSOR_GENERIC64
:
1357 case PROCESSOR_ATHLON
:
1359 case PROCESSOR_AMDFAM10
:
1361 case PROCESSOR_ZNVER
:
1365 case PROCESSOR_I386
:
1366 case PROCESSOR_I486
:
1367 case PROCESSOR_PENTIUM
:
1368 case PROCESSOR_PENTIUMPRO
:
1369 case PROCESSOR_IAMCU
:
1370 case PROCESSOR_GENERIC32
:
1377 switch (fragP
->tc_frag_data
.tune
)
1379 case PROCESSOR_UNKNOWN
:
1380 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1381 PROCESSOR_UNKNOWN. */
1385 case PROCESSOR_I386
:
1386 case PROCESSOR_I486
:
1387 case PROCESSOR_PENTIUM
:
1388 case PROCESSOR_IAMCU
:
1390 case PROCESSOR_ATHLON
:
1392 case PROCESSOR_AMDFAM10
:
1394 case PROCESSOR_ZNVER
:
1396 case PROCESSOR_GENERIC32
:
1397 /* We use cpu_arch_isa_flags to check if we CAN optimize
1399 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1404 case PROCESSOR_PENTIUMPRO
:
1405 case PROCESSOR_PENTIUM4
:
1406 case PROCESSOR_NOCONA
:
1407 case PROCESSOR_CORE
:
1408 case PROCESSOR_CORE2
:
1409 case PROCESSOR_COREI7
:
1410 case PROCESSOR_L1OM
:
1411 case PROCESSOR_K1OM
:
1412 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1417 case PROCESSOR_GENERIC64
:
1423 if (patt
== f32_patt
)
1425 max_single_nop_size
= sizeof (f32_patt
) / sizeof (f32_patt
[0]);
1426 /* Limit number of NOPs to 2 for older processors. */
1427 max_number_of_nops
= 2;
1431 max_single_nop_size
= sizeof (alt_patt
) / sizeof (alt_patt
[0]);
1432 /* Limit number of NOPs to 7 for newer processors. */
1433 max_number_of_nops
= 7;
1438 limit
= max_single_nop_size
;
1440 if (fragP
->fr_type
== rs_fill_nop
)
1442 /* Output NOPs for .nop directive. */
1443 if (limit
> max_single_nop_size
)
1445 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1446 _("invalid single nop size: %d "
1447 "(expect within [0, %d])"),
1448 limit
, max_single_nop_size
);
1453 fragP
->fr_var
= count
;
1455 if ((count
/ max_single_nop_size
) > max_number_of_nops
)
1457 /* Generate jump over NOPs. */
1458 offsetT disp
= count
- 2;
1459 if (fits_in_imm7 (disp
))
1461 /* Use "jmp disp8" if possible. */
1463 where
[0] = jump_disp8
[0];
1469 unsigned int size_of_jump
;
1471 if (flag_code
== CODE_16BIT
)
1473 where
[0] = jump16_disp32
[0];
1474 where
[1] = jump16_disp32
[1];
1479 where
[0] = jump32_disp32
[0];
1483 count
-= size_of_jump
+ 4;
1484 if (!fits_in_imm31 (count
))
1486 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1487 _("jump over nop padding out of range"));
1491 md_number_to_chars (where
+ size_of_jump
, count
, 4);
1492 where
+= size_of_jump
+ 4;
1496 /* Generate multiple NOPs. */
1497 i386_output_nops (where
, patt
, count
, limit
);
1501 operand_type_all_zero (const union i386_operand_type
*x
)
1503 switch (ARRAY_SIZE(x
->array
))
1514 return !x
->array
[0];
1521 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1523 switch (ARRAY_SIZE(x
->array
))
1541 operand_type_equal (const union i386_operand_type
*x
,
1542 const union i386_operand_type
*y
)
1544 switch (ARRAY_SIZE(x
->array
))
1547 if (x
->array
[2] != y
->array
[2])
1551 if (x
->array
[1] != y
->array
[1])
1555 return x
->array
[0] == y
->array
[0];
1563 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1565 switch (ARRAY_SIZE(x
->array
))
1580 return !x
->array
[0];
1587 cpu_flags_equal (const union i386_cpu_flags
*x
,
1588 const union i386_cpu_flags
*y
)
1590 switch (ARRAY_SIZE(x
->array
))
1593 if (x
->array
[3] != y
->array
[3])
1597 if (x
->array
[2] != y
->array
[2])
1601 if (x
->array
[1] != y
->array
[1])
1605 return x
->array
[0] == y
->array
[0];
1613 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1615 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1616 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1619 static INLINE i386_cpu_flags
1620 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1622 switch (ARRAY_SIZE (x
.array
))
1625 x
.array
[3] &= y
.array
[3];
1628 x
.array
[2] &= y
.array
[2];
1631 x
.array
[1] &= y
.array
[1];
1634 x
.array
[0] &= y
.array
[0];
1642 static INLINE i386_cpu_flags
1643 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1645 switch (ARRAY_SIZE (x
.array
))
1648 x
.array
[3] |= y
.array
[3];
1651 x
.array
[2] |= y
.array
[2];
1654 x
.array
[1] |= y
.array
[1];
1657 x
.array
[0] |= y
.array
[0];
1665 static INLINE i386_cpu_flags
1666 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1668 switch (ARRAY_SIZE (x
.array
))
1671 x
.array
[3] &= ~y
.array
[3];
1674 x
.array
[2] &= ~y
.array
[2];
1677 x
.array
[1] &= ~y
.array
[1];
1680 x
.array
[0] &= ~y
.array
[0];
1688 #define CPU_FLAGS_ARCH_MATCH 0x1
1689 #define CPU_FLAGS_64BIT_MATCH 0x2
1690 #define CPU_FLAGS_AES_MATCH 0x4
1691 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1692 #define CPU_FLAGS_AVX_MATCH 0x10
1694 #define CPU_FLAGS_32BIT_MATCH \
1695 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1696 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1697 #define CPU_FLAGS_PERFECT_MATCH \
1698 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1700 /* Return CPU flags match bits. */
1703 cpu_flags_match (const insn_template
*t
)
1705 i386_cpu_flags x
= t
->cpu_flags
;
1706 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1708 x
.bitfield
.cpu64
= 0;
1709 x
.bitfield
.cpuno64
= 0;
1711 if (cpu_flags_all_zero (&x
))
1713 /* This instruction is available on all archs. */
1714 match
|= CPU_FLAGS_32BIT_MATCH
;
1718 /* This instruction is available only on some archs. */
1719 i386_cpu_flags cpu
= cpu_arch_flags
;
1721 cpu
= cpu_flags_and (x
, cpu
);
1722 if (!cpu_flags_all_zero (&cpu
))
1724 if (x
.bitfield
.cpuavx
)
1726 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1727 if (cpu
.bitfield
.cpuavx
)
1729 /* Check SSE2AVX. */
1730 if (!t
->opcode_modifier
.sse2avx
|| sse2avx
)
1732 match
|= (CPU_FLAGS_ARCH_MATCH
1733 | CPU_FLAGS_AVX_MATCH
);
1735 if (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1736 match
|= CPU_FLAGS_AES_MATCH
;
1738 if (!x
.bitfield
.cpupclmul
1739 || cpu
.bitfield
.cpupclmul
)
1740 match
|= CPU_FLAGS_PCLMUL_MATCH
;
1744 match
|= CPU_FLAGS_ARCH_MATCH
;
1746 else if (x
.bitfield
.cpuavx512vl
)
1748 /* Match AVX512VL. */
1749 if (cpu
.bitfield
.cpuavx512vl
)
1751 /* Need another match. */
1752 cpu
.bitfield
.cpuavx512vl
= 0;
1753 if (!cpu_flags_all_zero (&cpu
))
1754 match
|= CPU_FLAGS_32BIT_MATCH
;
1756 match
|= CPU_FLAGS_ARCH_MATCH
;
1759 match
|= CPU_FLAGS_ARCH_MATCH
;
1762 match
|= CPU_FLAGS_32BIT_MATCH
;
1768 static INLINE i386_operand_type
1769 operand_type_and (i386_operand_type x
, i386_operand_type y
)
1771 switch (ARRAY_SIZE (x
.array
))
1774 x
.array
[2] &= y
.array
[2];
1777 x
.array
[1] &= y
.array
[1];
1780 x
.array
[0] &= y
.array
[0];
1788 static INLINE i386_operand_type
1789 operand_type_and_not (i386_operand_type x
, i386_operand_type y
)
1791 switch (ARRAY_SIZE (x
.array
))
1794 x
.array
[2] &= ~y
.array
[2];
1797 x
.array
[1] &= ~y
.array
[1];
1800 x
.array
[0] &= ~y
.array
[0];
1808 static INLINE i386_operand_type
1809 operand_type_or (i386_operand_type x
, i386_operand_type y
)
1811 switch (ARRAY_SIZE (x
.array
))
1814 x
.array
[2] |= y
.array
[2];
1817 x
.array
[1] |= y
.array
[1];
1820 x
.array
[0] |= y
.array
[0];
1828 static INLINE i386_operand_type
1829 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
1831 switch (ARRAY_SIZE (x
.array
))
1834 x
.array
[2] ^= y
.array
[2];
1837 x
.array
[1] ^= y
.array
[1];
1840 x
.array
[0] ^= y
.array
[0];
1848 static const i386_operand_type acc32
= OPERAND_TYPE_ACC32
;
1849 static const i386_operand_type acc64
= OPERAND_TYPE_ACC64
;
1850 static const i386_operand_type control
= OPERAND_TYPE_CONTROL
;
1851 static const i386_operand_type inoutportreg
1852 = OPERAND_TYPE_INOUTPORTREG
;
1853 static const i386_operand_type reg16_inoutportreg
1854 = OPERAND_TYPE_REG16_INOUTPORTREG
;
1855 static const i386_operand_type disp16
= OPERAND_TYPE_DISP16
;
1856 static const i386_operand_type disp32
= OPERAND_TYPE_DISP32
;
1857 static const i386_operand_type disp32s
= OPERAND_TYPE_DISP32S
;
1858 static const i386_operand_type disp16_32
= OPERAND_TYPE_DISP16_32
;
1859 static const i386_operand_type anydisp
1860 = OPERAND_TYPE_ANYDISP
;
1861 static const i386_operand_type regxmm
= OPERAND_TYPE_REGXMM
;
1862 static const i386_operand_type regmask
= OPERAND_TYPE_REGMASK
;
1863 static const i386_operand_type imm8
= OPERAND_TYPE_IMM8
;
1864 static const i386_operand_type imm8s
= OPERAND_TYPE_IMM8S
;
1865 static const i386_operand_type imm16
= OPERAND_TYPE_IMM16
;
1866 static const i386_operand_type imm32
= OPERAND_TYPE_IMM32
;
1867 static const i386_operand_type imm32s
= OPERAND_TYPE_IMM32S
;
1868 static const i386_operand_type imm64
= OPERAND_TYPE_IMM64
;
1869 static const i386_operand_type imm16_32
= OPERAND_TYPE_IMM16_32
;
1870 static const i386_operand_type imm16_32s
= OPERAND_TYPE_IMM16_32S
;
1871 static const i386_operand_type imm16_32_32s
= OPERAND_TYPE_IMM16_32_32S
;
1872 static const i386_operand_type vec_imm4
= OPERAND_TYPE_VEC_IMM4
;
1883 operand_type_check (i386_operand_type t
, enum operand_type c
)
1888 return t
.bitfield
.reg
;
1891 return (t
.bitfield
.imm8
1895 || t
.bitfield
.imm32s
1896 || t
.bitfield
.imm64
);
1899 return (t
.bitfield
.disp8
1900 || t
.bitfield
.disp16
1901 || t
.bitfield
.disp32
1902 || t
.bitfield
.disp32s
1903 || t
.bitfield
.disp64
);
1906 return (t
.bitfield
.disp8
1907 || t
.bitfield
.disp16
1908 || t
.bitfield
.disp32
1909 || t
.bitfield
.disp32s
1910 || t
.bitfield
.disp64
1911 || t
.bitfield
.baseindex
);
1920 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit on
1921 operand J for instruction template T. */
1924 match_reg_size (const insn_template
*t
, unsigned int j
)
1926 return !((i
.types
[j
].bitfield
.byte
1927 && !t
->operand_types
[j
].bitfield
.byte
)
1928 || (i
.types
[j
].bitfield
.word
1929 && !t
->operand_types
[j
].bitfield
.word
)
1930 || (i
.types
[j
].bitfield
.dword
1931 && !t
->operand_types
[j
].bitfield
.dword
)
1932 || (i
.types
[j
].bitfield
.qword
1933 && !t
->operand_types
[j
].bitfield
.qword
)
1934 || (i
.types
[j
].bitfield
.tbyte
1935 && !t
->operand_types
[j
].bitfield
.tbyte
));
1938 /* Return 1 if there is no conflict in SIMD register on
1939 operand J for instruction template T. */
1942 match_simd_size (const insn_template
*t
, unsigned int j
)
1944 return !((i
.types
[j
].bitfield
.xmmword
1945 && !t
->operand_types
[j
].bitfield
.xmmword
)
1946 || (i
.types
[j
].bitfield
.ymmword
1947 && !t
->operand_types
[j
].bitfield
.ymmword
)
1948 || (i
.types
[j
].bitfield
.zmmword
1949 && !t
->operand_types
[j
].bitfield
.zmmword
));
1952 /* Return 1 if there is no conflict in any size on operand J for
1953 instruction template T. */
1956 match_mem_size (const insn_template
*t
, unsigned int j
)
1958 return (match_reg_size (t
, j
)
1959 && !((i
.types
[j
].bitfield
.unspecified
1961 && !t
->operand_types
[j
].bitfield
.unspecified
)
1962 || (i
.types
[j
].bitfield
.fword
1963 && !t
->operand_types
[j
].bitfield
.fword
)
1964 /* For scalar opcode templates to allow register and memory
1965 operands at the same time, some special casing is needed
1967 || ((t
->operand_types
[j
].bitfield
.regsimd
1968 && !t
->opcode_modifier
.broadcast
1969 && (t
->operand_types
[j
].bitfield
.dword
1970 || t
->operand_types
[j
].bitfield
.qword
))
1971 ? (i
.types
[j
].bitfield
.xmmword
1972 || i
.types
[j
].bitfield
.ymmword
1973 || i
.types
[j
].bitfield
.zmmword
)
1974 : !match_simd_size(t
, j
))));
1977 /* Return 1 if there is no size conflict on any operands for
1978 instruction template T. */
1981 operand_size_match (const insn_template
*t
)
1986 /* Don't check jump instructions. */
1987 if (t
->opcode_modifier
.jump
1988 || t
->opcode_modifier
.jumpbyte
1989 || t
->opcode_modifier
.jumpdword
1990 || t
->opcode_modifier
.jumpintersegment
)
1993 /* Check memory and accumulator operand size. */
1994 for (j
= 0; j
< i
.operands
; j
++)
1996 if (!i
.types
[j
].bitfield
.reg
&& !i
.types
[j
].bitfield
.regsimd
1997 && t
->operand_types
[j
].bitfield
.anysize
)
2000 if (t
->operand_types
[j
].bitfield
.reg
2001 && !match_reg_size (t
, j
))
2007 if (t
->operand_types
[j
].bitfield
.regsimd
2008 && !match_simd_size (t
, j
))
2014 if (t
->operand_types
[j
].bitfield
.acc
2015 && (!match_reg_size (t
, j
) || !match_simd_size (t
, j
)))
2021 if (i
.types
[j
].bitfield
.mem
&& !match_mem_size (t
, j
))
2030 else if (!t
->opcode_modifier
.d
)
2033 i
.error
= operand_size_mismatch
;
2037 /* Check reverse. */
2038 gas_assert (i
.operands
== 2);
2041 for (j
= 0; j
< 2; j
++)
2043 if ((t
->operand_types
[j
].bitfield
.reg
2044 || t
->operand_types
[j
].bitfield
.acc
)
2045 && !match_reg_size (t
, j
? 0 : 1))
2048 if (i
.types
[j
].bitfield
.mem
2049 && !match_mem_size (t
, j
? 0 : 1))
2057 operand_type_match (i386_operand_type overlap
,
2058 i386_operand_type given
)
2060 i386_operand_type temp
= overlap
;
2062 temp
.bitfield
.jumpabsolute
= 0;
2063 temp
.bitfield
.unspecified
= 0;
2064 temp
.bitfield
.byte
= 0;
2065 temp
.bitfield
.word
= 0;
2066 temp
.bitfield
.dword
= 0;
2067 temp
.bitfield
.fword
= 0;
2068 temp
.bitfield
.qword
= 0;
2069 temp
.bitfield
.tbyte
= 0;
2070 temp
.bitfield
.xmmword
= 0;
2071 temp
.bitfield
.ymmword
= 0;
2072 temp
.bitfield
.zmmword
= 0;
2073 if (operand_type_all_zero (&temp
))
2076 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
2077 && given
.bitfield
.jumpabsolute
== overlap
.bitfield
.jumpabsolute
)
2081 i
.error
= operand_type_mismatch
;
2085 /* If given types g0 and g1 are registers they must be of the same type
2086 unless the expected operand type register overlap is null.
2087 Memory operand size of certain SIMD instructions is also being checked
2091 operand_type_register_match (i386_operand_type g0
,
2092 i386_operand_type t0
,
2093 i386_operand_type g1
,
2094 i386_operand_type t1
)
2096 if (!g0
.bitfield
.reg
2097 && !g0
.bitfield
.regsimd
2098 && (!operand_type_check (g0
, anymem
)
2099 || g0
.bitfield
.unspecified
2100 || !t0
.bitfield
.regsimd
))
2103 if (!g1
.bitfield
.reg
2104 && !g1
.bitfield
.regsimd
2105 && (!operand_type_check (g1
, anymem
)
2106 || g1
.bitfield
.unspecified
2107 || !t1
.bitfield
.regsimd
))
2110 if (g0
.bitfield
.byte
== g1
.bitfield
.byte
2111 && g0
.bitfield
.word
== g1
.bitfield
.word
2112 && g0
.bitfield
.dword
== g1
.bitfield
.dword
2113 && g0
.bitfield
.qword
== g1
.bitfield
.qword
2114 && g0
.bitfield
.xmmword
== g1
.bitfield
.xmmword
2115 && g0
.bitfield
.ymmword
== g1
.bitfield
.ymmword
2116 && g0
.bitfield
.zmmword
== g1
.bitfield
.zmmword
)
2119 if (!(t0
.bitfield
.byte
& t1
.bitfield
.byte
)
2120 && !(t0
.bitfield
.word
& t1
.bitfield
.word
)
2121 && !(t0
.bitfield
.dword
& t1
.bitfield
.dword
)
2122 && !(t0
.bitfield
.qword
& t1
.bitfield
.qword
)
2123 && !(t0
.bitfield
.xmmword
& t1
.bitfield
.xmmword
)
2124 && !(t0
.bitfield
.ymmword
& t1
.bitfield
.ymmword
)
2125 && !(t0
.bitfield
.zmmword
& t1
.bitfield
.zmmword
))
2128 i
.error
= register_type_mismatch
;
2133 static INLINE
unsigned int
2134 register_number (const reg_entry
*r
)
2136 unsigned int nr
= r
->reg_num
;
2138 if (r
->reg_flags
& RegRex
)
2141 if (r
->reg_flags
& RegVRex
)
2147 static INLINE
unsigned int
2148 mode_from_disp_size (i386_operand_type t
)
2150 if (t
.bitfield
.disp8
)
2152 else if (t
.bitfield
.disp16
2153 || t
.bitfield
.disp32
2154 || t
.bitfield
.disp32s
)
2161 fits_in_signed_byte (addressT num
)
2163 return num
+ 0x80 <= 0xff;
2167 fits_in_unsigned_byte (addressT num
)
2173 fits_in_unsigned_word (addressT num
)
2175 return num
<= 0xffff;
2179 fits_in_signed_word (addressT num
)
2181 return num
+ 0x8000 <= 0xffff;
2185 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED
)
2190 return num
+ 0x80000000 <= 0xffffffff;
2192 } /* fits_in_signed_long() */
2195 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED
)
2200 return num
<= 0xffffffff;
2202 } /* fits_in_unsigned_long() */
2205 fits_in_disp8 (offsetT num
)
2207 int shift
= i
.memshift
;
2213 mask
= (1 << shift
) - 1;
2215 /* Return 0 if NUM isn't properly aligned. */
2219 /* Check if NUM will fit in 8bit after shift. */
2220 return fits_in_signed_byte (num
>> shift
);
2224 fits_in_imm4 (offsetT num
)
2226 return (num
& 0xf) == num
;
2229 static i386_operand_type
2230 smallest_imm_type (offsetT num
)
2232 i386_operand_type t
;
2234 operand_type_set (&t
, 0);
2235 t
.bitfield
.imm64
= 1;
2237 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
2239 /* This code is disabled on the 486 because all the Imm1 forms
2240 in the opcode table are slower on the i486. They're the
2241 versions with the implicitly specified single-position
2242 displacement, which has another syntax if you really want to
2244 t
.bitfield
.imm1
= 1;
2245 t
.bitfield
.imm8
= 1;
2246 t
.bitfield
.imm8s
= 1;
2247 t
.bitfield
.imm16
= 1;
2248 t
.bitfield
.imm32
= 1;
2249 t
.bitfield
.imm32s
= 1;
2251 else if (fits_in_signed_byte (num
))
2253 t
.bitfield
.imm8
= 1;
2254 t
.bitfield
.imm8s
= 1;
2255 t
.bitfield
.imm16
= 1;
2256 t
.bitfield
.imm32
= 1;
2257 t
.bitfield
.imm32s
= 1;
2259 else if (fits_in_unsigned_byte (num
))
2261 t
.bitfield
.imm8
= 1;
2262 t
.bitfield
.imm16
= 1;
2263 t
.bitfield
.imm32
= 1;
2264 t
.bitfield
.imm32s
= 1;
2266 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
2268 t
.bitfield
.imm16
= 1;
2269 t
.bitfield
.imm32
= 1;
2270 t
.bitfield
.imm32s
= 1;
2272 else if (fits_in_signed_long (num
))
2274 t
.bitfield
.imm32
= 1;
2275 t
.bitfield
.imm32s
= 1;
2277 else if (fits_in_unsigned_long (num
))
2278 t
.bitfield
.imm32
= 1;
2284 offset_in_range (offsetT val
, int size
)
2290 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
2291 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
2292 case 4: mask
= ((addressT
) 2 << 31) - 1; break;
2294 case 8: mask
= ((addressT
) 2 << 63) - 1; break;
2300 /* If BFD64, sign extend val for 32bit address mode. */
2301 if (flag_code
!= CODE_64BIT
2302 || i
.prefix
[ADDR_PREFIX
])
2303 if ((val
& ~(((addressT
) 2 << 31) - 1)) == 0)
2304 val
= (val
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
2307 if ((val
& ~mask
) != 0 && (val
& ~mask
) != ~mask
)
2309 char buf1
[40], buf2
[40];
2311 sprint_value (buf1
, val
);
2312 sprint_value (buf2
, val
& mask
);
2313 as_warn (_("%s shortened to %s"), buf1
, buf2
);
2328 a. PREFIX_EXIST if attempting to add a prefix where one from the
2329 same class already exists.
2330 b. PREFIX_LOCK if lock prefix is added.
2331 c. PREFIX_REP if rep/repne prefix is added.
2332 d. PREFIX_DS if ds prefix is added.
2333 e. PREFIX_OTHER if other prefix is added.
2336 static enum PREFIX_GROUP
2337 add_prefix (unsigned int prefix
)
2339 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2342 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2343 && flag_code
== CODE_64BIT
)
2345 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2346 || ((i
.prefix
[REX_PREFIX
] & (REX_R
| REX_X
| REX_B
))
2347 && (prefix
& (REX_R
| REX_X
| REX_B
))))
2358 case DS_PREFIX_OPCODE
:
2361 case CS_PREFIX_OPCODE
:
2362 case ES_PREFIX_OPCODE
:
2363 case FS_PREFIX_OPCODE
:
2364 case GS_PREFIX_OPCODE
:
2365 case SS_PREFIX_OPCODE
:
2369 case REPNE_PREFIX_OPCODE
:
2370 case REPE_PREFIX_OPCODE
:
2375 case LOCK_PREFIX_OPCODE
:
2384 case ADDR_PREFIX_OPCODE
:
2388 case DATA_PREFIX_OPCODE
:
2392 if (i
.prefix
[q
] != 0)
2400 i
.prefix
[q
] |= prefix
;
2403 as_bad (_("same type of prefix used twice"));
2409 update_code_flag (int value
, int check
)
2411 PRINTF_LIKE ((*as_error
));
2413 flag_code
= (enum flag_code
) value
;
2414 if (flag_code
== CODE_64BIT
)
2416 cpu_arch_flags
.bitfield
.cpu64
= 1;
2417 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2421 cpu_arch_flags
.bitfield
.cpu64
= 0;
2422 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2424 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2427 as_error
= as_fatal
;
2430 (*as_error
) (_("64bit mode not supported on `%s'."),
2431 cpu_arch_name
? cpu_arch_name
: default_arch
);
2433 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2436 as_error
= as_fatal
;
2439 (*as_error
) (_("32bit mode not supported on `%s'."),
2440 cpu_arch_name
? cpu_arch_name
: default_arch
);
2442 stackop_size
= '\0';
2446 set_code_flag (int value
)
2448 update_code_flag (value
, 0);
2452 set_16bit_gcc_code_flag (int new_code_flag
)
2454 flag_code
= (enum flag_code
) new_code_flag
;
2455 if (flag_code
!= CODE_16BIT
)
2457 cpu_arch_flags
.bitfield
.cpu64
= 0;
2458 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2459 stackop_size
= LONG_MNEM_SUFFIX
;
2463 set_intel_syntax (int syntax_flag
)
2465 /* Find out if register prefixing is specified. */
2466 int ask_naked_reg
= 0;
2469 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2472 int e
= get_symbol_name (&string
);
2474 if (strcmp (string
, "prefix") == 0)
2476 else if (strcmp (string
, "noprefix") == 0)
2479 as_bad (_("bad argument to syntax directive."));
2480 (void) restore_line_pointer (e
);
2482 demand_empty_rest_of_line ();
2484 intel_syntax
= syntax_flag
;
2486 if (ask_naked_reg
== 0)
2487 allow_naked_reg
= (intel_syntax
2488 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2490 allow_naked_reg
= (ask_naked_reg
< 0);
2492 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2494 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2495 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2496 register_prefix
= allow_naked_reg
? "" : "%";
2500 set_intel_mnemonic (int mnemonic_flag
)
2502 intel_mnemonic
= mnemonic_flag
;
2506 set_allow_index_reg (int flag
)
2508 allow_index_reg
= flag
;
2512 set_check (int what
)
2514 enum check_kind
*kind
;
2519 kind
= &operand_check
;
2530 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2533 int e
= get_symbol_name (&string
);
2535 if (strcmp (string
, "none") == 0)
2537 else if (strcmp (string
, "warning") == 0)
2538 *kind
= check_warning
;
2539 else if (strcmp (string
, "error") == 0)
2540 *kind
= check_error
;
2542 as_bad (_("bad argument to %s_check directive."), str
);
2543 (void) restore_line_pointer (e
);
2546 as_bad (_("missing argument for %s_check directive"), str
);
2548 demand_empty_rest_of_line ();
2552 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2553 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2555 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2556 static const char *arch
;
2558 /* Intel LIOM is only supported on ELF. */
2564 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2565 use default_arch. */
2566 arch
= cpu_arch_name
;
2568 arch
= default_arch
;
2571 /* If we are targeting Intel MCU, we must enable it. */
2572 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_IAMCU
2573 || new_flag
.bitfield
.cpuiamcu
)
2576 /* If we are targeting Intel L1OM, we must enable it. */
2577 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_L1OM
2578 || new_flag
.bitfield
.cpul1om
)
2581 /* If we are targeting Intel K1OM, we must enable it. */
2582 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_K1OM
2583 || new_flag
.bitfield
.cpuk1om
)
2586 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2591 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2595 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2598 int e
= get_symbol_name (&string
);
2600 i386_cpu_flags flags
;
2602 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2604 if (strcmp (string
, cpu_arch
[j
].name
) == 0)
2606 check_cpu_arch_compatible (string
, cpu_arch
[j
].flags
);
2610 cpu_arch_name
= cpu_arch
[j
].name
;
2611 cpu_sub_arch_name
= NULL
;
2612 cpu_arch_flags
= cpu_arch
[j
].flags
;
2613 if (flag_code
== CODE_64BIT
)
2615 cpu_arch_flags
.bitfield
.cpu64
= 1;
2616 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2620 cpu_arch_flags
.bitfield
.cpu64
= 0;
2621 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2623 cpu_arch_isa
= cpu_arch
[j
].type
;
2624 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
2625 if (!cpu_arch_tune_set
)
2627 cpu_arch_tune
= cpu_arch_isa
;
2628 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2633 flags
= cpu_flags_or (cpu_arch_flags
,
2636 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2638 if (cpu_sub_arch_name
)
2640 char *name
= cpu_sub_arch_name
;
2641 cpu_sub_arch_name
= concat (name
,
2643 (const char *) NULL
);
2647 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
2648 cpu_arch_flags
= flags
;
2649 cpu_arch_isa_flags
= flags
;
2651 (void) restore_line_pointer (e
);
2652 demand_empty_rest_of_line ();
2657 if (*string
== '.' && j
>= ARRAY_SIZE (cpu_arch
))
2659 /* Disable an ISA extension. */
2660 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
2661 if (strcmp (string
+ 1, cpu_noarch
[j
].name
) == 0)
2663 flags
= cpu_flags_and_not (cpu_arch_flags
,
2664 cpu_noarch
[j
].flags
);
2665 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2667 if (cpu_sub_arch_name
)
2669 char *name
= cpu_sub_arch_name
;
2670 cpu_sub_arch_name
= concat (name
, string
,
2671 (const char *) NULL
);
2675 cpu_sub_arch_name
= xstrdup (string
);
2676 cpu_arch_flags
= flags
;
2677 cpu_arch_isa_flags
= flags
;
2679 (void) restore_line_pointer (e
);
2680 demand_empty_rest_of_line ();
2684 j
= ARRAY_SIZE (cpu_arch
);
2687 if (j
>= ARRAY_SIZE (cpu_arch
))
2688 as_bad (_("no such architecture: `%s'"), string
);
2690 *input_line_pointer
= e
;
2693 as_bad (_("missing cpu architecture"));
2695 no_cond_jump_promotion
= 0;
2696 if (*input_line_pointer
== ','
2697 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2702 ++input_line_pointer
;
2703 e
= get_symbol_name (&string
);
2705 if (strcmp (string
, "nojumps") == 0)
2706 no_cond_jump_promotion
= 1;
2707 else if (strcmp (string
, "jumps") == 0)
2710 as_bad (_("no such architecture modifier: `%s'"), string
);
2712 (void) restore_line_pointer (e
);
2715 demand_empty_rest_of_line ();
2718 enum bfd_architecture
2721 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2723 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2724 || flag_code
!= CODE_64BIT
)
2725 as_fatal (_("Intel L1OM is 64bit ELF only"));
2726 return bfd_arch_l1om
;
2728 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2730 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2731 || flag_code
!= CODE_64BIT
)
2732 as_fatal (_("Intel K1OM is 64bit ELF only"));
2733 return bfd_arch_k1om
;
2735 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2737 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2738 || flag_code
== CODE_64BIT
)
2739 as_fatal (_("Intel MCU is 32bit ELF only"));
2740 return bfd_arch_iamcu
;
2743 return bfd_arch_i386
;
2749 if (!strncmp (default_arch
, "x86_64", 6))
2751 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2753 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2754 || default_arch
[6] != '\0')
2755 as_fatal (_("Intel L1OM is 64bit ELF only"));
2756 return bfd_mach_l1om
;
2758 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2760 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2761 || default_arch
[6] != '\0')
2762 as_fatal (_("Intel K1OM is 64bit ELF only"));
2763 return bfd_mach_k1om
;
2765 else if (default_arch
[6] == '\0')
2766 return bfd_mach_x86_64
;
2768 return bfd_mach_x64_32
;
2770 else if (!strcmp (default_arch
, "i386")
2771 || !strcmp (default_arch
, "iamcu"))
2773 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2775 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
)
2776 as_fatal (_("Intel MCU is 32bit ELF only"));
2777 return bfd_mach_i386_iamcu
;
2780 return bfd_mach_i386_i386
;
2783 as_fatal (_("unknown architecture"));
2789 const char *hash_err
;
2791 /* Support pseudo prefixes like {disp32}. */
2792 lex_type
['{'] = LEX_BEGIN_NAME
;
2794 /* Initialize op_hash hash table. */
2795 op_hash
= hash_new ();
2798 const insn_template
*optab
;
2799 templates
*core_optab
;
2801 /* Setup for loop. */
2803 core_optab
= XNEW (templates
);
2804 core_optab
->start
= optab
;
2809 if (optab
->name
== NULL
2810 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
2812 /* different name --> ship out current template list;
2813 add to hash table; & begin anew. */
2814 core_optab
->end
= optab
;
2815 hash_err
= hash_insert (op_hash
,
2817 (void *) core_optab
);
2820 as_fatal (_("can't hash %s: %s"),
2824 if (optab
->name
== NULL
)
2826 core_optab
= XNEW (templates
);
2827 core_optab
->start
= optab
;
2832 /* Initialize reg_hash hash table. */
2833 reg_hash
= hash_new ();
2835 const reg_entry
*regtab
;
2836 unsigned int regtab_size
= i386_regtab_size
;
2838 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
2840 hash_err
= hash_insert (reg_hash
, regtab
->reg_name
, (void *) regtab
);
2842 as_fatal (_("can't hash %s: %s"),
2848 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2853 for (c
= 0; c
< 256; c
++)
2858 mnemonic_chars
[c
] = c
;
2859 register_chars
[c
] = c
;
2860 operand_chars
[c
] = c
;
2862 else if (ISLOWER (c
))
2864 mnemonic_chars
[c
] = c
;
2865 register_chars
[c
] = c
;
2866 operand_chars
[c
] = c
;
2868 else if (ISUPPER (c
))
2870 mnemonic_chars
[c
] = TOLOWER (c
);
2871 register_chars
[c
] = mnemonic_chars
[c
];
2872 operand_chars
[c
] = c
;
2874 else if (c
== '{' || c
== '}')
2876 mnemonic_chars
[c
] = c
;
2877 operand_chars
[c
] = c
;
2880 if (ISALPHA (c
) || ISDIGIT (c
))
2881 identifier_chars
[c
] = c
;
2884 identifier_chars
[c
] = c
;
2885 operand_chars
[c
] = c
;
2890 identifier_chars
['@'] = '@';
2893 identifier_chars
['?'] = '?';
2894 operand_chars
['?'] = '?';
2896 digit_chars
['-'] = '-';
2897 mnemonic_chars
['_'] = '_';
2898 mnemonic_chars
['-'] = '-';
2899 mnemonic_chars
['.'] = '.';
2900 identifier_chars
['_'] = '_';
2901 identifier_chars
['.'] = '.';
2903 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
2904 operand_chars
[(unsigned char) *p
] = *p
;
2907 if (flag_code
== CODE_64BIT
)
2909 #if defined (OBJ_COFF) && defined (TE_PE)
2910 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
2913 x86_dwarf2_return_column
= 16;
2915 x86_cie_data_alignment
= -8;
2919 x86_dwarf2_return_column
= 8;
2920 x86_cie_data_alignment
= -4;
2925 i386_print_statistics (FILE *file
)
2927 hash_print_statistics (file
, "i386 opcode", op_hash
);
2928 hash_print_statistics (file
, "i386 register", reg_hash
);
2933 /* Debugging routines for md_assemble. */
2934 static void pte (insn_template
*);
2935 static void pt (i386_operand_type
);
2936 static void pe (expressionS
*);
2937 static void ps (symbolS
*);
2940 pi (char *line
, i386_insn
*x
)
2944 fprintf (stdout
, "%s: template ", line
);
2946 fprintf (stdout
, " address: base %s index %s scale %x\n",
2947 x
->base_reg
? x
->base_reg
->reg_name
: "none",
2948 x
->index_reg
? x
->index_reg
->reg_name
: "none",
2949 x
->log2_scale_factor
);
2950 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
2951 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
2952 fprintf (stdout
, " sib: base %x index %x scale %x\n",
2953 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
2954 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
2955 (x
->rex
& REX_W
) != 0,
2956 (x
->rex
& REX_R
) != 0,
2957 (x
->rex
& REX_X
) != 0,
2958 (x
->rex
& REX_B
) != 0);
2959 for (j
= 0; j
< x
->operands
; j
++)
2961 fprintf (stdout
, " #%d: ", j
+ 1);
2963 fprintf (stdout
, "\n");
2964 if (x
->types
[j
].bitfield
.reg
2965 || x
->types
[j
].bitfield
.regmmx
2966 || x
->types
[j
].bitfield
.regsimd
2967 || x
->types
[j
].bitfield
.sreg2
2968 || x
->types
[j
].bitfield
.sreg3
2969 || x
->types
[j
].bitfield
.control
2970 || x
->types
[j
].bitfield
.debug
2971 || x
->types
[j
].bitfield
.test
)
2972 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
2973 if (operand_type_check (x
->types
[j
], imm
))
2975 if (operand_type_check (x
->types
[j
], disp
))
2976 pe (x
->op
[j
].disps
);
2981 pte (insn_template
*t
)
2984 fprintf (stdout
, " %d operands ", t
->operands
);
2985 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
2986 if (t
->extension_opcode
!= None
)
2987 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
2988 if (t
->opcode_modifier
.d
)
2989 fprintf (stdout
, "D");
2990 if (t
->opcode_modifier
.w
)
2991 fprintf (stdout
, "W");
2992 fprintf (stdout
, "\n");
2993 for (j
= 0; j
< t
->operands
; j
++)
2995 fprintf (stdout
, " #%d type ", j
+ 1);
2996 pt (t
->operand_types
[j
]);
2997 fprintf (stdout
, "\n");
3004 fprintf (stdout
, " operation %d\n", e
->X_op
);
3005 fprintf (stdout
, " add_number %ld (%lx)\n",
3006 (long) e
->X_add_number
, (long) e
->X_add_number
);
3007 if (e
->X_add_symbol
)
3009 fprintf (stdout
, " add_symbol ");
3010 ps (e
->X_add_symbol
);
3011 fprintf (stdout
, "\n");
3015 fprintf (stdout
, " op_symbol ");
3016 ps (e
->X_op_symbol
);
3017 fprintf (stdout
, "\n");
3024 fprintf (stdout
, "%s type %s%s",
3026 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
3027 segment_name (S_GET_SEGMENT (s
)));
3030 static struct type_name
3032 i386_operand_type mask
;
3035 const type_names
[] =
3037 { OPERAND_TYPE_REG8
, "r8" },
3038 { OPERAND_TYPE_REG16
, "r16" },
3039 { OPERAND_TYPE_REG32
, "r32" },
3040 { OPERAND_TYPE_REG64
, "r64" },
3041 { OPERAND_TYPE_IMM8
, "i8" },
3042 { OPERAND_TYPE_IMM8
, "i8s" },
3043 { OPERAND_TYPE_IMM16
, "i16" },
3044 { OPERAND_TYPE_IMM32
, "i32" },
3045 { OPERAND_TYPE_IMM32S
, "i32s" },
3046 { OPERAND_TYPE_IMM64
, "i64" },
3047 { OPERAND_TYPE_IMM1
, "i1" },
3048 { OPERAND_TYPE_BASEINDEX
, "BaseIndex" },
3049 { OPERAND_TYPE_DISP8
, "d8" },
3050 { OPERAND_TYPE_DISP16
, "d16" },
3051 { OPERAND_TYPE_DISP32
, "d32" },
3052 { OPERAND_TYPE_DISP32S
, "d32s" },
3053 { OPERAND_TYPE_DISP64
, "d64" },
3054 { OPERAND_TYPE_INOUTPORTREG
, "InOutPortReg" },
3055 { OPERAND_TYPE_SHIFTCOUNT
, "ShiftCount" },
3056 { OPERAND_TYPE_CONTROL
, "control reg" },
3057 { OPERAND_TYPE_TEST
, "test reg" },
3058 { OPERAND_TYPE_DEBUG
, "debug reg" },
3059 { OPERAND_TYPE_FLOATREG
, "FReg" },
3060 { OPERAND_TYPE_FLOATACC
, "FAcc" },
3061 { OPERAND_TYPE_SREG2
, "SReg2" },
3062 { OPERAND_TYPE_SREG3
, "SReg3" },
3063 { OPERAND_TYPE_ACC
, "Acc" },
3064 { OPERAND_TYPE_JUMPABSOLUTE
, "Jump Absolute" },
3065 { OPERAND_TYPE_REGMMX
, "rMMX" },
3066 { OPERAND_TYPE_REGXMM
, "rXMM" },
3067 { OPERAND_TYPE_REGYMM
, "rYMM" },
3068 { OPERAND_TYPE_REGZMM
, "rZMM" },
3069 { OPERAND_TYPE_REGMASK
, "Mask reg" },
3070 { OPERAND_TYPE_ESSEG
, "es" },
3074 pt (i386_operand_type t
)
3077 i386_operand_type a
;
3079 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
3081 a
= operand_type_and (t
, type_names
[j
].mask
);
3082 if (!operand_type_all_zero (&a
))
3083 fprintf (stdout
, "%s, ", type_names
[j
].name
);
3088 #endif /* DEBUG386 */
3090 static bfd_reloc_code_real_type
3091 reloc (unsigned int size
,
3094 bfd_reloc_code_real_type other
)
3096 if (other
!= NO_RELOC
)
3098 reloc_howto_type
*rel
;
3103 case BFD_RELOC_X86_64_GOT32
:
3104 return BFD_RELOC_X86_64_GOT64
;
3106 case BFD_RELOC_X86_64_GOTPLT64
:
3107 return BFD_RELOC_X86_64_GOTPLT64
;
3109 case BFD_RELOC_X86_64_PLTOFF64
:
3110 return BFD_RELOC_X86_64_PLTOFF64
;
3112 case BFD_RELOC_X86_64_GOTPC32
:
3113 other
= BFD_RELOC_X86_64_GOTPC64
;
3115 case BFD_RELOC_X86_64_GOTPCREL
:
3116 other
= BFD_RELOC_X86_64_GOTPCREL64
;
3118 case BFD_RELOC_X86_64_TPOFF32
:
3119 other
= BFD_RELOC_X86_64_TPOFF64
;
3121 case BFD_RELOC_X86_64_DTPOFF32
:
3122 other
= BFD_RELOC_X86_64_DTPOFF64
;
3128 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3129 if (other
== BFD_RELOC_SIZE32
)
3132 other
= BFD_RELOC_SIZE64
;
3135 as_bad (_("there are no pc-relative size relocations"));
3141 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
3142 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
3145 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
3147 as_bad (_("unknown relocation (%u)"), other
);
3148 else if (size
!= bfd_get_reloc_size (rel
))
3149 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3150 bfd_get_reloc_size (rel
),
3152 else if (pcrel
&& !rel
->pc_relative
)
3153 as_bad (_("non-pc-relative relocation for pc-relative field"));
3154 else if ((rel
->complain_on_overflow
== complain_overflow_signed
3156 || (rel
->complain_on_overflow
== complain_overflow_unsigned
3158 as_bad (_("relocated field and relocation type differ in signedness"));
3167 as_bad (_("there are no unsigned pc-relative relocations"));
3170 case 1: return BFD_RELOC_8_PCREL
;
3171 case 2: return BFD_RELOC_16_PCREL
;
3172 case 4: return BFD_RELOC_32_PCREL
;
3173 case 8: return BFD_RELOC_64_PCREL
;
3175 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
3182 case 4: return BFD_RELOC_X86_64_32S
;
3187 case 1: return BFD_RELOC_8
;
3188 case 2: return BFD_RELOC_16
;
3189 case 4: return BFD_RELOC_32
;
3190 case 8: return BFD_RELOC_64
;
3192 as_bad (_("cannot do %s %u byte relocation"),
3193 sign
> 0 ? "signed" : "unsigned", size
);
3199 /* Here we decide which fixups can be adjusted to make them relative to
3200 the beginning of the section instead of the symbol. Basically we need
3201 to make sure that the dynamic relocations are done correctly, so in
3202 some cases we force the original symbol to be used. */
3205 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
3207 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3211 /* Don't adjust pc-relative references to merge sections in 64-bit
3213 if (use_rela_relocations
3214 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
3218 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3219 and changed later by validate_fix. */
3220 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
3221 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
3224 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3225 for size relocations. */
3226 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
3227 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
3228 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
3229 || fixP
->fx_r_type
== BFD_RELOC_386_PLT32
3230 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
3231 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32X
3232 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
3233 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
3234 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
3235 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
3236 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
3237 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
3238 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
3239 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
3240 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
3241 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
3242 || fixP
->fx_r_type
== BFD_RELOC_X86_64_PLT32
3243 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
3244 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
3245 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCRELX
3246 || fixP
->fx_r_type
== BFD_RELOC_X86_64_REX_GOTPCRELX
3247 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
3248 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
3249 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
3250 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
3251 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
3252 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
3253 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
3254 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
3255 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
3256 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
3257 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
3258 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
3265 intel_float_operand (const char *mnemonic
)
3267 /* Note that the value returned is meaningful only for opcodes with (memory)
3268 operands, hence the code here is free to improperly handle opcodes that
3269 have no operands (for better performance and smaller code). */
3271 if (mnemonic
[0] != 'f')
3272 return 0; /* non-math */
3274 switch (mnemonic
[1])
3276 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
3277 the fs segment override prefix not currently handled because no
3278 call path can make opcodes without operands get here */
3280 return 2 /* integer op */;
3282 if (mnemonic
[2] == 'd' && (mnemonic
[3] == 'c' || mnemonic
[3] == 'e'))
3283 return 3; /* fldcw/fldenv */
3286 if (mnemonic
[2] != 'o' /* fnop */)
3287 return 3; /* non-waiting control op */
3290 if (mnemonic
[2] == 's')
3291 return 3; /* frstor/frstpm */
3294 if (mnemonic
[2] == 'a')
3295 return 3; /* fsave */
3296 if (mnemonic
[2] == 't')
3298 switch (mnemonic
[3])
3300 case 'c': /* fstcw */
3301 case 'd': /* fstdw */
3302 case 'e': /* fstenv */
3303 case 's': /* fsts[gw] */
3309 if (mnemonic
[2] == 'r' || mnemonic
[2] == 's')
3310 return 0; /* fxsave/fxrstor are not really math ops */
3317 /* Build the VEX prefix. */
3320 build_vex_prefix (const insn_template
*t
)
3322 unsigned int register_specifier
;
3323 unsigned int implied_prefix
;
3324 unsigned int vector_length
;
3326 /* Check register specifier. */
3327 if (i
.vex
.register_specifier
)
3329 register_specifier
=
3330 ~register_number (i
.vex
.register_specifier
) & 0xf;
3331 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
3334 register_specifier
= 0xf;
3336 /* Use 2-byte VEX prefix by swapping destination and source
3338 if (i
.vec_encoding
!= vex_encoding_vex3
3339 && i
.dir_encoding
== dir_encoding_default
3340 && i
.operands
== i
.reg_operands
3341 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3342 && i
.tm
.opcode_modifier
.load
3345 unsigned int xchg
= i
.operands
- 1;
3346 union i386_op temp_op
;
3347 i386_operand_type temp_type
;
3349 temp_type
= i
.types
[xchg
];
3350 i
.types
[xchg
] = i
.types
[0];
3351 i
.types
[0] = temp_type
;
3352 temp_op
= i
.op
[xchg
];
3353 i
.op
[xchg
] = i
.op
[0];
3356 gas_assert (i
.rm
.mode
== 3);
3360 i
.rm
.regmem
= i
.rm
.reg
;
3363 /* Use the next insn. */
3367 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3368 vector_length
= avxscalar
;
3369 else if (i
.tm
.opcode_modifier
.vex
== VEX256
)
3376 for (op
= 0; op
< t
->operands
; ++op
)
3377 if (t
->operand_types
[op
].bitfield
.xmmword
3378 && t
->operand_types
[op
].bitfield
.ymmword
3379 && i
.types
[op
].bitfield
.ymmword
)
3386 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3391 case DATA_PREFIX_OPCODE
:
3394 case REPE_PREFIX_OPCODE
:
3397 case REPNE_PREFIX_OPCODE
:
3404 /* Use 2-byte VEX prefix if possible. */
3405 if (i
.vec_encoding
!= vex_encoding_vex3
3406 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3407 && i
.tm
.opcode_modifier
.vexw
!= VEXW1
3408 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
3410 /* 2-byte VEX prefix. */
3414 i
.vex
.bytes
[0] = 0xc5;
3416 /* Check the REX.R bit. */
3417 r
= (i
.rex
& REX_R
) ? 0 : 1;
3418 i
.vex
.bytes
[1] = (r
<< 7
3419 | register_specifier
<< 3
3420 | vector_length
<< 2
3425 /* 3-byte VEX prefix. */
3430 switch (i
.tm
.opcode_modifier
.vexopcode
)
3434 i
.vex
.bytes
[0] = 0xc4;
3438 i
.vex
.bytes
[0] = 0xc4;
3442 i
.vex
.bytes
[0] = 0xc4;
3446 i
.vex
.bytes
[0] = 0x8f;
3450 i
.vex
.bytes
[0] = 0x8f;
3454 i
.vex
.bytes
[0] = 0x8f;
3460 /* The high 3 bits of the second VEX byte are 1's compliment
3461 of RXB bits from REX. */
3462 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3464 /* Check the REX.W bit. */
3465 w
= (i
.rex
& REX_W
) ? 1 : 0;
3466 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
3469 i
.vex
.bytes
[2] = (w
<< 7
3470 | register_specifier
<< 3
3471 | vector_length
<< 2
3476 /* Build the EVEX prefix. */
3479 build_evex_prefix (void)
3481 unsigned int register_specifier
;
3482 unsigned int implied_prefix
;
3484 rex_byte vrex_used
= 0;
3486 /* Check register specifier. */
3487 if (i
.vex
.register_specifier
)
3489 gas_assert ((i
.vrex
& REX_X
) == 0);
3491 register_specifier
= i
.vex
.register_specifier
->reg_num
;
3492 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
3493 register_specifier
+= 8;
3494 /* The upper 16 registers are encoded in the fourth byte of the
3496 if (!(i
.vex
.register_specifier
->reg_flags
& RegVRex
))
3497 i
.vex
.bytes
[3] = 0x8;
3498 register_specifier
= ~register_specifier
& 0xf;
3502 register_specifier
= 0xf;
3504 /* Encode upper 16 vector index register in the fourth byte of
3506 if (!(i
.vrex
& REX_X
))
3507 i
.vex
.bytes
[3] = 0x8;
3512 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3517 case DATA_PREFIX_OPCODE
:
3520 case REPE_PREFIX_OPCODE
:
3523 case REPNE_PREFIX_OPCODE
:
3530 /* 4 byte EVEX prefix. */
3532 i
.vex
.bytes
[0] = 0x62;
3535 switch (i
.tm
.opcode_modifier
.vexopcode
)
3551 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3553 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3555 /* The fifth bit of the second EVEX byte is 1's compliment of the
3556 REX_R bit in VREX. */
3557 if (!(i
.vrex
& REX_R
))
3558 i
.vex
.bytes
[1] |= 0x10;
3562 if ((i
.reg_operands
+ i
.imm_operands
) == i
.operands
)
3564 /* When all operands are registers, the REX_X bit in REX is not
3565 used. We reuse it to encode the upper 16 registers, which is
3566 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3567 as 1's compliment. */
3568 if ((i
.vrex
& REX_B
))
3571 i
.vex
.bytes
[1] &= ~0x40;
3575 /* EVEX instructions shouldn't need the REX prefix. */
3576 i
.vrex
&= ~vrex_used
;
3577 gas_assert (i
.vrex
== 0);
3579 /* Check the REX.W bit. */
3580 w
= (i
.rex
& REX_W
) ? 1 : 0;
3581 if (i
.tm
.opcode_modifier
.vexw
)
3583 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
3586 /* If w is not set it means we are dealing with WIG instruction. */
3589 if (evexwig
== evexw1
)
3593 /* Encode the U bit. */
3594 implied_prefix
|= 0x4;
3596 /* The third byte of the EVEX prefix. */
3597 i
.vex
.bytes
[2] = (w
<< 7 | register_specifier
<< 3 | implied_prefix
);
3599 /* The fourth byte of the EVEX prefix. */
3600 /* The zeroing-masking bit. */
3601 if (i
.mask
&& i
.mask
->zeroing
)
3602 i
.vex
.bytes
[3] |= 0x80;
3604 /* Don't always set the broadcast bit if there is no RC. */
3607 /* Encode the vector length. */
3608 unsigned int vec_length
;
3610 switch (i
.tm
.opcode_modifier
.evex
)
3612 case EVEXLIG
: /* LL' is ignored */
3613 vec_length
= evexlig
<< 5;
3616 vec_length
= 0 << 5;
3619 vec_length
= 1 << 5;
3622 vec_length
= 2 << 5;
3628 i
.vex
.bytes
[3] |= vec_length
;
3629 /* Encode the broadcast bit. */
3631 i
.vex
.bytes
[3] |= 0x10;
3635 if (i
.rounding
->type
!= saeonly
)
3636 i
.vex
.bytes
[3] |= 0x10 | (i
.rounding
->type
<< 5);
3638 i
.vex
.bytes
[3] |= 0x10 | (evexrcig
<< 5);
3641 if (i
.mask
&& i
.mask
->mask
)
3642 i
.vex
.bytes
[3] |= i
.mask
->mask
->reg_num
;
3646 process_immext (void)
3650 if ((i
.tm
.cpu_flags
.bitfield
.cpusse3
|| i
.tm
.cpu_flags
.bitfield
.cpusvme
)
3653 /* MONITOR/MWAIT as well as SVME instructions have fixed operands
3654 with an opcode suffix which is coded in the same place as an
3655 8-bit immediate field would be.
3656 Here we check those operands and remove them afterwards. */
3659 for (x
= 0; x
< i
.operands
; x
++)
3660 if (register_number (i
.op
[x
].regs
) != x
)
3661 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3662 register_prefix
, i
.op
[x
].regs
->reg_name
, x
+ 1,
3668 if (i
.tm
.cpu_flags
.bitfield
.cpumwaitx
&& i
.operands
> 0)
3670 /* MONITORX/MWAITX instructions have fixed operands with an opcode
3671 suffix which is coded in the same place as an 8-bit immediate
3673 Here we check those operands and remove them afterwards. */
3676 if (i
.operands
!= 3)
3679 for (x
= 0; x
< 2; x
++)
3680 if (register_number (i
.op
[x
].regs
) != x
)
3681 goto bad_register_operand
;
3683 /* Check for third operand for mwaitx/monitorx insn. */
3684 if (register_number (i
.op
[x
].regs
)
3685 != (x
+ (i
.tm
.extension_opcode
== 0xfb)))
3687 bad_register_operand
:
3688 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3689 register_prefix
, i
.op
[x
].regs
->reg_name
, x
+1,
3696 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3697 which is coded in the same place as an 8-bit immediate field
3698 would be. Here we fake an 8-bit immediate operand from the
3699 opcode suffix stored in tm.extension_opcode.
3701 AVX instructions also use this encoding, for some of
3702 3 argument instructions. */
3704 gas_assert (i
.imm_operands
<= 1
3706 || ((i
.tm
.opcode_modifier
.vex
3707 || i
.tm
.opcode_modifier
.evex
)
3708 && i
.operands
<= 4)));
3710 exp
= &im_expressions
[i
.imm_operands
++];
3711 i
.op
[i
.operands
].imms
= exp
;
3712 i
.types
[i
.operands
] = imm8
;
3714 exp
->X_op
= O_constant
;
3715 exp
->X_add_number
= i
.tm
.extension_opcode
;
3716 i
.tm
.extension_opcode
= None
;
3723 switch (i
.tm
.opcode_modifier
.hleprefixok
)
3728 as_bad (_("invalid instruction `%s' after `%s'"),
3729 i
.tm
.name
, i
.hle_prefix
);
3732 if (i
.prefix
[LOCK_PREFIX
])
3734 as_bad (_("missing `lock' with `%s'"), i
.hle_prefix
);
3738 case HLEPrefixRelease
:
3739 if (i
.prefix
[HLE_PREFIX
] != XRELEASE_PREFIX_OPCODE
)
3741 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3745 if (i
.mem_operands
== 0
3746 || !operand_type_check (i
.types
[i
.operands
- 1], anymem
))
3748 as_bad (_("memory destination needed for instruction `%s'"
3749 " after `xrelease'"), i
.tm
.name
);
3756 /* Try the shortest encoding by shortening operand size. */
3759 optimize_encoding (void)
3763 if (optimize_for_space
3764 && i
.reg_operands
== 1
3765 && i
.imm_operands
== 1
3766 && !i
.types
[1].bitfield
.byte
3767 && i
.op
[0].imms
->X_op
== O_constant
3768 && fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
3769 && ((i
.tm
.base_opcode
== 0xa8
3770 && i
.tm
.extension_opcode
== None
)
3771 || (i
.tm
.base_opcode
== 0xf6
3772 && i
.tm
.extension_opcode
== 0x0)))
3775 test $imm7, %r64/%r32/%r16 -> test $imm7, %r8
3777 unsigned int base_regnum
= i
.op
[1].regs
->reg_num
;
3778 if (flag_code
== CODE_64BIT
|| base_regnum
< 4)
3780 i
.types
[1].bitfield
.byte
= 1;
3781 /* Ignore the suffix. */
3783 if (base_regnum
>= 4
3784 && !(i
.op
[1].regs
->reg_flags
& RegRex
))
3786 /* Handle SP, BP, SI and DI registers. */
3787 if (i
.types
[1].bitfield
.word
)
3789 else if (i
.types
[1].bitfield
.dword
)
3797 else if (flag_code
== CODE_64BIT
3798 && ((i
.reg_operands
== 1
3799 && i
.imm_operands
== 1
3800 && i
.op
[0].imms
->X_op
== O_constant
3801 && ((i
.tm
.base_opcode
== 0xb0
3802 && i
.tm
.extension_opcode
== None
3803 && fits_in_unsigned_long (i
.op
[0].imms
->X_add_number
))
3804 || (fits_in_imm31 (i
.op
[0].imms
->X_add_number
)
3805 && (((i
.tm
.base_opcode
== 0x24
3806 || i
.tm
.base_opcode
== 0xa8)
3807 && i
.tm
.extension_opcode
== None
)
3808 || (i
.tm
.base_opcode
== 0x80
3809 && i
.tm
.extension_opcode
== 0x4)
3810 || ((i
.tm
.base_opcode
== 0xf6
3811 || i
.tm
.base_opcode
== 0xc6)
3812 && i
.tm
.extension_opcode
== 0x0)))))
3813 || (i
.reg_operands
== 2
3814 && i
.op
[0].regs
== i
.op
[1].regs
3815 && ((i
.tm
.base_opcode
== 0x30
3816 || i
.tm
.base_opcode
== 0x28)
3817 && i
.tm
.extension_opcode
== None
)))
3818 && i
.types
[1].bitfield
.qword
)
3821 andq $imm31, %r64 -> andl $imm31, %r32
3822 testq $imm31, %r64 -> testl $imm31, %r32
3823 xorq %r64, %r64 -> xorl %r32, %r32
3824 subq %r64, %r64 -> subl %r32, %r32
3825 movq $imm31, %r64 -> movl $imm31, %r32
3826 movq $imm32, %r64 -> movl $imm32, %r32
3828 i
.tm
.opcode_modifier
.norex64
= 1;
3829 if (i
.tm
.base_opcode
== 0xb0 || i
.tm
.base_opcode
== 0xc6)
3832 movq $imm31, %r64 -> movl $imm31, %r32
3833 movq $imm32, %r64 -> movl $imm32, %r32
3835 i
.tm
.operand_types
[0].bitfield
.imm32
= 1;
3836 i
.tm
.operand_types
[0].bitfield
.imm32s
= 0;
3837 i
.tm
.operand_types
[0].bitfield
.imm64
= 0;
3838 i
.types
[0].bitfield
.imm32
= 1;
3839 i
.types
[0].bitfield
.imm32s
= 0;
3840 i
.types
[0].bitfield
.imm64
= 0;
3841 i
.types
[1].bitfield
.dword
= 1;
3842 i
.types
[1].bitfield
.qword
= 0;
3843 if (i
.tm
.base_opcode
== 0xc6)
3846 movq $imm31, %r64 -> movl $imm31, %r32
3848 i
.tm
.base_opcode
= 0xb0;
3849 i
.tm
.extension_opcode
= None
;
3850 i
.tm
.opcode_modifier
.shortform
= 1;
3851 i
.tm
.opcode_modifier
.modrm
= 0;
3855 else if (optimize
> 1
3856 && i
.reg_operands
== 3
3857 && i
.op
[0].regs
== i
.op
[1].regs
3858 && !i
.types
[2].bitfield
.xmmword
3859 && (i
.tm
.opcode_modifier
.vex
3862 && i
.tm
.opcode_modifier
.evex
3863 && cpu_arch_flags
.bitfield
.cpuavx512vl
))
3864 && ((i
.tm
.base_opcode
== 0x55
3865 || i
.tm
.base_opcode
== 0x6655
3866 || i
.tm
.base_opcode
== 0x66df
3867 || i
.tm
.base_opcode
== 0x57
3868 || i
.tm
.base_opcode
== 0x6657
3869 || i
.tm
.base_opcode
== 0x66ef
3870 || i
.tm
.base_opcode
== 0x66f8
3871 || i
.tm
.base_opcode
== 0x66f9
3872 || i
.tm
.base_opcode
== 0x66fa
3873 || i
.tm
.base_opcode
== 0x66fb)
3874 && i
.tm
.extension_opcode
== None
))
3877 VOP, one of vandnps, vandnpd, vxorps, vxorpd, vpsubb, vpsubd,
3879 EVEX VOP %zmmM, %zmmM, %zmmN
3880 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
3881 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16)
3882 EVEX VOP %ymmM, %ymmM, %ymmN
3883 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
3884 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16)
3885 VEX VOP %ymmM, %ymmM, %ymmN
3886 -> VEX VOP %xmmM, %xmmM, %xmmN
3887 VOP, one of vpandn and vpxor:
3888 VEX VOP %ymmM, %ymmM, %ymmN
3889 -> VEX VOP %xmmM, %xmmM, %xmmN
3890 VOP, one of vpandnd and vpandnq:
3891 EVEX VOP %zmmM, %zmmM, %zmmN
3892 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
3893 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16)
3894 EVEX VOP %ymmM, %ymmM, %ymmN
3895 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
3896 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16)
3897 VOP, one of vpxord and vpxorq:
3898 EVEX VOP %zmmM, %zmmM, %zmmN
3899 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
3900 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16)
3901 EVEX VOP %ymmM, %ymmM, %ymmN
3902 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
3903 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16)
3905 if (i
.tm
.opcode_modifier
.evex
)
3907 /* If only lower 16 vector registers are used, we can use
3909 for (j
= 0; j
< 3; j
++)
3910 if (register_number (i
.op
[j
].regs
) > 15)
3914 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3917 i
.tm
.opcode_modifier
.vex
= VEX128
;
3918 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
3919 i
.tm
.opcode_modifier
.evex
= 0;
3923 i
.tm
.opcode_modifier
.vex
= VEX128
;
3925 if (i
.tm
.opcode_modifier
.vex
)
3926 for (j
= 0; j
< 3; j
++)
3928 i
.types
[j
].bitfield
.xmmword
= 1;
3929 i
.types
[j
].bitfield
.ymmword
= 0;
3934 /* This is the guts of the machine-dependent assembler. LINE points to a
3935 machine dependent instruction. This function is supposed to emit
3936 the frags/bytes it assembles to. */
3939 md_assemble (char *line
)
3942 char mnemonic
[MAX_MNEM_SIZE
], mnem_suffix
;
3943 const insn_template
*t
;
3945 /* Initialize globals. */
3946 memset (&i
, '\0', sizeof (i
));
3947 for (j
= 0; j
< MAX_OPERANDS
; j
++)
3948 i
.reloc
[j
] = NO_RELOC
;
3949 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
3950 memset (im_expressions
, '\0', sizeof (im_expressions
));
3951 save_stack_p
= save_stack
;
3953 /* First parse an instruction mnemonic & call i386_operand for the operands.
3954 We assume that the scrubber has arranged it so that line[0] is the valid
3955 start of a (possibly prefixed) mnemonic. */
3957 line
= parse_insn (line
, mnemonic
);
3960 mnem_suffix
= i
.suffix
;
3962 line
= parse_operands (line
, mnemonic
);
3964 xfree (i
.memop1_string
);
3965 i
.memop1_string
= NULL
;
3969 /* Now we've parsed the mnemonic into a set of templates, and have the
3970 operands at hand. */
3972 /* All intel opcodes have reversed operands except for "bound" and
3973 "enter". We also don't reverse intersegment "jmp" and "call"
3974 instructions with 2 immediate operands so that the immediate segment
3975 precedes the offset, as it does when in AT&T mode. */
3978 && (strcmp (mnemonic
, "bound") != 0)
3979 && (strcmp (mnemonic
, "invlpga") != 0)
3980 && !(operand_type_check (i
.types
[0], imm
)
3981 && operand_type_check (i
.types
[1], imm
)))
3984 /* The order of the immediates should be reversed
3985 for 2 immediates extrq and insertq instructions */
3986 if (i
.imm_operands
== 2
3987 && (strcmp (mnemonic
, "extrq") == 0
3988 || strcmp (mnemonic
, "insertq") == 0))
3989 swap_2_operands (0, 1);
3994 /* Don't optimize displacement for movabs since it only takes 64bit
3997 && i
.disp_encoding
!= disp_encoding_32bit
3998 && (flag_code
!= CODE_64BIT
3999 || strcmp (mnemonic
, "movabs") != 0))
4002 /* Next, we find a template that matches the given insn,
4003 making sure the overlap of the given operands types is consistent
4004 with the template operand types. */
4006 if (!(t
= match_template (mnem_suffix
)))
4009 if (sse_check
!= check_none
4010 && !i
.tm
.opcode_modifier
.noavx
4011 && !i
.tm
.cpu_flags
.bitfield
.cpuavx
4012 && (i
.tm
.cpu_flags
.bitfield
.cpusse
4013 || i
.tm
.cpu_flags
.bitfield
.cpusse2
4014 || i
.tm
.cpu_flags
.bitfield
.cpusse3
4015 || i
.tm
.cpu_flags
.bitfield
.cpussse3
4016 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
4017 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
4018 || i
.tm
.cpu_flags
.bitfield
.cpupclmul
4019 || i
.tm
.cpu_flags
.bitfield
.cpuaes
4020 || i
.tm
.cpu_flags
.bitfield
.cpugfni
))
4022 (sse_check
== check_warning
4024 : as_bad
) (_("SSE instruction `%s' is used"), i
.tm
.name
);
4027 /* Zap movzx and movsx suffix. The suffix has been set from
4028 "word ptr" or "byte ptr" on the source operand in Intel syntax
4029 or extracted from mnemonic in AT&T syntax. But we'll use
4030 the destination register to choose the suffix for encoding. */
4031 if ((i
.tm
.base_opcode
& ~9) == 0x0fb6)
4033 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
4034 there is no suffix, the default will be byte extension. */
4035 if (i
.reg_operands
!= 2
4038 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
4043 if (i
.tm
.opcode_modifier
.fwait
)
4044 if (!add_prefix (FWAIT_OPCODE
))
4047 /* Check if REP prefix is OK. */
4048 if (i
.rep_prefix
&& !i
.tm
.opcode_modifier
.repprefixok
)
4050 as_bad (_("invalid instruction `%s' after `%s'"),
4051 i
.tm
.name
, i
.rep_prefix
);
4055 /* Check for lock without a lockable instruction. Destination operand
4056 must be memory unless it is xchg (0x86). */
4057 if (i
.prefix
[LOCK_PREFIX
]
4058 && (!i
.tm
.opcode_modifier
.islockable
4059 || i
.mem_operands
== 0
4060 || (i
.tm
.base_opcode
!= 0x86
4061 && !operand_type_check (i
.types
[i
.operands
- 1], anymem
))))
4063 as_bad (_("expecting lockable instruction after `lock'"));
4067 /* Check if HLE prefix is OK. */
4068 if (i
.hle_prefix
&& !check_hle ())
4071 /* Check BND prefix. */
4072 if (i
.bnd_prefix
&& !i
.tm
.opcode_modifier
.bndprefixok
)
4073 as_bad (_("expecting valid branch instruction after `bnd'"));
4075 /* Check NOTRACK prefix. */
4076 if (i
.notrack_prefix
&& !i
.tm
.opcode_modifier
.notrackprefixok
)
4077 as_bad (_("expecting indirect branch instruction after `notrack'"));
4079 if (i
.tm
.cpu_flags
.bitfield
.cpumpx
)
4081 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
4082 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
4083 else if (flag_code
!= CODE_16BIT
4084 ? i
.prefix
[ADDR_PREFIX
]
4085 : i
.mem_operands
&& !i
.prefix
[ADDR_PREFIX
])
4086 as_bad (_("16-bit address isn't allowed in MPX instructions"));
4089 /* Insert BND prefix. */
4091 && i
.tm
.opcode_modifier
.bndprefixok
4092 && !i
.prefix
[BND_PREFIX
])
4093 add_prefix (BND_PREFIX_OPCODE
);
4095 /* Check string instruction segment overrides. */
4096 if (i
.tm
.opcode_modifier
.isstring
&& i
.mem_operands
!= 0)
4098 if (!check_string ())
4100 i
.disp_operands
= 0;
4103 if (optimize
&& !i
.no_optimize
&& i
.tm
.opcode_modifier
.optimize
)
4104 optimize_encoding ();
4106 if (!process_suffix ())
4109 /* Update operand types. */
4110 for (j
= 0; j
< i
.operands
; j
++)
4111 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
4113 /* Make still unresolved immediate matches conform to size of immediate
4114 given in i.suffix. */
4115 if (!finalize_imm ())
4118 if (i
.types
[0].bitfield
.imm1
)
4119 i
.imm_operands
= 0; /* kludge for shift insns. */
4121 /* We only need to check those implicit registers for instructions
4122 with 3 operands or less. */
4123 if (i
.operands
<= 3)
4124 for (j
= 0; j
< i
.operands
; j
++)
4125 if (i
.types
[j
].bitfield
.inoutportreg
4126 || i
.types
[j
].bitfield
.shiftcount
4127 || (i
.types
[j
].bitfield
.acc
&& !i
.types
[j
].bitfield
.xmmword
))
4130 /* ImmExt should be processed after SSE2AVX. */
4131 if (!i
.tm
.opcode_modifier
.sse2avx
4132 && i
.tm
.opcode_modifier
.immext
)
4135 /* For insns with operands there are more diddles to do to the opcode. */
4138 if (!process_operands ())
4141 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
4143 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
4144 as_warn (_("translating to `%sp'"), i
.tm
.name
);
4147 if (i
.tm
.opcode_modifier
.vex
|| i
.tm
.opcode_modifier
.evex
)
4149 if (flag_code
== CODE_16BIT
)
4151 as_bad (_("instruction `%s' isn't supported in 16-bit mode."),
4156 if (i
.tm
.opcode_modifier
.vex
)
4157 build_vex_prefix (t
);
4159 build_evex_prefix ();
4162 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
4163 instructions may define INT_OPCODE as well, so avoid this corner
4164 case for those instructions that use MODRM. */
4165 if (i
.tm
.base_opcode
== INT_OPCODE
4166 && !i
.tm
.opcode_modifier
.modrm
4167 && i
.op
[0].imms
->X_add_number
== 3)
4169 i
.tm
.base_opcode
= INT3_OPCODE
;
4173 if ((i
.tm
.opcode_modifier
.jump
4174 || i
.tm
.opcode_modifier
.jumpbyte
4175 || i
.tm
.opcode_modifier
.jumpdword
)
4176 && i
.op
[0].disps
->X_op
== O_constant
)
4178 /* Convert "jmp constant" (and "call constant") to a jump (call) to
4179 the absolute address given by the constant. Since ix86 jumps and
4180 calls are pc relative, we need to generate a reloc. */
4181 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
4182 i
.op
[0].disps
->X_op
= O_symbol
;
4185 if (i
.tm
.opcode_modifier
.rex64
)
4188 /* For 8 bit registers we need an empty rex prefix. Also if the
4189 instruction already has a prefix, we need to convert old
4190 registers to new ones. */
4192 if ((i
.types
[0].bitfield
.reg
&& i
.types
[0].bitfield
.byte
4193 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
4194 || (i
.types
[1].bitfield
.reg
&& i
.types
[1].bitfield
.byte
4195 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
4196 || (((i
.types
[0].bitfield
.reg
&& i
.types
[0].bitfield
.byte
)
4197 || (i
.types
[1].bitfield
.reg
&& i
.types
[1].bitfield
.byte
))
4202 i
.rex
|= REX_OPCODE
;
4203 for (x
= 0; x
< 2; x
++)
4205 /* Look for 8 bit operand that uses old registers. */
4206 if (i
.types
[x
].bitfield
.reg
&& i
.types
[x
].bitfield
.byte
4207 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
4209 /* In case it is "hi" register, give up. */
4210 if (i
.op
[x
].regs
->reg_num
> 3)
4211 as_bad (_("can't encode register '%s%s' in an "
4212 "instruction requiring REX prefix."),
4213 register_prefix
, i
.op
[x
].regs
->reg_name
);
4215 /* Otherwise it is equivalent to the extended register.
4216 Since the encoding doesn't change this is merely
4217 cosmetic cleanup for debug output. */
4219 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
4224 if (i
.rex
== 0 && i
.rex_encoding
)
4226 /* Check if we can add a REX_OPCODE byte. Look for 8 bit operand
4227 that uses legacy register. If it is "hi" register, don't add
4228 the REX_OPCODE byte. */
4230 for (x
= 0; x
< 2; x
++)
4231 if (i
.types
[x
].bitfield
.reg
4232 && i
.types
[x
].bitfield
.byte
4233 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0
4234 && i
.op
[x
].regs
->reg_num
> 3)
4236 i
.rex_encoding
= FALSE
;
4245 add_prefix (REX_OPCODE
| i
.rex
);
4247 /* We are ready to output the insn. */
4252 parse_insn (char *line
, char *mnemonic
)
4255 char *token_start
= l
;
4258 const insn_template
*t
;
4264 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
4269 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
4271 as_bad (_("no such instruction: `%s'"), token_start
);
4276 if (!is_space_char (*l
)
4277 && *l
!= END_OF_INSN
4279 || (*l
!= PREFIX_SEPARATOR
4282 as_bad (_("invalid character %s in mnemonic"),
4283 output_invalid (*l
));
4286 if (token_start
== l
)
4288 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
4289 as_bad (_("expecting prefix; got nothing"));
4291 as_bad (_("expecting mnemonic; got nothing"));
4295 /* Look up instruction (or prefix) via hash table. */
4296 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
4298 if (*l
!= END_OF_INSN
4299 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
4300 && current_templates
4301 && current_templates
->start
->opcode_modifier
.isprefix
)
4303 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
4305 as_bad ((flag_code
!= CODE_64BIT
4306 ? _("`%s' is only supported in 64-bit mode")
4307 : _("`%s' is not supported in 64-bit mode")),
4308 current_templates
->start
->name
);
4311 /* If we are in 16-bit mode, do not allow addr16 or data16.
4312 Similarly, in 32-bit mode, do not allow addr32 or data32. */
4313 if ((current_templates
->start
->opcode_modifier
.size16
4314 || current_templates
->start
->opcode_modifier
.size32
)
4315 && flag_code
!= CODE_64BIT
4316 && (current_templates
->start
->opcode_modifier
.size32
4317 ^ (flag_code
== CODE_16BIT
)))
4319 as_bad (_("redundant %s prefix"),
4320 current_templates
->start
->name
);
4323 if (current_templates
->start
->opcode_length
== 0)
4325 /* Handle pseudo prefixes. */
4326 switch (current_templates
->start
->base_opcode
)
4330 i
.disp_encoding
= disp_encoding_8bit
;
4334 i
.disp_encoding
= disp_encoding_32bit
;
4338 i
.dir_encoding
= dir_encoding_load
;
4342 i
.dir_encoding
= dir_encoding_store
;
4346 i
.vec_encoding
= vex_encoding_vex2
;
4350 i
.vec_encoding
= vex_encoding_vex3
;
4354 i
.vec_encoding
= vex_encoding_evex
;
4358 i
.rex_encoding
= TRUE
;
4362 i
.no_optimize
= TRUE
;
4370 /* Add prefix, checking for repeated prefixes. */
4371 switch (add_prefix (current_templates
->start
->base_opcode
))
4376 if (current_templates
->start
->cpu_flags
.bitfield
.cpuibt
)
4377 i
.notrack_prefix
= current_templates
->start
->name
;
4380 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
4381 i
.hle_prefix
= current_templates
->start
->name
;
4382 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
4383 i
.bnd_prefix
= current_templates
->start
->name
;
4385 i
.rep_prefix
= current_templates
->start
->name
;
4391 /* Skip past PREFIX_SEPARATOR and reset token_start. */
4398 if (!current_templates
)
4400 /* Check if we should swap operand or force 32bit displacement in
4402 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
4403 i
.dir_encoding
= dir_encoding_store
;
4404 else if (mnem_p
- 3 == dot_p
4407 i
.disp_encoding
= disp_encoding_8bit
;
4408 else if (mnem_p
- 4 == dot_p
4412 i
.disp_encoding
= disp_encoding_32bit
;
4417 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
4420 if (!current_templates
)
4423 /* See if we can get a match by trimming off a suffix. */
4426 case WORD_MNEM_SUFFIX
:
4427 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
4428 i
.suffix
= SHORT_MNEM_SUFFIX
;
4431 case BYTE_MNEM_SUFFIX
:
4432 case QWORD_MNEM_SUFFIX
:
4433 i
.suffix
= mnem_p
[-1];
4435 current_templates
= (const templates
*) hash_find (op_hash
,
4438 case SHORT_MNEM_SUFFIX
:
4439 case LONG_MNEM_SUFFIX
:
4442 i
.suffix
= mnem_p
[-1];
4444 current_templates
= (const templates
*) hash_find (op_hash
,
4453 if (intel_float_operand (mnemonic
) == 1)
4454 i
.suffix
= SHORT_MNEM_SUFFIX
;
4456 i
.suffix
= LONG_MNEM_SUFFIX
;
4458 current_templates
= (const templates
*) hash_find (op_hash
,
4463 if (!current_templates
)
4465 as_bad (_("no such instruction: `%s'"), token_start
);
4470 if (current_templates
->start
->opcode_modifier
.jump
4471 || current_templates
->start
->opcode_modifier
.jumpbyte
)
4473 /* Check for a branch hint. We allow ",pt" and ",pn" for
4474 predict taken and predict not taken respectively.
4475 I'm not sure that branch hints actually do anything on loop
4476 and jcxz insns (JumpByte) for current Pentium4 chips. They
4477 may work in the future and it doesn't hurt to accept them
4479 if (l
[0] == ',' && l
[1] == 'p')
4483 if (!add_prefix (DS_PREFIX_OPCODE
))
4487 else if (l
[2] == 'n')
4489 if (!add_prefix (CS_PREFIX_OPCODE
))
4495 /* Any other comma loses. */
4498 as_bad (_("invalid character %s in mnemonic"),
4499 output_invalid (*l
));
4503 /* Check if instruction is supported on specified architecture. */
4505 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
4507 supported
|= cpu_flags_match (t
);
4508 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
4512 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
4514 as_bad (flag_code
== CODE_64BIT
4515 ? _("`%s' is not supported in 64-bit mode")
4516 : _("`%s' is only supported in 64-bit mode"),
4517 current_templates
->start
->name
);
4520 if (supported
!= CPU_FLAGS_PERFECT_MATCH
)
4522 as_bad (_("`%s' is not supported on `%s%s'"),
4523 current_templates
->start
->name
,
4524 cpu_arch_name
? cpu_arch_name
: default_arch
,
4525 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
4530 if (!cpu_arch_flags
.bitfield
.cpui386
4531 && (flag_code
!= CODE_16BIT
))
4533 as_warn (_("use .code16 to ensure correct addressing mode"));
4540 parse_operands (char *l
, const char *mnemonic
)
4544 /* 1 if operand is pending after ','. */
4545 unsigned int expecting_operand
= 0;
4547 /* Non-zero if operand parens not balanced. */
4548 unsigned int paren_not_balanced
;
4550 while (*l
!= END_OF_INSN
)
4552 /* Skip optional white space before operand. */
4553 if (is_space_char (*l
))
4555 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
&& *l
!= '"')
4557 as_bad (_("invalid character %s before operand %d"),
4558 output_invalid (*l
),
4562 token_start
= l
; /* After white space. */
4563 paren_not_balanced
= 0;
4564 while (paren_not_balanced
|| *l
!= ',')
4566 if (*l
== END_OF_INSN
)
4568 if (paren_not_balanced
)
4571 as_bad (_("unbalanced parenthesis in operand %d."),
4574 as_bad (_("unbalanced brackets in operand %d."),
4579 break; /* we are done */
4581 else if (!is_operand_char (*l
) && !is_space_char (*l
) && *l
!= '"')
4583 as_bad (_("invalid character %s in operand %d"),
4584 output_invalid (*l
),
4591 ++paren_not_balanced
;
4593 --paren_not_balanced
;
4598 ++paren_not_balanced
;
4600 --paren_not_balanced
;
4604 if (l
!= token_start
)
4605 { /* Yes, we've read in another operand. */
4606 unsigned int operand_ok
;
4607 this_operand
= i
.operands
++;
4608 if (i
.operands
> MAX_OPERANDS
)
4610 as_bad (_("spurious operands; (%d operands/instruction max)"),
4614 i
.types
[this_operand
].bitfield
.unspecified
= 1;
4615 /* Now parse operand adding info to 'i' as we go along. */
4616 END_STRING_AND_SAVE (l
);
4620 i386_intel_operand (token_start
,
4621 intel_float_operand (mnemonic
));
4623 operand_ok
= i386_att_operand (token_start
);
4625 RESTORE_END_STRING (l
);
4631 if (expecting_operand
)
4633 expecting_operand_after_comma
:
4634 as_bad (_("expecting operand after ','; got nothing"));
4639 as_bad (_("expecting operand before ','; got nothing"));
4644 /* Now *l must be either ',' or END_OF_INSN. */
4647 if (*++l
== END_OF_INSN
)
4649 /* Just skip it, if it's \n complain. */
4650 goto expecting_operand_after_comma
;
4652 expecting_operand
= 1;
4659 swap_2_operands (int xchg1
, int xchg2
)
4661 union i386_op temp_op
;
4662 i386_operand_type temp_type
;
4663 enum bfd_reloc_code_real temp_reloc
;
4665 temp_type
= i
.types
[xchg2
];
4666 i
.types
[xchg2
] = i
.types
[xchg1
];
4667 i
.types
[xchg1
] = temp_type
;
4668 temp_op
= i
.op
[xchg2
];
4669 i
.op
[xchg2
] = i
.op
[xchg1
];
4670 i
.op
[xchg1
] = temp_op
;
4671 temp_reloc
= i
.reloc
[xchg2
];
4672 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
4673 i
.reloc
[xchg1
] = temp_reloc
;
4677 if (i
.mask
->operand
== xchg1
)
4678 i
.mask
->operand
= xchg2
;
4679 else if (i
.mask
->operand
== xchg2
)
4680 i
.mask
->operand
= xchg1
;
4684 if (i
.broadcast
->operand
== xchg1
)
4685 i
.broadcast
->operand
= xchg2
;
4686 else if (i
.broadcast
->operand
== xchg2
)
4687 i
.broadcast
->operand
= xchg1
;
4691 if (i
.rounding
->operand
== xchg1
)
4692 i
.rounding
->operand
= xchg2
;
4693 else if (i
.rounding
->operand
== xchg2
)
4694 i
.rounding
->operand
= xchg1
;
4699 swap_operands (void)
4705 swap_2_operands (1, i
.operands
- 2);
4709 swap_2_operands (0, i
.operands
- 1);
4715 if (i
.mem_operands
== 2)
4717 const seg_entry
*temp_seg
;
4718 temp_seg
= i
.seg
[0];
4719 i
.seg
[0] = i
.seg
[1];
4720 i
.seg
[1] = temp_seg
;
4724 /* Try to ensure constant immediates are represented in the smallest
4729 char guess_suffix
= 0;
4733 guess_suffix
= i
.suffix
;
4734 else if (i
.reg_operands
)
4736 /* Figure out a suffix from the last register operand specified.
4737 We can't do this properly yet, ie. excluding InOutPortReg,
4738 but the following works for instructions with immediates.
4739 In any case, we can't set i.suffix yet. */
4740 for (op
= i
.operands
; --op
>= 0;)
4741 if (i
.types
[op
].bitfield
.reg
&& i
.types
[op
].bitfield
.byte
)
4743 guess_suffix
= BYTE_MNEM_SUFFIX
;
4746 else if (i
.types
[op
].bitfield
.reg
&& i
.types
[op
].bitfield
.word
)
4748 guess_suffix
= WORD_MNEM_SUFFIX
;
4751 else if (i
.types
[op
].bitfield
.reg
&& i
.types
[op
].bitfield
.dword
)
4753 guess_suffix
= LONG_MNEM_SUFFIX
;
4756 else if (i
.types
[op
].bitfield
.reg
&& i
.types
[op
].bitfield
.qword
)
4758 guess_suffix
= QWORD_MNEM_SUFFIX
;
4762 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
4763 guess_suffix
= WORD_MNEM_SUFFIX
;
4765 for (op
= i
.operands
; --op
>= 0;)
4766 if (operand_type_check (i
.types
[op
], imm
))
4768 switch (i
.op
[op
].imms
->X_op
)
4771 /* If a suffix is given, this operand may be shortened. */
4772 switch (guess_suffix
)
4774 case LONG_MNEM_SUFFIX
:
4775 i
.types
[op
].bitfield
.imm32
= 1;
4776 i
.types
[op
].bitfield
.imm64
= 1;
4778 case WORD_MNEM_SUFFIX
:
4779 i
.types
[op
].bitfield
.imm16
= 1;
4780 i
.types
[op
].bitfield
.imm32
= 1;
4781 i
.types
[op
].bitfield
.imm32s
= 1;
4782 i
.types
[op
].bitfield
.imm64
= 1;
4784 case BYTE_MNEM_SUFFIX
:
4785 i
.types
[op
].bitfield
.imm8
= 1;
4786 i
.types
[op
].bitfield
.imm8s
= 1;
4787 i
.types
[op
].bitfield
.imm16
= 1;
4788 i
.types
[op
].bitfield
.imm32
= 1;
4789 i
.types
[op
].bitfield
.imm32s
= 1;
4790 i
.types
[op
].bitfield
.imm64
= 1;
4794 /* If this operand is at most 16 bits, convert it
4795 to a signed 16 bit number before trying to see
4796 whether it will fit in an even smaller size.
4797 This allows a 16-bit operand such as $0xffe0 to
4798 be recognised as within Imm8S range. */
4799 if ((i
.types
[op
].bitfield
.imm16
)
4800 && (i
.op
[op
].imms
->X_add_number
& ~(offsetT
) 0xffff) == 0)
4802 i
.op
[op
].imms
->X_add_number
=
4803 (((i
.op
[op
].imms
->X_add_number
& 0xffff) ^ 0x8000) - 0x8000);
4806 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
4807 if ((i
.types
[op
].bitfield
.imm32
)
4808 && ((i
.op
[op
].imms
->X_add_number
& ~(((offsetT
) 2 << 31) - 1))
4811 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
4812 ^ ((offsetT
) 1 << 31))
4813 - ((offsetT
) 1 << 31));
4817 = operand_type_or (i
.types
[op
],
4818 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
4820 /* We must avoid matching of Imm32 templates when 64bit
4821 only immediate is available. */
4822 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
4823 i
.types
[op
].bitfield
.imm32
= 0;
4830 /* Symbols and expressions. */
4832 /* Convert symbolic operand to proper sizes for matching, but don't
4833 prevent matching a set of insns that only supports sizes other
4834 than those matching the insn suffix. */
4836 i386_operand_type mask
, allowed
;
4837 const insn_template
*t
;
4839 operand_type_set (&mask
, 0);
4840 operand_type_set (&allowed
, 0);
4842 for (t
= current_templates
->start
;
4843 t
< current_templates
->end
;
4845 allowed
= operand_type_or (allowed
,
4846 t
->operand_types
[op
]);
4847 switch (guess_suffix
)
4849 case QWORD_MNEM_SUFFIX
:
4850 mask
.bitfield
.imm64
= 1;
4851 mask
.bitfield
.imm32s
= 1;
4853 case LONG_MNEM_SUFFIX
:
4854 mask
.bitfield
.imm32
= 1;
4856 case WORD_MNEM_SUFFIX
:
4857 mask
.bitfield
.imm16
= 1;
4859 case BYTE_MNEM_SUFFIX
:
4860 mask
.bitfield
.imm8
= 1;
4865 allowed
= operand_type_and (mask
, allowed
);
4866 if (!operand_type_all_zero (&allowed
))
4867 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
4874 /* Try to use the smallest displacement type too. */
4876 optimize_disp (void)
4880 for (op
= i
.operands
; --op
>= 0;)
4881 if (operand_type_check (i
.types
[op
], disp
))
4883 if (i
.op
[op
].disps
->X_op
== O_constant
)
4885 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
4887 if (i
.types
[op
].bitfield
.disp16
4888 && (op_disp
& ~(offsetT
) 0xffff) == 0)
4890 /* If this operand is at most 16 bits, convert
4891 to a signed 16 bit number and don't use 64bit
4893 op_disp
= (((op_disp
& 0xffff) ^ 0x8000) - 0x8000);
4894 i
.types
[op
].bitfield
.disp64
= 0;
4897 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
4898 if (i
.types
[op
].bitfield
.disp32
4899 && (op_disp
& ~(((offsetT
) 2 << 31) - 1)) == 0)
4901 /* If this operand is at most 32 bits, convert
4902 to a signed 32 bit number and don't use 64bit
4904 op_disp
&= (((offsetT
) 2 << 31) - 1);
4905 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
4906 i
.types
[op
].bitfield
.disp64
= 0;
4909 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
4911 i
.types
[op
].bitfield
.disp8
= 0;
4912 i
.types
[op
].bitfield
.disp16
= 0;
4913 i
.types
[op
].bitfield
.disp32
= 0;
4914 i
.types
[op
].bitfield
.disp32s
= 0;
4915 i
.types
[op
].bitfield
.disp64
= 0;
4919 else if (flag_code
== CODE_64BIT
)
4921 if (fits_in_signed_long (op_disp
))
4923 i
.types
[op
].bitfield
.disp64
= 0;
4924 i
.types
[op
].bitfield
.disp32s
= 1;
4926 if (i
.prefix
[ADDR_PREFIX
]
4927 && fits_in_unsigned_long (op_disp
))
4928 i
.types
[op
].bitfield
.disp32
= 1;
4930 if ((i
.types
[op
].bitfield
.disp32
4931 || i
.types
[op
].bitfield
.disp32s
4932 || i
.types
[op
].bitfield
.disp16
)
4933 && fits_in_disp8 (op_disp
))
4934 i
.types
[op
].bitfield
.disp8
= 1;
4936 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
4937 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
4939 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
4940 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
4941 i
.types
[op
].bitfield
.disp8
= 0;
4942 i
.types
[op
].bitfield
.disp16
= 0;
4943 i
.types
[op
].bitfield
.disp32
= 0;
4944 i
.types
[op
].bitfield
.disp32s
= 0;
4945 i
.types
[op
].bitfield
.disp64
= 0;
4948 /* We only support 64bit displacement on constants. */
4949 i
.types
[op
].bitfield
.disp64
= 0;
4953 /* Check if operands are valid for the instruction. */
4956 check_VecOperands (const insn_template
*t
)
4960 /* Without VSIB byte, we can't have a vector register for index. */
4961 if (!t
->opcode_modifier
.vecsib
4963 && (i
.index_reg
->reg_type
.bitfield
.xmmword
4964 || i
.index_reg
->reg_type
.bitfield
.ymmword
4965 || i
.index_reg
->reg_type
.bitfield
.zmmword
))
4967 i
.error
= unsupported_vector_index_register
;
4971 /* Check if default mask is allowed. */
4972 if (t
->opcode_modifier
.nodefmask
4973 && (!i
.mask
|| i
.mask
->mask
->reg_num
== 0))
4975 i
.error
= no_default_mask
;
4979 /* For VSIB byte, we need a vector register for index, and all vector
4980 registers must be distinct. */
4981 if (t
->opcode_modifier
.vecsib
)
4984 || !((t
->opcode_modifier
.vecsib
== VecSIB128
4985 && i
.index_reg
->reg_type
.bitfield
.xmmword
)
4986 || (t
->opcode_modifier
.vecsib
== VecSIB256
4987 && i
.index_reg
->reg_type
.bitfield
.ymmword
)
4988 || (t
->opcode_modifier
.vecsib
== VecSIB512
4989 && i
.index_reg
->reg_type
.bitfield
.zmmword
)))
4991 i
.error
= invalid_vsib_address
;
4995 gas_assert (i
.reg_operands
== 2 || i
.mask
);
4996 if (i
.reg_operands
== 2 && !i
.mask
)
4998 gas_assert (i
.types
[0].bitfield
.regsimd
);
4999 gas_assert (i
.types
[0].bitfield
.xmmword
5000 || i
.types
[0].bitfield
.ymmword
);
5001 gas_assert (i
.types
[2].bitfield
.regsimd
);
5002 gas_assert (i
.types
[2].bitfield
.xmmword
5003 || i
.types
[2].bitfield
.ymmword
);
5004 if (operand_check
== check_none
)
5006 if (register_number (i
.op
[0].regs
)
5007 != register_number (i
.index_reg
)
5008 && register_number (i
.op
[2].regs
)
5009 != register_number (i
.index_reg
)
5010 && register_number (i
.op
[0].regs
)
5011 != register_number (i
.op
[2].regs
))
5013 if (operand_check
== check_error
)
5015 i
.error
= invalid_vector_register_set
;
5018 as_warn (_("mask, index, and destination registers should be distinct"));
5020 else if (i
.reg_operands
== 1 && i
.mask
)
5022 if (i
.types
[1].bitfield
.regsimd
5023 && (i
.types
[1].bitfield
.xmmword
5024 || i
.types
[1].bitfield
.ymmword
5025 || i
.types
[1].bitfield
.zmmword
)
5026 && (register_number (i
.op
[1].regs
)
5027 == register_number (i
.index_reg
)))
5029 if (operand_check
== check_error
)
5031 i
.error
= invalid_vector_register_set
;
5034 if (operand_check
!= check_none
)
5035 as_warn (_("index and destination registers should be distinct"));
5040 /* Check if broadcast is supported by the instruction and is applied
5041 to the memory operand. */
5044 int broadcasted_opnd_size
;
5046 /* Check if specified broadcast is supported in this instruction,
5047 and it's applied to memory operand of DWORD or QWORD type,
5048 depending on VecESize. */
5049 if (i
.broadcast
->type
!= t
->opcode_modifier
.broadcast
5050 || !i
.types
[i
.broadcast
->operand
].bitfield
.mem
5051 || (t
->opcode_modifier
.vecesize
== 0
5052 && !i
.types
[i
.broadcast
->operand
].bitfield
.dword
5053 && !i
.types
[i
.broadcast
->operand
].bitfield
.unspecified
)
5054 || (t
->opcode_modifier
.vecesize
== 1
5055 && !i
.types
[i
.broadcast
->operand
].bitfield
.qword
5056 && !i
.types
[i
.broadcast
->operand
].bitfield
.unspecified
))
5059 broadcasted_opnd_size
= t
->opcode_modifier
.vecesize
? 64 : 32;
5060 if (i
.broadcast
->type
== BROADCAST_1TO16
)
5061 broadcasted_opnd_size
<<= 4; /* Broadcast 1to16. */
5062 else if (i
.broadcast
->type
== BROADCAST_1TO8
)
5063 broadcasted_opnd_size
<<= 3; /* Broadcast 1to8. */
5064 else if (i
.broadcast
->type
== BROADCAST_1TO4
)
5065 broadcasted_opnd_size
<<= 2; /* Broadcast 1to4. */
5066 else if (i
.broadcast
->type
== BROADCAST_1TO2
)
5067 broadcasted_opnd_size
<<= 1; /* Broadcast 1to2. */
5071 if ((broadcasted_opnd_size
== 256
5072 && !t
->operand_types
[i
.broadcast
->operand
].bitfield
.ymmword
)
5073 || (broadcasted_opnd_size
== 512
5074 && !t
->operand_types
[i
.broadcast
->operand
].bitfield
.zmmword
))
5077 i
.error
= unsupported_broadcast
;
5081 /* If broadcast is supported in this instruction, we need to check if
5082 operand of one-element size isn't specified without broadcast. */
5083 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
5085 /* Find memory operand. */
5086 for (op
= 0; op
< i
.operands
; op
++)
5087 if (operand_type_check (i
.types
[op
], anymem
))
5089 gas_assert (op
< i
.operands
);
5090 /* Check size of the memory operand. */
5091 if ((t
->opcode_modifier
.vecesize
== 0
5092 && i
.types
[op
].bitfield
.dword
)
5093 || (t
->opcode_modifier
.vecesize
== 1
5094 && i
.types
[op
].bitfield
.qword
))
5096 i
.error
= broadcast_needed
;
5101 /* Check if requested masking is supported. */
5103 && (!t
->opcode_modifier
.masking
5105 && t
->opcode_modifier
.masking
== MERGING_MASKING
)))
5107 i
.error
= unsupported_masking
;
5111 /* Check if masking is applied to dest operand. */
5112 if (i
.mask
&& (i
.mask
->operand
!= (int) (i
.operands
- 1)))
5114 i
.error
= mask_not_on_destination
;
5121 if ((i
.rounding
->type
!= saeonly
5122 && !t
->opcode_modifier
.staticrounding
)
5123 || (i
.rounding
->type
== saeonly
5124 && (t
->opcode_modifier
.staticrounding
5125 || !t
->opcode_modifier
.sae
)))
5127 i
.error
= unsupported_rc_sae
;
5130 /* If the instruction has several immediate operands and one of
5131 them is rounding, the rounding operand should be the last
5132 immediate operand. */
5133 if (i
.imm_operands
> 1
5134 && i
.rounding
->operand
!= (int) (i
.imm_operands
- 1))
5136 i
.error
= rc_sae_operand_not_last_imm
;
5141 /* Check vector Disp8 operand. */
5142 if (t
->opcode_modifier
.disp8memshift
5143 && i
.disp_encoding
!= disp_encoding_32bit
)
5146 i
.memshift
= t
->opcode_modifier
.vecesize
? 3 : 2;
5148 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
5150 for (op
= 0; op
< i
.operands
; op
++)
5151 if (operand_type_check (i
.types
[op
], disp
)
5152 && i
.op
[op
].disps
->X_op
== O_constant
)
5154 if (fits_in_disp8 (i
.op
[op
].disps
->X_add_number
))
5156 i
.types
[op
].bitfield
.disp8
= 1;
5159 i
.types
[op
].bitfield
.disp8
= 0;
5168 /* Check if operands are valid for the instruction. Update VEX
5172 VEX_check_operands (const insn_template
*t
)
5174 if (i
.vec_encoding
== vex_encoding_evex
)
5176 /* This instruction must be encoded with EVEX prefix. */
5177 if (!t
->opcode_modifier
.evex
)
5179 i
.error
= unsupported
;
5185 if (!t
->opcode_modifier
.vex
)
5187 /* This instruction template doesn't have VEX prefix. */
5188 if (i
.vec_encoding
!= vex_encoding_default
)
5190 i
.error
= unsupported
;
5196 /* Only check VEX_Imm4, which must be the first operand. */
5197 if (t
->operand_types
[0].bitfield
.vec_imm4
)
5199 if (i
.op
[0].imms
->X_op
!= O_constant
5200 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
5206 /* Turn off Imm8 so that update_imm won't complain. */
5207 i
.types
[0] = vec_imm4
;
5213 static const insn_template
*
5214 match_template (char mnem_suffix
)
5216 /* Points to template once we've found it. */
5217 const insn_template
*t
;
5218 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
5219 i386_operand_type overlap4
;
5220 unsigned int found_reverse_match
;
5221 i386_opcode_modifier suffix_check
, mnemsuf_check
;
5222 i386_operand_type operand_types
[MAX_OPERANDS
];
5223 int addr_prefix_disp
;
5225 unsigned int found_cpu_match
;
5226 unsigned int check_register
;
5227 enum i386_error specific_error
= 0;
5229 #if MAX_OPERANDS != 5
5230 # error "MAX_OPERANDS must be 5."
5233 found_reverse_match
= 0;
5234 addr_prefix_disp
= -1;
5236 memset (&suffix_check
, 0, sizeof (suffix_check
));
5237 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
5238 suffix_check
.no_bsuf
= 1;
5239 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
5240 suffix_check
.no_wsuf
= 1;
5241 else if (i
.suffix
== SHORT_MNEM_SUFFIX
)
5242 suffix_check
.no_ssuf
= 1;
5243 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
5244 suffix_check
.no_lsuf
= 1;
5245 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
5246 suffix_check
.no_qsuf
= 1;
5247 else if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
5248 suffix_check
.no_ldsuf
= 1;
5250 memset (&mnemsuf_check
, 0, sizeof (mnemsuf_check
));
5253 switch (mnem_suffix
)
5255 case BYTE_MNEM_SUFFIX
: mnemsuf_check
.no_bsuf
= 1; break;
5256 case WORD_MNEM_SUFFIX
: mnemsuf_check
.no_wsuf
= 1; break;
5257 case SHORT_MNEM_SUFFIX
: mnemsuf_check
.no_ssuf
= 1; break;
5258 case LONG_MNEM_SUFFIX
: mnemsuf_check
.no_lsuf
= 1; break;
5259 case QWORD_MNEM_SUFFIX
: mnemsuf_check
.no_qsuf
= 1; break;
5263 /* Must have right number of operands. */
5264 i
.error
= number_of_operands_mismatch
;
5266 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
5268 addr_prefix_disp
= -1;
5270 if (i
.operands
!= t
->operands
)
5273 /* Check processor support. */
5274 i
.error
= unsupported
;
5275 found_cpu_match
= (cpu_flags_match (t
)
5276 == CPU_FLAGS_PERFECT_MATCH
);
5277 if (!found_cpu_match
)
5280 /* Check old gcc support. */
5281 i
.error
= old_gcc_only
;
5282 if (!old_gcc
&& t
->opcode_modifier
.oldgcc
)
5285 /* Check AT&T mnemonic. */
5286 i
.error
= unsupported_with_intel_mnemonic
;
5287 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
5290 /* Check AT&T/Intel syntax and Intel64/AMD64 ISA. */
5291 i
.error
= unsupported_syntax
;
5292 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
5293 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
)
5294 || (intel64
&& t
->opcode_modifier
.amd64
)
5295 || (!intel64
&& t
->opcode_modifier
.intel64
))
5298 /* Check the suffix, except for some instructions in intel mode. */
5299 i
.error
= invalid_instruction_suffix
;
5300 if ((!intel_syntax
|| !t
->opcode_modifier
.ignoresize
)
5301 && ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
5302 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
5303 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
5304 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
5305 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
5306 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
)))
5308 /* In Intel mode all mnemonic suffixes must be explicitly allowed. */
5309 if ((t
->opcode_modifier
.no_bsuf
&& mnemsuf_check
.no_bsuf
)
5310 || (t
->opcode_modifier
.no_wsuf
&& mnemsuf_check
.no_wsuf
)
5311 || (t
->opcode_modifier
.no_lsuf
&& mnemsuf_check
.no_lsuf
)
5312 || (t
->opcode_modifier
.no_ssuf
&& mnemsuf_check
.no_ssuf
)
5313 || (t
->opcode_modifier
.no_qsuf
&& mnemsuf_check
.no_qsuf
)
5314 || (t
->opcode_modifier
.no_ldsuf
&& mnemsuf_check
.no_ldsuf
))
5317 if (!operand_size_match (t
))
5320 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5321 operand_types
[j
] = t
->operand_types
[j
];
5323 /* In general, don't allow 64-bit operands in 32-bit mode. */
5324 if (i
.suffix
== QWORD_MNEM_SUFFIX
5325 && flag_code
!= CODE_64BIT
5327 ? (!t
->opcode_modifier
.ignoresize
5328 && !intel_float_operand (t
->name
))
5329 : intel_float_operand (t
->name
) != 2)
5330 && ((!operand_types
[0].bitfield
.regmmx
5331 && !operand_types
[0].bitfield
.regsimd
)
5332 || (!operand_types
[t
->operands
> 1].bitfield
.regmmx
5333 && !operand_types
[t
->operands
> 1].bitfield
.regsimd
))
5334 && (t
->base_opcode
!= 0x0fc7
5335 || t
->extension_opcode
!= 1 /* cmpxchg8b */))
5338 /* In general, don't allow 32-bit operands on pre-386. */
5339 else if (i
.suffix
== LONG_MNEM_SUFFIX
5340 && !cpu_arch_flags
.bitfield
.cpui386
5342 ? (!t
->opcode_modifier
.ignoresize
5343 && !intel_float_operand (t
->name
))
5344 : intel_float_operand (t
->name
) != 2)
5345 && ((!operand_types
[0].bitfield
.regmmx
5346 && !operand_types
[0].bitfield
.regsimd
)
5347 || (!operand_types
[t
->operands
> 1].bitfield
.regmmx
5348 && !operand_types
[t
->operands
> 1].bitfield
.regsimd
)))
5351 /* Do not verify operands when there are none. */
5355 /* We've found a match; break out of loop. */
5359 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
5360 into Disp32/Disp16/Disp32 operand. */
5361 if (i
.prefix
[ADDR_PREFIX
] != 0)
5363 /* There should be only one Disp operand. */
5367 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5369 if (operand_types
[j
].bitfield
.disp16
)
5371 addr_prefix_disp
= j
;
5372 operand_types
[j
].bitfield
.disp32
= 1;
5373 operand_types
[j
].bitfield
.disp16
= 0;
5379 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5381 if (operand_types
[j
].bitfield
.disp32
)
5383 addr_prefix_disp
= j
;
5384 operand_types
[j
].bitfield
.disp32
= 0;
5385 operand_types
[j
].bitfield
.disp16
= 1;
5391 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5393 if (operand_types
[j
].bitfield
.disp64
)
5395 addr_prefix_disp
= j
;
5396 operand_types
[j
].bitfield
.disp64
= 0;
5397 operand_types
[j
].bitfield
.disp32
= 1;
5405 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
5406 if (i
.reloc
[0] == BFD_RELOC_386_GOT32
&& t
->base_opcode
== 0xa0)
5409 /* We check register size if needed. */
5410 check_register
= t
->opcode_modifier
.checkregsize
;
5411 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
5412 switch (t
->operands
)
5415 if (!operand_type_match (overlap0
, i
.types
[0]))
5419 /* xchg %eax, %eax is a special case. It is an alias for nop
5420 only in 32bit mode and we can use opcode 0x90. In 64bit
5421 mode, we can't use 0x90 for xchg %eax, %eax since it should
5422 zero-extend %eax to %rax. */
5423 if (flag_code
== CODE_64BIT
5424 && t
->base_opcode
== 0x90
5425 && operand_type_equal (&i
.types
[0], &acc32
)
5426 && operand_type_equal (&i
.types
[1], &acc32
))
5428 /* If we want store form, we reverse direction of operands. */
5429 if (i
.dir_encoding
== dir_encoding_store
5430 && t
->opcode_modifier
.d
)
5435 /* If we want store form, we skip the current load. */
5436 if (i
.dir_encoding
== dir_encoding_store
5437 && i
.mem_operands
== 0
5438 && t
->opcode_modifier
.load
)
5443 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
5444 if (!operand_type_match (overlap0
, i
.types
[0])
5445 || !operand_type_match (overlap1
, i
.types
[1])
5447 && !operand_type_register_match (i
.types
[0],
5452 /* Check if other direction is valid ... */
5453 if (!t
->opcode_modifier
.d
)
5457 /* Try reversing direction of operands. */
5458 overlap0
= operand_type_and (i
.types
[0], operand_types
[1]);
5459 overlap1
= operand_type_and (i
.types
[1], operand_types
[0]);
5460 if (!operand_type_match (overlap0
, i
.types
[0])
5461 || !operand_type_match (overlap1
, i
.types
[1])
5463 && !operand_type_register_match (i
.types
[0],
5468 /* Does not match either direction. */
5471 /* found_reverse_match holds which of D or FloatR
5473 if (!t
->opcode_modifier
.d
)
5474 found_reverse_match
= 0;
5475 else if (operand_types
[0].bitfield
.tbyte
)
5476 found_reverse_match
= Opcode_FloatD
;
5478 found_reverse_match
= Opcode_D
;
5479 if (t
->opcode_modifier
.floatr
)
5480 found_reverse_match
|= Opcode_FloatR
;
5484 /* Found a forward 2 operand match here. */
5485 switch (t
->operands
)
5488 overlap4
= operand_type_and (i
.types
[4],
5492 overlap3
= operand_type_and (i
.types
[3],
5496 overlap2
= operand_type_and (i
.types
[2],
5501 switch (t
->operands
)
5504 if (!operand_type_match (overlap4
, i
.types
[4])
5505 || !operand_type_register_match (i
.types
[3],
5512 if (!operand_type_match (overlap3
, i
.types
[3])
5514 && !operand_type_register_match (i
.types
[2],
5521 /* Here we make use of the fact that there are no
5522 reverse match 3 operand instructions, and all 3
5523 operand instructions only need to be checked for
5524 register consistency between operands 2 and 3. */
5525 if (!operand_type_match (overlap2
, i
.types
[2])
5527 && !operand_type_register_match (i
.types
[1],
5535 /* Found either forward/reverse 2, 3 or 4 operand match here:
5536 slip through to break. */
5538 if (!found_cpu_match
)
5540 found_reverse_match
= 0;
5544 /* Check if vector and VEX operands are valid. */
5545 if (check_VecOperands (t
) || VEX_check_operands (t
))
5547 specific_error
= i
.error
;
5551 /* We've found a match; break out of loop. */
5555 if (t
== current_templates
->end
)
5557 /* We found no match. */
5558 const char *err_msg
;
5559 switch (specific_error
? specific_error
: i
.error
)
5563 case operand_size_mismatch
:
5564 err_msg
= _("operand size mismatch");
5566 case operand_type_mismatch
:
5567 err_msg
= _("operand type mismatch");
5569 case register_type_mismatch
:
5570 err_msg
= _("register type mismatch");
5572 case number_of_operands_mismatch
:
5573 err_msg
= _("number of operands mismatch");
5575 case invalid_instruction_suffix
:
5576 err_msg
= _("invalid instruction suffix");
5579 err_msg
= _("constant doesn't fit in 4 bits");
5582 err_msg
= _("only supported with old gcc");
5584 case unsupported_with_intel_mnemonic
:
5585 err_msg
= _("unsupported with Intel mnemonic");
5587 case unsupported_syntax
:
5588 err_msg
= _("unsupported syntax");
5591 as_bad (_("unsupported instruction `%s'"),
5592 current_templates
->start
->name
);
5594 case invalid_vsib_address
:
5595 err_msg
= _("invalid VSIB address");
5597 case invalid_vector_register_set
:
5598 err_msg
= _("mask, index, and destination registers must be distinct");
5600 case unsupported_vector_index_register
:
5601 err_msg
= _("unsupported vector index register");
5603 case unsupported_broadcast
:
5604 err_msg
= _("unsupported broadcast");
5606 case broadcast_not_on_src_operand
:
5607 err_msg
= _("broadcast not on source memory operand");
5609 case broadcast_needed
:
5610 err_msg
= _("broadcast is needed for operand of such type");
5612 case unsupported_masking
:
5613 err_msg
= _("unsupported masking");
5615 case mask_not_on_destination
:
5616 err_msg
= _("mask not on destination operand");
5618 case no_default_mask
:
5619 err_msg
= _("default mask isn't allowed");
5621 case unsupported_rc_sae
:
5622 err_msg
= _("unsupported static rounding/sae");
5624 case rc_sae_operand_not_last_imm
:
5626 err_msg
= _("RC/SAE operand must precede immediate operands");
5628 err_msg
= _("RC/SAE operand must follow immediate operands");
5630 case invalid_register_operand
:
5631 err_msg
= _("invalid register operand");
5634 as_bad (_("%s for `%s'"), err_msg
,
5635 current_templates
->start
->name
);
5639 if (!quiet_warnings
)
5642 && (i
.types
[0].bitfield
.jumpabsolute
5643 != operand_types
[0].bitfield
.jumpabsolute
))
5645 as_warn (_("indirect %s without `*'"), t
->name
);
5648 if (t
->opcode_modifier
.isprefix
5649 && t
->opcode_modifier
.ignoresize
)
5651 /* Warn them that a data or address size prefix doesn't
5652 affect assembly of the next line of code. */
5653 as_warn (_("stand-alone `%s' prefix"), t
->name
);
5657 /* Copy the template we found. */
5660 if (addr_prefix_disp
!= -1)
5661 i
.tm
.operand_types
[addr_prefix_disp
]
5662 = operand_types
[addr_prefix_disp
];
5664 if (found_reverse_match
)
5666 /* If we found a reverse match we must alter the opcode
5667 direction bit. found_reverse_match holds bits to change
5668 (different for int & float insns). */
5670 i
.tm
.base_opcode
^= found_reverse_match
;
5672 i
.tm
.operand_types
[0] = operand_types
[1];
5673 i
.tm
.operand_types
[1] = operand_types
[0];
5682 int mem_op
= operand_type_check (i
.types
[0], anymem
) ? 0 : 1;
5683 if (i
.tm
.operand_types
[mem_op
].bitfield
.esseg
)
5685 if (i
.seg
[0] != NULL
&& i
.seg
[0] != &es
)
5687 as_bad (_("`%s' operand %d must use `%ses' segment"),
5693 /* There's only ever one segment override allowed per instruction.
5694 This instruction possibly has a legal segment override on the
5695 second operand, so copy the segment to where non-string
5696 instructions store it, allowing common code. */
5697 i
.seg
[0] = i
.seg
[1];
5699 else if (i
.tm
.operand_types
[mem_op
+ 1].bitfield
.esseg
)
5701 if (i
.seg
[1] != NULL
&& i
.seg
[1] != &es
)
5703 as_bad (_("`%s' operand %d must use `%ses' segment"),
5714 process_suffix (void)
5716 /* If matched instruction specifies an explicit instruction mnemonic
5718 if (i
.tm
.opcode_modifier
.size16
)
5719 i
.suffix
= WORD_MNEM_SUFFIX
;
5720 else if (i
.tm
.opcode_modifier
.size32
)
5721 i
.suffix
= LONG_MNEM_SUFFIX
;
5722 else if (i
.tm
.opcode_modifier
.size64
)
5723 i
.suffix
= QWORD_MNEM_SUFFIX
;
5724 else if (i
.reg_operands
)
5726 /* If there's no instruction mnemonic suffix we try to invent one
5727 based on register operands. */
5730 /* We take i.suffix from the last register operand specified,
5731 Destination register type is more significant than source
5732 register type. crc32 in SSE4.2 prefers source register
5734 if (i
.tm
.base_opcode
== 0xf20f38f1)
5736 if (i
.types
[0].bitfield
.reg
&& i
.types
[0].bitfield
.word
)
5737 i
.suffix
= WORD_MNEM_SUFFIX
;
5738 else if (i
.types
[0].bitfield
.reg
&& i
.types
[0].bitfield
.dword
)
5739 i
.suffix
= LONG_MNEM_SUFFIX
;
5740 else if (i
.types
[0].bitfield
.reg
&& i
.types
[0].bitfield
.qword
)
5741 i
.suffix
= QWORD_MNEM_SUFFIX
;
5743 else if (i
.tm
.base_opcode
== 0xf20f38f0)
5745 if (i
.types
[0].bitfield
.reg
&& i
.types
[0].bitfield
.byte
)
5746 i
.suffix
= BYTE_MNEM_SUFFIX
;
5753 if (i
.tm
.base_opcode
== 0xf20f38f1
5754 || i
.tm
.base_opcode
== 0xf20f38f0)
5756 /* We have to know the operand size for crc32. */
5757 as_bad (_("ambiguous memory operand size for `%s`"),
5762 for (op
= i
.operands
; --op
>= 0;)
5763 if (!i
.tm
.operand_types
[op
].bitfield
.inoutportreg
5764 && !i
.tm
.operand_types
[op
].bitfield
.shiftcount
)
5766 if (i
.types
[op
].bitfield
.reg
&& i
.types
[op
].bitfield
.byte
)
5768 i
.suffix
= BYTE_MNEM_SUFFIX
;
5771 if (i
.types
[op
].bitfield
.reg
&& i
.types
[op
].bitfield
.word
)
5773 i
.suffix
= WORD_MNEM_SUFFIX
;
5776 if (i
.types
[op
].bitfield
.reg
&& i
.types
[op
].bitfield
.dword
)
5778 i
.suffix
= LONG_MNEM_SUFFIX
;
5781 if (i
.types
[op
].bitfield
.reg
&& i
.types
[op
].bitfield
.qword
)
5783 i
.suffix
= QWORD_MNEM_SUFFIX
;
5789 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
5792 && i
.tm
.opcode_modifier
.ignoresize
5793 && i
.tm
.opcode_modifier
.no_bsuf
)
5795 else if (!check_byte_reg ())
5798 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
5801 && i
.tm
.opcode_modifier
.ignoresize
5802 && i
.tm
.opcode_modifier
.no_lsuf
)
5804 else if (!check_long_reg ())
5807 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
5810 && i
.tm
.opcode_modifier
.ignoresize
5811 && i
.tm
.opcode_modifier
.no_qsuf
)
5813 else if (!check_qword_reg ())
5816 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
5819 && i
.tm
.opcode_modifier
.ignoresize
5820 && i
.tm
.opcode_modifier
.no_wsuf
)
5822 else if (!check_word_reg ())
5825 else if (i
.suffix
== XMMWORD_MNEM_SUFFIX
5826 || i
.suffix
== YMMWORD_MNEM_SUFFIX
5827 || i
.suffix
== ZMMWORD_MNEM_SUFFIX
)
5829 /* Skip if the instruction has x/y/z suffix. match_template
5830 should check if it is a valid suffix. */
5832 else if (intel_syntax
&& i
.tm
.opcode_modifier
.ignoresize
)
5833 /* Do nothing if the instruction is going to ignore the prefix. */
5838 else if (i
.tm
.opcode_modifier
.defaultsize
5840 /* exclude fldenv/frstor/fsave/fstenv */
5841 && i
.tm
.opcode_modifier
.no_ssuf
)
5843 i
.suffix
= stackop_size
;
5845 else if (intel_syntax
5847 && (i
.tm
.operand_types
[0].bitfield
.jumpabsolute
5848 || i
.tm
.opcode_modifier
.jumpbyte
5849 || i
.tm
.opcode_modifier
.jumpintersegment
5850 || (i
.tm
.base_opcode
== 0x0f01 /* [ls][gi]dt */
5851 && i
.tm
.extension_opcode
<= 3)))
5856 if (!i
.tm
.opcode_modifier
.no_qsuf
)
5858 i
.suffix
= QWORD_MNEM_SUFFIX
;
5863 if (!i
.tm
.opcode_modifier
.no_lsuf
)
5864 i
.suffix
= LONG_MNEM_SUFFIX
;
5867 if (!i
.tm
.opcode_modifier
.no_wsuf
)
5868 i
.suffix
= WORD_MNEM_SUFFIX
;
5877 if (i
.tm
.opcode_modifier
.w
)
5879 as_bad (_("no instruction mnemonic suffix given and "
5880 "no register operands; can't size instruction"));
5886 unsigned int suffixes
;
5888 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
5889 if (!i
.tm
.opcode_modifier
.no_wsuf
)
5891 if (!i
.tm
.opcode_modifier
.no_lsuf
)
5893 if (!i
.tm
.opcode_modifier
.no_ldsuf
)
5895 if (!i
.tm
.opcode_modifier
.no_ssuf
)
5897 if (flag_code
== CODE_64BIT
&& !i
.tm
.opcode_modifier
.no_qsuf
)
5900 /* There are more than suffix matches. */
5901 if (i
.tm
.opcode_modifier
.w
5902 || ((suffixes
& (suffixes
- 1))
5903 && !i
.tm
.opcode_modifier
.defaultsize
5904 && !i
.tm
.opcode_modifier
.ignoresize
))
5906 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
5912 /* Change the opcode based on the operand size given by i.suffix;
5913 We don't need to change things for byte insns. */
5916 && i
.suffix
!= BYTE_MNEM_SUFFIX
5917 && i
.suffix
!= XMMWORD_MNEM_SUFFIX
5918 && i
.suffix
!= YMMWORD_MNEM_SUFFIX
5919 && i
.suffix
!= ZMMWORD_MNEM_SUFFIX
)
5921 /* It's not a byte, select word/dword operation. */
5922 if (i
.tm
.opcode_modifier
.w
)
5924 if (i
.tm
.opcode_modifier
.shortform
)
5925 i
.tm
.base_opcode
|= 8;
5927 i
.tm
.base_opcode
|= 1;
5930 /* Now select between word & dword operations via the operand
5931 size prefix, except for instructions that will ignore this
5933 if (i
.tm
.opcode_modifier
.addrprefixop0
)
5935 /* The address size override prefix changes the size of the
5937 if ((flag_code
== CODE_32BIT
5938 && i
.op
->regs
[0].reg_type
.bitfield
.word
)
5939 || (flag_code
!= CODE_32BIT
5940 && i
.op
->regs
[0].reg_type
.bitfield
.dword
))
5941 if (!add_prefix (ADDR_PREFIX_OPCODE
))
5944 else if (i
.suffix
!= QWORD_MNEM_SUFFIX
5945 && i
.suffix
!= LONG_DOUBLE_MNEM_SUFFIX
5946 && !i
.tm
.opcode_modifier
.ignoresize
5947 && !i
.tm
.opcode_modifier
.floatmf
5948 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
5949 || (flag_code
== CODE_64BIT
5950 && i
.tm
.opcode_modifier
.jumpbyte
)))
5952 unsigned int prefix
= DATA_PREFIX_OPCODE
;
5954 if (i
.tm
.opcode_modifier
.jumpbyte
) /* jcxz, loop */
5955 prefix
= ADDR_PREFIX_OPCODE
;
5957 if (!add_prefix (prefix
))
5961 /* Set mode64 for an operand. */
5962 if (i
.suffix
== QWORD_MNEM_SUFFIX
5963 && flag_code
== CODE_64BIT
5964 && !i
.tm
.opcode_modifier
.norex64
)
5966 /* Special case for xchg %rax,%rax. It is NOP and doesn't
5967 need rex64. cmpxchg8b is also a special case. */
5968 if (! (i
.operands
== 2
5969 && i
.tm
.base_opcode
== 0x90
5970 && i
.tm
.extension_opcode
== None
5971 && operand_type_equal (&i
.types
[0], &acc64
)
5972 && operand_type_equal (&i
.types
[1], &acc64
))
5973 && ! (i
.operands
== 1
5974 && i
.tm
.base_opcode
== 0xfc7
5975 && i
.tm
.extension_opcode
== 1
5976 && !operand_type_check (i
.types
[0], reg
)
5977 && operand_type_check (i
.types
[0], anymem
)))
5981 /* Size floating point instruction. */
5982 if (i
.suffix
== LONG_MNEM_SUFFIX
)
5983 if (i
.tm
.opcode_modifier
.floatmf
)
5984 i
.tm
.base_opcode
^= 4;
5991 check_byte_reg (void)
5995 for (op
= i
.operands
; --op
>= 0;)
5997 /* Skip non-register operands. */
5998 if (!i
.types
[op
].bitfield
.reg
)
6001 /* If this is an eight bit register, it's OK. If it's the 16 or
6002 32 bit version of an eight bit register, we will just use the
6003 low portion, and that's OK too. */
6004 if (i
.types
[op
].bitfield
.byte
)
6007 /* I/O port address operands are OK too. */
6008 if (i
.tm
.operand_types
[op
].bitfield
.inoutportreg
)
6011 /* crc32 doesn't generate this warning. */
6012 if (i
.tm
.base_opcode
== 0xf20f38f0)
6015 if ((i
.types
[op
].bitfield
.word
6016 || i
.types
[op
].bitfield
.dword
6017 || i
.types
[op
].bitfield
.qword
)
6018 && i
.op
[op
].regs
->reg_num
< 4
6019 /* Prohibit these changes in 64bit mode, since the lowering
6020 would be more complicated. */
6021 && flag_code
!= CODE_64BIT
)
6023 #if REGISTER_WARNINGS
6024 if (!quiet_warnings
)
6025 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
6027 (i
.op
[op
].regs
+ (i
.types
[op
].bitfield
.word
6028 ? REGNAM_AL
- REGNAM_AX
6029 : REGNAM_AL
- REGNAM_EAX
))->reg_name
,
6031 i
.op
[op
].regs
->reg_name
,
6036 /* Any other register is bad. */
6037 if (i
.types
[op
].bitfield
.reg
6038 || i
.types
[op
].bitfield
.regmmx
6039 || i
.types
[op
].bitfield
.regsimd
6040 || i
.types
[op
].bitfield
.sreg2
6041 || i
.types
[op
].bitfield
.sreg3
6042 || i
.types
[op
].bitfield
.control
6043 || i
.types
[op
].bitfield
.debug
6044 || i
.types
[op
].bitfield
.test
)
6046 as_bad (_("`%s%s' not allowed with `%s%c'"),
6048 i
.op
[op
].regs
->reg_name
,
6058 check_long_reg (void)
6062 for (op
= i
.operands
; --op
>= 0;)
6063 /* Skip non-register operands. */
6064 if (!i
.types
[op
].bitfield
.reg
)
6066 /* Reject eight bit registers, except where the template requires
6067 them. (eg. movzb) */
6068 else if (i
.types
[op
].bitfield
.byte
6069 && (i
.tm
.operand_types
[op
].bitfield
.reg
6070 || i
.tm
.operand_types
[op
].bitfield
.acc
)
6071 && (i
.tm
.operand_types
[op
].bitfield
.word
6072 || i
.tm
.operand_types
[op
].bitfield
.dword
))
6074 as_bad (_("`%s%s' not allowed with `%s%c'"),
6076 i
.op
[op
].regs
->reg_name
,
6081 /* Warn if the e prefix on a general reg is missing. */
6082 else if ((!quiet_warnings
|| flag_code
== CODE_64BIT
)
6083 && i
.types
[op
].bitfield
.word
6084 && (i
.tm
.operand_types
[op
].bitfield
.reg
6085 || i
.tm
.operand_types
[op
].bitfield
.acc
)
6086 && i
.tm
.operand_types
[op
].bitfield
.dword
)
6088 /* Prohibit these changes in the 64bit mode, since the
6089 lowering is more complicated. */
6090 if (flag_code
== CODE_64BIT
)
6092 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6093 register_prefix
, i
.op
[op
].regs
->reg_name
,
6097 #if REGISTER_WARNINGS
6098 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
6100 (i
.op
[op
].regs
+ REGNAM_EAX
- REGNAM_AX
)->reg_name
,
6101 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
6104 /* Warn if the r prefix on a general reg is present. */
6105 else if (i
.types
[op
].bitfield
.qword
6106 && (i
.tm
.operand_types
[op
].bitfield
.reg
6107 || i
.tm
.operand_types
[op
].bitfield
.acc
)
6108 && i
.tm
.operand_types
[op
].bitfield
.dword
)
6111 && i
.tm
.opcode_modifier
.toqword
6112 && !i
.types
[0].bitfield
.regsimd
)
6114 /* Convert to QWORD. We want REX byte. */
6115 i
.suffix
= QWORD_MNEM_SUFFIX
;
6119 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6120 register_prefix
, i
.op
[op
].regs
->reg_name
,
6129 check_qword_reg (void)
6133 for (op
= i
.operands
; --op
>= 0; )
6134 /* Skip non-register operands. */
6135 if (!i
.types
[op
].bitfield
.reg
)
6137 /* Reject eight bit registers, except where the template requires
6138 them. (eg. movzb) */
6139 else if (i
.types
[op
].bitfield
.byte
6140 && (i
.tm
.operand_types
[op
].bitfield
.reg
6141 || i
.tm
.operand_types
[op
].bitfield
.acc
)
6142 && (i
.tm
.operand_types
[op
].bitfield
.word
6143 || i
.tm
.operand_types
[op
].bitfield
.dword
))
6145 as_bad (_("`%s%s' not allowed with `%s%c'"),
6147 i
.op
[op
].regs
->reg_name
,
6152 /* Warn if the r prefix on a general reg is missing. */
6153 else if ((i
.types
[op
].bitfield
.word
6154 || i
.types
[op
].bitfield
.dword
)
6155 && (i
.tm
.operand_types
[op
].bitfield
.reg
6156 || i
.tm
.operand_types
[op
].bitfield
.acc
)
6157 && i
.tm
.operand_types
[op
].bitfield
.qword
)
6159 /* Prohibit these changes in the 64bit mode, since the
6160 lowering is more complicated. */
6162 && i
.tm
.opcode_modifier
.todword
6163 && !i
.types
[0].bitfield
.regsimd
)
6165 /* Convert to DWORD. We don't want REX byte. */
6166 i
.suffix
= LONG_MNEM_SUFFIX
;
6170 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6171 register_prefix
, i
.op
[op
].regs
->reg_name
,
6180 check_word_reg (void)
6183 for (op
= i
.operands
; --op
>= 0;)
6184 /* Skip non-register operands. */
6185 if (!i
.types
[op
].bitfield
.reg
)
6187 /* Reject eight bit registers, except where the template requires
6188 them. (eg. movzb) */
6189 else if (i
.types
[op
].bitfield
.byte
6190 && (i
.tm
.operand_types
[op
].bitfield
.reg
6191 || i
.tm
.operand_types
[op
].bitfield
.acc
)
6192 && (i
.tm
.operand_types
[op
].bitfield
.word
6193 || i
.tm
.operand_types
[op
].bitfield
.dword
))
6195 as_bad (_("`%s%s' not allowed with `%s%c'"),
6197 i
.op
[op
].regs
->reg_name
,
6202 /* Warn if the e or r prefix on a general reg is present. */
6203 else if ((!quiet_warnings
|| flag_code
== CODE_64BIT
)
6204 && (i
.types
[op
].bitfield
.dword
6205 || i
.types
[op
].bitfield
.qword
)
6206 && (i
.tm
.operand_types
[op
].bitfield
.reg
6207 || i
.tm
.operand_types
[op
].bitfield
.acc
)
6208 && i
.tm
.operand_types
[op
].bitfield
.word
)
6210 /* Prohibit these changes in the 64bit mode, since the
6211 lowering is more complicated. */
6212 if (flag_code
== CODE_64BIT
)
6214 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6215 register_prefix
, i
.op
[op
].regs
->reg_name
,
6219 #if REGISTER_WARNINGS
6220 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
6222 (i
.op
[op
].regs
+ REGNAM_AX
- REGNAM_EAX
)->reg_name
,
6223 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
6230 update_imm (unsigned int j
)
6232 i386_operand_type overlap
= i
.types
[j
];
6233 if ((overlap
.bitfield
.imm8
6234 || overlap
.bitfield
.imm8s
6235 || overlap
.bitfield
.imm16
6236 || overlap
.bitfield
.imm32
6237 || overlap
.bitfield
.imm32s
6238 || overlap
.bitfield
.imm64
)
6239 && !operand_type_equal (&overlap
, &imm8
)
6240 && !operand_type_equal (&overlap
, &imm8s
)
6241 && !operand_type_equal (&overlap
, &imm16
)
6242 && !operand_type_equal (&overlap
, &imm32
)
6243 && !operand_type_equal (&overlap
, &imm32s
)
6244 && !operand_type_equal (&overlap
, &imm64
))
6248 i386_operand_type temp
;
6250 operand_type_set (&temp
, 0);
6251 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
6253 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
6254 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
6256 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
6257 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
6258 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
6260 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
6261 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
6264 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
6267 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
6268 || operand_type_equal (&overlap
, &imm16_32
)
6269 || operand_type_equal (&overlap
, &imm16_32s
))
6271 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
6276 if (!operand_type_equal (&overlap
, &imm8
)
6277 && !operand_type_equal (&overlap
, &imm8s
)
6278 && !operand_type_equal (&overlap
, &imm16
)
6279 && !operand_type_equal (&overlap
, &imm32
)
6280 && !operand_type_equal (&overlap
, &imm32s
)
6281 && !operand_type_equal (&overlap
, &imm64
))
6283 as_bad (_("no instruction mnemonic suffix given; "
6284 "can't determine immediate size"));
6288 i
.types
[j
] = overlap
;
6298 /* Update the first 2 immediate operands. */
6299 n
= i
.operands
> 2 ? 2 : i
.operands
;
6302 for (j
= 0; j
< n
; j
++)
6303 if (update_imm (j
) == 0)
6306 /* The 3rd operand can't be immediate operand. */
6307 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
6314 process_operands (void)
6316 /* Default segment register this instruction will use for memory
6317 accesses. 0 means unknown. This is only for optimizing out
6318 unnecessary segment overrides. */
6319 const seg_entry
*default_seg
= 0;
6321 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
6323 unsigned int dupl
= i
.operands
;
6324 unsigned int dest
= dupl
- 1;
6327 /* The destination must be an xmm register. */
6328 gas_assert (i
.reg_operands
6329 && MAX_OPERANDS
> dupl
6330 && operand_type_equal (&i
.types
[dest
], ®xmm
));
6332 if (i
.tm
.operand_types
[0].bitfield
.acc
6333 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
6335 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
6337 /* Keep xmm0 for instructions with VEX prefix and 3
6339 i
.tm
.operand_types
[0].bitfield
.acc
= 0;
6340 i
.tm
.operand_types
[0].bitfield
.regsimd
= 1;
6345 /* We remove the first xmm0 and keep the number of
6346 operands unchanged, which in fact duplicates the
6348 for (j
= 1; j
< i
.operands
; j
++)
6350 i
.op
[j
- 1] = i
.op
[j
];
6351 i
.types
[j
- 1] = i
.types
[j
];
6352 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
6356 else if (i
.tm
.opcode_modifier
.implicit1stxmm0
)
6358 gas_assert ((MAX_OPERANDS
- 1) > dupl
6359 && (i
.tm
.opcode_modifier
.vexsources
6362 /* Add the implicit xmm0 for instructions with VEX prefix
6364 for (j
= i
.operands
; j
> 0; j
--)
6366 i
.op
[j
] = i
.op
[j
- 1];
6367 i
.types
[j
] = i
.types
[j
- 1];
6368 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
6371 = (const reg_entry
*) hash_find (reg_hash
, "xmm0");
6372 i
.types
[0] = regxmm
;
6373 i
.tm
.operand_types
[0] = regxmm
;
6376 i
.reg_operands
+= 2;
6381 i
.op
[dupl
] = i
.op
[dest
];
6382 i
.types
[dupl
] = i
.types
[dest
];
6383 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
6392 i
.op
[dupl
] = i
.op
[dest
];
6393 i
.types
[dupl
] = i
.types
[dest
];
6394 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
6397 if (i
.tm
.opcode_modifier
.immext
)
6400 else if (i
.tm
.operand_types
[0].bitfield
.acc
6401 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
6405 for (j
= 1; j
< i
.operands
; j
++)
6407 i
.op
[j
- 1] = i
.op
[j
];
6408 i
.types
[j
- 1] = i
.types
[j
];
6410 /* We need to adjust fields in i.tm since they are used by
6411 build_modrm_byte. */
6412 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
6419 else if (i
.tm
.opcode_modifier
.implicitquadgroup
)
6421 unsigned int regnum
, first_reg_in_group
, last_reg_in_group
;
6423 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
6424 gas_assert (i
.operands
>= 2 && i
.types
[1].bitfield
.regsimd
);
6425 regnum
= register_number (i
.op
[1].regs
);
6426 first_reg_in_group
= regnum
& ~3;
6427 last_reg_in_group
= first_reg_in_group
+ 3;
6428 if (regnum
!= first_reg_in_group
)
6429 as_warn (_("source register `%s%s' implicitly denotes"
6430 " `%s%.3s%u' to `%s%.3s%u' source group in `%s'"),
6431 register_prefix
, i
.op
[1].regs
->reg_name
,
6432 register_prefix
, i
.op
[1].regs
->reg_name
, first_reg_in_group
,
6433 register_prefix
, i
.op
[1].regs
->reg_name
, last_reg_in_group
,
6436 else if (i
.tm
.opcode_modifier
.regkludge
)
6438 /* The imul $imm, %reg instruction is converted into
6439 imul $imm, %reg, %reg, and the clr %reg instruction
6440 is converted into xor %reg, %reg. */
6442 unsigned int first_reg_op
;
6444 if (operand_type_check (i
.types
[0], reg
))
6448 /* Pretend we saw the extra register operand. */
6449 gas_assert (i
.reg_operands
== 1
6450 && i
.op
[first_reg_op
+ 1].regs
== 0);
6451 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
6452 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
6457 if (i
.tm
.opcode_modifier
.shortform
)
6459 if (i
.types
[0].bitfield
.sreg2
6460 || i
.types
[0].bitfield
.sreg3
)
6462 if (i
.tm
.base_opcode
== POP_SEG_SHORT
6463 && i
.op
[0].regs
->reg_num
== 1)
6465 as_bad (_("you can't `pop %scs'"), register_prefix
);
6468 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
6469 if ((i
.op
[0].regs
->reg_flags
& RegRex
) != 0)
6474 /* The register or float register operand is in operand
6478 if ((i
.types
[0].bitfield
.reg
&& i
.types
[0].bitfield
.tbyte
)
6479 || operand_type_check (i
.types
[0], reg
))
6483 /* Register goes in low 3 bits of opcode. */
6484 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
6485 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
6487 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
6489 /* Warn about some common errors, but press on regardless.
6490 The first case can be generated by gcc (<= 2.8.1). */
6491 if (i
.operands
== 2)
6493 /* Reversed arguments on faddp, fsubp, etc. */
6494 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
6495 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
6496 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
6500 /* Extraneous `l' suffix on fp insn. */
6501 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
6502 register_prefix
, i
.op
[0].regs
->reg_name
);
6507 else if (i
.tm
.opcode_modifier
.modrm
)
6509 /* The opcode is completed (modulo i.tm.extension_opcode which
6510 must be put into the modrm byte). Now, we make the modrm and
6511 index base bytes based on all the info we've collected. */
6513 default_seg
= build_modrm_byte ();
6515 else if ((i
.tm
.base_opcode
& ~0x3) == MOV_AX_DISP32
)
6519 else if (i
.tm
.opcode_modifier
.isstring
)
6521 /* For the string instructions that allow a segment override
6522 on one of their operands, the default segment is ds. */
6526 if (i
.tm
.base_opcode
== 0x8d /* lea */
6529 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
6531 /* If a segment was explicitly specified, and the specified segment
6532 is not the default, use an opcode prefix to select it. If we
6533 never figured out what the default segment is, then default_seg
6534 will be zero at this point, and the specified segment prefix will
6536 if ((i
.seg
[0]) && (i
.seg
[0] != default_seg
))
6538 if (!add_prefix (i
.seg
[0]->seg_prefix
))
6544 static const seg_entry
*
6545 build_modrm_byte (void)
6547 const seg_entry
*default_seg
= 0;
6548 unsigned int source
, dest
;
6551 /* The first operand of instructions with VEX prefix and 3 sources
6552 must be VEX_Imm4. */
6553 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
6556 unsigned int nds
, reg_slot
;
6559 if (i
.tm
.opcode_modifier
.veximmext
6560 && i
.tm
.opcode_modifier
.immext
)
6562 dest
= i
.operands
- 2;
6563 gas_assert (dest
== 3);
6566 dest
= i
.operands
- 1;
6569 /* There are 2 kinds of instructions:
6570 1. 5 operands: 4 register operands or 3 register operands
6571 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
6572 VexW0 or VexW1. The destination must be either XMM, YMM or
6574 2. 4 operands: 4 register operands or 3 register operands
6575 plus 1 memory operand, VexXDS, and VexImmExt */
6576 gas_assert ((i
.reg_operands
== 4
6577 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
6578 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
6579 && (i
.tm
.opcode_modifier
.veximmext
6580 || (i
.imm_operands
== 1
6581 && i
.types
[0].bitfield
.vec_imm4
6582 && (i
.tm
.opcode_modifier
.vexw
== VEXW0
6583 || i
.tm
.opcode_modifier
.vexw
== VEXW1
)
6584 && i
.tm
.operand_types
[dest
].bitfield
.regsimd
)));
6586 if (i
.imm_operands
== 0)
6588 /* When there is no immediate operand, generate an 8bit
6589 immediate operand to encode the first operand. */
6590 exp
= &im_expressions
[i
.imm_operands
++];
6591 i
.op
[i
.operands
].imms
= exp
;
6592 i
.types
[i
.operands
] = imm8
;
6594 /* If VexW1 is set, the first operand is the source and
6595 the second operand is encoded in the immediate operand. */
6596 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
6607 /* FMA swaps REG and NDS. */
6608 if (i
.tm
.cpu_flags
.bitfield
.cpufma
)
6616 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.regsimd
);
6617 exp
->X_op
= O_constant
;
6618 exp
->X_add_number
= register_number (i
.op
[reg_slot
].regs
) << 4;
6619 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
6623 unsigned int imm_slot
;
6625 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
6627 /* If VexW0 is set, the third operand is the source and
6628 the second operand is encoded in the immediate
6635 /* VexW1 is set, the second operand is the source and
6636 the third operand is encoded in the immediate
6642 if (i
.tm
.opcode_modifier
.immext
)
6644 /* When ImmExt is set, the immediate byte is the last
6646 imm_slot
= i
.operands
- 1;
6654 /* Turn on Imm8 so that output_imm will generate it. */
6655 i
.types
[imm_slot
].bitfield
.imm8
= 1;
6658 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.regsimd
);
6659 i
.op
[imm_slot
].imms
->X_add_number
6660 |= register_number (i
.op
[reg_slot
].regs
) << 4;
6661 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
6664 gas_assert (i
.tm
.operand_types
[nds
].bitfield
.regsimd
);
6665 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
6670 /* i.reg_operands MUST be the number of real register operands;
6671 implicit registers do not count. If there are 3 register
6672 operands, it must be a instruction with VexNDS. For a
6673 instruction with VexNDD, the destination register is encoded
6674 in VEX prefix. If there are 4 register operands, it must be
6675 a instruction with VEX prefix and 3 sources. */
6676 if (i
.mem_operands
== 0
6677 && ((i
.reg_operands
== 2
6678 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
6679 || (i
.reg_operands
== 3
6680 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
6681 || (i
.reg_operands
== 4 && vex_3_sources
)))
6689 /* When there are 3 operands, one of them may be immediate,
6690 which may be the first or the last operand. Otherwise,
6691 the first operand must be shift count register (cl) or it
6692 is an instruction with VexNDS. */
6693 gas_assert (i
.imm_operands
== 1
6694 || (i
.imm_operands
== 0
6695 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
6696 || i
.types
[0].bitfield
.shiftcount
)));
6697 if (operand_type_check (i
.types
[0], imm
)
6698 || i
.types
[0].bitfield
.shiftcount
)
6704 /* When there are 4 operands, the first two must be 8bit
6705 immediate operands. The source operand will be the 3rd
6708 For instructions with VexNDS, if the first operand
6709 an imm8, the source operand is the 2nd one. If the last
6710 operand is imm8, the source operand is the first one. */
6711 gas_assert ((i
.imm_operands
== 2
6712 && i
.types
[0].bitfield
.imm8
6713 && i
.types
[1].bitfield
.imm8
)
6714 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
6715 && i
.imm_operands
== 1
6716 && (i
.types
[0].bitfield
.imm8
6717 || i
.types
[i
.operands
- 1].bitfield
.imm8
6719 if (i
.imm_operands
== 2)
6723 if (i
.types
[0].bitfield
.imm8
)
6730 if (i
.tm
.opcode_modifier
.evex
)
6732 /* For EVEX instructions, when there are 5 operands, the
6733 first one must be immediate operand. If the second one
6734 is immediate operand, the source operand is the 3th
6735 one. If the last one is immediate operand, the source
6736 operand is the 2nd one. */
6737 gas_assert (i
.imm_operands
== 2
6738 && i
.tm
.opcode_modifier
.sae
6739 && operand_type_check (i
.types
[0], imm
));
6740 if (operand_type_check (i
.types
[1], imm
))
6742 else if (operand_type_check (i
.types
[4], imm
))
6756 /* RC/SAE operand could be between DEST and SRC. That happens
6757 when one operand is GPR and the other one is XMM/YMM/ZMM
6759 if (i
.rounding
&& i
.rounding
->operand
== (int) dest
)
6762 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
6764 /* For instructions with VexNDS, the register-only source
6765 operand must be a 32/64bit integer, XMM, YMM, ZMM, or mask
6766 register. It is encoded in VEX prefix. We need to
6767 clear RegMem bit before calling operand_type_equal. */
6769 i386_operand_type op
;
6772 /* Check register-only source operand when two source
6773 operands are swapped. */
6774 if (!i
.tm
.operand_types
[source
].bitfield
.baseindex
6775 && i
.tm
.operand_types
[dest
].bitfield
.baseindex
)
6783 op
= i
.tm
.operand_types
[vvvv
];
6784 op
.bitfield
.regmem
= 0;
6785 if ((dest
+ 1) >= i
.operands
6786 || ((!op
.bitfield
.reg
6787 || (!op
.bitfield
.dword
&& !op
.bitfield
.qword
))
6788 && !op
.bitfield
.regsimd
6789 && !operand_type_equal (&op
, ®mask
)))
6791 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
6797 /* One of the register operands will be encoded in the i.tm.reg
6798 field, the other in the combined i.tm.mode and i.tm.regmem
6799 fields. If no form of this instruction supports a memory
6800 destination operand, then we assume the source operand may
6801 sometimes be a memory operand and so we need to store the
6802 destination in the i.rm.reg field. */
6803 if (!i
.tm
.operand_types
[dest
].bitfield
.regmem
6804 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
6806 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
6807 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
6808 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
6810 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
6812 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
6814 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
6819 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
6820 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
6821 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
6823 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
6825 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
6827 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
6830 if (flag_code
!= CODE_64BIT
&& (i
.rex
& (REX_R
| REX_B
)))
6832 if (!i
.types
[0].bitfield
.control
6833 && !i
.types
[1].bitfield
.control
)
6835 i
.rex
&= ~(REX_R
| REX_B
);
6836 add_prefix (LOCK_PREFIX_OPCODE
);
6840 { /* If it's not 2 reg operands... */
6845 unsigned int fake_zero_displacement
= 0;
6848 for (op
= 0; op
< i
.operands
; op
++)
6849 if (operand_type_check (i
.types
[op
], anymem
))
6851 gas_assert (op
< i
.operands
);
6853 if (i
.tm
.opcode_modifier
.vecsib
)
6855 if (i
.index_reg
->reg_num
== RegEiz
6856 || i
.index_reg
->reg_num
== RegRiz
)
6859 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6862 i
.sib
.base
= NO_BASE_REGISTER
;
6863 i
.sib
.scale
= i
.log2_scale_factor
;
6864 i
.types
[op
].bitfield
.disp8
= 0;
6865 i
.types
[op
].bitfield
.disp16
= 0;
6866 i
.types
[op
].bitfield
.disp64
= 0;
6867 if (flag_code
!= CODE_64BIT
|| i
.prefix
[ADDR_PREFIX
])
6869 /* Must be 32 bit */
6870 i
.types
[op
].bitfield
.disp32
= 1;
6871 i
.types
[op
].bitfield
.disp32s
= 0;
6875 i
.types
[op
].bitfield
.disp32
= 0;
6876 i
.types
[op
].bitfield
.disp32s
= 1;
6879 i
.sib
.index
= i
.index_reg
->reg_num
;
6880 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
6882 if ((i
.index_reg
->reg_flags
& RegVRex
) != 0)
6888 if (i
.base_reg
== 0)
6891 if (!i
.disp_operands
)
6892 fake_zero_displacement
= 1;
6893 if (i
.index_reg
== 0)
6895 i386_operand_type newdisp
;
6897 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6898 /* Operand is just <disp> */
6899 if (flag_code
== CODE_64BIT
)
6901 /* 64bit mode overwrites the 32bit absolute
6902 addressing by RIP relative addressing and
6903 absolute addressing is encoded by one of the
6904 redundant SIB forms. */
6905 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6906 i
.sib
.base
= NO_BASE_REGISTER
;
6907 i
.sib
.index
= NO_INDEX_REGISTER
;
6908 newdisp
= (!i
.prefix
[ADDR_PREFIX
] ? disp32s
: disp32
);
6910 else if ((flag_code
== CODE_16BIT
)
6911 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
6913 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
6918 i
.rm
.regmem
= NO_BASE_REGISTER
;
6921 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
6922 i
.types
[op
] = operand_type_or (i
.types
[op
], newdisp
);
6924 else if (!i
.tm
.opcode_modifier
.vecsib
)
6926 /* !i.base_reg && i.index_reg */
6927 if (i
.index_reg
->reg_num
== RegEiz
6928 || i
.index_reg
->reg_num
== RegRiz
)
6929 i
.sib
.index
= NO_INDEX_REGISTER
;
6931 i
.sib
.index
= i
.index_reg
->reg_num
;
6932 i
.sib
.base
= NO_BASE_REGISTER
;
6933 i
.sib
.scale
= i
.log2_scale_factor
;
6934 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
6935 i
.types
[op
].bitfield
.disp8
= 0;
6936 i
.types
[op
].bitfield
.disp16
= 0;
6937 i
.types
[op
].bitfield
.disp64
= 0;
6938 if (flag_code
!= CODE_64BIT
|| i
.prefix
[ADDR_PREFIX
])
6940 /* Must be 32 bit */
6941 i
.types
[op
].bitfield
.disp32
= 1;
6942 i
.types
[op
].bitfield
.disp32s
= 0;
6946 i
.types
[op
].bitfield
.disp32
= 0;
6947 i
.types
[op
].bitfield
.disp32s
= 1;
6949 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
6953 /* RIP addressing for 64bit mode. */
6954 else if (i
.base_reg
->reg_num
== RegRip
||
6955 i
.base_reg
->reg_num
== RegEip
)
6957 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6958 i
.rm
.regmem
= NO_BASE_REGISTER
;
6959 i
.types
[op
].bitfield
.disp8
= 0;
6960 i
.types
[op
].bitfield
.disp16
= 0;
6961 i
.types
[op
].bitfield
.disp32
= 0;
6962 i
.types
[op
].bitfield
.disp32s
= 1;
6963 i
.types
[op
].bitfield
.disp64
= 0;
6964 i
.flags
[op
] |= Operand_PCrel
;
6965 if (! i
.disp_operands
)
6966 fake_zero_displacement
= 1;
6968 else if (i
.base_reg
->reg_type
.bitfield
.word
)
6970 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
6971 switch (i
.base_reg
->reg_num
)
6974 if (i
.index_reg
== 0)
6976 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
6977 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
6981 if (i
.index_reg
== 0)
6984 if (operand_type_check (i
.types
[op
], disp
) == 0)
6986 /* fake (%bp) into 0(%bp) */
6987 i
.types
[op
].bitfield
.disp8
= 1;
6988 fake_zero_displacement
= 1;
6991 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
6992 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
6994 default: /* (%si) -> 4 or (%di) -> 5 */
6995 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
6997 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
6999 else /* i.base_reg and 32/64 bit mode */
7001 if (flag_code
== CODE_64BIT
7002 && operand_type_check (i
.types
[op
], disp
))
7004 i
.types
[op
].bitfield
.disp16
= 0;
7005 i
.types
[op
].bitfield
.disp64
= 0;
7006 if (i
.prefix
[ADDR_PREFIX
] == 0)
7008 i
.types
[op
].bitfield
.disp32
= 0;
7009 i
.types
[op
].bitfield
.disp32s
= 1;
7013 i
.types
[op
].bitfield
.disp32
= 1;
7014 i
.types
[op
].bitfield
.disp32s
= 0;
7018 if (!i
.tm
.opcode_modifier
.vecsib
)
7019 i
.rm
.regmem
= i
.base_reg
->reg_num
;
7020 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
7022 i
.sib
.base
= i
.base_reg
->reg_num
;
7023 /* x86-64 ignores REX prefix bit here to avoid decoder
7025 if (!(i
.base_reg
->reg_flags
& RegRex
)
7026 && (i
.base_reg
->reg_num
== EBP_REG_NUM
7027 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
7029 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
7031 fake_zero_displacement
= 1;
7032 i
.types
[op
].bitfield
.disp8
= 1;
7034 i
.sib
.scale
= i
.log2_scale_factor
;
7035 if (i
.index_reg
== 0)
7037 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7038 /* <disp>(%esp) becomes two byte modrm with no index
7039 register. We've already stored the code for esp
7040 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
7041 Any base register besides %esp will not use the
7042 extra modrm byte. */
7043 i
.sib
.index
= NO_INDEX_REGISTER
;
7045 else if (!i
.tm
.opcode_modifier
.vecsib
)
7047 if (i
.index_reg
->reg_num
== RegEiz
7048 || i
.index_reg
->reg_num
== RegRiz
)
7049 i
.sib
.index
= NO_INDEX_REGISTER
;
7051 i
.sib
.index
= i
.index_reg
->reg_num
;
7052 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7053 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
7058 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
7059 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
7063 if (!fake_zero_displacement
7067 fake_zero_displacement
= 1;
7068 if (i
.disp_encoding
== disp_encoding_8bit
)
7069 i
.types
[op
].bitfield
.disp8
= 1;
7071 i
.types
[op
].bitfield
.disp32
= 1;
7073 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
7077 if (fake_zero_displacement
)
7079 /* Fakes a zero displacement assuming that i.types[op]
7080 holds the correct displacement size. */
7083 gas_assert (i
.op
[op
].disps
== 0);
7084 exp
= &disp_expressions
[i
.disp_operands
++];
7085 i
.op
[op
].disps
= exp
;
7086 exp
->X_op
= O_constant
;
7087 exp
->X_add_number
= 0;
7088 exp
->X_add_symbol
= (symbolS
*) 0;
7089 exp
->X_op_symbol
= (symbolS
*) 0;
7097 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
7099 if (operand_type_check (i
.types
[0], imm
))
7100 i
.vex
.register_specifier
= NULL
;
7103 /* VEX.vvvv encodes one of the sources when the first
7104 operand is not an immediate. */
7105 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
7106 i
.vex
.register_specifier
= i
.op
[0].regs
;
7108 i
.vex
.register_specifier
= i
.op
[1].regs
;
7111 /* Destination is a XMM register encoded in the ModRM.reg
7113 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
7114 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
7117 /* ModRM.rm and VEX.B encodes the other source. */
7118 if (!i
.mem_operands
)
7122 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
7123 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
7125 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
7127 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
7131 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
7133 i
.vex
.register_specifier
= i
.op
[2].regs
;
7134 if (!i
.mem_operands
)
7137 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
7138 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
7142 /* Fill in i.rm.reg or i.rm.regmem field with register operand
7143 (if any) based on i.tm.extension_opcode. Again, we must be
7144 careful to make sure that segment/control/debug/test/MMX
7145 registers are coded into the i.rm.reg field. */
7146 else if (i
.reg_operands
)
7149 unsigned int vex_reg
= ~0;
7151 for (op
= 0; op
< i
.operands
; op
++)
7152 if (i
.types
[op
].bitfield
.reg
7153 || i
.types
[op
].bitfield
.regmmx
7154 || i
.types
[op
].bitfield
.regsimd
7155 || i
.types
[op
].bitfield
.regbnd
7156 || i
.types
[op
].bitfield
.regmask
7157 || i
.types
[op
].bitfield
.sreg2
7158 || i
.types
[op
].bitfield
.sreg3
7159 || i
.types
[op
].bitfield
.control
7160 || i
.types
[op
].bitfield
.debug
7161 || i
.types
[op
].bitfield
.test
)
7166 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7168 /* For instructions with VexNDS, the register-only
7169 source operand is encoded in VEX prefix. */
7170 gas_assert (mem
!= (unsigned int) ~0);
7175 gas_assert (op
< i
.operands
);
7179 /* Check register-only source operand when two source
7180 operands are swapped. */
7181 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
7182 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
7186 gas_assert (mem
== (vex_reg
+ 1)
7187 && op
< i
.operands
);
7192 gas_assert (vex_reg
< i
.operands
);
7196 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
7198 /* For instructions with VexNDD, the register destination
7199 is encoded in VEX prefix. */
7200 if (i
.mem_operands
== 0)
7202 /* There is no memory operand. */
7203 gas_assert ((op
+ 2) == i
.operands
);
7208 /* There are only 2 operands. */
7209 gas_assert (op
< 2 && i
.operands
== 2);
7214 gas_assert (op
< i
.operands
);
7216 if (vex_reg
!= (unsigned int) ~0)
7218 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
7220 if ((!type
->bitfield
.reg
7221 || (!type
->bitfield
.dword
&& !type
->bitfield
.qword
))
7222 && !type
->bitfield
.regsimd
7223 && !operand_type_equal (type
, ®mask
))
7226 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
7229 /* Don't set OP operand twice. */
7232 /* If there is an extension opcode to put here, the
7233 register number must be put into the regmem field. */
7234 if (i
.tm
.extension_opcode
!= None
)
7236 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
7237 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7239 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
7244 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
7245 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7247 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
7252 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
7253 must set it to 3 to indicate this is a register operand
7254 in the regmem field. */
7255 if (!i
.mem_operands
)
7259 /* Fill in i.rm.reg field with extension opcode (if any). */
7260 if (i
.tm
.extension_opcode
!= None
)
7261 i
.rm
.reg
= i
.tm
.extension_opcode
;
7267 output_branch (void)
7273 relax_substateT subtype
;
7277 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
7278 size
= i
.disp_encoding
== disp_encoding_32bit
? BIG
: SMALL
;
7281 if (i
.prefix
[DATA_PREFIX
] != 0)
7287 /* Pentium4 branch hints. */
7288 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
7289 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
7294 if (i
.prefix
[REX_PREFIX
] != 0)
7300 /* BND prefixed jump. */
7301 if (i
.prefix
[BND_PREFIX
] != 0)
7303 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
7307 if (i
.prefixes
!= 0 && !intel_syntax
)
7308 as_warn (_("skipping prefixes on this instruction"));
7310 /* It's always a symbol; End frag & setup for relax.
7311 Make sure there is enough room in this frag for the largest
7312 instruction we may generate in md_convert_frag. This is 2
7313 bytes for the opcode and room for the prefix and largest
7315 frag_grow (prefix
+ 2 + 4);
7316 /* Prefix and 1 opcode byte go in fr_fix. */
7317 p
= frag_more (prefix
+ 1);
7318 if (i
.prefix
[DATA_PREFIX
] != 0)
7319 *p
++ = DATA_PREFIX_OPCODE
;
7320 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
7321 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
7322 *p
++ = i
.prefix
[SEG_PREFIX
];
7323 if (i
.prefix
[REX_PREFIX
] != 0)
7324 *p
++ = i
.prefix
[REX_PREFIX
];
7325 *p
= i
.tm
.base_opcode
;
7327 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
7328 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
7329 else if (cpu_arch_flags
.bitfield
.cpui386
)
7330 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
7332 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
7335 sym
= i
.op
[0].disps
->X_add_symbol
;
7336 off
= i
.op
[0].disps
->X_add_number
;
7338 if (i
.op
[0].disps
->X_op
!= O_constant
7339 && i
.op
[0].disps
->X_op
!= O_symbol
)
7341 /* Handle complex expressions. */
7342 sym
= make_expr_symbol (i
.op
[0].disps
);
7346 /* 1 possible extra opcode + 4 byte displacement go in var part.
7347 Pass reloc in fr_var. */
7348 frag_var (rs_machine_dependent
, 5, i
.reloc
[0], subtype
, sym
, off
, p
);
7351 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7352 /* Return TRUE iff PLT32 relocation should be used for branching to
7356 need_plt32_p (symbolS
*s
)
7358 /* PLT32 relocation is ELF only. */
7362 /* Since there is no need to prepare for PLT branch on x86-64, we
7363 can generate R_X86_64_PLT32, instead of R_X86_64_PC32, which can
7364 be used as a marker for 32-bit PC-relative branches. */
7368 /* Weak or undefined symbol need PLT32 relocation. */
7369 if (S_IS_WEAK (s
) || !S_IS_DEFINED (s
))
7372 /* Non-global symbol doesn't need PLT32 relocation. */
7373 if (! S_IS_EXTERNAL (s
))
7376 /* Other global symbols need PLT32 relocation. NB: Symbol with
7377 non-default visibilities are treated as normal global symbol
7378 so that PLT32 relocation can be used as a marker for 32-bit
7379 PC-relative branches. It is useful for linker relaxation. */
7390 bfd_reloc_code_real_type jump_reloc
= i
.reloc
[0];
7392 if (i
.tm
.opcode_modifier
.jumpbyte
)
7394 /* This is a loop or jecxz type instruction. */
7396 if (i
.prefix
[ADDR_PREFIX
] != 0)
7398 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE
);
7401 /* Pentium4 branch hints. */
7402 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
7403 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
7405 FRAG_APPEND_1_CHAR (i
.prefix
[SEG_PREFIX
]);
7414 if (flag_code
== CODE_16BIT
)
7417 if (i
.prefix
[DATA_PREFIX
] != 0)
7419 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE
);
7429 if (i
.prefix
[REX_PREFIX
] != 0)
7431 FRAG_APPEND_1_CHAR (i
.prefix
[REX_PREFIX
]);
7435 /* BND prefixed jump. */
7436 if (i
.prefix
[BND_PREFIX
] != 0)
7438 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
7442 if (i
.prefixes
!= 0 && !intel_syntax
)
7443 as_warn (_("skipping prefixes on this instruction"));
7445 p
= frag_more (i
.tm
.opcode_length
+ size
);
7446 switch (i
.tm
.opcode_length
)
7449 *p
++ = i
.tm
.base_opcode
>> 8;
7452 *p
++ = i
.tm
.base_opcode
;
7458 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7460 && jump_reloc
== NO_RELOC
7461 && need_plt32_p (i
.op
[0].disps
->X_add_symbol
))
7462 jump_reloc
= BFD_RELOC_X86_64_PLT32
;
7465 jump_reloc
= reloc (size
, 1, 1, jump_reloc
);
7467 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
7468 i
.op
[0].disps
, 1, jump_reloc
);
7470 /* All jumps handled here are signed, but don't use a signed limit
7471 check for 32 and 16 bit jumps as we want to allow wrap around at
7472 4G and 64k respectively. */
7474 fixP
->fx_signed
= 1;
7478 output_interseg_jump (void)
7486 if (flag_code
== CODE_16BIT
)
7490 if (i
.prefix
[DATA_PREFIX
] != 0)
7496 if (i
.prefix
[REX_PREFIX
] != 0)
7506 if (i
.prefixes
!= 0 && !intel_syntax
)
7507 as_warn (_("skipping prefixes on this instruction"));
7509 /* 1 opcode; 2 segment; offset */
7510 p
= frag_more (prefix
+ 1 + 2 + size
);
7512 if (i
.prefix
[DATA_PREFIX
] != 0)
7513 *p
++ = DATA_PREFIX_OPCODE
;
7515 if (i
.prefix
[REX_PREFIX
] != 0)
7516 *p
++ = i
.prefix
[REX_PREFIX
];
7518 *p
++ = i
.tm
.base_opcode
;
7519 if (i
.op
[1].imms
->X_op
== O_constant
)
7521 offsetT n
= i
.op
[1].imms
->X_add_number
;
7524 && !fits_in_unsigned_word (n
)
7525 && !fits_in_signed_word (n
))
7527 as_bad (_("16-bit jump out of range"));
7530 md_number_to_chars (p
, n
, size
);
7533 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
7534 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
7535 if (i
.op
[0].imms
->X_op
!= O_constant
)
7536 as_bad (_("can't handle non absolute segment in `%s'"),
7538 md_number_to_chars (p
+ size
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
7544 fragS
*insn_start_frag
;
7545 offsetT insn_start_off
;
7547 /* Tie dwarf2 debug info to the address at the start of the insn.
7548 We can't do this after the insn has been output as the current
7549 frag may have been closed off. eg. by frag_var. */
7550 dwarf2_emit_insn (0);
7552 insn_start_frag
= frag_now
;
7553 insn_start_off
= frag_now_fix ();
7556 if (i
.tm
.opcode_modifier
.jump
)
7558 else if (i
.tm
.opcode_modifier
.jumpbyte
7559 || i
.tm
.opcode_modifier
.jumpdword
)
7561 else if (i
.tm
.opcode_modifier
.jumpintersegment
)
7562 output_interseg_jump ();
7565 /* Output normal instructions here. */
7569 unsigned int prefix
;
7572 && i
.tm
.base_opcode
== 0xfae
7574 && i
.imm_operands
== 1
7575 && (i
.op
[0].imms
->X_add_number
== 0xe8
7576 || i
.op
[0].imms
->X_add_number
== 0xf0
7577 || i
.op
[0].imms
->X_add_number
== 0xf8))
7579 /* Encode lfence, mfence, and sfence as
7580 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
7581 offsetT val
= 0x240483f0ULL
;
7583 md_number_to_chars (p
, val
, 5);
7587 /* Some processors fail on LOCK prefix. This options makes
7588 assembler ignore LOCK prefix and serves as a workaround. */
7589 if (omit_lock_prefix
)
7591 if (i
.tm
.base_opcode
== LOCK_PREFIX_OPCODE
)
7593 i
.prefix
[LOCK_PREFIX
] = 0;
7596 /* Since the VEX/EVEX prefix contains the implicit prefix, we
7597 don't need the explicit prefix. */
7598 if (!i
.tm
.opcode_modifier
.vex
&& !i
.tm
.opcode_modifier
.evex
)
7600 switch (i
.tm
.opcode_length
)
7603 if (i
.tm
.base_opcode
& 0xff000000)
7605 prefix
= (i
.tm
.base_opcode
>> 24) & 0xff;
7610 if ((i
.tm
.base_opcode
& 0xff0000) != 0)
7612 prefix
= (i
.tm
.base_opcode
>> 16) & 0xff;
7613 if (i
.tm
.cpu_flags
.bitfield
.cpupadlock
)
7616 if (prefix
!= REPE_PREFIX_OPCODE
7617 || (i
.prefix
[REP_PREFIX
]
7618 != REPE_PREFIX_OPCODE
))
7619 add_prefix (prefix
);
7622 add_prefix (prefix
);
7628 /* Check for pseudo prefixes. */
7629 as_bad_where (insn_start_frag
->fr_file
,
7630 insn_start_frag
->fr_line
,
7631 _("pseudo prefix without instruction"));
7637 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7638 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
7639 R_X86_64_GOTTPOFF relocation so that linker can safely
7640 perform IE->LE optimization. */
7641 if (x86_elf_abi
== X86_64_X32_ABI
7643 && i
.reloc
[0] == BFD_RELOC_X86_64_GOTTPOFF
7644 && i
.prefix
[REX_PREFIX
] == 0)
7645 add_prefix (REX_OPCODE
);
7648 /* The prefix bytes. */
7649 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
7651 FRAG_APPEND_1_CHAR (*q
);
7655 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
7660 /* REX byte is encoded in VEX prefix. */
7664 FRAG_APPEND_1_CHAR (*q
);
7667 /* There should be no other prefixes for instructions
7672 /* For EVEX instructions i.vrex should become 0 after
7673 build_evex_prefix. For VEX instructions upper 16 registers
7674 aren't available, so VREX should be 0. */
7677 /* Now the VEX prefix. */
7678 p
= frag_more (i
.vex
.length
);
7679 for (j
= 0; j
< i
.vex
.length
; j
++)
7680 p
[j
] = i
.vex
.bytes
[j
];
7683 /* Now the opcode; be careful about word order here! */
7684 if (i
.tm
.opcode_length
== 1)
7686 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
7690 switch (i
.tm
.opcode_length
)
7694 *p
++ = (i
.tm
.base_opcode
>> 24) & 0xff;
7695 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
7699 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
7709 /* Put out high byte first: can't use md_number_to_chars! */
7710 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
7711 *p
= i
.tm
.base_opcode
& 0xff;
7714 /* Now the modrm byte and sib byte (if present). */
7715 if (i
.tm
.opcode_modifier
.modrm
)
7717 FRAG_APPEND_1_CHAR ((i
.rm
.regmem
<< 0
7720 /* If i.rm.regmem == ESP (4)
7721 && i.rm.mode != (Register mode)
7723 ==> need second modrm byte. */
7724 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
7726 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.word
))
7727 FRAG_APPEND_1_CHAR ((i
.sib
.base
<< 0
7729 | i
.sib
.scale
<< 6));
7732 if (i
.disp_operands
)
7733 output_disp (insn_start_frag
, insn_start_off
);
7736 output_imm (insn_start_frag
, insn_start_off
);
7742 pi ("" /*line*/, &i
);
7744 #endif /* DEBUG386 */
7747 /* Return the size of the displacement operand N. */
7750 disp_size (unsigned int n
)
7754 if (i
.types
[n
].bitfield
.disp64
)
7756 else if (i
.types
[n
].bitfield
.disp8
)
7758 else if (i
.types
[n
].bitfield
.disp16
)
7763 /* Return the size of the immediate operand N. */
7766 imm_size (unsigned int n
)
7769 if (i
.types
[n
].bitfield
.imm64
)
7771 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
7773 else if (i
.types
[n
].bitfield
.imm16
)
7779 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
7784 for (n
= 0; n
< i
.operands
; n
++)
7786 if (operand_type_check (i
.types
[n
], disp
))
7788 if (i
.op
[n
].disps
->X_op
== O_constant
)
7790 int size
= disp_size (n
);
7791 offsetT val
= i
.op
[n
].disps
->X_add_number
;
7793 val
= offset_in_range (val
>> i
.memshift
, size
);
7794 p
= frag_more (size
);
7795 md_number_to_chars (p
, val
, size
);
7799 enum bfd_reloc_code_real reloc_type
;
7800 int size
= disp_size (n
);
7801 int sign
= i
.types
[n
].bitfield
.disp32s
;
7802 int pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
7805 /* We can't have 8 bit displacement here. */
7806 gas_assert (!i
.types
[n
].bitfield
.disp8
);
7808 /* The PC relative address is computed relative
7809 to the instruction boundary, so in case immediate
7810 fields follows, we need to adjust the value. */
7811 if (pcrel
&& i
.imm_operands
)
7816 for (n1
= 0; n1
< i
.operands
; n1
++)
7817 if (operand_type_check (i
.types
[n1
], imm
))
7819 /* Only one immediate is allowed for PC
7820 relative address. */
7821 gas_assert (sz
== 0);
7823 i
.op
[n
].disps
->X_add_number
-= sz
;
7825 /* We should find the immediate. */
7826 gas_assert (sz
!= 0);
7829 p
= frag_more (size
);
7830 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
7832 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
7833 && (((reloc_type
== BFD_RELOC_32
7834 || reloc_type
== BFD_RELOC_X86_64_32S
7835 || (reloc_type
== BFD_RELOC_64
7837 && (i
.op
[n
].disps
->X_op
== O_symbol
7838 || (i
.op
[n
].disps
->X_op
== O_add
7839 && ((symbol_get_value_expression
7840 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
7842 || reloc_type
== BFD_RELOC_32_PCREL
))
7846 if (insn_start_frag
== frag_now
)
7847 add
= (p
- frag_now
->fr_literal
) - insn_start_off
;
7852 add
= insn_start_frag
->fr_fix
- insn_start_off
;
7853 for (fr
= insn_start_frag
->fr_next
;
7854 fr
&& fr
!= frag_now
; fr
= fr
->fr_next
)
7856 add
+= p
- frag_now
->fr_literal
;
7861 reloc_type
= BFD_RELOC_386_GOTPC
;
7862 i
.op
[n
].imms
->X_add_number
+= add
;
7864 else if (reloc_type
== BFD_RELOC_64
)
7865 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
7867 /* Don't do the adjustment for x86-64, as there
7868 the pcrel addressing is relative to the _next_
7869 insn, and that is taken care of in other code. */
7870 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
7872 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
,
7873 size
, i
.op
[n
].disps
, pcrel
,
7875 /* Check for "call/jmp *mem", "mov mem, %reg",
7876 "test %reg, mem" and "binop mem, %reg" where binop
7877 is one of adc, add, and, cmp, or, sbb, sub, xor
7878 instructions. Always generate R_386_GOT32X for
7879 "sym*GOT" operand in 32-bit mode. */
7880 if ((generate_relax_relocations
7883 && i
.rm
.regmem
== 5))
7885 || (i
.rm
.mode
== 0 && i
.rm
.regmem
== 5))
7886 && ((i
.operands
== 1
7887 && i
.tm
.base_opcode
== 0xff
7888 && (i
.rm
.reg
== 2 || i
.rm
.reg
== 4))
7890 && (i
.tm
.base_opcode
== 0x8b
7891 || i
.tm
.base_opcode
== 0x85
7892 || (i
.tm
.base_opcode
& 0xc7) == 0x03))))
7896 fixP
->fx_tcbit
= i
.rex
!= 0;
7898 && (i
.base_reg
->reg_num
== RegRip
7899 || i
.base_reg
->reg_num
== RegEip
))
7900 fixP
->fx_tcbit2
= 1;
7903 fixP
->fx_tcbit2
= 1;
7911 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
7916 for (n
= 0; n
< i
.operands
; n
++)
7918 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
7919 if (i
.rounding
&& (int) n
== i
.rounding
->operand
)
7922 if (operand_type_check (i
.types
[n
], imm
))
7924 if (i
.op
[n
].imms
->X_op
== O_constant
)
7926 int size
= imm_size (n
);
7929 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
7931 p
= frag_more (size
);
7932 md_number_to_chars (p
, val
, size
);
7936 /* Not absolute_section.
7937 Need a 32-bit fixup (don't support 8bit
7938 non-absolute imms). Try to support other
7940 enum bfd_reloc_code_real reloc_type
;
7941 int size
= imm_size (n
);
7944 if (i
.types
[n
].bitfield
.imm32s
7945 && (i
.suffix
== QWORD_MNEM_SUFFIX
7946 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)))
7951 p
= frag_more (size
);
7952 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
7954 /* This is tough to explain. We end up with this one if we
7955 * have operands that look like
7956 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
7957 * obtain the absolute address of the GOT, and it is strongly
7958 * preferable from a performance point of view to avoid using
7959 * a runtime relocation for this. The actual sequence of
7960 * instructions often look something like:
7965 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
7967 * The call and pop essentially return the absolute address
7968 * of the label .L66 and store it in %ebx. The linker itself
7969 * will ultimately change the first operand of the addl so
7970 * that %ebx points to the GOT, but to keep things simple, the
7971 * .o file must have this operand set so that it generates not
7972 * the absolute address of .L66, but the absolute address of
7973 * itself. This allows the linker itself simply treat a GOTPC
7974 * relocation as asking for a pcrel offset to the GOT to be
7975 * added in, and the addend of the relocation is stored in the
7976 * operand field for the instruction itself.
7978 * Our job here is to fix the operand so that it would add
7979 * the correct offset so that %ebx would point to itself. The
7980 * thing that is tricky is that .-.L66 will point to the
7981 * beginning of the instruction, so we need to further modify
7982 * the operand so that it will point to itself. There are
7983 * other cases where you have something like:
7985 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
7987 * and here no correction would be required. Internally in
7988 * the assembler we treat operands of this form as not being
7989 * pcrel since the '.' is explicitly mentioned, and I wonder
7990 * whether it would simplify matters to do it this way. Who
7991 * knows. In earlier versions of the PIC patches, the
7992 * pcrel_adjust field was used to store the correction, but
7993 * since the expression is not pcrel, I felt it would be
7994 * confusing to do it this way. */
7996 if ((reloc_type
== BFD_RELOC_32
7997 || reloc_type
== BFD_RELOC_X86_64_32S
7998 || reloc_type
== BFD_RELOC_64
)
8000 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
8001 && (i
.op
[n
].imms
->X_op
== O_symbol
8002 || (i
.op
[n
].imms
->X_op
== O_add
8003 && ((symbol_get_value_expression
8004 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
8009 if (insn_start_frag
== frag_now
)
8010 add
= (p
- frag_now
->fr_literal
) - insn_start_off
;
8015 add
= insn_start_frag
->fr_fix
- insn_start_off
;
8016 for (fr
= insn_start_frag
->fr_next
;
8017 fr
&& fr
!= frag_now
; fr
= fr
->fr_next
)
8019 add
+= p
- frag_now
->fr_literal
;
8023 reloc_type
= BFD_RELOC_386_GOTPC
;
8025 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
8027 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
8028 i
.op
[n
].imms
->X_add_number
+= add
;
8030 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8031 i
.op
[n
].imms
, 0, reloc_type
);
8037 /* x86_cons_fix_new is called via the expression parsing code when a
8038 reloc is needed. We use this hook to get the correct .got reloc. */
8039 static int cons_sign
= -1;
8042 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
8043 expressionS
*exp
, bfd_reloc_code_real_type r
)
8045 r
= reloc (len
, 0, cons_sign
, r
);
8048 if (exp
->X_op
== O_secrel
)
8050 exp
->X_op
= O_symbol
;
8051 r
= BFD_RELOC_32_SECREL
;
8055 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
8058 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
8059 purpose of the `.dc.a' internal pseudo-op. */
8062 x86_address_bytes (void)
8064 if ((stdoutput
->arch_info
->mach
& bfd_mach_x64_32
))
8066 return stdoutput
->arch_info
->bits_per_address
/ 8;
8069 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
8071 # define lex_got(reloc, adjust, types) NULL
8073 /* Parse operands of the form
8074 <symbol>@GOTOFF+<nnn>
8075 and similar .plt or .got references.
8077 If we find one, set up the correct relocation in RELOC and copy the
8078 input string, minus the `@GOTOFF' into a malloc'd buffer for
8079 parsing by the calling routine. Return this buffer, and if ADJUST
8080 is non-null set it to the length of the string we removed from the
8081 input line. Otherwise return NULL. */
8083 lex_got (enum bfd_reloc_code_real
*rel
,
8085 i386_operand_type
*types
)
8087 /* Some of the relocations depend on the size of what field is to
8088 be relocated. But in our callers i386_immediate and i386_displacement
8089 we don't yet know the operand size (this will be set by insn
8090 matching). Hence we record the word32 relocation here,
8091 and adjust the reloc according to the real size in reloc(). */
8092 static const struct {
8095 const enum bfd_reloc_code_real rel
[2];
8096 const i386_operand_type types64
;
8098 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8099 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32
,
8101 OPERAND_TYPE_IMM32_64
},
8103 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real
,
8104 BFD_RELOC_X86_64_PLTOFF64
},
8105 OPERAND_TYPE_IMM64
},
8106 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32
,
8107 BFD_RELOC_X86_64_PLT32
},
8108 OPERAND_TYPE_IMM32_32S_DISP32
},
8109 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real
,
8110 BFD_RELOC_X86_64_GOTPLT64
},
8111 OPERAND_TYPE_IMM64_DISP64
},
8112 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF
,
8113 BFD_RELOC_X86_64_GOTOFF64
},
8114 OPERAND_TYPE_IMM64_DISP64
},
8115 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real
,
8116 BFD_RELOC_X86_64_GOTPCREL
},
8117 OPERAND_TYPE_IMM32_32S_DISP32
},
8118 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD
,
8119 BFD_RELOC_X86_64_TLSGD
},
8120 OPERAND_TYPE_IMM32_32S_DISP32
},
8121 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM
,
8122 _dummy_first_bfd_reloc_code_real
},
8123 OPERAND_TYPE_NONE
},
8124 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real
,
8125 BFD_RELOC_X86_64_TLSLD
},
8126 OPERAND_TYPE_IMM32_32S_DISP32
},
8127 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32
,
8128 BFD_RELOC_X86_64_GOTTPOFF
},
8129 OPERAND_TYPE_IMM32_32S_DISP32
},
8130 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32
,
8131 BFD_RELOC_X86_64_TPOFF32
},
8132 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
8133 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE
,
8134 _dummy_first_bfd_reloc_code_real
},
8135 OPERAND_TYPE_NONE
},
8136 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32
,
8137 BFD_RELOC_X86_64_DTPOFF32
},
8138 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
8139 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE
,
8140 _dummy_first_bfd_reloc_code_real
},
8141 OPERAND_TYPE_NONE
},
8142 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE
,
8143 _dummy_first_bfd_reloc_code_real
},
8144 OPERAND_TYPE_NONE
},
8145 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32
,
8146 BFD_RELOC_X86_64_GOT32
},
8147 OPERAND_TYPE_IMM32_32S_64_DISP32
},
8148 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC
,
8149 BFD_RELOC_X86_64_GOTPC32_TLSDESC
},
8150 OPERAND_TYPE_IMM32_32S_DISP32
},
8151 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL
,
8152 BFD_RELOC_X86_64_TLSDESC_CALL
},
8153 OPERAND_TYPE_IMM32_32S_DISP32
},
8158 #if defined (OBJ_MAYBE_ELF)
8163 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
8164 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
8167 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
8169 int len
= gotrel
[j
].len
;
8170 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
8172 if (gotrel
[j
].rel
[object_64bit
] != 0)
8175 char *tmpbuf
, *past_reloc
;
8177 *rel
= gotrel
[j
].rel
[object_64bit
];
8181 if (flag_code
!= CODE_64BIT
)
8183 types
->bitfield
.imm32
= 1;
8184 types
->bitfield
.disp32
= 1;
8187 *types
= gotrel
[j
].types64
;
8190 if (j
!= 0 && GOT_symbol
== NULL
)
8191 GOT_symbol
= symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME
);
8193 /* The length of the first part of our input line. */
8194 first
= cp
- input_line_pointer
;
8196 /* The second part goes from after the reloc token until
8197 (and including) an end_of_line char or comma. */
8198 past_reloc
= cp
+ 1 + len
;
8200 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
8202 second
= cp
+ 1 - past_reloc
;
8204 /* Allocate and copy string. The trailing NUL shouldn't
8205 be necessary, but be safe. */
8206 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
8207 memcpy (tmpbuf
, input_line_pointer
, first
);
8208 if (second
!= 0 && *past_reloc
!= ' ')
8209 /* Replace the relocation token with ' ', so that
8210 errors like foo@GOTOFF1 will be detected. */
8211 tmpbuf
[first
++] = ' ';
8213 /* Increment length by 1 if the relocation token is
8218 memcpy (tmpbuf
+ first
, past_reloc
, second
);
8219 tmpbuf
[first
+ second
] = '\0';
8223 as_bad (_("@%s reloc is not supported with %d-bit output format"),
8224 gotrel
[j
].str
, 1 << (5 + object_64bit
));
8229 /* Might be a symbol version string. Don't as_bad here. */
8238 /* Parse operands of the form
8239 <symbol>@SECREL32+<nnn>
8241 If we find one, set up the correct relocation in RELOC and copy the
8242 input string, minus the `@SECREL32' into a malloc'd buffer for
8243 parsing by the calling routine. Return this buffer, and if ADJUST
8244 is non-null set it to the length of the string we removed from the
8245 input line. Otherwise return NULL.
8247 This function is copied from the ELF version above adjusted for PE targets. */
8250 lex_got (enum bfd_reloc_code_real
*rel ATTRIBUTE_UNUSED
,
8251 int *adjust ATTRIBUTE_UNUSED
,
8252 i386_operand_type
*types
)
8258 const enum bfd_reloc_code_real rel
[2];
8259 const i386_operand_type types64
;
8263 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL
,
8264 BFD_RELOC_32_SECREL
},
8265 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
8271 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
8272 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
8275 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
8277 int len
= gotrel
[j
].len
;
8279 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
8281 if (gotrel
[j
].rel
[object_64bit
] != 0)
8284 char *tmpbuf
, *past_reloc
;
8286 *rel
= gotrel
[j
].rel
[object_64bit
];
8292 if (flag_code
!= CODE_64BIT
)
8294 types
->bitfield
.imm32
= 1;
8295 types
->bitfield
.disp32
= 1;
8298 *types
= gotrel
[j
].types64
;
8301 /* The length of the first part of our input line. */
8302 first
= cp
- input_line_pointer
;
8304 /* The second part goes from after the reloc token until
8305 (and including) an end_of_line char or comma. */
8306 past_reloc
= cp
+ 1 + len
;
8308 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
8310 second
= cp
+ 1 - past_reloc
;
8312 /* Allocate and copy string. The trailing NUL shouldn't
8313 be necessary, but be safe. */
8314 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
8315 memcpy (tmpbuf
, input_line_pointer
, first
);
8316 if (second
!= 0 && *past_reloc
!= ' ')
8317 /* Replace the relocation token with ' ', so that
8318 errors like foo@SECLREL321 will be detected. */
8319 tmpbuf
[first
++] = ' ';
8320 memcpy (tmpbuf
+ first
, past_reloc
, second
);
8321 tmpbuf
[first
+ second
] = '\0';
8325 as_bad (_("@%s reloc is not supported with %d-bit output format"),
8326 gotrel
[j
].str
, 1 << (5 + object_64bit
));
8331 /* Might be a symbol version string. Don't as_bad here. */
8337 bfd_reloc_code_real_type
8338 x86_cons (expressionS
*exp
, int size
)
8340 bfd_reloc_code_real_type got_reloc
= NO_RELOC
;
8342 intel_syntax
= -intel_syntax
;
8345 if (size
== 4 || (object_64bit
&& size
== 8))
8347 /* Handle @GOTOFF and the like in an expression. */
8349 char *gotfree_input_line
;
8352 save
= input_line_pointer
;
8353 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
);
8354 if (gotfree_input_line
)
8355 input_line_pointer
= gotfree_input_line
;
8359 if (gotfree_input_line
)
8361 /* expression () has merrily parsed up to the end of line,
8362 or a comma - in the wrong buffer. Transfer how far
8363 input_line_pointer has moved to the right buffer. */
8364 input_line_pointer
= (save
8365 + (input_line_pointer
- gotfree_input_line
)
8367 free (gotfree_input_line
);
8368 if (exp
->X_op
== O_constant
8369 || exp
->X_op
== O_absent
8370 || exp
->X_op
== O_illegal
8371 || exp
->X_op
== O_register
8372 || exp
->X_op
== O_big
)
8374 char c
= *input_line_pointer
;
8375 *input_line_pointer
= 0;
8376 as_bad (_("missing or invalid expression `%s'"), save
);
8377 *input_line_pointer
= c
;
8384 intel_syntax
= -intel_syntax
;
8387 i386_intel_simplify (exp
);
8393 signed_cons (int size
)
8395 if (flag_code
== CODE_64BIT
)
8403 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
8410 if (exp
.X_op
== O_symbol
)
8411 exp
.X_op
= O_secrel
;
8413 emit_expr (&exp
, 4);
8415 while (*input_line_pointer
++ == ',');
8417 input_line_pointer
--;
8418 demand_empty_rest_of_line ();
8422 /* Handle Vector operations. */
8425 check_VecOperations (char *op_string
, char *op_end
)
8427 const reg_entry
*mask
;
8432 && (op_end
== NULL
|| op_string
< op_end
))
8435 if (*op_string
== '{')
8439 /* Check broadcasts. */
8440 if (strncmp (op_string
, "1to", 3) == 0)
8445 goto duplicated_vec_op
;
8448 if (*op_string
== '8')
8449 bcst_type
= BROADCAST_1TO8
;
8450 else if (*op_string
== '4')
8451 bcst_type
= BROADCAST_1TO4
;
8452 else if (*op_string
== '2')
8453 bcst_type
= BROADCAST_1TO2
;
8454 else if (*op_string
== '1'
8455 && *(op_string
+1) == '6')
8457 bcst_type
= BROADCAST_1TO16
;
8462 as_bad (_("Unsupported broadcast: `%s'"), saved
);
8467 broadcast_op
.type
= bcst_type
;
8468 broadcast_op
.operand
= this_operand
;
8469 i
.broadcast
= &broadcast_op
;
8471 /* Check masking operation. */
8472 else if ((mask
= parse_register (op_string
, &end_op
)) != NULL
)
8474 /* k0 can't be used for write mask. */
8475 if (!mask
->reg_type
.bitfield
.regmask
|| mask
->reg_num
== 0)
8477 as_bad (_("`%s%s' can't be used for write mask"),
8478 register_prefix
, mask
->reg_name
);
8484 mask_op
.mask
= mask
;
8485 mask_op
.zeroing
= 0;
8486 mask_op
.operand
= this_operand
;
8492 goto duplicated_vec_op
;
8494 i
.mask
->mask
= mask
;
8496 /* Only "{z}" is allowed here. No need to check
8497 zeroing mask explicitly. */
8498 if (i
.mask
->operand
!= this_operand
)
8500 as_bad (_("invalid write mask `%s'"), saved
);
8507 /* Check zeroing-flag for masking operation. */
8508 else if (*op_string
== 'z')
8512 mask_op
.mask
= NULL
;
8513 mask_op
.zeroing
= 1;
8514 mask_op
.operand
= this_operand
;
8519 if (i
.mask
->zeroing
)
8522 as_bad (_("duplicated `%s'"), saved
);
8526 i
.mask
->zeroing
= 1;
8528 /* Only "{%k}" is allowed here. No need to check mask
8529 register explicitly. */
8530 if (i
.mask
->operand
!= this_operand
)
8532 as_bad (_("invalid zeroing-masking `%s'"),
8541 goto unknown_vec_op
;
8543 if (*op_string
!= '}')
8545 as_bad (_("missing `}' in `%s'"), saved
);
8552 /* We don't know this one. */
8553 as_bad (_("unknown vector operation: `%s'"), saved
);
8557 if (i
.mask
&& i
.mask
->zeroing
&& !i
.mask
->mask
)
8559 as_bad (_("zeroing-masking only allowed with write mask"));
8567 i386_immediate (char *imm_start
)
8569 char *save_input_line_pointer
;
8570 char *gotfree_input_line
;
8573 i386_operand_type types
;
8575 operand_type_set (&types
, ~0);
8577 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
8579 as_bad (_("at most %d immediate operands are allowed"),
8580 MAX_IMMEDIATE_OPERANDS
);
8584 exp
= &im_expressions
[i
.imm_operands
++];
8585 i
.op
[this_operand
].imms
= exp
;
8587 if (is_space_char (*imm_start
))
8590 save_input_line_pointer
= input_line_pointer
;
8591 input_line_pointer
= imm_start
;
8593 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
8594 if (gotfree_input_line
)
8595 input_line_pointer
= gotfree_input_line
;
8597 exp_seg
= expression (exp
);
8601 /* Handle vector operations. */
8602 if (*input_line_pointer
== '{')
8604 input_line_pointer
= check_VecOperations (input_line_pointer
,
8606 if (input_line_pointer
== NULL
)
8610 if (*input_line_pointer
)
8611 as_bad (_("junk `%s' after expression"), input_line_pointer
);
8613 input_line_pointer
= save_input_line_pointer
;
8614 if (gotfree_input_line
)
8616 free (gotfree_input_line
);
8618 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
8619 exp
->X_op
= O_illegal
;
8622 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
8626 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
8627 i386_operand_type types
, const char *imm_start
)
8629 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
8632 as_bad (_("missing or invalid immediate expression `%s'"),
8636 else if (exp
->X_op
== O_constant
)
8638 /* Size it properly later. */
8639 i
.types
[this_operand
].bitfield
.imm64
= 1;
8640 /* If not 64bit, sign extend val. */
8641 if (flag_code
!= CODE_64BIT
8642 && (exp
->X_add_number
& ~(((addressT
) 2 << 31) - 1)) == 0)
8644 = (exp
->X_add_number
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
8646 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8647 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
8648 && exp_seg
!= absolute_section
8649 && exp_seg
!= text_section
8650 && exp_seg
!= data_section
8651 && exp_seg
!= bss_section
8652 && exp_seg
!= undefined_section
8653 && !bfd_is_com_section (exp_seg
))
8655 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
8659 else if (!intel_syntax
&& exp_seg
== reg_section
)
8662 as_bad (_("illegal immediate register operand %s"), imm_start
);
8667 /* This is an address. The size of the address will be
8668 determined later, depending on destination register,
8669 suffix, or the default for the section. */
8670 i
.types
[this_operand
].bitfield
.imm8
= 1;
8671 i
.types
[this_operand
].bitfield
.imm16
= 1;
8672 i
.types
[this_operand
].bitfield
.imm32
= 1;
8673 i
.types
[this_operand
].bitfield
.imm32s
= 1;
8674 i
.types
[this_operand
].bitfield
.imm64
= 1;
8675 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
8683 i386_scale (char *scale
)
8686 char *save
= input_line_pointer
;
8688 input_line_pointer
= scale
;
8689 val
= get_absolute_expression ();
8694 i
.log2_scale_factor
= 0;
8697 i
.log2_scale_factor
= 1;
8700 i
.log2_scale_factor
= 2;
8703 i
.log2_scale_factor
= 3;
8707 char sep
= *input_line_pointer
;
8709 *input_line_pointer
= '\0';
8710 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
8712 *input_line_pointer
= sep
;
8713 input_line_pointer
= save
;
8717 if (i
.log2_scale_factor
!= 0 && i
.index_reg
== 0)
8719 as_warn (_("scale factor of %d without an index register"),
8720 1 << i
.log2_scale_factor
);
8721 i
.log2_scale_factor
= 0;
8723 scale
= input_line_pointer
;
8724 input_line_pointer
= save
;
8729 i386_displacement (char *disp_start
, char *disp_end
)
8733 char *save_input_line_pointer
;
8734 char *gotfree_input_line
;
8736 i386_operand_type bigdisp
, types
= anydisp
;
8739 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
8741 as_bad (_("at most %d displacement operands are allowed"),
8742 MAX_MEMORY_OPERANDS
);
8746 operand_type_set (&bigdisp
, 0);
8747 if ((i
.types
[this_operand
].bitfield
.jumpabsolute
)
8748 || (!current_templates
->start
->opcode_modifier
.jump
8749 && !current_templates
->start
->opcode_modifier
.jumpdword
))
8751 bigdisp
.bitfield
.disp32
= 1;
8752 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
8753 if (flag_code
== CODE_64BIT
)
8757 bigdisp
.bitfield
.disp32s
= 1;
8758 bigdisp
.bitfield
.disp64
= 1;
8761 else if ((flag_code
== CODE_16BIT
) ^ override
)
8763 bigdisp
.bitfield
.disp32
= 0;
8764 bigdisp
.bitfield
.disp16
= 1;
8769 /* For PC-relative branches, the width of the displacement
8770 is dependent upon data size, not address size. */
8771 override
= (i
.prefix
[DATA_PREFIX
] != 0);
8772 if (flag_code
== CODE_64BIT
)
8774 if (override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
8775 bigdisp
.bitfield
.disp16
= 1;
8778 bigdisp
.bitfield
.disp32
= 1;
8779 bigdisp
.bitfield
.disp32s
= 1;
8785 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
8787 : LONG_MNEM_SUFFIX
));
8788 bigdisp
.bitfield
.disp32
= 1;
8789 if ((flag_code
== CODE_16BIT
) ^ override
)
8791 bigdisp
.bitfield
.disp32
= 0;
8792 bigdisp
.bitfield
.disp16
= 1;
8796 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
8799 exp
= &disp_expressions
[i
.disp_operands
];
8800 i
.op
[this_operand
].disps
= exp
;
8802 save_input_line_pointer
= input_line_pointer
;
8803 input_line_pointer
= disp_start
;
8804 END_STRING_AND_SAVE (disp_end
);
8806 #ifndef GCC_ASM_O_HACK
8807 #define GCC_ASM_O_HACK 0
8810 END_STRING_AND_SAVE (disp_end
+ 1);
8811 if (i
.types
[this_operand
].bitfield
.baseIndex
8812 && displacement_string_end
[-1] == '+')
8814 /* This hack is to avoid a warning when using the "o"
8815 constraint within gcc asm statements.
8818 #define _set_tssldt_desc(n,addr,limit,type) \
8819 __asm__ __volatile__ ( \
8821 "movw %w1,2+%0\n\t" \
8823 "movb %b1,4+%0\n\t" \
8824 "movb %4,5+%0\n\t" \
8825 "movb $0,6+%0\n\t" \
8826 "movb %h1,7+%0\n\t" \
8828 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
8830 This works great except that the output assembler ends
8831 up looking a bit weird if it turns out that there is
8832 no offset. You end up producing code that looks like:
8845 So here we provide the missing zero. */
8847 *displacement_string_end
= '0';
8850 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
8851 if (gotfree_input_line
)
8852 input_line_pointer
= gotfree_input_line
;
8854 exp_seg
= expression (exp
);
8857 if (*input_line_pointer
)
8858 as_bad (_("junk `%s' after expression"), input_line_pointer
);
8860 RESTORE_END_STRING (disp_end
+ 1);
8862 input_line_pointer
= save_input_line_pointer
;
8863 if (gotfree_input_line
)
8865 free (gotfree_input_line
);
8867 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
8868 exp
->X_op
= O_illegal
;
8871 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
8873 RESTORE_END_STRING (disp_end
);
8879 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
8880 i386_operand_type types
, const char *disp_start
)
8882 i386_operand_type bigdisp
;
8885 /* We do this to make sure that the section symbol is in
8886 the symbol table. We will ultimately change the relocation
8887 to be relative to the beginning of the section. */
8888 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
8889 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
8890 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
8892 if (exp
->X_op
!= O_symbol
)
8895 if (S_IS_LOCAL (exp
->X_add_symbol
)
8896 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
8897 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
8898 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
8899 exp
->X_op
= O_subtract
;
8900 exp
->X_op_symbol
= GOT_symbol
;
8901 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
8902 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
8903 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
8904 i
.reloc
[this_operand
] = BFD_RELOC_64
;
8906 i
.reloc
[this_operand
] = BFD_RELOC_32
;
8909 else if (exp
->X_op
== O_absent
8910 || exp
->X_op
== O_illegal
8911 || exp
->X_op
== O_big
)
8914 as_bad (_("missing or invalid displacement expression `%s'"),
8919 else if (flag_code
== CODE_64BIT
8920 && !i
.prefix
[ADDR_PREFIX
]
8921 && exp
->X_op
== O_constant
)
8923 /* Since displacement is signed extended to 64bit, don't allow
8924 disp32 and turn off disp32s if they are out of range. */
8925 i
.types
[this_operand
].bitfield
.disp32
= 0;
8926 if (!fits_in_signed_long (exp
->X_add_number
))
8928 i
.types
[this_operand
].bitfield
.disp32s
= 0;
8929 if (i
.types
[this_operand
].bitfield
.baseindex
)
8931 as_bad (_("0x%lx out range of signed 32bit displacement"),
8932 (long) exp
->X_add_number
);
8938 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8939 else if (exp
->X_op
!= O_constant
8940 && OUTPUT_FLAVOR
== bfd_target_aout_flavour
8941 && exp_seg
!= absolute_section
8942 && exp_seg
!= text_section
8943 && exp_seg
!= data_section
8944 && exp_seg
!= bss_section
8945 && exp_seg
!= undefined_section
8946 && !bfd_is_com_section (exp_seg
))
8948 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
8953 /* Check if this is a displacement only operand. */
8954 bigdisp
= i
.types
[this_operand
];
8955 bigdisp
.bitfield
.disp8
= 0;
8956 bigdisp
.bitfield
.disp16
= 0;
8957 bigdisp
.bitfield
.disp32
= 0;
8958 bigdisp
.bitfield
.disp32s
= 0;
8959 bigdisp
.bitfield
.disp64
= 0;
8960 if (operand_type_all_zero (&bigdisp
))
8961 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
8967 /* Return the active addressing mode, taking address override and
8968 registers forming the address into consideration. Update the
8969 address override prefix if necessary. */
8971 static enum flag_code
8972 i386_addressing_mode (void)
8974 enum flag_code addr_mode
;
8976 if (i
.prefix
[ADDR_PREFIX
])
8977 addr_mode
= flag_code
== CODE_32BIT
? CODE_16BIT
: CODE_32BIT
;
8980 addr_mode
= flag_code
;
8982 #if INFER_ADDR_PREFIX
8983 if (i
.mem_operands
== 0)
8985 /* Infer address prefix from the first memory operand. */
8986 const reg_entry
*addr_reg
= i
.base_reg
;
8988 if (addr_reg
== NULL
)
8989 addr_reg
= i
.index_reg
;
8993 if (addr_reg
->reg_num
== RegEip
8994 || addr_reg
->reg_num
== RegEiz
8995 || addr_reg
->reg_type
.bitfield
.dword
)
8996 addr_mode
= CODE_32BIT
;
8997 else if (flag_code
!= CODE_64BIT
8998 && addr_reg
->reg_type
.bitfield
.word
)
8999 addr_mode
= CODE_16BIT
;
9001 if (addr_mode
!= flag_code
)
9003 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
9005 /* Change the size of any displacement too. At most one
9006 of Disp16 or Disp32 is set.
9007 FIXME. There doesn't seem to be any real need for
9008 separate Disp16 and Disp32 flags. The same goes for
9009 Imm16 and Imm32. Removing them would probably clean
9010 up the code quite a lot. */
9011 if (flag_code
!= CODE_64BIT
9012 && (i
.types
[this_operand
].bitfield
.disp16
9013 || i
.types
[this_operand
].bitfield
.disp32
))
9014 i
.types
[this_operand
]
9015 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
9025 /* Make sure the memory operand we've been dealt is valid.
9026 Return 1 on success, 0 on a failure. */
9029 i386_index_check (const char *operand_string
)
9031 const char *kind
= "base/index";
9032 enum flag_code addr_mode
= i386_addressing_mode ();
9034 if (current_templates
->start
->opcode_modifier
.isstring
9035 && !current_templates
->start
->opcode_modifier
.immext
9036 && (current_templates
->end
[-1].opcode_modifier
.isstring
9039 /* Memory operands of string insns are special in that they only allow
9040 a single register (rDI, rSI, or rBX) as their memory address. */
9041 const reg_entry
*expected_reg
;
9042 static const char *di_si
[][2] =
9048 static const char *bx
[] = { "ebx", "bx", "rbx" };
9050 kind
= "string address";
9052 if (current_templates
->start
->opcode_modifier
.repprefixok
)
9054 i386_operand_type type
= current_templates
->end
[-1].operand_types
[0];
9056 if (!type
.bitfield
.baseindex
9057 || ((!i
.mem_operands
!= !intel_syntax
)
9058 && current_templates
->end
[-1].operand_types
[1]
9059 .bitfield
.baseindex
))
9060 type
= current_templates
->end
[-1].operand_types
[1];
9061 expected_reg
= hash_find (reg_hash
,
9062 di_si
[addr_mode
][type
.bitfield
.esseg
]);
9066 expected_reg
= hash_find (reg_hash
, bx
[addr_mode
]);
9068 if (i
.base_reg
!= expected_reg
9070 || operand_type_check (i
.types
[this_operand
], disp
))
9072 /* The second memory operand must have the same size as
9076 && !((addr_mode
== CODE_64BIT
9077 && i
.base_reg
->reg_type
.bitfield
.qword
)
9078 || (addr_mode
== CODE_32BIT
9079 ? i
.base_reg
->reg_type
.bitfield
.dword
9080 : i
.base_reg
->reg_type
.bitfield
.word
)))
9083 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
9085 intel_syntax
? '[' : '(',
9087 expected_reg
->reg_name
,
9088 intel_syntax
? ']' : ')');
9095 as_bad (_("`%s' is not a valid %s expression"),
9096 operand_string
, kind
);
9101 if (addr_mode
!= CODE_16BIT
)
9103 /* 32-bit/64-bit checks. */
9105 && (addr_mode
== CODE_64BIT
9106 ? !i
.base_reg
->reg_type
.bitfield
.qword
9107 : !i
.base_reg
->reg_type
.bitfield
.dword
)
9109 || (i
.base_reg
->reg_num
9110 != (addr_mode
== CODE_64BIT
? RegRip
: RegEip
))))
9112 && !i
.index_reg
->reg_type
.bitfield
.xmmword
9113 && !i
.index_reg
->reg_type
.bitfield
.ymmword
9114 && !i
.index_reg
->reg_type
.bitfield
.zmmword
9115 && ((addr_mode
== CODE_64BIT
9116 ? !(i
.index_reg
->reg_type
.bitfield
.qword
9117 || i
.index_reg
->reg_num
== RegRiz
)
9118 : !(i
.index_reg
->reg_type
.bitfield
.dword
9119 || i
.index_reg
->reg_num
== RegEiz
))
9120 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
9123 /* bndmk, bndldx, and bndstx have special restrictions. */
9124 if (current_templates
->start
->base_opcode
== 0xf30f1b
9125 || (current_templates
->start
->base_opcode
& ~1) == 0x0f1a)
9127 /* They cannot use RIP-relative addressing. */
9128 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegRip
)
9130 as_bad (_("`%s' cannot be used here"), operand_string
);
9134 /* bndldx and bndstx ignore their scale factor. */
9135 if (current_templates
->start
->base_opcode
!= 0xf30f1b
9136 && i
.log2_scale_factor
)
9137 as_warn (_("register scaling is being ignored here"));
9142 /* 16-bit checks. */
9144 && (!i
.base_reg
->reg_type
.bitfield
.word
9145 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
9147 && (!i
.index_reg
->reg_type
.bitfield
.word
9148 || !i
.index_reg
->reg_type
.bitfield
.baseindex
9150 && i
.base_reg
->reg_num
< 6
9151 && i
.index_reg
->reg_num
>= 6
9152 && i
.log2_scale_factor
== 0))))
9159 /* Handle vector immediates. */
9162 RC_SAE_immediate (const char *imm_start
)
9164 unsigned int match_found
, j
;
9165 const char *pstr
= imm_start
;
9173 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); j
++)
9175 if (!strncmp (pstr
, RC_NamesTable
[j
].name
, RC_NamesTable
[j
].len
))
9179 rc_op
.type
= RC_NamesTable
[j
].type
;
9180 rc_op
.operand
= this_operand
;
9181 i
.rounding
= &rc_op
;
9185 as_bad (_("duplicated `%s'"), imm_start
);
9188 pstr
+= RC_NamesTable
[j
].len
;
9198 as_bad (_("Missing '}': '%s'"), imm_start
);
9201 /* RC/SAE immediate string should contain nothing more. */;
9204 as_bad (_("Junk after '}': '%s'"), imm_start
);
9208 exp
= &im_expressions
[i
.imm_operands
++];
9209 i
.op
[this_operand
].imms
= exp
;
9211 exp
->X_op
= O_constant
;
9212 exp
->X_add_number
= 0;
9213 exp
->X_add_symbol
= (symbolS
*) 0;
9214 exp
->X_op_symbol
= (symbolS
*) 0;
9216 i
.types
[this_operand
].bitfield
.imm8
= 1;
9220 /* Only string instructions can have a second memory operand, so
9221 reduce current_templates to just those if it contains any. */
9223 maybe_adjust_templates (void)
9225 const insn_template
*t
;
9227 gas_assert (i
.mem_operands
== 1);
9229 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
9230 if (t
->opcode_modifier
.isstring
)
9233 if (t
< current_templates
->end
)
9235 static templates aux_templates
;
9236 bfd_boolean recheck
;
9238 aux_templates
.start
= t
;
9239 for (; t
< current_templates
->end
; ++t
)
9240 if (!t
->opcode_modifier
.isstring
)
9242 aux_templates
.end
= t
;
9244 /* Determine whether to re-check the first memory operand. */
9245 recheck
= (aux_templates
.start
!= current_templates
->start
9246 || t
!= current_templates
->end
);
9248 current_templates
= &aux_templates
;
9253 if (i
.memop1_string
!= NULL
9254 && i386_index_check (i
.memop1_string
) == 0)
9263 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
9267 i386_att_operand (char *operand_string
)
9271 char *op_string
= operand_string
;
9273 if (is_space_char (*op_string
))
9276 /* We check for an absolute prefix (differentiating,
9277 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
9278 if (*op_string
== ABSOLUTE_PREFIX
)
9281 if (is_space_char (*op_string
))
9283 i
.types
[this_operand
].bitfield
.jumpabsolute
= 1;
9286 /* Check if operand is a register. */
9287 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
9289 i386_operand_type temp
;
9291 /* Check for a segment override by searching for ':' after a
9292 segment register. */
9294 if (is_space_char (*op_string
))
9296 if (*op_string
== ':'
9297 && (r
->reg_type
.bitfield
.sreg2
9298 || r
->reg_type
.bitfield
.sreg3
))
9303 i
.seg
[i
.mem_operands
] = &es
;
9306 i
.seg
[i
.mem_operands
] = &cs
;
9309 i
.seg
[i
.mem_operands
] = &ss
;
9312 i
.seg
[i
.mem_operands
] = &ds
;
9315 i
.seg
[i
.mem_operands
] = &fs
;
9318 i
.seg
[i
.mem_operands
] = &gs
;
9322 /* Skip the ':' and whitespace. */
9324 if (is_space_char (*op_string
))
9327 if (!is_digit_char (*op_string
)
9328 && !is_identifier_char (*op_string
)
9329 && *op_string
!= '('
9330 && *op_string
!= ABSOLUTE_PREFIX
)
9332 as_bad (_("bad memory operand `%s'"), op_string
);
9335 /* Handle case of %es:*foo. */
9336 if (*op_string
== ABSOLUTE_PREFIX
)
9339 if (is_space_char (*op_string
))
9341 i
.types
[this_operand
].bitfield
.jumpabsolute
= 1;
9343 goto do_memory_reference
;
9346 /* Handle vector operations. */
9347 if (*op_string
== '{')
9349 op_string
= check_VecOperations (op_string
, NULL
);
9350 if (op_string
== NULL
)
9356 as_bad (_("junk `%s' after register"), op_string
);
9360 temp
.bitfield
.baseindex
= 0;
9361 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
9363 i
.types
[this_operand
].bitfield
.unspecified
= 0;
9364 i
.op
[this_operand
].regs
= r
;
9367 else if (*op_string
== REGISTER_PREFIX
)
9369 as_bad (_("bad register name `%s'"), op_string
);
9372 else if (*op_string
== IMMEDIATE_PREFIX
)
9375 if (i
.types
[this_operand
].bitfield
.jumpabsolute
)
9377 as_bad (_("immediate operand illegal with absolute jump"));
9380 if (!i386_immediate (op_string
))
9383 else if (RC_SAE_immediate (operand_string
))
9385 /* If it is a RC or SAE immediate, do nothing. */
9388 else if (is_digit_char (*op_string
)
9389 || is_identifier_char (*op_string
)
9390 || *op_string
== '"'
9391 || *op_string
== '(')
9393 /* This is a memory reference of some sort. */
9396 /* Start and end of displacement string expression (if found). */
9397 char *displacement_string_start
;
9398 char *displacement_string_end
;
9401 do_memory_reference
:
9402 if (i
.mem_operands
== 1 && !maybe_adjust_templates ())
9404 if ((i
.mem_operands
== 1
9405 && !current_templates
->start
->opcode_modifier
.isstring
)
9406 || i
.mem_operands
== 2)
9408 as_bad (_("too many memory references for `%s'"),
9409 current_templates
->start
->name
);
9413 /* Check for base index form. We detect the base index form by
9414 looking for an ')' at the end of the operand, searching
9415 for the '(' matching it, and finding a REGISTER_PREFIX or ','
9417 base_string
= op_string
+ strlen (op_string
);
9419 /* Handle vector operations. */
9420 vop_start
= strchr (op_string
, '{');
9421 if (vop_start
&& vop_start
< base_string
)
9423 if (check_VecOperations (vop_start
, base_string
) == NULL
)
9425 base_string
= vop_start
;
9429 if (is_space_char (*base_string
))
9432 /* If we only have a displacement, set-up for it to be parsed later. */
9433 displacement_string_start
= op_string
;
9434 displacement_string_end
= base_string
+ 1;
9436 if (*base_string
== ')')
9439 unsigned int parens_balanced
= 1;
9440 /* We've already checked that the number of left & right ()'s are
9441 equal, so this loop will not be infinite. */
9445 if (*base_string
== ')')
9447 if (*base_string
== '(')
9450 while (parens_balanced
);
9452 temp_string
= base_string
;
9454 /* Skip past '(' and whitespace. */
9456 if (is_space_char (*base_string
))
9459 if (*base_string
== ','
9460 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
9463 displacement_string_end
= temp_string
;
9465 i
.types
[this_operand
].bitfield
.baseindex
= 1;
9469 base_string
= end_op
;
9470 if (is_space_char (*base_string
))
9474 /* There may be an index reg or scale factor here. */
9475 if (*base_string
== ',')
9478 if (is_space_char (*base_string
))
9481 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
9484 base_string
= end_op
;
9485 if (is_space_char (*base_string
))
9487 if (*base_string
== ',')
9490 if (is_space_char (*base_string
))
9493 else if (*base_string
!= ')')
9495 as_bad (_("expecting `,' or `)' "
9496 "after index register in `%s'"),
9501 else if (*base_string
== REGISTER_PREFIX
)
9503 end_op
= strchr (base_string
, ',');
9506 as_bad (_("bad register name `%s'"), base_string
);
9510 /* Check for scale factor. */
9511 if (*base_string
!= ')')
9513 char *end_scale
= i386_scale (base_string
);
9518 base_string
= end_scale
;
9519 if (is_space_char (*base_string
))
9521 if (*base_string
!= ')')
9523 as_bad (_("expecting `)' "
9524 "after scale factor in `%s'"),
9529 else if (!i
.index_reg
)
9531 as_bad (_("expecting index register or scale factor "
9532 "after `,'; got '%c'"),
9537 else if (*base_string
!= ')')
9539 as_bad (_("expecting `,' or `)' "
9540 "after base register in `%s'"),
9545 else if (*base_string
== REGISTER_PREFIX
)
9547 end_op
= strchr (base_string
, ',');
9550 as_bad (_("bad register name `%s'"), base_string
);
9555 /* If there's an expression beginning the operand, parse it,
9556 assuming displacement_string_start and
9557 displacement_string_end are meaningful. */
9558 if (displacement_string_start
!= displacement_string_end
)
9560 if (!i386_displacement (displacement_string_start
,
9561 displacement_string_end
))
9565 /* Special case for (%dx) while doing input/output op. */
9567 && operand_type_equal (&i
.base_reg
->reg_type
,
9568 ®16_inoutportreg
)
9570 && i
.log2_scale_factor
== 0
9571 && i
.seg
[i
.mem_operands
] == 0
9572 && !operand_type_check (i
.types
[this_operand
], disp
))
9574 i
.types
[this_operand
] = inoutportreg
;
9578 if (i386_index_check (operand_string
) == 0)
9580 i
.types
[this_operand
].bitfield
.mem
= 1;
9581 if (i
.mem_operands
== 0)
9582 i
.memop1_string
= xstrdup (operand_string
);
9587 /* It's not a memory operand; argh! */
9588 as_bad (_("invalid char %s beginning operand %d `%s'"),
9589 output_invalid (*op_string
),
9594 return 1; /* Normal return. */
9597 /* Calculate the maximum variable size (i.e., excluding fr_fix)
9598 that an rs_machine_dependent frag may reach. */
9601 i386_frag_max_var (fragS
*frag
)
9603 /* The only relaxable frags are for jumps.
9604 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
9605 gas_assert (frag
->fr_type
== rs_machine_dependent
);
9606 return TYPE_FROM_RELAX_STATE (frag
->fr_subtype
) == UNCOND_JUMP
? 4 : 5;
9609 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9611 elf_symbol_resolved_in_segment_p (symbolS
*fr_symbol
, offsetT fr_var
)
9613 /* STT_GNU_IFUNC symbol must go through PLT. */
9614 if ((symbol_get_bfdsym (fr_symbol
)->flags
9615 & BSF_GNU_INDIRECT_FUNCTION
) != 0)
9618 if (!S_IS_EXTERNAL (fr_symbol
))
9619 /* Symbol may be weak or local. */
9620 return !S_IS_WEAK (fr_symbol
);
9622 /* Global symbols with non-default visibility can't be preempted. */
9623 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol
)) != STV_DEFAULT
)
9626 if (fr_var
!= NO_RELOC
)
9627 switch ((enum bfd_reloc_code_real
) fr_var
)
9629 case BFD_RELOC_386_PLT32
:
9630 case BFD_RELOC_X86_64_PLT32
:
9631 /* Symbol with PLT relocation may be preempted. */
9637 /* Global symbols with default visibility in a shared library may be
9638 preempted by another definition. */
9643 /* md_estimate_size_before_relax()
9645 Called just before relax() for rs_machine_dependent frags. The x86
9646 assembler uses these frags to handle variable size jump
9649 Any symbol that is now undefined will not become defined.
9650 Return the correct fr_subtype in the frag.
9651 Return the initial "guess for variable size of frag" to caller.
9652 The guess is actually the growth beyond the fixed part. Whatever
9653 we do to grow the fixed or variable part contributes to our
9657 md_estimate_size_before_relax (fragS
*fragP
, segT segment
)
9659 /* We've already got fragP->fr_subtype right; all we have to do is
9660 check for un-relaxable symbols. On an ELF system, we can't relax
9661 an externally visible symbol, because it may be overridden by a
9663 if (S_GET_SEGMENT (fragP
->fr_symbol
) != segment
9664 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9666 && !elf_symbol_resolved_in_segment_p (fragP
->fr_symbol
,
9669 #if defined (OBJ_COFF) && defined (TE_PE)
9670 || (OUTPUT_FLAVOR
== bfd_target_coff_flavour
9671 && S_IS_WEAK (fragP
->fr_symbol
))
9675 /* Symbol is undefined in this segment, or we need to keep a
9676 reloc so that weak symbols can be overridden. */
9677 int size
= (fragP
->fr_subtype
& CODE16
) ? 2 : 4;
9678 enum bfd_reloc_code_real reloc_type
;
9679 unsigned char *opcode
;
9682 if (fragP
->fr_var
!= NO_RELOC
)
9683 reloc_type
= (enum bfd_reloc_code_real
) fragP
->fr_var
;
9685 reloc_type
= BFD_RELOC_16_PCREL
;
9686 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9687 else if (need_plt32_p (fragP
->fr_symbol
))
9688 reloc_type
= BFD_RELOC_X86_64_PLT32
;
9691 reloc_type
= BFD_RELOC_32_PCREL
;
9693 old_fr_fix
= fragP
->fr_fix
;
9694 opcode
= (unsigned char *) fragP
->fr_opcode
;
9696 switch (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
))
9699 /* Make jmp (0xeb) a (d)word displacement jump. */
9701 fragP
->fr_fix
+= size
;
9702 fix_new (fragP
, old_fr_fix
, size
,
9704 fragP
->fr_offset
, 1,
9710 && (!no_cond_jump_promotion
|| fragP
->fr_var
!= NO_RELOC
))
9712 /* Negate the condition, and branch past an
9713 unconditional jump. */
9716 /* Insert an unconditional jump. */
9718 /* We added two extra opcode bytes, and have a two byte
9720 fragP
->fr_fix
+= 2 + 2;
9721 fix_new (fragP
, old_fr_fix
+ 2, 2,
9723 fragP
->fr_offset
, 1,
9730 if (no_cond_jump_promotion
&& fragP
->fr_var
== NO_RELOC
)
9735 fixP
= fix_new (fragP
, old_fr_fix
, 1,
9737 fragP
->fr_offset
, 1,
9739 fixP
->fx_signed
= 1;
9743 /* This changes the byte-displacement jump 0x7N
9744 to the (d)word-displacement jump 0x0f,0x8N. */
9745 opcode
[1] = opcode
[0] + 0x10;
9746 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
9747 /* We've added an opcode byte. */
9748 fragP
->fr_fix
+= 1 + size
;
9749 fix_new (fragP
, old_fr_fix
+ 1, size
,
9751 fragP
->fr_offset
, 1,
9756 BAD_CASE (fragP
->fr_subtype
);
9760 return fragP
->fr_fix
- old_fr_fix
;
9763 /* Guess size depending on current relax state. Initially the relax
9764 state will correspond to a short jump and we return 1, because
9765 the variable part of the frag (the branch offset) is one byte
9766 long. However, we can relax a section more than once and in that
9767 case we must either set fr_subtype back to the unrelaxed state,
9768 or return the value for the appropriate branch. */
9769 return md_relax_table
[fragP
->fr_subtype
].rlx_length
;
9772 /* Called after relax() is finished.
9774 In: Address of frag.
9775 fr_type == rs_machine_dependent.
9776 fr_subtype is what the address relaxed to.
9778 Out: Any fixSs and constants are set up.
9779 Caller will turn frag into a ".space 0". */
9782 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
9785 unsigned char *opcode
;
9786 unsigned char *where_to_put_displacement
= NULL
;
9787 offsetT target_address
;
9788 offsetT opcode_address
;
9789 unsigned int extension
= 0;
9790 offsetT displacement_from_opcode_start
;
9792 opcode
= (unsigned char *) fragP
->fr_opcode
;
9794 /* Address we want to reach in file space. */
9795 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
9797 /* Address opcode resides at in file space. */
9798 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
9800 /* Displacement from opcode start to fill into instruction. */
9801 displacement_from_opcode_start
= target_address
- opcode_address
;
9803 if ((fragP
->fr_subtype
& BIG
) == 0)
9805 /* Don't have to change opcode. */
9806 extension
= 1; /* 1 opcode + 1 displacement */
9807 where_to_put_displacement
= &opcode
[1];
9811 if (no_cond_jump_promotion
9812 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
9813 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
9814 _("long jump required"));
9816 switch (fragP
->fr_subtype
)
9818 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
9819 extension
= 4; /* 1 opcode + 4 displacement */
9821 where_to_put_displacement
= &opcode
[1];
9824 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
9825 extension
= 2; /* 1 opcode + 2 displacement */
9827 where_to_put_displacement
= &opcode
[1];
9830 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
9831 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
9832 extension
= 5; /* 2 opcode + 4 displacement */
9833 opcode
[1] = opcode
[0] + 0x10;
9834 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
9835 where_to_put_displacement
= &opcode
[2];
9838 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
9839 extension
= 3; /* 2 opcode + 2 displacement */
9840 opcode
[1] = opcode
[0] + 0x10;
9841 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
9842 where_to_put_displacement
= &opcode
[2];
9845 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
9850 where_to_put_displacement
= &opcode
[3];
9854 BAD_CASE (fragP
->fr_subtype
);
9859 /* If size if less then four we are sure that the operand fits,
9860 but if it's 4, then it could be that the displacement is larger
9862 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
9864 && ((addressT
) (displacement_from_opcode_start
- extension
9865 + ((addressT
) 1 << 31))
9866 > (((addressT
) 2 << 31) - 1)))
9868 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
9869 _("jump target out of range"));
9870 /* Make us emit 0. */
9871 displacement_from_opcode_start
= extension
;
9873 /* Now put displacement after opcode. */
9874 md_number_to_chars ((char *) where_to_put_displacement
,
9875 (valueT
) (displacement_from_opcode_start
- extension
),
9876 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
9877 fragP
->fr_fix
+= extension
;
9880 /* Apply a fixup (fixP) to segment data, once it has been determined
9881 by our caller that we have all the info we need to fix it up.
9883 Parameter valP is the pointer to the value of the bits.
9885 On the 386, immediates, displacements, and data pointers are all in
9886 the same (little-endian) format, so we don't need to care about which
9890 md_apply_fix (fixS
*fixP
, valueT
*valP
, segT seg ATTRIBUTE_UNUSED
)
9892 char *p
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
9893 valueT value
= *valP
;
9895 #if !defined (TE_Mach)
9898 switch (fixP
->fx_r_type
)
9904 fixP
->fx_r_type
= BFD_RELOC_64_PCREL
;
9907 case BFD_RELOC_X86_64_32S
:
9908 fixP
->fx_r_type
= BFD_RELOC_32_PCREL
;
9911 fixP
->fx_r_type
= BFD_RELOC_16_PCREL
;
9914 fixP
->fx_r_type
= BFD_RELOC_8_PCREL
;
9919 if (fixP
->fx_addsy
!= NULL
9920 && (fixP
->fx_r_type
== BFD_RELOC_32_PCREL
9921 || fixP
->fx_r_type
== BFD_RELOC_64_PCREL
9922 || fixP
->fx_r_type
== BFD_RELOC_16_PCREL
9923 || fixP
->fx_r_type
== BFD_RELOC_8_PCREL
)
9924 && !use_rela_relocations
)
9926 /* This is a hack. There should be a better way to handle this.
9927 This covers for the fact that bfd_install_relocation will
9928 subtract the current location (for partial_inplace, PC relative
9929 relocations); see more below. */
9933 || OUTPUT_FLAVOR
== bfd_target_coff_flavour
9936 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
9938 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9941 segT sym_seg
= S_GET_SEGMENT (fixP
->fx_addsy
);
9944 || (symbol_section_p (fixP
->fx_addsy
)
9945 && sym_seg
!= absolute_section
))
9946 && !generic_force_reloc (fixP
))
9948 /* Yes, we add the values in twice. This is because
9949 bfd_install_relocation subtracts them out again. I think
9950 bfd_install_relocation is broken, but I don't dare change
9952 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
9956 #if defined (OBJ_COFF) && defined (TE_PE)
9957 /* For some reason, the PE format does not store a
9958 section address offset for a PC relative symbol. */
9959 if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
9960 || S_IS_WEAK (fixP
->fx_addsy
))
9961 value
+= md_pcrel_from (fixP
);
9964 #if defined (OBJ_COFF) && defined (TE_PE)
9965 if (fixP
->fx_addsy
!= NULL
9966 && S_IS_WEAK (fixP
->fx_addsy
)
9967 /* PR 16858: Do not modify weak function references. */
9968 && ! fixP
->fx_pcrel
)
9970 #if !defined (TE_PEP)
9971 /* For x86 PE weak function symbols are neither PC-relative
9972 nor do they set S_IS_FUNCTION. So the only reliable way
9973 to detect them is to check the flags of their containing
9975 if (S_GET_SEGMENT (fixP
->fx_addsy
) != NULL
9976 && S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_CODE
)
9980 value
-= S_GET_VALUE (fixP
->fx_addsy
);
9984 /* Fix a few things - the dynamic linker expects certain values here,
9985 and we must not disappoint it. */
9986 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9987 if (IS_ELF
&& fixP
->fx_addsy
)
9988 switch (fixP
->fx_r_type
)
9990 case BFD_RELOC_386_PLT32
:
9991 case BFD_RELOC_X86_64_PLT32
:
9992 /* Make the jump instruction point to the address of the operand. At
9993 runtime we merely add the offset to the actual PLT entry. */
9997 case BFD_RELOC_386_TLS_GD
:
9998 case BFD_RELOC_386_TLS_LDM
:
9999 case BFD_RELOC_386_TLS_IE_32
:
10000 case BFD_RELOC_386_TLS_IE
:
10001 case BFD_RELOC_386_TLS_GOTIE
:
10002 case BFD_RELOC_386_TLS_GOTDESC
:
10003 case BFD_RELOC_X86_64_TLSGD
:
10004 case BFD_RELOC_X86_64_TLSLD
:
10005 case BFD_RELOC_X86_64_GOTTPOFF
:
10006 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
10007 value
= 0; /* Fully resolved at runtime. No addend. */
10009 case BFD_RELOC_386_TLS_LE
:
10010 case BFD_RELOC_386_TLS_LDO_32
:
10011 case BFD_RELOC_386_TLS_LE_32
:
10012 case BFD_RELOC_X86_64_DTPOFF32
:
10013 case BFD_RELOC_X86_64_DTPOFF64
:
10014 case BFD_RELOC_X86_64_TPOFF32
:
10015 case BFD_RELOC_X86_64_TPOFF64
:
10016 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
10019 case BFD_RELOC_386_TLS_DESC_CALL
:
10020 case BFD_RELOC_X86_64_TLSDESC_CALL
:
10021 value
= 0; /* Fully resolved at runtime. No addend. */
10022 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
10026 case BFD_RELOC_VTABLE_INHERIT
:
10027 case BFD_RELOC_VTABLE_ENTRY
:
10034 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
10036 #endif /* !defined (TE_Mach) */
10038 /* Are we finished with this relocation now? */
10039 if (fixP
->fx_addsy
== NULL
)
10041 #if defined (OBJ_COFF) && defined (TE_PE)
10042 else if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
10045 /* Remember value for tc_gen_reloc. */
10046 fixP
->fx_addnumber
= value
;
10047 /* Clear out the frag for now. */
10051 else if (use_rela_relocations
)
10053 fixP
->fx_no_overflow
= 1;
10054 /* Remember value for tc_gen_reloc. */
10055 fixP
->fx_addnumber
= value
;
10059 md_number_to_chars (p
, value
, fixP
->fx_size
);
10063 md_atof (int type
, char *litP
, int *sizeP
)
10065 /* This outputs the LITTLENUMs in REVERSE order;
10066 in accord with the bigendian 386. */
10067 return ieee_md_atof (type
, litP
, sizeP
, FALSE
);
10070 static char output_invalid_buf
[sizeof (unsigned char) * 2 + 6];
10073 output_invalid (int c
)
10076 snprintf (output_invalid_buf
, sizeof (output_invalid_buf
),
10079 snprintf (output_invalid_buf
, sizeof (output_invalid_buf
),
10080 "(0x%x)", (unsigned char) c
);
10081 return output_invalid_buf
;
10084 /* REG_STRING starts *before* REGISTER_PREFIX. */
10086 static const reg_entry
*
10087 parse_real_register (char *reg_string
, char **end_op
)
10089 char *s
= reg_string
;
10091 char reg_name_given
[MAX_REG_NAME_SIZE
+ 1];
10092 const reg_entry
*r
;
10094 /* Skip possible REGISTER_PREFIX and possible whitespace. */
10095 if (*s
== REGISTER_PREFIX
)
10098 if (is_space_char (*s
))
10101 p
= reg_name_given
;
10102 while ((*p
++ = register_chars
[(unsigned char) *s
]) != '\0')
10104 if (p
>= reg_name_given
+ MAX_REG_NAME_SIZE
)
10105 return (const reg_entry
*) NULL
;
10109 /* For naked regs, make sure that we are not dealing with an identifier.
10110 This prevents confusing an identifier like `eax_var' with register
10112 if (allow_naked_reg
&& identifier_chars
[(unsigned char) *s
])
10113 return (const reg_entry
*) NULL
;
10117 r
= (const reg_entry
*) hash_find (reg_hash
, reg_name_given
);
10119 /* Handle floating point regs, allowing spaces in the (i) part. */
10120 if (r
== i386_regtab
/* %st is first entry of table */)
10122 if (is_space_char (*s
))
10127 if (is_space_char (*s
))
10129 if (*s
>= '0' && *s
<= '7')
10131 int fpr
= *s
- '0';
10133 if (is_space_char (*s
))
10138 r
= (const reg_entry
*) hash_find (reg_hash
, "st(0)");
10143 /* We have "%st(" then garbage. */
10144 return (const reg_entry
*) NULL
;
10148 if (r
== NULL
|| allow_pseudo_reg
)
10151 if (operand_type_all_zero (&r
->reg_type
))
10152 return (const reg_entry
*) NULL
;
10154 if ((r
->reg_type
.bitfield
.dword
10155 || r
->reg_type
.bitfield
.sreg3
10156 || r
->reg_type
.bitfield
.control
10157 || r
->reg_type
.bitfield
.debug
10158 || r
->reg_type
.bitfield
.test
)
10159 && !cpu_arch_flags
.bitfield
.cpui386
)
10160 return (const reg_entry
*) NULL
;
10162 if (r
->reg_type
.bitfield
.tbyte
10163 && !cpu_arch_flags
.bitfield
.cpu8087
10164 && !cpu_arch_flags
.bitfield
.cpu287
10165 && !cpu_arch_flags
.bitfield
.cpu387
)
10166 return (const reg_entry
*) NULL
;
10168 if (r
->reg_type
.bitfield
.regmmx
&& !cpu_arch_flags
.bitfield
.cpuregmmx
)
10169 return (const reg_entry
*) NULL
;
10171 if (r
->reg_type
.bitfield
.xmmword
&& !cpu_arch_flags
.bitfield
.cpuregxmm
)
10172 return (const reg_entry
*) NULL
;
10174 if (r
->reg_type
.bitfield
.ymmword
&& !cpu_arch_flags
.bitfield
.cpuregymm
)
10175 return (const reg_entry
*) NULL
;
10177 if (r
->reg_type
.bitfield
.zmmword
&& !cpu_arch_flags
.bitfield
.cpuregzmm
)
10178 return (const reg_entry
*) NULL
;
10180 if (r
->reg_type
.bitfield
.regmask
10181 && !cpu_arch_flags
.bitfield
.cpuregmask
)
10182 return (const reg_entry
*) NULL
;
10184 /* Don't allow fake index register unless allow_index_reg isn't 0. */
10185 if (!allow_index_reg
10186 && (r
->reg_num
== RegEiz
|| r
->reg_num
== RegRiz
))
10187 return (const reg_entry
*) NULL
;
10189 /* Upper 16 vector register is only available with VREX in 64bit
10191 if ((r
->reg_flags
& RegVRex
))
10193 if (i
.vec_encoding
== vex_encoding_default
)
10194 i
.vec_encoding
= vex_encoding_evex
;
10196 if (!cpu_arch_flags
.bitfield
.cpuvrex
10197 || i
.vec_encoding
!= vex_encoding_evex
10198 || flag_code
!= CODE_64BIT
)
10199 return (const reg_entry
*) NULL
;
10202 if (((r
->reg_flags
& (RegRex64
| RegRex
))
10203 || r
->reg_type
.bitfield
.qword
)
10204 && (!cpu_arch_flags
.bitfield
.cpulm
10205 || !operand_type_equal (&r
->reg_type
, &control
))
10206 && flag_code
!= CODE_64BIT
)
10207 return (const reg_entry
*) NULL
;
10209 if (r
->reg_type
.bitfield
.sreg3
&& r
->reg_num
== RegFlat
&& !intel_syntax
)
10210 return (const reg_entry
*) NULL
;
10215 /* REG_STRING starts *before* REGISTER_PREFIX. */
10217 static const reg_entry
*
10218 parse_register (char *reg_string
, char **end_op
)
10220 const reg_entry
*r
;
10222 if (*reg_string
== REGISTER_PREFIX
|| allow_naked_reg
)
10223 r
= parse_real_register (reg_string
, end_op
);
10228 char *save
= input_line_pointer
;
10232 input_line_pointer
= reg_string
;
10233 c
= get_symbol_name (®_string
);
10234 symbolP
= symbol_find (reg_string
);
10235 if (symbolP
&& S_GET_SEGMENT (symbolP
) == reg_section
)
10237 const expressionS
*e
= symbol_get_value_expression (symbolP
);
10239 know (e
->X_op
== O_register
);
10240 know (e
->X_add_number
>= 0
10241 && (valueT
) e
->X_add_number
< i386_regtab_size
);
10242 r
= i386_regtab
+ e
->X_add_number
;
10243 if ((r
->reg_flags
& RegVRex
))
10244 i
.vec_encoding
= vex_encoding_evex
;
10245 *end_op
= input_line_pointer
;
10247 *input_line_pointer
= c
;
10248 input_line_pointer
= save
;
10254 i386_parse_name (char *name
, expressionS
*e
, char *nextcharP
)
10256 const reg_entry
*r
;
10257 char *end
= input_line_pointer
;
10260 r
= parse_register (name
, &input_line_pointer
);
10261 if (r
&& end
<= input_line_pointer
)
10263 *nextcharP
= *input_line_pointer
;
10264 *input_line_pointer
= 0;
10265 e
->X_op
= O_register
;
10266 e
->X_add_number
= r
- i386_regtab
;
10269 input_line_pointer
= end
;
10271 return intel_syntax
? i386_intel_parse_name (name
, e
) : 0;
10275 md_operand (expressionS
*e
)
10278 const reg_entry
*r
;
10280 switch (*input_line_pointer
)
10282 case REGISTER_PREFIX
:
10283 r
= parse_real_register (input_line_pointer
, &end
);
10286 e
->X_op
= O_register
;
10287 e
->X_add_number
= r
- i386_regtab
;
10288 input_line_pointer
= end
;
10293 gas_assert (intel_syntax
);
10294 end
= input_line_pointer
++;
10296 if (*input_line_pointer
== ']')
10298 ++input_line_pointer
;
10299 e
->X_op_symbol
= make_expr_symbol (e
);
10300 e
->X_add_symbol
= NULL
;
10301 e
->X_add_number
= 0;
10306 e
->X_op
= O_absent
;
10307 input_line_pointer
= end
;
10314 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10315 const char *md_shortopts
= "kVQ:sqnO::";
10317 const char *md_shortopts
= "qnO::";
10320 #define OPTION_32 (OPTION_MD_BASE + 0)
10321 #define OPTION_64 (OPTION_MD_BASE + 1)
10322 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
10323 #define OPTION_MARCH (OPTION_MD_BASE + 3)
10324 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
10325 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
10326 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
10327 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
10328 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
10329 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
10330 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
10331 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
10332 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
10333 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
10334 #define OPTION_X32 (OPTION_MD_BASE + 14)
10335 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
10336 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
10337 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
10338 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
10339 #define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
10340 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
10341 #define OPTION_MSHARED (OPTION_MD_BASE + 21)
10342 #define OPTION_MAMD64 (OPTION_MD_BASE + 22)
10343 #define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
10344 #define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
10345 #define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 25)
10347 struct option md_longopts
[] =
10349 {"32", no_argument
, NULL
, OPTION_32
},
10350 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
10351 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
10352 {"64", no_argument
, NULL
, OPTION_64
},
10354 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10355 {"x32", no_argument
, NULL
, OPTION_X32
},
10356 {"mshared", no_argument
, NULL
, OPTION_MSHARED
},
10358 {"divide", no_argument
, NULL
, OPTION_DIVIDE
},
10359 {"march", required_argument
, NULL
, OPTION_MARCH
},
10360 {"mtune", required_argument
, NULL
, OPTION_MTUNE
},
10361 {"mmnemonic", required_argument
, NULL
, OPTION_MMNEMONIC
},
10362 {"msyntax", required_argument
, NULL
, OPTION_MSYNTAX
},
10363 {"mindex-reg", no_argument
, NULL
, OPTION_MINDEX_REG
},
10364 {"mnaked-reg", no_argument
, NULL
, OPTION_MNAKED_REG
},
10365 {"mold-gcc", no_argument
, NULL
, OPTION_MOLD_GCC
},
10366 {"msse2avx", no_argument
, NULL
, OPTION_MSSE2AVX
},
10367 {"msse-check", required_argument
, NULL
, OPTION_MSSE_CHECK
},
10368 {"moperand-check", required_argument
, NULL
, OPTION_MOPERAND_CHECK
},
10369 {"mavxscalar", required_argument
, NULL
, OPTION_MAVXSCALAR
},
10370 {"madd-bnd-prefix", no_argument
, NULL
, OPTION_MADD_BND_PREFIX
},
10371 {"mevexlig", required_argument
, NULL
, OPTION_MEVEXLIG
},
10372 {"mevexwig", required_argument
, NULL
, OPTION_MEVEXWIG
},
10373 # if defined (TE_PE) || defined (TE_PEP)
10374 {"mbig-obj", no_argument
, NULL
, OPTION_MBIG_OBJ
},
10376 {"momit-lock-prefix", required_argument
, NULL
, OPTION_MOMIT_LOCK_PREFIX
},
10377 {"mfence-as-lock-add", required_argument
, NULL
, OPTION_MFENCE_AS_LOCK_ADD
},
10378 {"mrelax-relocations", required_argument
, NULL
, OPTION_MRELAX_RELOCATIONS
},
10379 {"mevexrcig", required_argument
, NULL
, OPTION_MEVEXRCIG
},
10380 {"mamd64", no_argument
, NULL
, OPTION_MAMD64
},
10381 {"mintel64", no_argument
, NULL
, OPTION_MINTEL64
},
10382 {NULL
, no_argument
, NULL
, 0}
10384 size_t md_longopts_size
= sizeof (md_longopts
);
10387 md_parse_option (int c
, const char *arg
)
10390 char *arch
, *next
, *saved
;
10395 optimize_align_code
= 0;
10399 quiet_warnings
= 1;
10402 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10403 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
10404 should be emitted or not. FIXME: Not implemented. */
10408 /* -V: SVR4 argument to print version ID. */
10410 print_version_id ();
10413 /* -k: Ignore for FreeBSD compatibility. */
10418 /* -s: On i386 Solaris, this tells the native assembler to use
10419 .stab instead of .stab.excl. We always use .stab anyhow. */
10422 case OPTION_MSHARED
:
10426 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
10427 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
10430 const char **list
, **l
;
10432 list
= bfd_target_list ();
10433 for (l
= list
; *l
!= NULL
; l
++)
10434 if (CONST_STRNEQ (*l
, "elf64-x86-64")
10435 || strcmp (*l
, "coff-x86-64") == 0
10436 || strcmp (*l
, "pe-x86-64") == 0
10437 || strcmp (*l
, "pei-x86-64") == 0
10438 || strcmp (*l
, "mach-o-x86-64") == 0)
10440 default_arch
= "x86_64";
10444 as_fatal (_("no compiled in support for x86_64"));
10450 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10454 const char **list
, **l
;
10456 list
= bfd_target_list ();
10457 for (l
= list
; *l
!= NULL
; l
++)
10458 if (CONST_STRNEQ (*l
, "elf32-x86-64"))
10460 default_arch
= "x86_64:32";
10464 as_fatal (_("no compiled in support for 32bit x86_64"));
10468 as_fatal (_("32bit x86_64 is only supported for ELF"));
10473 default_arch
= "i386";
10476 case OPTION_DIVIDE
:
10477 #ifdef SVR4_COMMENT_CHARS
10482 n
= XNEWVEC (char, strlen (i386_comment_chars
) + 1);
10484 for (s
= i386_comment_chars
; *s
!= '\0'; s
++)
10488 i386_comment_chars
= n
;
10494 saved
= xstrdup (arg
);
10496 /* Allow -march=+nosse. */
10502 as_fatal (_("invalid -march= option: `%s'"), arg
);
10503 next
= strchr (arch
, '+');
10506 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
10508 if (strcmp (arch
, cpu_arch
[j
].name
) == 0)
10511 if (! cpu_arch
[j
].flags
.bitfield
.cpui386
)
10514 cpu_arch_name
= cpu_arch
[j
].name
;
10515 cpu_sub_arch_name
= NULL
;
10516 cpu_arch_flags
= cpu_arch
[j
].flags
;
10517 cpu_arch_isa
= cpu_arch
[j
].type
;
10518 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
10519 if (!cpu_arch_tune_set
)
10521 cpu_arch_tune
= cpu_arch_isa
;
10522 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
10526 else if (*cpu_arch
[j
].name
== '.'
10527 && strcmp (arch
, cpu_arch
[j
].name
+ 1) == 0)
10529 /* ISA extension. */
10530 i386_cpu_flags flags
;
10532 flags
= cpu_flags_or (cpu_arch_flags
,
10533 cpu_arch
[j
].flags
);
10535 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
10537 if (cpu_sub_arch_name
)
10539 char *name
= cpu_sub_arch_name
;
10540 cpu_sub_arch_name
= concat (name
,
10542 (const char *) NULL
);
10546 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
10547 cpu_arch_flags
= flags
;
10548 cpu_arch_isa_flags
= flags
;
10554 if (j
>= ARRAY_SIZE (cpu_arch
))
10556 /* Disable an ISA extension. */
10557 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
10558 if (strcmp (arch
, cpu_noarch
[j
].name
) == 0)
10560 i386_cpu_flags flags
;
10562 flags
= cpu_flags_and_not (cpu_arch_flags
,
10563 cpu_noarch
[j
].flags
);
10564 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
10566 if (cpu_sub_arch_name
)
10568 char *name
= cpu_sub_arch_name
;
10569 cpu_sub_arch_name
= concat (arch
,
10570 (const char *) NULL
);
10574 cpu_sub_arch_name
= xstrdup (arch
);
10575 cpu_arch_flags
= flags
;
10576 cpu_arch_isa_flags
= flags
;
10581 if (j
>= ARRAY_SIZE (cpu_noarch
))
10582 j
= ARRAY_SIZE (cpu_arch
);
10585 if (j
>= ARRAY_SIZE (cpu_arch
))
10586 as_fatal (_("invalid -march= option: `%s'"), arg
);
10590 while (next
!= NULL
);
10596 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
10597 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
10599 if (strcmp (arg
, cpu_arch
[j
].name
) == 0)
10601 cpu_arch_tune_set
= 1;
10602 cpu_arch_tune
= cpu_arch
[j
].type
;
10603 cpu_arch_tune_flags
= cpu_arch
[j
].flags
;
10607 if (j
>= ARRAY_SIZE (cpu_arch
))
10608 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
10611 case OPTION_MMNEMONIC
:
10612 if (strcasecmp (arg
, "att") == 0)
10613 intel_mnemonic
= 0;
10614 else if (strcasecmp (arg
, "intel") == 0)
10615 intel_mnemonic
= 1;
10617 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg
);
10620 case OPTION_MSYNTAX
:
10621 if (strcasecmp (arg
, "att") == 0)
10623 else if (strcasecmp (arg
, "intel") == 0)
10626 as_fatal (_("invalid -msyntax= option: `%s'"), arg
);
10629 case OPTION_MINDEX_REG
:
10630 allow_index_reg
= 1;
10633 case OPTION_MNAKED_REG
:
10634 allow_naked_reg
= 1;
10637 case OPTION_MOLD_GCC
:
10641 case OPTION_MSSE2AVX
:
10645 case OPTION_MSSE_CHECK
:
10646 if (strcasecmp (arg
, "error") == 0)
10647 sse_check
= check_error
;
10648 else if (strcasecmp (arg
, "warning") == 0)
10649 sse_check
= check_warning
;
10650 else if (strcasecmp (arg
, "none") == 0)
10651 sse_check
= check_none
;
10653 as_fatal (_("invalid -msse-check= option: `%s'"), arg
);
10656 case OPTION_MOPERAND_CHECK
:
10657 if (strcasecmp (arg
, "error") == 0)
10658 operand_check
= check_error
;
10659 else if (strcasecmp (arg
, "warning") == 0)
10660 operand_check
= check_warning
;
10661 else if (strcasecmp (arg
, "none") == 0)
10662 operand_check
= check_none
;
10664 as_fatal (_("invalid -moperand-check= option: `%s'"), arg
);
10667 case OPTION_MAVXSCALAR
:
10668 if (strcasecmp (arg
, "128") == 0)
10669 avxscalar
= vex128
;
10670 else if (strcasecmp (arg
, "256") == 0)
10671 avxscalar
= vex256
;
10673 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg
);
10676 case OPTION_MADD_BND_PREFIX
:
10677 add_bnd_prefix
= 1;
10680 case OPTION_MEVEXLIG
:
10681 if (strcmp (arg
, "128") == 0)
10682 evexlig
= evexl128
;
10683 else if (strcmp (arg
, "256") == 0)
10684 evexlig
= evexl256
;
10685 else if (strcmp (arg
, "512") == 0)
10686 evexlig
= evexl512
;
10688 as_fatal (_("invalid -mevexlig= option: `%s'"), arg
);
10691 case OPTION_MEVEXRCIG
:
10692 if (strcmp (arg
, "rne") == 0)
10694 else if (strcmp (arg
, "rd") == 0)
10696 else if (strcmp (arg
, "ru") == 0)
10698 else if (strcmp (arg
, "rz") == 0)
10701 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg
);
10704 case OPTION_MEVEXWIG
:
10705 if (strcmp (arg
, "0") == 0)
10707 else if (strcmp (arg
, "1") == 0)
10710 as_fatal (_("invalid -mevexwig= option: `%s'"), arg
);
10713 # if defined (TE_PE) || defined (TE_PEP)
10714 case OPTION_MBIG_OBJ
:
10719 case OPTION_MOMIT_LOCK_PREFIX
:
10720 if (strcasecmp (arg
, "yes") == 0)
10721 omit_lock_prefix
= 1;
10722 else if (strcasecmp (arg
, "no") == 0)
10723 omit_lock_prefix
= 0;
10725 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg
);
10728 case OPTION_MFENCE_AS_LOCK_ADD
:
10729 if (strcasecmp (arg
, "yes") == 0)
10731 else if (strcasecmp (arg
, "no") == 0)
10734 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg
);
10737 case OPTION_MRELAX_RELOCATIONS
:
10738 if (strcasecmp (arg
, "yes") == 0)
10739 generate_relax_relocations
= 1;
10740 else if (strcasecmp (arg
, "no") == 0)
10741 generate_relax_relocations
= 0;
10743 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg
);
10746 case OPTION_MAMD64
:
10750 case OPTION_MINTEL64
:
10758 /* Turn off -Os. */
10759 optimize_for_space
= 0;
10761 else if (*arg
== 's')
10763 optimize_for_space
= 1;
10764 /* Turn on all encoding optimizations. */
10769 optimize
= atoi (arg
);
10770 /* Turn off -Os. */
10771 optimize_for_space
= 0;
10781 #define MESSAGE_TEMPLATE \
10785 output_message (FILE *stream
, char *p
, char *message
, char *start
,
10786 int *left_p
, const char *name
, int len
)
10788 int size
= sizeof (MESSAGE_TEMPLATE
);
10789 int left
= *left_p
;
10791 /* Reserve 2 spaces for ", " or ",\0" */
10794 /* Check if there is any room. */
10802 p
= mempcpy (p
, name
, len
);
10806 /* Output the current message now and start a new one. */
10809 fprintf (stream
, "%s\n", message
);
10811 left
= size
- (start
- message
) - len
- 2;
10813 gas_assert (left
>= 0);
10815 p
= mempcpy (p
, name
, len
);
10823 show_arch (FILE *stream
, int ext
, int check
)
10825 static char message
[] = MESSAGE_TEMPLATE
;
10826 char *start
= message
+ 27;
10828 int size
= sizeof (MESSAGE_TEMPLATE
);
10835 left
= size
- (start
- message
);
10836 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
10838 /* Should it be skipped? */
10839 if (cpu_arch
[j
].skip
)
10842 name
= cpu_arch
[j
].name
;
10843 len
= cpu_arch
[j
].len
;
10846 /* It is an extension. Skip if we aren't asked to show it. */
10857 /* It is an processor. Skip if we show only extension. */
10860 else if (check
&& ! cpu_arch
[j
].flags
.bitfield
.cpui386
)
10862 /* It is an impossible processor - skip. */
10866 p
= output_message (stream
, p
, message
, start
, &left
, name
, len
);
10869 /* Display disabled extensions. */
10871 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
10873 name
= cpu_noarch
[j
].name
;
10874 len
= cpu_noarch
[j
].len
;
10875 p
= output_message (stream
, p
, message
, start
, &left
, name
,
10880 fprintf (stream
, "%s\n", message
);
/* Describe the target-specific command-line options on STREAM,
   called from the generic gas --help handling.  */

void
md_show_usage (FILE *stream)
{
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -Q                      ignored\n\
  -V                      print assembler version number\n\
  -k                      ignored\n"));
#endif
  fprintf (stream, _("\
  -n                      Do not optimize code alignment\n\
  -q                      quieten some warnings\n"));
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  fprintf (stream, _("\
  -s                      ignored\n"));
#endif
#if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP))
  fprintf (stream, _("\
  --32/--64/--x32         generate 32bit/64bit/x32 code\n"));
#endif
#ifdef SVR4_COMMENT_CHARS
  fprintf (stream, _("\
  --divide                do not treat `/' as a comment character\n"));
#else
  fprintf (stream, _("\
  --divide                ignored\n"));
#endif
  fprintf (stream, _("\
  -march=CPU[,+EXTENSION...]\n\
                          generate code for CPU and EXTENSION, CPU is one of:\n"));
  show_arch (stream, 0, 1);
  fprintf (stream, _("\
                          EXTENSION is combination of:\n"));
  show_arch (stream, 1, 0);
  fprintf (stream, _("\
  -mtune=CPU              optimize for CPU, CPU is one of:\n"));
  show_arch (stream, 0, 0);
  fprintf (stream, _("\
  -msse2avx               encode SSE instructions with VEX prefix\n"));
  fprintf (stream, _("\
  -msse-check=[none|error|warning]\n\
                          check SSE instructions\n"));
  fprintf (stream, _("\
  -moperand-check=[none|error|warning]\n\
                          check operand combinations for validity\n"));
  fprintf (stream, _("\
  -mavxscalar=[128|256]   encode scalar AVX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mevexlig=[128|256|512] encode scalar EVEX instructions with specific vector\n\
                           length\n"));
  fprintf (stream, _("\
  -mevexwig=[0|1]         encode EVEX instructions with specific EVEX.W value\n\
                           for EVEX.W bit ignored instructions\n"));
  fprintf (stream, _("\
  -mevexrcig=[rne|rd|ru|rz]\n\
                          encode EVEX instructions with specific EVEX.RC value\n\
                           for SAE-only ignored instructions\n"));
  fprintf (stream, _("\
  -mmnemonic=[att|intel]  use AT&T/Intel mnemonic\n"));
  fprintf (stream, _("\
  -msyntax=[att|intel]    use AT&T/Intel syntax\n"));
  fprintf (stream, _("\
  -mindex-reg             support pseudo index registers\n"));
  fprintf (stream, _("\
  -mnaked-reg             don't require `%%' prefix for registers\n"));
  fprintf (stream, _("\
  -mold-gcc               support old (<= 2.8.1) versions of gcc\n"));
  fprintf (stream, _("\
  -madd-bnd-prefix        add BND prefix for all valid branches\n"));
  fprintf (stream, _("\
  -mshared                disable branch optimization for shared code\n"));
# if defined (TE_PE) || defined (TE_PEP)
  fprintf (stream, _("\
  -mbig-obj               generate big object files\n"));
#endif
  fprintf (stream, _("\
  -momit-lock-prefix=[no|yes]\n\
                          strip all lock prefixes\n"));
  fprintf (stream, _("\
  -mfence-as-lock-add=[no|yes]\n\
                          encode lfence, mfence and sfence as\n\
                           lock addl $0x0, (%%{re}sp)\n"));
  fprintf (stream, _("\
  -mrelax-relocations=[no|yes]\n\
                          generate relax relocations\n"));
  fprintf (stream, _("\
  -mamd64                 accept only AMD64 ISA\n"));
  fprintf (stream, _("\
  -mintel64               accept only Intel64 ISA\n"));
}
10976 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
10977 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
10978 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
10980 /* Pick the target format to use. */
10983 i386_target_format (void)
10985 if (!strncmp (default_arch
, "x86_64", 6))
10987 update_code_flag (CODE_64BIT
, 1);
10988 if (default_arch
[6] == '\0')
10989 x86_elf_abi
= X86_64_ABI
;
10991 x86_elf_abi
= X86_64_X32_ABI
;
10993 else if (!strcmp (default_arch
, "i386"))
10994 update_code_flag (CODE_32BIT
, 1);
10995 else if (!strcmp (default_arch
, "iamcu"))
10997 update_code_flag (CODE_32BIT
, 1);
10998 if (cpu_arch_isa
== PROCESSOR_UNKNOWN
)
11000 static const i386_cpu_flags iamcu_flags
= CPU_IAMCU_FLAGS
;
11001 cpu_arch_name
= "iamcu";
11002 cpu_sub_arch_name
= NULL
;
11003 cpu_arch_flags
= iamcu_flags
;
11004 cpu_arch_isa
= PROCESSOR_IAMCU
;
11005 cpu_arch_isa_flags
= iamcu_flags
;
11006 if (!cpu_arch_tune_set
)
11008 cpu_arch_tune
= cpu_arch_isa
;
11009 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
11012 else if (cpu_arch_isa
!= PROCESSOR_IAMCU
)
11013 as_fatal (_("Intel MCU doesn't support `%s' architecture"),
11017 as_fatal (_("unknown architecture"));
11019 if (cpu_flags_all_zero (&cpu_arch_isa_flags
))
11020 cpu_arch_isa_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
11021 if (cpu_flags_all_zero (&cpu_arch_tune_flags
))
11022 cpu_arch_tune_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
11024 switch (OUTPUT_FLAVOR
)
11026 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
11027 case bfd_target_aout_flavour
:
11028 return AOUT_TARGET_FORMAT
;
11030 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
11031 # if defined (TE_PE) || defined (TE_PEP)
11032 case bfd_target_coff_flavour
:
11033 if (flag_code
== CODE_64BIT
)
11034 return use_big_obj
? "pe-bigobj-x86-64" : "pe-x86-64";
11037 # elif defined (TE_GO32)
11038 case bfd_target_coff_flavour
:
11039 return "coff-go32";
11041 case bfd_target_coff_flavour
:
11042 return "coff-i386";
11045 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
11046 case bfd_target_elf_flavour
:
11048 const char *format
;
11050 switch (x86_elf_abi
)
11053 format
= ELF_TARGET_FORMAT
;
11056 use_rela_relocations
= 1;
11058 format
= ELF_TARGET_FORMAT64
;
11060 case X86_64_X32_ABI
:
11061 use_rela_relocations
= 1;
11063 disallow_64bit_reloc
= 1;
11064 format
= ELF_TARGET_FORMAT32
;
11067 if (cpu_arch_isa
== PROCESSOR_L1OM
)
11069 if (x86_elf_abi
!= X86_64_ABI
)
11070 as_fatal (_("Intel L1OM is 64bit only"));
11071 return ELF_TARGET_L1OM_FORMAT
;
11073 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
11075 if (x86_elf_abi
!= X86_64_ABI
)
11076 as_fatal (_("Intel K1OM is 64bit only"));
11077 return ELF_TARGET_K1OM_FORMAT
;
11079 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
11081 if (x86_elf_abi
!= I386_ABI
)
11082 as_fatal (_("Intel MCU is 32bit only"));
11083 return ELF_TARGET_IAMCU_FORMAT
;
11089 #if defined (OBJ_MACH_O)
11090 case bfd_target_mach_o_flavour
:
11091 if (flag_code
== CODE_64BIT
)
11093 use_rela_relocations
= 1;
11095 return "mach-o-x86-64";
11098 return "mach-o-i386";
11106 #endif /* OBJ_MAYBE_ more than one */
11109 md_undefined_symbol (char *name
)
11111 if (name
[0] == GLOBAL_OFFSET_TABLE_NAME
[0]
11112 && name
[1] == GLOBAL_OFFSET_TABLE_NAME
[1]
11113 && name
[2] == GLOBAL_OFFSET_TABLE_NAME
[2]
11114 && strcmp (name
, GLOBAL_OFFSET_TABLE_NAME
) == 0)
11118 if (symbol_find (name
))
11119 as_bad (_("GOT already in symbol table"));
11120 GOT_symbol
= symbol_new (name
, undefined_section
,
11121 (valueT
) 0, &zero_address_frag
);
11128 /* Round up a section size to the appropriate boundary. */
11131 md_section_align (segT segment ATTRIBUTE_UNUSED
, valueT size
)
11133 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
11134 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
11136 /* For a.out, force the section size to be aligned. If we don't do
11137 this, BFD will align it for us, but it will not write out the
11138 final bytes of the section. This may be a bug in BFD, but it is
11139 easier to fix it here since that is how the other a.out targets
11143 align
= bfd_get_section_alignment (stdoutput
, segment
);
11144 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
11151 /* On the i386, PC-relative offsets are relative to the start of the
11152 next instruction. That is, the address of the offset, plus its
11153 size, since the offset is always the last part of the insn. */
11156 md_pcrel_from (fixS
*fixP
)
11158 return fixP
->fx_size
+ fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
11164 s_bss (int ignore ATTRIBUTE_UNUSED
)
11168 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11170 obj_elf_section_change_hook ();
11172 temp
= get_absolute_expression ();
11173 subseg_set (bss_section
, (subsegT
) temp
);
11174 demand_empty_rest_of_line ();
11180 i386_validate_fix (fixS
*fixp
)
11182 if (fixp
->fx_subsy
)
11184 if (fixp
->fx_subsy
== GOT_symbol
)
11186 if (fixp
->fx_r_type
== BFD_RELOC_32_PCREL
)
11190 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11191 if (fixp
->fx_tcbit2
)
11192 fixp
->fx_r_type
= (fixp
->fx_tcbit
11193 ? BFD_RELOC_X86_64_REX_GOTPCRELX
11194 : BFD_RELOC_X86_64_GOTPCRELX
);
11197 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTPCREL
;
11202 fixp
->fx_r_type
= BFD_RELOC_386_GOTOFF
;
11204 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTOFF64
;
11206 fixp
->fx_subsy
= 0;
11209 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11210 else if (!object_64bit
)
11212 if (fixp
->fx_r_type
== BFD_RELOC_386_GOT32
11213 && fixp
->fx_tcbit2
)
11214 fixp
->fx_r_type
= BFD_RELOC_386_GOT32X
;
11220 tc_gen_reloc (asection
*section ATTRIBUTE_UNUSED
, fixS
*fixp
)
11223 bfd_reloc_code_real_type code
;
11225 switch (fixp
->fx_r_type
)
11227 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11228 case BFD_RELOC_SIZE32
:
11229 case BFD_RELOC_SIZE64
:
11230 if (S_IS_DEFINED (fixp
->fx_addsy
)
11231 && !S_IS_EXTERNAL (fixp
->fx_addsy
))
11233 /* Resolve size relocation against local symbol to size of
11234 the symbol plus addend. */
11235 valueT value
= S_GET_SIZE (fixp
->fx_addsy
) + fixp
->fx_offset
;
11236 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
11237 && !fits_in_unsigned_long (value
))
11238 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
11239 _("symbol size computation overflow"));
11240 fixp
->fx_addsy
= NULL
;
11241 fixp
->fx_subsy
= NULL
;
11242 md_apply_fix (fixp
, (valueT
*) &value
, NULL
);
11246 /* Fall through. */
11248 case BFD_RELOC_X86_64_PLT32
:
11249 case BFD_RELOC_X86_64_GOT32
:
11250 case BFD_RELOC_X86_64_GOTPCREL
:
11251 case BFD_RELOC_X86_64_GOTPCRELX
:
11252 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
11253 case BFD_RELOC_386_PLT32
:
11254 case BFD_RELOC_386_GOT32
:
11255 case BFD_RELOC_386_GOT32X
:
11256 case BFD_RELOC_386_GOTOFF
:
11257 case BFD_RELOC_386_GOTPC
:
11258 case BFD_RELOC_386_TLS_GD
:
11259 case BFD_RELOC_386_TLS_LDM
:
11260 case BFD_RELOC_386_TLS_LDO_32
:
11261 case BFD_RELOC_386_TLS_IE_32
:
11262 case BFD_RELOC_386_TLS_IE
:
11263 case BFD_RELOC_386_TLS_GOTIE
:
11264 case BFD_RELOC_386_TLS_LE_32
:
11265 case BFD_RELOC_386_TLS_LE
:
11266 case BFD_RELOC_386_TLS_GOTDESC
:
11267 case BFD_RELOC_386_TLS_DESC_CALL
:
11268 case BFD_RELOC_X86_64_TLSGD
:
11269 case BFD_RELOC_X86_64_TLSLD
:
11270 case BFD_RELOC_X86_64_DTPOFF32
:
11271 case BFD_RELOC_X86_64_DTPOFF64
:
11272 case BFD_RELOC_X86_64_GOTTPOFF
:
11273 case BFD_RELOC_X86_64_TPOFF32
:
11274 case BFD_RELOC_X86_64_TPOFF64
:
11275 case BFD_RELOC_X86_64_GOTOFF64
:
11276 case BFD_RELOC_X86_64_GOTPC32
:
11277 case BFD_RELOC_X86_64_GOT64
:
11278 case BFD_RELOC_X86_64_GOTPCREL64
:
11279 case BFD_RELOC_X86_64_GOTPC64
:
11280 case BFD_RELOC_X86_64_GOTPLT64
:
11281 case BFD_RELOC_X86_64_PLTOFF64
:
11282 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
11283 case BFD_RELOC_X86_64_TLSDESC_CALL
:
11284 case BFD_RELOC_RVA
:
11285 case BFD_RELOC_VTABLE_ENTRY
:
11286 case BFD_RELOC_VTABLE_INHERIT
:
11288 case BFD_RELOC_32_SECREL
:
11290 code
= fixp
->fx_r_type
;
11292 case BFD_RELOC_X86_64_32S
:
11293 if (!fixp
->fx_pcrel
)
11295 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
11296 code
= fixp
->fx_r_type
;
11299 /* Fall through. */
11301 if (fixp
->fx_pcrel
)
11303 switch (fixp
->fx_size
)
11306 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
11307 _("can not do %d byte pc-relative relocation"),
11309 code
= BFD_RELOC_32_PCREL
;
11311 case 1: code
= BFD_RELOC_8_PCREL
; break;
11312 case 2: code
= BFD_RELOC_16_PCREL
; break;
11313 case 4: code
= BFD_RELOC_32_PCREL
; break;
11315 case 8: code
= BFD_RELOC_64_PCREL
; break;
11321 switch (fixp
->fx_size
)
11324 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
11325 _("can not do %d byte relocation"),
11327 code
= BFD_RELOC_32
;
11329 case 1: code
= BFD_RELOC_8
; break;
11330 case 2: code
= BFD_RELOC_16
; break;
11331 case 4: code
= BFD_RELOC_32
; break;
11333 case 8: code
= BFD_RELOC_64
; break;
11340 if ((code
== BFD_RELOC_32
11341 || code
== BFD_RELOC_32_PCREL
11342 || code
== BFD_RELOC_X86_64_32S
)
11344 && fixp
->fx_addsy
== GOT_symbol
)
11347 code
= BFD_RELOC_386_GOTPC
;
11349 code
= BFD_RELOC_X86_64_GOTPC32
;
11351 if ((code
== BFD_RELOC_64
|| code
== BFD_RELOC_64_PCREL
)
11353 && fixp
->fx_addsy
== GOT_symbol
)
11355 code
= BFD_RELOC_X86_64_GOTPC64
;
11358 rel
= XNEW (arelent
);
11359 rel
->sym_ptr_ptr
= XNEW (asymbol
*);
11360 *rel
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
11362 rel
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
11364 if (!use_rela_relocations
)
11366 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
11367 vtable entry to be used in the relocation's section offset. */
11368 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
11369 rel
->address
= fixp
->fx_offset
;
11370 #if defined (OBJ_COFF) && defined (TE_PE)
11371 else if (fixp
->fx_addsy
&& S_IS_WEAK (fixp
->fx_addsy
))
11372 rel
->addend
= fixp
->fx_addnumber
- (S_GET_VALUE (fixp
->fx_addsy
) * 2);
11377 /* Use the rela in 64bit mode. */
11380 if (disallow_64bit_reloc
)
11383 case BFD_RELOC_X86_64_DTPOFF64
:
11384 case BFD_RELOC_X86_64_TPOFF64
:
11385 case BFD_RELOC_64_PCREL
:
11386 case BFD_RELOC_X86_64_GOTOFF64
:
11387 case BFD_RELOC_X86_64_GOT64
:
11388 case BFD_RELOC_X86_64_GOTPCREL64
:
11389 case BFD_RELOC_X86_64_GOTPC64
:
11390 case BFD_RELOC_X86_64_GOTPLT64
:
11391 case BFD_RELOC_X86_64_PLTOFF64
:
11392 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
11393 _("cannot represent relocation type %s in x32 mode"),
11394 bfd_get_reloc_code_name (code
));
11400 if (!fixp
->fx_pcrel
)
11401 rel
->addend
= fixp
->fx_offset
;
11405 case BFD_RELOC_X86_64_PLT32
:
11406 case BFD_RELOC_X86_64_GOT32
:
11407 case BFD_RELOC_X86_64_GOTPCREL
:
11408 case BFD_RELOC_X86_64_GOTPCRELX
:
11409 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
11410 case BFD_RELOC_X86_64_TLSGD
:
11411 case BFD_RELOC_X86_64_TLSLD
:
11412 case BFD_RELOC_X86_64_GOTTPOFF
:
11413 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
11414 case BFD_RELOC_X86_64_TLSDESC_CALL
:
11415 rel
->addend
= fixp
->fx_offset
- fixp
->fx_size
;
11418 rel
->addend
= (section
->vma
11420 + fixp
->fx_addnumber
11421 + md_pcrel_from (fixp
));
11426 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
11427 if (rel
->howto
== NULL
)
11429 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
11430 _("cannot represent relocation type %s"),
11431 bfd_get_reloc_code_name (code
));
11432 /* Set howto to a garbage value so that we can keep going. */
11433 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, BFD_RELOC_32
);
11434 gas_assert (rel
->howto
!= NULL
);
11440 #include "tc-i386-intel.c"
11443 tc_x86_parse_to_dw2regnum (expressionS
*exp
)
11445 int saved_naked_reg
;
11446 char saved_register_dot
;
11448 saved_naked_reg
= allow_naked_reg
;
11449 allow_naked_reg
= 1;
11450 saved_register_dot
= register_chars
['.'];
11451 register_chars
['.'] = '.';
11452 allow_pseudo_reg
= 1;
11453 expression_and_evaluate (exp
);
11454 allow_pseudo_reg
= 0;
11455 register_chars
['.'] = saved_register_dot
;
11456 allow_naked_reg
= saved_naked_reg
;
11458 if (exp
->X_op
== O_register
&& exp
->X_add_number
>= 0)
11460 if ((addressT
) exp
->X_add_number
< i386_regtab_size
)
11462 exp
->X_op
= O_constant
;
11463 exp
->X_add_number
= i386_regtab
[exp
->X_add_number
]
11464 .dw2_regnum
[flag_code
>> 1];
11467 exp
->X_op
= O_illegal
;
11472 tc_x86_frame_initial_instructions (void)
11474 static unsigned int sp_regno
[2];
11476 if (!sp_regno
[flag_code
>> 1])
11478 char *saved_input
= input_line_pointer
;
11479 char sp
[][4] = {"esp", "rsp"};
11482 input_line_pointer
= sp
[flag_code
>> 1];
11483 tc_x86_parse_to_dw2regnum (&exp
);
11484 gas_assert (exp
.X_op
== O_constant
);
11485 sp_regno
[flag_code
>> 1] = exp
.X_add_number
;
11486 input_line_pointer
= saved_input
;
11489 cfi_add_CFA_def_cfa (sp_regno
[flag_code
>> 1], -x86_cie_data_alignment
);
11490 cfi_add_CFA_offset (x86_dwarf2_return_column
, x86_cie_data_alignment
);
11494 x86_dwarf2_addr_size (void)
11496 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
11497 if (x86_elf_abi
== X86_64_X32_ABI
)
11500 return bfd_arch_bits_per_address (stdoutput
) / 8;
11504 i386_elf_section_type (const char *str
, size_t len
)
11506 if (flag_code
== CODE_64BIT
11507 && len
== sizeof ("unwind") - 1
11508 && strncmp (str
, "unwind", 6) == 0)
11509 return SHT_X86_64_UNWIND
;
11516 i386_solaris_fix_up_eh_frame (segT sec
)
11518 if (flag_code
== CODE_64BIT
)
11519 elf_section_type (sec
) = SHT_X86_64_UNWIND
;
11525 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
11529 exp
.X_op
= O_secrel
;
11530 exp
.X_add_symbol
= symbol
;
11531 exp
.X_add_number
= 0;
11532 emit_expr (&exp
, size
);
11536 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11537 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
11540 x86_64_section_letter (int letter
, const char **ptr_msg
)
11542 if (flag_code
== CODE_64BIT
)
11545 return SHF_X86_64_LARGE
;
11547 *ptr_msg
= _("bad .section directive: want a,l,w,x,M,S,G,T in string");
11550 *ptr_msg
= _("bad .section directive: want a,w,x,M,S,G,T in string");
11555 x86_64_section_word (char *str
, size_t len
)
11557 if (len
== 5 && flag_code
== CODE_64BIT
&& CONST_STRNEQ (str
, "large"))
11558 return SHF_X86_64_LARGE
;
11564 handle_large_common (int small ATTRIBUTE_UNUSED
)
11566 if (flag_code
!= CODE_64BIT
)
11568 s_comm_internal (0, elf_common_parse
);
11569 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
11573 static segT lbss_section
;
11574 asection
*saved_com_section_ptr
= elf_com_section_ptr
;
11575 asection
*saved_bss_section
= bss_section
;
11577 if (lbss_section
== NULL
)
11579 flagword applicable
;
11580 segT seg
= now_seg
;
11581 subsegT subseg
= now_subseg
;
11583 /* The .lbss section is for local .largecomm symbols. */
11584 lbss_section
= subseg_new (".lbss", 0);
11585 applicable
= bfd_applicable_section_flags (stdoutput
);
11586 bfd_set_section_flags (stdoutput
, lbss_section
,
11587 applicable
& SEC_ALLOC
);
11588 seg_info (lbss_section
)->bss
= 1;
11590 subseg_set (seg
, subseg
);
11593 elf_com_section_ptr
= &_bfd_elf_large_com_section
;
11594 bss_section
= lbss_section
;
11596 s_comm_internal (0, elf_common_parse
);
11598 elf_com_section_ptr
= saved_com_section_ptr
;
11599 bss_section
= saved_bss_section
;
11602 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */