1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2023 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "gen-sframe.h"
35 #include "elf/x86-64.h"
36 #include "opcodes/i386-init.h"
37 #include "opcodes/i386-mnem.h"
40 #ifndef INFER_ADDR_PREFIX
41 #define INFER_ADDR_PREFIX 1
45 #define DEFAULT_ARCH "i386"
50 #define INLINE __inline__
56 /* Prefixes will be emitted in the order defined below.
57 WAIT_PREFIX must be the first prefix since FWAIT really is an
58 instruction, and so must come before any prefixes.
59 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
60 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
/* HLE and BND prefixes are mutually exclusive with REP, so they reuse
   the REP prefix slot.  */
#define HLE_PREFIX	REP_PREFIX
#define BND_PREFIX	REP_PREFIX

#define REX_PREFIX	6	/* must come last.  */
#define MAX_PREFIXES	7	/* max prefixes per opcode */
/* we define the syntax here (modulo base,index,scale syntax) */
#define REGISTER_PREFIX  '%'
#define IMMEDIATE_PREFIX '$'
#define ABSOLUTE_PREFIX  '*'

/* these are the instruction mnemonic suffixes in AT&T syntax or
   memory operand size in Intel syntax.  */
#define WORD_MNEM_SUFFIX  'w'
#define BYTE_MNEM_SUFFIX  'b'
#define SHORT_MNEM_SUFFIX 's'
#define LONG_MNEM_SUFFIX  'l'
#define QWORD_MNEM_SUFFIX 'q'
/* NUL terminates the insn text handed to the parsing routines.  */
#define END_OF_INSN '\0'

/* An operand type with no bits set (bitfield class ClassNone).  */
#define OPERAND_TYPE_NONE { .bitfield = { .class = ClassNone } }

/* This matches the C -> StaticRounding alias in the opcode table.  */
#define commutative staticrounding
93 'templates' is for grouping together 'template' structures for opcodes
94 of the same name. This is only used for storing the insns in the grand
95 ole hash table of insns.
96 The templates themselves start at START and range up to (but not including)
101 const insn_template
*start
;
102 const insn_template
*end
;
106 /* 386 operand encoding bytes: see 386 book for details of this. */
109 unsigned int regmem
; /* codes register or memory operand */
110 unsigned int reg
; /* codes register operand (or extended opcode) */
111 unsigned int mode
; /* how to interpret regmem & reg */
115 /* x86-64 extension prefix. */
116 typedef int rex_byte
;
118 /* 386 opcode byte to code indirect addressing. */
127 /* x86 arch names, types and features */
130 const char *name
; /* arch name */
131 unsigned int len
:8; /* arch string length */
132 bool skip
:1; /* show_arch should skip this. */
133 enum processor_type type
; /* arch type */
134 i386_cpu_flags enable
; /* cpu feature enable flags */
135 i386_cpu_flags disable
; /* cpu feature disable flags */
139 static void update_code_flag (int, int);
140 static void s_insn (int);
141 static void set_code_flag (int);
142 static void set_16bit_gcc_code_flag (int);
143 static void set_intel_syntax (int);
144 static void set_intel_mnemonic (int);
145 static void set_allow_index_reg (int);
146 static void set_check (int);
147 static void set_cpu_arch (int);
149 static void pe_directive_secrel (int);
150 static void pe_directive_secidx (int);
152 static void signed_cons (int);
153 static char *output_invalid (int c
);
154 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
156 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
158 static int i386_att_operand (char *);
159 static int i386_intel_operand (char *, int);
160 static int i386_intel_simplify (expressionS
*);
161 static int i386_intel_parse_name (const char *, expressionS
*);
162 static const reg_entry
*parse_register (const char *, char **);
163 static const char *parse_insn (const char *, char *, bool);
164 static char *parse_operands (char *, const char *);
165 static void swap_operands (void);
166 static void swap_2_operands (unsigned int, unsigned int);
167 static enum flag_code
i386_addressing_mode (void);
168 static void optimize_imm (void);
169 static bool optimize_disp (const insn_template
*t
);
170 static const insn_template
*match_template (char);
171 static int check_string (void);
172 static int process_suffix (void);
173 static int check_byte_reg (void);
174 static int check_long_reg (void);
175 static int check_qword_reg (void);
176 static int check_word_reg (void);
177 static int finalize_imm (void);
178 static int process_operands (void);
179 static const reg_entry
*build_modrm_byte (void);
180 static void output_insn (void);
181 static void output_imm (fragS
*, offsetT
);
182 static void output_disp (fragS
*, offsetT
);
184 static void s_bss (int);
186 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
187 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
189 /* GNU_PROPERTY_X86_ISA_1_USED. */
190 static unsigned int x86_isa_1_used
;
191 /* GNU_PROPERTY_X86_FEATURE_2_USED. */
192 static unsigned int x86_feature_2_used
;
193 /* Generate x86 used ISA and feature properties. */
194 static unsigned int x86_used_note
= DEFAULT_X86_USED_NOTE
;
197 static const char *default_arch
= DEFAULT_ARCH
;
199 /* parse_register() returns this when a register alias cannot be used. */
200 static const reg_entry bad_reg
= { "<bad>", OPERAND_TYPE_NONE
, 0, 0,
201 { Dw2Inval
, Dw2Inval
} };
203 static const reg_entry
*reg_eax
;
204 static const reg_entry
*reg_ds
;
205 static const reg_entry
*reg_es
;
206 static const reg_entry
*reg_ss
;
207 static const reg_entry
*reg_st0
;
208 static const reg_entry
*reg_k0
;
213 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
214 unsigned char bytes
[4];
216 /* Destination or source register specifier. */
217 const reg_entry
*register_specifier
;
220 /* 'md_assemble ()' gathers together information and puts it into a
227 const reg_entry
*regs
;
232 no_error
, /* Must be first. */
233 operand_size_mismatch
,
234 operand_type_mismatch
,
235 register_type_mismatch
,
236 number_of_operands_mismatch
,
237 invalid_instruction_suffix
,
239 unsupported_with_intel_mnemonic
,
245 invalid_vsib_address
,
246 invalid_vector_register_set
,
247 invalid_tmm_register_set
,
248 invalid_dest_and_src_register_set
,
249 unsupported_vector_index_register
,
250 unsupported_broadcast
,
253 mask_not_on_destination
,
256 invalid_register_operand
,
261 /* TM holds the template for the insn we're currently assembling. */
264 /* SUFFIX holds the instruction size suffix for byte, word, dword
265 or qword, if given. */
268 /* OPCODE_LENGTH holds the number of base opcode bytes. */
269 unsigned char opcode_length
;
271 /* OPERANDS gives the number of given operands. */
272 unsigned int operands
;
274 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
275 of given register, displacement, memory operands and immediate
277 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
279 /* TYPES [i] is the type (see above #defines) which tells us how to
280 use OP[i] for the corresponding operand. */
281 i386_operand_type types
[MAX_OPERANDS
];
283 /* Displacement expression, immediate expression, or register for each
285 union i386_op op
[MAX_OPERANDS
];
287 /* Flags for operands. */
288 unsigned int flags
[MAX_OPERANDS
];
289 #define Operand_PCrel 1
290 #define Operand_Mem 2
291 #define Operand_Signed 4 /* .insn only */
293 /* Relocation type for operand */
294 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
296 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
297 the base index byte below. */
298 const reg_entry
*base_reg
;
299 const reg_entry
*index_reg
;
300 unsigned int log2_scale_factor
;
302 /* SEG gives the seg_entries of this insn. They are zero unless
303 explicit segment overrides are given. */
304 const reg_entry
*seg
[2];
306 /* PREFIX holds all the given prefix opcodes (usually null).
307 PREFIXES is the number of prefix opcodes. */
308 unsigned int prefixes
;
309 unsigned char prefix
[MAX_PREFIXES
];
311 /* .insn allows for reserved opcode spaces. */
312 unsigned char insn_opcode_space
;
314 /* .insn also allows (requires) specifying immediate size. */
315 unsigned char imm_bits
[MAX_OPERANDS
];
317 /* Register is in low 3 bits of opcode. */
320 /* The operand to a branch insn indicates an absolute branch. */
323 /* The operand to a branch insn indicates a far branch. */
326 /* There is a memory operand of (%dx) which should be only used
327 with input/output instructions. */
328 bool input_output_operand
;
330 /* Extended states. */
338 xstate_ymm
= 1 << 2 | xstate_xmm
,
340 xstate_zmm
= 1 << 3 | xstate_ymm
,
343 /* Use MASK state. */
347 /* Has GOTPC or TLS relocation. */
348 bool has_gotpc_tls_reloc
;
350 /* RM and SIB are the modrm byte and the sib byte where the
351 addressing modes of this insn are encoded. */
358 /* Masking attributes.
360 The struct describes masking, applied to OPERAND in the instruction.
361 REG is a pointer to the corresponding mask register. ZEROING tells
362 whether merging or zeroing mask is used. */
363 struct Mask_Operation
365 const reg_entry
*reg
;
366 unsigned int zeroing
;
367 /* The operand where this operation is associated. */
368 unsigned int operand
;
371 /* Rounding control and SAE attributes. */
383 /* In Intel syntax the operand modifier form is supposed to be used, but
384 we continue to accept the immediate forms as well. */
388 /* Broadcasting attributes.
390 The struct describes broadcasting, applied to OPERAND. TYPE
391 expresses the broadcast factor. */
392 struct Broadcast_Operation
394 /* Type of broadcast: {1to2}, {1to4}, {1to8}, {1to16} or {1to32}. */
397 /* Index of broadcasted operand. */
398 unsigned int operand
;
400 /* Number of bytes to broadcast. */
404 /* Compressed disp8*N attribute. */
405 unsigned int memshift
;
407 /* Prefer load or store in encoding. */
410 dir_encoding_default
= 0,
416 /* Prefer 8bit, 16bit, 32bit displacement in encoding. */
419 disp_encoding_default
= 0,
425 /* Prefer the REX byte in encoding. */
428 /* Disable instruction size optimization. */
431 /* How to encode vector instructions. */
434 vex_encoding_default
= 0,
442 const char *rep_prefix
;
445 const char *hle_prefix
;
447 /* Have BND prefix. */
448 const char *bnd_prefix
;
450 /* Have NOTRACK prefix. */
451 const char *notrack_prefix
;
454 enum i386_error error
;
457 typedef struct _i386_insn i386_insn
;
459 /* Link RC type with corresponding string, that'll be looked for in
468 static const struct RC_name RC_NamesTable
[] =
470 { rne
, STRING_COMMA_LEN ("rn-sae") },
471 { rd
, STRING_COMMA_LEN ("rd-sae") },
472 { ru
, STRING_COMMA_LEN ("ru-sae") },
473 { rz
, STRING_COMMA_LEN ("rz-sae") },
474 { saeonly
, STRING_COMMA_LEN ("sae") },
477 /* To be indexed by segment register number. */
478 static const unsigned char i386_seg_prefixes
[] = {
487 /* List of chars besides those in app.c:symbol_chars that can start an
488 operand. Used to prevent the scrubber eating vital white-space. */
489 const char extra_symbol_chars
[] = "*%-([{}"
498 #if ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
499 && !defined (TE_GNU) \
500 && !defined (TE_LINUX) \
501 && !defined (TE_Haiku) \
502 && !defined (TE_FreeBSD) \
503 && !defined (TE_DragonFly) \
504 && !defined (TE_NetBSD))
505 /* This array holds the chars that always start a comment. If the
506 pre-processor is disabled, these aren't very useful. The option
507 --divide will remove '/' from this list. */
508 const char *i386_comment_chars
= "#/";
509 #define SVR4_COMMENT_CHARS 1
510 #define PREFIX_SEPARATOR '\\'
513 const char *i386_comment_chars
= "#";
514 #define PREFIX_SEPARATOR '/'
517 /* This array holds the chars that only start a comment at the beginning of
518 a line. If the line seems to have the form '# 123 filename'
519 .line and .file directives will appear in the pre-processed output.
520 Note that input_file.c hand checks for '#' at the beginning of the
521 first line of the input file. This is because the compiler outputs
522 #NO_APP at the beginning of its output.
523 Also note that comments started like this one will always work if
524 '/' isn't otherwise defined. */
525 const char line_comment_chars
[] = "#/";
527 const char line_separator_chars
[] = ";";
529 /* Chars that can be used to separate mant from exp in floating point
531 const char EXP_CHARS
[] = "eE";
533 /* Chars that mean this number is a floating point constant
536 const char FLT_CHARS
[] = "fFdDxXhHbB";
/* Tables for lexical analysis.  Each table maps a character to non-zero
   when that character may appear in the corresponding token class; the
   tables are filled in at initialization time.  */
static char mnemonic_chars[256];
static char register_chars[256];
static char operand_chars[256];

/* Lexical macros.  */
#define is_operand_char(x) (operand_chars[(unsigned char) x])
#define is_register_char(x) (register_chars[(unsigned char) x])
#define is_space_char(x) ((x) == ' ')

/* All non-digit non-letter characters that may occur in an operand and
   which aren't already in extra_symbol_chars[].  */
static const char operand_special_chars[] = "$+,)._~/<>|&^!=:@]";
/* md_assemble() always leaves the strings it's passed unaltered.  To
   effect this we maintain a stack of saved characters that we've smashed
   with '\0's (indicating end of strings for various sub-fields of the
   assembler instruction).  */
static char save_stack[32];
static char *save_stack_p;

/* Push *(s) onto the save stack, then smash it to NUL.  */
#define END_STRING_AND_SAVE(s) \
	do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)

/* Pop the save stack, restoring the character smashed by the most
   recent END_STRING_AND_SAVE.  */
#define RESTORE_END_STRING(s) \
	do { *(s) = *--save_stack_p; } while (0)
563 /* The instruction we're assembling. */
566 /* Possible templates for current insn. */
567 static const templates
*current_templates
;
569 /* Per instruction expressionS buffers: max displacements & immediates. */
570 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
571 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
573 /* Current operand we are working on. */
574 static int this_operand
= -1;
576 /* Are we processing a .insn directive? */
577 #define dot_insn() (i.tm.mnem_off == MN__insn)
579 /* We support four different modes. FLAG_CODE variable is used to distinguish
587 static enum flag_code flag_code
;
588 static unsigned int object_64bit
;
589 static unsigned int disallow_64bit_reloc
;
590 static int use_rela_relocations
= 0;
591 /* __tls_get_addr/___tls_get_addr symbol for TLS. */
592 static const char *tls_get_addr
;
594 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
595 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
596 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
598 /* The ELF ABI to use. */
606 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
609 #if defined (TE_PE) || defined (TE_PEP)
610 /* Use big object file format. */
611 static int use_big_obj
= 0;
614 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
615 /* 1 if generating code for a shared library. */
616 static int shared
= 0;
618 unsigned int x86_sframe_cfa_sp_reg
;
619 /* The other CFA base register for SFrame stack trace info. */
620 unsigned int x86_sframe_cfa_fp_reg
;
621 unsigned int x86_sframe_cfa_ra_reg
;
625 /* 1 for intel syntax,
627 static int intel_syntax
= 0;
629 static enum x86_64_isa
631 amd64
= 1, /* AMD64 ISA. */
632 intel64
/* Intel64 ISA. */
635 /* 1 for intel mnemonic,
636 0 if att mnemonic. */
637 static int intel_mnemonic
= !SYSV386_COMPAT
;
639 /* 1 if pseudo registers are permitted. */
640 static int allow_pseudo_reg
= 0;
642 /* 1 if register prefix % not required. */
643 static int allow_naked_reg
= 0;
645 /* 1 if the assembler should add BND prefix for all control-transferring
646 instructions supporting it, even if this prefix wasn't specified
648 static int add_bnd_prefix
= 0;
650 /* 1 if pseudo index register, eiz/riz, is allowed . */
651 static int allow_index_reg
= 0;
653 /* 1 if the assembler should ignore LOCK prefix, even if it was
654 specified explicitly. */
655 static int omit_lock_prefix
= 0;
657 /* 1 if the assembler should encode lfence, mfence, and sfence as
658 "lock addl $0, (%{re}sp)". */
659 static int avoid_fence
= 0;
661 /* 1 if lfence should be inserted after every load. */
662 static int lfence_after_load
= 0;
664 /* Non-zero if lfence should be inserted before indirect branch. */
665 static enum lfence_before_indirect_branch_kind
667 lfence_branch_none
= 0,
668 lfence_branch_register
,
669 lfence_branch_memory
,
672 lfence_before_indirect_branch
;
674 /* Non-zero if lfence should be inserted before ret. */
675 static enum lfence_before_ret_kind
677 lfence_before_ret_none
= 0,
678 lfence_before_ret_not
,
679 lfence_before_ret_or
,
680 lfence_before_ret_shl
684 /* Types of previous instruction is .byte or prefix. */
699 /* 1 if the assembler should generate relax relocations. */
701 static int generate_relax_relocations
702 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
;
704 static enum check_kind
710 sse_check
, operand_check
= check_warning
;
712 /* Non-zero if branches should be aligned within power of 2 boundary. */
713 static int align_branch_power
= 0;
715 /* Types of branches to align. */
716 enum align_branch_kind
718 align_branch_none
= 0,
719 align_branch_jcc
= 1,
720 align_branch_fused
= 2,
721 align_branch_jmp
= 3,
722 align_branch_call
= 4,
723 align_branch_indirect
= 5,
727 /* Type bits of branches to align. */
728 enum align_branch_bit
730 align_branch_jcc_bit
= 1 << align_branch_jcc
,
731 align_branch_fused_bit
= 1 << align_branch_fused
,
732 align_branch_jmp_bit
= 1 << align_branch_jmp
,
733 align_branch_call_bit
= 1 << align_branch_call
,
734 align_branch_indirect_bit
= 1 << align_branch_indirect
,
735 align_branch_ret_bit
= 1 << align_branch_ret
738 static unsigned int align_branch
= (align_branch_jcc_bit
739 | align_branch_fused_bit
740 | align_branch_jmp_bit
);
742 /* Types of condition jump used by macro-fusion. */
745 mf_jcc_jo
= 0, /* base opcode 0x70 */
746 mf_jcc_jc
, /* base opcode 0x72 */
747 mf_jcc_je
, /* base opcode 0x74 */
748 mf_jcc_jna
, /* base opcode 0x76 */
749 mf_jcc_js
, /* base opcode 0x78 */
750 mf_jcc_jp
, /* base opcode 0x7a */
751 mf_jcc_jl
, /* base opcode 0x7c */
752 mf_jcc_jle
, /* base opcode 0x7e */
755 /* Types of compare flag-modifying instructions used by macro-fusion. */
758 mf_cmp_test_and
, /* test/cmp */
759 mf_cmp_alu_cmp
, /* add/sub/cmp */
760 mf_cmp_incdec
/* inc/dec */
763 /* The maximum padding size for fused jcc. CMP like instruction can
764 be 9 bytes and jcc can be 6 bytes. Leave room just in case for
766 #define MAX_FUSED_JCC_PADDING_SIZE 20
768 /* The maximum number of prefixes added for an instruction. */
769 static unsigned int align_branch_prefix_size
= 5;
772 1. Clear the REX_W bit with register operand if possible.
773 2. Above plus use 128bit vector instruction to clear the full vector
776 static int optimize
= 0;
779 1. Clear the REX_W bit with register operand if possible.
780 2. Above plus use 128bit vector instruction to clear the full vector
782 3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to
785 static int optimize_for_space
= 0;
787 /* Register prefix used for error message. */
788 static const char *register_prefix
= "%";
790 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
791 leave, push, and pop instructions so that gcc has the same stack
792 frame as in 32 bit mode. */
793 static char stackop_size
= '\0';
795 /* Non-zero to optimize code alignment. */
796 int optimize_align_code
= 1;
798 /* Non-zero to quieten some warnings. */
799 static int quiet_warnings
= 0;
801 /* Guard to avoid repeated warnings about non-16-bit code on 16-bit CPUs. */
802 static bool pre_386_16bit_warned
;
805 static const char *cpu_arch_name
= NULL
;
806 static char *cpu_sub_arch_name
= NULL
;
808 /* CPU feature flags. */
809 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
811 /* If we have selected a cpu we are generating instructions for. */
812 static int cpu_arch_tune_set
= 0;
814 /* Cpu we are generating instructions for. */
815 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
817 /* CPU feature flags of cpu we are generating instructions for. */
818 static i386_cpu_flags cpu_arch_tune_flags
;
820 /* CPU instruction set architecture used. */
821 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
823 /* CPU feature flags of instruction set architecture used. */
824 i386_cpu_flags cpu_arch_isa_flags
;
826 /* If set, conditional jumps are not automatically promoted to handle
827 larger than a byte offset. */
828 static bool no_cond_jump_promotion
= false;
830 /* This will be set from an expression parser hook if there's any
831 applicable operator involved in an expression. */
834 expr_operator_present
,
838 /* Encode SSE instructions with VEX prefix. */
839 static unsigned int sse2avx
;
841 /* Encode aligned vector move as unaligned vector move. */
842 static unsigned int use_unaligned_vector_move
;
844 /* Encode scalar AVX instructions with specific vector length. */
851 /* Encode VEX WIG instructions with specific vex.w. */
858 /* Encode scalar EVEX LIG instructions with specific vector length. */
866 /* Encode EVEX WIG instructions with specific evex.w. */
873 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
874 static enum rc_type evexrcig
= rne
;
876 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
877 static symbolS
*GOT_symbol
;
879 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
880 unsigned int x86_dwarf2_return_column
;
882 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
883 int x86_cie_data_alignment
;
885 /* Interface to relax_segment.
886 There are 3 major relax states for 386 jump insns because the
887 different types of jumps add different sizes to frags when we're
888 figuring out what sort of jump to choose to reach a given label.
890 BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING are used to align
891 branches which are handled by md_estimate_size_before_relax() and
892 i386_generic_table_relax_frag(). */
/* Relax types for 386 jump insns (encoded into the relax substate
   together with a size below).  */
#define UNCOND_JUMP 0
#define COND_JUMP86 2
#define BRANCH_PADDING 3
#define BRANCH_PREFIX 4
#define FUSED_JCC_PADDING 5

/* Displacement sizes combined with the 16-bit code-segment flag.  */
#define SMALL16 (SMALL | CODE16)
#define BIG16 (BIG | CODE16)
911 #define INLINE __inline__
/* Pack a relax (type, size) pair into a relax_substateT: the jump type
   occupies the upper bits, the displacement size the low two bits.  */
#define ENCODE_RELAX_STATE(type, size) \
  ((relax_substateT) (((type) << 2) | (size)))
919 #define TYPE_FROM_RELAX_STATE(s) \
/* Extract the displacement byte count (4, 2 or 1) from the size bits of
   a relax substate.  */
#define DISP_SIZE_FROM_RELAX_STATE(s) \
  ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
924 /* This table is used by relax_frag to promote short jumps to long
925 ones where necessary. SMALL (short) jumps may be promoted to BIG
926 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
927 don't allow a short jump in a 32 bit code segment to be promoted to
928 a 16 bit offset jump because it's slower (requires data size
929 prefix), and doesn't work, unless the destination is in the bottom
930 64k of the code segment (The top 16 bits of eip are zeroed). */
932 const relax_typeS md_relax_table
[] =
935 1) most positive reach of this state,
936 2) most negative reach of this state,
937 3) how many bytes this mode will have in the variable part of the frag
938 4) which index into the table to try if we can't fit into this one. */
940 /* UNCOND_JUMP states. */
941 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
942 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
943 /* dword jmp adds 4 bytes to frag:
944 0 extra opcode bytes, 4 displacement bytes. */
946 /* word jmp adds 2 bytes to frag:
947 0 extra opcode bytes, 2 displacement bytes. */
950 /* COND_JUMP states. */
951 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
952 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
953 /* dword conditionals adds 5 bytes to frag:
954 1 extra opcode byte, 4 displacement bytes. */
956 /* word conditionals add 3 bytes to frag:
957 1 extra opcode byte, 2 displacement bytes. */
960 /* COND_JUMP86 states. */
961 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
962 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
963 /* dword conditionals adds 5 bytes to frag:
964 1 extra opcode byte, 4 displacement bytes. */
966 /* word conditionals add 4 bytes to frag:
967 1 displacement byte and a 3 byte long branch insn. */
971 #define ARCH(n, t, f, s) \
972 { STRING_COMMA_LEN (#n), s, PROCESSOR_ ## t, CPU_ ## f ## _FLAGS, \
974 #define SUBARCH(n, e, d, s) \
975 { STRING_COMMA_LEN (#n), s, PROCESSOR_NONE, CPU_ ## e ## _FLAGS, \
976 CPU_ ## d ## _FLAGS }
978 static const arch_entry cpu_arch
[] =
980 /* Do not replace the first two entries - i386_target_format() and
981 set_cpu_arch() rely on them being there in this order. */
982 ARCH (generic32
, GENERIC32
, GENERIC32
, false),
983 ARCH (generic64
, GENERIC64
, GENERIC64
, false),
984 ARCH (i8086
, UNKNOWN
, NONE
, false),
985 ARCH (i186
, UNKNOWN
, 186, false),
986 ARCH (i286
, UNKNOWN
, 286, false),
987 ARCH (i386
, I386
, 386, false),
988 ARCH (i486
, I486
, 486, false),
989 ARCH (i586
, PENTIUM
, 586, false),
990 ARCH (i686
, PENTIUMPRO
, 686, false),
991 ARCH (pentium
, PENTIUM
, 586, false),
992 ARCH (pentiumpro
, PENTIUMPRO
, PENTIUMPRO
, false),
993 ARCH (pentiumii
, PENTIUMPRO
, P2
, false),
994 ARCH (pentiumiii
, PENTIUMPRO
, P3
, false),
995 ARCH (pentium4
, PENTIUM4
, P4
, false),
996 ARCH (prescott
, NOCONA
, CORE
, false),
997 ARCH (nocona
, NOCONA
, NOCONA
, false),
998 ARCH (yonah
, CORE
, CORE
, true),
999 ARCH (core
, CORE
, CORE
, false),
1000 ARCH (merom
, CORE2
, CORE2
, true),
1001 ARCH (core2
, CORE2
, CORE2
, false),
1002 ARCH (corei7
, COREI7
, COREI7
, false),
1003 ARCH (iamcu
, IAMCU
, IAMCU
, false),
1004 ARCH (k6
, K6
, K6
, false),
1005 ARCH (k6_2
, K6
, K6_2
, false),
1006 ARCH (athlon
, ATHLON
, ATHLON
, false),
1007 ARCH (sledgehammer
, K8
, K8
, true),
1008 ARCH (opteron
, K8
, K8
, false),
1009 ARCH (k8
, K8
, K8
, false),
1010 ARCH (amdfam10
, AMDFAM10
, AMDFAM10
, false),
1011 ARCH (bdver1
, BD
, BDVER1
, false),
1012 ARCH (bdver2
, BD
, BDVER2
, false),
1013 ARCH (bdver3
, BD
, BDVER3
, false),
1014 ARCH (bdver4
, BD
, BDVER4
, false),
1015 ARCH (znver1
, ZNVER
, ZNVER1
, false),
1016 ARCH (znver2
, ZNVER
, ZNVER2
, false),
1017 ARCH (znver3
, ZNVER
, ZNVER3
, false),
1018 ARCH (znver4
, ZNVER
, ZNVER4
, false),
1019 ARCH (btver1
, BT
, BTVER1
, false),
1020 ARCH (btver2
, BT
, BTVER2
, false),
1022 SUBARCH (8087, 8087, ANY_8087
, false),
1023 SUBARCH (87, NONE
, ANY_8087
, false), /* Disable only! */
1024 SUBARCH (287, 287, ANY_287
, false),
1025 SUBARCH (387, 387, ANY_387
, false),
1026 SUBARCH (687, 687, ANY_687
, false),
1027 SUBARCH (cmov
, CMOV
, CMOV
, false),
1028 SUBARCH (fxsr
, FXSR
, ANY_FXSR
, false),
1029 SUBARCH (mmx
, MMX
, ANY_MMX
, false),
1030 SUBARCH (sse
, SSE
, ANY_SSE
, false),
1031 SUBARCH (sse2
, SSE2
, ANY_SSE2
, false),
1032 SUBARCH (sse3
, SSE3
, ANY_SSE3
, false),
1033 SUBARCH (sse4a
, SSE4A
, ANY_SSE4A
, false),
1034 SUBARCH (ssse3
, SSSE3
, ANY_SSSE3
, false),
1035 SUBARCH (sse4
.1
, SSE4_1
, ANY_SSE4_1
, false),
1036 SUBARCH (sse4
.2
, SSE4_2
, ANY_SSE4_2
, false),
1037 SUBARCH (sse4
, SSE4_2
, ANY_SSE4_1
, false),
1038 SUBARCH (avx
, AVX
, ANY_AVX
, false),
1039 SUBARCH (avx2
, AVX2
, ANY_AVX2
, false),
1040 SUBARCH (avx512f
, AVX512F
, ANY_AVX512F
, false),
1041 SUBARCH (avx512cd
, AVX512CD
, ANY_AVX512CD
, false),
1042 SUBARCH (avx512er
, AVX512ER
, ANY_AVX512ER
, false),
1043 SUBARCH (avx512pf
, AVX512PF
, ANY_AVX512PF
, false),
1044 SUBARCH (avx512dq
, AVX512DQ
, ANY_AVX512DQ
, false),
1045 SUBARCH (avx512bw
, AVX512BW
, ANY_AVX512BW
, false),
1046 SUBARCH (avx512vl
, AVX512VL
, ANY_AVX512VL
, false),
1047 SUBARCH (monitor
, MONITOR
, MONITOR
, false),
1048 SUBARCH (vmx
, VMX
, ANY_VMX
, false),
1049 SUBARCH (vmfunc
, VMFUNC
, ANY_VMFUNC
, false),
1050 SUBARCH (smx
, SMX
, SMX
, false),
1051 SUBARCH (xsave
, XSAVE
, ANY_XSAVE
, false),
1052 SUBARCH (xsaveopt
, XSAVEOPT
, ANY_XSAVEOPT
, false),
1053 SUBARCH (xsavec
, XSAVEC
, ANY_XSAVEC
, false),
1054 SUBARCH (xsaves
, XSAVES
, ANY_XSAVES
, false),
1055 SUBARCH (aes
, AES
, ANY_AES
, false),
1056 SUBARCH (pclmul
, PCLMUL
, ANY_PCLMUL
, false),
1057 SUBARCH (clmul
, PCLMUL
, ANY_PCLMUL
, true),
1058 SUBARCH (fsgsbase
, FSGSBASE
, FSGSBASE
, false),
1059 SUBARCH (rdrnd
, RDRND
, RDRND
, false),
1060 SUBARCH (f16c
, F16C
, ANY_F16C
, false),
1061 SUBARCH (bmi2
, BMI2
, BMI2
, false),
1062 SUBARCH (fma
, FMA
, ANY_FMA
, false),
1063 SUBARCH (fma4
, FMA4
, ANY_FMA4
, false),
1064 SUBARCH (xop
, XOP
, ANY_XOP
, false),
1065 SUBARCH (lwp
, LWP
, ANY_LWP
, false),
1066 SUBARCH (movbe
, MOVBE
, MOVBE
, false),
1067 SUBARCH (cx16
, CX16
, CX16
, false),
1068 SUBARCH (lahf_sahf
, LAHF_SAHF
, LAHF_SAHF
, false),
1069 SUBARCH (ept
, EPT
, ANY_EPT
, false),
1070 SUBARCH (lzcnt
, LZCNT
, LZCNT
, false),
1071 SUBARCH (popcnt
, POPCNT
, POPCNT
, false),
1072 SUBARCH (hle
, HLE
, HLE
, false),
1073 SUBARCH (rtm
, RTM
, ANY_RTM
, false),
1074 SUBARCH (tsx
, TSX
, TSX
, false),
1075 SUBARCH (invpcid
, INVPCID
, INVPCID
, false),
1076 SUBARCH (clflush
, CLFLUSH
, CLFLUSH
, false),
1077 SUBARCH (nop
, NOP
, NOP
, false),
1078 SUBARCH (syscall
, SYSCALL
, SYSCALL
, false),
1079 SUBARCH (rdtscp
, RDTSCP
, RDTSCP
, false),
1080 SUBARCH (3dnow
, 3DNOW
, ANY_3DNOW
, false),
1081 SUBARCH (3dnowa
, 3DNOWA
, ANY_3DNOWA
, false),
1082 SUBARCH (padlock
, PADLOCK
, PADLOCK
, false),
1083 SUBARCH (pacifica
, SVME
, ANY_SVME
, true),
1084 SUBARCH (svme
, SVME
, ANY_SVME
, false),
1085 SUBARCH (abm
, ABM
, ABM
, false),
1086 SUBARCH (bmi
, BMI
, BMI
, false),
1087 SUBARCH (tbm
, TBM
, TBM
, false),
1088 SUBARCH (adx
, ADX
, ADX
, false),
1089 SUBARCH (rdseed
, RDSEED
, RDSEED
, false),
1090 SUBARCH (prfchw
, PRFCHW
, PRFCHW
, false),
1091 SUBARCH (smap
, SMAP
, SMAP
, false),
1092 SUBARCH (mpx
, MPX
, ANY_MPX
, false),
1093 SUBARCH (sha
, SHA
, ANY_SHA
, false),
1094 SUBARCH (clflushopt
, CLFLUSHOPT
, CLFLUSHOPT
, false),
1095 SUBARCH (prefetchwt1
, PREFETCHWT1
, PREFETCHWT1
, false),
1096 SUBARCH (se1
, SE1
, SE1
, false),
1097 SUBARCH (clwb
, CLWB
, CLWB
, false),
1098 SUBARCH (avx512ifma
, AVX512IFMA
, ANY_AVX512IFMA
, false),
1099 SUBARCH (avx512vbmi
, AVX512VBMI
, ANY_AVX512VBMI
, false),
1100 SUBARCH (avx512_4fmaps
, AVX512_4FMAPS
, ANY_AVX512_4FMAPS
, false),
1101 SUBARCH (avx512_4vnniw
, AVX512_4VNNIW
, ANY_AVX512_4VNNIW
, false),
1102 SUBARCH (avx512_vpopcntdq
, AVX512_VPOPCNTDQ
, ANY_AVX512_VPOPCNTDQ
, false),
1103 SUBARCH (avx512_vbmi2
, AVX512_VBMI2
, ANY_AVX512_VBMI2
, false),
1104 SUBARCH (avx512_vnni
, AVX512_VNNI
, ANY_AVX512_VNNI
, false),
1105 SUBARCH (avx512_bitalg
, AVX512_BITALG
, ANY_AVX512_BITALG
, false),
1106 SUBARCH (avx_vnni
, AVX_VNNI
, ANY_AVX_VNNI
, false),
1107 SUBARCH (clzero
, CLZERO
, CLZERO
, false),
1108 SUBARCH (mwaitx
, MWAITX
, MWAITX
, false),
1109 SUBARCH (ospke
, OSPKE
, ANY_OSPKE
, false),
1110 SUBARCH (rdpid
, RDPID
, RDPID
, false),
1111 SUBARCH (ptwrite
, PTWRITE
, PTWRITE
, false),
1112 SUBARCH (ibt
, IBT
, IBT
, false),
1113 SUBARCH (shstk
, SHSTK
, SHSTK
, false),
1114 SUBARCH (gfni
, GFNI
, ANY_GFNI
, false),
1115 SUBARCH (vaes
, VAES
, ANY_VAES
, false),
1116 SUBARCH (vpclmulqdq
, VPCLMULQDQ
, ANY_VPCLMULQDQ
, false),
1117 SUBARCH (wbnoinvd
, WBNOINVD
, WBNOINVD
, false),
1118 SUBARCH (pconfig
, PCONFIG
, PCONFIG
, false),
1119 SUBARCH (waitpkg
, WAITPKG
, WAITPKG
, false),
1120 SUBARCH (cldemote
, CLDEMOTE
, CLDEMOTE
, false),
1121 SUBARCH (amx_int8
, AMX_INT8
, ANY_AMX_INT8
, false),
1122 SUBARCH (amx_bf16
, AMX_BF16
, ANY_AMX_BF16
, false),
1123 SUBARCH (amx_fp16
, AMX_FP16
, ANY_AMX_FP16
, false),
1124 SUBARCH (amx_complex
, AMX_COMPLEX
, ANY_AMX_COMPLEX
, false),
1125 SUBARCH (amx_tile
, AMX_TILE
, ANY_AMX_TILE
, false),
1126 SUBARCH (movdiri
, MOVDIRI
, MOVDIRI
, false),
1127 SUBARCH (movdir64b
, MOVDIR64B
, MOVDIR64B
, false),
1128 SUBARCH (avx512_bf16
, AVX512_BF16
, ANY_AVX512_BF16
, false),
1129 SUBARCH (avx512_vp2intersect
, AVX512_VP2INTERSECT
,
1130 ANY_AVX512_VP2INTERSECT
, false),
1131 SUBARCH (tdx
, TDX
, TDX
, false),
1132 SUBARCH (enqcmd
, ENQCMD
, ENQCMD
, false),
1133 SUBARCH (serialize
, SERIALIZE
, SERIALIZE
, false),
1134 SUBARCH (rdpru
, RDPRU
, RDPRU
, false),
1135 SUBARCH (mcommit
, MCOMMIT
, MCOMMIT
, false),
1136 SUBARCH (sev_es
, SEV_ES
, ANY_SEV_ES
, false),
1137 SUBARCH (tsxldtrk
, TSXLDTRK
, ANY_TSXLDTRK
, false),
1138 SUBARCH (kl
, KL
, ANY_KL
, false),
1139 SUBARCH (widekl
, WIDEKL
, ANY_WIDEKL
, false),
1140 SUBARCH (uintr
, UINTR
, UINTR
, false),
1141 SUBARCH (hreset
, HRESET
, HRESET
, false),
1142 SUBARCH (avx512_fp16
, AVX512_FP16
, ANY_AVX512_FP16
, false),
1143 SUBARCH (prefetchi
, PREFETCHI
, PREFETCHI
, false),
1144 SUBARCH (avx_ifma
, AVX_IFMA
, ANY_AVX_IFMA
, false),
1145 SUBARCH (avx_vnni_int8
, AVX_VNNI_INT8
, ANY_AVX_VNNI_INT8
, false),
1146 SUBARCH (cmpccxadd
, CMPCCXADD
, CMPCCXADD
, false),
1147 SUBARCH (wrmsrns
, WRMSRNS
, WRMSRNS
, false),
1148 SUBARCH (msrlist
, MSRLIST
, MSRLIST
, false),
1149 SUBARCH (avx_ne_convert
, AVX_NE_CONVERT
, ANY_AVX_NE_CONVERT
, false),
1150 SUBARCH (rao_int
, RAO_INT
, RAO_INT
, false),
1151 SUBARCH (rmpquery
, RMPQUERY
, ANY_RMPQUERY
, false),
1152 SUBARCH (fred
, FRED
, ANY_FRED
, false),
1153 SUBARCH (lkgs
, LKGS
, ANY_LKGS
, false),
1154 SUBARCH (avx_vnni_int16
, AVX_VNNI_INT16
, ANY_AVX_VNNI_INT16
, false),
1155 SUBARCH (sha512
, SHA512
, ANY_SHA512
, false),
1156 SUBARCH (sm3
, SM3
, ANY_SM3
, false),
1157 SUBARCH (sm4
, SM4
, ANY_SM4
, false),
1158 SUBARCH (pbndkb
, PBNDKB
, PBNDKB
, false),
1165 /* Like s_lcomm_internal in gas/read.c but the alignment string
1166 is allowed to be optional. */
/* NOTE(review): this chunk is an incomplete extraction -- the function's
   return type, braces and several interior statements are not visible;
   the comments below describe only the visible lines.  */
1169 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
/* Visible fragment: when the next input character is a comma, an explicit
   alignment argument follows and is parsed.  */
1176 && *input_line_pointer
== ',')
1178 align
= parse_align (needs_align
- 1);
/* (addressT) -1 marks a parse_align () failure; the error-handling branch
   itself is not visible here -- TODO confirm against gas/read.c.  */
1180 if (align
== (addressT
) -1)
/* Reserve SIZE bytes for SYMBOLP in .bss with alignment ALIGN.  */
1195 bss_alloc (symbolP
, size
, align
);
/* Handler for the ".lcomm" pseudo-op (registered in md_pseudo_table with
   arg 1).  Delegates to the generic s_comm_internal () using
   pe_lcomm_internal as the allocation callback.
   NOTE(review): return type and braces are missing from this extraction.  */
1200 pe_lcomm (int needs_align
)
1202 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
/* Target-specific pseudo-op table consumed by the gas core: each entry is
   { directive-name, handler, integer argument passed to the handler }.
   NOTE(review): the #else/#endif lines and the table's closing brace are
   not visible in this extraction.  */
1206 const pseudo_typeS md_pseudo_table
[] =
1208 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1209 {"align", s_align_bytes
, 0},
1211 {"align", s_align_ptwo
, 0},
1213 {"arch", set_cpu_arch
, 0},
1217 {"lcomm", pe_lcomm
, 1},
/* Floating-point data directives; the char argument selects the format.  */
1219 {"ffloat", float_cons
, 'f'},
1220 {"dfloat", float_cons
, 'd'},
1221 {"tfloat", float_cons
, 'x'},
1222 {"hfloat", float_cons
, 'h'},
1223 {"bfloat16", float_cons
, 'b'},
1225 {"slong", signed_cons
, 4},
1226 {"insn", s_insn
, 0},
/* .noopt/.optim are accepted for compatibility but ignored.  */
1227 {"noopt", s_ignore
, 0},
1228 {"optim", s_ignore
, 0},
/* Code-size mode switches (16/32/64-bit).  */
1229 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
1230 {"code16", set_code_flag
, CODE_16BIT
},
1231 {"code32", set_code_flag
, CODE_32BIT
},
1233 {"code64", set_code_flag
, CODE_64BIT
},
/* Syntax/mnemonic selection: argument 1 = Intel, 0 = AT&T.  */
1235 {"intel_syntax", set_intel_syntax
, 1},
1236 {"att_syntax", set_intel_syntax
, 0},
1237 {"intel_mnemonic", set_intel_mnemonic
, 1},
1238 {"att_mnemonic", set_intel_mnemonic
, 0},
1239 {"allow_index_reg", set_allow_index_reg
, 1},
1240 {"disallow_index_reg", set_allow_index_reg
, 0},
1241 {"sse_check", set_check
, 0},
1242 {"operand_check", set_check
, 1},
1243 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1244 {"largecomm", handle_large_common
, 0},
/* DWARF debug-info directives.  */
1246 {"file", dwarf2_directive_file
, 0},
1247 {"loc", dwarf2_directive_loc
, 0},
1248 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
/* PE/COFF-only relocation directives.  */
1251 {"secrel32", pe_directive_secrel
, 0},
1252 {"secidx", pe_directive_secidx
, 0},
1257 /* For interface with expression (). */
1258 extern char *input_line_pointer
;
1260 /* Hash table for instruction mnemonic lookup. */
1261 static htab_t op_hash
;
1263 /* Hash table for register lookup. */
1264 static htab_t reg_hash
;
1266 /* Various efficient no-op patterns for aligning code labels.
1267 Note: Don't try to assemble the instructions in the comments.
1268 0L and 0w are not legal. */
/* f32_N is an N-byte filler sequence for 32-bit code; f16_N likewise for
   16-bit code.  NOTE(review): the initializer of f32_1 (a single 0x90
   "nop") is missing from this extraction.  */
1269 static const unsigned char f32_1
[] =
1271 static const unsigned char f32_2
[] =
1272 {0x66,0x90}; /* xchg %ax,%ax */
1273 static const unsigned char f32_3
[] =
1274 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
1275 static const unsigned char f32_4
[] =
1276 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1277 static const unsigned char f32_6
[] =
1278 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
1279 static const unsigned char f32_7
[] =
1280 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1281 static const unsigned char f16_3
[] =
1282 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
1283 static const unsigned char f16_4
[] =
1284 {0x8d,0xb4,0x00,0x00}; /* lea 0W(%si),%si */
/* Opcode bytes for the jump-over-padding forms used by i386_generate_nops.  */
1285 static const unsigned char jump_disp8
[] =
1286 {0xeb}; /* jmp disp8 */
1287 static const unsigned char jump32_disp32
[] =
1288 {0xe9}; /* jmp disp32 */
1289 static const unsigned char jump16_disp32
[] =
1290 {0x66,0xe9}; /* jmp disp32 */
1291 /* 32-bit NOPs patterns. */
/* Indexed by (length - 1); the NULL slot means there is no 5-byte form.
   NOTE(review): closing "};" of this array is missing from the extraction.  */
1292 static const unsigned char *const f32_patt
[] = {
1293 f32_1
, f32_2
, f32_3
, f32_4
, NULL
, f32_6
, f32_7
1295 /* 16-bit NOPs patterns. */
1296 static const unsigned char *const f16_patt
[] = {
1297 f32_1
, f32_2
, f16_3
, f16_4
/* alt_N: N-byte multi-byte NOPs (0F 1F forms) for CPUs with CpuNop.
   NOTE(review): the initializer of alt_3 is missing from the extraction.  */
1299 /* nopl (%[re]ax) */
1300 static const unsigned char alt_3
[] =
1302 /* nopl 0(%[re]ax) */
1303 static const unsigned char alt_4
[] =
1304 {0x0f,0x1f,0x40,0x00};
1305 /* nopl 0(%[re]ax,%[re]ax,1) */
1306 static const unsigned char alt_5
[] =
1307 {0x0f,0x1f,0x44,0x00,0x00};
1308 /* nopw 0(%[re]ax,%[re]ax,1) */
1309 static const unsigned char alt_6
[] =
1310 {0x66,0x0f,0x1f,0x44,0x00,0x00};
1311 /* nopl 0L(%[re]ax) */
1312 static const unsigned char alt_7
[] =
1313 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1314 /* nopl 0L(%[re]ax,%[re]ax,1) */
1315 static const unsigned char alt_8
[] =
1316 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1317 /* nopw 0L(%[re]ax,%[re]ax,1) */
1318 static const unsigned char alt_9
[] =
1319 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1320 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1321 static const unsigned char alt_10
[] =
1322 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1323 /* data16 nopw %cs:0L(%eax,%eax,1) */
1324 static const unsigned char alt_11
[] =
1325 {0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1326 /* 32-bit and 64-bit NOPs patterns. */
1327 static const unsigned char *const alt_patt
[] = {
1328 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1329 alt_9
, alt_10
, alt_11
1332 /* Generate COUNT bytes of NOPs to WHERE from PATT with the maximum
1333 size of a single NOP instruction MAX_SINGLE_NOP_SIZE. */
/* NOTE(review): incomplete extraction -- the return type, braces and
   several guard conditions of this function are not visible.  */
1336 i386_output_nops (char *where
, const unsigned char *const *patt
,
1337 int count
, int max_single_nop_size
)
1340 /* Place the longer NOP first. */
1343 const unsigned char *nops
;
/* A non-positive maximum is a caller bug -- abort assembly.  */
1345 if (max_single_nop_size
< 1)
1347 as_fatal (_("i386_output_nops called to generate nops of at most %d bytes!"),
1348 max_single_nop_size
);
/* PATT is indexed by (nop length - 1).  */
1352 nops
= patt
[max_single_nop_size
- 1];
1354 /* Use the smaller one if the requested one isn't available. */
/* NOTE(review): presumably guarded by a NULL-entry check on patt[] that is
   not visible here -- confirm against binutils tc-i386.c.  */
1357 max_single_nop_size
--;
1358 nops
= patt
[max_single_nop_size
- 1];
/* Emit as many maximum-size NOPs as fit; LAST is the leftover byte count.  */
1361 last
= count
% max_single_nop_size
;
1364 for (offset
= 0; offset
< count
; offset
+= max_single_nop_size
)
1365 memcpy (where
+ offset
, nops
, max_single_nop_size
);
/* Cover the remaining LAST bytes with a single shorter NOP ...  */
1369 nops
= patt
[last
- 1];
1372 /* Use the smaller one plus one-byte NOP if the needed one isn't available.  */
1375 nops
= patt
[last
- 1];
1376 memcpy (where
+ offset
, nops
, last
);
/* ... padding the final byte with the one-byte pattern patt[0].  */
1377 where
[offset
+ last
] = *patt
[0];
1380 memcpy (where
+ offset
, nops
, last
);
/* Return non-zero iff NUM is representable as an unsigned 7-bit
   immediate, i.e. masking with 0x7f leaves it unchanged.
   NOTE(review): return types and braces are missing from this extraction.  */
1385 fits_in_imm7 (offsetT num
)
1387 return (num
& 0x7f) == num
;
/* Return non-zero iff NUM is representable as an unsigned 31-bit value.  */
1391 fits_in_imm31 (offsetT num
)
1393 return (num
& 0x7fffffff) == num
;
1396 /* Generate COUNT bytes of NOPs to WHERE with the maximum size of a
1397 single NOP instruction LIMIT. */
1400 i386_generate_nops (fragS
*fragP
, char *where
, offsetT count
, int limit
)
1402 const unsigned char *const *patt
= NULL
;
1403 int max_single_nop_size
;
1404 /* Maximum number of NOPs before switching to jump over NOPs. */
1405 int max_number_of_nops
;
1407 switch (fragP
->fr_type
)
1412 case rs_machine_dependent
:
1413 /* Allow NOP padding for jumps and calls. */
1414 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
1415 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
1422 /* We need to decide which NOP sequence to use for 32bit and
1423 64bit. When -mtune= is used:
1425 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1426 PROCESSOR_GENERIC32, f32_patt will be used.
1427 2. For the rest, alt_patt will be used.
1429 When -mtune= isn't used, alt_patt will be used if
1430 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1433 When -march= or .arch is used, we can't use anything beyond
1434 cpu_arch_isa_flags. */
1436 if (flag_code
== CODE_16BIT
)
1439 max_single_nop_size
= sizeof (f16_patt
) / sizeof (f16_patt
[0]);
1440 /* Limit number of NOPs to 2 in 16-bit mode. */
1441 max_number_of_nops
= 2;
1445 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1447 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1448 switch (cpu_arch_tune
)
1450 case PROCESSOR_UNKNOWN
:
1451 /* We use cpu_arch_isa_flags to check if we SHOULD
1452 optimize with nops. */
1453 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1458 case PROCESSOR_PENTIUM4
:
1459 case PROCESSOR_NOCONA
:
1460 case PROCESSOR_CORE
:
1461 case PROCESSOR_CORE2
:
1462 case PROCESSOR_COREI7
:
1463 case PROCESSOR_GENERIC64
:
1465 case PROCESSOR_ATHLON
:
1467 case PROCESSOR_AMDFAM10
:
1469 case PROCESSOR_ZNVER
:
1473 case PROCESSOR_I386
:
1474 case PROCESSOR_I486
:
1475 case PROCESSOR_PENTIUM
:
1476 case PROCESSOR_PENTIUMPRO
:
1477 case PROCESSOR_IAMCU
:
1478 case PROCESSOR_GENERIC32
:
1481 case PROCESSOR_NONE
:
1487 switch (fragP
->tc_frag_data
.tune
)
1489 case PROCESSOR_UNKNOWN
:
1490 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1491 PROCESSOR_UNKNOWN. */
1495 case PROCESSOR_I386
:
1496 case PROCESSOR_I486
:
1497 case PROCESSOR_PENTIUM
:
1498 case PROCESSOR_IAMCU
:
1500 case PROCESSOR_ATHLON
:
1502 case PROCESSOR_AMDFAM10
:
1504 case PROCESSOR_ZNVER
:
1506 case PROCESSOR_GENERIC32
:
1507 /* We use cpu_arch_isa_flags to check if we CAN optimize
1509 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1514 case PROCESSOR_PENTIUMPRO
:
1515 case PROCESSOR_PENTIUM4
:
1516 case PROCESSOR_NOCONA
:
1517 case PROCESSOR_CORE
:
1518 case PROCESSOR_CORE2
:
1519 case PROCESSOR_COREI7
:
1520 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1525 case PROCESSOR_GENERIC64
:
1528 case PROCESSOR_NONE
:
1533 if (patt
== f32_patt
)
1535 max_single_nop_size
= sizeof (f32_patt
) / sizeof (f32_patt
[0]);
1536 /* Limit number of NOPs to 2 for older processors. */
1537 max_number_of_nops
= 2;
1541 max_single_nop_size
= sizeof (alt_patt
) / sizeof (alt_patt
[0]);
1542 /* Limit number of NOPs to 7 for newer processors. */
1543 max_number_of_nops
= 7;
1548 limit
= max_single_nop_size
;
1550 if (fragP
->fr_type
== rs_fill_nop
)
1552 /* Output NOPs for .nop directive. */
1553 if (limit
> max_single_nop_size
)
1555 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1556 _("invalid single nop size: %d "
1557 "(expect within [0, %d])"),
1558 limit
, max_single_nop_size
);
1562 else if (fragP
->fr_type
!= rs_machine_dependent
)
1563 fragP
->fr_var
= count
;
1565 if ((count
/ max_single_nop_size
) > max_number_of_nops
)
1567 /* Generate jump over NOPs. */
1568 offsetT disp
= count
- 2;
1569 if (fits_in_imm7 (disp
))
1571 /* Use "jmp disp8" if possible. */
1573 where
[0] = jump_disp8
[0];
1579 unsigned int size_of_jump
;
1581 if (flag_code
== CODE_16BIT
)
1583 where
[0] = jump16_disp32
[0];
1584 where
[1] = jump16_disp32
[1];
1589 where
[0] = jump32_disp32
[0];
1593 count
-= size_of_jump
+ 4;
1594 if (!fits_in_imm31 (count
))
1596 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1597 _("jump over nop padding out of range"));
1601 md_number_to_chars (where
+ size_of_jump
, count
, 4);
1602 where
+= size_of_jump
+ 4;
1606 /* Generate multiple NOPs. */
1607 i386_output_nops (where
, patt
, count
, limit
);
1611 operand_type_all_zero (const union i386_operand_type
*x
)
1613 switch (ARRAY_SIZE(x
->array
))
1624 return !x
->array
[0];
1631 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1633 switch (ARRAY_SIZE(x
->array
))
1649 x
->bitfield
.class = ClassNone
;
1650 x
->bitfield
.instance
= InstanceNone
;
1654 operand_type_equal (const union i386_operand_type
*x
,
1655 const union i386_operand_type
*y
)
1657 switch (ARRAY_SIZE(x
->array
))
1660 if (x
->array
[2] != y
->array
[2])
1664 if (x
->array
[1] != y
->array
[1])
1668 return x
->array
[0] == y
->array
[0];
1676 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1678 switch (ARRAY_SIZE(x
->array
))
1697 return !x
->array
[0];
1704 cpu_flags_equal (const union i386_cpu_flags
*x
,
1705 const union i386_cpu_flags
*y
)
1707 switch (ARRAY_SIZE(x
->array
))
1710 if (x
->array
[4] != y
->array
[4])
1714 if (x
->array
[3] != y
->array
[3])
1718 if (x
->array
[2] != y
->array
[2])
1722 if (x
->array
[1] != y
->array
[1])
1726 return x
->array
[0] == y
->array
[0];
1734 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1736 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1737 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1740 static INLINE i386_cpu_flags
1741 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1743 switch (ARRAY_SIZE (x
.array
))
1746 x
.array
[4] &= y
.array
[4];
1749 x
.array
[3] &= y
.array
[3];
1752 x
.array
[2] &= y
.array
[2];
1755 x
.array
[1] &= y
.array
[1];
1758 x
.array
[0] &= y
.array
[0];
1766 static INLINE i386_cpu_flags
1767 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1769 switch (ARRAY_SIZE (x
.array
))
1772 x
.array
[4] |= y
.array
[4];
1775 x
.array
[3] |= y
.array
[3];
1778 x
.array
[2] |= y
.array
[2];
1781 x
.array
[1] |= y
.array
[1];
1784 x
.array
[0] |= y
.array
[0];
1792 static INLINE i386_cpu_flags
1793 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1795 switch (ARRAY_SIZE (x
.array
))
1798 x
.array
[4] &= ~y
.array
[4];
1801 x
.array
[3] &= ~y
.array
[3];
1804 x
.array
[2] &= ~y
.array
[2];
1807 x
.array
[1] &= ~y
.array
[1];
1810 x
.array
[0] &= ~y
.array
[0];
1818 static const i386_cpu_flags avx512
= CPU_ANY_AVX512F_FLAGS
;
1820 #define CPU_FLAGS_ARCH_MATCH 0x1
1821 #define CPU_FLAGS_64BIT_MATCH 0x2
1823 #define CPU_FLAGS_PERFECT_MATCH \
1824 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_64BIT_MATCH)
1826 /* Return CPU flags match bits. */
1829 cpu_flags_match (const insn_template
*t
)
1831 i386_cpu_flags x
= t
->cpu_flags
;
1832 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1834 x
.bitfield
.cpu64
= 0;
1835 x
.bitfield
.cpuno64
= 0;
1837 if (cpu_flags_all_zero (&x
))
1839 /* This instruction is available on all archs. */
1840 match
|= CPU_FLAGS_ARCH_MATCH
;
1844 /* This instruction is available only on some archs. */
1845 i386_cpu_flags cpu
= cpu_arch_flags
;
1847 /* AVX512VL is no standalone feature - match it and then strip it. */
1848 if (x
.bitfield
.cpuavx512vl
&& !cpu
.bitfield
.cpuavx512vl
)
1850 x
.bitfield
.cpuavx512vl
= 0;
1852 /* AVX and AVX2 present at the same time express an operand size
1853 dependency - strip AVX2 for the purposes here. The operand size
1854 dependent check occurs in check_vecOperands(). */
1855 if (x
.bitfield
.cpuavx
&& x
.bitfield
.cpuavx2
)
1856 x
.bitfield
.cpuavx2
= 0;
1858 cpu
= cpu_flags_and (x
, cpu
);
1859 if (!cpu_flags_all_zero (&cpu
))
1861 if (x
.bitfield
.cpuavx
)
1863 /* We need to check a few extra flags with AVX. */
1864 if (cpu
.bitfield
.cpuavx
1865 && (!t
->opcode_modifier
.sse2avx
1866 || (sse2avx
&& !i
.prefix
[DATA_PREFIX
]))
1867 && (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1868 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1869 && (!x
.bitfield
.cpupclmul
|| cpu
.bitfield
.cpupclmul
))
1870 match
|= CPU_FLAGS_ARCH_MATCH
;
1872 else if (x
.bitfield
.cpuavx512f
)
1874 /* We need to check a few extra flags with AVX512F. */
1875 if (cpu
.bitfield
.cpuavx512f
1876 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1877 && (!x
.bitfield
.cpuvaes
|| cpu
.bitfield
.cpuvaes
)
1878 && (!x
.bitfield
.cpuvpclmulqdq
|| cpu
.bitfield
.cpuvpclmulqdq
))
1879 match
|= CPU_FLAGS_ARCH_MATCH
;
1882 match
|= CPU_FLAGS_ARCH_MATCH
;
1888 static INLINE i386_operand_type
1889 operand_type_and (i386_operand_type x
, i386_operand_type y
)
1891 if (x
.bitfield
.class != y
.bitfield
.class)
1892 x
.bitfield
.class = ClassNone
;
1893 if (x
.bitfield
.instance
!= y
.bitfield
.instance
)
1894 x
.bitfield
.instance
= InstanceNone
;
1896 switch (ARRAY_SIZE (x
.array
))
1899 x
.array
[2] &= y
.array
[2];
1902 x
.array
[1] &= y
.array
[1];
1905 x
.array
[0] &= y
.array
[0];
1913 static INLINE i386_operand_type
1914 operand_type_and_not (i386_operand_type x
, i386_operand_type y
)
1916 gas_assert (y
.bitfield
.class == ClassNone
);
1917 gas_assert (y
.bitfield
.instance
== InstanceNone
);
1919 switch (ARRAY_SIZE (x
.array
))
1922 x
.array
[2] &= ~y
.array
[2];
1925 x
.array
[1] &= ~y
.array
[1];
1928 x
.array
[0] &= ~y
.array
[0];
1936 static INLINE i386_operand_type
1937 operand_type_or (i386_operand_type x
, i386_operand_type y
)
1939 gas_assert (x
.bitfield
.class == ClassNone
||
1940 y
.bitfield
.class == ClassNone
||
1941 x
.bitfield
.class == y
.bitfield
.class);
1942 gas_assert (x
.bitfield
.instance
== InstanceNone
||
1943 y
.bitfield
.instance
== InstanceNone
||
1944 x
.bitfield
.instance
== y
.bitfield
.instance
);
1946 switch (ARRAY_SIZE (x
.array
))
1949 x
.array
[2] |= y
.array
[2];
1952 x
.array
[1] |= y
.array
[1];
1955 x
.array
[0] |= y
.array
[0];
1963 static INLINE i386_operand_type
1964 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
1966 gas_assert (y
.bitfield
.class == ClassNone
);
1967 gas_assert (y
.bitfield
.instance
== InstanceNone
);
1969 switch (ARRAY_SIZE (x
.array
))
1972 x
.array
[2] ^= y
.array
[2];
1975 x
.array
[1] ^= y
.array
[1];
1978 x
.array
[0] ^= y
.array
[0];
1986 static const i386_operand_type anydisp
= {
1987 .bitfield
= { .disp8
= 1, .disp16
= 1, .disp32
= 1, .disp64
= 1 }
1999 operand_type_check (i386_operand_type t
, enum operand_type c
)
2004 return t
.bitfield
.class == Reg
;
2007 return (t
.bitfield
.imm8
2011 || t
.bitfield
.imm32s
2012 || t
.bitfield
.imm64
);
2015 return (t
.bitfield
.disp8
2016 || t
.bitfield
.disp16
2017 || t
.bitfield
.disp32
2018 || t
.bitfield
.disp64
);
2021 return (t
.bitfield
.disp8
2022 || t
.bitfield
.disp16
2023 || t
.bitfield
.disp32
2024 || t
.bitfield
.disp64
2025 || t
.bitfield
.baseindex
);
2034 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
2035 between operand GIVEN and operand WANTED for instruction template T. */
2038 match_operand_size (const insn_template
*t
, unsigned int wanted
,
2041 return !((i
.types
[given
].bitfield
.byte
2042 && !t
->operand_types
[wanted
].bitfield
.byte
)
2043 || (i
.types
[given
].bitfield
.word
2044 && !t
->operand_types
[wanted
].bitfield
.word
)
2045 || (i
.types
[given
].bitfield
.dword
2046 && !t
->operand_types
[wanted
].bitfield
.dword
)
2047 || (i
.types
[given
].bitfield
.qword
2048 && (!t
->operand_types
[wanted
].bitfield
.qword
2049 /* Don't allow 64-bit (memory) operands outside of 64-bit
2050 mode, when they're used where a 64-bit GPR could also
2051 be used. Checking is needed for Intel Syntax only. */
2053 && flag_code
!= CODE_64BIT
2054 && (t
->operand_types
[wanted
].bitfield
.class == Reg
2055 || t
->operand_types
[wanted
].bitfield
.class == Accum
2056 || t
->opcode_modifier
.isstring
))))
2057 || (i
.types
[given
].bitfield
.tbyte
2058 && !t
->operand_types
[wanted
].bitfield
.tbyte
));
2061 /* Return 1 if there is no conflict in SIMD register between operand
2062 GIVEN and operand WANTED for instruction template T. */
2065 match_simd_size (const insn_template
*t
, unsigned int wanted
,
2068 return !((i
.types
[given
].bitfield
.xmmword
2069 && !t
->operand_types
[wanted
].bitfield
.xmmword
)
2070 || (i
.types
[given
].bitfield
.ymmword
2071 && !t
->operand_types
[wanted
].bitfield
.ymmword
)
2072 || (i
.types
[given
].bitfield
.zmmword
2073 && !t
->operand_types
[wanted
].bitfield
.zmmword
)
2074 || (i
.types
[given
].bitfield
.tmmword
2075 && !t
->operand_types
[wanted
].bitfield
.tmmword
));
2078 /* Return 1 if there is no conflict in any size between operand GIVEN
2079 and operand WANTED for instruction template T. */
2082 match_mem_size (const insn_template
*t
, unsigned int wanted
,
2085 return (match_operand_size (t
, wanted
, given
)
2086 && !((i
.types
[given
].bitfield
.unspecified
2087 && !i
.broadcast
.type
2088 && !i
.broadcast
.bytes
2089 && !t
->operand_types
[wanted
].bitfield
.unspecified
)
2090 || (i
.types
[given
].bitfield
.fword
2091 && !t
->operand_types
[wanted
].bitfield
.fword
)
2092 /* For scalar opcode templates to allow register and memory
2093 operands at the same time, some special casing is needed
2094 here. Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
2095 down-conversion vpmov*. */
2096 || ((t
->operand_types
[wanted
].bitfield
.class == RegSIMD
2097 && t
->operand_types
[wanted
].bitfield
.byte
2098 + t
->operand_types
[wanted
].bitfield
.word
2099 + t
->operand_types
[wanted
].bitfield
.dword
2100 + t
->operand_types
[wanted
].bitfield
.qword
2101 > !!t
->opcode_modifier
.broadcast
)
2102 ? (i
.types
[given
].bitfield
.xmmword
2103 || i
.types
[given
].bitfield
.ymmword
2104 || i
.types
[given
].bitfield
.zmmword
)
2105 : !match_simd_size(t
, wanted
, given
))));
2108 /* Return value has MATCH_STRAIGHT set if there is no size conflict on any
2109 operands for instruction template T, and it has MATCH_REVERSE set if there
2110 is no size conflict on any operands for the template with operands reversed
2111 (and the template allows for reversing in the first place). */
2113 #define MATCH_STRAIGHT 1
2114 #define MATCH_REVERSE 2
2116 static INLINE
unsigned int
2117 operand_size_match (const insn_template
*t
)
2119 unsigned int j
, match
= MATCH_STRAIGHT
;
2121 /* Don't check non-absolute jump instructions. */
2122 if (t
->opcode_modifier
.jump
2123 && t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
2126 /* Check memory and accumulator operand size. */
2127 for (j
= 0; j
< i
.operands
; j
++)
2129 if (i
.types
[j
].bitfield
.class != Reg
2130 && i
.types
[j
].bitfield
.class != RegSIMD
2131 && t
->opcode_modifier
.operandconstraint
== ANY_SIZE
)
2134 if (t
->operand_types
[j
].bitfield
.class == Reg
2135 && !match_operand_size (t
, j
, j
))
2141 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2142 && !match_simd_size (t
, j
, j
))
2148 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2149 && (!match_operand_size (t
, j
, j
) || !match_simd_size (t
, j
, j
)))
2155 if ((i
.flags
[j
] & Operand_Mem
) && !match_mem_size (t
, j
, j
))
2162 if (!t
->opcode_modifier
.d
)
2165 /* Check reverse. */
2166 gas_assert (i
.operands
>= 2);
2168 for (j
= 0; j
< i
.operands
; j
++)
2170 unsigned int given
= i
.operands
- j
- 1;
2172 /* For FMA4 and XOP insns VEX.W controls just the first two
2173 register operands. */
2174 if (t
->cpu_flags
.bitfield
.cpufma4
|| t
->cpu_flags
.bitfield
.cpuxop
)
2175 given
= j
< 2 ? 1 - j
: j
;
2177 if (t
->operand_types
[j
].bitfield
.class == Reg
2178 && !match_operand_size (t
, j
, given
))
2181 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2182 && !match_simd_size (t
, j
, given
))
2185 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2186 && (!match_operand_size (t
, j
, given
)
2187 || !match_simd_size (t
, j
, given
)))
2190 if ((i
.flags
[given
] & Operand_Mem
) && !match_mem_size (t
, j
, given
))
2194 return match
| MATCH_REVERSE
;
2198 operand_type_match (i386_operand_type overlap
,
2199 i386_operand_type given
)
2201 i386_operand_type temp
= overlap
;
2203 temp
.bitfield
.unspecified
= 0;
2204 temp
.bitfield
.byte
= 0;
2205 temp
.bitfield
.word
= 0;
2206 temp
.bitfield
.dword
= 0;
2207 temp
.bitfield
.fword
= 0;
2208 temp
.bitfield
.qword
= 0;
2209 temp
.bitfield
.tbyte
= 0;
2210 temp
.bitfield
.xmmword
= 0;
2211 temp
.bitfield
.ymmword
= 0;
2212 temp
.bitfield
.zmmword
= 0;
2213 temp
.bitfield
.tmmword
= 0;
2214 if (operand_type_all_zero (&temp
))
2217 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
)
2221 i
.error
= operand_type_mismatch
;
2225 /* If given types g0 and g1 are registers they must be of the same type
2226 unless the expected operand type register overlap is null.
2227 Intel syntax sized memory operands are also checked here. */
2230 operand_type_register_match (i386_operand_type g0
,
2231 i386_operand_type t0
,
2232 i386_operand_type g1
,
2233 i386_operand_type t1
)
2235 if (g0
.bitfield
.class != Reg
2236 && g0
.bitfield
.class != RegSIMD
2237 && (g0
.bitfield
.unspecified
2238 || !operand_type_check (g0
, anymem
)))
2241 if (g1
.bitfield
.class != Reg
2242 && g1
.bitfield
.class != RegSIMD
2243 && (g1
.bitfield
.unspecified
2244 || !operand_type_check (g1
, anymem
)))
2247 if (g0
.bitfield
.byte
== g1
.bitfield
.byte
2248 && g0
.bitfield
.word
== g1
.bitfield
.word
2249 && g0
.bitfield
.dword
== g1
.bitfield
.dword
2250 && g0
.bitfield
.qword
== g1
.bitfield
.qword
2251 && g0
.bitfield
.xmmword
== g1
.bitfield
.xmmword
2252 && g0
.bitfield
.ymmword
== g1
.bitfield
.ymmword
2253 && g0
.bitfield
.zmmword
== g1
.bitfield
.zmmword
)
2256 /* If expectations overlap in no more than a single size, all is fine. */
2257 g0
= operand_type_and (t0
, t1
);
2258 if (g0
.bitfield
.byte
2262 + g0
.bitfield
.xmmword
2263 + g0
.bitfield
.ymmword
2264 + g0
.bitfield
.zmmword
<= 1)
2267 i
.error
= register_type_mismatch
;
2272 static INLINE
unsigned int
2273 register_number (const reg_entry
*r
)
2275 unsigned int nr
= r
->reg_num
;
2277 if (r
->reg_flags
& RegRex
)
2280 if (r
->reg_flags
& RegVRex
)
2286 static INLINE
unsigned int
2287 mode_from_disp_size (i386_operand_type t
)
2289 if (t
.bitfield
.disp8
)
2291 else if (t
.bitfield
.disp16
2292 || t
.bitfield
.disp32
)
/* Integer range predicates.  Each returns non-zero iff NUM fits the named
   range; the "num + bias <= max" form works on the unsigned addressT via
   wraparound, so e.g. signed-byte means NUM in [-0x80, 0x7f].
   NOTE(review): return types, braces and some bodies (notably
   fits_in_unsigned_byte and surrounding #if/#endif for non-BFD64 builds)
   are missing from this extraction.  */
2299 fits_in_signed_byte (addressT num
)
2301 return num
+ 0x80 <= 0xff;
2305 fits_in_unsigned_byte (addressT num
)
2311 fits_in_unsigned_word (addressT num
)
2313 return num
<= 0xffff;
2317 fits_in_signed_word (addressT num
)
2319 return num
+ 0x8000 <= 0xffff;
2323 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED
)
2328 return num
+ 0x80000000 <= 0xffffffff;
2330 } /* fits_in_signed_long() */
2333 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED
)
2338 return num
<= 0xffffffff;
2340 } /* fits_in_unsigned_long() */
/* Reduce NUM to a value representable in 32 bits: values that fit an
   unsigned long have bit 31 sign-extended (the xor/subtract idiom);
   values fitting neither signed nor unsigned 32 bits are truncated.  */
2342 static INLINE valueT
extend_to_32bit_address (addressT num
)
2345 if (fits_in_unsigned_long(num
))
2346 return (num
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
2348 if (!fits_in_signed_long (num
))
2349 return num
& 0xffffffff;
/* Return non-zero iff NUM can be encoded as an EVEX compressed disp8:
   NUM must be a multiple of 1 << i.memshift and the shifted value must
   fit a signed byte.  NOTE(review): return type, braces, the mask
   declaration and the alignment-check statement are missing from this
   extraction.  */
2356 fits_in_disp8 (offsetT num
)
2358 int shift
= i
.memshift
;
2364 mask
= (1 << shift
) - 1;
2366 /* Return 0 if NUM isn't properly aligned.  */
2370 /* Check if NUM will fit in 8bit after shift.  */
2371 return fits_in_signed_byte (num
>> shift
);
/* Check NUM against the 4-bit (VEX) or 3-bit (EVEX) register-immediate
   field width, per the current encoding mode.  */
2375 fits_in_imm4 (offsetT num
)
2377 /* Despite the name, check for imm3 if we're dealing with EVEX. */
2378 return (num
& (i
.vec_encoding
!= vex_encoding_evex
? 0xf : 7)) == num
;
2381 static i386_operand_type
2382 smallest_imm_type (offsetT num
)
2384 i386_operand_type t
;
2386 operand_type_set (&t
, 0);
2387 t
.bitfield
.imm64
= 1;
2389 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
2391 /* This code is disabled on the 486 because all the Imm1 forms
2392 in the opcode table are slower on the i486. They're the
2393 versions with the implicitly specified single-position
2394 displacement, which has another syntax if you really want to
2396 t
.bitfield
.imm1
= 1;
2397 t
.bitfield
.imm8
= 1;
2398 t
.bitfield
.imm8s
= 1;
2399 t
.bitfield
.imm16
= 1;
2400 t
.bitfield
.imm32
= 1;
2401 t
.bitfield
.imm32s
= 1;
2403 else if (fits_in_signed_byte (num
))
2405 if (fits_in_unsigned_byte (num
))
2406 t
.bitfield
.imm8
= 1;
2407 t
.bitfield
.imm8s
= 1;
2408 t
.bitfield
.imm16
= 1;
2409 t
.bitfield
.imm32
= 1;
2410 t
.bitfield
.imm32s
= 1;
2412 else if (fits_in_unsigned_byte (num
))
2414 t
.bitfield
.imm8
= 1;
2415 t
.bitfield
.imm16
= 1;
2416 t
.bitfield
.imm32
= 1;
2417 t
.bitfield
.imm32s
= 1;
2419 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
2421 t
.bitfield
.imm16
= 1;
2422 t
.bitfield
.imm32
= 1;
2423 t
.bitfield
.imm32s
= 1;
2425 else if (fits_in_signed_long (num
))
2427 t
.bitfield
.imm32
= 1;
2428 t
.bitfield
.imm32s
= 1;
2430 else if (fits_in_unsigned_long (num
))
2431 t
.bitfield
.imm32
= 1;
2437 offset_in_range (offsetT val
, int size
)
2443 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
2444 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
2446 case 4: mask
= ((addressT
) 1 << 32) - 1; break;
2448 case sizeof (val
): return val
;
2452 if ((val
& ~mask
) != 0 && (-val
& ~mask
) != 0)
2453 as_warn (_("0x%" PRIx64
" shortened to 0x%" PRIx64
),
2454 (uint64_t) val
, (uint64_t) (val
& mask
));
/* Return the mnemonic string for template T: a pointer into the shared
   i386_mnemonics table at T's recorded offset.  */
2459 static INLINE
const char *insn_name (const insn_template
*t
)
2461 return &i386_mnemonics
[t
->mnem_off
];
2474 a. PREFIX_EXIST if attempting to add a prefix where one from the
2475 same class already exists.
2476 b. PREFIX_LOCK if lock prefix is added.
2477 c. PREFIX_REP if rep/repne prefix is added.
2478 d. PREFIX_DS if ds prefix is added.
2479 e. PREFIX_OTHER if other prefix is added.
2482 static enum PREFIX_GROUP
2483 add_prefix (unsigned int prefix
)
2485 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2488 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2489 && flag_code
== CODE_64BIT
)
2491 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2492 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_R
)
2493 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_X
)
2494 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_B
))
2505 case DS_PREFIX_OPCODE
:
2508 case CS_PREFIX_OPCODE
:
2509 case ES_PREFIX_OPCODE
:
2510 case FS_PREFIX_OPCODE
:
2511 case GS_PREFIX_OPCODE
:
2512 case SS_PREFIX_OPCODE
:
2516 case REPNE_PREFIX_OPCODE
:
2517 case REPE_PREFIX_OPCODE
:
2522 case LOCK_PREFIX_OPCODE
:
2531 case ADDR_PREFIX_OPCODE
:
2535 case DATA_PREFIX_OPCODE
:
2539 if (i
.prefix
[q
] != 0)
2547 i
.prefix
[q
] |= prefix
;
2550 as_bad (_("same type of prefix used twice"));
2556 update_code_flag (int value
, int check
)
2558 PRINTF_LIKE ((*as_error
));
2560 flag_code
= (enum flag_code
) value
;
2561 if (flag_code
== CODE_64BIT
)
2563 cpu_arch_flags
.bitfield
.cpu64
= 1;
2564 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2568 cpu_arch_flags
.bitfield
.cpu64
= 0;
2569 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2571 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2574 as_error
= as_fatal
;
2577 (*as_error
) (_("64bit mode not supported on `%s'."),
2578 cpu_arch_name
? cpu_arch_name
: default_arch
);
2580 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2583 as_error
= as_fatal
;
2586 (*as_error
) (_("32bit mode not supported on `%s'."),
2587 cpu_arch_name
? cpu_arch_name
: default_arch
);
2589 stackop_size
= '\0';
/* Non-checking wrapper around update_code_flag for .code16/32/64.  */

static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2599 set_16bit_gcc_code_flag (int new_code_flag
)
2601 flag_code
= (enum flag_code
) new_code_flag
;
2602 if (flag_code
!= CODE_16BIT
)
2604 cpu_arch_flags
.bitfield
.cpu64
= 0;
2605 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2606 stackop_size
= LONG_MNEM_SUFFIX
;
2610 set_intel_syntax (int syntax_flag
)
2612 /* Find out if register prefixing is specified. */
2613 int ask_naked_reg
= 0;
2616 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2619 int e
= get_symbol_name (&string
);
2621 if (strcmp (string
, "prefix") == 0)
2623 else if (strcmp (string
, "noprefix") == 0)
2626 as_bad (_("bad argument to syntax directive."));
2627 (void) restore_line_pointer (e
);
2629 demand_empty_rest_of_line ();
2631 intel_syntax
= syntax_flag
;
2633 if (ask_naked_reg
== 0)
2634 allow_naked_reg
= (intel_syntax
2635 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2637 allow_naked_reg
= (ask_naked_reg
< 0);
2639 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2641 register_prefix
= allow_naked_reg
? "" : "%";
2645 set_intel_mnemonic (int mnemonic_flag
)
2647 intel_mnemonic
= mnemonic_flag
;
2651 set_allow_index_reg (int flag
)
2653 allow_index_reg
= flag
;
2657 set_check (int what
)
2659 enum check_kind
*kind
;
2664 kind
= &operand_check
;
2675 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2678 int e
= get_symbol_name (&string
);
2680 if (strcmp (string
, "none") == 0)
2682 else if (strcmp (string
, "warning") == 0)
2683 *kind
= check_warning
;
2684 else if (strcmp (string
, "error") == 0)
2685 *kind
= check_error
;
2687 as_bad (_("bad argument to %s_check directive."), str
);
2688 (void) restore_line_pointer (e
);
2691 as_bad (_("missing argument for %s_check directive"), str
);
2693 demand_empty_rest_of_line ();
2697 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2698 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2700 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2701 static const char *arch
;
2703 /* Intel MCU is only supported on ELF. */
2709 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2710 use default_arch. */
2711 arch
= cpu_arch_name
;
2713 arch
= default_arch
;
2716 /* If we are targeting Intel MCU, we must enable it. */
2717 if ((get_elf_backend_data (stdoutput
)->elf_machine_code
== EM_IAMCU
)
2718 == new_flag
.bitfield
.cpuiamcu
)
2721 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2726 extend_cpu_sub_arch_name (const char *name
)
2728 if (cpu_sub_arch_name
)
2729 cpu_sub_arch_name
= reconcat (cpu_sub_arch_name
, cpu_sub_arch_name
,
2730 ".", name
, (const char *) NULL
);
2732 cpu_sub_arch_name
= concat (".", name
, (const char *) NULL
);
2736 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2738 typedef struct arch_stack_entry
2740 const struct arch_stack_entry
*prev
;
2743 i386_cpu_flags flags
;
2744 i386_cpu_flags isa_flags
;
2745 enum processor_type isa
;
2746 enum flag_code flag_code
;
2748 bool no_cond_jump_promotion
;
2750 static const arch_stack_entry
*arch_stack_top
;
2754 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2757 int e
= get_symbol_name (&s
);
2758 const char *string
= s
;
2760 i386_cpu_flags flags
;
2762 if (strcmp (string
, "default") == 0)
2764 if (strcmp (default_arch
, "iamcu") == 0)
2765 string
= default_arch
;
2768 static const i386_cpu_flags cpu_unknown_flags
= CPU_UNKNOWN_FLAGS
;
2770 cpu_arch_name
= NULL
;
2771 free (cpu_sub_arch_name
);
2772 cpu_sub_arch_name
= NULL
;
2773 cpu_arch_flags
= cpu_unknown_flags
;
2774 if (flag_code
== CODE_64BIT
)
2776 cpu_arch_flags
.bitfield
.cpu64
= 1;
2777 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2781 cpu_arch_flags
.bitfield
.cpu64
= 0;
2782 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2784 cpu_arch_isa
= PROCESSOR_UNKNOWN
;
2785 cpu_arch_isa_flags
= cpu_arch
[flag_code
== CODE_64BIT
].enable
;
2786 if (!cpu_arch_tune_set
)
2788 cpu_arch_tune
= cpu_arch_isa
;
2789 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2792 j
= ARRAY_SIZE (cpu_arch
) + 1;
2795 else if (strcmp (string
, "push") == 0)
2797 arch_stack_entry
*top
= XNEW (arch_stack_entry
);
2799 top
->name
= cpu_arch_name
;
2800 if (cpu_sub_arch_name
)
2801 top
->sub_name
= xstrdup (cpu_sub_arch_name
);
2803 top
->sub_name
= NULL
;
2804 top
->flags
= cpu_arch_flags
;
2805 top
->isa
= cpu_arch_isa
;
2806 top
->isa_flags
= cpu_arch_isa_flags
;
2807 top
->flag_code
= flag_code
;
2808 top
->stackop_size
= stackop_size
;
2809 top
->no_cond_jump_promotion
= no_cond_jump_promotion
;
2811 top
->prev
= arch_stack_top
;
2812 arch_stack_top
= top
;
2814 (void) restore_line_pointer (e
);
2815 demand_empty_rest_of_line ();
2818 else if (strcmp (string
, "pop") == 0)
2820 const arch_stack_entry
*top
= arch_stack_top
;
2823 as_bad (_(".arch stack is empty"));
2824 else if (top
->flag_code
!= flag_code
2825 || top
->stackop_size
!= stackop_size
)
2827 static const unsigned int bits
[] = {
2833 as_bad (_("this `.arch pop' requires `.code%u%s' to be in effect"),
2834 bits
[top
->flag_code
],
2835 top
->stackop_size
== LONG_MNEM_SUFFIX
? "gcc" : "");
2839 arch_stack_top
= top
->prev
;
2841 cpu_arch_name
= top
->name
;
2842 free (cpu_sub_arch_name
);
2843 cpu_sub_arch_name
= top
->sub_name
;
2844 cpu_arch_flags
= top
->flags
;
2845 cpu_arch_isa
= top
->isa
;
2846 cpu_arch_isa_flags
= top
->isa_flags
;
2847 no_cond_jump_promotion
= top
->no_cond_jump_promotion
;
2852 (void) restore_line_pointer (e
);
2853 demand_empty_rest_of_line ();
2857 for (; j
< ARRAY_SIZE (cpu_arch
); j
++)
2859 if (strcmp (string
+ (*string
== '.'), cpu_arch
[j
].name
) == 0
2860 && (*string
== '.') == (cpu_arch
[j
].type
== PROCESSOR_NONE
))
2864 check_cpu_arch_compatible (string
, cpu_arch
[j
].enable
);
2866 cpu_arch_name
= cpu_arch
[j
].name
;
2867 free (cpu_sub_arch_name
);
2868 cpu_sub_arch_name
= NULL
;
2869 cpu_arch_flags
= cpu_arch
[j
].enable
;
2870 if (flag_code
== CODE_64BIT
)
2872 cpu_arch_flags
.bitfield
.cpu64
= 1;
2873 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2877 cpu_arch_flags
.bitfield
.cpu64
= 0;
2878 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2880 cpu_arch_isa
= cpu_arch
[j
].type
;
2881 cpu_arch_isa_flags
= cpu_arch
[j
].enable
;
2882 if (!cpu_arch_tune_set
)
2884 cpu_arch_tune
= cpu_arch_isa
;
2885 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2887 pre_386_16bit_warned
= false;
2891 if (cpu_flags_all_zero (&cpu_arch
[j
].enable
))
2894 flags
= cpu_flags_or (cpu_arch_flags
,
2895 cpu_arch
[j
].enable
);
2897 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2899 extend_cpu_sub_arch_name (string
+ 1);
2900 cpu_arch_flags
= flags
;
2901 cpu_arch_isa_flags
= flags
;
2905 = cpu_flags_or (cpu_arch_isa_flags
,
2906 cpu_arch
[j
].enable
);
2907 (void) restore_line_pointer (e
);
2908 demand_empty_rest_of_line ();
2913 if (startswith (string
, ".no") && j
>= ARRAY_SIZE (cpu_arch
))
2915 /* Disable an ISA extension. */
2916 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2917 if (cpu_arch
[j
].type
== PROCESSOR_NONE
2918 && strcmp (string
+ 3, cpu_arch
[j
].name
) == 0)
2920 flags
= cpu_flags_and_not (cpu_arch_flags
,
2921 cpu_arch
[j
].disable
);
2922 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2924 extend_cpu_sub_arch_name (string
+ 1);
2925 cpu_arch_flags
= flags
;
2926 cpu_arch_isa_flags
= flags
;
2928 (void) restore_line_pointer (e
);
2929 demand_empty_rest_of_line ();
2934 if (j
== ARRAY_SIZE (cpu_arch
))
2935 as_bad (_("no such architecture: `%s'"), string
);
2937 *input_line_pointer
= e
;
2940 as_bad (_("missing cpu architecture"));
2942 no_cond_jump_promotion
= 0;
2943 if (*input_line_pointer
== ','
2944 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2949 ++input_line_pointer
;
2950 e
= get_symbol_name (&string
);
2952 if (strcmp (string
, "nojumps") == 0)
2953 no_cond_jump_promotion
= 1;
2954 else if (strcmp (string
, "jumps") == 0)
2957 as_bad (_("no such architecture modifier: `%s'"), string
);
2959 (void) restore_line_pointer (e
);
2962 demand_empty_rest_of_line ();
2965 enum bfd_architecture
2968 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2970 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2971 || flag_code
== CODE_64BIT
)
2972 as_fatal (_("Intel MCU is 32bit ELF only"));
2973 return bfd_arch_iamcu
;
2976 return bfd_arch_i386
;
2982 if (startswith (default_arch
, "x86_64"))
2984 if (default_arch
[6] == '\0')
2985 return bfd_mach_x86_64
;
2987 return bfd_mach_x64_32
;
2989 else if (!strcmp (default_arch
, "i386")
2990 || !strcmp (default_arch
, "iamcu"))
2992 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2994 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
)
2995 as_fatal (_("Intel MCU is 32bit ELF only"));
2996 return bfd_mach_i386_iamcu
;
2999 return bfd_mach_i386_i386
;
3002 as_fatal (_("unknown architecture"));
3005 #include "opcodes/i386-tbl.h"
3010 /* Support pseudo prefixes like {disp32}. */
3011 lex_type
['{'] = LEX_BEGIN_NAME
;
3013 /* Initialize op_hash hash table. */
3014 op_hash
= str_htab_create ();
3017 const insn_template
*const *sets
= i386_op_sets
;
3018 const insn_template
*const *end
= sets
+ ARRAY_SIZE (i386_op_sets
) - 1;
3020 /* Type checks to compensate for the conversion through void * which
3021 occurs during hash table insertion / lookup. */
3022 (void) sizeof (sets
== ¤t_templates
->start
);
3023 (void) sizeof (end
== ¤t_templates
->end
);
3024 for (; sets
< end
; ++sets
)
3025 if (str_hash_insert (op_hash
, insn_name (*sets
), sets
, 0))
3026 as_fatal (_("duplicate %s"), insn_name (*sets
));
3029 /* Initialize reg_hash hash table. */
3030 reg_hash
= str_htab_create ();
3032 const reg_entry
*regtab
;
3033 unsigned int regtab_size
= i386_regtab_size
;
3035 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
3037 switch (regtab
->reg_type
.bitfield
.class)
3040 if (regtab
->reg_type
.bitfield
.dword
)
3042 if (regtab
->reg_type
.bitfield
.instance
== Accum
)
3045 else if (regtab
->reg_type
.bitfield
.tbyte
)
3047 /* There's no point inserting st(<N>) in the hash table, as
3048 parentheses aren't included in register_chars[] anyway. */
3049 if (regtab
->reg_type
.bitfield
.instance
!= Accum
)
3056 switch (regtab
->reg_num
)
3058 case 0: reg_es
= regtab
; break;
3059 case 2: reg_ss
= regtab
; break;
3060 case 3: reg_ds
= regtab
; break;
3065 if (!regtab
->reg_num
)
3070 if (str_hash_insert (reg_hash
, regtab
->reg_name
, regtab
, 0) != NULL
)
3071 as_fatal (_("duplicate %s"), regtab
->reg_name
);
3075 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
3080 for (c
= 0; c
< 256; c
++)
3082 if (ISDIGIT (c
) || ISLOWER (c
))
3084 mnemonic_chars
[c
] = c
;
3085 register_chars
[c
] = c
;
3086 operand_chars
[c
] = c
;
3088 else if (ISUPPER (c
))
3090 mnemonic_chars
[c
] = TOLOWER (c
);
3091 register_chars
[c
] = mnemonic_chars
[c
];
3092 operand_chars
[c
] = c
;
3094 #ifdef SVR4_COMMENT_CHARS
3095 else if (c
== '\\' && strchr (i386_comment_chars
, '/'))
3096 operand_chars
[c
] = c
;
3100 operand_chars
[c
] = c
;
3103 mnemonic_chars
['_'] = '_';
3104 mnemonic_chars
['-'] = '-';
3105 mnemonic_chars
['.'] = '.';
3107 for (p
= extra_symbol_chars
; *p
!= '\0'; p
++)
3108 operand_chars
[(unsigned char) *p
] = *p
;
3109 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
3110 operand_chars
[(unsigned char) *p
] = *p
;
3113 if (flag_code
== CODE_64BIT
)
3115 #if defined (OBJ_COFF) && defined (TE_PE)
3116 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
3119 x86_dwarf2_return_column
= 16;
3121 x86_cie_data_alignment
= -8;
3122 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3123 x86_sframe_cfa_sp_reg
= 7;
3124 x86_sframe_cfa_fp_reg
= 6;
3129 x86_dwarf2_return_column
= 8;
3130 x86_cie_data_alignment
= -4;
3133 /* NB: FUSED_JCC_PADDING frag must have sufficient room so that it
3134 can be turned into BRANCH_PREFIX frag. */
3135 if (align_branch_prefix_size
> MAX_FUSED_JCC_PADDING_SIZE
)
3140 i386_print_statistics (FILE *file
)
3142 htab_print_statistics (file
, "i386 opcode", op_hash
);
3143 htab_print_statistics (file
, "i386 register", reg_hash
);
3149 htab_delete (op_hash
);
3150 htab_delete (reg_hash
);
3155 /* Debugging routines for md_assemble. */
3156 static void pte (insn_template
*);
3157 static void pt (i386_operand_type
);
3158 static void pe (expressionS
*);
3159 static void ps (symbolS
*);
3162 pi (const char *line
, i386_insn
*x
)
3166 fprintf (stdout
, "%s: template ", line
);
3168 fprintf (stdout
, " address: base %s index %s scale %x\n",
3169 x
->base_reg
? x
->base_reg
->reg_name
: "none",
3170 x
->index_reg
? x
->index_reg
->reg_name
: "none",
3171 x
->log2_scale_factor
);
3172 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
3173 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
3174 fprintf (stdout
, " sib: base %x index %x scale %x\n",
3175 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
3176 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
3177 (x
->rex
& REX_W
) != 0,
3178 (x
->rex
& REX_R
) != 0,
3179 (x
->rex
& REX_X
) != 0,
3180 (x
->rex
& REX_B
) != 0);
3181 for (j
= 0; j
< x
->operands
; j
++)
3183 fprintf (stdout
, " #%d: ", j
+ 1);
3185 fprintf (stdout
, "\n");
3186 if (x
->types
[j
].bitfield
.class == Reg
3187 || x
->types
[j
].bitfield
.class == RegMMX
3188 || x
->types
[j
].bitfield
.class == RegSIMD
3189 || x
->types
[j
].bitfield
.class == RegMask
3190 || x
->types
[j
].bitfield
.class == SReg
3191 || x
->types
[j
].bitfield
.class == RegCR
3192 || x
->types
[j
].bitfield
.class == RegDR
3193 || x
->types
[j
].bitfield
.class == RegTR
3194 || x
->types
[j
].bitfield
.class == RegBND
)
3195 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
3196 if (operand_type_check (x
->types
[j
], imm
))
3198 if (operand_type_check (x
->types
[j
], disp
))
3199 pe (x
->op
[j
].disps
);
3204 pte (insn_template
*t
)
3206 static const unsigned char opc_pfx
[] = { 0, 0x66, 0xf3, 0xf2 };
3207 static const char *const opc_spc
[] = {
3208 NULL
, "0f", "0f38", "0f3a", NULL
, "evexmap5", "evexmap6", NULL
,
3209 "XOP08", "XOP09", "XOP0A",
3213 fprintf (stdout
, " %d operands ", t
->operands
);
3214 if (opc_pfx
[t
->opcode_modifier
.opcodeprefix
])
3215 fprintf (stdout
, "pfx %x ", opc_pfx
[t
->opcode_modifier
.opcodeprefix
]);
3216 if (opc_spc
[t
->opcode_space
])
3217 fprintf (stdout
, "space %s ", opc_spc
[t
->opcode_space
]);
3218 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
3219 if (t
->extension_opcode
!= None
)
3220 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
3221 if (t
->opcode_modifier
.d
)
3222 fprintf (stdout
, "D");
3223 if (t
->opcode_modifier
.w
)
3224 fprintf (stdout
, "W");
3225 fprintf (stdout
, "\n");
3226 for (j
= 0; j
< t
->operands
; j
++)
3228 fprintf (stdout
, " #%d type ", j
+ 1);
3229 pt (t
->operand_types
[j
]);
3230 fprintf (stdout
, "\n");
3237 fprintf (stdout
, " operation %d\n", e
->X_op
);
3238 fprintf (stdout
, " add_number %" PRId64
" (%" PRIx64
")\n",
3239 (int64_t) e
->X_add_number
, (uint64_t) (valueT
) e
->X_add_number
);
3240 if (e
->X_add_symbol
)
3242 fprintf (stdout
, " add_symbol ");
3243 ps (e
->X_add_symbol
);
3244 fprintf (stdout
, "\n");
3248 fprintf (stdout
, " op_symbol ");
3249 ps (e
->X_op_symbol
);
3250 fprintf (stdout
, "\n");
3257 fprintf (stdout
, "%s type %s%s",
3259 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
3260 segment_name (S_GET_SEGMENT (s
)));
3263 static struct type_name
3265 i386_operand_type mask
;
3268 const type_names
[] =
3270 { { .bitfield
= { .class = Reg
, .byte
= 1 } }, "r8" },
3271 { { .bitfield
= { .class = Reg
, .word
= 1 } }, "r16" },
3272 { { .bitfield
= { .class = Reg
, .dword
= 1 } }, "r32" },
3273 { { .bitfield
= { .class = Reg
, .qword
= 1 } }, "r64" },
3274 { { .bitfield
= { .instance
= Accum
, .byte
= 1 } }, "acc8" },
3275 { { .bitfield
= { .instance
= Accum
, .word
= 1 } }, "acc16" },
3276 { { .bitfield
= { .instance
= Accum
, .dword
= 1 } }, "acc32" },
3277 { { .bitfield
= { .instance
= Accum
, .qword
= 1 } }, "acc64" },
3278 { { .bitfield
= { .imm8
= 1 } }, "i8" },
3279 { { .bitfield
= { .imm8s
= 1 } }, "i8s" },
3280 { { .bitfield
= { .imm16
= 1 } }, "i16" },
3281 { { .bitfield
= { .imm32
= 1 } }, "i32" },
3282 { { .bitfield
= { .imm32s
= 1 } }, "i32s" },
3283 { { .bitfield
= { .imm64
= 1 } }, "i64" },
3284 { { .bitfield
= { .imm1
= 1 } }, "i1" },
3285 { { .bitfield
= { .baseindex
= 1 } }, "BaseIndex" },
3286 { { .bitfield
= { .disp8
= 1 } }, "d8" },
3287 { { .bitfield
= { .disp16
= 1 } }, "d16" },
3288 { { .bitfield
= { .disp32
= 1 } }, "d32" },
3289 { { .bitfield
= { .disp64
= 1 } }, "d64" },
3290 { { .bitfield
= { .instance
= RegD
, .word
= 1 } }, "InOutPortReg" },
3291 { { .bitfield
= { .instance
= RegC
, .byte
= 1 } }, "ShiftCount" },
3292 { { .bitfield
= { .class = RegCR
} }, "control reg" },
3293 { { .bitfield
= { .class = RegTR
} }, "test reg" },
3294 { { .bitfield
= { .class = RegDR
} }, "debug reg" },
3295 { { .bitfield
= { .class = Reg
, .tbyte
= 1 } }, "FReg" },
3296 { { .bitfield
= { .instance
= Accum
, .tbyte
= 1 } }, "FAcc" },
3297 { { .bitfield
= { .class = SReg
} }, "SReg" },
3298 { { .bitfield
= { .class = RegMMX
} }, "rMMX" },
3299 { { .bitfield
= { .class = RegSIMD
, .xmmword
= 1 } }, "rXMM" },
3300 { { .bitfield
= { .class = RegSIMD
, .ymmword
= 1 } }, "rYMM" },
3301 { { .bitfield
= { .class = RegSIMD
, .zmmword
= 1 } }, "rZMM" },
3302 { { .bitfield
= { .class = RegSIMD
, .tmmword
= 1 } }, "rTMM" },
3303 { { .bitfield
= { .class = RegMask
} }, "Mask reg" },
3307 pt (i386_operand_type t
)
3310 i386_operand_type a
;
3312 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
3314 a
= operand_type_and (t
, type_names
[j
].mask
);
3315 if (operand_type_equal (&a
, &type_names
[j
].mask
))
3316 fprintf (stdout
, "%s, ", type_names
[j
].name
);
3321 #endif /* DEBUG386 */
3323 static bfd_reloc_code_real_type
3324 reloc (unsigned int size
,
3327 bfd_reloc_code_real_type other
)
3329 if (other
!= NO_RELOC
)
3331 reloc_howto_type
*rel
;
3336 case BFD_RELOC_X86_64_GOT32
:
3337 return BFD_RELOC_X86_64_GOT64
;
3339 case BFD_RELOC_X86_64_GOTPLT64
:
3340 return BFD_RELOC_X86_64_GOTPLT64
;
3342 case BFD_RELOC_X86_64_PLTOFF64
:
3343 return BFD_RELOC_X86_64_PLTOFF64
;
3345 case BFD_RELOC_X86_64_GOTPC32
:
3346 other
= BFD_RELOC_X86_64_GOTPC64
;
3348 case BFD_RELOC_X86_64_GOTPCREL
:
3349 other
= BFD_RELOC_X86_64_GOTPCREL64
;
3351 case BFD_RELOC_X86_64_TPOFF32
:
3352 other
= BFD_RELOC_X86_64_TPOFF64
;
3354 case BFD_RELOC_X86_64_DTPOFF32
:
3355 other
= BFD_RELOC_X86_64_DTPOFF64
;
3361 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3362 if (other
== BFD_RELOC_SIZE32
)
3365 other
= BFD_RELOC_SIZE64
;
3368 as_bad (_("there are no pc-relative size relocations"));
3374 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
3375 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
3378 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
3380 as_bad (_("unknown relocation (%u)"), other
);
3381 else if (size
!= bfd_get_reloc_size (rel
))
3382 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3383 bfd_get_reloc_size (rel
),
3385 else if (pcrel
&& !rel
->pc_relative
)
3386 as_bad (_("non-pc-relative relocation for pc-relative field"));
3387 else if ((rel
->complain_on_overflow
== complain_overflow_signed
3389 || (rel
->complain_on_overflow
== complain_overflow_unsigned
3391 as_bad (_("relocated field and relocation type differ in signedness"));
3400 as_bad (_("there are no unsigned pc-relative relocations"));
3403 case 1: return BFD_RELOC_8_PCREL
;
3404 case 2: return BFD_RELOC_16_PCREL
;
3405 case 4: return BFD_RELOC_32_PCREL
;
3406 case 8: return BFD_RELOC_64_PCREL
;
3408 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
3415 case 4: return BFD_RELOC_X86_64_32S
;
3420 case 1: return BFD_RELOC_8
;
3421 case 2: return BFD_RELOC_16
;
3422 case 4: return BFD_RELOC_32
;
3423 case 8: return BFD_RELOC_64
;
3425 as_bad (_("cannot do %s %u byte relocation"),
3426 sign
> 0 ? "signed" : "unsigned", size
);
3432 /* Here we decide which fixups can be adjusted to make them relative to
3433 the beginning of the section instead of the symbol. Basically we need
3434 to make sure that the dynamic relocations are done correctly, so in
3435 some cases we force the original symbol to be used. */
3438 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
3440 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3444 /* Don't adjust pc-relative references to merge sections in 64-bit
3446 if (use_rela_relocations
3447 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
3451 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3452 and changed later by validate_fix. */
3453 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
3454 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
3457 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3458 for size relocations. */
3459 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
3460 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
3461 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
3462 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
3463 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32X
3464 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
3465 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
3466 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
3467 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
3468 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
3469 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
3470 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
3471 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
3472 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
3473 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
3474 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
3475 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
3476 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCRELX
3477 || fixP
->fx_r_type
== BFD_RELOC_X86_64_REX_GOTPCRELX
3478 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
3479 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
3480 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
3481 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
3482 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
3483 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
3484 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
3485 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
3486 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
3487 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
3488 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
3489 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
3496 want_disp32 (const insn_template
*t
)
3498 return flag_code
!= CODE_64BIT
3499 || i
.prefix
[ADDR_PREFIX
]
3500 || (t
->mnem_off
== MN_lea
3501 && (!i
.types
[1].bitfield
.qword
3502 || t
->opcode_modifier
.size
== SIZE32
));
/* Classify x87 mnemonic MNEMONIC: 0 = not an FPU math op, 1 = plain FP
   op, 2 = integer op (fi*), 3 = control/state op (fldcw, fnstenv, ...).  */

static int
intel_float_operand (const char *mnemonic)
{
  /* Note that the value returned is meaningful only for opcodes with (memory)
     operands, hence the code here is free to improperly handle opcodes that
     have no operands (for better performance and smaller code).  */

  if (mnemonic[0] != 'f')
    return 0; /* non-math */

  switch (mnemonic[1])
    {
    /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
       the fs segment override prefix not currently handled because no
       call path can make opcodes without operands get here */
    case 'i':
      return 2 /* integer op */;
    case 'l':
      if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
	return 3; /* fldcw/fldenv */
      break;
    case 'n':
      if (mnemonic[2] != 'o' /* fnop */)
	return 3; /* non-waiting control op */
      break;
    case 'r':
      if (mnemonic[2] == 's')
	return 3; /* frstor/frstpm */
      break;
    case 's':
      if (mnemonic[2] == 'a')
	return 3; /* fsave */
      if (mnemonic[2] == 't')
	{
	  switch (mnemonic[3])
	    {
	    case 'c': /* fstcw */
	    case 'd': /* fstdw */
	    case 'e': /* fstenv */
	    case 's': /* fsts[gw] */
	      return 3;
	    }
	}
      break;
    case 'x':
      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
	return 0; /* fxsave/fxrstor are not really math ops */
      break;
    }

  return 1;
}
3559 install_template (const insn_template
*t
)
3565 /* Note that for pseudo prefixes this produces a length of 1. But for them
3566 the length isn't interesting at all. */
3567 for (l
= 1; l
< 4; ++l
)
3568 if (!(t
->base_opcode
>> (8 * l
)))
3571 i
.opcode_length
= l
;
3574 /* Build the VEX prefix. */
3577 build_vex_prefix (const insn_template
*t
)
3579 unsigned int register_specifier
;
3580 unsigned int vector_length
;
3583 /* Check register specifier. */
3584 if (i
.vex
.register_specifier
)
3586 register_specifier
=
3587 ~register_number (i
.vex
.register_specifier
) & 0xf;
3588 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
3591 register_specifier
= 0xf;
3593 /* Use 2-byte VEX prefix by swapping destination and source operand
3594 if there are more than 1 register operand. */
3595 if (i
.reg_operands
> 1
3596 && i
.vec_encoding
!= vex_encoding_vex3
3597 && i
.dir_encoding
== dir_encoding_default
3598 && i
.operands
== i
.reg_operands
3599 && operand_type_equal (&i
.types
[0], &i
.types
[i
.operands
- 1])
3600 && i
.tm
.opcode_space
== SPACE_0F
3601 && (i
.tm
.opcode_modifier
.load
|| i
.tm
.opcode_modifier
.d
)
3606 swap_2_operands (0, i
.operands
- 1);
3608 gas_assert (i
.rm
.mode
== 3);
3612 i
.rm
.regmem
= i
.rm
.reg
;
3615 if (i
.tm
.opcode_modifier
.d
)
3616 i
.tm
.base_opcode
^= (i
.tm
.base_opcode
& 0xee) != 0x6e
3617 ? Opcode_ExtD
: Opcode_SIMD_IntD
;
3618 else /* Use the next insn. */
3619 install_template (&t
[1]);
3622 /* Use 2-byte VEX prefix by swapping commutative source operands if there
3623 are no memory operands and at least 3 register ones. */
3624 if (i
.reg_operands
>= 3
3625 && i
.vec_encoding
!= vex_encoding_vex3
3626 && i
.reg_operands
== i
.operands
- i
.imm_operands
3627 && i
.tm
.opcode_modifier
.vex
3628 && i
.tm
.opcode_modifier
.commutative
3629 && (i
.tm
.opcode_modifier
.sse2avx
3630 || (optimize
> 1 && !i
.no_optimize
))
3632 && i
.vex
.register_specifier
3633 && !(i
.vex
.register_specifier
->reg_flags
& RegRex
))
3635 unsigned int xchg
= i
.operands
- i
.reg_operands
;
3637 gas_assert (i
.tm
.opcode_space
== SPACE_0F
);
3638 gas_assert (!i
.tm
.opcode_modifier
.sae
);
3639 gas_assert (operand_type_equal (&i
.types
[i
.operands
- 2],
3640 &i
.types
[i
.operands
- 3]));
3641 gas_assert (i
.rm
.mode
== 3);
3643 swap_2_operands (xchg
, xchg
+ 1);
3646 xchg
= i
.rm
.regmem
| 8;
3647 i
.rm
.regmem
= ~register_specifier
& 0xf;
3648 gas_assert (!(i
.rm
.regmem
& 8));
3649 i
.vex
.register_specifier
+= xchg
- i
.rm
.regmem
;
3650 register_specifier
= ~xchg
& 0xf;
3653 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3654 vector_length
= avxscalar
;
3655 else if (i
.tm
.opcode_modifier
.vex
== VEX256
)
3657 else if (dot_insn () && i
.tm
.opcode_modifier
.vex
== VEX128
)
3663 /* Determine vector length from the last multi-length vector
3666 for (op
= t
->operands
; op
--;)
3667 if (t
->operand_types
[op
].bitfield
.xmmword
3668 && t
->operand_types
[op
].bitfield
.ymmword
3669 && i
.types
[op
].bitfield
.ymmword
)
3676 /* Check the REX.W bit and VEXW. */
3677 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3678 w
= (vexwig
== vexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3679 else if (i
.tm
.opcode_modifier
.vexw
)
3680 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3682 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: vexwig
== vexw1
) ? 1 : 0;
3684 /* Use 2-byte VEX prefix if possible. */
3686 && i
.vec_encoding
!= vex_encoding_vex3
3687 && i
.tm
.opcode_space
== SPACE_0F
3688 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
3690 /* 2-byte VEX prefix. */
3694 i
.vex
.bytes
[0] = 0xc5;
3696 /* Check the REX.R bit. */
3697 r
= (i
.rex
& REX_R
) ? 0 : 1;
3698 i
.vex
.bytes
[1] = (r
<< 7
3699 | register_specifier
<< 3
3700 | vector_length
<< 2
3701 | i
.tm
.opcode_modifier
.opcodeprefix
);
3705 /* 3-byte VEX prefix. */
3708 switch (i
.tm
.opcode_space
)
3713 i
.vex
.bytes
[0] = 0xc4;
3718 i
.vex
.bytes
[0] = 0x8f;
3724 /* The high 3 bits of the second VEX byte are 1's compliment
3725 of RXB bits from REX. */
3726 i
.vex
.bytes
[1] = ((~i
.rex
& 7) << 5)
3727 | (!dot_insn () ? i
.tm
.opcode_space
3728 : i
.insn_opcode_space
);
3730 i
.vex
.bytes
[2] = (w
<< 7
3731 | register_specifier
<< 3
3732 | vector_length
<< 2
3733 | i
.tm
.opcode_modifier
.opcodeprefix
);
3738 is_evex_encoding (const insn_template
*t
)
3740 return t
->opcode_modifier
.evex
|| t
->opcode_modifier
.disp8memshift
3741 || t
->opcode_modifier
.broadcast
|| t
->opcode_modifier
.masking
3742 || t
->opcode_modifier
.sae
;
3746 is_any_vex_encoding (const insn_template
*t
)
3748 return t
->opcode_modifier
.vex
|| is_evex_encoding (t
);
3752 get_broadcast_bytes (const insn_template
*t
, bool diag
)
3754 unsigned int op
, bytes
;
3755 const i386_operand_type
*types
;
3757 if (i
.broadcast
.type
)
3758 return (1 << (t
->opcode_modifier
.broadcast
- 1)) * i
.broadcast
.type
;
3760 gas_assert (intel_syntax
);
3762 for (op
= 0; op
< t
->operands
; ++op
)
3763 if (t
->operand_types
[op
].bitfield
.baseindex
)
3766 gas_assert (op
< t
->operands
);
3768 if (t
->opcode_modifier
.evex
3769 && t
->opcode_modifier
.evex
!= EVEXDYN
)
3770 switch (i
.broadcast
.bytes
)
3773 if (t
->operand_types
[op
].bitfield
.word
)
3777 if (t
->operand_types
[op
].bitfield
.dword
)
3781 if (t
->operand_types
[op
].bitfield
.qword
)
3785 if (t
->operand_types
[op
].bitfield
.xmmword
)
3787 if (t
->operand_types
[op
].bitfield
.ymmword
)
3789 if (t
->operand_types
[op
].bitfield
.zmmword
)
3796 gas_assert (op
+ 1 < t
->operands
);
3798 if (t
->operand_types
[op
+ 1].bitfield
.xmmword
3799 + t
->operand_types
[op
+ 1].bitfield
.ymmword
3800 + t
->operand_types
[op
+ 1].bitfield
.zmmword
> 1)
3802 types
= &i
.types
[op
+ 1];
3805 else /* Ambiguous - guess with a preference to non-AVX512VL forms. */
3806 types
= &t
->operand_types
[op
];
3808 if (types
->bitfield
.zmmword
)
3810 else if (types
->bitfield
.ymmword
)
3816 as_warn (_("ambiguous broadcast for `%s', using %u-bit form"),
3817 insn_name (t
), bytes
* 8);
3822 /* Build the EVEX prefix. */
3825 build_evex_prefix (void)
3827 unsigned int register_specifier
, w
;
3828 rex_byte vrex_used
= 0;
3830 /* Check register specifier. */
3831 if (i
.vex
.register_specifier
)
3833 gas_assert ((i
.vrex
& REX_X
) == 0);
3835 register_specifier
= i
.vex
.register_specifier
->reg_num
;
3836 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
3837 register_specifier
+= 8;
3838 /* The upper 16 registers are encoded in the fourth byte of the
3840 if (!(i
.vex
.register_specifier
->reg_flags
& RegVRex
))
3841 i
.vex
.bytes
[3] = 0x8;
3842 register_specifier
= ~register_specifier
& 0xf;
3846 register_specifier
= 0xf;
3848 /* Encode upper 16 vector index register in the fourth byte of
3850 if (!(i
.vrex
& REX_X
))
3851 i
.vex
.bytes
[3] = 0x8;
3856 /* 4 byte EVEX prefix. */
3858 i
.vex
.bytes
[0] = 0x62;
3860 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3862 gas_assert (i
.tm
.opcode_space
>= SPACE_0F
);
3863 gas_assert (i
.tm
.opcode_space
<= SPACE_EVEXMAP6
);
3864 i
.vex
.bytes
[1] = ((~i
.rex
& 7) << 5)
3865 | (!dot_insn () ? i
.tm
.opcode_space
3866 : i
.insn_opcode_space
);
3868 /* The fifth bit of the second EVEX byte is 1's compliment of the
3869 REX_R bit in VREX. */
3870 if (!(i
.vrex
& REX_R
))
3871 i
.vex
.bytes
[1] |= 0x10;
3875 if ((i
.reg_operands
+ i
.imm_operands
) == i
.operands
)
3877 /* When all operands are registers, the REX_X bit in REX is not
3878 used. We reuse it to encode the upper 16 registers, which is
3879 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3880 as 1's compliment. */
3881 if ((i
.vrex
& REX_B
))
3884 i
.vex
.bytes
[1] &= ~0x40;
3888 /* EVEX instructions shouldn't need the REX prefix. */
3889 i
.vrex
&= ~vrex_used
;
3890 gas_assert (i
.vrex
== 0);
3892 /* Check the REX.W bit and VEXW. */
3893 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3894 w
= (evexwig
== evexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3895 else if (i
.tm
.opcode_modifier
.vexw
)
3896 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3898 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: evexwig
== evexw1
) ? 1 : 0;
3900 /* The third byte of the EVEX prefix. */
3901 i
.vex
.bytes
[2] = ((w
<< 7)
3902 | (register_specifier
<< 3)
3903 | 4 /* Encode the U bit. */
3904 | i
.tm
.opcode_modifier
.opcodeprefix
);
3906 /* The fourth byte of the EVEX prefix. */
3907 /* The zeroing-masking bit. */
3908 if (i
.mask
.reg
&& i
.mask
.zeroing
)
3909 i
.vex
.bytes
[3] |= 0x80;
3911 /* Don't always set the broadcast bit if there is no RC. */
3912 if (i
.rounding
.type
== rc_none
)
3914 /* Encode the vector length. */
3915 unsigned int vec_length
;
3917 if (!i
.tm
.opcode_modifier
.evex
3918 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
3922 /* Determine vector length from the last multi-length vector
3924 for (op
= i
.operands
; op
--;)
3925 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
3926 + i
.tm
.operand_types
[op
].bitfield
.ymmword
3927 + i
.tm
.operand_types
[op
].bitfield
.zmmword
> 1)
3929 if (i
.types
[op
].bitfield
.zmmword
)
3931 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3934 else if (i
.types
[op
].bitfield
.ymmword
)
3936 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3939 else if (i
.types
[op
].bitfield
.xmmword
)
3941 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3944 else if ((i
.broadcast
.type
|| i
.broadcast
.bytes
)
3945 && op
== i
.broadcast
.operand
)
3947 switch (get_broadcast_bytes (&i
.tm
, true))
3950 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3953 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3956 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3965 if (op
>= MAX_OPERANDS
)
3969 switch (i
.tm
.opcode_modifier
.evex
)
3971 case EVEXLIG
: /* LL' is ignored */
3972 vec_length
= evexlig
<< 5;
3975 vec_length
= 0 << 5;
3978 vec_length
= 1 << 5;
3981 vec_length
= 2 << 5;
3986 vec_length
= 3 << 5;
3994 i
.vex
.bytes
[3] |= vec_length
;
3995 /* Encode the broadcast bit. */
3996 if (i
.broadcast
.type
|| i
.broadcast
.bytes
)
3997 i
.vex
.bytes
[3] |= 0x10;
3999 else if (i
.rounding
.type
!= saeonly
)
4000 i
.vex
.bytes
[3] |= 0x10 | (i
.rounding
.type
<< 5);
4002 i
.vex
.bytes
[3] |= 0x10 | (evexrcig
<< 5);
4005 i
.vex
.bytes
[3] |= i
.mask
.reg
->reg_num
;
4009 process_immext (void)
4013 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
4014 which is coded in the same place as an 8-bit immediate field
4015 would be. Here we fake an 8-bit immediate operand from the
4016 opcode suffix stored in tm.extension_opcode.
4018 AVX instructions also use this encoding, for some of
4019 3 argument instructions. */
4021 gas_assert (i
.imm_operands
<= 1
4023 || (is_any_vex_encoding (&i
.tm
)
4024 && i
.operands
<= 4)));
4026 exp
= &im_expressions
[i
.imm_operands
++];
4027 i
.op
[i
.operands
].imms
= exp
;
4028 i
.types
[i
.operands
].bitfield
.imm8
= 1;
4030 exp
->X_op
= O_constant
;
4031 exp
->X_add_number
= i
.tm
.extension_opcode
;
4032 i
.tm
.extension_opcode
= None
;
4039 switch (i
.tm
.opcode_modifier
.prefixok
)
4047 as_bad (_("invalid instruction `%s' after `%s'"),
4048 insn_name (&i
.tm
), i
.hle_prefix
);
4051 if (i
.prefix
[LOCK_PREFIX
])
4053 as_bad (_("missing `lock' with `%s'"), i
.hle_prefix
);
4057 case PrefixHLERelease
:
4058 if (i
.prefix
[HLE_PREFIX
] != XRELEASE_PREFIX_OPCODE
)
4060 as_bad (_("instruction `%s' after `xacquire' not allowed"),
4064 if (i
.mem_operands
== 0 || !(i
.flags
[i
.operands
- 1] & Operand_Mem
))
4066 as_bad (_("memory destination needed for instruction `%s'"
4067 " after `xrelease'"), insn_name (&i
.tm
));
4074 /* Encode aligned vector move as unaligned vector move. */
4077 encode_with_unaligned_vector_move (void)
4079 switch (i
.tm
.base_opcode
)
4081 case 0x28: /* Load instructions. */
4082 case 0x29: /* Store instructions. */
4083 /* movaps/movapd/vmovaps/vmovapd. */
4084 if (i
.tm
.opcode_space
== SPACE_0F
4085 && i
.tm
.opcode_modifier
.opcodeprefix
<= PREFIX_0X66
)
4086 i
.tm
.base_opcode
= 0x10 | (i
.tm
.base_opcode
& 1);
4088 case 0x6f: /* Load instructions. */
4089 case 0x7f: /* Store instructions. */
4090 /* movdqa/vmovdqa/vmovdqa64/vmovdqa32. */
4091 if (i
.tm
.opcode_space
== SPACE_0F
4092 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0X66
)
4093 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_0XF3
;
4100 /* Try the shortest encoding by shortening operand size. */
4103 optimize_encoding (void)
4107 if (i
.tm
.mnem_off
== MN_lea
)
4110 lea symbol, %rN -> mov $symbol, %rN
4111 lea (%rM), %rN -> mov %rM, %rN
4112 lea (,%rM,1), %rN -> mov %rM, %rN
4114 and in 32-bit mode for 16-bit addressing
4116 lea (%rM), %rN -> movzx %rM, %rN
4118 and in 64-bit mode zap 32-bit addressing in favor of using a
4119 32-bit (or less) destination.
4121 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
4123 if (!i
.op
[1].regs
->reg_type
.bitfield
.word
)
4124 i
.tm
.opcode_modifier
.size
= SIZE32
;
4125 i
.prefix
[ADDR_PREFIX
] = 0;
4128 if (!i
.index_reg
&& !i
.base_reg
)
4131 lea symbol, %rN -> mov $symbol, %rN
4133 if (flag_code
== CODE_64BIT
)
4135 /* Don't transform a relocation to a 16-bit one. */
4137 && i
.op
[0].disps
->X_op
!= O_constant
4138 && i
.op
[1].regs
->reg_type
.bitfield
.word
)
4141 if (!i
.op
[1].regs
->reg_type
.bitfield
.qword
4142 || i
.tm
.opcode_modifier
.size
== SIZE32
)
4144 i
.tm
.base_opcode
= 0xb8;
4145 i
.tm
.opcode_modifier
.modrm
= 0;
4146 if (!i
.op
[1].regs
->reg_type
.bitfield
.word
)
4147 i
.types
[0].bitfield
.imm32
= 1;
4150 i
.tm
.opcode_modifier
.size
= SIZE16
;
4151 i
.types
[0].bitfield
.imm16
= 1;
4156 /* Subject to further optimization below. */
4157 i
.tm
.base_opcode
= 0xc7;
4158 i
.tm
.extension_opcode
= 0;
4159 i
.types
[0].bitfield
.imm32s
= 1;
4160 i
.types
[0].bitfield
.baseindex
= 0;
4163 /* Outside of 64-bit mode address and operand sizes have to match if
4164 a relocation is involved, as otherwise we wouldn't (currently) or
4165 even couldn't express the relocation correctly. */
4166 else if (i
.op
[0].disps
4167 && i
.op
[0].disps
->X_op
!= O_constant
4168 && ((!i
.prefix
[ADDR_PREFIX
])
4169 != (flag_code
== CODE_32BIT
4170 ? i
.op
[1].regs
->reg_type
.bitfield
.dword
4171 : i
.op
[1].regs
->reg_type
.bitfield
.word
)))
4173 /* In 16-bit mode converting LEA with 16-bit addressing and a 32-bit
4174 destination is going to grow encoding size. */
4175 else if (flag_code
== CODE_16BIT
4176 && (optimize
<= 1 || optimize_for_space
)
4177 && !i
.prefix
[ADDR_PREFIX
]
4178 && i
.op
[1].regs
->reg_type
.bitfield
.dword
)
4182 i
.tm
.base_opcode
= 0xb8;
4183 i
.tm
.opcode_modifier
.modrm
= 0;
4184 if (i
.op
[1].regs
->reg_type
.bitfield
.dword
)
4185 i
.types
[0].bitfield
.imm32
= 1;
4187 i
.types
[0].bitfield
.imm16
= 1;
4190 && i
.op
[0].disps
->X_op
== O_constant
4191 && i
.op
[1].regs
->reg_type
.bitfield
.dword
4192 /* NB: Add () to !i.prefix[ADDR_PREFIX] to silence
4194 && (!i
.prefix
[ADDR_PREFIX
]) != (flag_code
== CODE_32BIT
))
4195 i
.op
[0].disps
->X_add_number
&= 0xffff;
4198 i
.tm
.operand_types
[0] = i
.types
[0];
4202 i
.op
[0].imms
= &im_expressions
[0];
4203 i
.op
[0].imms
->X_op
= O_absent
;
4206 else if (i
.op
[0].disps
4207 && (i
.op
[0].disps
->X_op
!= O_constant
4208 || i
.op
[0].disps
->X_add_number
))
4213 lea (%rM), %rN -> mov %rM, %rN
4214 lea (,%rM,1), %rN -> mov %rM, %rN
4215 lea (%rM), %rN -> movzx %rM, %rN
4217 const reg_entry
*addr_reg
;
4219 if (!i
.index_reg
&& i
.base_reg
->reg_num
!= RegIP
)
4220 addr_reg
= i
.base_reg
;
4221 else if (!i
.base_reg
4222 && i
.index_reg
->reg_num
!= RegIZ
4223 && !i
.log2_scale_factor
)
4224 addr_reg
= i
.index_reg
;
4228 if (addr_reg
->reg_type
.bitfield
.word
4229 && i
.op
[1].regs
->reg_type
.bitfield
.dword
)
4231 if (flag_code
!= CODE_32BIT
)
4233 i
.tm
.opcode_space
= SPACE_0F
;
4234 i
.tm
.base_opcode
= 0xb7;
4237 i
.tm
.base_opcode
= 0x8b;
4239 if (addr_reg
->reg_type
.bitfield
.dword
4240 && i
.op
[1].regs
->reg_type
.bitfield
.qword
)
4241 i
.tm
.opcode_modifier
.size
= SIZE32
;
4243 i
.op
[0].regs
= addr_reg
;
4248 i
.disp_operands
= 0;
4249 i
.prefix
[ADDR_PREFIX
] = 0;
4250 i
.prefix
[SEG_PREFIX
] = 0;
4254 if (optimize_for_space
4255 && i
.tm
.mnem_off
== MN_test
4256 && i
.reg_operands
== 1
4257 && i
.imm_operands
== 1
4258 && !i
.types
[1].bitfield
.byte
4259 && i
.op
[0].imms
->X_op
== O_constant
4260 && fits_in_imm7 (i
.op
[0].imms
->X_add_number
))
4263 test $imm7, %r64/%r32/%r16 -> test $imm7, %r8
4265 unsigned int base_regnum
= i
.op
[1].regs
->reg_num
;
4266 if (flag_code
== CODE_64BIT
|| base_regnum
< 4)
4268 i
.types
[1].bitfield
.byte
= 1;
4269 /* Ignore the suffix. */
4271 /* Convert to byte registers. */
4272 if (i
.types
[1].bitfield
.word
)
4274 else if (i
.types
[1].bitfield
.dword
)
4278 if (!(i
.op
[1].regs
->reg_flags
& RegRex
) && base_regnum
< 4)
4283 else if (flag_code
== CODE_64BIT
4284 && i
.tm
.opcode_space
== SPACE_BASE
4285 && ((i
.types
[1].bitfield
.qword
4286 && i
.reg_operands
== 1
4287 && i
.imm_operands
== 1
4288 && i
.op
[0].imms
->X_op
== O_constant
4289 && ((i
.tm
.base_opcode
== 0xb8
4290 && i
.tm
.extension_opcode
== None
4291 && fits_in_unsigned_long (i
.op
[0].imms
->X_add_number
))
4292 || (fits_in_imm31 (i
.op
[0].imms
->X_add_number
)
4293 && (i
.tm
.base_opcode
== 0x24
4294 || (i
.tm
.base_opcode
== 0x80
4295 && i
.tm
.extension_opcode
== 0x4)
4296 || i
.tm
.mnem_off
== MN_test
4297 || ((i
.tm
.base_opcode
| 1) == 0xc7
4298 && i
.tm
.extension_opcode
== 0x0)))
4299 || (fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4300 && i
.tm
.base_opcode
== 0x83
4301 && i
.tm
.extension_opcode
== 0x4)))
4302 || (i
.types
[0].bitfield
.qword
4303 && ((i
.reg_operands
== 2
4304 && i
.op
[0].regs
== i
.op
[1].regs
4305 && (i
.tm
.mnem_off
== MN_xor
4306 || i
.tm
.mnem_off
== MN_sub
))
4307 || i
.tm
.mnem_off
== MN_clr
))))
4310 andq $imm31, %r64 -> andl $imm31, %r32
4311 andq $imm7, %r64 -> andl $imm7, %r32
4312 testq $imm31, %r64 -> testl $imm31, %r32
4313 xorq %r64, %r64 -> xorl %r32, %r32
4314 subq %r64, %r64 -> subl %r32, %r32
4315 movq $imm31, %r64 -> movl $imm31, %r32
4316 movq $imm32, %r64 -> movl $imm32, %r32
4318 i
.tm
.opcode_modifier
.size
= SIZE32
;
4321 i
.types
[0].bitfield
.imm32
= 1;
4322 i
.types
[0].bitfield
.imm32s
= 0;
4323 i
.types
[0].bitfield
.imm64
= 0;
4327 i
.types
[0].bitfield
.dword
= 1;
4328 i
.types
[0].bitfield
.qword
= 0;
4330 i
.types
[1].bitfield
.dword
= 1;
4331 i
.types
[1].bitfield
.qword
= 0;
4332 if (i
.tm
.mnem_off
== MN_mov
|| i
.tm
.mnem_off
== MN_lea
)
4335 movq $imm31, %r64 -> movl $imm31, %r32
4336 movq $imm32, %r64 -> movl $imm32, %r32
4338 i
.tm
.operand_types
[0].bitfield
.imm32
= 1;
4339 i
.tm
.operand_types
[0].bitfield
.imm32s
= 0;
4340 i
.tm
.operand_types
[0].bitfield
.imm64
= 0;
4341 if ((i
.tm
.base_opcode
| 1) == 0xc7)
4344 movq $imm31, %r64 -> movl $imm31, %r32
4346 i
.tm
.base_opcode
= 0xb8;
4347 i
.tm
.extension_opcode
= None
;
4348 i
.tm
.opcode_modifier
.w
= 0;
4349 i
.tm
.opcode_modifier
.modrm
= 0;
4353 else if (optimize
> 1
4354 && !optimize_for_space
4355 && i
.reg_operands
== 2
4356 && i
.op
[0].regs
== i
.op
[1].regs
4357 && (i
.tm
.mnem_off
== MN_and
|| i
.tm
.mnem_off
== MN_or
)
4358 && (flag_code
!= CODE_64BIT
|| !i
.types
[0].bitfield
.dword
))
4361 andb %rN, %rN -> testb %rN, %rN
4362 andw %rN, %rN -> testw %rN, %rN
4363 andq %rN, %rN -> testq %rN, %rN
4364 orb %rN, %rN -> testb %rN, %rN
4365 orw %rN, %rN -> testw %rN, %rN
4366 orq %rN, %rN -> testq %rN, %rN
4368 and outside of 64-bit mode
4370 andl %rN, %rN -> testl %rN, %rN
4371 orl %rN, %rN -> testl %rN, %rN
4373 i
.tm
.base_opcode
= 0x84 | (i
.tm
.base_opcode
& 1);
4375 else if (i
.tm
.base_opcode
== 0xba
4376 && i
.tm
.opcode_space
== SPACE_0F
4377 && i
.reg_operands
== 1
4378 && i
.op
[0].imms
->X_op
== O_constant
4379 && i
.op
[0].imms
->X_add_number
>= 0)
4382 btw $n, %rN -> btl $n, %rN (outside of 16-bit mode, n < 16)
4383 btq $n, %rN -> btl $n, %rN (in 64-bit mode, n < 32, N < 8)
4384 btl $n, %rN -> btw $n, %rN (in 16-bit mode, n < 16)
4386 With <BT> one of bts, btr, and bts also:
4387 <BT>w $n, %rN -> btl $n, %rN (in 32-bit mode, n < 16)
4388 <BT>l $n, %rN -> btw $n, %rN (in 16-bit mode, n < 16)
4393 if (i
.tm
.extension_opcode
!= 4)
4395 if (i
.types
[1].bitfield
.qword
4396 && i
.op
[0].imms
->X_add_number
< 32
4397 && !(i
.op
[1].regs
->reg_flags
& RegRex
))
4398 i
.tm
.opcode_modifier
.size
= SIZE32
;
4401 if (i
.types
[1].bitfield
.word
4402 && i
.op
[0].imms
->X_add_number
< 16)
4403 i
.tm
.opcode_modifier
.size
= SIZE32
;
4406 if (i
.op
[0].imms
->X_add_number
< 16)
4407 i
.tm
.opcode_modifier
.size
= SIZE16
;
4411 else if (i
.reg_operands
== 3
4412 && i
.op
[0].regs
== i
.op
[1].regs
4413 && !i
.types
[2].bitfield
.xmmword
4414 && (i
.tm
.opcode_modifier
.vex
4415 || ((!i
.mask
.reg
|| i
.mask
.zeroing
)
4416 && is_evex_encoding (&i
.tm
)
4417 && (i
.vec_encoding
!= vex_encoding_evex
4418 || cpu_arch_isa_flags
.bitfield
.cpuavx512vl
4419 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
4420 || (i
.tm
.operand_types
[2].bitfield
.zmmword
4421 && i
.types
[2].bitfield
.ymmword
))))
4422 && i
.tm
.opcode_space
== SPACE_0F
4423 && ((i
.tm
.base_opcode
| 2) == 0x57
4424 || i
.tm
.base_opcode
== 0xdf
4425 || i
.tm
.base_opcode
== 0xef
4426 || (i
.tm
.base_opcode
| 3) == 0xfb
4427 || i
.tm
.base_opcode
== 0x42
4428 || i
.tm
.base_opcode
== 0x47))
4431 VOP, one of vandnps, vandnpd, vxorps, vxorpd, vpsubb, vpsubd,
4433 EVEX VOP %zmmM, %zmmM, %zmmN
4434 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4435 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4436 EVEX VOP %ymmM, %ymmM, %ymmN
4437 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4438 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4439 VEX VOP %ymmM, %ymmM, %ymmN
4440 -> VEX VOP %xmmM, %xmmM, %xmmN
4441 VOP, one of vpandn and vpxor:
4442 VEX VOP %ymmM, %ymmM, %ymmN
4443 -> VEX VOP %xmmM, %xmmM, %xmmN
4444 VOP, one of vpandnd and vpandnq:
4445 EVEX VOP %zmmM, %zmmM, %zmmN
4446 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4447 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4448 EVEX VOP %ymmM, %ymmM, %ymmN
4449 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4450 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4451 VOP, one of vpxord and vpxorq:
4452 EVEX VOP %zmmM, %zmmM, %zmmN
4453 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4454 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4455 EVEX VOP %ymmM, %ymmM, %ymmN
4456 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4457 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4458 VOP, one of kxord and kxorq:
4459 VEX VOP %kM, %kM, %kN
4460 -> VEX kxorw %kM, %kM, %kN
4461 VOP, one of kandnd and kandnq:
4462 VEX VOP %kM, %kM, %kN
4463 -> VEX kandnw %kM, %kM, %kN
4465 if (is_evex_encoding (&i
.tm
))
4467 if (i
.vec_encoding
!= vex_encoding_evex
)
4469 i
.tm
.opcode_modifier
.vex
= VEX128
;
4470 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4471 i
.tm
.opcode_modifier
.evex
= 0;
4473 else if (optimize
> 1)
4474 i
.tm
.opcode_modifier
.evex
= EVEX128
;
4478 else if (i
.tm
.operand_types
[0].bitfield
.class == RegMask
)
4480 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_NONE
;
4481 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4484 i
.tm
.opcode_modifier
.vex
= VEX128
;
4486 if (i
.tm
.opcode_modifier
.vex
)
4487 for (j
= 0; j
< 3; j
++)
4489 i
.types
[j
].bitfield
.xmmword
= 1;
4490 i
.types
[j
].bitfield
.ymmword
= 0;
4493 else if (i
.vec_encoding
!= vex_encoding_evex
4494 && !i
.types
[0].bitfield
.zmmword
4495 && !i
.types
[1].bitfield
.zmmword
4497 && !i
.broadcast
.type
4498 && !i
.broadcast
.bytes
4499 && is_evex_encoding (&i
.tm
)
4500 && ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x6f
4501 || (i
.tm
.base_opcode
& ~4) == 0xdb
4502 || (i
.tm
.base_opcode
& ~4) == 0xeb)
4503 && i
.tm
.extension_opcode
== None
)
4506 VOP, one of vmovdqa32, vmovdqa64, vmovdqu8, vmovdqu16,
4507 vmovdqu32 and vmovdqu64:
4508 EVEX VOP %xmmM, %xmmN
4509 -> VEX vmovdqa|vmovdqu %xmmM, %xmmN (M and N < 16)
4510 EVEX VOP %ymmM, %ymmN
4511 -> VEX vmovdqa|vmovdqu %ymmM, %ymmN (M and N < 16)
4513 -> VEX vmovdqa|vmovdqu %xmmM, mem (M < 16)
4515 -> VEX vmovdqa|vmovdqu %ymmM, mem (M < 16)
4517 -> VEX mvmovdqa|vmovdquem, %xmmN (N < 16)
4519 -> VEX vmovdqa|vmovdqu mem, %ymmN (N < 16)
4520 VOP, one of vpand, vpandn, vpor, vpxor:
4521 EVEX VOP{d,q} %xmmL, %xmmM, %xmmN
4522 -> VEX VOP %xmmL, %xmmM, %xmmN (L, M, and N < 16)
4523 EVEX VOP{d,q} %ymmL, %ymmM, %ymmN
4524 -> VEX VOP %ymmL, %ymmM, %ymmN (L, M, and N < 16)
4525 EVEX VOP{d,q} mem, %xmmM, %xmmN
4526 -> VEX VOP mem, %xmmM, %xmmN (M and N < 16)
4527 EVEX VOP{d,q} mem, %ymmM, %ymmN
4528 -> VEX VOP mem, %ymmM, %ymmN (M and N < 16)
4530 for (j
= 0; j
< i
.operands
; j
++)
4531 if (operand_type_check (i
.types
[j
], disp
)
4532 && i
.op
[j
].disps
->X_op
== O_constant
)
4534 /* Since the VEX prefix has 2 or 3 bytes, the EVEX prefix
4535 has 4 bytes, EVEX Disp8 has 1 byte and VEX Disp32 has 4
4536 bytes, we choose EVEX Disp8 over VEX Disp32. */
4537 int evex_disp8
, vex_disp8
;
4538 unsigned int memshift
= i
.memshift
;
4539 offsetT n
= i
.op
[j
].disps
->X_add_number
;
4541 evex_disp8
= fits_in_disp8 (n
);
4543 vex_disp8
= fits_in_disp8 (n
);
4544 if (evex_disp8
!= vex_disp8
)
4546 i
.memshift
= memshift
;
4550 i
.types
[j
].bitfield
.disp8
= vex_disp8
;
4553 if ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x6f
4554 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
)
4555 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_0XF3
;
4556 i
.tm
.opcode_modifier
.vex
4557 = i
.types
[0].bitfield
.ymmword
? VEX256
: VEX128
;
4558 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4559 /* VPAND, VPOR, and VPXOR are commutative. */
4560 if (i
.reg_operands
== 3 && i
.tm
.base_opcode
!= 0xdf)
4561 i
.tm
.opcode_modifier
.commutative
= 1;
4562 i
.tm
.opcode_modifier
.evex
= 0;
4563 i
.tm
.opcode_modifier
.masking
= 0;
4564 i
.tm
.opcode_modifier
.broadcast
= 0;
4565 i
.tm
.opcode_modifier
.disp8memshift
= 0;
4568 i
.types
[j
].bitfield
.disp8
4569 = fits_in_disp8 (i
.op
[j
].disps
->X_add_number
);
4571 else if (optimize_for_space
4572 && i
.tm
.base_opcode
== 0x29
4573 && i
.tm
.opcode_space
== SPACE_0F38
4574 && i
.operands
== i
.reg_operands
4575 && i
.op
[0].regs
== i
.op
[1].regs
4576 && (!i
.tm
.opcode_modifier
.vex
4577 || !(i
.op
[0].regs
->reg_flags
& RegRex
))
4578 && !is_evex_encoding (&i
.tm
))
4581 pcmpeqq %xmmN, %xmmN -> pcmpeqd %xmmN, %xmmN
4582 vpcmpeqq %xmmN, %xmmN, %xmmM -> vpcmpeqd %xmmN, %xmmN, %xmmM (N < 8)
4583 vpcmpeqq %ymmN, %ymmN, %ymmM -> vpcmpeqd %ymmN, %ymmN, %ymmM (N < 8)
4585 i
.tm
.opcode_space
= SPACE_0F
;
4586 i
.tm
.base_opcode
= 0x76;
4588 else if (((i
.tm
.base_opcode
>= 0x64
4589 && i
.tm
.base_opcode
<= 0x66
4590 && i
.tm
.opcode_space
== SPACE_0F
)
4591 || (i
.tm
.base_opcode
== 0x37
4592 && i
.tm
.opcode_space
== SPACE_0F38
))
4593 && i
.operands
== i
.reg_operands
4594 && i
.op
[0].regs
== i
.op
[1].regs
4595 && !is_evex_encoding (&i
.tm
))
4598 pcmpgt[bwd] %mmN, %mmN -> pxor %mmN, %mmN
4599 pcmpgt[bwdq] %xmmN, %xmmN -> pxor %xmmN, %xmmN
4600 vpcmpgt[bwdq] %xmmN, %xmmN, %xmmM -> vpxor %xmmN, %xmmN, %xmmM (N < 8)
4601 vpcmpgt[bwdq] %xmmN, %xmmN, %xmmM -> vpxor %xmm0, %xmm0, %xmmM (N > 7)
4602 vpcmpgt[bwdq] %ymmN, %ymmN, %ymmM -> vpxor %ymmN, %ymmN, %ymmM (N < 8)
4603 vpcmpgt[bwdq] %ymmN, %ymmN, %ymmM -> vpxor %ymm0, %ymm0, %ymmM (N > 7)
4605 i
.tm
.opcode_space
= SPACE_0F
;
4606 i
.tm
.base_opcode
= 0xef;
4607 if (i
.tm
.opcode_modifier
.vex
&& (i
.op
[0].regs
->reg_flags
& RegRex
))
4609 if (i
.operands
== 2)
4611 gas_assert (i
.tm
.opcode_modifier
.sse2avx
);
4617 i
.op
[2].regs
= i
.op
[0].regs
;
4618 i
.types
[2] = i
.types
[0];
4619 i
.flags
[2] = i
.flags
[0];
4620 i
.tm
.operand_types
[2] = i
.tm
.operand_types
[0];
4622 i
.tm
.opcode_modifier
.sse2avx
= 0;
4624 i
.op
[0].regs
-= i
.op
[0].regs
->reg_num
+ 8;
4625 i
.op
[1].regs
= i
.op
[0].regs
;
4628 else if (optimize_for_space
4629 && i
.tm
.base_opcode
== 0x59
4630 && i
.tm
.opcode_space
== SPACE_0F38
4631 && i
.operands
== i
.reg_operands
4632 && i
.tm
.opcode_modifier
.vex
4633 && !(i
.op
[0].regs
->reg_flags
& RegRex
)
4634 && i
.op
[0].regs
->reg_type
.bitfield
.xmmword
4635 && i
.vec_encoding
!= vex_encoding_vex3
)
4638 vpbroadcastq %xmmN, %xmmM -> vpunpcklqdq %xmmN, %xmmN, %xmmM (N < 8)
4640 i
.tm
.opcode_space
= SPACE_0F
;
4641 i
.tm
.base_opcode
= 0x6c;
4642 i
.tm
.opcode_modifier
.vexvvvv
= 1;
4648 i
.op
[2].regs
= i
.op
[0].regs
;
4649 i
.types
[2] = i
.types
[0];
4650 i
.flags
[2] = i
.flags
[0];
4651 i
.tm
.operand_types
[2] = i
.tm
.operand_types
[0];
4653 swap_2_operands (1, 2);
4657 /* Return non-zero for load instruction. */
4663 int any_vex_p
= is_any_vex_encoding (&i
.tm
);
4664 unsigned int base_opcode
= i
.tm
.base_opcode
| 1;
4668 /* Anysize insns: lea, invlpg, clflush, prefetch*, bndmk, bndcl, bndcu,
4669 bndcn, bndstx, bndldx, clflushopt, clwb, cldemote. */
4670 if (i
.tm
.opcode_modifier
.operandconstraint
== ANY_SIZE
)
4674 if (i
.tm
.mnem_off
== MN_pop
)
4678 if (i
.tm
.opcode_space
== SPACE_BASE
)
4681 if (i
.tm
.base_opcode
== 0x9d
4682 || i
.tm
.base_opcode
== 0x61)
4685 /* movs, cmps, lods, scas. */
4686 if ((i
.tm
.base_opcode
| 0xb) == 0xaf)
4690 if (base_opcode
== 0x6f
4691 || i
.tm
.base_opcode
== 0xd7)
4693 /* NB: For AMD-specific insns with implicit memory operands,
4694 they're intentionally not covered. */
4697 /* No memory operand. */
4698 if (!i
.mem_operands
)
4703 if (i
.tm
.mnem_off
== MN_vldmxcsr
)
4706 else if (i
.tm
.opcode_space
== SPACE_BASE
)
4708 /* test, not, neg, mul, imul, div, idiv. */
4709 if (base_opcode
== 0xf7 && i
.tm
.extension_opcode
!= 1)
4713 if (base_opcode
== 0xff && i
.tm
.extension_opcode
<= 1)
4716 /* add, or, adc, sbb, and, sub, xor, cmp. */
4717 if (i
.tm
.base_opcode
>= 0x80 && i
.tm
.base_opcode
<= 0x83)
4720 /* rol, ror, rcl, rcr, shl/sal, shr, sar. */
4721 if ((base_opcode
== 0xc1 || (base_opcode
| 2) == 0xd3)
4722 && i
.tm
.extension_opcode
!= 6)
4725 /* Check for x87 instructions. */
4726 if ((base_opcode
| 6) == 0xdf)
4728 /* Skip fst, fstp, fstenv, fstcw. */
4729 if (i
.tm
.base_opcode
== 0xd9
4730 && (i
.tm
.extension_opcode
== 2
4731 || i
.tm
.extension_opcode
== 3
4732 || i
.tm
.extension_opcode
== 6
4733 || i
.tm
.extension_opcode
== 7))
4736 /* Skip fisttp, fist, fistp, fstp. */
4737 if (i
.tm
.base_opcode
== 0xdb
4738 && (i
.tm
.extension_opcode
== 1
4739 || i
.tm
.extension_opcode
== 2
4740 || i
.tm
.extension_opcode
== 3
4741 || i
.tm
.extension_opcode
== 7))
4744 /* Skip fisttp, fst, fstp, fsave, fstsw. */
4745 if (i
.tm
.base_opcode
== 0xdd
4746 && (i
.tm
.extension_opcode
== 1
4747 || i
.tm
.extension_opcode
== 2
4748 || i
.tm
.extension_opcode
== 3
4749 || i
.tm
.extension_opcode
== 6
4750 || i
.tm
.extension_opcode
== 7))
4753 /* Skip fisttp, fist, fistp, fbstp, fistp. */
4754 if (i
.tm
.base_opcode
== 0xdf
4755 && (i
.tm
.extension_opcode
== 1
4756 || i
.tm
.extension_opcode
== 2
4757 || i
.tm
.extension_opcode
== 3
4758 || i
.tm
.extension_opcode
== 6
4759 || i
.tm
.extension_opcode
== 7))
4765 else if (i
.tm
.opcode_space
== SPACE_0F
)
4767 /* bt, bts, btr, btc. */
4768 if (i
.tm
.base_opcode
== 0xba
4769 && (i
.tm
.extension_opcode
| 3) == 7)
4772 /* cmpxchg8b, cmpxchg16b, xrstors, vmptrld. */
4773 if (i
.tm
.base_opcode
== 0xc7
4774 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
4775 && (i
.tm
.extension_opcode
== 1 || i
.tm
.extension_opcode
== 3
4776 || i
.tm
.extension_opcode
== 6))
4779 /* fxrstor, ldmxcsr, xrstor. */
4780 if (i
.tm
.base_opcode
== 0xae
4781 && (i
.tm
.extension_opcode
== 1
4782 || i
.tm
.extension_opcode
== 2
4783 || i
.tm
.extension_opcode
== 5))
4786 /* lgdt, lidt, lmsw. */
4787 if (i
.tm
.base_opcode
== 0x01
4788 && (i
.tm
.extension_opcode
== 2
4789 || i
.tm
.extension_opcode
== 3
4790 || i
.tm
.extension_opcode
== 6))
4794 dest
= i
.operands
- 1;
4796 /* Check fake imm8 operand and 3 source operands. */
4797 if ((i
.tm
.opcode_modifier
.immext
4798 || i
.reg_operands
+ i
.mem_operands
== 4)
4799 && i
.types
[dest
].bitfield
.imm8
)
4802 /* add, or, adc, sbb, and, sub, xor, cmp, test, xchg. */
4803 if (i
.tm
.opcode_space
== SPACE_BASE
4804 && ((base_opcode
| 0x38) == 0x39
4805 || (base_opcode
| 2) == 0x87))
4808 if (i
.tm
.mnem_off
== MN_xadd
)
4811 /* Check for load instruction. */
4812 return (i
.types
[dest
].bitfield
.class != ClassNone
4813 || i
.types
[dest
].bitfield
.instance
== Accum
);
4816 /* Output lfence, 0xfaee8, after instruction. */
4819 insert_lfence_after (void)
4821 if (lfence_after_load
&& load_insn_p ())
4823 /* There are also two REP string instructions that require
4824 special treatment. Specifically, the compare string (CMPS)
4825 and scan string (SCAS) instructions set EFLAGS in a manner
4826 that depends on the data being compared/scanned. When used
4827 with a REP prefix, the number of iterations may therefore
4828 vary depending on this data. If the data is a program secret
4829 chosen by the adversary using an LVI method,
4830 then this data-dependent behavior may leak some aspect
4832 if (((i
.tm
.base_opcode
| 0x9) == 0xaf)
4833 && i
.prefix
[REP_PREFIX
])
4835 as_warn (_("`%s` changes flags which would affect control flow behavior"),
4838 char *p
= frag_more (3);
4845 /* Output lfence, 0xfaee8, before instruction. */
4848 insert_lfence_before (void)
4852 if (i
.tm
.opcode_space
!= SPACE_BASE
)
4855 if (i
.tm
.base_opcode
== 0xff
4856 && (i
.tm
.extension_opcode
== 2 || i
.tm
.extension_opcode
== 4))
4858 /* Insert lfence before indirect branch if needed. */
4860 if (lfence_before_indirect_branch
== lfence_branch_none
)
4863 if (i
.operands
!= 1)
4866 if (i
.reg_operands
== 1)
4868 /* Indirect branch via register. Don't insert lfence with
4869 -mlfence-after-load=yes. */
4870 if (lfence_after_load
4871 || lfence_before_indirect_branch
== lfence_branch_memory
)
4874 else if (i
.mem_operands
== 1
4875 && lfence_before_indirect_branch
!= lfence_branch_register
)
4877 as_warn (_("indirect `%s` with memory operand should be avoided"),
4884 if (last_insn
.kind
!= last_insn_other
4885 && last_insn
.seg
== now_seg
)
4887 as_warn_where (last_insn
.file
, last_insn
.line
,
4888 _("`%s` skips -mlfence-before-indirect-branch on `%s`"),
4889 last_insn
.name
, insn_name (&i
.tm
));
4900 /* Output or/not/shl and lfence before near ret. */
4901 if (lfence_before_ret
!= lfence_before_ret_none
4902 && (i
.tm
.base_opcode
| 1) == 0xc3)
4904 if (last_insn
.kind
!= last_insn_other
4905 && last_insn
.seg
== now_seg
)
4907 as_warn_where (last_insn
.file
, last_insn
.line
,
4908 _("`%s` skips -mlfence-before-ret on `%s`"),
4909 last_insn
.name
, insn_name (&i
.tm
));
4913 /* Near ret ingore operand size override under CPU64. */
4914 char prefix
= flag_code
== CODE_64BIT
4916 : i
.prefix
[DATA_PREFIX
] ? 0x66 : 0x0;
4918 if (lfence_before_ret
== lfence_before_ret_not
)
4920 /* not: 0xf71424, may add prefix
4921 for operand size override or 64-bit code. */
4922 p
= frag_more ((prefix
? 2 : 0) + 6 + 3);
4936 p
= frag_more ((prefix
? 1 : 0) + 4 + 3);
4939 if (lfence_before_ret
== lfence_before_ret_or
)
4941 /* or: 0x830c2400, may add prefix
4942 for operand size override or 64-bit code. */
4948 /* shl: 0xc1242400, may add prefix
4949 for operand size override or 64-bit code. */
4964 /* Shared helper for md_assemble() and s_insn(). */
4965 static void init_globals (void)
4969 memset (&i
, '\0', sizeof (i
));
4970 i
.rounding
.type
= rc_none
;
4971 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4972 i
.reloc
[j
] = NO_RELOC
;
4973 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
4974 memset (im_expressions
, '\0', sizeof (im_expressions
));
4975 save_stack_p
= save_stack
;
4978 /* Helper for md_assemble() to decide whether to prepare for a possible 2nd
4979 parsing pass. Instead of introducing a rarely use new insn attribute this
4980 utilizes a common pattern between affected templates. It is deemed
4981 acceptable that this will lead to unnecessary pass 2 preparations in a
4982 limited set of cases. */
4983 static INLINE
bool may_need_pass2 (const insn_template
*t
)
4985 return t
->opcode_modifier
.sse2avx
4986 /* Note that all SSE2AVX templates have at least one operand. */
4987 ? t
->operand_types
[t
->operands
- 1].bitfield
.class == RegSIMD
4988 : (t
->opcode_space
== SPACE_0F
4989 && (t
->base_opcode
| 1) == 0xbf)
4990 || (t
->opcode_space
== SPACE_BASE
4991 && t
->base_opcode
== 0x63);
4994 /* This is the guts of the machine-dependent assembler. LINE points to a
4995 machine dependent instruction. This function is supposed to emit
4996 the frags/bytes it assembles to. */
/* NOTE(review): this chunk was extracted with statements split across lines
   and some lines (braces, labels, declarations) dropped; the code text below
   is kept token-identical to the extraction.  */
4999 md_assemble (char *line
)
5002 char mnemonic
[MAX_MNEM_SIZE
], mnem_suffix
= 0, *copy
= NULL
;
5003 const char *end
, *pass1_mnem
= NULL
;
5004 enum i386_error pass1_err
= 0;
5005 const insn_template
*t
;
5007 /* Initialize globals. */
5008 current_templates
= NULL
;
5012 /* First parse an instruction mnemonic & call i386_operand for the operands.
5013 We assume that the scrubber has arranged it so that line[0] is the valid
5014 start of a (possibly prefixed) mnemonic. */
5016 end
= parse_insn (line
, mnemonic
, false);
5019 if (pass1_mnem
!= NULL
)
5021 if (i
.error
!= no_error
)
5023 gas_assert (current_templates
!= NULL
);
5024 if (may_need_pass2 (current_templates
->start
) && !i
.suffix
)
5026 /* No point in trying a 2nd pass - it'll only find the same suffix
5028 mnem_suffix
= i
.suffix
;
5033 t
= current_templates
->start
;
5034 if (may_need_pass2 (t
))
5036 /* Make a copy of the full line in case we need to retry. */
5037 copy
= xstrdup (line
);
5040 mnem_suffix
= i
.suffix
;
5042 line
= parse_operands (line
, mnemonic
);
5050 /* Now we've parsed the mnemonic into a set of templates, and have the
5051 operands at hand. */
5053 /* All Intel opcodes have reversed operands except for "bound", "enter",
5054 "invlpg*", "monitor*", "mwait*", "tpause", "umwait", "pvalidate",
5055 "rmpadjust", "rmpupdate", and "rmpquery". We also don't reverse
5056 intersegment "jmp" and "call" instructions with 2 immediate operands so
5057 that the immediate segment precedes the offset consistently in Intel and
5061 && (t
->mnem_off
!= MN_bound
)
5062 && !startswith (mnemonic
, "invlpg")
5063 && !startswith (mnemonic
, "monitor")
5064 && !startswith (mnemonic
, "mwait")
5065 && (t
->mnem_off
!= MN_pvalidate
)
5066 && !startswith (mnemonic
, "rmp")
5067 && (t
->mnem_off
!= MN_tpause
)
5068 && (t
->mnem_off
!= MN_umwait
)
5069 && !(i
.operands
== 2
5070 && operand_type_check (i
.types
[0], imm
)
5071 && operand_type_check (i
.types
[1], imm
)))
5074 /* The order of the immediates should be reversed
5075 for 2 immediates extrq and insertq instructions */
5076 if (i
.imm_operands
== 2
5077 && (t
->mnem_off
== MN_extrq
|| t
->mnem_off
== MN_insertq
))
5078 swap_2_operands (0, 1);
5083 if (i
.disp_operands
&& !optimize_disp (t
))
5086 /* Next, we find a template that matches the given insn,
5087 making sure the overlap of the given operands types is consistent
5088 with the template operand types. */
5090 if (!(t
= match_template (mnem_suffix
)))
5092 const char *err_msg
;
5094 if (copy
&& !mnem_suffix
)
5099 pass1_err
= i
.error
;
5100 pass1_mnem
= insn_name (current_templates
->start
);
5104 /* If a non-/only-64bit template (group) was found in pass 1, and if
5105 _some_ template (group) was found in pass 2, squash pass 1's
5107 if (pass1_err
== unsupported_64bit
)
/* Translate the failure reason recorded during template matching into a
   user-visible diagnostic.  */
5113 switch (pass1_mnem
? pass1_err
: i
.error
)
5117 case operand_size_mismatch
:
5118 err_msg
= _("operand size mismatch");
5120 case operand_type_mismatch
:
5121 err_msg
= _("operand type mismatch");
5123 case register_type_mismatch
:
5124 err_msg
= _("register type mismatch");
5126 case number_of_operands_mismatch
:
5127 err_msg
= _("number of operands mismatch");
5129 case invalid_instruction_suffix
:
5130 err_msg
= _("invalid instruction suffix");
5133 err_msg
= _("constant doesn't fit in 4 bits");
5135 case unsupported_with_intel_mnemonic
:
5136 err_msg
= _("unsupported with Intel mnemonic");
5138 case unsupported_syntax
:
5139 err_msg
= _("unsupported syntax");
5142 as_bad (_("unsupported instruction `%s'"),
5143 pass1_mnem
? pass1_mnem
: insn_name (current_templates
->start
));
5145 case unsupported_on_arch
:
5146 as_bad (_("`%s' is not supported on `%s%s'"),
5147 pass1_mnem
? pass1_mnem
: insn_name (current_templates
->start
),
5148 cpu_arch_name
? cpu_arch_name
: default_arch
,
5149 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
5151 case unsupported_64bit
:
5152 if (ISLOWER (mnem_suffix
))
5154 if (flag_code
== CODE_64BIT
)
5155 as_bad (_("`%s%c' is not supported in 64-bit mode"),
5156 pass1_mnem
? pass1_mnem
: insn_name (current_templates
->start
),
5159 as_bad (_("`%s%c' is only supported in 64-bit mode"),
5160 pass1_mnem
? pass1_mnem
: insn_name (current_templates
->start
),
5165 if (flag_code
== CODE_64BIT
)
5166 as_bad (_("`%s' is not supported in 64-bit mode"),
5167 pass1_mnem
? pass1_mnem
: insn_name (current_templates
->start
));
5169 as_bad (_("`%s' is only supported in 64-bit mode"),
5170 pass1_mnem
? pass1_mnem
: insn_name (current_templates
->start
));
5173 case invalid_sib_address
:
5174 err_msg
= _("invalid SIB address");
5176 case invalid_vsib_address
:
5177 err_msg
= _("invalid VSIB address");
5179 case invalid_vector_register_set
:
5180 err_msg
= _("mask, index, and destination registers must be distinct");
5182 case invalid_tmm_register_set
:
5183 err_msg
= _("all tmm registers must be distinct");
5185 case invalid_dest_and_src_register_set
:
5186 err_msg
= _("destination and source registers must be distinct");
5188 case unsupported_vector_index_register
:
5189 err_msg
= _("unsupported vector index register");
5191 case unsupported_broadcast
:
5192 err_msg
= _("unsupported broadcast");
5194 case broadcast_needed
:
5195 err_msg
= _("broadcast is needed for operand of such type");
5197 case unsupported_masking
:
5198 err_msg
= _("unsupported masking");
5200 case mask_not_on_destination
:
5201 err_msg
= _("mask not on destination operand");
5203 case no_default_mask
:
5204 err_msg
= _("default mask isn't allowed");
5206 case unsupported_rc_sae
:
5207 err_msg
= _("unsupported static rounding/sae");
5209 case invalid_register_operand
:
5210 err_msg
= _("invalid register operand");
5213 as_bad (_("%s for `%s'"), err_msg
,
5214 pass1_mnem
? pass1_mnem
: insn_name (current_templates
->start
));
/* From here on a template has been matched; validate prefixes and encoding
   constraints against the chosen template.  */
5220 if (sse_check
!= check_none
5221 /* The opcode space check isn't strictly needed; it's there only to
5222 bypass the logic below when easily possible. */
5223 && t
->opcode_space
>= SPACE_0F
5224 && t
->opcode_space
<= SPACE_0F3A
5225 && !i
.tm
.cpu_flags
.bitfield
.cpusse4a
5226 && !is_any_vex_encoding (t
))
5230 for (j
= 0; j
< t
->operands
; ++j
)
5232 if (t
->operand_types
[j
].bitfield
.class == RegMMX
)
5234 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
)
5238 if (j
>= t
->operands
&& simd
)
5239 (sse_check
== check_warning
5241 : as_bad
) (_("SSE instruction `%s' is used"), insn_name (&i
.tm
));
5244 if (i
.tm
.opcode_modifier
.fwait
)
5245 if (!add_prefix (FWAIT_OPCODE
))
5248 /* Check if REP prefix is OK. */
5249 if (i
.rep_prefix
&& i
.tm
.opcode_modifier
.prefixok
!= PrefixRep
)
5251 as_bad (_("invalid instruction `%s' after `%s'"),
5252 insn_name (&i
.tm
), i
.rep_prefix
);
5256 /* Check for lock without a lockable instruction. Destination operand
5257 must be memory unless it is xchg (0x86). */
5258 if (i
.prefix
[LOCK_PREFIX
])
5260 if (i
.tm
.opcode_modifier
.prefixok
< PrefixLock
5261 || i
.mem_operands
== 0
5262 || (i
.tm
.base_opcode
!= 0x86
5263 && !(i
.flags
[i
.operands
- 1] & Operand_Mem
)))
5265 as_bad (_("expecting lockable instruction after `lock'"));
5269 /* Zap the redundant prefix from XCHG when optimizing. */
5270 if (i
.tm
.base_opcode
== 0x86 && optimize
&& !i
.no_optimize
)
5271 i
.prefix
[LOCK_PREFIX
] = 0;
5274 if (is_any_vex_encoding (&i
.tm
)
5275 || i
.tm
.operand_types
[i
.imm_operands
].bitfield
.class >= RegMMX
5276 || i
.tm
.operand_types
[i
.imm_operands
+ 1].bitfield
.class >= RegMMX
)
5278 /* Check for data size prefix on VEX/XOP/EVEX encoded and SIMD insns. */
5279 if (i
.prefix
[DATA_PREFIX
])
5281 as_bad (_("data size prefix invalid with `%s'"), insn_name (&i
.tm
));
5285 /* Don't allow e.g. KMOV in TLS code sequences. */
5286 for (j
= i
.imm_operands
; j
< i
.operands
; ++j
)
5289 case BFD_RELOC_386_TLS_GOTIE
:
5290 case BFD_RELOC_386_TLS_LE_32
:
5291 case BFD_RELOC_X86_64_GOTTPOFF
:
5292 case BFD_RELOC_X86_64_TLSLD
:
5293 as_bad (_("TLS relocation cannot be used with `%s'"), insn_name (&i
.tm
));
5300 /* Check if HLE prefix is OK. */
5301 if (i
.hle_prefix
&& !check_hle ())
5304 /* Check BND prefix. */
5305 if (i
.bnd_prefix
&& !i
.tm
.opcode_modifier
.bndprefixok
)
5306 as_bad (_("expecting valid branch instruction after `bnd'"));
5308 /* Check NOTRACK prefix. */
5309 if (i
.notrack_prefix
&& i
.tm
.opcode_modifier
.prefixok
!= PrefixNoTrack
)
5310 as_bad (_("expecting indirect branch instruction after `notrack'"));
5312 if (i
.tm
.cpu_flags
.bitfield
.cpumpx
)
5314 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
5315 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
5316 else if (flag_code
!= CODE_16BIT
5317 ? i
.prefix
[ADDR_PREFIX
]
5318 : i
.mem_operands
&& !i
.prefix
[ADDR_PREFIX
])
5319 as_bad (_("16-bit address isn't allowed in MPX instructions"));
5322 /* Insert BND prefix. */
5323 if (add_bnd_prefix
&& i
.tm
.opcode_modifier
.bndprefixok
)
5325 if (!i
.prefix
[BND_PREFIX
])
5326 add_prefix (BND_PREFIX_OPCODE
);
5327 else if (i
.prefix
[BND_PREFIX
] != BND_PREFIX_OPCODE
)
5329 as_warn (_("replacing `rep'/`repe' prefix by `bnd'"));
5330 i
.prefix
[BND_PREFIX
] = BND_PREFIX_OPCODE
;
5334 /* Check string instruction segment overrides. */
5335 if (i
.tm
.opcode_modifier
.isstring
>= IS_STRING_ES_OP0
)
5337 gas_assert (i
.mem_operands
);
5338 if (!check_string ())
5340 i
.disp_operands
= 0;
5343 /* The memory operand of (%dx) should be only used with input/output
5344 instructions (base opcodes: 0x6c, 0x6e, 0xec, 0xee). */
5345 if (i
.input_output_operand
5346 && ((i
.tm
.base_opcode
| 0x82) != 0xee
5347 || i
.tm
.opcode_space
!= SPACE_BASE
))
5349 as_bad (_("input/output port address isn't allowed with `%s'"),
5354 if (optimize
&& !i
.no_optimize
&& i
.tm
.opcode_modifier
.optimize
)
5355 optimize_encoding ();
5357 if (use_unaligned_vector_move
)
5358 encode_with_unaligned_vector_move ();
5360 if (!process_suffix ())
5363 /* Check if IP-relative addressing requirements can be satisfied. */
5364 if (i
.tm
.cpu_flags
.bitfield
.cpuprefetchi
5365 && !(i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
))
5366 as_warn (_("'%s' only supports RIP-relative address"), insn_name (&i
.tm
));
5368 /* Update operand types and check extended states. */
5369 for (j
= 0; j
< i
.operands
; j
++)
5371 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
5372 switch (i
.tm
.operand_types
[j
].bitfield
.class)
5377 i
.xstate
|= xstate_mmx
;
5380 i
.xstate
|= xstate_mask
;
5383 if (i
.tm
.operand_types
[j
].bitfield
.tmmword
)
5384 i
.xstate
|= xstate_tmm
;
5385 else if (i
.tm
.operand_types
[j
].bitfield
.zmmword
)
5386 i
.xstate
|= xstate_zmm
;
5387 else if (i
.tm
.operand_types
[j
].bitfield
.ymmword
)
5388 i
.xstate
|= xstate_ymm
;
5389 else if (i
.tm
.operand_types
[j
].bitfield
.xmmword
)
5390 i
.xstate
|= xstate_xmm
;
5395 /* Make still unresolved immediate matches conform to size of immediate
5396 given in i.suffix. */
5397 if (!finalize_imm ())
5400 if (i
.types
[0].bitfield
.imm1
)
5401 i
.imm_operands
= 0; /* kludge for shift insns. */
5403 /* For insns with operands there are more diddles to do to the opcode. */
5406 if (!process_operands ())
5409 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.operandconstraint
== UGH
)
5411 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
5412 as_warn (_("translating to `%sp'"), insn_name (&i
.tm
));
5415 if (is_any_vex_encoding (&i
.tm
))
5417 if (!cpu_arch_flags
.bitfield
.cpui286
)
5419 as_bad (_("instruction `%s' isn't supported outside of protected mode."),
5424 /* Check for explicit REX prefix. */
5425 if (i
.prefix
[REX_PREFIX
] || i
.rex_encoding
)
5427 as_bad (_("REX prefix invalid with `%s'"), insn_name (&i
.tm
));
5431 if (i
.tm
.opcode_modifier
.vex
)
5432 build_vex_prefix (t
);
5434 build_evex_prefix ();
5436 /* The individual REX.RXBW bits got consumed. */
5437 i
.rex
&= REX_OPCODE
;
5440 /* Handle conversion of 'int $3' --> special int3 insn. */
5441 if (i
.tm
.mnem_off
== MN_int
5442 && i
.op
[0].imms
->X_add_number
== 3)
5444 i
.tm
.base_opcode
= INT3_OPCODE
;
5448 if ((i
.tm
.opcode_modifier
.jump
== JUMP
5449 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
5450 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
5451 && i
.op
[0].disps
->X_op
== O_constant
)
5453 /* Convert "jmp constant" (and "call constant") to a jump (call) to
5454 the absolute address given by the constant. Since ix86 jumps and
5455 calls are pc relative, we need to generate a reloc. */
5456 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
5457 i
.op
[0].disps
->X_op
= O_symbol
;
5460 /* For 8 bit registers we need an empty rex prefix. Also if the
5461 instruction already has a prefix, we need to convert old
5462 registers to new ones. */
5464 if ((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
5465 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
5466 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
5467 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
5468 || (((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
)
5469 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
))
5474 i
.rex
|= REX_OPCODE
;
5475 for (x
= 0; x
< 2; x
++)
5477 /* Look for 8 bit operand that uses old registers. */
5478 if (i
.types
[x
].bitfield
.class == Reg
&& i
.types
[x
].bitfield
.byte
5479 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
5481 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
5482 /* In case it is "hi" register, give up. */
5483 if (i
.op
[x
].regs
->reg_num
> 3)
5484 as_bad (_("can't encode register '%s%s' in an "
5485 "instruction requiring REX prefix."),
5486 register_prefix
, i
.op
[x
].regs
->reg_name
);
5488 /* Otherwise it is equivalent to the extended register.
5489 Since the encoding doesn't change this is merely
5490 cosmetic cleanup for debug output. */
5492 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
5497 if (i
.rex
== 0 && i
.rex_encoding
)
5499 /* Check if we can add a REX_OPCODE byte. Look for 8 bit operand
5500 that uses legacy register. If it is "hi" register, don't add
5501 the REX_OPCODE byte. */
5503 for (x
= 0; x
< 2; x
++)
5504 if (i
.types
[x
].bitfield
.class == Reg
5505 && i
.types
[x
].bitfield
.byte
5506 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0
5507 && i
.op
[x
].regs
->reg_num
> 3)
5509 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
5510 i
.rex_encoding
= false;
5519 add_prefix (REX_OPCODE
| i
.rex
);
5521 insert_lfence_before ();
5523 /* We are ready to output the insn. */
5526 insert_lfence_after ();
/* Record this insn for the -mlfence-* warning logic used when assembling
   subsequent instructions (see the last_insn checks earlier in the file).  */
5528 last_insn
.seg
= now_seg
;
5530 if (i
.tm
.opcode_modifier
.isprefix
)
5532 last_insn
.kind
= last_insn_prefix
;
5533 last_insn
.name
= insn_name (&i
.tm
);
5534 last_insn
.file
= as_where (&last_insn
.line
);
5537 last_insn
.kind
= last_insn_other
;
5540 /* The Q suffix is generally valid only in 64-bit mode, with very few
5541 exceptions: fild, fistp, fisttp, and cmpxchg8b. Note that for fild
5542 and fisttp only one of their two templates is matched below: That's
5543 sufficient since other relevant attributes are the same between both
5544 respective templates. */
5545 static INLINE
bool q_suffix_allowed(const insn_template
*t
)
5547 return flag_code
== CODE_64BIT
5548 || (t
->opcode_space
== SPACE_BASE
5549 && t
->base_opcode
== 0xdf
5550 && (t
->extension_opcode
& 1)) /* fild / fistp / fisttp */
5551 || t
->mnem_off
== MN_cmpxchg8b
;
/* Parse the (possibly prefixed) mnemonic at LINE into MNEMONIC, consuming
   legacy and pseudo prefixes along the way and looking the result up in
   op_hash; on success current_templates points at the candidate template
   group.  PREFIX_ONLY presumably restricts parsing to prefixes — TODO
   confirm, its uses are not visible in this extraction.
   NOTE(review): extraction dropped some lines (braces, returns); code text
   kept token-identical.  */
5555 parse_insn (const char *line
, char *mnemonic
, bool prefix_only
)
5557 const char *l
= line
, *token_start
= l
;
5559 bool pass1
= !current_templates
;
5561 const insn_template
*t
;
5567 /* Pseudo-prefixes start with an opening figure brace. */
5568 if ((*mnem_p
= *l
) == '{')
5573 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
5578 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
5581 as_bad (_("no such instruction: `%s'"), token_start
);
5586 /* Pseudo-prefixes end with a closing figure brace. */
5587 if (*mnemonic
== '{' && *l
== '}')
5590 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
5594 /* Point l at the closing brace if there's no other separator. */
5595 if (*l
!= END_OF_INSN
&& !is_space_char (*l
)
5596 && *l
!= PREFIX_SEPARATOR
)
5599 else if (!is_space_char (*l
)
5600 && *l
!= END_OF_INSN
5602 || (*l
!= PREFIX_SEPARATOR
&& *l
!= ',')))
5606 as_bad (_("invalid character %s in mnemonic"),
5607 output_invalid (*l
));
5610 if (token_start
== l
)
5612 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
5613 as_bad (_("expecting prefix; got nothing"));
5615 as_bad (_("expecting mnemonic; got nothing"));
5619 /* Look up instruction (or prefix) via hash table. */
5620 current_templates
= (const templates
*) str_hash_find (op_hash
, mnemonic
);
5622 if (*l
!= END_OF_INSN
5623 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
5624 && current_templates
5625 && current_templates
->start
->opcode_modifier
.isprefix
)
5627 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
5629 as_bad ((flag_code
!= CODE_64BIT
5630 ? _("`%s' is only supported in 64-bit mode")
5631 : _("`%s' is not supported in 64-bit mode")),
5632 insn_name (current_templates
->start
));
5635 /* If we are in 16-bit mode, do not allow addr16 or data16.
5636 Similarly, in 32-bit mode, do not allow addr32 or data32. */
5637 if ((current_templates
->start
->opcode_modifier
.size
== SIZE16
5638 || current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5639 && flag_code
!= CODE_64BIT
5640 && ((current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5641 ^ (flag_code
== CODE_16BIT
)))
5643 as_bad (_("redundant %s prefix"),
5644 insn_name (current_templates
->start
));
5648 if (current_templates
->start
->base_opcode
== PSEUDO_PREFIX
)
5650 /* Handle pseudo prefixes. */
5651 switch (current_templates
->start
->extension_opcode
)
5655 i
.disp_encoding
= disp_encoding_8bit
;
5659 i
.disp_encoding
= disp_encoding_16bit
;
5663 i
.disp_encoding
= disp_encoding_32bit
;
5667 i
.dir_encoding
= dir_encoding_load
;
5671 i
.dir_encoding
= dir_encoding_store
;
5675 i
.vec_encoding
= vex_encoding_vex
;
5679 i
.vec_encoding
= vex_encoding_vex3
;
5683 i
.vec_encoding
= vex_encoding_evex
;
5687 i
.rex_encoding
= true;
5689 case Prefix_NoOptimize
:
5691 i
.no_optimize
= true;
5699 /* Add prefix, checking for repeated prefixes. */
5700 switch (add_prefix (current_templates
->start
->base_opcode
))
5705 if (current_templates
->start
->cpu_flags
.bitfield
.cpuibt
)
5706 i
.notrack_prefix
= insn_name (current_templates
->start
);
5709 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
5710 i
.hle_prefix
= insn_name (current_templates
->start
);
5711 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
5712 i
.bnd_prefix
= insn_name (current_templates
->start
);
5714 i
.rep_prefix
= insn_name (current_templates
->start
);
5720 /* Skip past PREFIX_SEPARATOR and reset token_start. */
5730 if (!current_templates
)
5732 /* Deprecated functionality (new code should use pseudo-prefixes instead):
5733 Check if we should swap operand or force 32bit displacement in
5735 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
5736 i
.dir_encoding
= dir_encoding_swap
;
5737 else if (mnem_p
- 3 == dot_p
5740 i
.disp_encoding
= disp_encoding_8bit
;
5741 else if (mnem_p
- 4 == dot_p
5745 i
.disp_encoding
= disp_encoding_32bit
;
5750 current_templates
= (const templates
*) str_hash_find (op_hash
, mnemonic
);
5753 if (!current_templates
|| !pass1
)
5755 current_templates
= NULL
;
5758 if (mnem_p
> mnemonic
)
5760 /* See if we can get a match by trimming off a suffix. */
5763 case WORD_MNEM_SUFFIX
:
5764 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
5765 i
.suffix
= SHORT_MNEM_SUFFIX
;
5768 case BYTE_MNEM_SUFFIX
:
5769 case QWORD_MNEM_SUFFIX
:
5770 i
.suffix
= mnem_p
[-1];
5773 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5775 case SHORT_MNEM_SUFFIX
:
5776 case LONG_MNEM_SUFFIX
:
5779 i
.suffix
= mnem_p
[-1];
5782 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5790 if (intel_float_operand (mnemonic
) == 1)
5791 i
.suffix
= SHORT_MNEM_SUFFIX
;
5793 i
.suffix
= LONG_MNEM_SUFFIX
;
5796 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5798 /* For compatibility reasons accept MOVSD and CMPSD without
5799 operands even in AT&T mode. */
5800 else if (*l
== END_OF_INSN
5801 || (is_space_char (*l
) && l
[1] == END_OF_INSN
))
5805 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5806 if (current_templates
!= NULL
5808 && (current_templates
->start
->base_opcode
| 2) == 0xa6
5809 && current_templates
->start
->opcode_space
5811 && mnem_p
[-2] == 's')
5813 as_warn (_("found `%sd'; assuming `%sl' was meant"),
5814 mnemonic
, mnemonic
);
5815 i
.suffix
= LONG_MNEM_SUFFIX
;
5819 current_templates
= NULL
;
5827 if (!current_templates
)
5830 as_bad (_("no such instruction: `%s'"), token_start
);
5835 if (current_templates
->start
->opcode_modifier
.jump
== JUMP
5836 || current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
)
5838 /* Check for a branch hint. We allow ",pt" and ",pn" for
5839 predict taken and predict not taken respectively.
5840 I'm not sure that branch hints actually do anything on loop
5841 and jcxz insns (JumpByte) for current Pentium4 chips. They
5842 may work in the future and it doesn't hurt to accept them
5844 if (l
[0] == ',' && l
[1] == 'p')
5848 if (!add_prefix (DS_PREFIX_OPCODE
))
5852 else if (l
[2] == 'n')
5854 if (!add_prefix (CS_PREFIX_OPCODE
))
5860 /* Any other comma loses. */
5863 as_bad (_("invalid character %s in mnemonic"),
5864 output_invalid (*l
));
5868 /* Check if instruction is supported on specified architecture. */
5870 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
5872 supported
|= cpu_flags_match (t
);
5874 if (i
.suffix
== QWORD_MNEM_SUFFIX
&& !q_suffix_allowed (t
))
5875 supported
&= ~CPU_FLAGS_64BIT_MATCH
;
5877 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
5883 if (supported
& CPU_FLAGS_64BIT_MATCH
)
5884 i
.error
= unsupported_on_arch
;
5886 i
.error
= unsupported_64bit
;
/* Parse the comma-separated operand list at L (MNEMONIC is used for
   diagnostics and Intel float handling), recording each operand into the
   global `i' via i386_intel_operand / i386_att_operand.  Returns the
   updated scan pointer — TODO confirm, return statements not visible in
   this extraction.  */
5893 parse_operands (char *l
, const char *mnemonic
)
5897 /* 1 if operand is pending after ','. */
5898 unsigned int expecting_operand
= 0;
5900 while (*l
!= END_OF_INSN
)
5902 /* Non-zero if operand parens not balanced. */
5903 unsigned int paren_not_balanced
= 0;
5904 /* True if inside double quotes. */
5905 bool in_quotes
= false;
5907 /* Skip optional white space before operand. */
5908 if (is_space_char (*l
))
5910 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
&& *l
!= '"')
5912 as_bad (_("invalid character %s before operand %d"),
5913 output_invalid (*l
),
5917 token_start
= l
; /* After white space. */
5918 while (in_quotes
|| paren_not_balanced
|| *l
!= ',')
5920 if (*l
== END_OF_INSN
)
5924 as_bad (_("unbalanced double quotes in operand %d."),
5928 if (paren_not_balanced
)
5930 know (!intel_syntax
);
5931 as_bad (_("unbalanced parenthesis in operand %d."),
5936 break; /* we are done */
5938 else if (*l
== '\\' && l
[1] == '"')
5941 in_quotes
= !in_quotes
;
5942 else if (!in_quotes
&& !is_operand_char (*l
) && !is_space_char (*l
))
5944 as_bad (_("invalid character %s in operand %d"),
5945 output_invalid (*l
),
5949 if (!intel_syntax
&& !in_quotes
)
5952 ++paren_not_balanced
;
5954 --paren_not_balanced
;
5958 if (l
!= token_start
)
5959 { /* Yes, we've read in another operand. */
5960 unsigned int operand_ok
;
5961 this_operand
= i
.operands
++;
5962 if (i
.operands
> MAX_OPERANDS
)
5964 as_bad (_("spurious operands; (%d operands/instruction max)"),
5968 i
.types
[this_operand
].bitfield
.unspecified
= 1;
5969 /* Now parse operand adding info to 'i' as we go along. */
5970 END_STRING_AND_SAVE (l
);
5972 if (i
.mem_operands
> 1)
5974 as_bad (_("too many memory references for `%s'"),
5981 i386_intel_operand (token_start
,
5982 intel_float_operand (mnemonic
));
5984 operand_ok
= i386_att_operand (token_start
);
5986 RESTORE_END_STRING (l
);
5992 if (expecting_operand
)
5994 expecting_operand_after_comma
:
5995 as_bad (_("expecting operand after ','; got nothing"));
6000 as_bad (_("expecting operand before ','; got nothing"));
6005 /* Now *l must be either ',' or END_OF_INSN. */
6008 if (*++l
== END_OF_INSN
)
6010 /* Just skip it, if it's \n complain. */
6011 goto expecting_operand_after_comma
;
6013 expecting_operand
= 1;
/* Exchange operands XCHG1 and XCHG2 of the insn being assembled: their
   types, flags, values, relocations and immediate-size bits — and keep the
   recorded mask/broadcast operand indices pointing at the right operand.
   NOTE(review): extraction dropped the guard around the mask renumbering
   (presumably an i.mask check) — code kept token-identical.  */
6020 swap_2_operands (unsigned int xchg1
, unsigned int xchg2
)
6022 union i386_op temp_op
;
6023 i386_operand_type temp_type
;
6024 unsigned int temp_flags
;
6025 enum bfd_reloc_code_real temp_reloc
;
6027 temp_type
= i
.types
[xchg2
];
6028 i
.types
[xchg2
] = i
.types
[xchg1
];
6029 i
.types
[xchg1
] = temp_type
;
6031 temp_flags
= i
.flags
[xchg2
];
6032 i
.flags
[xchg2
] = i
.flags
[xchg1
];
6033 i
.flags
[xchg1
] = temp_flags
;
6035 temp_op
= i
.op
[xchg2
];
6036 i
.op
[xchg2
] = i
.op
[xchg1
];
6037 i
.op
[xchg1
] = temp_op
;
6039 temp_reloc
= i
.reloc
[xchg2
];
6040 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
6041 i
.reloc
[xchg1
] = temp_reloc
;
6043 temp_flags
= i
.imm_bits
[xchg2
];
6044 i
.imm_bits
[xchg2
] = i
.imm_bits
[xchg1
];
6045 i
.imm_bits
[xchg1
] = temp_flags
;
/* Renumber the recorded masking operand if it was one of the two.  */
6049 if (i
.mask
.operand
== xchg1
)
6050 i
.mask
.operand
= xchg2
;
6051 else if (i
.mask
.operand
== xchg2
)
6052 i
.mask
.operand
= xchg1
;
/* Likewise for an active broadcast.  */
6054 if (i
.broadcast
.type
|| i
.broadcast
.bytes
)
6056 if (i
.broadcast
.operand
== xchg1
)
6057 i
.broadcast
.operand
= xchg2
;
6058 else if (i
.broadcast
.operand
== xchg2
)
6059 i
.broadcast
.operand
= xchg1
;
/* Reverse the operand order of the insn being assembled (used for e.g.
   Intel-syntax operand order).  NOTE(review): the operand-count dispatch
   (switch scaffolding) around the two swap_2_operands calls was dropped by
   the extraction — code kept token-identical.  */
6064 swap_operands (void)
6070 swap_2_operands (1, i
.operands
- 2);
6074 swap_2_operands (0, i
.operands
- 1);
/* With two memory operands the segment overrides swap along with them.  */
6080 if (i
.mem_operands
== 2)
6082 const reg_entry
*temp_seg
;
6083 temp_seg
= i
.seg
[0];
6084 i
.seg
[0] = i
.seg
[1];
6085 i
.seg
[1] = temp_seg
;
6089 /* Try to ensure constant immediates are represented in the smallest
/* ... opcode by widening the set of immediate sizes each constant operand
   may match, after sign-adjusting it into range.  NOTE(review): the
   function signature line was dropped by the extraction; code kept
   token-identical.  */
6094 char guess_suffix
= 0;
6098 guess_suffix
= i
.suffix
;
6099 else if (i
.reg_operands
)
6101 /* Figure out a suffix from the last register operand specified.
6102 We can't do this properly yet, i.e. excluding special register
6103 instances, but the following works for instructions with
6104 immediates. In any case, we can't set i.suffix yet. */
6105 for (op
= i
.operands
; --op
>= 0;)
6106 if (i
.types
[op
].bitfield
.class != Reg
)
6108 else if (i
.types
[op
].bitfield
.byte
)
6110 guess_suffix
= BYTE_MNEM_SUFFIX
;
6113 else if (i
.types
[op
].bitfield
.word
)
6115 guess_suffix
= WORD_MNEM_SUFFIX
;
6118 else if (i
.types
[op
].bitfield
.dword
)
6120 guess_suffix
= LONG_MNEM_SUFFIX
;
6123 else if (i
.types
[op
].bitfield
.qword
)
6125 guess_suffix
= QWORD_MNEM_SUFFIX
;
6129 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
6130 guess_suffix
= WORD_MNEM_SUFFIX
;
6131 else if (flag_code
!= CODE_64BIT
|| !(i
.prefix
[REX_PREFIX
] & REX_W
))
6132 guess_suffix
= LONG_MNEM_SUFFIX
;
6134 for (op
= i
.operands
; --op
>= 0;)
6135 if (operand_type_check (i
.types
[op
], imm
))
6137 switch (i
.op
[op
].imms
->X_op
)
6140 /* If a suffix is given, this operand may be shortened. */
6141 switch (guess_suffix
)
6143 case LONG_MNEM_SUFFIX
:
6144 i
.types
[op
].bitfield
.imm32
= 1;
6145 i
.types
[op
].bitfield
.imm64
= 1;
6147 case WORD_MNEM_SUFFIX
:
6148 i
.types
[op
].bitfield
.imm16
= 1;
6149 i
.types
[op
].bitfield
.imm32
= 1;
6150 i
.types
[op
].bitfield
.imm32s
= 1;
6151 i
.types
[op
].bitfield
.imm64
= 1;
6153 case BYTE_MNEM_SUFFIX
:
6154 i
.types
[op
].bitfield
.imm8
= 1;
6155 i
.types
[op
].bitfield
.imm8s
= 1;
6156 i
.types
[op
].bitfield
.imm16
= 1;
6157 i
.types
[op
].bitfield
.imm32
= 1;
6158 i
.types
[op
].bitfield
.imm32s
= 1;
6159 i
.types
[op
].bitfield
.imm64
= 1;
6163 /* If this operand is at most 16 bits, convert it
6164 to a signed 16 bit number before trying to see
6165 whether it will fit in an even smaller size.
6166 This allows a 16-bit operand such as $0xffe0 to
6167 be recognised as within Imm8S range. */
6168 if ((i
.types
[op
].bitfield
.imm16
)
6169 && fits_in_unsigned_word (i
.op
[op
].imms
->X_add_number
))
6171 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
6172 ^ 0x8000) - 0x8000);
6175 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
6176 if ((i
.types
[op
].bitfield
.imm32
)
6177 && fits_in_unsigned_long (i
.op
[op
].imms
->X_add_number
))
6179 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
6180 ^ ((offsetT
) 1 << 31))
6181 - ((offsetT
) 1 << 31));
6185 = operand_type_or (i
.types
[op
],
6186 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
6188 /* We must avoid matching of Imm32 templates when 64bit
6189 only immediate is available. */
6190 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
6191 i
.types
[op
].bitfield
.imm32
= 0;
6198 /* Symbols and expressions. */
6200 /* Convert symbolic operand to proper sizes for matching, but don't
6201 prevent matching a set of insns that only supports sizes other
6202 than those matching the insn suffix. */
6204 i386_operand_type mask
, allowed
;
6205 const insn_template
*t
= current_templates
->start
;
6207 operand_type_set (&mask
, 0);
6208 switch (guess_suffix
)
6210 case QWORD_MNEM_SUFFIX
:
6211 mask
.bitfield
.imm64
= 1;
6212 mask
.bitfield
.imm32s
= 1;
6214 case LONG_MNEM_SUFFIX
:
6215 mask
.bitfield
.imm32
= 1;
6217 case WORD_MNEM_SUFFIX
:
6218 mask
.bitfield
.imm16
= 1;
6220 case BYTE_MNEM_SUFFIX
:
6221 mask
.bitfield
.imm8
= 1;
6227 allowed
= operand_type_and (t
->operand_types
[op
], mask
);
6228 while (++t
< current_templates
->end
)
6230 allowed
= operand_type_or (allowed
, t
->operand_types
[op
]);
6231 allowed
= operand_type_and (allowed
, mask
);
6234 if (!operand_type_all_zero (&allowed
))
6235 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
6242 /* Try to use the smallest displacement type too. */
/* Narrow constant displacements of insn template T to the smallest
   encodable form (disp8/disp16/disp32), drop zero displacements with a
   base/index, and emit TLSDESC_CALL marker fixups.  NOTE(review):
   extraction dropped some scaffolding lines; code kept token-identical.  */
6244 optimize_disp (const insn_template
*t
)
6248 if (!want_disp32 (t
)
6249 && (!t
->opcode_modifier
.jump
6250 || i
.jumpabsolute
|| i
.types
[0].bitfield
.baseindex
))
6252 for (op
= 0; op
< i
.operands
; ++op
)
6254 const expressionS
*exp
= i
.op
[op
].disps
;
6256 if (!operand_type_check (i
.types
[op
], disp
))
6259 if (exp
->X_op
!= O_constant
)
6262 /* Since displacement is signed extended to 64bit, don't allow
6263 disp32 if it is out of range. */
6264 if (fits_in_signed_long (exp
->X_add_number
))
6267 i
.types
[op
].bitfield
.disp32
= 0;
6268 if (i
.types
[op
].bitfield
.baseindex
)
6270 as_bad (_("0x%" PRIx64
" out of range of signed 32bit displacement"),
6271 (uint64_t) exp
->X_add_number
);
6277 /* Don't optimize displacement for movabs since it only takes 64bit
6279 if (i
.disp_encoding
> disp_encoding_8bit
6280 || (flag_code
== CODE_64BIT
&& t
->mnem_off
== MN_movabs
))
6283 for (op
= i
.operands
; op
-- > 0;)
6284 if (operand_type_check (i
.types
[op
], disp
))
6286 if (i
.op
[op
].disps
->X_op
== O_constant
)
6288 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
/* A zero displacement together with a base/index register needs no
   displacement encoding at all.  */
6290 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
6292 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
6293 i
.op
[op
].disps
= NULL
;
6298 if (i
.types
[op
].bitfield
.disp16
6299 && fits_in_unsigned_word (op_disp
))
6301 /* If this operand is at most 16 bits, convert
6302 to a signed 16 bit number and don't use 64bit
6304 op_disp
= ((op_disp
^ 0x8000) - 0x8000);
6305 i
.types
[op
].bitfield
.disp64
= 0;
6309 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
6310 if ((flag_code
!= CODE_64BIT
6311 ? i
.types
[op
].bitfield
.disp32
6313 && (!t
->opcode_modifier
.jump
6314 || i
.jumpabsolute
|| i
.types
[op
].bitfield
.baseindex
))
6315 && fits_in_unsigned_long (op_disp
))
6317 /* If this operand is at most 32 bits, convert
6318 to a signed 32 bit number and don't use 64bit
6320 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
6321 i
.types
[op
].bitfield
.disp64
= 0;
6322 i
.types
[op
].bitfield
.disp32
= 1;
6325 if (flag_code
== CODE_64BIT
&& fits_in_signed_long (op_disp
))
6327 i
.types
[op
].bitfield
.disp64
= 0;
6328 i
.types
[op
].bitfield
.disp32
= 1;
6331 if ((i
.types
[op
].bitfield
.disp32
6332 || i
.types
[op
].bitfield
.disp16
)
6333 && fits_in_disp8 (op_disp
))
6334 i
.types
[op
].bitfield
.disp8
= 1;
6336 i
.op
[op
].disps
->X_add_number
= op_disp
;
6338 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
6339 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
6341 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
6342 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
6343 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
6346 /* We only support 64bit displacement on constants. */
6347 i
.types
[op
].bitfield
.disp64
= 0;
6353 /* Return 1 if there is a match in broadcast bytes between operand
6354 GIVEN and instruction template T. */
6357 match_broadcast_size (const insn_template
*t
, unsigned int given
)
6359 return ((t
->opcode_modifier
.broadcast
== BYTE_BROADCAST
6360 && i
.types
[given
].bitfield
.byte
)
6361 || (t
->opcode_modifier
.broadcast
== WORD_BROADCAST
6362 && i
.types
[given
].bitfield
.word
)
6363 || (t
->opcode_modifier
.broadcast
== DWORD_BROADCAST
6364 && i
.types
[given
].bitfield
.dword
)
6365 || (t
->opcode_modifier
.broadcast
== QWORD_BROADCAST
6366 && i
.types
[given
].bitfield
.qword
));
6369 /* Check if operands are valid for the instruction. */
6372 check_VecOperands (const insn_template
*t
)
6377 /* Templates allowing for ZMMword as well as YMMword and/or XMMword for
6378 any one operand are implicity requiring AVX512VL support if the actual
6379 operand size is YMMword or XMMword. Since this function runs after
6380 template matching, there's no need to check for YMMword/XMMword in
6382 cpu
= cpu_flags_and (t
->cpu_flags
, avx512
);
6383 if (!cpu_flags_all_zero (&cpu
)
6384 && !t
->cpu_flags
.bitfield
.cpuavx512vl
6385 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
6387 for (op
= 0; op
< t
->operands
; ++op
)
6389 if (t
->operand_types
[op
].bitfield
.zmmword
6390 && (i
.types
[op
].bitfield
.ymmword
6391 || i
.types
[op
].bitfield
.xmmword
))
6393 i
.error
= unsupported
;
6399 /* Somewhat similarly, templates specifying both AVX and AVX2 are
6400 requiring AVX2 support if the actual operand size is YMMword. */
6401 if (t
->cpu_flags
.bitfield
.cpuavx
6402 && t
->cpu_flags
.bitfield
.cpuavx2
6403 && !cpu_arch_flags
.bitfield
.cpuavx2
)
6405 for (op
= 0; op
< t
->operands
; ++op
)
6407 if (t
->operand_types
[op
].bitfield
.xmmword
6408 && i
.types
[op
].bitfield
.ymmword
)
6410 i
.error
= unsupported
;
6416 /* Without VSIB byte, we can't have a vector register for index. */
6417 if (!t
->opcode_modifier
.sib
6419 && (i
.index_reg
->reg_type
.bitfield
.xmmword
6420 || i
.index_reg
->reg_type
.bitfield
.ymmword
6421 || i
.index_reg
->reg_type
.bitfield
.zmmword
))
6423 i
.error
= unsupported_vector_index_register
;
6427 /* Check if default mask is allowed. */
6428 if (t
->opcode_modifier
.operandconstraint
== NO_DEFAULT_MASK
6429 && (!i
.mask
.reg
|| i
.mask
.reg
->reg_num
== 0))
6431 i
.error
= no_default_mask
;
6435 /* For VSIB byte, we need a vector register for index, and all vector
6436 registers must be distinct. */
6437 if (t
->opcode_modifier
.sib
&& t
->opcode_modifier
.sib
!= SIBMEM
)
6440 || !((t
->opcode_modifier
.sib
== VECSIB128
6441 && i
.index_reg
->reg_type
.bitfield
.xmmword
)
6442 || (t
->opcode_modifier
.sib
== VECSIB256
6443 && i
.index_reg
->reg_type
.bitfield
.ymmword
)
6444 || (t
->opcode_modifier
.sib
== VECSIB512
6445 && i
.index_reg
->reg_type
.bitfield
.zmmword
)))
6447 i
.error
= invalid_vsib_address
;
6451 gas_assert (i
.reg_operands
== 2 || i
.mask
.reg
);
6452 if (i
.reg_operands
== 2 && !i
.mask
.reg
)
6454 gas_assert (i
.types
[0].bitfield
.class == RegSIMD
);
6455 gas_assert (i
.types
[0].bitfield
.xmmword
6456 || i
.types
[0].bitfield
.ymmword
);
6457 gas_assert (i
.types
[2].bitfield
.class == RegSIMD
);
6458 gas_assert (i
.types
[2].bitfield
.xmmword
6459 || i
.types
[2].bitfield
.ymmword
);
6460 if (operand_check
== check_none
)
6462 if (register_number (i
.op
[0].regs
)
6463 != register_number (i
.index_reg
)
6464 && register_number (i
.op
[2].regs
)
6465 != register_number (i
.index_reg
)
6466 && register_number (i
.op
[0].regs
)
6467 != register_number (i
.op
[2].regs
))
6469 if (operand_check
== check_error
)
6471 i
.error
= invalid_vector_register_set
;
6474 as_warn (_("mask, index, and destination registers should be distinct"));
6476 else if (i
.reg_operands
== 1 && i
.mask
.reg
)
6478 if (i
.types
[1].bitfield
.class == RegSIMD
6479 && (i
.types
[1].bitfield
.xmmword
6480 || i
.types
[1].bitfield
.ymmword
6481 || i
.types
[1].bitfield
.zmmword
)
6482 && (register_number (i
.op
[1].regs
)
6483 == register_number (i
.index_reg
)))
6485 if (operand_check
== check_error
)
6487 i
.error
= invalid_vector_register_set
;
6490 if (operand_check
!= check_none
)
6491 as_warn (_("index and destination registers should be distinct"));
6496 /* For AMX instructions with 3 TMM register operands, all operands
6497 must be distinct. */
6498 if (i
.reg_operands
== 3
6499 && t
->operand_types
[0].bitfield
.tmmword
6500 && (i
.op
[0].regs
== i
.op
[1].regs
6501 || i
.op
[0].regs
== i
.op
[2].regs
6502 || i
.op
[1].regs
== i
.op
[2].regs
))
6504 i
.error
= invalid_tmm_register_set
;
6508 /* For some special instructions require that destination must be distinct
6509 from source registers. */
6510 if (t
->opcode_modifier
.operandconstraint
== DISTINCT_DEST
)
6512 unsigned int dest_reg
= i
.operands
- 1;
6514 know (i
.operands
>= 3);
6516 /* #UD if dest_reg == src1_reg or dest_reg == src2_reg. */
6517 if (i
.op
[dest_reg
- 1].regs
== i
.op
[dest_reg
].regs
6518 || (i
.reg_operands
> 2
6519 && i
.op
[dest_reg
- 2].regs
== i
.op
[dest_reg
].regs
))
6521 i
.error
= invalid_dest_and_src_register_set
;
6526 /* Check if broadcast is supported by the instruction and is applied
6527 to the memory operand. */
6528 if (i
.broadcast
.type
|| i
.broadcast
.bytes
)
6530 i386_operand_type type
, overlap
;
6532 /* Check if specified broadcast is supported in this instruction,
6533 and its broadcast bytes match the memory operand. */
6534 op
= i
.broadcast
.operand
;
6535 if (!t
->opcode_modifier
.broadcast
6536 || !(i
.flags
[op
] & Operand_Mem
)
6537 || (!i
.types
[op
].bitfield
.unspecified
6538 && !match_broadcast_size (t
, op
)))
6541 i
.error
= unsupported_broadcast
;
6545 operand_type_set (&type
, 0);
6546 switch (get_broadcast_bytes (t
, false))
6549 type
.bitfield
.word
= 1;
6552 type
.bitfield
.dword
= 1;
6555 type
.bitfield
.qword
= 1;
6558 type
.bitfield
.xmmword
= 1;
6561 type
.bitfield
.ymmword
= 1;
6564 type
.bitfield
.zmmword
= 1;
6570 overlap
= operand_type_and (type
, t
->operand_types
[op
]);
6571 if (t
->operand_types
[op
].bitfield
.class == RegSIMD
6572 && t
->operand_types
[op
].bitfield
.byte
6573 + t
->operand_types
[op
].bitfield
.word
6574 + t
->operand_types
[op
].bitfield
.dword
6575 + t
->operand_types
[op
].bitfield
.qword
> 1)
6577 overlap
.bitfield
.xmmword
= 0;
6578 overlap
.bitfield
.ymmword
= 0;
6579 overlap
.bitfield
.zmmword
= 0;
6581 if (operand_type_all_zero (&overlap
))
6584 if (t
->opcode_modifier
.checkoperandsize
)
6588 type
.bitfield
.baseindex
= 1;
6589 for (j
= 0; j
< i
.operands
; ++j
)
6592 && !operand_type_register_match(i
.types
[j
],
6593 t
->operand_types
[j
],
6595 t
->operand_types
[op
]))
6600 /* If broadcast is supported in this instruction, we need to check if
6601 operand of one-element size isn't specified without broadcast. */
6602 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
6604 /* Find memory operand. */
6605 for (op
= 0; op
< i
.operands
; op
++)
6606 if (i
.flags
[op
] & Operand_Mem
)
6608 gas_assert (op
< i
.operands
);
6609 /* Check size of the memory operand. */
6610 if (match_broadcast_size (t
, op
))
6612 i
.error
= broadcast_needed
;
6617 op
= MAX_OPERANDS
- 1; /* Avoid uninitialized variable warning. */
6619 /* Check if requested masking is supported. */
6622 if (!t
->opcode_modifier
.masking
)
6624 i
.error
= unsupported_masking
;
6628 /* Common rules for masking:
6629 - mask register destinations permit only zeroing-masking, without
6630 that actually being expressed by a {z} operand suffix or EVEX.z,
6631 - memory destinations allow only merging-masking,
6632 - scatter/gather insns (i.e. ones using vSIB) only allow merging-
6635 && (t
->operand_types
[t
->operands
- 1].bitfield
.class == RegMask
6636 || (i
.flags
[t
->operands
- 1] & Operand_Mem
)
6637 || t
->opcode_modifier
.sib
))
6639 i
.error
= unsupported_masking
;
6644 /* Check if masking is applied to dest operand. */
6645 if (i
.mask
.reg
&& (i
.mask
.operand
!= i
.operands
- 1))
6647 i
.error
= mask_not_on_destination
;
6652 if (i
.rounding
.type
!= rc_none
)
6654 if (!t
->opcode_modifier
.sae
6655 || ((i
.rounding
.type
!= saeonly
) != t
->opcode_modifier
.staticrounding
)
6658 i
.error
= unsupported_rc_sae
;
6662 /* Non-EVEX.LIG forms need to have a ZMM register as at least one
6664 if (t
->opcode_modifier
.evex
!= EVEXLIG
)
6666 for (op
= 0; op
< t
->operands
; ++op
)
6667 if (i
.types
[op
].bitfield
.zmmword
)
6669 if (op
>= t
->operands
)
6671 i
.error
= operand_size_mismatch
;
6677 /* Check the special Imm4 cases; must be the first operand. */
6678 if (t
->cpu_flags
.bitfield
.cpuxop
&& t
->operands
== 5)
6680 if (i
.op
[0].imms
->X_op
!= O_constant
6681 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
6687 /* Turn off Imm<N> so that update_imm won't complain. */
6688 operand_type_set (&i
.types
[0], 0);
6691 /* Check vector Disp8 operand. */
6692 if (t
->opcode_modifier
.disp8memshift
6693 && i
.disp_encoding
<= disp_encoding_8bit
)
6695 if (i
.broadcast
.type
|| i
.broadcast
.bytes
)
6696 i
.memshift
= t
->opcode_modifier
.broadcast
- 1;
6697 else if (t
->opcode_modifier
.disp8memshift
!= DISP8_SHIFT_VL
)
6698 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
6701 const i386_operand_type
*type
= NULL
, *fallback
= NULL
;
6704 for (op
= 0; op
< i
.operands
; op
++)
6705 if (i
.flags
[op
] & Operand_Mem
)
6707 if (t
->opcode_modifier
.evex
== EVEXLIG
)
6708 i
.memshift
= 2 + (i
.suffix
== QWORD_MNEM_SUFFIX
);
6709 else if (t
->operand_types
[op
].bitfield
.xmmword
6710 + t
->operand_types
[op
].bitfield
.ymmword
6711 + t
->operand_types
[op
].bitfield
.zmmword
<= 1)
6712 type
= &t
->operand_types
[op
];
6713 else if (!i
.types
[op
].bitfield
.unspecified
)
6714 type
= &i
.types
[op
];
6715 else /* Ambiguities get resolved elsewhere. */
6716 fallback
= &t
->operand_types
[op
];
6718 else if (i
.types
[op
].bitfield
.class == RegSIMD
6719 && t
->opcode_modifier
.evex
!= EVEXLIG
)
6721 if (i
.types
[op
].bitfield
.zmmword
)
6723 else if (i
.types
[op
].bitfield
.ymmword
&& i
.memshift
< 5)
6725 else if (i
.types
[op
].bitfield
.xmmword
&& i
.memshift
< 4)
6729 if (!type
&& !i
.memshift
)
6733 if (type
->bitfield
.zmmword
)
6735 else if (type
->bitfield
.ymmword
)
6737 else if (type
->bitfield
.xmmword
)
6741 /* For the check in fits_in_disp8(). */
6742 if (i
.memshift
== 0)
6746 for (op
= 0; op
< i
.operands
; op
++)
6747 if (operand_type_check (i
.types
[op
], disp
)
6748 && i
.op
[op
].disps
->X_op
== O_constant
)
6750 if (fits_in_disp8 (i
.op
[op
].disps
->X_add_number
))
6752 i
.types
[op
].bitfield
.disp8
= 1;
6755 i
.types
[op
].bitfield
.disp8
= 0;
6764 /* Check if encoding requirements are met by the instruction. */
6767 VEX_check_encoding (const insn_template
*t
)
6769 if (i
.vec_encoding
== vex_encoding_error
)
6771 i
.error
= unsupported
;
6775 if (i
.vec_encoding
== vex_encoding_evex
)
6777 /* This instruction must be encoded with EVEX prefix. */
6778 if (!is_evex_encoding (t
))
6780 i
.error
= unsupported
;
6786 if (!t
->opcode_modifier
.vex
)
6788 /* This instruction template doesn't have VEX prefix. */
6789 if (i
.vec_encoding
!= vex_encoding_default
)
6791 i
.error
= unsupported
;
6800 /* Helper function for the progress() macro in match_template(). */
6801 static INLINE
enum i386_error
progress (enum i386_error
new,
6802 enum i386_error last
,
6803 unsigned int line
, unsigned int *line_p
)
6805 if (line
<= *line_p
)
6811 static const insn_template
*
6812 match_template (char mnem_suffix
)
6814 /* Points to template once we've found it. */
6815 const insn_template
*t
;
6816 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
6817 i386_operand_type overlap4
;
6818 unsigned int found_reverse_match
;
6819 i386_operand_type operand_types
[MAX_OPERANDS
];
6820 int addr_prefix_disp
;
6821 unsigned int j
, size_match
, check_register
, errline
= __LINE__
;
6822 enum i386_error specific_error
= number_of_operands_mismatch
;
6823 #define progress(err) progress (err, specific_error, __LINE__, &errline)
6825 #if MAX_OPERANDS != 5
6826 # error "MAX_OPERANDS must be 5."
6829 found_reverse_match
= 0;
6830 addr_prefix_disp
= -1;
6832 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
6834 addr_prefix_disp
= -1;
6835 found_reverse_match
= 0;
6837 /* Must have right number of operands. */
6838 if (i
.operands
!= t
->operands
)
6841 /* Check processor support. */
6842 specific_error
= progress (unsupported
);
6843 if (cpu_flags_match (t
) != CPU_FLAGS_PERFECT_MATCH
)
6846 /* Check AT&T mnemonic. */
6847 specific_error
= progress (unsupported_with_intel_mnemonic
);
6848 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
6851 /* Check AT&T/Intel syntax. */
6852 specific_error
= progress (unsupported_syntax
);
6853 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
6854 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
))
6857 /* Check Intel64/AMD64 ISA. */
6861 /* Default: Don't accept Intel64. */
6862 if (t
->opcode_modifier
.isa64
== INTEL64
)
6866 /* -mamd64: Don't accept Intel64 and Intel64 only. */
6867 if (t
->opcode_modifier
.isa64
>= INTEL64
)
6871 /* -mintel64: Don't accept AMD64. */
6872 if (t
->opcode_modifier
.isa64
== AMD64
&& flag_code
== CODE_64BIT
)
6877 /* Check the suffix. */
6878 specific_error
= progress (invalid_instruction_suffix
);
6879 if ((t
->opcode_modifier
.no_bsuf
&& mnem_suffix
== BYTE_MNEM_SUFFIX
)
6880 || (t
->opcode_modifier
.no_wsuf
&& mnem_suffix
== WORD_MNEM_SUFFIX
)
6881 || (t
->opcode_modifier
.no_lsuf
&& mnem_suffix
== LONG_MNEM_SUFFIX
)
6882 || (t
->opcode_modifier
.no_ssuf
&& mnem_suffix
== SHORT_MNEM_SUFFIX
)
6883 || (t
->opcode_modifier
.no_qsuf
&& mnem_suffix
== QWORD_MNEM_SUFFIX
))
6886 specific_error
= progress (operand_size_mismatch
);
6887 size_match
= operand_size_match (t
);
6891 /* This is intentionally not
6893 if (i.jumpabsolute != (t->opcode_modifier.jump == JUMP_ABSOLUTE))
6895 as the case of a missing * on the operand is accepted (perhaps with
6896 a warning, issued further down). */
6897 specific_error
= progress (operand_type_mismatch
);
6898 if (i
.jumpabsolute
&& t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
6901 /* In Intel syntax, normally we can check for memory operand size when
6902 there is no mnemonic suffix. But jmp and call have 2 different
6903 encodings with Dword memory operand size. Skip the "near" one
6904 (permitting a register operand) when "far" was requested. */
6906 && t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
6907 && t
->operand_types
[0].bitfield
.class == Reg
)
6910 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6911 operand_types
[j
] = t
->operand_types
[j
];
6913 /* In general, don't allow 32-bit operands on pre-386. */
6914 specific_error
= progress (mnem_suffix
? invalid_instruction_suffix
6915 : operand_size_mismatch
);
6916 j
= i
.imm_operands
+ (t
->operands
> i
.imm_operands
+ 1);
6917 if (i
.suffix
== LONG_MNEM_SUFFIX
6918 && !cpu_arch_flags
.bitfield
.cpui386
6920 ? (t
->opcode_modifier
.mnemonicsize
!= IGNORESIZE
6921 && !intel_float_operand (insn_name (t
)))
6922 : intel_float_operand (insn_name (t
)) != 2)
6923 && (t
->operands
== i
.imm_operands
6924 || (operand_types
[i
.imm_operands
].bitfield
.class != RegMMX
6925 && operand_types
[i
.imm_operands
].bitfield
.class != RegSIMD
6926 && operand_types
[i
.imm_operands
].bitfield
.class != RegMask
)
6927 || (operand_types
[j
].bitfield
.class != RegMMX
6928 && operand_types
[j
].bitfield
.class != RegSIMD
6929 && operand_types
[j
].bitfield
.class != RegMask
))
6930 && !t
->opcode_modifier
.sib
)
6933 /* Do not verify operands when there are none. */
6936 if (VEX_check_encoding (t
))
6938 specific_error
= progress (i
.error
);
6942 /* We've found a match; break out of loop. */
6946 if (!t
->opcode_modifier
.jump
6947 || t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)
6949 /* There should be only one Disp operand. */
6950 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6951 if (operand_type_check (operand_types
[j
], disp
))
6953 if (j
< MAX_OPERANDS
)
6955 bool override
= (i
.prefix
[ADDR_PREFIX
] != 0);
6957 addr_prefix_disp
= j
;
6959 /* Address size prefix will turn Disp64 operand into Disp32 and
6960 Disp32/Disp16 one into Disp16/Disp32 respectively. */
6964 override
= !override
;
6967 if (operand_types
[j
].bitfield
.disp32
6968 && operand_types
[j
].bitfield
.disp16
)
6970 operand_types
[j
].bitfield
.disp16
= override
;
6971 operand_types
[j
].bitfield
.disp32
= !override
;
6973 gas_assert (!operand_types
[j
].bitfield
.disp64
);
6977 if (operand_types
[j
].bitfield
.disp64
)
6979 gas_assert (!operand_types
[j
].bitfield
.disp32
);
6980 operand_types
[j
].bitfield
.disp32
= override
;
6981 operand_types
[j
].bitfield
.disp64
= !override
;
6983 operand_types
[j
].bitfield
.disp16
= 0;
6989 /* We check register size if needed. */
6990 if (t
->opcode_modifier
.checkoperandsize
)
6992 check_register
= (1 << t
->operands
) - 1;
6993 if (i
.broadcast
.type
|| i
.broadcast
.bytes
)
6994 check_register
&= ~(1 << i
.broadcast
.operand
);
6999 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
7000 switch (t
->operands
)
7003 if (!operand_type_match (overlap0
, i
.types
[0]))
7006 /* Allow the ModR/M encoding to be requested by using the {load} or
7007 {store} pseudo prefix on an applicable insn. */
7008 if (!t
->opcode_modifier
.modrm
7009 && i
.reg_operands
== 1
7010 && ((i
.dir_encoding
== dir_encoding_load
7011 && t
->mnem_off
!= MN_pop
)
7012 || (i
.dir_encoding
== dir_encoding_store
7013 && t
->mnem_off
!= MN_push
))
7015 && t
->mnem_off
!= MN_bswap
)
7020 /* xchg %eax, %eax is a special case. It is an alias for nop
7021 only in 32bit mode and we can use opcode 0x90. In 64bit
7022 mode, we can't use 0x90 for xchg %eax, %eax since it should
7023 zero-extend %eax to %rax. */
7024 if (t
->base_opcode
== 0x90
7025 && t
->opcode_space
== SPACE_BASE
)
7027 if (flag_code
== CODE_64BIT
7028 && i
.types
[0].bitfield
.instance
== Accum
7029 && i
.types
[0].bitfield
.dword
7030 && i
.types
[1].bitfield
.instance
== Accum
)
7033 /* Allow the ModR/M encoding to be requested by using the
7034 {load} or {store} pseudo prefix. */
7035 if (i
.dir_encoding
== dir_encoding_load
7036 || i
.dir_encoding
== dir_encoding_store
)
7040 if (t
->base_opcode
== MOV_AX_DISP32
7041 && t
->opcode_space
== SPACE_BASE
7042 && t
->mnem_off
!= MN_movabs
)
7044 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
7045 if (i
.reloc
[0] == BFD_RELOC_386_GOT32
)
7048 /* xrelease mov %eax, <disp> is another special case. It must not
7049 match the accumulator-only encoding of mov. */
7053 /* Allow the ModR/M encoding to be requested by using a suitable
7054 {load} or {store} pseudo prefix. */
7055 if (i
.dir_encoding
== (i
.types
[0].bitfield
.instance
== Accum
7056 ? dir_encoding_store
7057 : dir_encoding_load
)
7058 && !i
.types
[0].bitfield
.disp64
7059 && !i
.types
[1].bitfield
.disp64
)
7063 /* Allow the ModR/M encoding to be requested by using the {load} or
7064 {store} pseudo prefix on an applicable insn. */
7065 if (!t
->opcode_modifier
.modrm
7066 && i
.reg_operands
== 1
7067 && i
.imm_operands
== 1
7068 && (i
.dir_encoding
== dir_encoding_load
7069 || i
.dir_encoding
== dir_encoding_store
)
7070 && t
->opcode_space
== SPACE_BASE
)
7072 if (t
->base_opcode
== 0xb0 /* mov $imm, %reg */
7073 && i
.dir_encoding
== dir_encoding_store
)
7076 if ((t
->base_opcode
| 0x38) == 0x3c /* <alu> $imm, %acc */
7077 && (t
->base_opcode
!= 0x3c /* cmp $imm, %acc */
7078 || i
.dir_encoding
== dir_encoding_load
))
7081 if (t
->base_opcode
== 0xa8 /* test $imm, %acc */
7082 && i
.dir_encoding
== dir_encoding_load
)
7088 if (!(size_match
& MATCH_STRAIGHT
))
7090 /* Reverse direction of operands if swapping is possible in the first
7091 place (operands need to be symmetric) and
7092 - the load form is requested, and the template is a store form,
7093 - the store form is requested, and the template is a load form,
7094 - the non-default (swapped) form is requested. */
7095 overlap1
= operand_type_and (operand_types
[0], operand_types
[1]);
7096 if (t
->opcode_modifier
.d
&& i
.reg_operands
== i
.operands
7097 && !operand_type_all_zero (&overlap1
))
7098 switch (i
.dir_encoding
)
7100 case dir_encoding_load
:
7101 if (operand_type_check (operand_types
[i
.operands
- 1], anymem
)
7102 || t
->opcode_modifier
.regmem
)
7106 case dir_encoding_store
:
7107 if (!operand_type_check (operand_types
[i
.operands
- 1], anymem
)
7108 && !t
->opcode_modifier
.regmem
)
7112 case dir_encoding_swap
:
7115 case dir_encoding_default
:
7118 /* If we want store form, we skip the current load. */
7119 if ((i
.dir_encoding
== dir_encoding_store
7120 || i
.dir_encoding
== dir_encoding_swap
)
7121 && i
.mem_operands
== 0
7122 && t
->opcode_modifier
.load
)
7127 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
7128 if (!operand_type_match (overlap0
, i
.types
[0])
7129 || !operand_type_match (overlap1
, i
.types
[1])
7130 || ((check_register
& 3) == 3
7131 && !operand_type_register_match (i
.types
[0],
7136 specific_error
= progress (i
.error
);
7138 /* Check if other direction is valid ... */
7139 if (!t
->opcode_modifier
.d
)
7143 if (!(size_match
& MATCH_REVERSE
))
7145 /* Try reversing direction of operands. */
7146 j
= t
->cpu_flags
.bitfield
.cpufma4
7147 || t
->cpu_flags
.bitfield
.cpuxop
? 1 : i
.operands
- 1;
7148 overlap0
= operand_type_and (i
.types
[0], operand_types
[j
]);
7149 overlap1
= operand_type_and (i
.types
[j
], operand_types
[0]);
7150 overlap2
= operand_type_and (i
.types
[1], operand_types
[1]);
7151 gas_assert (t
->operands
!= 3 || !check_register
);
7152 if (!operand_type_match (overlap0
, i
.types
[0])
7153 || !operand_type_match (overlap1
, i
.types
[j
])
7154 || (t
->operands
== 3
7155 && !operand_type_match (overlap2
, i
.types
[1]))
7157 && !operand_type_register_match (i
.types
[0],
7162 /* Does not match either direction. */
7163 specific_error
= progress (i
.error
);
7166 /* found_reverse_match holds which variant of D
7168 if (!t
->opcode_modifier
.d
)
7169 found_reverse_match
= 0;
7170 else if (operand_types
[0].bitfield
.tbyte
)
7172 if (t
->opcode_modifier
.operandconstraint
!= UGH
)
7173 found_reverse_match
= Opcode_FloatD
;
7175 found_reverse_match
= ~0;
7176 /* FSUB{,R} and FDIV{,R} may need a 2nd bit flipped. */
7177 if ((t
->extension_opcode
& 4)
7178 && (intel_syntax
|| intel_mnemonic
))
7179 found_reverse_match
|= Opcode_FloatR
;
7181 else if (t
->cpu_flags
.bitfield
.cpufma4
7182 || t
->cpu_flags
.bitfield
.cpuxop
)
7184 found_reverse_match
= Opcode_VexW
;
7185 goto check_operands_345
;
7187 else if (t
->opcode_space
!= SPACE_BASE
7188 && (t
->opcode_space
!= SPACE_0F
7189 /* MOV to/from CR/DR/TR, as an exception, follow
7190 the base opcode space encoding model. */
7191 || (t
->base_opcode
| 7) != 0x27))
7192 found_reverse_match
= (t
->base_opcode
& 0xee) != 0x6e
7193 ? Opcode_ExtD
: Opcode_SIMD_IntD
;
7194 else if (!t
->opcode_modifier
.commutative
)
7195 found_reverse_match
= Opcode_D
;
7197 found_reverse_match
= ~0;
7201 /* Found a forward 2 operand match here. */
7203 switch (t
->operands
)
7206 overlap4
= operand_type_and (i
.types
[4], operand_types
[4]);
7207 if (!operand_type_match (overlap4
, i
.types
[4])
7208 || !operand_type_register_match (i
.types
[3],
7213 specific_error
= progress (i
.error
);
7218 overlap3
= operand_type_and (i
.types
[3], operand_types
[3]);
7219 if (!operand_type_match (overlap3
, i
.types
[3])
7220 || ((check_register
& 0xa) == 0xa
7221 && !operand_type_register_match (i
.types
[1],
7225 || ((check_register
& 0xc) == 0xc
7226 && !operand_type_register_match (i
.types
[2],
7231 specific_error
= progress (i
.error
);
7236 overlap2
= operand_type_and (i
.types
[2], operand_types
[2]);
7237 if (!operand_type_match (overlap2
, i
.types
[2])
7238 || ((check_register
& 5) == 5
7239 && !operand_type_register_match (i
.types
[0],
7243 || ((check_register
& 6) == 6
7244 && !operand_type_register_match (i
.types
[1],
7249 specific_error
= progress (i
.error
);
7255 /* Found either forward/reverse 2, 3 or 4 operand match here:
7256 slip through to break. */
7259 /* Check if VEX/EVEX encoding requirements can be satisfied. */
7260 if (VEX_check_encoding (t
))
7262 specific_error
= progress (i
.error
);
7266 /* Check if vector operands are valid. */
7267 if (check_VecOperands (t
))
7269 specific_error
= progress (i
.error
);
7273 /* We've found a match; break out of loop. */
7279 if (t
== current_templates
->end
)
7281 /* We found no match. */
7282 i
.error
= specific_error
;
7286 if (!quiet_warnings
)
7289 && (i
.jumpabsolute
!= (t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)))
7290 as_warn (_("indirect %s without `*'"), insn_name (t
));
7292 if (t
->opcode_modifier
.isprefix
7293 && t
->opcode_modifier
.mnemonicsize
== IGNORESIZE
)
7295 /* Warn them that a data or address size prefix doesn't
7296 affect assembly of the next line of code. */
7297 as_warn (_("stand-alone `%s' prefix"), insn_name (t
));
7301 /* Copy the template we found. */
7302 install_template (t
);
7304 if (addr_prefix_disp
!= -1)
7305 i
.tm
.operand_types
[addr_prefix_disp
]
7306 = operand_types
[addr_prefix_disp
];
7308 switch (found_reverse_match
)
7314 case Opcode_FloatR
| Opcode_FloatD
:
7315 i
.tm
.extension_opcode
^= Opcode_FloatR
>> 3;
7316 found_reverse_match
&= Opcode_FloatD
;
7320 /* If we found a reverse match we must alter the opcode direction
7321 bit and clear/flip the regmem modifier one. found_reverse_match
7322 holds bits to change (different for int & float insns). */
7324 i
.tm
.base_opcode
^= found_reverse_match
;
7326 /* Certain SIMD insns have their load forms specified in the opcode
7327 table, and hence we need to _set_ RegMem instead of clearing it.
7328 We need to avoid setting the bit though on insns like KMOVW. */
7329 i
.tm
.opcode_modifier
.regmem
7330 = i
.tm
.opcode_modifier
.modrm
&& i
.tm
.opcode_modifier
.d
7331 && i
.tm
.operands
> 2U - i
.tm
.opcode_modifier
.sse2avx
7332 && !i
.tm
.opcode_modifier
.regmem
;
7336 i
.tm
.operand_types
[0] = operand_types
[i
.operands
- 1];
7337 i
.tm
.operand_types
[i
.operands
- 1] = operand_types
[0];
7341 /* Only the first two register operands need reversing, alongside
7343 i
.tm
.opcode_modifier
.vexw
^= VEXW0
^ VEXW1
;
7345 j
= i
.tm
.operand_types
[0].bitfield
.imm8
;
7346 i
.tm
.operand_types
[j
] = operand_types
[j
+ 1];
7347 i
.tm
.operand_types
[j
+ 1] = operand_types
[j
];
7357 unsigned int es_op
= i
.tm
.opcode_modifier
.isstring
- IS_STRING_ES_OP0
;
7358 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.baseindex
? es_op
: 0;
7360 if (i
.seg
[op
] != NULL
&& i
.seg
[op
] != reg_es
)
7362 as_bad (_("`%s' operand %u must use `%ses' segment"),
7364 intel_syntax
? i
.tm
.operands
- es_op
: es_op
+ 1,
7369 /* There's only ever one segment override allowed per instruction.
7370 This instruction possibly has a legal segment override on the
7371 second operand, so copy the segment to where non-string
7372 instructions store it, allowing common code. */
7373 i
.seg
[op
] = i
.seg
[1];
7379 process_suffix (void)
7381 bool is_movx
= false;
7383 /* If matched instruction specifies an explicit instruction mnemonic
7385 if (i
.tm
.opcode_modifier
.size
== SIZE16
)
7386 i
.suffix
= WORD_MNEM_SUFFIX
;
7387 else if (i
.tm
.opcode_modifier
.size
== SIZE32
)
7388 i
.suffix
= LONG_MNEM_SUFFIX
;
7389 else if (i
.tm
.opcode_modifier
.size
== SIZE64
)
7390 i
.suffix
= QWORD_MNEM_SUFFIX
;
7391 else if (i
.reg_operands
7392 && (i
.operands
> 1 || i
.types
[0].bitfield
.class == Reg
)
7393 && i
.tm
.opcode_modifier
.operandconstraint
!= ADDR_PREFIX_OP_REG
)
7395 unsigned int numop
= i
.operands
;
7398 is_movx
= (i
.tm
.opcode_space
== SPACE_0F
7399 && (i
.tm
.base_opcode
| 8) == 0xbe)
7400 || (i
.tm
.opcode_space
== SPACE_BASE
7401 && i
.tm
.base_opcode
== 0x63
7402 && i
.tm
.cpu_flags
.bitfield
.cpu64
);
7404 /* movsx/movzx want only their source operand considered here, for the
7405 ambiguity checking below. The suffix will be replaced afterwards
7406 to represent the destination (register). */
7407 if (is_movx
&& (i
.tm
.opcode_modifier
.w
|| i
.tm
.base_opcode
== 0x63))
7410 /* crc32 needs REX.W set regardless of suffix / source operand size. */
7411 if (i
.tm
.mnem_off
== MN_crc32
&& i
.tm
.operand_types
[1].bitfield
.qword
)
7414 /* If there's no instruction mnemonic suffix we try to invent one
7415 based on GPR operands. */
7418 /* We take i.suffix from the last register operand specified,
7419 Destination register type is more significant than source
7420 register type. crc32 in SSE4.2 prefers source register
7422 unsigned int op
= i
.tm
.mnem_off
== MN_crc32
? 1 : i
.operands
;
7425 if (i
.tm
.operand_types
[op
].bitfield
.instance
== InstanceNone
7426 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7428 if (i
.types
[op
].bitfield
.class != Reg
)
7430 if (i
.types
[op
].bitfield
.byte
)
7431 i
.suffix
= BYTE_MNEM_SUFFIX
;
7432 else if (i
.types
[op
].bitfield
.word
)
7433 i
.suffix
= WORD_MNEM_SUFFIX
;
7434 else if (i
.types
[op
].bitfield
.dword
)
7435 i
.suffix
= LONG_MNEM_SUFFIX
;
7436 else if (i
.types
[op
].bitfield
.qword
)
7437 i
.suffix
= QWORD_MNEM_SUFFIX
;
7443 /* As an exception, movsx/movzx silently default to a byte source
7445 if (is_movx
&& i
.tm
.opcode_modifier
.w
&& !i
.suffix
&& !intel_syntax
)
7446 i
.suffix
= BYTE_MNEM_SUFFIX
;
7448 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
7450 if (!check_byte_reg ())
7453 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
7455 if (!check_long_reg ())
7458 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
7460 if (!check_qword_reg ())
7463 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
7465 if (!check_word_reg ())
7468 else if (intel_syntax
7469 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
)
7470 /* Do nothing if the instruction is going to ignore the prefix. */
7475 /* Undo the movsx/movzx change done above. */
7478 else if (i
.tm
.opcode_modifier
.mnemonicsize
== DEFAULTSIZE
7481 i
.suffix
= stackop_size
;
7482 if (stackop_size
== LONG_MNEM_SUFFIX
)
7484 /* stackop_size is set to LONG_MNEM_SUFFIX for the
7485 .code16gcc directive to support 16-bit mode with
7486 32-bit address. For IRET without a suffix, generate
7487 16-bit IRET (opcode 0xcf) to return from an interrupt
7489 if (i
.tm
.base_opcode
== 0xcf)
7491 i
.suffix
= WORD_MNEM_SUFFIX
;
7492 as_warn (_("generating 16-bit `iret' for .code16gcc directive"));
7494 /* Warn about changed behavior for segment register push/pop. */
7495 else if ((i
.tm
.base_opcode
| 1) == 0x07)
7496 as_warn (_("generating 32-bit `%s', unlike earlier gas versions"),
7501 && (i
.tm
.opcode_modifier
.jump
== JUMP_ABSOLUTE
7502 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
7503 || i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
7504 || (i
.tm
.opcode_space
== SPACE_0F
7505 && i
.tm
.base_opcode
== 0x01 /* [ls][gi]dt */
7506 && i
.tm
.extension_opcode
<= 3)))
7511 if (!i
.tm
.opcode_modifier
.no_qsuf
)
7513 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
7514 || i
.tm
.opcode_modifier
.no_lsuf
)
7515 i
.suffix
= QWORD_MNEM_SUFFIX
;
7520 if (!i
.tm
.opcode_modifier
.no_lsuf
)
7521 i
.suffix
= LONG_MNEM_SUFFIX
;
7524 if (!i
.tm
.opcode_modifier
.no_wsuf
)
7525 i
.suffix
= WORD_MNEM_SUFFIX
;
7531 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
7532 /* Also cover lret/retf/iret in 64-bit mode. */
7533 || (flag_code
== CODE_64BIT
7534 && !i
.tm
.opcode_modifier
.no_lsuf
7535 && !i
.tm
.opcode_modifier
.no_qsuf
))
7536 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
7537 /* Explicit sizing prefixes are assumed to disambiguate insns. */
7538 && !i
.prefix
[DATA_PREFIX
] && !(i
.prefix
[REX_PREFIX
] & REX_W
)
7539 /* Accept FLDENV et al without suffix. */
7540 && (i
.tm
.opcode_modifier
.no_ssuf
|| i
.tm
.opcode_modifier
.floatmf
))
7542 unsigned int suffixes
, evex
= 0;
7544 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
7545 if (!i
.tm
.opcode_modifier
.no_wsuf
)
7547 if (!i
.tm
.opcode_modifier
.no_lsuf
)
7549 if (!i
.tm
.opcode_modifier
.no_ssuf
)
7551 if (flag_code
== CODE_64BIT
&& !i
.tm
.opcode_modifier
.no_qsuf
)
7554 /* For [XYZ]MMWORD operands inspect operand sizes. While generally
7555 also suitable for AT&T syntax mode, it was requested that this be
7556 restricted to just Intel syntax. */
7557 if (intel_syntax
&& is_any_vex_encoding (&i
.tm
)
7558 && !i
.broadcast
.type
&& !i
.broadcast
.bytes
)
7562 for (op
= 0; op
< i
.tm
.operands
; ++op
)
7564 if (is_evex_encoding (&i
.tm
)
7565 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
7567 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
7568 i
.tm
.operand_types
[op
].bitfield
.xmmword
= 0;
7569 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
7570 i
.tm
.operand_types
[op
].bitfield
.ymmword
= 0;
7571 if (!i
.tm
.opcode_modifier
.evex
7572 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
7573 i
.tm
.opcode_modifier
.evex
= EVEX512
;
7576 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
7577 + i
.tm
.operand_types
[op
].bitfield
.ymmword
7578 + i
.tm
.operand_types
[op
].bitfield
.zmmword
< 2)
7581 /* Any properly sized operand disambiguates the insn. */
7582 if (i
.types
[op
].bitfield
.xmmword
7583 || i
.types
[op
].bitfield
.ymmword
7584 || i
.types
[op
].bitfield
.zmmword
)
7586 suffixes
&= ~(7 << 6);
7591 if ((i
.flags
[op
] & Operand_Mem
)
7592 && i
.tm
.operand_types
[op
].bitfield
.unspecified
)
7594 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
)
7596 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
7598 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
7600 if (is_evex_encoding (&i
.tm
))
7606 /* Are multiple suffixes / operand sizes allowed? */
7607 if (suffixes
& (suffixes
- 1))
7610 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
7611 || operand_check
== check_error
))
7613 as_bad (_("ambiguous operand size for `%s'"), insn_name (&i
.tm
));
7616 if (operand_check
== check_error
)
7618 as_bad (_("no instruction mnemonic suffix given and "
7619 "no register operands; can't size `%s'"), insn_name (&i
.tm
));
7622 if (operand_check
== check_warning
)
7623 as_warn (_("%s; using default for `%s'"),
7625 ? _("ambiguous operand size")
7626 : _("no instruction mnemonic suffix given and "
7627 "no register operands"),
7630 if (i
.tm
.opcode_modifier
.floatmf
)
7631 i
.suffix
= SHORT_MNEM_SUFFIX
;
7633 /* handled below */;
7635 i
.tm
.opcode_modifier
.evex
= evex
;
7636 else if (flag_code
== CODE_16BIT
)
7637 i
.suffix
= WORD_MNEM_SUFFIX
;
7638 else if (!i
.tm
.opcode_modifier
.no_lsuf
)
7639 i
.suffix
= LONG_MNEM_SUFFIX
;
7641 i
.suffix
= QWORD_MNEM_SUFFIX
;
7647 /* In Intel syntax, movsx/movzx must have a "suffix" (checked above).
7648 In AT&T syntax, if there is no suffix (warned about above), the default
7649 will be byte extension. */
7650 if (i
.tm
.opcode_modifier
.w
&& i
.suffix
&& i
.suffix
!= BYTE_MNEM_SUFFIX
)
7651 i
.tm
.base_opcode
|= 1;
7653 /* For further processing, the suffix should represent the destination
7654 (register). This is already the case when one was used with
7655 mov[sz][bw]*, but we need to replace it for mov[sz]x, or if there was
7656 no suffix to begin with. */
7657 if (i
.tm
.opcode_modifier
.w
|| i
.tm
.base_opcode
== 0x63 || !i
.suffix
)
7659 if (i
.types
[1].bitfield
.word
)
7660 i
.suffix
= WORD_MNEM_SUFFIX
;
7661 else if (i
.types
[1].bitfield
.qword
)
7662 i
.suffix
= QWORD_MNEM_SUFFIX
;
7664 i
.suffix
= LONG_MNEM_SUFFIX
;
7666 i
.tm
.opcode_modifier
.w
= 0;
7670 if (!i
.tm
.opcode_modifier
.modrm
&& i
.reg_operands
&& i
.tm
.operands
< 3)
7671 i
.short_form
= (i
.tm
.operand_types
[0].bitfield
.class == Reg
)
7672 != (i
.tm
.operand_types
[1].bitfield
.class == Reg
);
7674 /* Change the opcode based on the operand size given by i.suffix. */
7677 /* Size floating point instruction. */
7678 case LONG_MNEM_SUFFIX
:
7679 if (i
.tm
.opcode_modifier
.floatmf
)
7681 i
.tm
.base_opcode
^= 4;
7685 case WORD_MNEM_SUFFIX
:
7686 case QWORD_MNEM_SUFFIX
:
7687 /* It's not a byte, select word/dword operation. */
7688 if (i
.tm
.opcode_modifier
.w
)
7691 i
.tm
.base_opcode
|= 8;
7693 i
.tm
.base_opcode
|= 1;
7696 case SHORT_MNEM_SUFFIX
:
7697 /* Now select between word & dword operations via the operand
7698 size prefix, except for instructions that will ignore this
7700 if (i
.suffix
!= QWORD_MNEM_SUFFIX
7701 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
7702 && !i
.tm
.opcode_modifier
.floatmf
7703 && !is_any_vex_encoding (&i
.tm
)
7704 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
7705 || (flag_code
== CODE_64BIT
7706 && i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)))
7708 unsigned int prefix
= DATA_PREFIX_OPCODE
;
7710 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
) /* jcxz, loop */
7711 prefix
= ADDR_PREFIX_OPCODE
;
7713 if (!add_prefix (prefix
))
7717 /* Set mode64 for an operand. */
7718 if (i
.suffix
== QWORD_MNEM_SUFFIX
7719 && flag_code
== CODE_64BIT
7720 && !i
.tm
.opcode_modifier
.norex64
7721 && !i
.tm
.opcode_modifier
.vexw
7722 /* Special case for xchg %rax,%rax. It is NOP and doesn't
7724 && ! (i
.operands
== 2
7725 && i
.tm
.base_opcode
== 0x90
7726 && i
.tm
.opcode_space
== SPACE_BASE
7727 && i
.types
[0].bitfield
.instance
== Accum
7728 && i
.types
[0].bitfield
.qword
7729 && i
.types
[1].bitfield
.instance
== Accum
))
7735 /* Select word/dword/qword operation with explicit data sizing prefix
7736 when there are no suitable register operands. */
7737 if (i
.tm
.opcode_modifier
.w
7738 && (i
.prefix
[DATA_PREFIX
] || (i
.prefix
[REX_PREFIX
] & REX_W
))
7740 || (i
.reg_operands
== 1
7742 && (i
.tm
.operand_types
[0].bitfield
.instance
== RegC
7744 || i
.tm
.operand_types
[0].bitfield
.instance
== RegD
7745 || i
.tm
.operand_types
[1].bitfield
.instance
== RegD
7746 || i
.tm
.mnem_off
== MN_crc32
))))
7747 i
.tm
.base_opcode
|= 1;
7751 if (i
.tm
.opcode_modifier
.operandconstraint
== ADDR_PREFIX_OP_REG
)
7753 gas_assert (!i
.suffix
);
7754 gas_assert (i
.reg_operands
);
7756 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7759 /* The address size override prefix changes the size of the
7761 if (flag_code
== CODE_64BIT
7762 && i
.op
[0].regs
->reg_type
.bitfield
.word
)
7764 as_bad (_("16-bit addressing unavailable for `%s'"),
7769 if ((flag_code
== CODE_32BIT
7770 ? i
.op
[0].regs
->reg_type
.bitfield
.word
7771 : i
.op
[0].regs
->reg_type
.bitfield
.dword
)
7772 && !add_prefix (ADDR_PREFIX_OPCODE
))
7777 /* Check invalid register operand when the address size override
7778 prefix changes the size of register operands. */
7780 enum { need_word
, need_dword
, need_qword
} need
;
7782 /* Check the register operand for the address size prefix if
7783 the memory operand has no real registers, like symbol, DISP
7784 or bogus (x32-only) symbol(%rip) when symbol(%eip) is meant. */
7785 if (i
.mem_operands
== 1
7786 && i
.reg_operands
== 1
7788 && i
.types
[1].bitfield
.class == Reg
7789 && (flag_code
== CODE_32BIT
7790 ? i
.op
[1].regs
->reg_type
.bitfield
.word
7791 : i
.op
[1].regs
->reg_type
.bitfield
.dword
)
7792 && ((i
.base_reg
== NULL
&& i
.index_reg
== NULL
)
7793 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7794 || (x86_elf_abi
== X86_64_X32_ABI
7796 && i
.base_reg
->reg_num
== RegIP
7797 && i
.base_reg
->reg_type
.bitfield
.qword
))
7801 && !add_prefix (ADDR_PREFIX_OPCODE
))
7804 if (flag_code
== CODE_32BIT
)
7805 need
= i
.prefix
[ADDR_PREFIX
] ? need_word
: need_dword
;
7806 else if (i
.prefix
[ADDR_PREFIX
])
7809 need
= flag_code
== CODE_64BIT
? need_qword
: need_word
;
7811 for (op
= 0; op
< i
.operands
; op
++)
7813 if (i
.types
[op
].bitfield
.class != Reg
)
7819 if (i
.op
[op
].regs
->reg_type
.bitfield
.word
)
7823 if (i
.op
[op
].regs
->reg_type
.bitfield
.dword
)
7827 if (i
.op
[op
].regs
->reg_type
.bitfield
.qword
)
7832 as_bad (_("invalid register operand size for `%s'"),
/* check_byte_reg: validate register operands against an in-effect `b'
   (byte) operand-size suffix.  Byte registers are accepted; so are the
   %dx-style I/O port operand (template instance RegD with .word) and
   any non-first operand of crc32; any other register class draws an
   as_bad() diagnostic.
   NOTE(review): this chunk is a lossy line-mangled extraction — the
   leading integers are upstream tc-i386.c line numbers fused into the
   text, and the gaps in that numbering mark dropped physical lines
   (braces, `continue;', `return' statements).  Presumably the function
   returns 0 after the as_bad() and 1 otherwise — TODO confirm against
   upstream binutils.  Code left byte-identical; comments only.  */
7843 check_byte_reg (void)
/* Iterate over all operands, last to first.  */
7847 for (op
= i
.operands
; --op
>= 0;)
7849 /* Skip non-register operands. */
7850 if (i
.types
[op
].bitfield
.class != Reg
)
7853 /* If this is an eight bit register, it's OK. If it's the 16 or
7854 32 bit version of an eight bit register, we will just use the
7855 low portion, and that's OK too. */
7856 if (i
.types
[op
].bitfield
.byte
)
7859 /* I/O port address operands are OK too. */
7860 if (i
.tm
.operand_types
[op
].bitfield
.instance
== RegD
7861 && i
.tm
.operand_types
[op
].bitfield
.word
)
7864 /* crc32 only wants its source operand checked here. */
7865 if (i
.tm
.mnem_off
== MN_crc32
&& op
!= 0)
7868 /* Any other register is bad. */
7869 as_bad (_("`%s%s' not allowed with `%s%c'"),
7870 register_prefix
, i
.op
[op
].regs
->reg_name
,
7871 insn_name (&i
.tm
), i
.suffix
);
/* check_long_reg: validate register operands against an in-effect `l'
   (32-bit) suffix.  Rejects byte registers unless the template asks
   for them (e.g. movzb), errors when a 16-bit general register is used
   where the template wants a dword one (missing `e' prefix), and
   errors when a 64-bit register is used with the `l' suffix.
   NOTE(review): lossy line-mangled extraction — leading integers are
   upstream line numbers; numbering gaps mark dropped lines (braces,
   `continue;'/`return', trailing as_bad() arguments).  Code left
   byte-identical; comments only.  Restore from upstream before use.  */
7878 check_long_reg (void)
/* Iterate over all operands, last to first.  */
7882 for (op
= i
.operands
; --op
>= 0;)
7883 /* Skip non-register operands. */
7884 if (i
.types
[op
].bitfield
.class != Reg
)
7886 /* Reject eight bit registers, except where the template requires
7887 them. (eg. movzb) */
7888 else if (i
.types
[op
].bitfield
.byte
7889 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7890 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7891 && (i
.tm
.operand_types
[op
].bitfield
.word
7892 || i
.tm
.operand_types
[op
].bitfield
.dword
))
/* NOTE(review): the trailing arguments of this as_bad() (insn name and
   suffix, upstream lines 7895/7897-7900) were dropped by the
   extraction.  */
7894 as_bad (_("`%s%s' not allowed with `%s%c'"),
7896 i
.op
[op
].regs
->reg_name
,
7901 /* Error if the e prefix on a general reg is missing. */
7902 else if (i
.types
[op
].bitfield
.word
7903 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7904 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7905 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7907 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7908 register_prefix
, i
.op
[op
].regs
->reg_name
,
7912 /* Warn if the r prefix on a general reg is present. */
7913 else if (i
.types
[op
].bitfield
.qword
7914 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7915 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7916 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7918 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7919 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
/* check_qword_reg: validate register operands against an in-effect `q'
   (64-bit) suffix.  Rejects byte registers unless the template
   requires them, and errors when a 16-/32-bit general register is used
   where the template wants a qword one (missing `r' prefix).
   NOTE(review): lossy line-mangled extraction — leading integers are
   upstream line numbers; numbering gaps mark dropped lines (braces,
   `continue;'/`return', trailing as_bad() arguments).  Code left
   byte-identical; comments only.  Restore from upstream before use.  */
7926 check_qword_reg (void)
/* Iterate over all operands, last to first.  */
7930 for (op
= i
.operands
; --op
>= 0; )
7931 /* Skip non-register operands. */
7932 if (i
.types
[op
].bitfield
.class != Reg
)
7934 /* Reject eight bit registers, except where the template requires
7935 them. (eg. movzb) */
7936 else if (i
.types
[op
].bitfield
.byte
7937 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7938 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7939 && (i
.tm
.operand_types
[op
].bitfield
.word
7940 || i
.tm
.operand_types
[op
].bitfield
.dword
))
/* NOTE(review): trailing as_bad() arguments (upstream 7943/7945-7948)
   were dropped by the extraction.  */
7942 as_bad (_("`%s%s' not allowed with `%s%c'"),
7944 i
.op
[op
].regs
->reg_name
,
7949 /* Warn if the r prefix on a general reg is missing. */
7950 else if ((i
.types
[op
].bitfield
.word
7951 || i
.types
[op
].bitfield
.dword
)
7952 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7953 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7954 && i
.tm
.operand_types
[op
].bitfield
.qword
)
7956 /* Prohibit these changes in the 64bit mode, since the
7957 lowering is more complicated. */
7958 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7959 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
/* check_word_reg: validate register operands against an in-effect `w'
   (16-bit) suffix.  Rejects byte registers unless the template
   requires them, and errors when a 32-/64-bit general register is used
   where the template wants a word one (stray `e'/`r' prefix).
   NOTE(review): lossy line-mangled extraction — leading integers are
   upstream line numbers; numbering gaps mark dropped lines (braces,
   `continue;'/`return', trailing as_bad() arguments).  Code left
   byte-identical; comments only.  Restore from upstream before use.  */
7966 check_word_reg (void)
/* Iterate over all operands, last to first.  */
7969 for (op
= i
.operands
; --op
>= 0;)
7970 /* Skip non-register operands. */
7971 if (i
.types
[op
].bitfield
.class != Reg
)
7973 /* Reject eight bit registers, except where the template requires
7974 them. (eg. movzb) */
7975 else if (i
.types
[op
].bitfield
.byte
7976 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7977 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7978 && (i
.tm
.operand_types
[op
].bitfield
.word
7979 || i
.tm
.operand_types
[op
].bitfield
.dword
))
/* NOTE(review): trailing as_bad() arguments (upstream 7982/7984-7987)
   were dropped by the extraction.  */
7981 as_bad (_("`%s%s' not allowed with `%s%c'"),
7983 i
.op
[op
].regs
->reg_name
,
7988 /* Error if the e or r prefix on a general reg is present. */
7989 else if ((i
.types
[op
].bitfield
.dword
7990 || i
.types
[op
].bitfield
.qword
)
7991 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7992 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7993 && i
.tm
.operand_types
[op
].bitfield
.word
)
/* NOTE(review): the final argument(s) of this as_bad() (the suffix,
   upstream 7997) were dropped by the extraction.  */
7995 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7996 register_prefix
, i
.op
[op
].regs
->reg_name
,
/* update_imm: narrow the matched immediate-operand type mask for
   operand J down to a single immediate width, using the mnemonic
   suffix when present, otherwise the template-allowed widths, the
   current code size (flag_code), and any data-size/REX.W prefixes.
   Emits as_bad() when the width remains ambiguous.  The narrowed type
   is written back to i.types[j].
   NOTE(review): lossy line-mangled extraction — leading integers are
   upstream line numbers; numbering gaps mark dropped lines (braces,
   `else' arms, `return' statements — presumably this returns 0 on the
   ambiguity error and non-zero otherwise, as its caller below tests
   `update_imm (j) == 0'; confirm against upstream).  Code left
   byte-identical; comments only.  */
8004 update_imm (unsigned int j
)
/* Start from the operand-type overlap computed during matching.  */
8006 i386_operand_type overlap
= i
.types
[j
];
8008 if (i
.tm
.operand_types
[j
].bitfield
.imm8
8009 && i
.tm
.operand_types
[j
].bitfield
.imm8s
8010 && overlap
.bitfield
.imm8
&& overlap
.bitfield
.imm8s
)
8012 /* This combination is used on 8-bit immediates where e.g. $~0 is
8013 desirable to permit. We're past operand type matching, so simply
8014 put things back in the shape they were before introducing the
8015 distinction between Imm8, Imm8S, and Imm8|Imm8S. */
8016 overlap
.bitfield
.imm8s
= 0;
/* More than one immediate width still possible?  Then disambiguate.  */
8019 if (overlap
.bitfield
.imm8
8020 + overlap
.bitfield
.imm8s
8021 + overlap
.bitfield
.imm16
8022 + overlap
.bitfield
.imm32
8023 + overlap
.bitfield
.imm32s
8024 + overlap
.bitfield
.imm64
> 1)
/* Constant masks used to intersect the overlap with a chosen width.  */
8026 static const i386_operand_type imm16
= { .bitfield
= { .imm16
= 1 } };
8027 static const i386_operand_type imm32
= { .bitfield
= { .imm32
= 1 } };
8028 static const i386_operand_type imm32s
= { .bitfield
= { .imm32s
= 1 } };
8029 static const i386_operand_type imm16_32
= { .bitfield
=
8030 { .imm16
= 1, .imm32
= 1 }
8032 static const i386_operand_type imm16_32s
= { .bitfield
=
8033 { .imm16
= 1, .imm32s
= 1 }
8035 static const i386_operand_type imm16_32_32s
= { .bitfield
=
8036 { .imm16
= 1, .imm32
= 1, .imm32s
= 1 }
/* With an explicit mnemonic suffix, keep only the width that suffix
   dictates.  */
8041 i386_operand_type temp
;
8043 operand_type_set (&temp
, 0);
8044 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
8046 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
8047 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
8049 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
8050 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
8051 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
/* `q' suffix: a 64-bit immediate, or a 32-bit one sign-extended.  */
8053 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
8054 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
8057 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
/* No suffix: pick by code size and explicit sizing prefixes.  */
8060 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
8061 || operand_type_equal (&overlap
, &imm16_32
)
8062 || operand_type_equal (&overlap
, &imm16_32s
))
/* 16-bit mode XOR a data-size prefix selects between word and dword
   immediates; the selected-mask assignment lines (upstream 8065-8068)
   were dropped by the extraction.  */
8064 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
8069 else if (i
.prefix
[REX_PREFIX
] & REX_W
)
8070 overlap
= operand_type_and (overlap
, imm32s
);
8071 else if (i
.prefix
[DATA_PREFIX
])
8072 overlap
= operand_type_and (overlap
,
8073 flag_code
!= CODE_16BIT
? imm16
: imm32
);
/* Exactly one width must remain, or the size is ambiguous.  */
8074 if (overlap
.bitfield
.imm8
8075 + overlap
.bitfield
.imm8s
8076 + overlap
.bitfield
.imm16
8077 + overlap
.bitfield
.imm32
8078 + overlap
.bitfield
.imm32s
8079 + overlap
.bitfield
.imm64
!= 1)
8081 as_bad (_("no instruction mnemonic suffix given; "
8082 "can't determine immediate size"));
/* Commit the narrowed type back to the operand.  */
8086 i
.types
[j
] = overlap
;
/* NOTE(review): interior fragment of the immediate-finalization
   routine (presumably finalize_imm; its signature line and the `n'/`j'
   declarations were dropped by the lossy extraction).  It runs
   update_imm() over at most the first two operands — only those can be
   immediates — bailing out on failure (the dropped line after the `=='
   test is presumably `return 0;'), and asserts that operand 2 is never
   an immediate.  Confirm against upstream binutils.  Code left
   byte-identical; comments only.  */
8096 /* Update the first 2 immediate operands. */
8097 n
= i
.operands
> 2 ? 2 : i
.operands
;
8100 for (j
= 0; j
< n
; j
++)
8101 if (update_imm (j
) == 0)
8104 /* The 3rd operand can't be immediate operand. */
8105 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
/* set_rex_vrex: fold register R's extension bits into the insn's REX
   state.  If R carries RegRex, the given rex_bit is to be set (the
   assignment line was dropped by the extraction); setting the same bit
   twice is diagnosed.  Under SSE2AVX conversion with a VEX register
   specifier already recorded, the high bit is instead expressed by
   bumping the specifier by 8.  The RegVRex (EVEX V') handling at the
   tail (upstream 8127) was also dropped.
   NOTE(review): lossy line-mangled extraction — leading integers are
   upstream line numbers; numbering gaps mark dropped lines (braces,
   assignments, the third parameter of the signature).  Code left
   byte-identical; comments only.  Restore from upstream before use.  */
8111 static INLINE
void set_rex_vrex (const reg_entry
*r
, unsigned int rex_bit
,
8114 if (r
->reg_flags
& RegRex
)
8116 if (i
.rex
& rex_bit
)
8117 as_bad (_("same type of prefix used twice"));
8120 else if (do_sse2avx
&& (i
.rex
& rex_bit
) && i
.vex
.register_specifier
)
/* Build VEX prefix.  Only the high bit of the register number can be
   pending here, hence the +8 on the specifier.  */
8122 gas_assert (i
.vex
.register_specifier
== r
);
8123 i
.vex
.register_specifier
+= 8;
8126 if (r
->reg_flags
& RegVRex
)
/* process_operands: post-match massaging of the operand list before
   encoding — handles SSE2AVX conversion, implicit operands (xmm0,
   tilezero's faked destination, imul/clr register kludges, quad-group
   sources), segment-register push/pop opcode fixups, short-form
   register-in-opcode encoding, default segment computation, and
   emission of an explicit segment-override prefix when needed.
   Returns whether processing succeeded (the `return' lines were
   dropped by the extraction — confirm against upstream).
   NOTE(review): lossy line-mangled extraction — leading integers are
   upstream tc-i386.c line numbers fused into the text; gaps in that
   numbering mark dropped physical lines (braces, else arms, returns).
   Note also the mangled `®xmm' token below: upstream has `&regxmm'
   (an HTML-entity decoding artifact).  Code left byte-identical;
   comments only.  Restore from upstream binutils before compiling.  */
8131 process_operands (void)
8133 /* Default segment register this instruction will use for memory
8134 accesses. 0 means unknown. This is only for optimizing out
8135 unnecessary segment overrides. */
8136 const reg_entry
*default_seg
= NULL
;
8138 /* We only need to check those implicit registers for instructions
8139 with 3 operands or less. */
8140 if (i
.operands
<= 3)
8141 for (unsigned int j
= 0; j
< i
.operands
; j
++)
8142 if (i
.types
[j
].bitfield
.instance
!= InstanceNone
)
/* --- SSE2AVX: convert legacy SSE templates to their AVX forms --- */
8145 if (i
.tm
.opcode_modifier
.sse2avx
)
8147 /* Legacy encoded insns allow explicit REX prefixes, so these prefixes
8149 i
.rex
|= i
.prefix
[REX_PREFIX
] & (REX_W
| REX_R
| REX_X
| REX_B
);
8150 i
.prefix
[REX_PREFIX
] = 0;
8153 /* ImmExt should be processed after SSE2AVX. */
8154 else if (i
.tm
.opcode_modifier
.immext
)
8157 /* TILEZERO is unusual in that it has a single operand encoded in ModR/M.reg,
8158 not ModR/M.rm. To avoid special casing this in build_modrm_byte(), fake a
8159 new destination operand here, while converting the source one to register
8161 if (i
.tm
.mnem_off
== MN_tilezero
)
8163 i
.op
[1].regs
= i
.op
[0].regs
;
8164 i
.op
[0].regs
-= i
.op
[0].regs
->reg_num
;
8165 i
.types
[1] = i
.types
[0];
8166 i
.tm
.operand_types
[1] = i
.tm
.operand_types
[0];
8167 i
.flags
[1] = i
.flags
[0];
/* --- SSE2AVX with VexVVVV: duplicate the destination operand --- */
8173 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
8175 static const i386_operand_type regxmm
= {
8176 .bitfield
= { .class = RegSIMD
, .xmmword
= 1 }
8178 unsigned int dupl
= i
.operands
;
8179 unsigned int dest
= dupl
- 1;
8182 /* The destination must be an xmm register. */
8183 gas_assert (i
.reg_operands
8184 && MAX_OPERANDS
> dupl
8185 && operand_type_equal (&i
.types
[dest
], ®xmm
));
8187 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
8188 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
8190 /* Keep xmm0 for instructions with VEX prefix and 3
8192 i
.tm
.operand_types
[0].bitfield
.instance
= InstanceNone
;
8193 i
.tm
.operand_types
[0].bitfield
.class = RegSIMD
;
8198 if (i
.tm
.opcode_modifier
.operandconstraint
== IMPLICIT_1ST_XMM0
)
8200 gas_assert ((MAX_OPERANDS
- 1) > dupl
);
8202 /* Add the implicit xmm0 for instructions with VEX prefix
/* Shift all operands up by one to make room for the implicit xmm0
   in slot 0.  */
8204 for (j
= i
.operands
; j
> 0; j
--)
8206 i
.op
[j
] = i
.op
[j
- 1];
8207 i
.types
[j
] = i
.types
[j
- 1];
8208 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
8209 i
.flags
[j
] = i
.flags
[j
- 1];
/* NOTE(review): the left-hand side of this assignment (upstream 8211,
   presumably `i.op[0].regs') was dropped by the extraction.  */
8212 = (const reg_entry
*) str_hash_find (reg_hash
, "xmm0");
8213 i
.types
[0] = regxmm
;
8214 i
.tm
.operand_types
[0] = regxmm
;
8217 i
.reg_operands
+= 2;
/* Duplicate the destination into the extra (dupl) slot.  */
8222 i
.op
[dupl
] = i
.op
[dest
];
8223 i
.types
[dupl
] = i
.types
[dest
];
8224 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
8225 i
.flags
[dupl
] = i
.flags
[dest
];
/* NOTE(review): this second duplication block belongs to the `else'
   arm (upstream 8229-8233 dropped).  */
8234 i
.op
[dupl
] = i
.op
[dest
];
8235 i
.types
[dupl
] = i
.types
[dest
];
8236 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
8237 i
.flags
[dupl
] = i
.flags
[dest
];
8240 if (i
.tm
.opcode_modifier
.immext
)
/* --- Implicit accumulator (%xmm0-style) first operand: drop it --- */
8243 else if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
8244 && i
.tm
.opcode_modifier
.modrm
)
8248 for (j
= 1; j
< i
.operands
; j
++)
8250 i
.op
[j
- 1] = i
.op
[j
];
8251 i
.types
[j
- 1] = i
.types
[j
];
8253 /* We need to adjust fields in i.tm since they are used by
8254 build_modrm_byte. */
8255 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
8257 i
.flags
[j
- 1] = i
.flags
[j
];
8260 /* No adjustment to i.reg_operands: This was already done at the top
/* --- IMPLICIT_QUAD_GROUP: source implicitly names a 4-register group --- */
8265 else if (i
.tm
.opcode_modifier
.operandconstraint
== IMPLICIT_QUAD_GROUP
)
8267 unsigned int regnum
, first_reg_in_group
, last_reg_in_group
;
8269 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
8270 gas_assert (i
.operands
>= 2 && i
.types
[1].bitfield
.class == RegSIMD
)
;
8271 regnum
= register_number (i
.op
[1].regs
);
8272 first_reg_in_group
= regnum
& ~3;
8273 last_reg_in_group
= first_reg_in_group
+ 3;
8274 if (regnum
!= first_reg_in_group
)
8275 as_warn (_("source register `%s%s' implicitly denotes"
8276 " `%s%.3s%u' to `%s%.3s%u' source group in `%s'"),
8277 register_prefix
, i
.op
[1].regs
->reg_name
,
8278 register_prefix
, i
.op
[1].regs
->reg_name
, first_reg_in_group
,
8279 register_prefix
, i
.op
[1].regs
->reg_name
, last_reg_in_group
,
/* --- REG_KLUDGE: synthesize the duplicated register operand --- */
8282 else if (i
.tm
.opcode_modifier
.operandconstraint
== REG_KLUDGE
)
8284 /* The imul $imm, %reg instruction is converted into
8285 imul $imm, %reg, %reg, and the clr %reg instruction
8286 is converted into xor %reg, %reg. */
8288 unsigned int first_reg_op
;
8290 if (operand_type_check (i
.types
[0], reg
))
8294 /* Pretend we saw the extra register operand. */
8295 gas_assert (i
.reg_operands
== 1
8296 && i
.op
[first_reg_op
+ 1].regs
== 0);
8297 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
8298 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
/* --- ModR/M construction and post-encode warnings --- */
8303 if (i
.tm
.opcode_modifier
.modrm
)
8305 /* The opcode is completed (modulo i.tm.extension_opcode which
8306 must be put into the modrm byte). Now, we make the modrm and
8307 index base bytes based on all the info we've collected. */
8309 default_seg
= build_modrm_byte ();
8311 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.operandconstraint
== UGH
)
8313 /* Warn about some common errors, but press on regardless. */
8314 if (i
.operands
== 2)
8316 /* Reversed arguments on faddp or fmulp. */
8317 as_warn (_("translating to `%s %s%s,%s%s'"), insn_name (&i
.tm
),
8318 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
8319 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
8321 else if (i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
)
8323 /* Extraneous `l' suffix on fp insn. */
8324 as_warn (_("translating to `%s %s%s'"), insn_name (&i
.tm
),
8325 register_prefix
, i
.op
[0].regs
->reg_name
);
/* --- Segment-register push/pop: opcode-space fixups --- */
8329 else if (i
.types
[0].bitfield
.class == SReg
&& !dot_insn ())
8331 if (flag_code
!= CODE_64BIT
8332 ? i
.tm
.base_opcode
== POP_SEG_SHORT
8333 && i
.op
[0].regs
->reg_num
== 1
8334 : (i
.tm
.base_opcode
| 1) == (POP_SEG386_SHORT
& 0xff)
8335 && i
.op
[0].regs
->reg_num
< 4)
8337 as_bad (_("you can't `%s %s%s'"),
8338 insn_name (&i
.tm
), register_prefix
, i
.op
[0].regs
->reg_name
);
/* %fs/%gs (reg_num > 3) use the two-byte 0F-space encodings.  */
8341 if (i
.op
[0].regs
->reg_num
> 3
8342 && i
.tm
.opcode_space
== SPACE_BASE
)
8344 i
.tm
.base_opcode
^= (POP_SEG_SHORT
^ POP_SEG386_SHORT
) & 0xff;
8345 i
.tm
.opcode_space
= SPACE_0F
;
8347 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
/* --- mov to/from %?ax with direct displacement: default seg is %ds --- */
8349 else if (i
.tm
.opcode_space
== SPACE_BASE
8350 && (i
.tm
.base_opcode
& ~3) == MOV_AX_DISP32
)
8352 default_seg
= reg_ds
;
8354 else if (i
.tm
.opcode_modifier
.isstring
)
8356 /* For the string instructions that allow a segment override
8357 on one of their operands, the default segment is ds. */
8358 default_seg
= reg_ds
;
/* --- Short form: register number goes in the opcode's low bits --- */
8360 else if (i
.short_form
)
8362 /* The register operand is in the 1st or 2nd non-immediate operand. */
8363 const reg_entry
*r
= i
.op
[i
.imm_operands
].regs
;
/* NOTE(review): the start of this condition (upstream 8365, presumably
   testing i.imm_operands < i.operands - 1 or similar) was dropped.  */
8366 && r
->reg_type
.bitfield
.instance
== Accum
8367 && i
.op
[i
.imm_operands
+ 1].regs
)
8368 r
= i
.op
[i
.imm_operands
+ 1].regs
;
8369 /* Register goes in low 3 bits of opcode. */
8370 i
.tm
.base_opcode
|= r
->reg_num
;
8371 set_rex_vrex (r
, REX_B
, false);
8373 if (dot_insn () && i
.reg_operands
== 2)
8375 gas_assert (is_any_vex_encoding (&i
.tm
)
8376 || i
.vec_encoding
!= vex_encoding_default
);
8377 i
.vex
.register_specifier
= i
.op
[i
.operands
- 1].regs
;
8380 else if (i
.reg_operands
== 1
8381 && !i
.flags
[i
.operands
- 1]
8382 && i
.tm
.operand_types
[i
.operands
- 1].bitfield
.instance
8385 gas_assert (is_any_vex_encoding (&i
.tm
)
8386 || i
.vec_encoding
!= vex_encoding_default
);
8387 i
.vex
.register_specifier
= i
.op
[i
.operands
- 1].regs
;
/* --- Segment overrides: warn on lea, emit prefix when meaningful --- */
8390 if ((i
.seg
[0] || i
.prefix
[SEG_PREFIX
])
8391 && i
.tm
.mnem_off
== MN_lea
)
8393 if (!quiet_warnings
)
8394 as_warn (_("segment override on `%s' is ineffectual"), insn_name (&i
.tm
));
8395 if (optimize
&& !i
.no_optimize
)
8398 i
.prefix
[SEG_PREFIX
] = 0;
8402 /* If a segment was explicitly specified, and the specified segment
8403 is neither the default nor the one already recorded from a prefix,
8404 use an opcode prefix to select it. If we never figured out what
8405 the default segment is, then default_seg will be zero at this
8406 point, and the specified segment prefix will always be used. */
8408 && i
.seg
[0] != default_seg
8409 && i386_seg_prefixes
[i
.seg
[0]->reg_num
] != i
.prefix
[SEG_PREFIX
])
8411 if (!add_prefix (i386_seg_prefixes
[i
.seg
[0]->reg_num
]))
8417 static const reg_entry
*
8418 build_modrm_byte (void)
8420 const reg_entry
*default_seg
= NULL
;
8421 unsigned int source
= i
.imm_operands
- i
.tm
.opcode_modifier
.immext
8422 /* Compensate for kludge in md_assemble(). */
8423 + i
.tm
.operand_types
[0].bitfield
.imm1
;
8424 unsigned int dest
= i
.operands
- 1 - i
.tm
.opcode_modifier
.immext
;
8425 unsigned int v
, op
, reg_slot
= ~0;
8427 /* Accumulator (in particular %st), shift count (%cl), and alike need
8428 to be skipped just like immediate operands do. */
8429 if (i
.tm
.operand_types
[source
].bitfield
.instance
)
8431 while (i
.tm
.operand_types
[dest
].bitfield
.instance
)
8434 for (op
= source
; op
< i
.operands
; ++op
)
8435 if (i
.tm
.operand_types
[op
].bitfield
.baseindex
)
8438 if (i
.reg_operands
+ i
.mem_operands
+ (i
.tm
.extension_opcode
!= None
) == 4)
8442 /* There are 2 kinds of instructions:
8443 1. 5 operands: 4 register operands or 3 register operands
8444 plus 1 memory operand plus one Imm4 operand, VexXDS, and
8445 VexW0 or VexW1. The destination must be either XMM, YMM or
8447 2. 4 operands: 4 register operands or 3 register operands
8448 plus 1 memory operand, with VexXDS.
8449 3. Other equivalent combinations when coming from s_insn(). */
8450 gas_assert (i
.tm
.opcode_modifier
.vexvvvv
8451 && i
.tm
.opcode_modifier
.vexw
);
8452 gas_assert (dot_insn ()
8453 || i
.tm
.operand_types
[dest
].bitfield
.class == RegSIMD
);
8455 /* Of the first two non-immediate operands the one with the template
8456 not allowing for a memory one is encoded in the immediate operand. */
8458 reg_slot
= source
+ 1;
8460 reg_slot
= source
++;
8464 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
8465 gas_assert (!(i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
));
8468 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class != ClassNone
);
8470 if (i
.imm_operands
== 0)
8472 /* When there is no immediate operand, generate an 8bit
8473 immediate operand to encode the first operand. */
8474 exp
= &im_expressions
[i
.imm_operands
++];
8475 i
.op
[i
.operands
].imms
= exp
;
8476 i
.types
[i
.operands
].bitfield
.imm8
= 1;
8479 exp
->X_op
= O_constant
;
8483 gas_assert (i
.imm_operands
== 1);
8484 gas_assert (fits_in_imm4 (i
.op
[0].imms
->X_add_number
));
8485 gas_assert (!i
.tm
.opcode_modifier
.immext
);
8487 /* Turn on Imm8 again so that output_imm will generate it. */
8488 i
.types
[0].bitfield
.imm8
= 1;
8492 exp
->X_add_number
|= register_number (i
.op
[reg_slot
].regs
)
8493 << (3 + !(is_evex_encoding (&i
.tm
)
8494 || i
.vec_encoding
== vex_encoding_evex
));
8497 for (v
= source
+ 1; v
< dest
; ++v
)
8502 if (i
.tm
.extension_opcode
!= None
)
8508 gas_assert (source
< dest
);
8509 if (i
.tm
.opcode_modifier
.operandconstraint
== SWAP_SOURCES
8512 unsigned int tmp
= source
;
8518 if (v
< MAX_OPERANDS
)
8520 gas_assert (i
.tm
.opcode_modifier
.vexvvvv
);
8521 i
.vex
.register_specifier
= i
.op
[v
].regs
;
8524 if (op
< i
.operands
)
8528 unsigned int fake_zero_displacement
= 0;
8530 gas_assert (i
.flags
[op
] & Operand_Mem
);
8532 if (i
.tm
.opcode_modifier
.sib
)
8534 /* The index register of VSIB shouldn't be RegIZ. */
8535 if (i
.tm
.opcode_modifier
.sib
!= SIBMEM
8536 && i
.index_reg
->reg_num
== RegIZ
)
8539 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8542 i
.sib
.base
= NO_BASE_REGISTER
;
8543 i
.sib
.scale
= i
.log2_scale_factor
;
8544 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
8545 i
.types
[op
].bitfield
.disp32
= 1;
8548 /* Since the mandatory SIB always has index register, so
8549 the code logic remains unchanged. The non-mandatory SIB
8550 without index register is allowed and will be handled
8554 if (i
.index_reg
->reg_num
== RegIZ
)
8555 i
.sib
.index
= NO_INDEX_REGISTER
;
8557 i
.sib
.index
= i
.index_reg
->reg_num
;
8558 set_rex_vrex (i
.index_reg
, REX_X
, false);
8562 default_seg
= reg_ds
;
8564 if (i
.base_reg
== 0)
8567 if (!i
.disp_operands
)
8568 fake_zero_displacement
= 1;
8569 if (i
.index_reg
== 0)
8571 /* Both check for VSIB and mandatory non-vector SIB. */
8572 gas_assert (!i
.tm
.opcode_modifier
.sib
8573 || i
.tm
.opcode_modifier
.sib
== SIBMEM
);
8574 /* Operand is just <disp> */
8575 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
8576 if (flag_code
== CODE_64BIT
)
8578 /* 64bit mode overwrites the 32bit absolute
8579 addressing by RIP relative addressing and
8580 absolute addressing is encoded by one of the
8581 redundant SIB forms. */
8582 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8583 i
.sib
.base
= NO_BASE_REGISTER
;
8584 i
.sib
.index
= NO_INDEX_REGISTER
;
8585 i
.types
[op
].bitfield
.disp32
= 1;
8587 else if ((flag_code
== CODE_16BIT
)
8588 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
8590 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
8591 i
.types
[op
].bitfield
.disp16
= 1;
8595 i
.rm
.regmem
= NO_BASE_REGISTER
;
8596 i
.types
[op
].bitfield
.disp32
= 1;
8599 else if (!i
.tm
.opcode_modifier
.sib
)
8601 /* !i.base_reg && i.index_reg */
8602 if (i
.index_reg
->reg_num
== RegIZ
)
8603 i
.sib
.index
= NO_INDEX_REGISTER
;
8605 i
.sib
.index
= i
.index_reg
->reg_num
;
8606 i
.sib
.base
= NO_BASE_REGISTER
;
8607 i
.sib
.scale
= i
.log2_scale_factor
;
8608 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8609 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
8610 i
.types
[op
].bitfield
.disp32
= 1;
8611 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
8615 /* RIP addressing for 64bit mode. */
8616 else if (i
.base_reg
->reg_num
== RegIP
)
8618 gas_assert (!i
.tm
.opcode_modifier
.sib
);
8619 i
.rm
.regmem
= NO_BASE_REGISTER
;
8620 i
.types
[op
].bitfield
.disp8
= 0;
8621 i
.types
[op
].bitfield
.disp16
= 0;
8622 i
.types
[op
].bitfield
.disp32
= 1;
8623 i
.types
[op
].bitfield
.disp64
= 0;
8624 i
.flags
[op
] |= Operand_PCrel
;
8625 if (! i
.disp_operands
)
8626 fake_zero_displacement
= 1;
8628 else if (i
.base_reg
->reg_type
.bitfield
.word
)
8630 gas_assert (!i
.tm
.opcode_modifier
.sib
);
8631 switch (i
.base_reg
->reg_num
)
8634 if (i
.index_reg
== 0)
8636 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
8637 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
8640 default_seg
= reg_ss
;
8641 if (i
.index_reg
== 0)
8644 if (operand_type_check (i
.types
[op
], disp
) == 0)
8646 /* fake (%bp) into 0(%bp) */
8647 if (i
.disp_encoding
== disp_encoding_16bit
)
8648 i
.types
[op
].bitfield
.disp16
= 1;
8650 i
.types
[op
].bitfield
.disp8
= 1;
8651 fake_zero_displacement
= 1;
8654 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
8655 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
8657 default: /* (%si) -> 4 or (%di) -> 5 */
8658 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
8660 if (!fake_zero_displacement
8664 fake_zero_displacement
= 1;
8665 if (i
.disp_encoding
== disp_encoding_8bit
)
8666 i
.types
[op
].bitfield
.disp8
= 1;
8668 i
.types
[op
].bitfield
.disp16
= 1;
8670 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8672 else /* i.base_reg and 32/64 bit mode */
8674 if (operand_type_check (i
.types
[op
], disp
))
8676 i
.types
[op
].bitfield
.disp16
= 0;
8677 i
.types
[op
].bitfield
.disp64
= 0;
8678 i
.types
[op
].bitfield
.disp32
= 1;
8681 if (!i
.tm
.opcode_modifier
.sib
)
8682 i
.rm
.regmem
= i
.base_reg
->reg_num
;
8683 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
8685 i
.sib
.base
= i
.base_reg
->reg_num
;
8686 /* x86-64 ignores REX prefix bit here to avoid decoder
8688 if (!(i
.base_reg
->reg_flags
& RegRex
)
8689 && (i
.base_reg
->reg_num
== EBP_REG_NUM
8690 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
8691 default_seg
= reg_ss
;
8692 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
8694 fake_zero_displacement
= 1;
8695 if (i
.disp_encoding
== disp_encoding_32bit
)
8696 i
.types
[op
].bitfield
.disp32
= 1;
8698 i
.types
[op
].bitfield
.disp8
= 1;
8700 i
.sib
.scale
= i
.log2_scale_factor
;
8701 if (i
.index_reg
== 0)
8703 /* Only check for VSIB. */
8704 gas_assert (i
.tm
.opcode_modifier
.sib
!= VECSIB128
8705 && i
.tm
.opcode_modifier
.sib
!= VECSIB256
8706 && i
.tm
.opcode_modifier
.sib
!= VECSIB512
);
8708 /* <disp>(%esp) becomes two byte modrm with no index
8709 register. We've already stored the code for esp
8710 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
8711 Any base register besides %esp will not use the
8712 extra modrm byte. */
8713 i
.sib
.index
= NO_INDEX_REGISTER
;
8715 else if (!i
.tm
.opcode_modifier
.sib
)
8717 if (i
.index_reg
->reg_num
== RegIZ
)
8718 i
.sib
.index
= NO_INDEX_REGISTER
;
8720 i
.sib
.index
= i
.index_reg
->reg_num
;
8721 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8722 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
8727 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
8728 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
8732 if (!fake_zero_displacement
8736 fake_zero_displacement
= 1;
8737 if (i
.disp_encoding
== disp_encoding_8bit
)
8738 i
.types
[op
].bitfield
.disp8
= 1;
8740 i
.types
[op
].bitfield
.disp32
= 1;
8742 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8746 if (fake_zero_displacement
)
8748 /* Fakes a zero displacement assuming that i.types[op]
8749 holds the correct displacement size. */
8752 gas_assert (i
.op
[op
].disps
== 0);
8753 exp
= &disp_expressions
[i
.disp_operands
++];
8754 i
.op
[op
].disps
= exp
;
8755 exp
->X_op
= O_constant
;
8756 exp
->X_add_number
= 0;
8757 exp
->X_add_symbol
= (symbolS
*) 0;
8758 exp
->X_op_symbol
= (symbolS
*) 0;
8764 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
8765 set_rex_vrex (i
.op
[op
].regs
, REX_B
, false);
8776 if (!i
.tm
.opcode_modifier
.regmem
)
8778 gas_assert (source
< MAX_OPERANDS
);
8779 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
8780 set_rex_vrex (i
.op
[source
].regs
, REX_B
,
8781 dest
>= MAX_OPERANDS
&& i
.tm
.opcode_modifier
.sse2avx
);
8786 gas_assert (dest
< MAX_OPERANDS
);
8787 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
8788 set_rex_vrex (i
.op
[dest
].regs
, REX_B
, i
.tm
.opcode_modifier
.sse2avx
);
8793 /* Fill in i.rm.reg field with extension opcode (if any) or the
8794 appropriate register. */
8795 if (i
.tm
.extension_opcode
!= None
)
8796 i
.rm
.reg
= i
.tm
.extension_opcode
;
8797 else if (!i
.tm
.opcode_modifier
.regmem
&& dest
< MAX_OPERANDS
)
8799 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
8800 set_rex_vrex (i
.op
[dest
].regs
, REX_R
, i
.tm
.opcode_modifier
.sse2avx
);
8804 gas_assert (source
< MAX_OPERANDS
);
8805 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
8806 set_rex_vrex (i
.op
[source
].regs
, REX_R
, false);
8809 if (flag_code
!= CODE_64BIT
&& (i
.rex
& REX_R
))
8811 gas_assert (i
.types
[!i
.tm
.opcode_modifier
.regmem
].bitfield
.class == RegCR
);
8813 add_prefix (LOCK_PREFIX_OPCODE
);
8820 frag_opcode_byte (unsigned char byte
)
8822 if (now_seg
!= absolute_section
)
8823 FRAG_APPEND_1_CHAR (byte
);
8825 ++abs_section_offset
;
8829 flip_code16 (unsigned int code16
)
8831 gas_assert (i
.tm
.operands
== 1);
8833 return !(i
.prefix
[REX_PREFIX
] & REX_W
)
8834 && (code16
? i
.tm
.operand_types
[0].bitfield
.disp32
8835 : i
.tm
.operand_types
[0].bitfield
.disp16
)
8840 output_branch (void)
8846 relax_substateT subtype
;
8850 if (now_seg
== absolute_section
)
8852 as_bad (_("relaxable branches not supported in absolute section"));
8856 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
8857 size
= i
.disp_encoding
> disp_encoding_8bit
? BIG
: SMALL
;
8860 if (i
.prefix
[DATA_PREFIX
] != 0)
8864 code16
^= flip_code16(code16
);
8866 /* Pentium4 branch hints. */
8867 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8868 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8873 if (i
.prefix
[REX_PREFIX
] != 0)
8879 /* BND prefixed jump. */
8880 if (i
.prefix
[BND_PREFIX
] != 0)
8886 if (i
.prefixes
!= 0)
8887 as_warn (_("skipping prefixes on `%s'"), insn_name (&i
.tm
));
8889 /* It's always a symbol; End frag & setup for relax.
8890 Make sure there is enough room in this frag for the largest
8891 instruction we may generate in md_convert_frag. This is 2
8892 bytes for the opcode and room for the prefix and largest
8894 frag_grow (prefix
+ 2 + 4);
8895 /* Prefix and 1 opcode byte go in fr_fix. */
8896 p
= frag_more (prefix
+ 1);
8897 if (i
.prefix
[DATA_PREFIX
] != 0)
8898 *p
++ = DATA_PREFIX_OPCODE
;
8899 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
8900 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
8901 *p
++ = i
.prefix
[SEG_PREFIX
];
8902 if (i
.prefix
[BND_PREFIX
] != 0)
8903 *p
++ = BND_PREFIX_OPCODE
;
8904 if (i
.prefix
[REX_PREFIX
] != 0)
8905 *p
++ = i
.prefix
[REX_PREFIX
];
8906 *p
= i
.tm
.base_opcode
;
8908 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
8909 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
8910 else if (cpu_arch_flags
.bitfield
.cpui386
)
8911 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
8913 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
8916 sym
= i
.op
[0].disps
->X_add_symbol
;
8917 off
= i
.op
[0].disps
->X_add_number
;
8919 if (i
.op
[0].disps
->X_op
!= O_constant
8920 && i
.op
[0].disps
->X_op
!= O_symbol
)
8922 /* Handle complex expressions. */
8923 sym
= make_expr_symbol (i
.op
[0].disps
);
8927 frag_now
->tc_frag_data
.code64
= flag_code
== CODE_64BIT
;
8929 /* 1 possible extra opcode + 4 byte displacement go in var part.
8930 Pass reloc in fr_var. */
8931 frag_var (rs_machine_dependent
, 5, i
.reloc
[0], subtype
, sym
, off
, p
);
8934 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8935 /* Return TRUE iff PLT32 relocation should be used for branching to
8939 need_plt32_p (symbolS
*s
)
8941 /* PLT32 relocation is ELF only. */
8946 /* Don't emit PLT32 relocation on Solaris: neither native linker nor
8947 krtld support it. */
8951 /* Since there is no need to prepare for PLT branch on x86-64, we
8952 can generate R_X86_64_PLT32, instead of R_X86_64_PC32, which can
8953 be used as a marker for 32-bit PC-relative branches. */
8960 /* Weak or undefined symbol need PLT32 relocation. */
8961 if (S_IS_WEAK (s
) || !S_IS_DEFINED (s
))
8964 /* Non-global symbol doesn't need PLT32 relocation. */
8965 if (! S_IS_EXTERNAL (s
))
8968 /* Other global symbols need PLT32 relocation. NB: Symbol with
8969 non-default visibilities are treated as normal global symbol
8970 so that PLT32 relocation can be used as a marker for 32-bit
8971 PC-relative branches. It is useful for linker relaxation. */
8982 bfd_reloc_code_real_type jump_reloc
= i
.reloc
[0];
8984 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)
8986 /* This is a loop or jecxz type instruction. */
8988 if (i
.prefix
[ADDR_PREFIX
] != 0)
8990 frag_opcode_byte (ADDR_PREFIX_OPCODE
);
8993 /* Pentium4 branch hints. */
8994 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8995 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8997 frag_opcode_byte (i
.prefix
[SEG_PREFIX
]);
9006 if (flag_code
== CODE_16BIT
)
9009 if (i
.prefix
[DATA_PREFIX
] != 0)
9011 frag_opcode_byte (DATA_PREFIX_OPCODE
);
9013 code16
^= flip_code16(code16
);
9021 /* BND prefixed jump. */
9022 if (i
.prefix
[BND_PREFIX
] != 0)
9024 frag_opcode_byte (i
.prefix
[BND_PREFIX
]);
9028 if (i
.prefix
[REX_PREFIX
] != 0)
9030 frag_opcode_byte (i
.prefix
[REX_PREFIX
]);
9034 if (i
.prefixes
!= 0)
9035 as_warn (_("skipping prefixes on `%s'"), insn_name (&i
.tm
));
9037 if (now_seg
== absolute_section
)
9039 abs_section_offset
+= i
.opcode_length
+ size
;
9043 p
= frag_more (i
.opcode_length
+ size
);
9044 switch (i
.opcode_length
)
9047 *p
++ = i
.tm
.base_opcode
>> 8;
9050 *p
++ = i
.tm
.base_opcode
;
9056 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9057 if (flag_code
== CODE_64BIT
&& size
== 4
9058 && jump_reloc
== NO_RELOC
&& i
.op
[0].disps
->X_add_number
== 0
9059 && need_plt32_p (i
.op
[0].disps
->X_add_symbol
))
9060 jump_reloc
= BFD_RELOC_X86_64_PLT32
;
9063 jump_reloc
= reloc (size
, 1, 1, jump_reloc
);
9065 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
9066 i
.op
[0].disps
, 1, jump_reloc
);
9068 /* All jumps handled here are signed, but don't unconditionally use a
9069 signed limit check for 32 and 16 bit jumps as we want to allow wrap
9070 around at 4G (outside of 64-bit mode) and 64k (except for XBEGIN)
9075 fixP
->fx_signed
= 1;
9079 if (i
.tm
.mnem_off
== MN_xbegin
)
9080 fixP
->fx_signed
= 1;
9084 if (flag_code
== CODE_64BIT
)
9085 fixP
->fx_signed
= 1;
9091 output_interseg_jump (void)
9099 if (flag_code
== CODE_16BIT
)
9103 if (i
.prefix
[DATA_PREFIX
] != 0)
9110 gas_assert (!i
.prefix
[REX_PREFIX
]);
9116 if (i
.prefixes
!= 0)
9117 as_warn (_("skipping prefixes on `%s'"), insn_name (&i
.tm
));
9119 if (now_seg
== absolute_section
)
9121 abs_section_offset
+= prefix
+ 1 + 2 + size
;
9125 /* 1 opcode; 2 segment; offset */
9126 p
= frag_more (prefix
+ 1 + 2 + size
);
9128 if (i
.prefix
[DATA_PREFIX
] != 0)
9129 *p
++ = DATA_PREFIX_OPCODE
;
9131 if (i
.prefix
[REX_PREFIX
] != 0)
9132 *p
++ = i
.prefix
[REX_PREFIX
];
9134 *p
++ = i
.tm
.base_opcode
;
9135 if (i
.op
[1].imms
->X_op
== O_constant
)
9137 offsetT n
= i
.op
[1].imms
->X_add_number
;
9140 && !fits_in_unsigned_word (n
)
9141 && !fits_in_signed_word (n
))
9143 as_bad (_("16-bit jump out of range"));
9146 md_number_to_chars (p
, n
, size
);
9149 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
9150 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
9153 if (i
.op
[0].imms
->X_op
== O_constant
)
9154 md_number_to_chars (p
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
9156 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, 2,
9157 i
.op
[0].imms
, 0, reloc (2, 0, 0, i
.reloc
[0]));
9160 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9165 asection
*seg
= now_seg
;
9166 subsegT subseg
= now_subseg
;
9168 unsigned int alignment
, align_size_1
;
9169 unsigned int isa_1_descsz
, feature_2_descsz
, descsz
;
9170 unsigned int isa_1_descsz_raw
, feature_2_descsz_raw
;
9171 unsigned int padding
;
9173 if (!IS_ELF
|| !x86_used_note
)
9176 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X86
;
9178 /* The .note.gnu.property section layout:
9180 Field Length Contents
9183 n_descsz 4 The note descriptor size
9184 n_type 4 NT_GNU_PROPERTY_TYPE_0
9186 n_desc n_descsz The program property array
9190 /* Create the .note.gnu.property section. */
9191 sec
= subseg_new (NOTE_GNU_PROPERTY_SECTION_NAME
, 0);
9192 bfd_set_section_flags (sec
,
9199 if (get_elf_backend_data (stdoutput
)->s
->elfclass
== ELFCLASS64
)
9210 bfd_set_section_alignment (sec
, alignment
);
9211 elf_section_type (sec
) = SHT_NOTE
;
9213 /* GNU_PROPERTY_X86_ISA_1_USED: 4-byte type + 4-byte data size
9215 isa_1_descsz_raw
= 4 + 4 + 4;
9216 /* Align GNU_PROPERTY_X86_ISA_1_USED. */
9217 isa_1_descsz
= (isa_1_descsz_raw
+ align_size_1
) & ~align_size_1
;
9219 feature_2_descsz_raw
= isa_1_descsz
;
9220 /* GNU_PROPERTY_X86_FEATURE_2_USED: 4-byte type + 4-byte data size
9222 feature_2_descsz_raw
+= 4 + 4 + 4;
9223 /* Align GNU_PROPERTY_X86_FEATURE_2_USED. */
9224 feature_2_descsz
= ((feature_2_descsz_raw
+ align_size_1
)
9227 descsz
= feature_2_descsz
;
9228 /* Section size: n_namsz + n_descsz + n_type + n_name + n_descsz. */
9229 p
= frag_more (4 + 4 + 4 + 4 + descsz
);
9231 /* Write n_namsz. */
9232 md_number_to_chars (p
, (valueT
) 4, 4);
9234 /* Write n_descsz. */
9235 md_number_to_chars (p
+ 4, (valueT
) descsz
, 4);
9238 md_number_to_chars (p
+ 4 * 2, (valueT
) NT_GNU_PROPERTY_TYPE_0
, 4);
9241 memcpy (p
+ 4 * 3, "GNU", 4);
9243 /* Write 4-byte type. */
9244 md_number_to_chars (p
+ 4 * 4,
9245 (valueT
) GNU_PROPERTY_X86_ISA_1_USED
, 4);
9247 /* Write 4-byte data size. */
9248 md_number_to_chars (p
+ 4 * 5, (valueT
) 4, 4);
9250 /* Write 4-byte data. */
9251 md_number_to_chars (p
+ 4 * 6, (valueT
) x86_isa_1_used
, 4);
9253 /* Zero out paddings. */
9254 padding
= isa_1_descsz
- isa_1_descsz_raw
;
9256 memset (p
+ 4 * 7, 0, padding
);
9258 /* Write 4-byte type. */
9259 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 4,
9260 (valueT
) GNU_PROPERTY_X86_FEATURE_2_USED
, 4);
9262 /* Write 4-byte data size. */
9263 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 5, (valueT
) 4, 4);
9265 /* Write 4-byte data. */
9266 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 6,
9267 (valueT
) x86_feature_2_used
, 4);
9269 /* Zero out paddings. */
9270 padding
= feature_2_descsz
- feature_2_descsz_raw
;
9272 memset (p
+ isa_1_descsz
+ 4 * 7, 0, padding
);
9274 /* We probably can't restore the current segment, for there likely
9277 subseg_set (seg
, subseg
);
9281 x86_support_sframe_p (void)
9283 /* At this time, SFrame stack trace is supported for AMD64 ABI only. */
9284 return (x86_elf_abi
== X86_64_ABI
);
9288 x86_sframe_ra_tracking_p (void)
9290 /* In AMD64, return address is always stored on the stack at a fixed offset
9291 from the CFA (provided via x86_sframe_cfa_ra_offset ()).
9292 Do not track explicitly via an SFrame Frame Row Entry. */
9297 x86_sframe_cfa_ra_offset (void)
9299 gas_assert (x86_elf_abi
== X86_64_ABI
);
9300 return (offsetT
) -8;
9304 x86_sframe_get_abi_arch (void)
9306 unsigned char sframe_abi_arch
= 0;
9308 if (x86_support_sframe_p ())
9310 gas_assert (!target_big_endian
);
9311 sframe_abi_arch
= SFRAME_ABI_AMD64_ENDIAN_LITTLE
;
9314 return sframe_abi_arch
;
9320 encoding_length (const fragS
*start_frag
, offsetT start_off
,
9321 const char *frag_now_ptr
)
9323 unsigned int len
= 0;
9325 if (start_frag
!= frag_now
)
9327 const fragS
*fr
= start_frag
;
9332 } while (fr
&& fr
!= frag_now
);
9335 return len
- start_off
+ (frag_now_ptr
- frag_now
->fr_literal
);
9338 /* Return 1 for test, and, cmp, add, sub, inc and dec which may
9339 be macro-fused with conditional jumps.
9340 NB: If TEST/AND/CMP/ADD/SUB/INC/DEC is of RIP relative address,
9341 or is one of the following format:
9354 maybe_fused_with_jcc_p (enum mf_cmp_kind
* mf_cmp_p
)
9356 /* No RIP address. */
9357 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
9360 /* No opcodes outside of base encoding space. */
9361 if (i
.tm
.opcode_space
!= SPACE_BASE
)
9364 /* add, sub without add/sub m, imm. */
9365 if (i
.tm
.base_opcode
<= 5
9366 || (i
.tm
.base_opcode
>= 0x28 && i
.tm
.base_opcode
<= 0x2d)
9367 || ((i
.tm
.base_opcode
| 3) == 0x83
9368 && (i
.tm
.extension_opcode
== 0x5
9369 || i
.tm
.extension_opcode
== 0x0)))
9371 *mf_cmp_p
= mf_cmp_alu_cmp
;
9372 return !(i
.mem_operands
&& i
.imm_operands
);
9375 /* and without and m, imm. */
9376 if ((i
.tm
.base_opcode
>= 0x20 && i
.tm
.base_opcode
<= 0x25)
9377 || ((i
.tm
.base_opcode
| 3) == 0x83
9378 && i
.tm
.extension_opcode
== 0x4))
9380 *mf_cmp_p
= mf_cmp_test_and
;
9381 return !(i
.mem_operands
&& i
.imm_operands
);
9384 /* test without test m imm. */
9385 if ((i
.tm
.base_opcode
| 1) == 0x85
9386 || (i
.tm
.base_opcode
| 1) == 0xa9
9387 || ((i
.tm
.base_opcode
| 1) == 0xf7
9388 && i
.tm
.extension_opcode
== 0))
9390 *mf_cmp_p
= mf_cmp_test_and
;
9391 return !(i
.mem_operands
&& i
.imm_operands
);
9394 /* cmp without cmp m, imm. */
9395 if ((i
.tm
.base_opcode
>= 0x38 && i
.tm
.base_opcode
<= 0x3d)
9396 || ((i
.tm
.base_opcode
| 3) == 0x83
9397 && (i
.tm
.extension_opcode
== 0x7)))
9399 *mf_cmp_p
= mf_cmp_alu_cmp
;
9400 return !(i
.mem_operands
&& i
.imm_operands
);
9403 /* inc, dec without inc/dec m. */
9404 if ((i
.tm
.cpu_flags
.bitfield
.cpuno64
9405 && (i
.tm
.base_opcode
| 0xf) == 0x4f)
9406 || ((i
.tm
.base_opcode
| 1) == 0xff
9407 && i
.tm
.extension_opcode
<= 0x1))
9409 *mf_cmp_p
= mf_cmp_incdec
;
9410 return !i
.mem_operands
;
9416 /* Return 1 if a FUSED_JCC_PADDING frag should be generated. */
9419 add_fused_jcc_padding_frag_p (enum mf_cmp_kind
* mf_cmp_p
)
9421 /* NB: Don't work with COND_JUMP86 without i386. */
9422 if (!align_branch_power
9423 || now_seg
== absolute_section
9424 || !cpu_arch_flags
.bitfield
.cpui386
9425 || !(align_branch
& align_branch_fused_bit
))
9428 if (maybe_fused_with_jcc_p (mf_cmp_p
))
9430 if (last_insn
.kind
== last_insn_other
9431 || last_insn
.seg
!= now_seg
)
9434 as_warn_where (last_insn
.file
, last_insn
.line
,
9435 _("`%s` skips -malign-branch-boundary on `%s`"),
9436 last_insn
.name
, insn_name (&i
.tm
));
9442 /* Return 1 if a BRANCH_PREFIX frag should be generated. */
9445 add_branch_prefix_frag_p (void)
9447 /* NB: Don't work with COND_JUMP86 without i386. Don't add prefix
9448 to PadLock instructions since they include prefixes in opcode. */
9449 if (!align_branch_power
9450 || !align_branch_prefix_size
9451 || now_seg
== absolute_section
9452 || i
.tm
.cpu_flags
.bitfield
.cpupadlock
9453 || !cpu_arch_flags
.bitfield
.cpui386
)
9456 /* Don't add prefix if it is a prefix or there is no operand in case
9457 that segment prefix is special. */
9458 if (!i
.operands
|| i
.tm
.opcode_modifier
.isprefix
)
9461 if (last_insn
.kind
== last_insn_other
9462 || last_insn
.seg
!= now_seg
)
9466 as_warn_where (last_insn
.file
, last_insn
.line
,
9467 _("`%s` skips -malign-branch-boundary on `%s`"),
9468 last_insn
.name
, insn_name (&i
.tm
));
9473 /* Return 1 if a BRANCH_PADDING frag should be generated. */
9476 add_branch_padding_frag_p (enum align_branch_kind
*branch_p
,
9477 enum mf_jcc_kind
*mf_jcc_p
)
9481 /* NB: Don't work with COND_JUMP86 without i386. */
9482 if (!align_branch_power
9483 || now_seg
== absolute_section
9484 || !cpu_arch_flags
.bitfield
.cpui386
9485 || i
.tm
.opcode_space
!= SPACE_BASE
)
9490 /* Check for jcc and direct jmp. */
9491 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
9493 if (i
.tm
.base_opcode
== JUMP_PC_RELATIVE
)
9495 *branch_p
= align_branch_jmp
;
9496 add_padding
= align_branch
& align_branch_jmp_bit
;
9500 /* Because J<cc> and JN<cc> share same group in macro-fusible table,
9501 igore the lowest bit. */
9502 *mf_jcc_p
= (i
.tm
.base_opcode
& 0x0e) >> 1;
9503 *branch_p
= align_branch_jcc
;
9504 if ((align_branch
& align_branch_jcc_bit
))
9508 else if ((i
.tm
.base_opcode
| 1) == 0xc3)
9511 *branch_p
= align_branch_ret
;
9512 if ((align_branch
& align_branch_ret_bit
))
9517 /* Check for indirect jmp, direct and indirect calls. */
9518 if (i
.tm
.base_opcode
== 0xe8)
9521 *branch_p
= align_branch_call
;
9522 if ((align_branch
& align_branch_call_bit
))
9525 else if (i
.tm
.base_opcode
== 0xff
9526 && (i
.tm
.extension_opcode
== 2
9527 || i
.tm
.extension_opcode
== 4))
9529 /* Indirect call and jmp. */
9530 *branch_p
= align_branch_indirect
;
9531 if ((align_branch
& align_branch_indirect_bit
))
9538 && (i
.op
[0].disps
->X_op
== O_symbol
9539 || (i
.op
[0].disps
->X_op
== O_subtract
9540 && i
.op
[0].disps
->X_op_symbol
== GOT_symbol
)))
9542 symbolS
*s
= i
.op
[0].disps
->X_add_symbol
;
9543 /* No padding to call to global or undefined tls_get_addr. */
9544 if ((S_IS_EXTERNAL (s
) || !S_IS_DEFINED (s
))
9545 && strcmp (S_GET_NAME (s
), tls_get_addr
) == 0)
9551 && last_insn
.kind
!= last_insn_other
9552 && last_insn
.seg
== now_seg
)
9555 as_warn_where (last_insn
.file
, last_insn
.line
,
9556 _("`%s` skips -malign-branch-boundary on `%s`"),
9557 last_insn
.name
, insn_name (&i
.tm
));
9567 fragS
*insn_start_frag
;
9568 offsetT insn_start_off
;
9569 fragS
*fragP
= NULL
;
9570 enum align_branch_kind branch
= align_branch_none
;
9571 /* The initializer is arbitrary just to avoid uninitialized error.
9572 it's actually either assigned in add_branch_padding_frag_p
9573 or never be used. */
9574 enum mf_jcc_kind mf_jcc
= mf_jcc_jo
;
9576 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9577 if (IS_ELF
&& x86_used_note
&& now_seg
!= absolute_section
)
9579 if ((i
.xstate
& xstate_tmm
) == xstate_tmm
9580 || i
.tm
.cpu_flags
.bitfield
.cpuamx_tile
)
9581 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_TMM
;
9583 if (i
.tm
.cpu_flags
.bitfield
.cpu8087
9584 || i
.tm
.cpu_flags
.bitfield
.cpu287
9585 || i
.tm
.cpu_flags
.bitfield
.cpu387
9586 || i
.tm
.cpu_flags
.bitfield
.cpu687
9587 || i
.tm
.cpu_flags
.bitfield
.cpufisttp
)
9588 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X87
;
9590 if ((i
.xstate
& xstate_mmx
)
9591 || i
.tm
.mnem_off
== MN_emms
9592 || i
.tm
.mnem_off
== MN_femms
)
9593 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MMX
;
9597 if (i
.index_reg
->reg_type
.bitfield
.zmmword
)
9598 i
.xstate
|= xstate_zmm
;
9599 else if (i
.index_reg
->reg_type
.bitfield
.ymmword
)
9600 i
.xstate
|= xstate_ymm
;
9601 else if (i
.index_reg
->reg_type
.bitfield
.xmmword
)
9602 i
.xstate
|= xstate_xmm
;
9605 /* vzeroall / vzeroupper */
9606 if (i
.tm
.base_opcode
== 0x77 && i
.tm
.cpu_flags
.bitfield
.cpuavx
)
9607 i
.xstate
|= xstate_ymm
;
9609 if ((i
.xstate
& xstate_xmm
)
9610 /* ldmxcsr / stmxcsr / vldmxcsr / vstmxcsr */
9611 || (i
.tm
.base_opcode
== 0xae
9612 && (i
.tm
.cpu_flags
.bitfield
.cpusse
9613 || i
.tm
.cpu_flags
.bitfield
.cpuavx
))
9614 || i
.tm
.cpu_flags
.bitfield
.cpuwidekl
9615 || i
.tm
.cpu_flags
.bitfield
.cpukl
)
9616 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XMM
;
9618 if ((i
.xstate
& xstate_ymm
) == xstate_ymm
)
9619 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_YMM
;
9620 if ((i
.xstate
& xstate_zmm
) == xstate_zmm
)
9621 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_ZMM
;
9622 if (i
.mask
.reg
|| (i
.xstate
& xstate_mask
) == xstate_mask
)
9623 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MASK
;
9624 if (i
.tm
.cpu_flags
.bitfield
.cpufxsr
)
9625 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_FXSR
;
9626 if (i
.tm
.cpu_flags
.bitfield
.cpuxsave
)
9627 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVE
;
9628 if (i
.tm
.cpu_flags
.bitfield
.cpuxsaveopt
)
9629 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
;
9630 if (i
.tm
.cpu_flags
.bitfield
.cpuxsavec
)
9631 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEC
;
9633 if (x86_feature_2_used
9634 || i
.tm
.cpu_flags
.bitfield
.cpucmov
9635 || i
.tm
.cpu_flags
.bitfield
.cpusyscall
9636 || i
.tm
.mnem_off
== MN_cmpxchg8b
)
9637 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_BASELINE
;
9638 if (i
.tm
.cpu_flags
.bitfield
.cpusse3
9639 || i
.tm
.cpu_flags
.bitfield
.cpussse3
9640 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
9641 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
9642 || i
.tm
.cpu_flags
.bitfield
.cpucx16
9643 || i
.tm
.cpu_flags
.bitfield
.cpupopcnt
9644 /* LAHF-SAHF insns in 64-bit mode. */
9645 || (flag_code
== CODE_64BIT
9646 && (i
.tm
.base_opcode
| 1) == 0x9f
9647 && i
.tm
.opcode_space
== SPACE_BASE
))
9648 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V2
;
9649 if (i
.tm
.cpu_flags
.bitfield
.cpuavx
9650 || i
.tm
.cpu_flags
.bitfield
.cpuavx2
9651 /* Any VEX encoded insns execpt for AVX512F, AVX512BW, AVX512DQ,
9652 XOP, FMA4, LPW, TBM, and AMX. */
9653 || (i
.tm
.opcode_modifier
.vex
9654 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512f
9655 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
9656 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
9657 && !i
.tm
.cpu_flags
.bitfield
.cpuxop
9658 && !i
.tm
.cpu_flags
.bitfield
.cpufma4
9659 && !i
.tm
.cpu_flags
.bitfield
.cpulwp
9660 && !i
.tm
.cpu_flags
.bitfield
.cputbm
9661 && !(x86_feature_2_used
& GNU_PROPERTY_X86_FEATURE_2_TMM
))
9662 || i
.tm
.cpu_flags
.bitfield
.cpuf16c
9663 || i
.tm
.cpu_flags
.bitfield
.cpufma
9664 || i
.tm
.cpu_flags
.bitfield
.cpulzcnt
9665 || i
.tm
.cpu_flags
.bitfield
.cpumovbe
9666 || i
.tm
.cpu_flags
.bitfield
.cpuxsaves
9667 || (x86_feature_2_used
9668 & (GNU_PROPERTY_X86_FEATURE_2_XSAVE
9669 | GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
9670 | GNU_PROPERTY_X86_FEATURE_2_XSAVEC
)) != 0)
9671 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V3
;
9672 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512f
9673 || i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
9674 || i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
9675 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
9676 /* Any EVEX encoded insns except for AVX512ER, AVX512PF,
9677 AVX512-4FMAPS, and AVX512-4VNNIW. */
9678 || (i
.tm
.opcode_modifier
.evex
9679 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512er
9680 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512pf
9681 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512_4fmaps
9682 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512_4vnniw
))
9683 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V4
;
9687 /* Tie dwarf2 debug info to the address at the start of the insn.
9688 We can't do this after the insn has been output as the current
9689 frag may have been closed off. eg. by frag_var. */
9690 dwarf2_emit_insn (0);
9692 insn_start_frag
= frag_now
;
9693 insn_start_off
= frag_now_fix ();
9695 if (add_branch_padding_frag_p (&branch
, &mf_jcc
))
9698 /* Branch can be 8 bytes. Leave some room for prefixes. */
9699 unsigned int max_branch_padding_size
= 14;
9701 /* Align section to boundary. */
9702 record_alignment (now_seg
, align_branch_power
);
9704 /* Make room for padding. */
9705 frag_grow (max_branch_padding_size
);
9707 /* Start of the padding. */
9712 frag_var (rs_machine_dependent
, max_branch_padding_size
, 0,
9713 ENCODE_RELAX_STATE (BRANCH_PADDING
, 0),
9716 fragP
->tc_frag_data
.mf_type
= mf_jcc
;
9717 fragP
->tc_frag_data
.branch_type
= branch
;
9718 fragP
->tc_frag_data
.max_bytes
= max_branch_padding_size
;
9721 if (!cpu_arch_flags
.bitfield
.cpui386
&& (flag_code
!= CODE_16BIT
)
9722 && !pre_386_16bit_warned
)
9724 as_warn (_("use .code16 to ensure correct addressing mode"));
9725 pre_386_16bit_warned
= true;
9729 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
9731 else if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
9732 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
9734 else if (i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
)
9735 output_interseg_jump ();
9738 /* Output normal instructions here. */
9742 enum mf_cmp_kind mf_cmp
;
9745 && (i
.tm
.base_opcode
== 0xaee8
9746 || i
.tm
.base_opcode
== 0xaef0
9747 || i
.tm
.base_opcode
== 0xaef8))
9749 /* Encode lfence, mfence, and sfence as
9750 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
9751 if (flag_code
== CODE_16BIT
)
9752 as_bad (_("Cannot convert `%s' in 16-bit mode"), insn_name (&i
.tm
));
9753 else if (omit_lock_prefix
)
9754 as_bad (_("Cannot convert `%s' with `-momit-lock-prefix=yes' in effect"),
9756 else if (now_seg
!= absolute_section
)
9758 offsetT val
= 0x240483f0ULL
;
9761 md_number_to_chars (p
, val
, 5);
9764 abs_section_offset
+= 5;
9768 /* Some processors fail on LOCK prefix. This options makes
9769 assembler ignore LOCK prefix and serves as a workaround. */
9770 if (omit_lock_prefix
)
9772 if (i
.tm
.base_opcode
== LOCK_PREFIX_OPCODE
9773 && i
.tm
.opcode_modifier
.isprefix
)
9775 i
.prefix
[LOCK_PREFIX
] = 0;
9779 /* Skip if this is a branch. */
9781 else if (add_fused_jcc_padding_frag_p (&mf_cmp
))
9783 /* Make room for padding. */
9784 frag_grow (MAX_FUSED_JCC_PADDING_SIZE
);
9789 frag_var (rs_machine_dependent
, MAX_FUSED_JCC_PADDING_SIZE
, 0,
9790 ENCODE_RELAX_STATE (FUSED_JCC_PADDING
, 0),
9793 fragP
->tc_frag_data
.mf_type
= mf_cmp
;
9794 fragP
->tc_frag_data
.branch_type
= align_branch_fused
;
9795 fragP
->tc_frag_data
.max_bytes
= MAX_FUSED_JCC_PADDING_SIZE
;
9797 else if (add_branch_prefix_frag_p ())
9799 unsigned int max_prefix_size
= align_branch_prefix_size
;
9801 /* Make room for padding. */
9802 frag_grow (max_prefix_size
);
9807 frag_var (rs_machine_dependent
, max_prefix_size
, 0,
9808 ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0),
9811 fragP
->tc_frag_data
.max_bytes
= max_prefix_size
;
9814 /* Since the VEX/EVEX prefix contains the implicit prefix, we
9815 don't need the explicit prefix. */
9816 if (!is_any_vex_encoding (&i
.tm
))
9818 switch (i
.tm
.opcode_modifier
.opcodeprefix
)
9827 if (!i
.tm
.cpu_flags
.bitfield
.cpupadlock
9828 || (i
.prefix
[REP_PREFIX
] != 0xf3))
9832 switch (i
.opcode_length
)
9837 /* Check for pseudo prefixes. */
9838 if (!i
.tm
.opcode_modifier
.isprefix
|| i
.tm
.base_opcode
)
9840 as_bad_where (insn_start_frag
->fr_file
,
9841 insn_start_frag
->fr_line
,
9842 _("pseudo prefix without instruction"));
9852 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9853 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
9854 R_X86_64_GOTTPOFF relocation so that linker can safely
9855 perform IE->LE optimization. A dummy REX_OPCODE prefix
9856 is also needed for lea with R_X86_64_GOTPC32_TLSDESC
9857 relocation for GDesc -> IE/LE optimization. */
9858 if (x86_elf_abi
== X86_64_X32_ABI
9860 && (i
.reloc
[0] == BFD_RELOC_X86_64_GOTTPOFF
9861 || i
.reloc
[0] == BFD_RELOC_X86_64_GOTPC32_TLSDESC
)
9862 && i
.prefix
[REX_PREFIX
] == 0)
9863 add_prefix (REX_OPCODE
);
9866 /* The prefix bytes. */
9867 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
9869 frag_opcode_byte (*q
);
9873 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
9879 frag_opcode_byte (*q
);
9882 /* There should be no other prefixes for instructions
9887 /* For EVEX instructions i.vrex should become 0 after
9888 build_evex_prefix. For VEX instructions upper 16 registers
9889 aren't available, so VREX should be 0. */
9892 /* Now the VEX prefix. */
9893 if (now_seg
!= absolute_section
)
9895 p
= frag_more (i
.vex
.length
);
9896 for (j
= 0; j
< i
.vex
.length
; j
++)
9897 p
[j
] = i
.vex
.bytes
[j
];
9900 abs_section_offset
+= i
.vex
.length
;
9903 /* Now the opcode; be careful about word order here! */
9904 j
= i
.opcode_length
;
9906 switch (i
.tm
.opcode_space
)
9921 if (now_seg
== absolute_section
)
9922 abs_section_offset
+= j
;
9925 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
9931 && i
.tm
.opcode_space
!= SPACE_BASE
)
9934 if (i
.tm
.opcode_space
!= SPACE_0F
)
9935 *p
++ = i
.tm
.opcode_space
== SPACE_0F38
9939 switch (i
.opcode_length
)
9942 /* Put out high byte first: can't use md_number_to_chars! */
9943 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
9946 *p
= i
.tm
.base_opcode
& 0xff;
9955 /* Now the modrm byte and sib byte (if present). */
9956 if (i
.tm
.opcode_modifier
.modrm
)
9958 frag_opcode_byte ((i
.rm
.regmem
<< 0)
9960 | (i
.rm
.mode
<< 6));
9961 /* If i.rm.regmem == ESP (4)
9962 && i.rm.mode != (Register mode)
9964 ==> need second modrm byte. */
9965 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
9967 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.word
))
9968 frag_opcode_byte ((i
.sib
.base
<< 0)
9969 | (i
.sib
.index
<< 3)
9970 | (i
.sib
.scale
<< 6));
9973 if (i
.disp_operands
)
9974 output_disp (insn_start_frag
, insn_start_off
);
9977 output_imm (insn_start_frag
, insn_start_off
);
9980 * frag_now_fix () returning plain abs_section_offset when we're in the
9981 * absolute section, and abs_section_offset not getting updated as data
9982 * gets added to the frag breaks the logic below.
9984 if (now_seg
!= absolute_section
)
9986 j
= encoding_length (insn_start_frag
, insn_start_off
, frag_more (0));
9988 as_warn (_("instruction length of %u bytes exceeds the limit of 15"),
9992 /* NB: Don't add prefix with GOTPC relocation since
9993 output_disp() above depends on the fixed encoding
9994 length. Can't add prefix with TLS relocation since
9995 it breaks TLS linker optimization. */
9996 unsigned int max
= i
.has_gotpc_tls_reloc
? 0 : 15 - j
;
9997 /* Prefix count on the current instruction. */
9998 unsigned int count
= i
.vex
.length
;
10000 for (k
= 0; k
< ARRAY_SIZE (i
.prefix
); k
++)
10001 /* REX byte is encoded in VEX/EVEX prefix. */
10002 if (i
.prefix
[k
] && (k
!= REX_PREFIX
|| !i
.vex
.length
))
10005 /* Count prefixes for extended opcode maps. */
10007 switch (i
.tm
.opcode_space
)
10022 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
10025 /* Set the maximum prefix size in BRANCH_PREFIX
10027 if (fragP
->tc_frag_data
.max_bytes
> max
)
10028 fragP
->tc_frag_data
.max_bytes
= max
;
10029 if (fragP
->tc_frag_data
.max_bytes
> count
)
10030 fragP
->tc_frag_data
.max_bytes
-= count
;
10032 fragP
->tc_frag_data
.max_bytes
= 0;
10036 /* Remember the maximum prefix size in FUSED_JCC_PADDING
10038 unsigned int max_prefix_size
;
10039 if (align_branch_prefix_size
> max
)
10040 max_prefix_size
= max
;
10042 max_prefix_size
= align_branch_prefix_size
;
10043 if (max_prefix_size
> count
)
10044 fragP
->tc_frag_data
.max_prefix_length
10045 = max_prefix_size
- count
;
10048 /* Use existing segment prefix if possible. Use CS
10049 segment prefix in 64-bit mode. In 32-bit mode, use SS
10050 segment prefix with ESP/EBP base register and use DS
10051 segment prefix without ESP/EBP base register. */
10052 if (i
.prefix
[SEG_PREFIX
])
10053 fragP
->tc_frag_data
.default_prefix
= i
.prefix
[SEG_PREFIX
];
10054 else if (flag_code
== CODE_64BIT
)
10055 fragP
->tc_frag_data
.default_prefix
= CS_PREFIX_OPCODE
;
10056 else if (i
.base_reg
10057 && (i
.base_reg
->reg_num
== 4
10058 || i
.base_reg
->reg_num
== 5))
10059 fragP
->tc_frag_data
.default_prefix
= SS_PREFIX_OPCODE
;
10061 fragP
->tc_frag_data
.default_prefix
= DS_PREFIX_OPCODE
;
10066 /* NB: Don't work with COND_JUMP86 without i386. */
10067 if (align_branch_power
10068 && now_seg
!= absolute_section
10069 && cpu_arch_flags
.bitfield
.cpui386
)
10071 /* Terminate each frag so that we can add prefix and check for
10073 frag_wane (frag_now
);
10080 pi ("" /*line*/, &i
);
10082 #endif /* DEBUG386 */
10085 /* Return the size of the displacement operand N. */
10088 disp_size (unsigned int n
)
10092 if (i
.types
[n
].bitfield
.disp64
)
10094 else if (i
.types
[n
].bitfield
.disp8
)
10096 else if (i
.types
[n
].bitfield
.disp16
)
10101 /* Return the size of the immediate operand N. */
10104 imm_size (unsigned int n
)
10107 if (i
.types
[n
].bitfield
.imm64
)
10109 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
10111 else if (i
.types
[n
].bitfield
.imm16
)
10117 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
10122 for (n
= 0; n
< i
.operands
; n
++)
10124 if (operand_type_check (i
.types
[n
], disp
))
10126 int size
= disp_size (n
);
10128 if (now_seg
== absolute_section
)
10129 abs_section_offset
+= size
;
10130 else if (i
.op
[n
].disps
->X_op
== O_constant
)
10132 offsetT val
= i
.op
[n
].disps
->X_add_number
;
10134 val
= offset_in_range (val
>> (size
== 1 ? i
.memshift
: 0),
10136 p
= frag_more (size
);
10137 md_number_to_chars (p
, val
, size
);
10141 enum bfd_reloc_code_real reloc_type
;
10142 bool pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
10143 bool sign
= (flag_code
== CODE_64BIT
&& size
== 4
10144 && (!want_disp32 (&i
.tm
)
10145 || (i
.tm
.opcode_modifier
.jump
&& !i
.jumpabsolute
10146 && !i
.types
[n
].bitfield
.baseindex
)))
10150 /* We can't have 8 bit displacement here. */
10151 gas_assert (!i
.types
[n
].bitfield
.disp8
);
10153 /* The PC relative address is computed relative
10154 to the instruction boundary, so in case immediate
10155 fields follows, we need to adjust the value. */
10156 if (pcrel
&& i
.imm_operands
)
10161 for (n1
= 0; n1
< i
.operands
; n1
++)
10162 if (operand_type_check (i
.types
[n1
], imm
))
10164 /* Only one immediate is allowed for PC
10165 relative address, except with .insn. */
10166 gas_assert (sz
== 0 || dot_insn ());
10167 sz
+= imm_size (n1
);
10169 /* We should find at least one immediate. */
10170 gas_assert (sz
!= 0);
10171 i
.op
[n
].disps
->X_add_number
-= sz
;
10174 p
= frag_more (size
);
10175 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
10177 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
10178 && (((reloc_type
== BFD_RELOC_32
10179 || reloc_type
== BFD_RELOC_X86_64_32S
10180 || (reloc_type
== BFD_RELOC_64
10182 && (i
.op
[n
].disps
->X_op
== O_symbol
10183 || (i
.op
[n
].disps
->X_op
== O_add
10184 && ((symbol_get_value_expression
10185 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
10187 || reloc_type
== BFD_RELOC_32_PCREL
))
10191 reloc_type
= BFD_RELOC_386_GOTPC
;
10192 i
.has_gotpc_tls_reloc
= true;
10193 i
.op
[n
].disps
->X_add_number
+=
10194 encoding_length (insn_start_frag
, insn_start_off
, p
);
10196 else if (reloc_type
== BFD_RELOC_64
)
10197 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
10199 /* Don't do the adjustment for x86-64, as there
10200 the pcrel addressing is relative to the _next_
10201 insn, and that is taken care of in other code. */
10202 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
10204 else if (align_branch_power
)
10206 switch (reloc_type
)
10208 case BFD_RELOC_386_TLS_GD
:
10209 case BFD_RELOC_386_TLS_LDM
:
10210 case BFD_RELOC_386_TLS_IE
:
10211 case BFD_RELOC_386_TLS_IE_32
:
10212 case BFD_RELOC_386_TLS_GOTIE
:
10213 case BFD_RELOC_386_TLS_GOTDESC
:
10214 case BFD_RELOC_386_TLS_DESC_CALL
:
10215 case BFD_RELOC_X86_64_TLSGD
:
10216 case BFD_RELOC_X86_64_TLSLD
:
10217 case BFD_RELOC_X86_64_GOTTPOFF
:
10218 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
10219 case BFD_RELOC_X86_64_TLSDESC_CALL
:
10220 i
.has_gotpc_tls_reloc
= true;
10225 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
,
10226 size
, i
.op
[n
].disps
, pcrel
,
10229 if (flag_code
== CODE_64BIT
&& size
== 4 && pcrel
10230 && !i
.prefix
[ADDR_PREFIX
])
10231 fixP
->fx_signed
= 1;
10233 /* Check for "call/jmp *mem", "mov mem, %reg",
10234 "test %reg, mem" and "binop mem, %reg" where binop
10235 is one of adc, add, and, cmp, or, sbb, sub, xor
10236 instructions without data prefix. Always generate
10237 R_386_GOT32X for "sym*GOT" operand in 32-bit mode. */
10238 if (i
.prefix
[DATA_PREFIX
] == 0
10239 && (generate_relax_relocations
10242 && i
.rm
.regmem
== 5))
10244 || (i
.rm
.mode
== 0 && i
.rm
.regmem
== 5))
10245 && i
.tm
.opcode_space
== SPACE_BASE
10246 && ((i
.operands
== 1
10247 && i
.tm
.base_opcode
== 0xff
10248 && (i
.rm
.reg
== 2 || i
.rm
.reg
== 4))
10249 || (i
.operands
== 2
10250 && (i
.tm
.base_opcode
== 0x8b
10251 || i
.tm
.base_opcode
== 0x85
10252 || (i
.tm
.base_opcode
& ~0x38) == 0x03))))
10256 fixP
->fx_tcbit
= i
.rex
!= 0;
10258 && (i
.base_reg
->reg_num
== RegIP
))
10259 fixP
->fx_tcbit2
= 1;
10262 fixP
->fx_tcbit2
= 1;
10270 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
10275 for (n
= 0; n
< i
.operands
; n
++)
10277 if (operand_type_check (i
.types
[n
], imm
))
10279 int size
= imm_size (n
);
10281 if (now_seg
== absolute_section
)
10282 abs_section_offset
+= size
;
10283 else if (i
.op
[n
].imms
->X_op
== O_constant
)
10287 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
10289 p
= frag_more (size
);
10290 md_number_to_chars (p
, val
, size
);
10294 /* Not absolute_section.
10295 Need a 32-bit fixup (don't support 8bit
10296 non-absolute imms). Try to support other
10298 enum bfd_reloc_code_real reloc_type
;
10301 if (i
.types
[n
].bitfield
.imm32s
10302 && (i
.suffix
== QWORD_MNEM_SUFFIX
10303 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)
10309 p
= frag_more (size
);
10310 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
10312 /* This is tough to explain. We end up with this one if we
10313 * have operands that look like
10314 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
10315 * obtain the absolute address of the GOT, and it is strongly
10316 * preferable from a performance point of view to avoid using
10317 * a runtime relocation for this. The actual sequence of
10318 * instructions often look something like:
10323 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
10325 * The call and pop essentially return the absolute address
10326 * of the label .L66 and store it in %ebx. The linker itself
10327 * will ultimately change the first operand of the addl so
10328 * that %ebx points to the GOT, but to keep things simple, the
10329 * .o file must have this operand set so that it generates not
10330 * the absolute address of .L66, but the absolute address of
10331 * itself. This allows the linker itself simply treat a GOTPC
10332 * relocation as asking for a pcrel offset to the GOT to be
10333 * added in, and the addend of the relocation is stored in the
10334 * operand field for the instruction itself.
10336 * Our job here is to fix the operand so that it would add
10337 * the correct offset so that %ebx would point to itself. The
10338 * thing that is tricky is that .-.L66 will point to the
10339 * beginning of the instruction, so we need to further modify
10340 * the operand so that it will point to itself. There are
10341 * other cases where you have something like:
10343 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
10345 * and here no correction would be required. Internally in
10346 * the assembler we treat operands of this form as not being
10347 * pcrel since the '.' is explicitly mentioned, and I wonder
10348 * whether it would simplify matters to do it this way. Who
10349 * knows. In earlier versions of the PIC patches, the
10350 * pcrel_adjust field was used to store the correction, but
10351 * since the expression is not pcrel, I felt it would be
10352 * confusing to do it this way. */
10354 if ((reloc_type
== BFD_RELOC_32
10355 || reloc_type
== BFD_RELOC_X86_64_32S
10356 || reloc_type
== BFD_RELOC_64
)
10358 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
10359 && (i
.op
[n
].imms
->X_op
== O_symbol
10360 || (i
.op
[n
].imms
->X_op
== O_add
10361 && ((symbol_get_value_expression
10362 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
10366 reloc_type
= BFD_RELOC_386_GOTPC
;
10367 else if (size
== 4)
10368 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
10369 else if (size
== 8)
10370 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
10371 i
.has_gotpc_tls_reloc
= true;
10372 i
.op
[n
].imms
->X_add_number
+=
10373 encoding_length (insn_start_frag
, insn_start_off
, p
);
10375 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
10376 i
.op
[n
].imms
, 0, reloc_type
);
10382 /* x86_cons_fix_new is called via the expression parsing code when a
10383 reloc is needed. We use this hook to get the correct .got reloc. */
10384 static int cons_sign
= -1;
10387 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
10388 expressionS
*exp
, bfd_reloc_code_real_type r
)
10390 r
= reloc (len
, 0, cons_sign
, r
);
10393 if (exp
->X_op
== O_secrel
)
10395 exp
->X_op
= O_symbol
;
10396 r
= BFD_RELOC_32_SECREL
;
10398 else if (exp
->X_op
== O_secidx
)
10399 r
= BFD_RELOC_16_SECIDX
;
10402 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
10405 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
10406 purpose of the `.dc.a' internal pseudo-op. */
10409 x86_address_bytes (void)
10411 if ((stdoutput
->arch_info
->mach
& bfd_mach_x64_32
))
10413 return stdoutput
->arch_info
->bits_per_address
/ 8;
#if (!(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
     || defined (LEX_AT)) && !defined (TE_PE)
# define lex_got(reloc, adjust, types) NULL
#else
/* Parse operands of the form
   <symbol>@GOTOFF+<nnn>
   and similar .plt or .got references.

   If we find one, set up the correct relocation in RELOC and copy the
   input string, minus the `@GOTOFF' into a malloc'd buffer for
   parsing by the calling routine.  Return this buffer, and if ADJUST
   is non-null set it to the length of the string we removed from the
   input line.  Otherwise return NULL.  */

static char *
lex_got (enum bfd_reloc_code_real *rel,
	 int *adjust,
	 i386_operand_type *types)
{
  /* Some of the relocations depend on the size of what field is to
     be relocated.  But in our callers i386_immediate and i386_displacement
     we don't yet know the operand size (this will be set by insn
     matching).  Hence we record the word32 relocation here,
     and adjust the reloc according to the real size in reloc().  */
  static const struct
  {
    const char *str;
    int len;
    const enum bfd_reloc_code_real rel[2];
    const i386_operand_type types64;
    bool need_GOT_symbol;
  }
    gotrel[] =
  {

#define OPERAND_TYPE_IMM32_32S_DISP32 { .bitfield = \
  { .imm32 = 1, .imm32s = 1, .disp32 = 1 } }
#define OPERAND_TYPE_IMM32_32S_64_DISP32 { .bitfield = \
  { .imm32 = 1, .imm32s = 1, .imm64 = 1, .disp32 = 1 } }
#define OPERAND_TYPE_IMM32_32S_64_DISP32_64 { .bitfield = \
  { .imm32 = 1, .imm32s = 1, .imm64 = 1, .disp32 = 1, .disp64 = 1 } }
#define OPERAND_TYPE_IMM64_DISP64 { .bitfield = \
  { .imm64 = 1, .disp64 = 1 } }

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    { STRING_COMMA_LEN ("SIZE"),      { BFD_RELOC_SIZE32,
					BFD_RELOC_SIZE32 },
      { .bitfield = { .imm32 = 1, .imm64 = 1 } }, false },
    { STRING_COMMA_LEN ("PLTOFF"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_PLTOFF64 },
      { .bitfield = { .imm64 = 1 } }, true },
    { STRING_COMMA_LEN ("PLT"),      { BFD_RELOC_386_PLT32,
				       BFD_RELOC_X86_64_PLT32    },
      OPERAND_TYPE_IMM32_32S_DISP32, false },
    { STRING_COMMA_LEN ("GOTPLT"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPLT64 },
      OPERAND_TYPE_IMM64_DISP64, true },
    { STRING_COMMA_LEN ("GOTOFF"),   { BFD_RELOC_386_GOTOFF,
				       BFD_RELOC_X86_64_GOTOFF64 },
      OPERAND_TYPE_IMM64_DISP64, true },
    { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPCREL },
      OPERAND_TYPE_IMM32_32S_DISP32, true },
    { STRING_COMMA_LEN ("TLSGD"),    { BFD_RELOC_386_TLS_GD,
				       BFD_RELOC_X86_64_TLSGD    },
      OPERAND_TYPE_IMM32_32S_DISP32, true },
    { STRING_COMMA_LEN ("TLSLDM"),   { BFD_RELOC_386_TLS_LDM,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE, true },
    { STRING_COMMA_LEN ("TLSLD"),    { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_TLSLD    },
      OPERAND_TYPE_IMM32_32S_DISP32, true },
    { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
				       BFD_RELOC_X86_64_GOTTPOFF },
      OPERAND_TYPE_IMM32_32S_DISP32, true },
    { STRING_COMMA_LEN ("TPOFF"),    { BFD_RELOC_386_TLS_LE_32,
				       BFD_RELOC_X86_64_TPOFF32  },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64, true },
    { STRING_COMMA_LEN ("NTPOFF"),   { BFD_RELOC_386_TLS_LE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE, true },
    { STRING_COMMA_LEN ("DTPOFF"),   { BFD_RELOC_386_TLS_LDO_32,
				       BFD_RELOC_X86_64_DTPOFF32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64, true },
    { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE, true },
    { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE, true },
    { STRING_COMMA_LEN ("GOT"),      { BFD_RELOC_386_GOT32,
				       BFD_RELOC_X86_64_GOT32    },
      OPERAND_TYPE_IMM32_32S_64_DISP32, true },
    { STRING_COMMA_LEN ("TLSDESC"),  { BFD_RELOC_386_TLS_GOTDESC,
				       BFD_RELOC_X86_64_GOTPC32_TLSDESC },
      OPERAND_TYPE_IMM32_32S_DISP32, true },
    { STRING_COMMA_LEN ("TLSCALL"),  { BFD_RELOC_386_TLS_DESC_CALL,
				       BFD_RELOC_X86_64_TLSDESC_CALL },
      OPERAND_TYPE_IMM32_32S_DISP32, true },
#endif /* OBJ_ELF || OBJ_MAYBE_ELF */
#ifdef TE_PE
    { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
				       BFD_RELOC_32_SECREL },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64, false },
#endif

#undef OPERAND_TYPE_IMM32_32S_DISP32
#undef OPERAND_TYPE_IMM32_32S_64_DISP32
#undef OPERAND_TYPE_IMM32_32S_64_DISP32_64
#undef OPERAND_TYPE_IMM64_DISP64

  };
  char *cp;
  unsigned int j;

#if defined (OBJ_MAYBE_ELF) && !defined (TE_PE)
  if (!IS_ELF)
    return NULL;
#endif

  for (cp = input_line_pointer; *cp != '@'; cp++)
    if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
      return NULL;

  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    {
      int len = gotrel[j].len;
      if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
	{
	  if (gotrel[j].rel[object_64bit] != 0)
	    {
	      int first, second;
	      char *tmpbuf, *past_reloc;

	      *rel = gotrel[j].rel[object_64bit];

	      if (types)
		{
		  if (flag_code != CODE_64BIT)
		    {
		      types->bitfield.imm32 = 1;
		      types->bitfield.disp32 = 1;
		    }
		  else
		    *types = gotrel[j].types64;
		}

	      if (gotrel[j].need_GOT_symbol && GOT_symbol == NULL)
		GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);

	      /* The length of the first part of our input line.  */
	      first = cp - input_line_pointer;

	      /* The second part goes from after the reloc token until
		 (and including) an end_of_line char or comma.  */
	      past_reloc = cp + 1 + len;
	      cp = past_reloc;
	      while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
		++cp;
	      second = cp + 1 - past_reloc;

	      /* Allocate and copy string.  The trailing NUL shouldn't
		 be necessary, but be safe.  */
	      tmpbuf = XNEWVEC (char, first + second + 2);
	      memcpy (tmpbuf, input_line_pointer, first);
	      if (second != 0 && *past_reloc != ' ')
		/* Replace the relocation token with ' ', so that
		   errors like foo@GOTOFF1 will be detected.  */
		tmpbuf[first++] = ' ';
	      else
		/* Increment length by 1 if the relocation token is
		   removed.  */
		len++;
	      if (adjust)
		*adjust = len;
	      memcpy (tmpbuf + first, past_reloc, second);
	      tmpbuf[first + second] = '\0';
	      return tmpbuf;
	    }

	  as_bad (_("@%s reloc is not supported with %d-bit output format"),
		  gotrel[j].str, 1 << (5 + object_64bit));
	  return NULL;
	}
    }

  /* Might be a symbol version string.  Don't as_bad here.  */
  return NULL;
}
#endif
10607 bfd_reloc_code_real_type
10608 x86_cons (expressionS
*exp
, int size
)
10610 bfd_reloc_code_real_type got_reloc
= NO_RELOC
;
10612 intel_syntax
= -intel_syntax
;
10614 expr_mode
= expr_operator_none
;
10616 #if ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
10617 && !defined (LEX_AT)) \
10619 if (size
== 4 || (object_64bit
&& size
== 8))
10621 /* Handle @GOTOFF and the like in an expression. */
10623 char *gotfree_input_line
;
10626 save
= input_line_pointer
;
10627 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
);
10628 if (gotfree_input_line
)
10629 input_line_pointer
= gotfree_input_line
;
10633 if (gotfree_input_line
)
10635 /* expression () has merrily parsed up to the end of line,
10636 or a comma - in the wrong buffer. Transfer how far
10637 input_line_pointer has moved to the right buffer. */
10638 input_line_pointer
= (save
10639 + (input_line_pointer
- gotfree_input_line
)
10641 free (gotfree_input_line
);
10642 if (exp
->X_op
== O_constant
10643 || exp
->X_op
== O_absent
10644 || exp
->X_op
== O_illegal
10645 || exp
->X_op
== O_register
10646 || exp
->X_op
== O_big
)
10648 char c
= *input_line_pointer
;
10649 *input_line_pointer
= 0;
10650 as_bad (_("missing or invalid expression `%s'"), save
);
10651 *input_line_pointer
= c
;
10653 else if ((got_reloc
== BFD_RELOC_386_PLT32
10654 || got_reloc
== BFD_RELOC_X86_64_PLT32
)
10655 && exp
->X_op
!= O_symbol
)
10657 char c
= *input_line_pointer
;
10658 *input_line_pointer
= 0;
10659 as_bad (_("invalid PLT expression `%s'"), save
);
10660 *input_line_pointer
= c
;
10668 intel_syntax
= -intel_syntax
;
10671 i386_intel_simplify (exp
);
10673 /* If not 64bit, massage value, to account for wraparound when !BFD64. */
10674 if (size
<= 4 && expr_mode
== expr_operator_present
10675 && exp
->X_op
== O_constant
&& !object_64bit
)
10676 exp
->X_add_number
= extend_to_32bit_address (exp
->X_add_number
);
10682 signed_cons (int size
)
10691 s_insn (int dummy ATTRIBUTE_UNUSED
)
10693 char mnemonic
[MAX_MNEM_SIZE
], *line
= input_line_pointer
, *ptr
;
10694 char *saved_ilp
= find_end_of_line (line
, false), saved_char
;
10698 bool vex
= false, xop
= false, evex
= false;
10699 static const templates tt
= { &i
.tm
, &i
.tm
+ 1 };
10703 saved_char
= *saved_ilp
;
10706 end
= parse_insn (line
, mnemonic
, true);
10710 *saved_ilp
= saved_char
;
10711 ignore_rest_of_line ();
10715 line
+= end
- line
;
10717 current_templates
= &tt
;
10718 i
.tm
.mnem_off
= MN__insn
;
10719 i
.tm
.extension_opcode
= None
;
10721 if (startswith (line
, "VEX")
10722 && (line
[3] == '.' || is_space_char (line
[3])))
10727 else if (startswith (line
, "XOP") && ISDIGIT (line
[3]))
10730 unsigned long n
= strtoul (line
+ 3, &e
, 16);
10732 if (e
== line
+ 5 && n
>= 0x08 && n
<= 0x1f
10733 && (*e
== '.' || is_space_char (*e
)))
10736 /* Arrange for build_vex_prefix() to emit 0x8f. */
10737 i
.tm
.opcode_space
= SPACE_XOP08
;
10738 i
.insn_opcode_space
= n
;
10742 else if (startswith (line
, "EVEX")
10743 && (line
[4] == '.' || is_space_char (line
[4])))
10750 ? i
.vec_encoding
== vex_encoding_evex
10752 ? i
.vec_encoding
== vex_encoding_vex
10753 || i
.vec_encoding
== vex_encoding_vex3
10754 : i
.vec_encoding
!= vex_encoding_default
)
10756 as_bad (_("pseudo-prefix conflicts with encoding specifier"));
10760 if (line
> end
&& i
.vec_encoding
== vex_encoding_default
)
10761 i
.vec_encoding
= evex
? vex_encoding_evex
: vex_encoding_vex
;
10763 if (line
> end
&& *line
== '.')
10765 /* Length specifier (VEX.L, XOP.L, EVEX.L'L). */
10773 i
.tm
.opcode_modifier
.evex
= EVEX128
;
10775 i
.tm
.opcode_modifier
.vex
= VEX128
;
10780 i
.tm
.opcode_modifier
.evex
= EVEX256
;
10782 i
.tm
.opcode_modifier
.vex
= VEX256
;
10787 i
.tm
.opcode_modifier
.evex
= EVEX512
;
10792 i
.tm
.opcode_modifier
.evex
= EVEX_L3
;
10796 if (line
[3] == 'G')
10799 i
.tm
.opcode_modifier
.evex
= EVEXLIG
;
10801 i
.tm
.opcode_modifier
.vex
= VEXScalar
; /* LIG */
10807 if (i
.tm
.opcode_modifier
.vex
|| i
.tm
.opcode_modifier
.evex
)
10812 if (line
[2] == '2' && line
[3] == '8')
10815 i
.tm
.opcode_modifier
.evex
= EVEX128
;
10817 i
.tm
.opcode_modifier
.vex
= VEX128
;
10823 if (line
[2] == '5' && line
[3] == '6')
10826 i
.tm
.opcode_modifier
.evex
= EVEX256
;
10828 i
.tm
.opcode_modifier
.vex
= VEX256
;
10834 if (evex
&& line
[2] == '1' && line
[3] == '2')
10836 i
.tm
.opcode_modifier
.evex
= EVEX512
;
10843 if (line
> end
&& *line
== '.')
10845 /* embedded prefix (VEX.pp, XOP.pp, EVEX.pp). */
10849 if (line
[2] == 'P')
10854 if (line
[2] == '6')
10856 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_0X66
;
10861 case 'F': case 'f':
10862 if (line
[2] == '3')
10864 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_0XF3
;
10867 else if (line
[2] == '2')
10869 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_0XF2
;
10876 if (line
> end
&& !xop
&& *line
== '.')
10878 /* Encoding space (VEX.mmmmm, EVEX.mmmm). */
10882 if (TOUPPER (line
[2]) != 'F')
10884 if (line
[3] == '.' || is_space_char (line
[3]))
10886 i
.insn_opcode_space
= SPACE_0F
;
10889 else if (line
[3] == '3'
10890 && (line
[4] == '8' || TOUPPER (line
[4]) == 'A')
10891 && (line
[5] == '.' || is_space_char (line
[5])))
10893 i
.insn_opcode_space
= line
[4] == '8' ? SPACE_0F38
: SPACE_0F3A
;
10899 if (ISDIGIT (line
[2]) && line
[2] != '0')
10902 unsigned long n
= strtoul (line
+ 2, &e
, 10);
10904 if (n
<= (evex
? 15 : 31)
10905 && (*e
== '.' || is_space_char (*e
)))
10907 i
.insn_opcode_space
= n
;
10915 if (line
> end
&& *line
== '.' && line
[1] == 'W')
10917 /* VEX.W, XOP.W, EVEX.W */
10921 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
10925 i
.tm
.opcode_modifier
.vexw
= VEXW1
;
10929 if (line
[3] == 'G')
10931 i
.tm
.opcode_modifier
.vexw
= VEXWIG
;
10937 if (i
.tm
.opcode_modifier
.vexw
)
10941 if (line
> end
&& *line
&& !is_space_char (*line
))
10943 /* Improve diagnostic a little. */
10944 if (*line
== '.' && line
[1] && !is_space_char (line
[1]))
10949 /* Before processing the opcode expression, find trailing "+r" or
10950 "/<digit>" specifiers. */
10951 for (ptr
= line
; ; ++ptr
)
10956 ptr
= strpbrk (ptr
, "+/,");
10957 if (ptr
== NULL
|| *ptr
== ',')
10960 if (*ptr
== '+' && ptr
[1] == 'r'
10961 && (ptr
[2] == ',' || (is_space_char (ptr
[2]) && ptr
[3] == ',')))
10965 i
.short_form
= true;
10969 if (*ptr
== '/' && ISDIGIT (ptr
[1])
10970 && (n
= strtoul (ptr
+ 1, &e
, 8)) < 8
10972 && (ptr
[2] == ',' || (is_space_char (ptr
[2]) && ptr
[3] == ',')))
10976 i
.tm
.extension_opcode
= n
;
10977 i
.tm
.opcode_modifier
.modrm
= 1;
10982 input_line_pointer
= line
;
10983 val
= get_absolute_expression ();
10984 line
= input_line_pointer
;
10986 if (i
.short_form
&& (val
& 7))
10987 as_warn ("`+r' assumes low three opcode bits to be clear");
10989 for (j
= 1; j
< sizeof(val
); ++j
)
10990 if (!(val
>> (j
* 8)))
10993 /* Trim off a prefix if present. */
10994 if (j
> 1 && !vex
&& !xop
&& !evex
)
10996 uint8_t byte
= val
>> ((j
- 1) * 8);
11000 case DATA_PREFIX_OPCODE
:
11001 case REPE_PREFIX_OPCODE
:
11002 case REPNE_PREFIX_OPCODE
:
11003 if (!add_prefix (byte
))
11005 val
&= ((uint64_t)1 << (--j
* 8)) - 1;
11010 /* Trim off encoding space. */
11011 if (j
> 1 && !i
.insn_opcode_space
&& (val
>> ((j
- 1) * 8)) == 0x0f)
11013 uint8_t byte
= val
>> ((--j
- 1) * 8);
11015 i
.insn_opcode_space
= SPACE_0F
;
11016 switch (byte
& -(j
> 1))
11019 i
.insn_opcode_space
= SPACE_0F38
;
11023 i
.insn_opcode_space
= SPACE_0F3A
;
11027 i
.tm
.opcode_space
= i
.insn_opcode_space
;
11028 val
&= ((uint64_t)1 << (j
* 8)) - 1;
11030 if (!i
.tm
.opcode_space
&& (vex
|| evex
))
11031 /* Arrange for build_vex_prefix() to properly emit 0xC4/0xC5.
11032 Also avoid hitting abort() there or in build_evex_prefix(). */
11033 i
.tm
.opcode_space
= i
.insn_opcode_space
== SPACE_0F
? SPACE_0F
11038 as_bad (_("opcode residual (%#"PRIx64
") too wide"), (uint64_t) val
);
11041 i
.opcode_length
= j
;
11043 /* Handle operands, if any. */
11046 i386_operand_type combined
;
11047 expressionS
*disp_exp
= NULL
;
11052 ptr
= parse_operands (line
+ 1, &i386_mnemonics
[MN__insn
]);
11060 as_bad (_("expecting operand after ','; got nothing"));
11064 if (i
.mem_operands
> 1)
11066 as_bad (_("too many memory references for `%s'"),
11067 &i386_mnemonics
[MN__insn
]);
11071 /* Are we to emit ModR/M encoding? */
11074 || i
.reg_operands
> (i
.vec_encoding
!= vex_encoding_default
)
11075 || i
.tm
.extension_opcode
!= None
))
11076 i
.tm
.opcode_modifier
.modrm
= 1;
11078 if (!i
.tm
.opcode_modifier
.modrm
11080 > i
.short_form
+ 0U + (i
.vec_encoding
!= vex_encoding_default
)
11081 || i
.mem_operands
))
11083 as_bad (_("too many register/memory operands"));
11087 /* Enforce certain constraints on operands. */
11088 switch (i
.reg_operands
+ i
.mem_operands
11089 + (i
.tm
.extension_opcode
!= None
))
11094 as_bad (_("too few register/memory operands"));
11097 /* Fall through. */
11099 if (i
.tm
.opcode_modifier
.modrm
)
11101 as_bad (_("too few register/memory operands"));
11111 && (i
.op
[0].imms
->X_op
!= O_constant
11112 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
)))
11114 as_bad (_("constant doesn't fit in %d bits"), evex
? 3 : 4);
11117 /* Fall through. */
11119 if (i
.vec_encoding
!= vex_encoding_default
)
11121 i
.tm
.opcode_modifier
.vexvvvv
= 1;
11124 /* Fall through. */
11126 as_bad (_("too many register/memory operands"));
11130 /* Bring operands into canonical order (imm, mem, reg). */
11135 for (j
= 1; j
< i
.operands
; ++j
)
11137 if ((!operand_type_check (i
.types
[j
- 1], imm
)
11138 && operand_type_check (i
.types
[j
], imm
))
11139 || (i
.types
[j
- 1].bitfield
.class != ClassNone
11140 && i
.types
[j
].bitfield
.class == ClassNone
))
11142 swap_2_operands (j
- 1, j
);
11149 /* For Intel syntax swap the order of register operands. */
11151 switch (i
.reg_operands
)
11158 swap_2_operands (i
.imm_operands
+ i
.mem_operands
+ 1, i
.operands
- 2);
11159 /* Fall through. */
11162 swap_2_operands (i
.imm_operands
+ i
.mem_operands
, i
.operands
- 1);
11169 /* Enforce constraints when using VSIB. */
11171 && (i
.index_reg
->reg_type
.bitfield
.xmmword
11172 || i
.index_reg
->reg_type
.bitfield
.ymmword
11173 || i
.index_reg
->reg_type
.bitfield
.zmmword
))
11175 if (i
.vec_encoding
== vex_encoding_default
)
11177 as_bad (_("VSIB unavailable with legacy encoding"));
11181 if (i
.vec_encoding
== vex_encoding_evex
11182 && i
.reg_operands
> 1)
11184 /* We could allow two register operands, encoding the 2nd one in
11185 an 8-bit immediate like for 4-register-operand insns, but that
11186 would require ugly fiddling with process_operands() and/or
11187 build_modrm_byte(). */
11188 as_bad (_("too many register operands with VSIB"));
11192 i
.tm
.opcode_modifier
.sib
= 1;
11195 /* Establish operand size encoding. */
11196 operand_type_set (&combined
, 0);
11198 for (j
= i
.imm_operands
; j
< i
.operands
; ++j
)
11200 i
.types
[j
].bitfield
.instance
= InstanceNone
;
11202 if (operand_type_check (i
.types
[j
], disp
))
11204 i
.types
[j
].bitfield
.baseindex
= 1;
11205 disp_exp
= i
.op
[j
].disps
;
11208 if (evex
&& i
.types
[j
].bitfield
.baseindex
)
11210 unsigned int n
= i
.memshift
;
11212 if (i
.types
[j
].bitfield
.byte
)
11214 else if (i
.types
[j
].bitfield
.word
)
11216 else if (i
.types
[j
].bitfield
.dword
)
11218 else if (i
.types
[j
].bitfield
.qword
)
11220 else if (i
.types
[j
].bitfield
.xmmword
)
11222 else if (i
.types
[j
].bitfield
.ymmword
)
11224 else if (i
.types
[j
].bitfield
.zmmword
)
11227 if (i
.memshift
< 32 && n
!= i
.memshift
)
11228 as_warn ("conflicting memory operand size specifiers");
11232 if ((i
.broadcast
.type
|| i
.broadcast
.bytes
)
11233 && j
== i
.broadcast
.operand
)
11236 combined
= operand_type_or (combined
, i
.types
[j
]);
11237 combined
.bitfield
.class = ClassNone
;
11240 switch ((i
.broadcast
.type
? i
.broadcast
.type
: 1)
11241 << (i
.memshift
< 32 ? i
.memshift
: 0))
11243 case 64: combined
.bitfield
.zmmword
= 1; break;
11244 case 32: combined
.bitfield
.ymmword
= 1; break;
11245 case 16: combined
.bitfield
.xmmword
= 1; break;
11246 case 8: combined
.bitfield
.qword
= 1; break;
11247 case 4: combined
.bitfield
.dword
= 1; break;
11250 if (i
.vec_encoding
== vex_encoding_default
)
11252 if (flag_code
== CODE_64BIT
&& combined
.bitfield
.qword
)
11254 else if ((flag_code
== CODE_16BIT
? combined
.bitfield
.dword
11255 : combined
.bitfield
.word
)
11256 && !add_prefix (DATA_PREFIX_OPCODE
))
11259 else if (!i
.tm
.opcode_modifier
.vexw
)
11261 if (flag_code
== CODE_64BIT
)
11263 if (combined
.bitfield
.qword
)
11264 i
.tm
.opcode_modifier
.vexw
= VEXW1
;
11265 else if (combined
.bitfield
.dword
)
11266 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
11269 if (!i
.tm
.opcode_modifier
.vexw
)
11270 i
.tm
.opcode_modifier
.vexw
= VEXWIG
;
11275 if (!i
.tm
.opcode_modifier
.vex
)
11277 if (combined
.bitfield
.ymmword
)
11278 i
.tm
.opcode_modifier
.vex
= VEX256
;
11279 else if (combined
.bitfield
.xmmword
)
11280 i
.tm
.opcode_modifier
.vex
= VEX128
;
11285 if (!i
.tm
.opcode_modifier
.evex
)
11287 /* Do _not_ consider AVX512VL here. */
11288 if (i
.rounding
.type
!= rc_none
|| combined
.bitfield
.zmmword
)
11289 i
.tm
.opcode_modifier
.evex
= EVEX512
;
11290 else if (combined
.bitfield
.ymmword
)
11291 i
.tm
.opcode_modifier
.evex
= EVEX256
;
11292 else if (combined
.bitfield
.xmmword
)
11293 i
.tm
.opcode_modifier
.evex
= EVEX128
;
11296 if (i
.memshift
>= 32)
11298 unsigned int n
= 0;
11300 switch (i
.tm
.opcode_modifier
.evex
)
11302 case EVEX512
: n
= 64; break;
11303 case EVEX256
: n
= 32; break;
11304 case EVEX128
: n
= 16; break;
11307 if (i
.broadcast
.type
)
11308 n
/= i
.broadcast
.type
;
11311 for (i
.memshift
= 0; !(n
& 1); n
>>= 1)
11313 else if (disp_exp
!= NULL
&& disp_exp
->X_op
== O_constant
11314 && disp_exp
->X_add_number
!= 0
11315 && i
.disp_encoding
!= disp_encoding_32bit
)
11317 if (!quiet_warnings
)
11318 as_warn ("cannot determine memory operand size");
11319 i
.disp_encoding
= disp_encoding_32bit
;
11324 if (i
.memshift
>= 32)
11327 i
.vec_encoding
= vex_encoding_error
;
11329 if (i
.disp_operands
&& !optimize_disp (&i
.tm
))
11332 /* Establish size for immediate operands. */
11333 for (j
= 0; j
< i
.imm_operands
; ++j
)
11335 expressionS
*expP
= i
.op
[j
].imms
;
11337 gas_assert (operand_type_check (i
.types
[j
], imm
));
11338 operand_type_set (&i
.types
[j
], 0);
11340 if (i
.imm_bits
[j
] > 32)
11341 i
.types
[j
].bitfield
.imm64
= 1;
11342 else if (i
.imm_bits
[j
] > 16)
11344 if (flag_code
== CODE_64BIT
&& (i
.flags
[j
] & Operand_Signed
))
11345 i
.types
[j
].bitfield
.imm32s
= 1;
11347 i
.types
[j
].bitfield
.imm32
= 1;
11349 else if (i
.imm_bits
[j
] > 8)
11350 i
.types
[j
].bitfield
.imm16
= 1;
11351 else if (i
.imm_bits
[j
] > 0)
11353 if (i
.flags
[j
] & Operand_Signed
)
11354 i
.types
[j
].bitfield
.imm8s
= 1;
11356 i
.types
[j
].bitfield
.imm8
= 1;
11358 else if (expP
->X_op
== O_constant
)
11360 i
.types
[j
] = smallest_imm_type (expP
->X_add_number
);
11361 i
.types
[j
].bitfield
.imm1
= 0;
11362 /* Oddly enough imm_size() checks imm64 first, so the bit needs
11363 zapping since smallest_imm_type() sets it unconditionally. */
11364 if (flag_code
!= CODE_64BIT
)
11366 i
.types
[j
].bitfield
.imm64
= 0;
11367 i
.types
[j
].bitfield
.imm32s
= 0;
11368 i
.types
[j
].bitfield
.imm32
= 1;
11370 else if (i
.types
[j
].bitfield
.imm32
|| i
.types
[j
].bitfield
.imm32s
)
11371 i
.types
[j
].bitfield
.imm64
= 0;
11374 /* Non-constant expressions are sized heuristically. */
11377 case CODE_64BIT
: i
.types
[j
].bitfield
.imm32s
= 1; break;
11378 case CODE_32BIT
: i
.types
[j
].bitfield
.imm32
= 1; break;
11379 case CODE_16BIT
: i
.types
[j
].bitfield
.imm16
= 1; break;
11383 for (j
= 0; j
< i
.operands
; ++j
)
11384 i
.tm
.operand_types
[j
] = i
.types
[j
];
11386 process_operands ();
11389 /* Don't set opcode until after processing operands, to avoid any
11390 potential special casing there. */
11391 i
.tm
.base_opcode
|= val
;
11393 if (i
.vec_encoding
== vex_encoding_error
11394 || (i
.vec_encoding
!= vex_encoding_evex
11395 ? i
.broadcast
.type
|| i
.broadcast
.bytes
11396 || i
.rounding
.type
!= rc_none
11398 : (i
.broadcast
.type
|| i
.broadcast
.bytes
)
11399 && i
.rounding
.type
!= rc_none
))
11401 as_bad (_("conflicting .insn operands"));
11407 if (!i
.tm
.opcode_modifier
.vex
)
11408 i
.tm
.opcode_modifier
.vex
= VEXScalar
; /* LIG */
11410 build_vex_prefix (NULL
);
11411 i
.rex
&= REX_OPCODE
;
11415 if (!i
.tm
.opcode_modifier
.evex
)
11416 i
.tm
.opcode_modifier
.evex
= EVEXLIG
;
11418 build_evex_prefix ();
11419 i
.rex
&= REX_OPCODE
;
11421 else if (i
.rex
!= 0)
11422 add_prefix (REX_OPCODE
| i
.rex
);
11427 *saved_ilp
= saved_char
;
11428 input_line_pointer
= line
;
11430 demand_empty_rest_of_line ();
11432 /* Make sure dot_insn() won't yield "true" anymore. */
/* Handler for the PE section-relative data directive: parse one or more
   comma-separated expressions and emit each as 4 bytes carrying an
   O_secrel (section-relative) relocation for symbol references.
   NOTE(review): this chunk is a lossy extraction — braces, the expression()
   call and loop structure between the numbered fragments are missing; the
   bare numbers (11438, 11445, ...) are original-source line markers.  */
11438 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
/* Rewrite plain symbol references to section-relative form.  */
11445 if (exp
.X_op
== O_symbol
)
11446 exp
.X_op
= O_secrel
;
/* Emit the value as a 32-bit (4-byte) quantity.  */
11448 emit_expr (&exp
, 4);
/* Continue while a ',' separates further expressions.  */
11450 while (*input_line_pointer
++ == ',');
/* Undo the post-increment past the non-',' terminator.  */
11452 input_line_pointer
--;
11453 demand_empty_rest_of_line ();
/* Handler for the PE section-index data directive: parse one or more
   comma-separated expressions and emit each as 2 bytes carrying an
   O_secidx (section-index) relocation for symbol references.
   NOTE(review): lossy extraction — interior lines are missing; bare
   numbers are original-source line markers.  */
11457 pe_directive_secidx (int dummy ATTRIBUTE_UNUSED
)
/* Rewrite plain symbol references to section-index form.  */
11464 if (exp
.X_op
== O_symbol
)
11465 exp
.X_op
= O_secidx
;
/* Emit the value as a 16-bit (2-byte) quantity.  */
11467 emit_expr (&exp
, 2);
/* Continue while a ',' separates further expressions.  */
11469 while (*input_line_pointer
++ == ',');
/* Undo the post-increment past the non-',' terminator.  */
11471 input_line_pointer
--;
11472 demand_empty_rest_of_line ();
11476 /* Handle Rounding Control / SAE specifiers. */
/* Match PSTR against the EVEX rounding-control / SAE specifier names in
   RC_NamesTable (e.g. "rn-sae", "rz-sae", "sae").  On a match, record the
   rounding type in i.rounding.type and return a pointer just past the
   matched name; reports an error if an RC/SAE specifier was already seen.
   NOTE(review): lossy extraction — the failure-path return is not visible;
   bare numbers are original-source line markers.  */
11479 RC_SAE_specifier (const char *pstr
)
/* Linear scan of the fixed name table.  */
11483 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); j
++)
11485 if (!strncmp (pstr
, RC_NamesTable
[j
].name
, RC_NamesTable
[j
].len
))
/* Only one RC/SAE specifier is permitted per insn.  */
11487 if (i
.rounding
.type
!= rc_none
)
11489 as_bad (_("duplicated `{%s}'"), RC_NamesTable
[j
].name
);
11493 i
.rounding
.type
= RC_NamesTable
[j
].type
;
/* Advance the caller past the consumed specifier text.  */
11495 return (char *)(pstr
+ RC_NamesTable
[j
].len
);
11502 /* Handle Vector operations. */
/* Parse AVX512-style "{...}" decorations following an operand:
   broadcast ("{1to<N>}"), write mask ("{%kN}"), zeroing ("{z}"),
   .insn-only modifiers (":d<N>" memshift, ":s"/":u" immediate size) and,
   in Intel syntax, RC/SAE specifiers via RC_SAE_specifier().  Results are
   recorded in the global insn state `i'.
   NOTE(review): this chunk is a lossy extraction — many original lines
   (braces, labels such as duplicated_vec_op/unknown_vec_op, returns) are
   missing; the bare numbers are original-source line markers.  Tokens
   below are preserved byte-for-byte.  */
11505 check_VecOperations (char *op_string
)
11507 const reg_entry
*mask
;
/* Each decoration starts with '{'.  */
11514 if (*op_string
== '{')
11518 /* Check broadcasts. */
11519 if (startswith (op_string
, "1to"))
11521 unsigned int bcst_type
;
/* Only one broadcast spec per operand.  */
11523 if (i
.broadcast
.type
)
11524 goto duplicated_vec_op
;
/* Decode the element count: 8, 4, 2, 16 or 32.  */
11527 if (*op_string
== '8')
11529 else if (*op_string
== '4')
11531 else if (*op_string
== '2')
11533 else if (*op_string
== '1'
11534 && *(op_string
+1) == '6')
11539 else if (*op_string
== '3'
11540 && *(op_string
+1) == '2')
11547 as_bad (_("Unsupported broadcast: `%s'"), saved
);
11552 i
.broadcast
.type
= bcst_type
;
11553 i
.broadcast
.operand
= this_operand
;
11555 /* For .insn a data size specifier may be appended. */
11556 if (dot_insn () && *op_string
== ':')
11557 goto dot_insn_modifier
;
11559 /* Check .insn special cases. */
11560 else if (dot_insn () && *op_string
== ':')
11563 switch (op_string
[1])
/* ":d<N>" — memory operand size; derive memshift from power-of-2 N.  */
11568 if (i
.memshift
< 32)
11569 goto duplicated_vec_op
;
11571 n
= strtoul (op_string
+ 2, &end_op
, 0);
11573 for (i
.memshift
= 0; !(n
& 1); n
>>= 1)
/* Accept only an exact power of two.  */
11575 if (i
.memshift
< 32 && n
== 1)
11576 op_string
= end_op
;
11579 case 's': case 'u':
11580 /* This isn't really a "vector" operation, but a sign/size
11581 specifier for immediate operands of .insn. Note that AT&T
11582 syntax handles the same in i386_immediate(). */
11586 if (i
.imm_bits
[this_operand
])
11587 goto duplicated_vec_op
;
11589 n
= strtoul (op_string
+ 2, &end_op
, 0);
/* Bit width limited to 64 in 64-bit code, else 32.  */
11590 if (n
&& n
<= (flag_code
== CODE_64BIT
? 64 : 32))
11592 i
.imm_bits
[this_operand
] = n
;
11593 if (op_string
[1] == 's')
11594 i
.flags
[this_operand
] |= Operand_Signed
;
11595 op_string
= end_op
;
11600 /* Check masking operation. */
11601 else if ((mask
= parse_register (op_string
, &end_op
)) != NULL
)
11603 if (mask
== &bad_reg
)
11606 /* k0 can't be used for write mask. */
11607 if (mask
->reg_type
.bitfield
.class != RegMask
|| !mask
->reg_num
)
11609 as_bad (_("`%s%s' can't be used for write mask"),
11610 register_prefix
, mask
->reg_name
);
11617 i
.mask
.operand
= this_operand
;
/* A mask register was already recorded — duplicate decoration.  */
11619 else if (i
.mask
.reg
->reg_num
)
11620 goto duplicated_vec_op
;
11625 /* Only "{z}" is allowed here. No need to check
11626 zeroing mask explicitly. */
11627 if (i
.mask
.operand
!= (unsigned int) this_operand
)
11629 as_bad (_("invalid write mask `%s'"), saved
);
11634 op_string
= end_op
;
11636 /* Check zeroing-flag for masking operation. */
11637 else if (*op_string
== 'z')
/* "{z}" before "{%k}": provisionally use k0 until the mask is seen.  */
11641 i
.mask
.reg
= reg_k0
;
11642 i
.mask
.zeroing
= 1;
11643 i
.mask
.operand
= this_operand
;
11647 if (i
.mask
.zeroing
)
11650 as_bad (_("duplicated `%s'"), saved
);
11654 i
.mask
.zeroing
= 1;
11656 /* Only "{%k}" is allowed here. No need to check mask
11657 register explicitly. */
11658 if (i
.mask
.operand
!= (unsigned int) this_operand
)
11660 as_bad (_("invalid zeroing-masking `%s'"),
/* Intel syntax also allows RC/SAE here, e.g. "{rn-sae}".  */
11668 else if (intel_syntax
11669 && (op_string
= RC_SAE_specifier (op_string
)) != NULL
)
11670 i
.rounding
.modifier
= true;
11672 goto unknown_vec_op
;
/* Every decoration must be closed by '}'.  */
11674 if (*op_string
!= '}')
11676 as_bad (_("missing `}' in `%s'"), saved
);
11681 /* Strip whitespace since the addition of pseudo prefixes
11682 changed how the scrubber treats '{'. */
11683 if (is_space_char (*op_string
))
11689 /* We don't know this one. */
11690 as_bad (_("unknown vector operation: `%s'"), saved
);
/* "{z}" alone (i.e. with k0) is rejected.  */
11694 if (i
.mask
.reg
&& i
.mask
.zeroing
&& !i
.mask
.reg
->reg_num
)
11696 as_bad (_("zeroing-masking only allowed with write mask"));
/* Parse the AT&T immediate operand text at IMM_START (past the '$') into
   i.op[this_operand].imms, handling GOT-style relocation operators via
   lex_got() and the .insn "{:s<N>}"/"{:u<N>}" size suffix.  Delegates
   final classification to i386_finalize_immediate().
   NOTE(review): lossy extraction — braces and some statements between the
   numbered fragments are missing; bare numbers are original-source line
   markers.  */
11704 i386_immediate (char *imm_start
)
11706 char *save_input_line_pointer
;
11707 char *gotfree_input_line
;
11710 i386_operand_type types
;
/* Start permissive: any operand type, narrowed by lex_got().  */
11712 operand_type_set (&types
, ~0);
11714 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
11716 as_bad (_("at most %d immediate operands are allowed"),
11717 MAX_IMMEDIATE_OPERANDS
);
/* Claim the next immediate-expression slot.  */
11721 exp
= &im_expressions
[i
.imm_operands
++];
11722 i
.op
[this_operand
].imms
= exp
;
11724 if (is_space_char (*imm_start
))
/* Temporarily point the scanner at the operand text.  */
11727 save_input_line_pointer
= input_line_pointer
;
11728 input_line_pointer
= imm_start
;
/* Recognize @GOT-style relocation operators; may rewrite the input.  */
11730 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
11731 if (gotfree_input_line
)
11732 input_line_pointer
= gotfree_input_line
;
11734 expr_mode
= expr_operator_none
;
11735 exp_seg
= expression (exp
);
11737 /* For .insn immediates there may be a size specifier. */
11738 if (dot_insn () && *input_line_pointer
== '{' && input_line_pointer
[1] == ':'
11739 && (input_line_pointer
[2] == 's' || input_line_pointer
[2] == 'u'))
11742 unsigned long n
= strtoul (input_line_pointer
+ 3, &e
, 0);
/* Bit count bounded by the mode's immediate width.  */
11744 if (*e
== '}' && n
&& n
<= (flag_code
== CODE_64BIT
? 64 : 32))
11746 i
.imm_bits
[this_operand
] = n
;
11747 if (input_line_pointer
[2] == 's')
11748 i
.flags
[this_operand
] |= Operand_Signed
;
11749 input_line_pointer
= e
+ 1;
/* Anything left after the expression is an error.  */
11753 SKIP_WHITESPACE ();
11754 if (*input_line_pointer
)
11755 as_bad (_("junk `%s' after expression"), input_line_pointer
);
/* Restore the scanner state and free any lex_got() copy.  */
11757 input_line_pointer
= save_input_line_pointer
;
11758 if (gotfree_input_line
)
11760 free (gotfree_input_line
)
;
/* Presumably an error path: a constant here is invalidated — TODO
   confirm against the (missing) surrounding condition.  */
11762 if (exp
->X_op
== O_constant
)
11763 exp
->X_op
= O_illegal
;
/* Registers are not valid immediates.  */
11766 if (exp_seg
== reg_section
)
11768 as_bad (_("illegal immediate register operand %s"), imm_start
);
11772 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
/* Classify a parsed immediate expression EXP: reject invalid forms,
   size constants (deferred — imm64 set for now), and for symbolic
   immediates allow every immediate width, then intersect with TYPES
   from lex_got().
   NOTE(review): lossy extraction — returns/braces missing between the
   numbered fragments; bare numbers are original-source line markers.  */
11776 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
11777 i386_operand_type types
, const char *imm_start
)
/* Hard failures from the expression parser.  */
11779 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
11782 as_bad (_("missing or invalid immediate expression `%s'"),
11786 else if (exp
->X_op
== O_constant
)
11788 /* Size it properly later. */
11789 i
.types
[this_operand
].bitfield
.imm64
= 1;
11791 /* If not 64bit, sign/zero extend val, to account for wraparound
11793 if (expr_mode
== expr_operator_present
11794 && flag_code
!= CODE_64BIT
&& !object_64bit
)
11795 exp
->X_add_number
= extend_to_32bit_address (exp
->X_add_number
);
11797 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
/* a.out only knows a fixed set of sections for operands.  */
11798 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
11799 && exp_seg
!= absolute_section
11800 && exp_seg
!= text_section
11801 && exp_seg
!= data_section
11802 && exp_seg
!= bss_section
11803 && exp_seg
!= undefined_section
11804 && !bfd_is_com_section (exp_seg
))
11806 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
11812 /* This is an address. The size of the address will be
11813 determined later, depending on destination register,
11814 suffix, or the default for the section. */
11815 i
.types
[this_operand
].bitfield
.imm8
= 1;
11816 i
.types
[this_operand
].bitfield
.imm16
= 1;
11817 i
.types
[this_operand
].bitfield
.imm32
= 1;
11818 i
.types
[this_operand
].bitfield
.imm32s
= 1;
11819 i
.types
[this_operand
].bitfield
.imm64
= 1;
/* Narrow by whatever lex_got() permitted.  */
11820 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
/* Parse the scale factor at SCALE (AT&T "(base,index,scale)" syntax);
   store log2 of it (1/2/4/8 -> 0/1/2/3) in i.log2_scale_factor.  A scale
   without an index register only warns and is reset to 0.
   NOTE(review): lossy extraction — the switch skeleton, error return and
   final return are missing; bare numbers are original-source line
   markers.  */
11828 i386_scale (char *scale
)
11831 char *save
= input_line_pointer
;
/* Evaluate the scale via the generic expression machinery.  */
11833 input_line_pointer
= scale
;
11834 val
= get_absolute_expression ();
/* val == 1 */
11839 i
.log2_scale_factor
= 0;
/* val == 2 */
11842 i
.log2_scale_factor
= 1;
/* val == 4 */
11845 i
.log2_scale_factor
= 2;
/* val == 8 */
11848 i
.log2_scale_factor
= 3;
/* Invalid scale: temporarily NUL-terminate for the diagnostic.  */
11852 char sep
= *input_line_pointer
;
11854 *input_line_pointer
= '\0';
11855 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
11857 *input_line_pointer
= sep
;
11858 input_line_pointer
= save
;
/* A scale with no index register is meaningless — warn and drop it.  */
11862 if (i
.log2_scale_factor
!= 0 && i
.index_reg
== 0)
11864 as_warn (_("scale factor of %d without an index register"),
11865 1 << i
.log2_scale_factor
);
11866 i
.log2_scale_factor
= 0;
/* Hand back the position just past the consumed text.  */
11868 scale
= input_line_pointer
;
11869 input_line_pointer
= save
;
/* Parse the displacement text [DISP_START, DISP_END) of a memory or
   branch operand: compute the permissible displacement widths (bigdisp)
   from addressing mode, prefixes and branch kind, run the expression
   parser over the text (with GOT operator handling), and finish via
   i386_finalize_displacement().
   NOTE(review): lossy extraction — many original lines (braces, parts of
   conditions, the return) are missing; bare numbers are original-source
   line markers.  Tokens preserved byte-for-byte.  */
11874 i386_displacement (char *disp_start
, char *disp_end
)
11878 char *save_input_line_pointer
;
11879 char *gotfree_input_line
;
11881 i386_operand_type bigdisp
, types
= anydisp
;
11884 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
11886 as_bad (_("at most %d displacement operands are allowed"),
11887 MAX_MEMORY_OPERANDS
);
11891 operand_type_set (&bigdisp
, 0);
/* Non-branch (or register-form) operands: width follows the active
   addressing mode and any address-size override.  */
11893 || i
.types
[this_operand
].bitfield
.baseindex
11894 || (current_templates
->start
->opcode_modifier
.jump
!= JUMP
11895 && current_templates
->start
->opcode_modifier
.jump
!= JUMP_DWORD
))
11897 i386_addressing_mode ();
11898 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
11899 if (flag_code
== CODE_64BIT
)
11901 bigdisp
.bitfield
.disp32
= 1;
11903 bigdisp
.bitfield
.disp64
= 1;
11905 else if ((flag_code
== CODE_16BIT
) ^ override
)
11906 bigdisp
.bitfield
.disp16
= 1;
11908 bigdisp
.bitfield
.disp32
= 1;
11912 /* For PC-relative branches, the width of the displacement may be
11913 dependent upon data size, but is never dependent upon address size.
11914 Also make sure to not unintentionally match against a non-PC-relative
11915 branch template. */
11916 static templates aux_templates
;
11917 const insn_template
*t
= current_templates
->start
;
11918 bool has_intel64
= false;
/* Trim the template list to those sharing the first template's branch
   kind, noting whether any is Intel64-only.  */
11920 aux_templates
.start
= t
;
11921 while (++t
< current_templates
->end
)
11923 if (t
->opcode_modifier
.jump
11924 != current_templates
->start
->opcode_modifier
.jump
)
11926 if ((t
->opcode_modifier
.isa64
>= INTEL64
))
11927 has_intel64
= true;
11929 if (t
< current_templates
->end
)
11931 aux_templates
.end
= t
;
11932 current_templates
= &aux_templates
;
/* Branch displacement width follows data size (operand-size prefix /
   suffix), not address size.  */
11935 override
= (i
.prefix
[DATA_PREFIX
] != 0);
11936 if (flag_code
== CODE_64BIT
)
11938 if ((override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
11939 && (!intel64
|| !has_intel64
))
11940 bigdisp
.bitfield
.disp16
= 1;
11942 bigdisp
.bitfield
.disp32
= 1;
11947 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
11949 : LONG_MNEM_SUFFIX
));
11950 bigdisp
.bitfield
.disp32
= 1;
11951 if ((flag_code
== CODE_16BIT
) ^ override
)
11953 bigdisp
.bitfield
.disp32
= 0;
11954 bigdisp
.bitfield
.disp16
= 1;
11958 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
/* Claim the next displacement-expression slot.  */
11961 exp
= &disp_expressions
[i
.disp_operands
];
11962 i
.op
[this_operand
].disps
= exp
;
/* Point the scanner at the displacement text, NUL-terminated.  */
11964 save_input_line_pointer
= input_line_pointer
;
11965 input_line_pointer
= disp_start
;
11966 END_STRING_AND_SAVE (disp_end
);
11968 #ifndef GCC_ASM_O_HACK
11969 #define GCC_ASM_O_HACK 0
11972 END_STRING_AND_SAVE (disp_end
+ 1);
11973 if (i
.types
[this_operand
].bitfield
.baseIndex
11974 && displacement_string_end
[-1] == '+')
11976 /* This hack is to avoid a warning when using the "o"
11977 constraint within gcc asm statements.
11980 #define _set_tssldt_desc(n,addr,limit,type) \
11981 __asm__ __volatile__ ( \
11982 "movw %w2,%0\n\t" \
11983 "movw %w1,2+%0\n\t" \
11984 "rorl $16,%1\n\t" \
11985 "movb %b1,4+%0\n\t" \
11986 "movb %4,5+%0\n\t" \
11987 "movb $0,6+%0\n\t" \
11988 "movb %h1,7+%0\n\t" \
11990 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
11992 This works great except that the output assembler ends
11993 up looking a bit weird if it turns out that there is
11994 no offset. You end up producing code that looks like:
12007 So here we provide the missing zero. */
12009 *displacement_string_end
= '0';
/* Recognize @GOT-style relocation operators.  */
12012 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
12013 if (gotfree_input_line
)
12014 input_line_pointer
= gotfree_input_line
;
12016 expr_mode
= expr_operator_none
;
12017 exp_seg
= expression (exp
);
12019 SKIP_WHITESPACE ();
12020 if (*input_line_pointer
)
12021 as_bad (_("junk `%s' after expression"), input_line_pointer
);
12023 RESTORE_END_STRING (disp_end
+ 1);
/* Restore scanner state; free any lex_got() copy.  */
12025 input_line_pointer
= save_input_line_pointer
;
12026 if (gotfree_input_line
)
12028 free (gotfree_input_line
)
;
/* Presumably an error path invalidating the expression — TODO confirm
   against the (missing) surrounding condition.  */
12030 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
12031 exp
->X_op
= O_illegal
;
12034 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
12036 RESTORE_END_STRING (disp_end
);
/* Post-process a parsed displacement EXP: convert GOTOFF/GOTPCREL-style
   relocations to a GOT_symbol-relative O_subtract form, reject invalid
   expressions, extend 32-bit constants, and for displacement-only
   operands intersect the allowed disp widths with TYPES.
   NOTE(review): lossy extraction — braces and the return value are
   missing; bare numbers are original-source line markers.  */
12042 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
12043 i386_operand_type types
, const char *disp_start
)
12047 /* We do this to make sure that the section symbol is in
12048 the symbol table. We will ultimately change the relocation
12049 to be relative to the beginning of the section. */
12050 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
12051 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
12052 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
12054 if (exp
->X_op
!= O_symbol
)
/* Materialize the section symbol for local symbols in real sections.  */
12057 if (S_IS_LOCAL (exp
->X_add_symbol
)
12058 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
12059 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
12060 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
/* Rewrite as "sym - GOT_symbol" with a plain data relocation.  */
12061 exp
->X_op
= O_subtract
;
12062 exp
->X_op_symbol
= GOT_symbol
;
12063 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
12064 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
12065 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
12066 i
.reloc
[this_operand
] = BFD_RELOC_64
;
12068 i
.reloc
[this_operand
] = BFD_RELOC_32
;
/* Hard failures from the expression parser.  */
12071 else if (exp
->X_op
== O_absent
12072 || exp
->X_op
== O_illegal
12073 || exp
->X_op
== O_big
)
12076 as_bad (_("missing or invalid displacement expression `%s'"),
12081 else if (exp
->X_op
== O_constant
)
12083 /* Sizing gets taken care of by optimize_disp().
12085 If not 64bit, sign/zero extend val, to account for wraparound
12087 if (expr_mode
== expr_operator_present
12088 && flag_code
!= CODE_64BIT
&& !object_64bit
)
12089 exp
->X_add_number
= extend_to_32bit_address (exp
->X_add_number
);
12092 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
/* a.out only knows a fixed set of sections for operands.  */
12093 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
12094 && exp_seg
!= absolute_section
12095 && exp_seg
!= text_section
12096 && exp_seg
!= data_section
12097 && exp_seg
!= bss_section
12098 && exp_seg
!= undefined_section
12099 && !bfd_is_com_section (exp_seg
))
12101 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
/* Short branches take an 8-bit displacement.  */
12106 else if (current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
)
12107 i
.types
[this_operand
].bitfield
.disp8
= 1;
12109 /* Check if this is a displacement only operand. */
12110 if (!i
.types
[this_operand
].bitfield
.baseindex
)
12111 i
.types
[this_operand
] =
12112 operand_type_or (operand_type_and_not (i
.types
[this_operand
], anydisp
),
12113 operand_type_and (i
.types
[this_operand
], types
));
12118 /* Return the active addressing mode, taking address override and
12119 registers forming the address into consideration. Update the
12120 address override prefix if necessary. */
/* NOTE(review): lossy extraction — braces/returns between the numbered
   fragments are missing; bare numbers are original-source line markers.
   Tokens preserved byte-for-byte; see also the original comment above
   (returns the active addressing mode, may set the ADDR_PREFIX).  */
12122 static enum flag_code
12123 i386_addressing_mode (void)
12125 enum flag_code addr_mode
;
/* An explicit address-size prefix toggles 16 <-> 32 bit.  */
12127 if (i
.prefix
[ADDR_PREFIX
])
12128 addr_mode
= flag_code
== CODE_32BIT
? CODE_16BIT
: CODE_32BIT
;
12129 else if (flag_code
== CODE_16BIT
12130 && current_templates
->start
->cpu_flags
.bitfield
.cpumpx
12131 /* Avoid replacing the "16-bit addressing not allowed" diagnostic
12132 from md_assemble() by "is not a valid base/index expression"
12133 when there is a base and/or index. */
12134 && !i
.types
[this_operand
].bitfield
.baseindex
)
12136 /* MPX insn memory operands with neither base nor index must be forced
12137 to use 32-bit addressing in 16-bit mode. */
12138 addr_mode
= CODE_32BIT
;
12139 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
/* No 16-bit disp flags may have been set yet in this situation.  */
12141 gas_assert (!i
.types
[this_operand
].bitfield
.disp16
);
12142 gas_assert (!i
.types
[this_operand
].bitfield
.disp32
);
/* Default: the current code mode.  */
12146 addr_mode
= flag_code
;
12148 #if INFER_ADDR_PREFIX
12149 if (i
.mem_operands
== 0)
12151 /* Infer address prefix from the first memory operand. */
12152 const reg_entry
*addr_reg
= i
.base_reg
;
12154 if (addr_reg
== NULL
)
12155 addr_reg
= i
.index_reg
;
/* A 32-bit (dword) base/index forces 32-bit addressing; a 16-bit
   (word) one forces 16-bit addressing outside 64-bit mode.  */
12159 if (addr_reg
->reg_type
.bitfield
.dword
)
12160 addr_mode
= CODE_32BIT
;
12161 else if (flag_code
!= CODE_64BIT
12162 && addr_reg
->reg_type
.bitfield
.word
)
12163 addr_mode
= CODE_16BIT
;
/* Inferred mode differs from the default: emit the prefix.  */
12165 if (addr_mode
!= flag_code
)
12167 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
12169 /* Change the size of any displacement too. At most one
12170 of Disp16 or Disp32 is set.
12171 FIXME. There doesn't seem to be any real need for
12172 separate Disp16 and Disp32 flags. The same goes for
12173 Imm16 and Imm32. Removing them would probably clean
12174 up the code quite a lot. */
12175 if (flag_code
!= CODE_64BIT
12176 && (i
.types
[this_operand
].bitfield
.disp16
12177 || i
.types
[this_operand
].bitfield
.disp32
))
12179 static const i386_operand_type disp16_32
= {
12180 .bitfield
= { .disp16
= 1, .disp32
= 1 }
/* XOR flips whichever one bit was set to the other.  */
12183 i
.types
[this_operand
]
12184 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
12195 /* Make sure the memory operand we've been dealt is valid.
12196 Return 1 on success, 0 on a failure. */
/* Validate the base/index registers of the current memory operand against
   the active addressing mode and the instruction template: string-insn
   rSI/rDI/rBX restrictions, 32/64-bit register-class and RegIP/RegIZ
   rules, BND/SIBMEM restrictions, and 16-bit base/index pairing rules.
   Returns nonzero on success (per the comment above this function).
   NOTE(review): lossy extraction — many lines (braces, parts of
   conditions, returns) are missing; bare numbers are original-source
   line markers.  Tokens preserved byte-for-byte.  */
12199 i386_index_check (const char *operand_string
)
12201 const char *kind
= "base/index";
12202 enum flag_code addr_mode
= i386_addressing_mode ();
12203 const insn_template
*t
= current_templates
->end
- 1;
12205 if (t
->opcode_modifier
.isstring
)
12207 /* Memory operands of string insns are special in that they only allow
12208 a single register (rDI, rSI, or rBX) as their memory address. */
12209 const reg_entry
*expected_reg
;
12210 static const char *di_si
[][2] =
12216 static const char *bx
[] = { "ebx", "bx", "rbx" };
12218 kind
= "string address";
12220 if (t
->opcode_modifier
.prefixok
== PrefixRep
)
/* Which operand must use ES: (rDI) for this string insn.  */
12222 int es_op
= t
->opcode_modifier
.isstring
- IS_STRING_ES_OP0
;
12225 if (!t
->operand_types
[0].bitfield
.baseindex
12226 || ((!i
.mem_operands
!= !intel_syntax
)
12227 && t
->operand_types
[1].bitfield
.baseindex
))
/* Look up the mandated register name for this mode/operand.  */
12230 = (const reg_entry
*) str_hash_find (reg_hash
,
12231 di_si
[addr_mode
][op
== es_op
]);
12235 = (const reg_entry
*)str_hash_find (reg_hash
, bx
[addr_mode
]);
12237 if (i
.base_reg
!= expected_reg
12239 || operand_type_check (i
.types
[this_operand
], disp
))
12241 /* The second memory operand must have the same size as
12245 && !((addr_mode
== CODE_64BIT
12246 && i
.base_reg
->reg_type
.bitfield
.qword
)
12247 || (addr_mode
== CODE_32BIT
12248 ? i
.base_reg
->reg_type
.bitfield
.dword
12249 : i
.base_reg
->reg_type
.bitfield
.word
)))
/* Wrong register used — warn with the expected spelling.  */
12252 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
12254 intel_syntax
? '[' : '(',
12256 expected_reg
->reg_name
,
12257 intel_syntax
? ']' : ')');
12264 as_bad (_("`%s' is not a valid %s expression"),
12265 operand_string
, kind
);
12270 t
= current_templates
->start
;
12272 if (addr_mode
!= CODE_16BIT
)
12274 /* 32-bit/64-bit checks. */
/* {disp16} pseudo prefix is invalid outside 16-bit addressing (and
   vice versa below).  */
12275 if (i
.disp_encoding
== disp_encoding_16bit
)
12278 as_bad (_("invalid `%s' prefix"),
12279 addr_mode
== CODE_16BIT
? "{disp32}" : "{disp16}");
/* Base must be a qword (64-bit mode) / dword register; RIP may not be
   combined with an index; RegIZ is never a valid base.  */
12284 && ((addr_mode
== CODE_64BIT
12285 ? !i
.base_reg
->reg_type
.bitfield
.qword
12286 : !i
.base_reg
->reg_type
.bitfield
.dword
)
12287 || (i
.index_reg
&& i
.base_reg
->reg_num
== RegIP
)
12288 || i
.base_reg
->reg_num
== RegIZ
))
/* Index must be a general register usable as index (vector indexes
   are checked elsewhere).  */
12290 && !i
.index_reg
->reg_type
.bitfield
.xmmword
12291 && !i
.index_reg
->reg_type
.bitfield
.ymmword
12292 && !i
.index_reg
->reg_type
.bitfield
.zmmword
12293 && ((addr_mode
== CODE_64BIT
12294 ? !i
.index_reg
->reg_type
.bitfield
.qword
12295 : !i
.index_reg
->reg_type
.bitfield
.dword
)
12296 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
12299 /* bndmk, bndldx, bndstx and mandatory non-vector SIB have special restrictions. */
12300 if (t
->mnem_off
== MN_bndmk
12301 || t
->mnem_off
== MN_bndldx
12302 || t
->mnem_off
== MN_bndstx
12303 || t
->opcode_modifier
.sib
== SIBMEM
)
12305 /* They cannot use RIP-relative addressing. */
12306 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
12308 as_bad (_("`%s' cannot be used here"), operand_string
);
12312 /* bndldx and bndstx ignore their scale factor. */
12313 if ((t
->mnem_off
== MN_bndldx
|| t
->mnem_off
== MN_bndstx
)
12314 && i
.log2_scale_factor
)
12315 as_warn (_("register scaling is being ignored here"));
12320 /* 16-bit checks. */
12321 if (i
.disp_encoding
== disp_encoding_32bit
)
/* 16-bit addressing only permits specific word base/index pairs.  */
12325 && (!i
.base_reg
->reg_type
.bitfield
.word
12326 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
12328 && (!i
.index_reg
->reg_type
.bitfield
.word
12329 || !i
.index_reg
->reg_type
.bitfield
.baseindex
12331 && i
.base_reg
->reg_num
< 6
12332 && i
.index_reg
->reg_num
>= 6
12333 && i
.log2_scale_factor
== 0))))
12340 /* Handle vector immediates. */
/* Parse a stand-alone "{rc-sae}" style operand at IMM_START (Intel RC/SAE
   written in operand position).  Delegates name matching to
   RC_SAE_specifier(); checks the closing '}' and that nothing follows.
   NOTE(review): lossy extraction — returns are missing; bare numbers are
   original-source line markers.  */
12343 RC_SAE_immediate (const char *imm_start
)
12345 const char *pstr
= imm_start
;
/* Skip the opening '{' and match the specifier name.  */
12350 pstr
= RC_SAE_specifier (pstr
+ 1);
12354 if (*pstr
++ != '}')
12356 as_bad (_("Missing '}': '%s'"), imm_start
);
12359 /* RC/SAE immediate string should contain nothing more. */;
12362 as_bad (_("Junk after '}': '%s'"), imm_start
);
12366 /* Internally this doesn't count as an operand. */
/* Return true if character C can start an AT&T memory operand: a digit
   (line missing in this extraction — TODO confirm), a symbol-name
   starter, or one of the punctuation characters "([\"+-!~".  */
12372 static INLINE
bool starts_memory_operand (char c
)
12375 || is_name_beginner (c
)
12376 || strchr ("([\"+-!~", c
);
12379 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
/* Parse one AT&T-syntax operand string into the global insn state `i':
   handles '*' absolute-jump prefix, registers (with segment override,
   "{...}" vector decorations, .insn pseudo-register rejection and RC/SAE
   placement checks), '$' immediates, stand-alone RC/SAE operands, and
   memory references in disp(base,index,scale) form.  Returns 1 on
   success (per the comment above this function).
   NOTE(review): lossy extraction — a large number of original lines
   (braces, else arms, returns, parts of conditions) are missing; bare
   numbers are original-source line markers.  Tokens preserved
   byte-for-byte.  */
12383 i386_att_operand (char *operand_string
)
12385 const reg_entry
*r
;
12387 char *op_string
= operand_string
;
12389 if (is_space_char (*op_string
))
12392 /* We check for an absolute prefix (differentiating,
12393 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
12394 if (*op_string
== ABSOLUTE_PREFIX
12395 && current_templates
->start
->opcode_modifier
.jump
)
12398 if (is_space_char (*op_string
))
12400 i
.jumpabsolute
= true;
12403 /* Check if operand is a register. */
12404 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
12406 i386_operand_type temp
;
12411 /* Check for a segment override by searching for ':' after a
12412 segment register. */
12413 op_string
= end_op
;
12414 if (is_space_char (*op_string
))
12416 if (*op_string
== ':' && r
->reg_type
.bitfield
.class == SReg
)
/* Record the override for the upcoming memory operand.  */
12418 i
.seg
[i
.mem_operands
] = r
;
12420 /* Skip the ':' and whitespace. */
12422 if (is_space_char (*op_string
))
12425 /* Handle case of %es:*foo. */
12426 if (!i
.jumpabsolute
&& *op_string
== ABSOLUTE_PREFIX
12427 && current_templates
->start
->opcode_modifier
.jump
)
12430 if (is_space_char (*op_string
))
12432 i
.jumpabsolute
= true;
/* After "seg:" the rest must look like a memory operand.  */
12435 if (!starts_memory_operand (*op_string
))
12437 as_bad (_("bad memory operand `%s'"), op_string
);
12440 goto do_memory_reference
;
12443 /* Handle vector operations. */
12444 if (*op_string
== '{')
12446 op_string
= check_VecOperations (op_string
);
12447 if (op_string
== NULL
)
12453 as_bad (_("junk `%s' after register"), op_string
);
12457 /* Reject pseudo registers for .insn. */
12458 if (dot_insn () && r
->reg_type
.bitfield
.class == ClassNone
)
12460 as_bad (_("`%s%s' cannot be used here"),
12461 register_prefix
, r
->reg_name
);
/* Register operand: merge its type (minus baseindex) into i.types.  */
12465 temp
= r
->reg_type
;
12466 temp
.bitfield
.baseindex
= 0;
12467 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
12469 i
.types
[this_operand
].bitfield
.unspecified
= 0;
12470 i
.op
[this_operand
].regs
= r
;
12473 /* A GPR may follow an RC or SAE immediate only if a (vector) register
12474 operand was also present earlier on. */
12475 if (i
.rounding
.type
!= rc_none
&& temp
.bitfield
.class == Reg
12476 && i
.reg_operands
== 1)
/* Find the matching table entry just to name it in the diagnostic.  */
12480 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); ++j
)
12481 if (i
.rounding
.type
== RC_NamesTable
[j
].type
)
12483 as_bad (_("`%s': misplaced `{%s}'"),
12484 insn_name (current_templates
->start
), RC_NamesTable
[j
].name
);
12488 else if (*op_string
== REGISTER_PREFIX
)
12490 as_bad (_("bad register name `%s'"), op_string
);
12493 else if (*op_string
== IMMEDIATE_PREFIX
)
12496 if (i
.jumpabsolute
)
12498 as_bad (_("immediate operand illegal with absolute jump"));
12501 if (!i386_immediate (op_string
))
/* RC/SAE must precede all immediates.  */
12503 if (i
.rounding
.type
!= rc_none
)
12505 as_bad (_("`%s': RC/SAE operand must follow immediate operands"),
12506 insn_name (current_templates
->start
));
12510 else if (RC_SAE_immediate (operand_string
))
12512 /* If it is a RC or SAE immediate, do the necessary placement check:
12513 Only another immediate or a GPR may precede it. */
12514 if (i
.mem_operands
|| i
.reg_operands
+ i
.imm_operands
> 1
12515 || (i
.reg_operands
== 1
12516 && i
.op
[0].regs
->reg_type
.bitfield
.class != Reg
))
12518 as_bad (_("`%s': misplaced `%s'"),
12519 insn_name (current_templates
->start
), operand_string
);
12523 else if (starts_memory_operand (*op_string
))
12525 /* This is a memory reference of some sort. */
12528 /* Start and end of displacement string expression (if found). */
12529 char *displacement_string_start
;
12530 char *displacement_string_end
;
12532 do_memory_reference
:
12533 /* Check for base index form. We detect the base index form by
12534 looking for an ')' at the end of the operand, searching
12535 for the '(' matching it, and finding a REGISTER_PREFIX or ','
12537 base_string
= op_string
+ strlen (op_string
);
12539 /* Handle vector operations. */
12541 if (is_space_char (*base_string
))
12544 if (*base_string
== '}')
12546 char *vop_start
= NULL
;
/* Scan backwards for the matching '{' of a trailing decoration.  */
12548 while (base_string
-- > op_string
)
12550 if (*base_string
== '"')
12552 if (*base_string
!= '{')
12555 vop_start
= base_string
;
12558 if (is_space_char (*base_string
))
12561 if (*base_string
!= '}')
12569 as_bad (_("unbalanced figure braces"));
12573 if (check_VecOperations (vop_start
) == NULL
)
12577 /* If we only have a displacement, set-up for it to be parsed later. */
12578 displacement_string_start
= op_string
;
12579 displacement_string_end
= base_string
+ 1;
12581 if (*base_string
== ')')
12584 unsigned int parens_not_balanced
= 0;
12585 bool in_quotes
= false;
12587 /* We've already checked that the number of left & right ()'s are
12588 equal, and that there's a matching set of double quotes. */
12589 end_op
= base_string
;
/* Locate the '(' matching the final ')', ignoring quoted text.  */
12590 for (temp_string
= op_string
; temp_string
< end_op
; temp_string
++)
12592 if (*temp_string
== '\\' && temp_string
[1] == '"')
12594 else if (*temp_string
== '"')
12595 in_quotes
= !in_quotes
;
12596 else if (!in_quotes
)
12598 if (*temp_string
== '(' && !parens_not_balanced
++)
12599 base_string
= temp_string
;
12600 if (*temp_string
== ')')
12601 --parens_not_balanced
;
12605 temp_string
= base_string
;
12607 /* Skip past '(' and whitespace. */
12608 gas_assert (*base_string
== '(');
12610 if (is_space_char (*base_string
))
12613 if (*base_string
== ','
12614 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
/* Displacement text ends at the '(' of the base/index part.  */
12617 displacement_string_end
= temp_string
;
12619 i
.types
[this_operand
].bitfield
.baseindex
= 1;
12623 if (i
.base_reg
== &bad_reg
)
12625 base_string
= end_op
;
12626 if (is_space_char (*base_string
))
12630 /* There may be an index reg or scale factor here. */
12631 if (*base_string
== ',')
12634 if (is_space_char (*base_string
))
12637 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
12640 if (i
.index_reg
== &bad_reg
)
12642 base_string
= end_op
;
12643 if (is_space_char (*base_string
))
12645 if (*base_string
== ',')
12648 if (is_space_char (*base_string
))
12651 else if (*base_string
!= ')')
12653 as_bad (_("expecting `,' or `)' "
12654 "after index register in `%s'"),
12659 else if (*base_string
== REGISTER_PREFIX
)
12661 end_op
= strchr (base_string
, ',');
12664 as_bad (_("bad register name `%s'"), base_string
);
12668 /* Check for scale factor. */
12669 if (*base_string
!= ')')
12671 char *end_scale
= i386_scale (base_string
);
12676 base_string
= end_scale
;
12677 if (is_space_char (*base_string
))
12679 if (*base_string
!= ')')
12681 as_bad (_("expecting `)' "
12682 "after scale factor in `%s'"),
12687 else if (!i
.index_reg
)
12689 as_bad (_("expecting index register or scale factor "
12690 "after `,'; got '%c'"),
12695 else if (*base_string
!= ')')
12697 as_bad (_("expecting `,' or `)' "
12698 "after base register in `%s'"),
12703 else if (*base_string
== REGISTER_PREFIX
)
12705 end_op
= strchr (base_string
, ',');
12708 as_bad (_("bad register name `%s'"), base_string
);
12713 /* If there's an expression beginning the operand, parse it,
12714 assuming displacement_string_start and
12715 displacement_string_end are meaningful. */
12716 if (displacement_string_start
!= displacement_string_end
)
12718 if (!i386_displacement (displacement_string_start
,
12719 displacement_string_end
))
12723 /* Special case for (%dx) while doing input/output op. */
12725 && i
.base_reg
->reg_type
.bitfield
.instance
== RegD
12726 && i
.base_reg
->reg_type
.bitfield
.word
12727 && i
.index_reg
== 0
12728 && i
.log2_scale_factor
== 0
12729 && i
.seg
[i
.mem_operands
] == 0
12730 && !operand_type_check (i
.types
[this_operand
], disp
))
12732 i
.types
[this_operand
] = i
.base_reg
->reg_type
;
12733 i
.input_output_operand
= true;
/* Final validation of base/index against mode and template.  */
12737 if (i386_index_check (operand_string
) == 0)
12739 i
.flags
[this_operand
] |= Operand_Mem
;
12744 /* It's not a memory operand; argh! */
12745 as_bad (_("invalid char %s beginning operand %d `%s'"),
12746 output_invalid (*op_string
),
12751 return 1; /* Normal return. */
12754 /* Calculate the maximum variable size (i.e., excluding fr_fix)
12755 that an rs_machine_dependent frag may reach. */
/* Return the maximum number of variable bytes a relaxable
   rs_machine_dependent frag may grow by: 4 for an unconditional jump,
   5 otherwise.
   NOTE(review): the return-type line is missing from this extraction;
   bare numbers are original-source line markers.  */
12758 i386_frag_max_var (fragS
*frag
)
12760 /* The only relaxable frags are for jumps.
12761 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
12762 gas_assert (frag
->fr_type
== rs_machine_dependent
);
12764 return TYPE_FROM_RELAX_STATE (frag
->fr_subtype
) == UNCOND_JUMP
? 4 : 5;
12766 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* Decide whether FR_SYMBOL can be assumed to resolve within the current
   segment (i.e. cannot be preempted at link/run time), given relocation
   FR_VAR: IFUNCs and PLT-relocated symbols may be preempted; non-external
   (and non-weak) symbols resolve locally; non-default visibility prevents
   preemption.
   NOTE(review): lossy extraction — the actual return statements for
   several branches are missing; bare numbers are original-source line
   markers.  */
12768 elf_symbol_resolved_in_segment_p (symbolS
*fr_symbol
, offsetT fr_var
)
12770 /* STT_GNU_IFUNC symbol must go through PLT. */
12771 if ((symbol_get_bfdsym (fr_symbol
)->flags
12772 & BSF_GNU_INDIRECT_FUNCTION
) != 0)
12775 if (!S_IS_EXTERNAL (fr_symbol
))
12776 /* Symbol may be weak or local. */
12777 return !S_IS_WEAK (fr_symbol
);
12779 /* Global symbols with non-default visibility can't be preempted. */
12780 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol
)) != STV_DEFAULT
)
/* Inspect the relocation type when one is attached to the frag.  */
12783 if (fr_var
!= NO_RELOC
)
12784 switch ((enum bfd_reloc_code_real
) fr_var
)
12786 case BFD_RELOC_386_PLT32
:
12787 case BFD_RELOC_X86_64_PLT32
:
12788 /* Symbol with PLT relocation may be preempted. */
12794 /* Global symbols with default visibility in a shared library may be
12795 preempted by another definition. */
12800 /* Table 3-2. Macro-Fusible Instructions in Haswell Microarchitecture
12801 Note also work for Skylake and Cascadelake.
12802 ---------------------------------------------------------------------
12803 | JCC | ADD/SUB/CMP | INC/DEC | TEST/AND |
12804 | ------ | ----------- | ------- | -------- |
12806 | Jno | N | N | Y |
12807 | Jc/Jb | Y | N | Y |
12808 | Jae/Jnb | Y | N | Y |
12809 | Je/Jz | Y | Y | Y |
12810 | Jne/Jnz | Y | Y | Y |
12811 | Jna/Jbe | Y | N | Y |
12812 | Ja/Jnbe | Y | N | Y |
12814 | Jns | N | N | Y |
12815 | Jp/Jpe | N | N | Y |
12816 | Jnp/Jpo | N | N | Y |
12817 | Jl/Jnge | Y | Y | Y |
12818 | Jge/Jnl | Y | Y | Y |
12819 | Jle/Jng | Y | Y | Y |
12820 | Jg/Jnle | Y | Y | Y |
12821 --------------------------------------------------------------------- */
12823 i386_macro_fusible_p (enum mf_cmp_kind mf_cmp
, enum mf_jcc_kind mf_jcc
)
12825 if (mf_cmp
== mf_cmp_alu_cmp
)
12826 return ((mf_jcc
>= mf_jcc_jc
&& mf_jcc
<= mf_jcc_jna
)
12827 || mf_jcc
== mf_jcc_jl
|| mf_jcc
== mf_jcc_jle
);
12828 if (mf_cmp
== mf_cmp_incdec
)
12829 return (mf_jcc
== mf_jcc_je
|| mf_jcc
== mf_jcc_jl
12830 || mf_jcc
== mf_jcc_jle
);
12831 if (mf_cmp
== mf_cmp_test_and
)
12836 /* Return the next non-empty frag. */
12839 i386_next_non_empty_frag (fragS
*fragP
)
12841 /* There may be a frag with a ".fill 0" when there is no room in
12842 the current frag for frag_grow in output_insn. */
12843 for (fragP
= fragP
->fr_next
;
12845 && fragP
->fr_type
== rs_fill
12846 && fragP
->fr_fix
== 0);
12847 fragP
= fragP
->fr_next
)
/* Return the next jcc frag after BRANCH_PADDING.  MAYBE_CMP_FRAGP is
   the candidate CMP-like instruction frag; PAD_FRAGP is the frag that
   should be its BRANCH_PADDING.  Returns NULL when the pair does not
   form a macro-fusible cmp+jcc sequence.  */

static fragS *
i386_next_fusible_jcc_frag (fragS *maybe_cmp_fragP, fragS *pad_fragP)
{
  fragS *branch_fragP;

  if (!pad_fragP)
    return NULL;

  if (pad_fragP->fr_type == rs_machine_dependent
      && (TYPE_FROM_RELAX_STATE (pad_fragP->fr_subtype)
	  == BRANCH_PADDING))
    {
      /* Skip any ".fill 0" frags between the padding and the jump.  */
      branch_fragP = i386_next_non_empty_frag (pad_fragP);
      if (branch_fragP->fr_type != rs_machine_dependent)
	return NULL;
      /* Only a conditional jump whose kind fuses with the CMP-like
	 instruction's kind qualifies.  */
      if (TYPE_FROM_RELAX_STATE (branch_fragP->fr_subtype) == COND_JUMP
	  && i386_macro_fusible_p (maybe_cmp_fragP->tc_frag_data.mf_type,
				   pad_fragP->tc_frag_data.mf_type))
	return branch_fragP;
    }

  return NULL;
}
/* Classify BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags.
   Walks the frag chain once (guarded by tc_frag_data.classified) to:
   1) attach each padding frag to the branch frag it pads, converting
      non-fusible FUSED_JCC_PADDING into BRANCH_PREFIX or waning it; and
   2) accumulate max_prefix_length over runs of BRANCH_PREFIX frags.  */

static void
i386_classify_machine_dependent_frag (fragS *fragP)
{
  fragS *cmp_fragP;
  fragS *pad_fragP;
  fragS *branch_fragP;
  fragS *next_fragP;
  unsigned int max_prefix_length;

  if (fragP->tc_frag_data.classified)
    return;

  /* First scan for BRANCH_PADDING and FUSED_JCC_PADDING.  Convert
     FUSED_JCC_PADDING and merge BRANCH_PADDING.  */
  for (next_fragP = fragP;
       next_fragP != NULL;
       next_fragP = next_fragP->fr_next)
    {
      next_fragP->tc_frag_data.classified = 1;
      if (next_fragP->fr_type == rs_machine_dependent)
	switch (TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype))
	  {
	  case BRANCH_PADDING:
	    /* The BRANCH_PADDING frag must be followed by a branch
	       frag.  */
	    branch_fragP = i386_next_non_empty_frag (next_fragP);
	    next_fragP->tc_frag_data.u.branch_fragP = branch_fragP;
	    break;
	  case FUSED_JCC_PADDING:
	    /* Check if this is a fused jcc:
	       FUSED_JCC_PADDING
	       CMP like instruction
	       BRANCH_PADDING
	       COND_JUMP
	       */
	    cmp_fragP = i386_next_non_empty_frag (next_fragP);
	    pad_fragP = i386_next_non_empty_frag (cmp_fragP);
	    branch_fragP = i386_next_fusible_jcc_frag (next_fragP, pad_fragP);
	    if (branch_fragP)
	      {
		/* The BRANCH_PADDING frag is merged with the
		   FUSED_JCC_PADDING frag.  */
		next_fragP->tc_frag_data.u.branch_fragP = branch_fragP;
		/* CMP like instruction size.  */
		next_fragP->tc_frag_data.cmp_size = cmp_fragP->fr_fix;
		frag_wane (pad_fragP);
		/* Skip to branch_fragP.  */
		next_fragP = branch_fragP;
	      }
	    else if (next_fragP->tc_frag_data.max_prefix_length)
	      {
		/* Turn FUSED_JCC_PADDING into BRANCH_PREFIX if it isn't
		   a fused jcc.  */
		next_fragP->fr_subtype
		  = ENCODE_RELAX_STATE (BRANCH_PREFIX, 0);
		next_fragP->tc_frag_data.max_bytes
		  = next_fragP->tc_frag_data.max_prefix_length;
		/* This will be updated in the BRANCH_PREFIX scan.  */
		next_fragP->tc_frag_data.max_prefix_length = 0;
	      }
	    else
	      frag_wane (next_fragP);
	    break;
	  }
    }

  /* Stop if there is no BRANCH_PREFIX.  */
  if (!align_branch_prefix_size)
    return;

  /* Scan for BRANCH_PREFIX.  */
  for (; fragP != NULL; fragP = fragP->fr_next)
    {
      if (fragP->fr_type != rs_machine_dependent
	  || (TYPE_FROM_RELAX_STATE (fragP->fr_subtype)
	      != BRANCH_PREFIX))
	continue;

      /* Count all BRANCH_PREFIX frags before BRANCH_PADDING and
	 COND_JUMP_PREFIX.  */
      max_prefix_length = 0;
      for (next_fragP = fragP;
	   next_fragP != NULL;
	   next_fragP = next_fragP->fr_next)
	{
	  if (next_fragP->fr_type == rs_fill)
	    /* Skip rs_fill frags.  */
	    continue;
	  else if (next_fragP->fr_type != rs_machine_dependent)
	    /* Stop for all other frags.  */
	    break;

	  /* rs_machine_dependent frags.  */
	  if (TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype)
	      == BRANCH_PREFIX)
	    {
	      /* Count BRANCH_PREFIX frags.  */
	      if (max_prefix_length >= MAX_FUSED_JCC_PADDING_SIZE)
		{
		  /* Cap the run; surplus prefix frags are discarded.  */
		  max_prefix_length = MAX_FUSED_JCC_PADDING_SIZE;
		  frag_wane (next_fragP);
		}
	      else
		max_prefix_length
		  += next_fragP->tc_frag_data.max_bytes;
	    }
	  else if ((TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype)
		    == BRANCH_PADDING)
		   || (TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype)
		       == FUSED_JCC_PADDING))
	    {
	      /* Stop at BRANCH_PADDING and FUSED_JCC_PADDING.  */
	      fragP->tc_frag_data.u.padding_fragP = next_fragP;
	      break;
	    }
	  else
	    /* Stop for other rs_machine_dependent frags.  */
	    break;
	}

      fragP->tc_frag_data.max_prefix_length = max_prefix_length;

      /* Skip to the next frag.  */
      fragP = next_fragP;
    }
}
/* Compute padding size for

	FUSED_JCC_PADDING
	CMP like instruction
	BRANCH_PADDING
	COND_JUMP/UNCOND_JUMP

   or

	BRANCH_PADDING
	COND_JUMP/UNCOND_JUMP

   Returns the number of padding bytes needed so that the (possibly
   fused) branch does not cross an align_branch_power boundary.
   ADDRESS, when non-zero, overrides fragP->fr_address (used during
   relaxation when the recorded address is stale).  */

static int
i386_branch_padding_size (fragS *fragP, offsetT address)
{
  unsigned int offset, size, padding_size;
  fragS *branch_fragP = fragP->tc_frag_data.u.branch_fragP;

  /* The start address of the BRANCH_PADDING or FUSED_JCC_PADDING frag.  */
  if (!address)
    address = fragP->fr_address;
  address += fragP->fr_fix;

  /* CMP like instruction size.  */
  size = fragP->tc_frag_data.cmp_size;

  /* The base size of the branch frag.  */
  size += branch_fragP->fr_fix;

  /* Add opcode and displacement bytes for the rs_machine_dependent
     branch frag.  */
  if (branch_fragP->fr_type == rs_machine_dependent)
    size += md_relax_table[branch_fragP->fr_subtype].rlx_length;

  /* Check if branch is within boundary and doesn't end at the last
     byte.  */
  offset = address & ((1U << align_branch_power) - 1);
  if ((offset + size) >= (1U << align_branch_power))
    /* Padding needed to avoid crossing boundary.  */
    padding_size = (1U << align_branch_power) - offset;
  else
    /* No padding needed.  */
    padding_size = 0;

  /* The return value may be saved in tc_frag_data.length which is
     unsigned byte.  */
  if (!fits_in_unsigned_byte (padding_size))
    abort ();

  return padding_size;
}
/* i386_generic_table_relax_frag()

   Handle BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags to
   grow/shrink padding to align branch frags.  Hand others to
   relax_frag().  Returns the growth (in bytes) relative to the
   previous relaxation iteration.  */

static long
i386_generic_table_relax_frag (segT segment, fragS *fragP, long stretch)
{
  if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PADDING
      || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == FUSED_JCC_PADDING)
    {
      long padding_size = i386_branch_padding_size (fragP, 0);
      long grow = padding_size - fragP->tc_frag_data.length;

      /* When the BRANCH_PREFIX frag is used, the computed address
	 must match the actual address and there should be no padding.  */
      if (fragP->tc_frag_data.padding_address
	  && (fragP->tc_frag_data.padding_address != fragP->fr_address
	      || padding_size))
	abort ();

      /* Update the padding size.  */
      if (grow)
	fragP->tc_frag_data.length = padding_size;

      return grow;
    }
  else if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PREFIX)
    {
      fragS *padding_fragP, *next_fragP;
      long padding_size, left_size, last_size;

      padding_fragP = fragP->tc_frag_data.u.padding_fragP;
      if (!padding_fragP)
	/* Use the padding set by the leading BRANCH_PREFIX frag.  */
	return (fragP->tc_frag_data.length
		- fragP->tc_frag_data.last_length);

      /* Compute the relative address of the padding frag in the very
	 first time where the BRANCH_PREFIX frag sizes are zero.  */
      if (!fragP->tc_frag_data.padding_address)
	fragP->tc_frag_data.padding_address
	  = padding_fragP->fr_address - (fragP->fr_address - stretch);

      /* First update the last length from the previous interation.  */
      left_size = fragP->tc_frag_data.prefix_length;
      for (next_fragP = fragP;
	   next_fragP != padding_fragP;
	   next_fragP = next_fragP->fr_next)
	if (next_fragP->fr_type == rs_machine_dependent
	    && (TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype)
		== BRANCH_PREFIX))
	  {
	    if (left_size)
	      {
		int max = next_fragP->tc_frag_data.max_bytes;
		if (max)
		  {
		    int size;
		    /* Distribute the remaining budget, capped by this
		       frag's capacity.  */
		    if (max > left_size)
		      size = left_size;
		    else
		      size = max;
		    left_size -= size;
		    next_fragP->tc_frag_data.last_length = size;
		  }
	      }
	    else
	      next_fragP->tc_frag_data.last_length = 0;
	  }

      /* Check the padding size for the padding frag.  */
      padding_size = i386_branch_padding_size
	(padding_fragP, (fragP->fr_address
			 + fragP->tc_frag_data.padding_address));

      last_size = fragP->tc_frag_data.prefix_length;
      /* Check if there is change from the last interation.  */
      if (padding_size == last_size)
	{
	  /* Update the expected address of the padding frag.  */
	  padding_fragP->tc_frag_data.padding_address
	    = (fragP->fr_address + padding_size
	       + fragP->tc_frag_data.padding_address);
	  return 0;
	}

      if (padding_size > fragP->tc_frag_data.max_prefix_length)
	{
	  /* No padding if there is no sufficient room.  Clear the
	     expected address of the padding frag.  */
	  padding_fragP->tc_frag_data.padding_address = 0;
	  padding_size = 0;
	}
      else
	/* Store the expected address of the padding frag.  */
	padding_fragP->tc_frag_data.padding_address
	  = (fragP->fr_address + padding_size
	     + fragP->tc_frag_data.padding_address);

      fragP->tc_frag_data.prefix_length = padding_size;

      /* Update the length for the current interation.  */
      left_size = padding_size;
      for (next_fragP = fragP;
	   next_fragP != padding_fragP;
	   next_fragP = next_fragP->fr_next)
	if (next_fragP->fr_type == rs_machine_dependent
	    && (TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype)
		== BRANCH_PREFIX))
	  {
	    if (left_size)
	      {
		int max = next_fragP->tc_frag_data.max_bytes;
		if (max)
		  {
		    int size;
		    if (max > left_size)
		      size = left_size;
		    else
		      size = max;
		    left_size -= size;
		    next_fragP->tc_frag_data.length = size;
		  }
	      }
	    else
	      next_fragP->tc_frag_data.length = 0;
	  }

      return (fragP->tc_frag_data.length
	      - fragP->tc_frag_data.last_length);
    }

  return relax_frag (segment, fragP, stretch);
}
/* md_estimate_size_before_relax()

   Called just before relax() for rs_machine_dependent frags.  The x86
   assembler uses these frags to handle variable size jump
   instructions.

   Any symbol that is now undefined will not become defined.
   Return the correct fr_subtype in the frag.
   Return the initial "guess for variable size of frag" to caller.
   The guess is actually the growth beyond the fixed part.  Whatever
   we do to grow the fixed or variable part contributes to our
   returned value.  */

int
md_estimate_size_before_relax (fragS *fragP, segT segment)
{
  if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PADDING
      || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PREFIX
      || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == FUSED_JCC_PADDING)
    {
      /* Alignment padding frags: classify once, then report the
	 currently computed padding length as the variable size.  */
      i386_classify_machine_dependent_frag (fragP);
      return fragP->tc_frag_data.length;
    }

  /* We've already got fragP->fr_subtype right;  all we have to do is
     check for un-relaxable symbols.  On an ELF system, we can't relax
     an externally visible symbol, because it may be overridden by a
     shared library.  */
  if (S_GET_SEGMENT (fragP->fr_symbol) != segment
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      || (IS_ELF
	  && !elf_symbol_resolved_in_segment_p (fragP->fr_symbol,
						fragP->fr_var))
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      || (OUTPUT_FLAVOR == bfd_target_coff_flavour
	  && S_IS_WEAK (fragP->fr_symbol))
#endif
      )
    {
      /* Symbol is undefined in this segment, or we need to keep a
	 reloc so that weak symbols can be overridden.  Emit the
	 widest form now and attach a fixup/reloc.  */
      int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
      enum bfd_reloc_code_real reloc_type;
      unsigned char *opcode;
      int old_fr_fix;
      fixS *fixP = NULL;

      if (fragP->fr_var != NO_RELOC)
	reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
      else if (size == 2)
	reloc_type = BFD_RELOC_16_PCREL;
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      else if (fragP->tc_frag_data.code64 && fragP->fr_offset == 0
	       && need_plt32_p (fragP->fr_symbol))
	reloc_type = BFD_RELOC_X86_64_PLT32;
#endif
      else
	reloc_type = BFD_RELOC_32_PCREL;

      old_fr_fix = fragP->fr_fix;
      opcode = (unsigned char *) fragP->fr_opcode;

      switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
	{
	case UNCOND_JUMP:
	  /* Make jmp (0xeb) a (d)word displacement jump.  */
	  opcode[0] = 0xe9;
	  fragP->fr_fix += size;
	  fixP = fix_new (fragP, old_fr_fix, size,
			  fragP->fr_symbol,
			  fragP->fr_offset, 1,
			  reloc_type);
	  break;

	case COND_JUMP86:
	  if (size == 2
	      && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
	    {
	      /* Negate the condition, and branch past an
		 unconditional jump.  */
	      opcode[0] ^= 1;
	      opcode[1] = 3;
	      /* Insert an unconditional jump.  */
	      opcode[2] = 0xe9;
	      /* We added two extra opcode bytes, and have a two byte
		 offset.  */
	      fragP->fr_fix += 2 + 2;
	      fix_new (fragP, old_fr_fix + 2, 2,
		       fragP->fr_symbol,
		       fragP->fr_offset, 1,
		       reloc_type);
	      break;
	    }
	  /* Fall through.  */

	case COND_JUMP:
	  if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
	    {
	      /* Keep the byte-sized jump; force an 8-bit pcrel reloc.  */
	      fragP->fr_fix += 1;
	      fixP = fix_new (fragP, old_fr_fix, 1,
			      fragP->fr_symbol,
			      fragP->fr_offset, 1,
			      BFD_RELOC_8_PCREL);
	      fixP->fx_signed = 1;
	      break;
	    }

	  /* This changes the byte-displacement jump 0x7N
	     to the (d)word-displacement jump 0x0f,0x8N.  */
	  opcode[1] = opcode[0] + 0x10;
	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
	  /* We've added an opcode byte.  */
	  fragP->fr_fix += 1 + size;
	  fixP = fix_new (fragP, old_fr_fix + 1, size,
			  fragP->fr_symbol,
			  fragP->fr_offset, 1,
			  reloc_type);
	  break;

	default:
	  BAD_CASE (fragP->fr_subtype);
	  break;
	}

      /* All jumps handled here are signed, but don't unconditionally use a
	 signed limit check for 32 and 16 bit jumps as we want to allow wrap
	 around at 4G (outside of 64-bit mode) and 64k.  */
      if (size == 4 && flag_code == CODE_64BIT)
	fixP->fx_signed = 1;

      frag_wane (fragP);
      return fragP->fr_fix - old_fr_fix;
    }

  /* Guess size depending on current relax state.  Initially the relax
     state will correspond to a short jump and we return 1, because
     the variable part of the frag (the branch offset) is one byte
     long.  However, we can relax a section more than once and in that
     case we must either set fr_subtype back to the unrelaxed state,
     or return the value for the appropriate branch.  */
  return md_relax_table[fragP->fr_subtype].rlx_length;
}
13339 /* Called after relax() is finished.
13341 In: Address of frag.
13342 fr_type == rs_machine_dependent.
13343 fr_subtype is what the address relaxed to.
13345 Out: Any fixSs and constants are set up.
13346 Caller will turn frag into a ".space 0". */
13349 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
13352 unsigned char *opcode
;
13353 unsigned char *where_to_put_displacement
= NULL
;
13354 offsetT target_address
;
13355 offsetT opcode_address
;
13356 unsigned int extension
= 0;
13357 offsetT displacement_from_opcode_start
;
13359 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
13360 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
13361 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
13363 /* Generate nop padding. */
13364 unsigned int size
= fragP
->tc_frag_data
.length
;
13367 if (size
> fragP
->tc_frag_data
.max_bytes
)
13373 const char *branch
= "branch";
13374 const char *prefix
= "";
13375 fragS
*padding_fragP
;
13376 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
13379 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
13380 switch (fragP
->tc_frag_data
.default_prefix
)
13385 case CS_PREFIX_OPCODE
:
13388 case DS_PREFIX_OPCODE
:
13391 case ES_PREFIX_OPCODE
:
13394 case FS_PREFIX_OPCODE
:
13397 case GS_PREFIX_OPCODE
:
13400 case SS_PREFIX_OPCODE
:
13405 msg
= _("%s:%u: add %d%s at 0x%llx to align "
13406 "%s within %d-byte boundary\n");
13408 msg
= _("%s:%u: add additional %d%s at 0x%llx to "
13409 "align %s within %d-byte boundary\n");
13413 padding_fragP
= fragP
;
13414 msg
= _("%s:%u: add %d%s-byte nop at 0x%llx to align "
13415 "%s within %d-byte boundary\n");
13419 switch (padding_fragP
->tc_frag_data
.branch_type
)
13421 case align_branch_jcc
:
13424 case align_branch_fused
:
13425 branch
= "fused jcc";
13427 case align_branch_jmp
:
13430 case align_branch_call
:
13433 case align_branch_indirect
:
13434 branch
= "indiret branch";
13436 case align_branch_ret
:
13443 fprintf (stdout
, msg
,
13444 fragP
->fr_file
, fragP
->fr_line
, size
, prefix
,
13445 (long long) fragP
->fr_address
, branch
,
13446 1 << align_branch_power
);
13448 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
13449 memset (fragP
->fr_opcode
,
13450 fragP
->tc_frag_data
.default_prefix
, size
);
13452 i386_generate_nops (fragP
, (char *) fragP
->fr_opcode
,
13454 fragP
->fr_fix
+= size
;
13459 opcode
= (unsigned char *) fragP
->fr_opcode
;
13461 /* Address we want to reach in file space. */
13462 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
13464 /* Address opcode resides at in file space. */
13465 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
13467 /* Displacement from opcode start to fill into instruction. */
13468 displacement_from_opcode_start
= target_address
- opcode_address
;
13470 if ((fragP
->fr_subtype
& BIG
) == 0)
13472 /* Don't have to change opcode. */
13473 extension
= 1; /* 1 opcode + 1 displacement */
13474 where_to_put_displacement
= &opcode
[1];
13478 if (no_cond_jump_promotion
13479 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
13480 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
13481 _("long jump required"));
13483 switch (fragP
->fr_subtype
)
13485 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
13486 extension
= 4; /* 1 opcode + 4 displacement */
13488 where_to_put_displacement
= &opcode
[1];
13491 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
13492 extension
= 2; /* 1 opcode + 2 displacement */
13494 where_to_put_displacement
= &opcode
[1];
13497 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
13498 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
13499 extension
= 5; /* 2 opcode + 4 displacement */
13500 opcode
[1] = opcode
[0] + 0x10;
13501 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
13502 where_to_put_displacement
= &opcode
[2];
13505 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
13506 extension
= 3; /* 2 opcode + 2 displacement */
13507 opcode
[1] = opcode
[0] + 0x10;
13508 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
13509 where_to_put_displacement
= &opcode
[2];
13512 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
13517 where_to_put_displacement
= &opcode
[3];
13521 BAD_CASE (fragP
->fr_subtype
);
13526 /* If size if less then four we are sure that the operand fits,
13527 but if it's 4, then it could be that the displacement is larger
13529 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
13531 && ((addressT
) (displacement_from_opcode_start
- extension
13532 + ((addressT
) 1 << 31))
13533 > (((addressT
) 2 << 31) - 1)))
13535 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
13536 _("jump target out of range"));
13537 /* Make us emit 0. */
13538 displacement_from_opcode_start
= extension
;
13540 /* Now put displacement after opcode. */
13541 md_number_to_chars ((char *) where_to_put_displacement
,
13542 (valueT
) (displacement_from_opcode_start
- extension
),
13543 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
13544 fragP
->fr_fix
+= extension
;
/* Apply a fixup (fixP) to segment data, once it has been determined
   by our caller that we have all the info we need to fix it up.

   Parameter valP is the pointer to the value of the bits.

   On the 386, immediates, displacements, and data pointers are all in
   the same (little-endian) format, so we don't need to care about which
   we are handling.  */

void
md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
{
  char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
  valueT value = *valP;

#if !defined (TE_Mach)
  if (fixP->fx_pcrel)
    {
      /* Convert absolute reloc types to their PC-relative
	 counterparts.  */
      switch (fixP->fx_r_type)
	{
	default:
	  break;

	case BFD_RELOC_64:
	  fixP->fx_r_type = BFD_RELOC_64_PCREL;
	  break;
	case BFD_RELOC_32:
	case BFD_RELOC_X86_64_32S:
	  fixP->fx_r_type = BFD_RELOC_32_PCREL;
	  break;
	case BFD_RELOC_16:
	  fixP->fx_r_type = BFD_RELOC_16_PCREL;
	  break;
	case BFD_RELOC_8:
	  fixP->fx_r_type = BFD_RELOC_8_PCREL;
	  break;
	}
    }

  if (fixP->fx_addsy != NULL
      && (fixP->fx_r_type == BFD_RELOC_32_PCREL
	  || fixP->fx_r_type == BFD_RELOC_64_PCREL
	  || fixP->fx_r_type == BFD_RELOC_16_PCREL
	  || fixP->fx_r_type == BFD_RELOC_8_PCREL)
      && !use_rela_relocations)
    {
      /* This is a hack.  There should be a better way to handle this.
	 This covers for the fact that bfd_install_relocation will
	 subtract the current location (for partial_inplace, PC relative
	 relocations); see more below.  */
#ifndef OBJ_AOUT
      if (IS_ELF
#ifdef TE_PE
	  || OUTPUT_FLAVOR == bfd_target_coff_flavour
#endif
	  )
	value += fixP->fx_where + fixP->fx_frag->fr_address;
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      if (IS_ELF)
	{
	  segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);

	  if ((sym_seg == seg
	       || (symbol_section_p (fixP->fx_addsy)
		   && sym_seg != absolute_section))
	      && !generic_force_reloc (fixP))
	    {
	      /* Yes, we add the values in twice.  This is because
		 bfd_install_relocation subtracts them out again.  I think
		 bfd_install_relocation is broken, but I don't dare change
		 it.  FIXME.  */
	      value += fixP->fx_where + fixP->fx_frag->fr_address;
	    }
	}
#endif
#if defined (OBJ_COFF) && defined (TE_PE)
      /* For some reason, the PE format does not store a
	 section address offset for a PC relative symbol.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != seg
	  || S_IS_WEAK (fixP->fx_addsy))
	value += md_pcrel_from (fixP);
#endif
    }
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixP->fx_addsy != NULL
      && S_IS_WEAK (fixP->fx_addsy)
      /* PR 16858: Do not modify weak function references.  */
      && ! fixP->fx_pcrel)
    {
#if !defined (TE_PEP)
      /* For x86 PE weak function symbols are neither PC-relative
	 nor do they set S_IS_FUNCTION.  So the only reliable way
	 to detect them is to check the flags of their containing
	 section.  */
      if (S_GET_SEGMENT (fixP->fx_addsy) != NULL
	  && S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_CODE)
	;
      else
#endif
      value -= S_GET_VALUE (fixP->fx_addsy);
    }
#endif

  /* Fix a few things - the dynamic linker expects certain values here,
     and we must not disappoint it.  */
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF && fixP->fx_addsy)
    switch (fixP->fx_r_type)
      {
      case BFD_RELOC_386_PLT32:
      case BFD_RELOC_X86_64_PLT32:
	/* Make the jump instruction point to the address of the operand.
	   At runtime we merely add the offset to the actual PLT entry.
	   NB: Subtract the offset size only for jump instructions.  */
	if (fixP->fx_pcrel)
	  value = -4;
	break;

      case BFD_RELOC_386_TLS_GD:
      case BFD_RELOC_386_TLS_LDM:
      case BFD_RELOC_386_TLS_IE_32:
      case BFD_RELOC_386_TLS_IE:
      case BFD_RELOC_386_TLS_GOTIE:
      case BFD_RELOC_386_TLS_GOTDESC:
      case BFD_RELOC_X86_64_TLSGD:
      case BFD_RELOC_X86_64_TLSLD:
      case BFD_RELOC_X86_64_GOTTPOFF:
      case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	/* Fallthrough */
      case BFD_RELOC_386_TLS_LE:
      case BFD_RELOC_386_TLS_LDO_32:
      case BFD_RELOC_386_TLS_LE_32:
      case BFD_RELOC_X86_64_DTPOFF32:
      case BFD_RELOC_X86_64_DTPOFF64:
      case BFD_RELOC_X86_64_TPOFF32:
      case BFD_RELOC_X86_64_TPOFF64:
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	break;

      case BFD_RELOC_386_TLS_DESC_CALL:
      case BFD_RELOC_X86_64_TLSDESC_CALL:
	value = 0; /* Fully resolved at runtime.  No addend.  */
	S_SET_THREAD_LOCAL (fixP->fx_addsy);
	fixP->fx_done = 0;
	return;

      case BFD_RELOC_VTABLE_INHERIT:
      case BFD_RELOC_VTABLE_ENTRY:
	fixP->fx_done = 0;
	return;

      default:
	break;
      }
#endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)  */

  /* If not 64bit, massage value, to account for wraparound when !BFD64.  */
  if (!object_64bit)
    value = extend_to_32bit_address (value);

  *valP = value;
#endif /* !defined (TE_Mach)  */

  /* Are we finished with this relocation now?  */
  if (fixP->fx_addsy == NULL)
    {
      fixP->fx_done = 1;
      switch (fixP->fx_r_type)
	{
	case BFD_RELOC_X86_64_32S:
	  fixP->fx_signed = 1;
	  break;

	default:
	  break;
	}
    }
#if defined (OBJ_COFF) && defined (TE_PE)
  else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
    {
      fixP->fx_done = 0;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      /* Clear out the frag for now.  */
      value = 0;
    }
#endif
  else if (use_rela_relocations)
    {
      if (!disallow_64bit_reloc || fixP->fx_r_type == NO_RELOC)
	fixP->fx_no_overflow = 1;
      /* Remember value for tc_gen_reloc.  */
      fixP->fx_addnumber = value;
      value = 0;
    }

  md_number_to_chars (p, value, fixP->fx_size);
}
/* Turn a floating-point literal of kind TYPE into LITTLENUMs placed at
   LITP, storing the byte count in *SIZEP.  Returns an error string or
   NULL on success (standard gas md_atof contract).  */

const char *
md_atof (int type, char *litP, int *sizeP)
{
  /* This outputs the LITTLENUMs in REVERSE order;
     in accord with the bigendian 386.  */
  return ieee_md_atof (type, litP, sizeP, false);
}
/* Scratch buffer for output_invalid(): big enough for "(0x" + two hex
   digits + ")" + NUL, or a quoted single character.  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Render character C for use in diagnostics: quoted when printable,
   otherwise as a hex escape.  Returns a pointer to a static buffer,
   so the result is only valid until the next call.  */

static char *
output_invalid (int c)
{
  if (!ISPRINT (c))
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "(0x%x)", (unsigned char) c);
  else
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "'%c'", c);
  return output_invalid_buf;
}
/* Verify that @r can be used in the current context.  Returns false
   when the register class/size is not available on the selected
   architecture or code mode.  As a side effect, seeing an upper-16
   vector register (RegVRex) forces EVEX encoding for the current
   instruction.  */

static bool check_register (const reg_entry *r)
{
  if (allow_pseudo_reg)
    return true;

  if (operand_type_all_zero (&r->reg_type))
    return false;

  /* 32-bit regs, high segment regs, and control/debug regs need
     i386+.  */
  if ((r->reg_type.bitfield.dword
       || (r->reg_type.bitfield.class == SReg && r->reg_num > 3)
       || r->reg_type.bitfield.class == RegCR
       || r->reg_type.bitfield.class == RegDR)
      && !cpu_arch_flags.bitfield.cpui386)
    return false;

  /* Test registers exist only on i386/i486.  */
  if (r->reg_type.bitfield.class == RegTR
      && (flag_code == CODE_64BIT
	  || !cpu_arch_flags.bitfield.cpui386
	  || cpu_arch_isa_flags.bitfield.cpui586
	  || cpu_arch_isa_flags.bitfield.cpui686))
    return false;

  if (r->reg_type.bitfield.class == RegMMX && !cpu_arch_flags.bitfield.cpummx)
    return false;

  if (!cpu_arch_flags.bitfield.cpuavx512f)
    {
      if (r->reg_type.bitfield.zmmword
	  || r->reg_type.bitfield.class == RegMask)
	return false;

      if (!cpu_arch_flags.bitfield.cpuavx)
	{
	  if (r->reg_type.bitfield.ymmword)
	    return false;

	  if (!cpu_arch_flags.bitfield.cpusse && r->reg_type.bitfield.xmmword)
	    return false;
	}
    }

  /* AMX tile registers require AMX-TILE and 64-bit mode.  */
  if (r->reg_type.bitfield.tmmword
      && (!cpu_arch_flags.bitfield.cpuamx_tile
	  || flag_code != CODE_64BIT))
    return false;

  if (r->reg_type.bitfield.class == RegBND && !cpu_arch_flags.bitfield.cpumpx)
    return false;

  /* Don't allow fake index register unless allow_index_reg isn't 0. */
  if (!allow_index_reg && r->reg_num == RegIZ)
    return false;

  /* Upper 16 vector registers are only available with VREX in 64bit
     mode, and require EVEX encoding.  */
  if (r->reg_flags & RegVRex)
    {
      if (!cpu_arch_flags.bitfield.cpuavx512f
	  || flag_code != CODE_64BIT)
	return false;

      /* Force (or diagnose a conflict with) EVEX encoding.  */
      if (i.vec_encoding == vex_encoding_default)
	i.vec_encoding = vex_encoding_evex;
      else if (i.vec_encoding != vex_encoding_evex)
	i.vec_encoding = vex_encoding_error;
    }

  /* 64-bit-only registers outside 64-bit mode; CR8 is allowed with
     LOCK-as-REX (cpulm).  */
  if (((r->reg_flags & (RegRex64 | RegRex)) || r->reg_type.bitfield.qword)
      && (!cpu_arch_flags.bitfield.cpulm
	  || r->reg_type.bitfield.class != RegCR
	  || dot_insn ())
      && flag_code != CODE_64BIT)
    return false;

  /* The pseudo %flat: segment is Intel-syntax only.  */
  if (r->reg_type.bitfield.class == SReg && r->reg_num == RegFlat
      && !intel_syntax)
    return false;

  return true;
}
/* REG_STRING starts *before* REGISTER_PREFIX.  Parse a register name
   from REG_STRING, setting *END_OP to just past the consumed text.
   Returns the matching reg_entry, or NULL when the text is not a
   usable register in the current context.  */

static const reg_entry *
parse_real_register (const char *reg_string, char **end_op)
{
  const char *s = reg_string;
  char *p;
  char reg_name_given[MAX_REG_NAME_SIZE + 1];
  const reg_entry *r;

  /* Skip possible REGISTER_PREFIX and possible whitespace.  */
  if (*s == REGISTER_PREFIX)
    ++s;

  if (is_space_char (*s))
    ++s;

  /* Copy the candidate name, normalised through register_chars[]
     (which folds case and rejects invalid characters).  */
  p = reg_name_given;
  while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
    {
      if (p >= reg_name_given + MAX_REG_NAME_SIZE)
	return (const reg_entry *) NULL;
      s++;
    }

  if (is_part_of_name (*s))
    return (const reg_entry *) NULL;

  *end_op = (char *) s;

  r = (const reg_entry *) str_hash_find (reg_hash, reg_name_given);

  /* Handle floating point regs, allowing spaces in the (i) part.  */
  if (r == reg_st0)
    {
      if (!cpu_arch_flags.bitfield.cpu8087
	  && !cpu_arch_flags.bitfield.cpu287
	  && !cpu_arch_flags.bitfield.cpu387
	  && !allow_pseudo_reg)
	return (const reg_entry *) NULL;

      if (is_space_char (*s))
	++s;
      if (*s == '(')
	{
	  ++s;
	  if (is_space_char (*s))
	    ++s;
	  if (*s >= '0' && *s <= '7')
	    {
	      int fpr = *s - '0';

	      ++s;
	      if (is_space_char (*s))
		++s;
	      if (*s == ')')
		{
		  *end_op = (char *) s + 1;
		  /* %st entries are laid out consecutively, so %st(i)
		     is reg_st0 + i.  */
		  know (r[fpr].reg_num == fpr);
		  return r + fpr;
		}
	    }
	  /* We have "%st(" then garbage.  */
	  return (const reg_entry *) NULL;
	}
    }

  return r && check_register (r) ? r : NULL;
}
/* REG_STRING starts *before* REGISTER_PREFIX.  Like
   parse_real_register, but additionally resolves symbols that were
   .equ'd to a register.  Sets *END_OP past the consumed text.  */

static const reg_entry *
parse_register (const char *reg_string, char **end_op)
{
  const reg_entry *r;

  if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
    r = parse_real_register (reg_string, end_op);
  else
    r = NULL;
  if (!r)
    {
      char *save = input_line_pointer;
      char *buf = xstrdup (reg_string), *name;
      symbolS *symbolP;

      /* Temporarily redirect the scrubber input to our copy so
	 get_symbol_name() can be used.  */
      input_line_pointer = buf;
      get_symbol_name (&name);
      symbolP = symbol_find (name);
      /* Chase chains of pure symbol equates (no addend).  */
      while (symbolP && symbol_equated_p (symbolP))
	{
	  const expressionS *e = symbol_get_value_expression(symbolP);

	  if (e->X_add_number)
	    break;
	  symbolP = e->X_add_symbol;
	}
      if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
	{
	  const expressionS *e = symbol_get_value_expression (symbolP);

	  if (e->X_op == O_register)
	    {
	      know (e->X_add_number >= 0
		    && (valueT) e->X_add_number < i386_regtab_size);
	      r = i386_regtab + e->X_add_number;
	      *end_op = (char *) reg_string + (input_line_pointer - buf);
	    }
	  if (r && !check_register (r))
	    {
	      as_bad (_("register '%s%s' cannot be used here"),
		      register_prefix, r->reg_name);
	      r = &bad_reg;
	    }
	}
      input_line_pointer = save;
      free (buf);
    }
  return r;
}
/* Target hook: try to parse NAME as a register while evaluating an
   expression.  On success fill E with an O_register expression, stash
   the character after the register into *NEXTCHARP, and return 1.
   Otherwise fall back to Intel-syntax name parsing (or 0).  */

int
i386_parse_name (char *name, expressionS *e, char *nextcharP)
{
  const reg_entry *r = NULL;
  char *end = input_line_pointer;

  /* We only know the terminating character here.  It being double quote could
     be the closing one of a quoted symbol name, or an opening one from a
     following string (or another quoted symbol name).  Since the latter can't
     be valid syntax for anything, bailing in either case is good enough.  */
  if (*nextcharP == '"')
    return 0;

  /* Temporarily restore the terminator so the register parser sees the
     full token.  */
  *end = *nextcharP;
  if (*name == REGISTER_PREFIX || allow_naked_reg)
    r = parse_real_register (name, &input_line_pointer);
  if (r && end <= input_line_pointer)
    {
      *nextcharP = *input_line_pointer;
      *input_line_pointer = 0;
      e->X_op = O_register;
      e->X_add_number = r - i386_regtab;
      return 1;
    }
  input_line_pointer = end;
  *end = 0;
  return intel_syntax ? i386_intel_parse_name (name, e) : 0;
}
/* Target hook called by the generic expression parser when it cannot make
   sense of the input itself.  Handles '%'-prefixed registers and (in Intel
   syntax) bracketed sub-expressions.  On failure the expression is marked
   O_absent.  NOTE(review): the return type, some case labels, and several
   interior lines are elided in this extract.  */
14004 md_operand (expressionS
*e
)
14007 const reg_entry
*r
;
14009 switch (*input_line_pointer
)
14011 case REGISTER_PREFIX
:
14012 r
= parse_real_register (input_line_pointer
, &end
);
/* Successful register parse: encode as O_register with the i386_regtab
   index and advance past the consumed text.  */
14015 e
->X_op
= O_register
;
14016 e
->X_add_number
= r
- i386_regtab
;
14017 input_line_pointer
= end
;
/* This path is only reachable in Intel syntax (bracketed operand).  */
14022 gas_assert (intel_syntax
);
14023 end
= input_line_pointer
++;
14025 if (*input_line_pointer
== ']')
14027 ++input_line_pointer
;
/* Wrap the parsed inner expression in an expression symbol so the
   bracketed form survives as a single operand.  */
14028 e
->X_op_symbol
= make_expr_symbol (e
);
14029 e
->X_add_symbol
= NULL
;
14030 e
->X_add_number
= 0;
/* Failure path: mark the expression absent and restore the input
   pointer to where this attempt began.  */
14035 e
->X_op
= O_absent
;
14036 input_line_pointer
= end
;
14043 /* To maintain consistency with !BFD64 builds of gas, record whether any
14044 (binary) operator was involved in an expression. As expressions are
14045 evaluated in only 32 bits when !BFD64, we use this to decide whether to
14046 truncate results. */
/* NOTE(review): the early-return body for the O_absent case and the
   function's return statement are elided in this extract.  */
14047 bool i386_record_operator (operatorT op
,
14048 const expressionS
*left
,
14049 const expressionS
*right
)
14051 if (op
== O_absent
)
14056 /* Since the expression parser applies unary operators fine to bignum
14057 operands, we don't need to be concerned of respective operands not
14058 fitting in 32 bits. */
/* Unary case: only the right-hand operand exists; flag a large value when
   an unsigned constant exceeds 32 bits.  */
14059 if (right
->X_op
== O_constant
&& right
->X_unsigned
14060 && !fits_in_unsigned_long (right
->X_add_number
))
14063 /* This isn't entirely right: The pattern can also result when constant
14064 expressions are folded (e.g. 0xffffffff + 1). */
/* Binary case: either operand being an out-of-32-bit-range unsigned
   constant marks the whole expression as using large values.  */
14065 else if ((left
->X_op
== O_constant
&& left
->X_unsigned
14066 && !fits_in_unsigned_long (left
->X_add_number
))
14067 || (right
->X_op
== O_constant
&& right
->X_unsigned
14068 && !fits_in_unsigned_long (right
->X_add_number
)))
14069 expr_mode
= expr_large_value
;
/* Otherwise merely note that an operator was present — but never
   downgrade a previously recorded expr_large_value.  */
14071 if (expr_mode
!= expr_large_value
)
14072 expr_mode
= expr_operator_present
;
14078 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14079 const char *md_shortopts
= "kVQ:sqnO::";
14081 const char *md_shortopts
= "qnO::";
14084 #define OPTION_32 (OPTION_MD_BASE + 0)
14085 #define OPTION_64 (OPTION_MD_BASE + 1)
14086 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
14087 #define OPTION_MARCH (OPTION_MD_BASE + 3)
14088 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
14089 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
14090 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
14091 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
14092 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
14093 #define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 9)
14094 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
14095 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
14096 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
14097 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
14098 #define OPTION_X32 (OPTION_MD_BASE + 14)
14099 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
14100 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
14101 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
14102 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
14103 #define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
14104 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
14105 #define OPTION_MSHARED (OPTION_MD_BASE + 21)
14106 #define OPTION_MAMD64 (OPTION_MD_BASE + 22)
14107 #define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
14108 #define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
14109 #define OPTION_X86_USED_NOTE (OPTION_MD_BASE + 25)
14110 #define OPTION_MVEXWIG (OPTION_MD_BASE + 26)
14111 #define OPTION_MALIGN_BRANCH_BOUNDARY (OPTION_MD_BASE + 27)
14112 #define OPTION_MALIGN_BRANCH_PREFIX_SIZE (OPTION_MD_BASE + 28)
14113 #define OPTION_MALIGN_BRANCH (OPTION_MD_BASE + 29)
14114 #define OPTION_MBRANCHES_WITH_32B_BOUNDARIES (OPTION_MD_BASE + 30)
14115 #define OPTION_MLFENCE_AFTER_LOAD (OPTION_MD_BASE + 31)
14116 #define OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH (OPTION_MD_BASE + 32)
14117 #define OPTION_MLFENCE_BEFORE_RET (OPTION_MD_BASE + 33)
14118 #define OPTION_MUSE_UNALIGNED_VECTOR_MOVE (OPTION_MD_BASE + 34)
14120 struct option md_longopts
[] =
14122 {"32", no_argument
, NULL
, OPTION_32
},
14123 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
14124 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
14125 {"64", no_argument
, NULL
, OPTION_64
},
14127 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14128 {"x32", no_argument
, NULL
, OPTION_X32
},
14129 {"mshared", no_argument
, NULL
, OPTION_MSHARED
},
14130 {"mx86-used-note", required_argument
, NULL
, OPTION_X86_USED_NOTE
},
14132 {"divide", no_argument
, NULL
, OPTION_DIVIDE
},
14133 {"march", required_argument
, NULL
, OPTION_MARCH
},
14134 {"mtune", required_argument
, NULL
, OPTION_MTUNE
},
14135 {"mmnemonic", required_argument
, NULL
, OPTION_MMNEMONIC
},
14136 {"msyntax", required_argument
, NULL
, OPTION_MSYNTAX
},
14137 {"mindex-reg", no_argument
, NULL
, OPTION_MINDEX_REG
},
14138 {"mnaked-reg", no_argument
, NULL
, OPTION_MNAKED_REG
},
14139 {"msse2avx", no_argument
, NULL
, OPTION_MSSE2AVX
},
14140 {"muse-unaligned-vector-move", no_argument
, NULL
, OPTION_MUSE_UNALIGNED_VECTOR_MOVE
},
14141 {"msse-check", required_argument
, NULL
, OPTION_MSSE_CHECK
},
14142 {"moperand-check", required_argument
, NULL
, OPTION_MOPERAND_CHECK
},
14143 {"mavxscalar", required_argument
, NULL
, OPTION_MAVXSCALAR
},
14144 {"mvexwig", required_argument
, NULL
, OPTION_MVEXWIG
},
14145 {"madd-bnd-prefix", no_argument
, NULL
, OPTION_MADD_BND_PREFIX
},
14146 {"mevexlig", required_argument
, NULL
, OPTION_MEVEXLIG
},
14147 {"mevexwig", required_argument
, NULL
, OPTION_MEVEXWIG
},
14148 # if defined (TE_PE) || defined (TE_PEP)
14149 {"mbig-obj", no_argument
, NULL
, OPTION_MBIG_OBJ
},
14151 {"momit-lock-prefix", required_argument
, NULL
, OPTION_MOMIT_LOCK_PREFIX
},
14152 {"mfence-as-lock-add", required_argument
, NULL
, OPTION_MFENCE_AS_LOCK_ADD
},
14153 {"mrelax-relocations", required_argument
, NULL
, OPTION_MRELAX_RELOCATIONS
},
14154 {"mevexrcig", required_argument
, NULL
, OPTION_MEVEXRCIG
},
14155 {"malign-branch-boundary", required_argument
, NULL
, OPTION_MALIGN_BRANCH_BOUNDARY
},
14156 {"malign-branch-prefix-size", required_argument
, NULL
, OPTION_MALIGN_BRANCH_PREFIX_SIZE
},
14157 {"malign-branch", required_argument
, NULL
, OPTION_MALIGN_BRANCH
},
14158 {"mbranches-within-32B-boundaries", no_argument
, NULL
, OPTION_MBRANCHES_WITH_32B_BOUNDARIES
},
14159 {"mlfence-after-load", required_argument
, NULL
, OPTION_MLFENCE_AFTER_LOAD
},
14160 {"mlfence-before-indirect-branch", required_argument
, NULL
,
14161 OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
},
14162 {"mlfence-before-ret", required_argument
, NULL
, OPTION_MLFENCE_BEFORE_RET
},
14163 {"mamd64", no_argument
, NULL
, OPTION_MAMD64
},
14164 {"mintel64", no_argument
, NULL
, OPTION_MINTEL64
},
14165 {NULL
, no_argument
, NULL
, 0}
14167 size_t md_longopts_size
= sizeof (md_longopts
);
14170 md_parse_option (int c
, const char *arg
)
14173 char *arch
, *next
, *saved
, *type
;
14178 optimize_align_code
= 0;
14182 quiet_warnings
= 1;
14185 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14186 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
14187 should be emitted or not. FIXME: Not implemented. */
14189 if ((arg
[0] != 'y' && arg
[0] != 'n') || arg
[1])
14193 /* -V: SVR4 argument to print version ID. */
14195 print_version_id ();
14198 /* -k: Ignore for FreeBSD compatibility. */
14203 /* -s: On i386 Solaris, this tells the native assembler to use
14204 .stab instead of .stab.excl. We always use .stab anyhow. */
14207 case OPTION_MSHARED
:
14211 case OPTION_X86_USED_NOTE
:
14212 if (strcasecmp (arg
, "yes") == 0)
14214 else if (strcasecmp (arg
, "no") == 0)
14217 as_fatal (_("invalid -mx86-used-note= option: `%s'"), arg
);
14222 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
14223 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
14226 const char **list
, **l
;
14228 list
= bfd_target_list ();
14229 for (l
= list
; *l
!= NULL
; l
++)
14230 if (startswith (*l
, "elf64-x86-64")
14231 || strcmp (*l
, "coff-x86-64") == 0
14232 || strcmp (*l
, "pe-x86-64") == 0
14233 || strcmp (*l
, "pei-x86-64") == 0
14234 || strcmp (*l
, "mach-o-x86-64") == 0)
14236 default_arch
= "x86_64";
14240 as_fatal (_("no compiled in support for x86_64"));
14246 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14250 const char **list
, **l
;
14252 list
= bfd_target_list ();
14253 for (l
= list
; *l
!= NULL
; l
++)
14254 if (startswith (*l
, "elf32-x86-64"))
14256 default_arch
= "x86_64:32";
14260 as_fatal (_("no compiled in support for 32bit x86_64"));
14264 as_fatal (_("32bit x86_64 is only supported for ELF"));
14270 const char **list
, **l
;
14272 list
= bfd_target_list ();
14273 for (l
= list
; *l
!= NULL
; l
++)
14274 if (strstr (*l
, "-i386")
14275 || strstr (*l
, "-go32"))
14277 default_arch
= "i386";
14281 as_fatal (_("no compiled in support for ix86"));
14286 case OPTION_DIVIDE
:
14287 #ifdef SVR4_COMMENT_CHARS
14292 n
= XNEWVEC (char, strlen (i386_comment_chars
) + 1);
14294 for (s
= i386_comment_chars
; *s
!= '\0'; s
++)
14298 i386_comment_chars
= n
;
14304 saved
= xstrdup (arg
);
14306 /* Allow -march=+nosse. */
14312 as_fatal (_("invalid -march= option: `%s'"), arg
);
14313 next
= strchr (arch
, '+');
14316 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
14318 if (arch
== saved
&& cpu_arch
[j
].type
!= PROCESSOR_NONE
14319 && strcmp (arch
, cpu_arch
[j
].name
) == 0)
14322 if (! cpu_arch
[j
].enable
.bitfield
.cpui386
)
14325 cpu_arch_name
= cpu_arch
[j
].name
;
14326 free (cpu_sub_arch_name
);
14327 cpu_sub_arch_name
= NULL
;
14328 cpu_arch_flags
= cpu_arch
[j
].enable
;
14329 cpu_arch_isa
= cpu_arch
[j
].type
;
14330 cpu_arch_isa_flags
= cpu_arch
[j
].enable
;
14331 if (!cpu_arch_tune_set
)
14333 cpu_arch_tune
= cpu_arch_isa
;
14334 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
14338 else if (cpu_arch
[j
].type
== PROCESSOR_NONE
14339 && strcmp (arch
, cpu_arch
[j
].name
) == 0
14340 && !cpu_flags_all_zero (&cpu_arch
[j
].enable
))
14342 /* ISA extension. */
14343 i386_cpu_flags flags
;
14345 flags
= cpu_flags_or (cpu_arch_flags
,
14346 cpu_arch
[j
].enable
);
14348 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
14350 extend_cpu_sub_arch_name (arch
);
14351 cpu_arch_flags
= flags
;
14352 cpu_arch_isa_flags
= flags
;
14356 = cpu_flags_or (cpu_arch_isa_flags
,
14357 cpu_arch
[j
].enable
);
14362 if (j
>= ARRAY_SIZE (cpu_arch
) && startswith (arch
, "no"))
14364 /* Disable an ISA extension. */
14365 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
14366 if (cpu_arch
[j
].type
== PROCESSOR_NONE
14367 && strcmp (arch
+ 2, cpu_arch
[j
].name
) == 0)
14369 i386_cpu_flags flags
;
14371 flags
= cpu_flags_and_not (cpu_arch_flags
,
14372 cpu_arch
[j
].disable
);
14373 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
14375 extend_cpu_sub_arch_name (arch
);
14376 cpu_arch_flags
= flags
;
14377 cpu_arch_isa_flags
= flags
;
14383 if (j
>= ARRAY_SIZE (cpu_arch
))
14384 as_fatal (_("invalid -march= option: `%s'"), arg
);
14388 while (next
!= NULL
);
14394 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
14395 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
14397 if (cpu_arch
[j
].type
!= PROCESSOR_NONE
14398 && strcmp (arg
, cpu_arch
[j
].name
) == 0)
14400 cpu_arch_tune_set
= 1;
14401 cpu_arch_tune
= cpu_arch
[j
].type
;
14402 cpu_arch_tune_flags
= cpu_arch
[j
].enable
;
14406 if (j
>= ARRAY_SIZE (cpu_arch
))
14407 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
14410 case OPTION_MMNEMONIC
:
14411 if (strcasecmp (arg
, "att") == 0)
14412 intel_mnemonic
= 0;
14413 else if (strcasecmp (arg
, "intel") == 0)
14414 intel_mnemonic
= 1;
14416 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg
);
14419 case OPTION_MSYNTAX
:
14420 if (strcasecmp (arg
, "att") == 0)
14422 else if (strcasecmp (arg
, "intel") == 0)
14425 as_fatal (_("invalid -msyntax= option: `%s'"), arg
);
14428 case OPTION_MINDEX_REG
:
14429 allow_index_reg
= 1;
14432 case OPTION_MNAKED_REG
:
14433 allow_naked_reg
= 1;
14436 case OPTION_MSSE2AVX
:
14440 case OPTION_MUSE_UNALIGNED_VECTOR_MOVE
:
14441 use_unaligned_vector_move
= 1;
14444 case OPTION_MSSE_CHECK
:
14445 if (strcasecmp (arg
, "error") == 0)
14446 sse_check
= check_error
;
14447 else if (strcasecmp (arg
, "warning") == 0)
14448 sse_check
= check_warning
;
14449 else if (strcasecmp (arg
, "none") == 0)
14450 sse_check
= check_none
;
14452 as_fatal (_("invalid -msse-check= option: `%s'"), arg
);
14455 case OPTION_MOPERAND_CHECK
:
14456 if (strcasecmp (arg
, "error") == 0)
14457 operand_check
= check_error
;
14458 else if (strcasecmp (arg
, "warning") == 0)
14459 operand_check
= check_warning
;
14460 else if (strcasecmp (arg
, "none") == 0)
14461 operand_check
= check_none
;
14463 as_fatal (_("invalid -moperand-check= option: `%s'"), arg
);
14466 case OPTION_MAVXSCALAR
:
14467 if (strcasecmp (arg
, "128") == 0)
14468 avxscalar
= vex128
;
14469 else if (strcasecmp (arg
, "256") == 0)
14470 avxscalar
= vex256
;
14472 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg
);
14475 case OPTION_MVEXWIG
:
14476 if (strcmp (arg
, "0") == 0)
14478 else if (strcmp (arg
, "1") == 0)
14481 as_fatal (_("invalid -mvexwig= option: `%s'"), arg
);
14484 case OPTION_MADD_BND_PREFIX
:
14485 add_bnd_prefix
= 1;
14488 case OPTION_MEVEXLIG
:
14489 if (strcmp (arg
, "128") == 0)
14490 evexlig
= evexl128
;
14491 else if (strcmp (arg
, "256") == 0)
14492 evexlig
= evexl256
;
14493 else if (strcmp (arg
, "512") == 0)
14494 evexlig
= evexl512
;
14496 as_fatal (_("invalid -mevexlig= option: `%s'"), arg
);
14499 case OPTION_MEVEXRCIG
:
14500 if (strcmp (arg
, "rne") == 0)
14502 else if (strcmp (arg
, "rd") == 0)
14504 else if (strcmp (arg
, "ru") == 0)
14506 else if (strcmp (arg
, "rz") == 0)
14509 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg
);
14512 case OPTION_MEVEXWIG
:
14513 if (strcmp (arg
, "0") == 0)
14515 else if (strcmp (arg
, "1") == 0)
14518 as_fatal (_("invalid -mevexwig= option: `%s'"), arg
);
14521 # if defined (TE_PE) || defined (TE_PEP)
14522 case OPTION_MBIG_OBJ
:
14527 case OPTION_MOMIT_LOCK_PREFIX
:
14528 if (strcasecmp (arg
, "yes") == 0)
14529 omit_lock_prefix
= 1;
14530 else if (strcasecmp (arg
, "no") == 0)
14531 omit_lock_prefix
= 0;
14533 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg
);
14536 case OPTION_MFENCE_AS_LOCK_ADD
:
14537 if (strcasecmp (arg
, "yes") == 0)
14539 else if (strcasecmp (arg
, "no") == 0)
14542 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg
);
14545 case OPTION_MLFENCE_AFTER_LOAD
:
14546 if (strcasecmp (arg
, "yes") == 0)
14547 lfence_after_load
= 1;
14548 else if (strcasecmp (arg
, "no") == 0)
14549 lfence_after_load
= 0;
14551 as_fatal (_("invalid -mlfence-after-load= option: `%s'"), arg
);
14554 case OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
:
14555 if (strcasecmp (arg
, "all") == 0)
14557 lfence_before_indirect_branch
= lfence_branch_all
;
14558 if (lfence_before_ret
== lfence_before_ret_none
)
14559 lfence_before_ret
= lfence_before_ret_shl
;
14561 else if (strcasecmp (arg
, "memory") == 0)
14562 lfence_before_indirect_branch
= lfence_branch_memory
;
14563 else if (strcasecmp (arg
, "register") == 0)
14564 lfence_before_indirect_branch
= lfence_branch_register
;
14565 else if (strcasecmp (arg
, "none") == 0)
14566 lfence_before_indirect_branch
= lfence_branch_none
;
14568 as_fatal (_("invalid -mlfence-before-indirect-branch= option: `%s'"),
14572 case OPTION_MLFENCE_BEFORE_RET
:
14573 if (strcasecmp (arg
, "or") == 0)
14574 lfence_before_ret
= lfence_before_ret_or
;
14575 else if (strcasecmp (arg
, "not") == 0)
14576 lfence_before_ret
= lfence_before_ret_not
;
14577 else if (strcasecmp (arg
, "shl") == 0 || strcasecmp (arg
, "yes") == 0)
14578 lfence_before_ret
= lfence_before_ret_shl
;
14579 else if (strcasecmp (arg
, "none") == 0)
14580 lfence_before_ret
= lfence_before_ret_none
;
14582 as_fatal (_("invalid -mlfence-before-ret= option: `%s'"),
14586 case OPTION_MRELAX_RELOCATIONS
:
14587 if (strcasecmp (arg
, "yes") == 0)
14588 generate_relax_relocations
= 1;
14589 else if (strcasecmp (arg
, "no") == 0)
14590 generate_relax_relocations
= 0;
14592 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg
);
14595 case OPTION_MALIGN_BRANCH_BOUNDARY
:
14598 long int align
= strtoul (arg
, &end
, 0);
14603 align_branch_power
= 0;
14606 else if (align
>= 16)
14609 for (align_power
= 0;
14611 align
>>= 1, align_power
++)
14613 /* Limit alignment power to 31. */
14614 if (align
== 1 && align_power
< 32)
14616 align_branch_power
= align_power
;
14621 as_fatal (_("invalid -malign-branch-boundary= value: %s"), arg
);
14625 case OPTION_MALIGN_BRANCH_PREFIX_SIZE
:
14628 int align
= strtoul (arg
, &end
, 0);
14629 /* Some processors only support 5 prefixes. */
14630 if (*end
== '\0' && align
>= 0 && align
< 6)
14632 align_branch_prefix_size
= align
;
14635 as_fatal (_("invalid -malign-branch-prefix-size= value: %s"),
14640 case OPTION_MALIGN_BRANCH
:
14642 saved
= xstrdup (arg
);
14646 next
= strchr (type
, '+');
14649 if (strcasecmp (type
, "jcc") == 0)
14650 align_branch
|= align_branch_jcc_bit
;
14651 else if (strcasecmp (type
, "fused") == 0)
14652 align_branch
|= align_branch_fused_bit
;
14653 else if (strcasecmp (type
, "jmp") == 0)
14654 align_branch
|= align_branch_jmp_bit
;
14655 else if (strcasecmp (type
, "call") == 0)
14656 align_branch
|= align_branch_call_bit
;
14657 else if (strcasecmp (type
, "ret") == 0)
14658 align_branch
|= align_branch_ret_bit
;
14659 else if (strcasecmp (type
, "indirect") == 0)
14660 align_branch
|= align_branch_indirect_bit
;
14662 as_fatal (_("invalid -malign-branch= option: `%s'"), arg
);
14665 while (next
!= NULL
);
14669 case OPTION_MBRANCHES_WITH_32B_BOUNDARIES
:
14670 align_branch_power
= 5;
14671 align_branch_prefix_size
= 5;
14672 align_branch
= (align_branch_jcc_bit
14673 | align_branch_fused_bit
14674 | align_branch_jmp_bit
);
14677 case OPTION_MAMD64
:
14681 case OPTION_MINTEL64
:
14689 /* Turn off -Os. */
14690 optimize_for_space
= 0;
14692 else if (*arg
== 's')
14694 optimize_for_space
= 1;
14695 /* Turn on all encoding optimizations. */
14696 optimize
= INT_MAX
;
14700 optimize
= atoi (arg
);
14701 /* Turn off -Os. */
14702 optimize_for_space
= 0;
14712 #define MESSAGE_TEMPLATE \
/* Append NAME (LEN bytes) to the usage-listing buffer MESSAGE, flushing the
   buffer to STREAM and restarting at START when it would overflow the
   MESSAGE_TEMPLATE-sized line.  P is the current write position; *LEFT_P
   tracks remaining room.  NOTE(review): the return type/statement and a
   few interior lines are elided in this extract.  */
14716 output_message (FILE *stream
, char *p
, char *message
, char *start
,
14717 int *left_p
, const char *name
, int len
)
14719 int size
= sizeof (MESSAGE_TEMPLATE
);
14720 int left
= *left_p
;
14722 /* Reserve 2 spaces for ", " or ",\0" */
14725 /* Check if there is any room. */
/* Fits: copy NAME and advance the write position (mempcpy returns the
   byte past the copied data).  */
14733 p
= mempcpy (p
, name
, len
);
14737 /* Output the current message now and start a new one. */
14740 fprintf (stream
, "%s\n", message
);
/* Recompute the room left on the fresh line after the leading indent
   (START - MESSAGE) and the item about to be placed.  */
14742 left
= size
- (start
- message
) - len
- 2;
14744 gas_assert (left
>= 0);
14746 p
= mempcpy (p
, name
, len
);
/* Print the list of known CPUs / ISA extensions for --help output.
   EXT selects whether extensions are shown; CHECK filters out entries not
   valid as -march= processors.  Output is accumulated via output_message ()
   so lines wrap at the MESSAGE_TEMPLATE width.  NOTE(review): numerous
   interior lines (loop braces, `continue`s, declarations of p/left/j/name/
   len/str) are elided in this extract.  */
14754 show_arch (FILE *stream
, int ext
, int check
)
14756 static char message
[] = MESSAGE_TEMPLATE
;
/* START points past the fixed leading indent of the template line;
   wrapped lines resume there.  */
14757 char *start
= message
+ 27;
14759 int size
= sizeof (MESSAGE_TEMPLATE
);
14766 left
= size
- (start
- message
);
/* The three pseudo-architectures accepted by .arch come first.  */
14770 p
= output_message (stream
, p
, message
, start
, &left
,
14771 STRING_COMMA_LEN ("default"));
14772 p
= output_message (stream
, p
, message
, start
, &left
,
14773 STRING_COMMA_LEN ("push"));
14774 p
= output_message (stream
, p
, message
, start
, &left
,
14775 STRING_COMMA_LEN ("pop"));
14778 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
14780 /* Should it be skipped? */
14781 if (cpu_arch
[j
].skip
)
14784 name
= cpu_arch
[j
].name
;
14785 len
= cpu_arch
[j
].len
;
/* PROCESSOR_NONE entries in cpu_arch[] are ISA extensions, not CPUs.  */
14786 if (cpu_arch
[j
].type
== PROCESSOR_NONE
)
14788 /* It is an extension. Skip if we aren't asked to show it. */
14789 if (!ext
|| cpu_flags_all_zero (&cpu_arch
[j
].enable
))
14794 /* It is a processor. Skip if we show only extensions. */
14797 else if (check
&& ! cpu_arch
[j
].enable
.bitfield
.cpui386
)
14799 /* It is an impossible processor - skip. */
14803 p
= output_message (stream
, p
, message
, start
, &left
, name
, len
);
14806 /* Display disabled extensions. */
14808 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
/* Only pure extensions (PROCESSOR_NONE with an empty enable set) get a
   synthesized "no<name>" form here.  */
14812 if (cpu_arch
[j
].type
!= PROCESSOR_NONE
14813 || !cpu_flags_all_zero (&cpu_arch
[j
].enable
))
14815 str
= xasprintf ("no%s", cpu_arch
[j
].name
);
14816 p
= output_message (stream
, p
, message
, start
, &left
, str
,
/* Flush whatever remains in the buffer.  */
14822 fprintf (stream
, "%s\n", message
);
14826 md_show_usage (FILE *stream
)
14828 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14829 fprintf (stream
, _("\
14830 -Qy, -Qn ignored\n\
14831 -V print assembler version number\n\
14834 fprintf (stream
, _("\
14835 -n do not optimize code alignment\n\
14836 -O{012s} attempt some code optimizations\n\
14837 -q quieten some warnings\n"));
14838 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14839 fprintf (stream
, _("\
14843 # if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14844 fprintf (stream
, _("\
14845 --32/--64/--x32 generate 32bit/64bit/x32 object\n"));
14846 # elif defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O)
14847 fprintf (stream
, _("\
14848 --32/--64 generate 32bit/64bit object\n"));
14851 #ifdef SVR4_COMMENT_CHARS
14852 fprintf (stream
, _("\
14853 --divide do not treat `/' as a comment character\n"));
14855 fprintf (stream
, _("\
14856 --divide ignored\n"));
14858 fprintf (stream
, _("\
14859 -march=CPU[,+EXTENSION...]\n\
14860 generate code for CPU and EXTENSION, CPU is one of:\n"));
14861 show_arch (stream
, 0, 1);
14862 fprintf (stream
, _("\
14863 EXTENSION is combination of (possibly \"no\"-prefixed):\n"));
14864 show_arch (stream
, 1, 0);
14865 fprintf (stream
, _("\
14866 -mtune=CPU optimize for CPU, CPU is one of:\n"));
14867 show_arch (stream
, 0, 0);
14868 fprintf (stream
, _("\
14869 -msse2avx encode SSE instructions with VEX prefix\n"));
14870 fprintf (stream
, _("\
14871 -muse-unaligned-vector-move\n\
14872 encode aligned vector move as unaligned vector move\n"));
14873 fprintf (stream
, _("\
14874 -msse-check=[none|error|warning] (default: warning)\n\
14875 check SSE instructions\n"));
14876 fprintf (stream
, _("\
14877 -moperand-check=[none|error|warning] (default: warning)\n\
14878 check operand combinations for validity\n"));
14879 fprintf (stream
, _("\
14880 -mavxscalar=[128|256] (default: 128)\n\
14881 encode scalar AVX instructions with specific vector\n\
14883 fprintf (stream
, _("\
14884 -mvexwig=[0|1] (default: 0)\n\
14885 encode VEX instructions with specific VEX.W value\n\
14886 for VEX.W bit ignored instructions\n"));
14887 fprintf (stream
, _("\
14888 -mevexlig=[128|256|512] (default: 128)\n\
14889 encode scalar EVEX instructions with specific vector\n\
14891 fprintf (stream
, _("\
14892 -mevexwig=[0|1] (default: 0)\n\
14893 encode EVEX instructions with specific EVEX.W value\n\
14894 for EVEX.W bit ignored instructions\n"));
14895 fprintf (stream
, _("\
14896 -mevexrcig=[rne|rd|ru|rz] (default: rne)\n\
14897 encode EVEX instructions with specific EVEX.RC value\n\
14898 for SAE-only ignored instructions\n"));
14899 fprintf (stream
, _("\
14900 -mmnemonic=[att|intel] "));
14901 if (SYSV386_COMPAT
)
14902 fprintf (stream
, _("(default: att)\n"));
14904 fprintf (stream
, _("(default: intel)\n"));
14905 fprintf (stream
, _("\
14906 use AT&T/Intel mnemonic\n"));
14907 fprintf (stream
, _("\
14908 -msyntax=[att|intel] (default: att)\n\
14909 use AT&T/Intel syntax\n"));
14910 fprintf (stream
, _("\
14911 -mindex-reg support pseudo index registers\n"));
14912 fprintf (stream
, _("\
14913 -mnaked-reg don't require `%%' prefix for registers\n"));
14914 fprintf (stream
, _("\
14915 -madd-bnd-prefix add BND prefix for all valid branches\n"));
14916 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14917 fprintf (stream
, _("\
14918 -mshared disable branch optimization for shared code\n"));
14919 fprintf (stream
, _("\
14920 -mx86-used-note=[no|yes] "));
14921 if (DEFAULT_X86_USED_NOTE
)
14922 fprintf (stream
, _("(default: yes)\n"));
14924 fprintf (stream
, _("(default: no)\n"));
14925 fprintf (stream
, _("\
14926 generate x86 used ISA and feature properties\n"));
14928 #if defined (TE_PE) || defined (TE_PEP)
14929 fprintf (stream
, _("\
14930 -mbig-obj generate big object files\n"));
14932 fprintf (stream
, _("\
14933 -momit-lock-prefix=[no|yes] (default: no)\n\
14934 strip all lock prefixes\n"));
14935 fprintf (stream
, _("\
14936 -mfence-as-lock-add=[no|yes] (default: no)\n\
14937 encode lfence, mfence and sfence as\n\
14938 lock addl $0x0, (%%{re}sp)\n"));
14939 fprintf (stream
, _("\
14940 -mrelax-relocations=[no|yes] "));
14941 if (DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
)
14942 fprintf (stream
, _("(default: yes)\n"));
14944 fprintf (stream
, _("(default: no)\n"));
14945 fprintf (stream
, _("\
14946 generate relax relocations\n"));
14947 fprintf (stream
, _("\
14948 -malign-branch-boundary=NUM (default: 0)\n\
14949 align branches within NUM byte boundary\n"));
14950 fprintf (stream
, _("\
14951 -malign-branch=TYPE[+TYPE...] (default: jcc+fused+jmp)\n\
14952 TYPE is combination of jcc, fused, jmp, call, ret,\n\
14954 specify types of branches to align\n"));
14955 fprintf (stream
, _("\
14956 -malign-branch-prefix-size=NUM (default: 5)\n\
14957 align branches with NUM prefixes per instruction\n"));
14958 fprintf (stream
, _("\
14959 -mbranches-within-32B-boundaries\n\
14960 align branches within 32 byte boundary\n"));
14961 fprintf (stream
, _("\
14962 -mlfence-after-load=[no|yes] (default: no)\n\
14963 generate lfence after load\n"));
14964 fprintf (stream
, _("\
14965 -mlfence-before-indirect-branch=[none|all|register|memory] (default: none)\n\
14966 generate lfence before indirect near branch\n"));
14967 fprintf (stream
, _("\
14968 -mlfence-before-ret=[none|or|not|shl|yes] (default: none)\n\
14969 generate lfence before ret\n"));
14970 fprintf (stream
, _("\
14971 -mamd64 accept only AMD64 ISA [default]\n"));
14972 fprintf (stream
, _("\
14973 -mintel64 accept only Intel64 ISA\n"));
14976 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
14977 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
14978 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
14980 /* Pick the target format to use. */
14983 i386_target_format (void)
14985 if (startswith (default_arch
, "x86_64"))
14987 update_code_flag (CODE_64BIT
, 1);
14988 if (default_arch
[6] == '\0')
14989 x86_elf_abi
= X86_64_ABI
;
14991 x86_elf_abi
= X86_64_X32_ABI
;
14993 else if (!strcmp (default_arch
, "i386"))
14994 update_code_flag (CODE_32BIT
, 1);
14995 else if (!strcmp (default_arch
, "iamcu"))
14997 update_code_flag (CODE_32BIT
, 1);
14998 if (cpu_arch_isa
== PROCESSOR_UNKNOWN
)
15000 static const i386_cpu_flags iamcu_flags
= CPU_IAMCU_FLAGS
;
15001 cpu_arch_name
= "iamcu";
15002 free (cpu_sub_arch_name
);
15003 cpu_sub_arch_name
= NULL
;
15004 cpu_arch_flags
= iamcu_flags
;
15005 cpu_arch_isa
= PROCESSOR_IAMCU
;
15006 cpu_arch_isa_flags
= iamcu_flags
;
15007 if (!cpu_arch_tune_set
)
15009 cpu_arch_tune
= cpu_arch_isa
;
15010 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
15013 else if (cpu_arch_isa
!= PROCESSOR_IAMCU
)
15014 as_fatal (_("Intel MCU doesn't support `%s' architecture"),
15018 as_fatal (_("unknown architecture"));
15020 if (cpu_flags_all_zero (&cpu_arch_isa_flags
))
15021 cpu_arch_isa_flags
= cpu_arch
[flag_code
== CODE_64BIT
].enable
;
15022 if (cpu_flags_all_zero (&cpu_arch_tune_flags
))
15023 cpu_arch_tune_flags
= cpu_arch
[flag_code
== CODE_64BIT
].enable
;
15025 switch (OUTPUT_FLAVOR
)
15027 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
15028 case bfd_target_aout_flavour
:
15029 return AOUT_TARGET_FORMAT
;
15031 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
15032 # if defined (TE_PE) || defined (TE_PEP)
15033 case bfd_target_coff_flavour
:
15034 if (flag_code
== CODE_64BIT
)
15037 return use_big_obj
? "pe-bigobj-x86-64" : "pe-x86-64";
15039 return use_big_obj
? "pe-bigobj-i386" : "pe-i386";
15040 # elif defined (TE_GO32)
15041 case bfd_target_coff_flavour
:
15042 return "coff-go32";
15044 case bfd_target_coff_flavour
:
15045 return "coff-i386";
15048 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
15049 case bfd_target_elf_flavour
:
15051 const char *format
;
15053 switch (x86_elf_abi
)
15056 format
= ELF_TARGET_FORMAT
;
15058 tls_get_addr
= "___tls_get_addr";
15062 use_rela_relocations
= 1;
15065 tls_get_addr
= "__tls_get_addr";
15067 format
= ELF_TARGET_FORMAT64
;
15069 case X86_64_X32_ABI
:
15070 use_rela_relocations
= 1;
15073 tls_get_addr
= "__tls_get_addr";
15075 disallow_64bit_reloc
= 1;
15076 format
= ELF_TARGET_FORMAT32
;
15079 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
15081 if (x86_elf_abi
!= I386_ABI
)
15082 as_fatal (_("Intel MCU is 32bit only"));
15083 return ELF_TARGET_IAMCU_FORMAT
;
15089 #if defined (OBJ_MACH_O)
15090 case bfd_target_mach_o_flavour
:
15091 if (flag_code
== CODE_64BIT
)
15093 use_rela_relocations
= 1;
15095 return "mach-o-x86-64";
15098 return "mach-o-i386";
15106 #endif /* OBJ_MAYBE_ more than one */
/* Target hook: give the backend a chance to define otherwise-undefined
   symbols.  The only one handled is _GLOBAL_OFFSET_TABLE_, for which a
   single GOT symbol is created on first use.  NOTE(review): the return
   type/statements and the GOT_symbol guard lines are elided in this
   extract.  */
15109 md_undefined_symbol (char *name
)
/* Cheap three-character prefix test before the full strcmp against
   GLOBAL_OFFSET_TABLE_NAME.  */
15111 if (name
[0] == GLOBAL_OFFSET_TABLE_NAME
[0]
15112 && name
[1] == GLOBAL_OFFSET_TABLE_NAME
[1]
15113 && name
[2] == GLOBAL_OFFSET_TABLE_NAME
[2]
15114 && strcmp (name
, GLOBAL_OFFSET_TABLE_NAME
) == 0)
/* Two independent definitions of the GOT symbol would be a bug.  */
15118 if (symbol_find (name
))
15119 as_bad (_("GOT already in symbol table"));
/* Create it as undefined; the linker assigns its real value.  */
15120 GOT_symbol
= symbol_new (name
, undefined_section
,
15121 &zero_address_frag
, 0);
15128 /* Round up a section size to the appropriate boundary. */
/* NOTE(review): the return type, the `align` declaration, and the final
   `return size;` are elided in this extract.  Only a.out targets force the
   rounding here; other flavours return SIZE unchanged.  */
15131 md_section_align (segT segment ATTRIBUTE_UNUSED
, valueT size
)
15133 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
15134 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
15136 /* For a.out, force the section size to be aligned. If we don't do
15137 this, BFD will align it for us, but it will not write out the
15138 final bytes of the section. This may be a bug in BFD, but it is
15139 easier to fix it here since that is how the other a.out targets
15143 align
= bfd_section_alignment (segment
);
/* Round SIZE up to a multiple of 1 << align; the (valueT) cast keeps the
   mask computation in the unsigned section-size type.  */
15144 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
15151 /* On the i386, PC-relative offsets are relative to the start of the
15152 next instruction. That is, the address of the offset, plus its
15153 size, since the offset is always the last part of the insn. */
/* NOTE(review): the `long` return type line is elided in this extract.  */
15156 md_pcrel_from (fixS
*fixP
)
/* next-insn address = frag base + offset of the fix within the frag
   + size of the fixed-up field itself.  */
15158 return fixP
->fx_size
+ fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
/* Handler for the .bss pseudo-op: read an absolute subsection number and
   switch output to that subsection of the BSS section.  NOTE(review): the
   `temp` declaration and surrounding braces are elided in this extract.  */
15164 s_bss (int ignore ATTRIBUTE_UNUSED
)
15168 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* Let the ELF backend finalize bookkeeping for the section being left.  */
15170 obj_elf_section_change_hook ();
15172 temp
= get_absolute_expression ();
15173 subseg_set (bss_section
, (subsegT
) temp
);
/* Anything else on the line is a syntax error.  */
15174 demand_empty_rest_of_line ();
15179 /* Remember constant directive. */
/* Called when a data directive (.byte/.long/...) is emitted: if it lands in
   a code section, record it as the last instruction-like item so branch
   alignment does not pad across data, and warn that the lfence hardening
   options cannot guard data emitted into code.  NOTE(review): the return
   type and enclosing braces are elided in this extract.  */
15182 i386_cons_align (int ignore ATTRIBUTE_UNUSED
)
/* Only note the first directive of a run, and only inside SEC_CODE.  */
15184 if (last_insn
.kind
!= last_insn_directive
15185 && (bfd_section_flags (now_seg
) & SEC_CODE
))
15187 last_insn
.seg
= now_seg
;
15188 last_insn
.kind
= last_insn_directive
;
15189 last_insn
.name
= "constant directive";
15190 last_insn
.file
= as_where (&last_insn
.line
);
/* Warn once, mentioning every active lfence option being skipped.  */
15191 if (lfence_before_ret
!= lfence_before_ret_none
)
15193 if (lfence_before_indirect_branch
!= lfence_branch_none
)
15194 as_warn (_("constant directive skips -mlfence-before-ret "
15195 "and -mlfence-before-indirect-branch"));
15197 as_warn (_("constant directive skips -mlfence-before-ret"));
15199 else if (lfence_before_indirect_branch
!= lfence_branch_none
)
15200 as_warn (_("constant directive skips -mlfence-before-indirect-branch"));
/* Validate and massage a fixup before relocation generation: reject
   relocations against registers, decide whether SIZE relocs survive,
   convert sym-GOT differences into GOT-relative reloc types, and apply
   ELF-specific reloc rewrites (PLT32 against section symbols, GOT32X).
   Returns whether a relocation should be generated.  NOTE(review): the
   return type, several `return` statements, and some condition lines are
   elided in this extract.  */
15205 i386_validate_fix (fixS
*fixp
)
/* A fix whose symbol lives in reg_section means a register leaked into a
   relocatable expression — always an error.  */
15207 if (fixp
->fx_addsy
&& S_GET_SEGMENT(fixp
->fx_addsy
) == reg_section
)
15209 reloc_howto_type
*howto
;
15211 howto
= bfd_reloc_type_lookup (stdoutput
, fixp
->fx_r_type
);
15212 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
15213 _("invalid %s relocation against register"),
15214 howto
? howto
->name
: "<unknown>");
15218 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* SIZE relocs are only emitted for ELF, and only when the symbol's size
   cannot be resolved locally (undefined or external).  */
15219 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
15220 || fixp
->fx_r_type
== BFD_RELOC_SIZE64
)
15221 return IS_ELF
&& fixp
->fx_addsy
15222 && (!S_IS_DEFINED (fixp
->fx_addsy
)
15223 || S_IS_EXTERNAL (fixp
->fx_addsy
))
;
/* Expressions of the form `sym - _GLOBAL_OFFSET_TABLE_` are rewritten to
   GOT-relative relocation types and the subtrahend dropped.  */
15226 if (fixp
->fx_subsy
)
15228 if (fixp
->fx_subsy
== GOT_symbol
)
15230 if (fixp
->fx_r_type
== BFD_RELOC_32_PCREL
)
15234 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* fx_tcbit2/fx_tcbit encode whether the relaxable (…X) GOTPCREL forms,
   with or without REX, may be used.  */
15235 if (fixp
->fx_tcbit2
)
15236 fixp
->fx_r_type
= (fixp
->fx_tcbit
15237 ? BFD_RELOC_X86_64_REX_GOTPCRELX
15238 : BFD_RELOC_X86_64_GOTPCRELX
);
15241 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTPCREL
;
15246 fixp
->fx_r_type
= BFD_RELOC_386_GOTOFF
;
15248 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTOFF64
;
15250 fixp
->fx_subsy
= 0;
15253 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
15256 /* NB: Commit 292676c1 resolved PLT32 reloc against local symbol
15257 to section. Since PLT32 relocation must be against symbols,
15258 turn such PLT32 relocation into PC32 relocation. */
15260 && (fixp
->fx_r_type
== BFD_RELOC_386_PLT32
15261 || fixp
->fx_r_type
== BFD_RELOC_X86_64_PLT32
)
15262 && symbol_section_p (fixp
->fx_addsy
))
15263 fixp
->fx_r_type
= BFD_RELOC_32_PCREL
;
/* 32-bit counterpart of the GOTPCRELX rewrite: relaxable GOT32X.  */
15266 if (fixp
->fx_r_type
== BFD_RELOC_386_GOT32
15267 && fixp
->fx_tcbit2
)
15268 fixp
->fx_r_type
= BFD_RELOC_386_GOT32X
;
/* Translate an internal fixup into a BFD relocation entry for the
   object writer.  Returns a freshly allocated arelent, or NULL when no
   relocation is needed (a @size reloc fully resolved here) or the
   fixup is erroneous.  */

arelent *
tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
{
  arelent *rel;
  bfd_reloc_code_real_type code;

  switch (fixp->fx_r_type)
    {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
      symbolS *sym;

    case BFD_RELOC_SIZE32:
    case BFD_RELOC_SIZE64:
      /* Choose the symbol whose size @size refers to: exactly one of
	 the add/sub symbols must live in a non-absolute section.  */
      if (fixp->fx_addsy
	  && !bfd_is_abs_section (S_GET_SEGMENT (fixp->fx_addsy))
	  && (!fixp->fx_subsy
	      || bfd_is_abs_section (S_GET_SEGMENT (fixp->fx_subsy))))
	sym = fixp->fx_addsy;
      else if (fixp->fx_subsy
	       && !bfd_is_abs_section (S_GET_SEGMENT (fixp->fx_subsy))
	       && (!fixp->fx_addsy
		   || bfd_is_abs_section (S_GET_SEGMENT (fixp->fx_addsy))))
	sym = fixp->fx_subsy;
      else
	sym = NULL;
      if (IS_ELF && sym && S_IS_DEFINED (sym) && !S_IS_EXTERNAL (sym))
	{
	  /* Resolve size relocation against local symbol to size of
	     the symbol plus addend.  */
	  valueT value = S_GET_SIZE (sym);

	  /* A section symbol's "size" is the size of its section.  */
	  if (symbol_get_bfdsym (sym)->flags & BSF_SECTION_SYM)
	    value = bfd_section_size (S_GET_SEGMENT (sym));
	  if (sym == fixp->fx_subsy)
	    {
	      /* Subtracted symbol: negate its size contribution.  */
	      value = -value;
	      if (fixp->fx_addsy)
		value += S_GET_VALUE (fixp->fx_addsy);
	    }
	  else if (fixp->fx_subsy)
	    value -= S_GET_VALUE (fixp->fx_subsy);
	  value += fixp->fx_offset;
	  if (fixp->fx_r_type == BFD_RELOC_SIZE32
	      && object_64bit
	      && !fits_in_unsigned_long (value))
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("symbol size computation overflow"));
	  /* Patch the value in directly; no relocation is emitted.  */
	  fixp->fx_addsy = NULL;
	  fixp->fx_subsy = NULL;
	  md_apply_fix (fixp, (valueT *) &value, NULL);
	  return NULL;
	}
      if (!fixp->fx_addsy || fixp->fx_subsy)
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			"unsupported expression involving @size");
	  return NULL;
	}
#endif
      /* Fall through.  */

      /* These reloc types pass through to BFD unchanged.  */
    case BFD_RELOC_X86_64_PLT32:
    case BFD_RELOC_X86_64_GOT32:
    case BFD_RELOC_X86_64_GOTPCREL:
    case BFD_RELOC_X86_64_GOTPCRELX:
    case BFD_RELOC_X86_64_REX_GOTPCRELX:
    case BFD_RELOC_386_PLT32:
    case BFD_RELOC_386_GOT32:
    case BFD_RELOC_386_GOT32X:
    case BFD_RELOC_386_GOTOFF:
    case BFD_RELOC_386_GOTPC:
    case BFD_RELOC_386_TLS_GD:
    case BFD_RELOC_386_TLS_LDM:
    case BFD_RELOC_386_TLS_LDO_32:
    case BFD_RELOC_386_TLS_IE_32:
    case BFD_RELOC_386_TLS_IE:
    case BFD_RELOC_386_TLS_GOTIE:
    case BFD_RELOC_386_TLS_LE_32:
    case BFD_RELOC_386_TLS_LE:
    case BFD_RELOC_386_TLS_GOTDESC:
    case BFD_RELOC_386_TLS_DESC_CALL:
    case BFD_RELOC_X86_64_TLSGD:
    case BFD_RELOC_X86_64_TLSLD:
    case BFD_RELOC_X86_64_DTPOFF32:
    case BFD_RELOC_X86_64_DTPOFF64:
    case BFD_RELOC_X86_64_GOTTPOFF:
    case BFD_RELOC_X86_64_TPOFF32:
    case BFD_RELOC_X86_64_TPOFF64:
    case BFD_RELOC_X86_64_GOTOFF64:
    case BFD_RELOC_X86_64_GOTPC32:
    case BFD_RELOC_X86_64_GOT64:
    case BFD_RELOC_X86_64_GOTPCREL64:
    case BFD_RELOC_X86_64_GOTPC64:
    case BFD_RELOC_X86_64_GOTPLT64:
    case BFD_RELOC_X86_64_PLTOFF64:
    case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
    case BFD_RELOC_X86_64_TLSDESC_CALL:
    case BFD_RELOC_RVA:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#if defined (OBJ_COFF) && defined (TE_PE)
    case BFD_RELOC_32_SECREL:
    case BFD_RELOC_16_SECIDX:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_X86_64_32S:
      if (!fixp->fx_pcrel)
	{
	  /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32.  */
	  code = fixp->fx_r_type;
	  break;
	}
      /* Fall through.  */

    default:
      /* Everything else: derive the reloc code from pcrel-ness and the
	 size of the fixup field.  */
      if (fixp->fx_pcrel)
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte pc-relative relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32_PCREL;
	      break;
	    case 1: code = BFD_RELOC_8_PCREL; break;
	    case 2: code = BFD_RELOC_16_PCREL; break;
	    case 4: code = BFD_RELOC_32_PCREL; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64_PCREL; break;
#endif
	    }
	}
      else
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32;
	      break;
	    case 1: code = BFD_RELOC_8; break;
	    case 2: code = BFD_RELOC_16; break;
	    case 4: code = BFD_RELOC_32; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64; break;
#endif
	    }
	}
      break;
    }

  /* References to _GLOBAL_OFFSET_TABLE_ itself become GOTPC relocs.  */
  if ((code == BFD_RELOC_32
       || code == BFD_RELOC_32_PCREL
       || code == BFD_RELOC_X86_64_32S)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      if (!object_64bit)
	code = BFD_RELOC_386_GOTPC;
      else
	code = BFD_RELOC_X86_64_GOTPC32;
    }
  if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_X86_64_GOTPC64;
    }

  rel = XNEW (arelent);
  rel->sym_ptr_ptr = XNEW (asymbol *);
  *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);

  rel->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (!use_rela_relocations)
    {
      /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
	 vtable entry to be used in the relocation's section offset.  */
      if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
	rel->address = fixp->fx_offset;
#if defined (OBJ_COFF) && defined (TE_PE)
      else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
	rel->addend = fixp->fx_addnumber
		      - (S_GET_VALUE (fixp->fx_addsy) * 2);
      else
#endif
      rel->addend = 0;
    }
  /* Use the rela in 64bit mode.  */
  else
    {
      /* x32 objects cannot carry 64-bit relocations.  */
      if (disallow_64bit_reloc)
	switch (code)
	  {
	  case BFD_RELOC_X86_64_DTPOFF64:
	  case BFD_RELOC_X86_64_TPOFF64:
	  case BFD_RELOC_64_PCREL:
	  case BFD_RELOC_X86_64_GOTOFF64:
	  case BFD_RELOC_X86_64_GOT64:
	  case BFD_RELOC_X86_64_GOTPCREL64:
	  case BFD_RELOC_X86_64_GOTPC64:
	  case BFD_RELOC_X86_64_GOTPLT64:
	  case BFD_RELOC_X86_64_PLTOFF64:
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("cannot represent relocation type %s in x32 mode"),
			  bfd_get_reloc_code_name (code));
	    break;
	  default:
	    break;
	  }

      if (!fixp->fx_pcrel)
	rel->addend = fixp->fx_offset;
      else
	switch (code)
	  {
	  /* For these pc-relative GOT/PLT/TLS relocs the addend is the
	     offset adjusted back by the size of the field.  */
	  case BFD_RELOC_X86_64_PLT32:
	  case BFD_RELOC_X86_64_GOT32:
	  case BFD_RELOC_X86_64_GOTPCREL:
	  case BFD_RELOC_X86_64_GOTPCRELX:
	  case BFD_RELOC_X86_64_REX_GOTPCRELX:
	  case BFD_RELOC_X86_64_TLSGD:
	  case BFD_RELOC_X86_64_TLSLD:
	  case BFD_RELOC_X86_64_GOTTPOFF:
	  case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	  case BFD_RELOC_X86_64_TLSDESC_CALL:
	    rel->addend = fixp->fx_offset - fixp->fx_size;
	    break;
	  default:
	    /* NOTE(review): the "- fixp->fx_size" term was lost in
	       extraction; upstream includes it — confirm.  */
	    rel->addend = (section->vma
			   - fixp->fx_size
			   + fixp->fx_addnumber
			   + md_pcrel_from (fixp));
	    break;
	  }
    }

  rel->howto = bfd_reloc_type_lookup (stdoutput, code);
  if (rel->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent relocation type %s"),
		    bfd_get_reloc_code_name (code));
      /* Set howto to a garbage value so that we can keep going.  */
      rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
      gas_assert (rel->howto != NULL);
    }

  return rel;
}
15530 #include "tc-i386-intel.c"
/* Parse a register name from the input stream (for .cfi_* directives)
   and rewrite EXP into the corresponding DWARF2 register number, or
   O_illegal if the name is not a known register.  */

void
tc_x86_parse_to_dw2regnum (expressionS *exp)
{
  int saved_naked_reg;
  char saved_register_dot;

  /* Temporarily loosen the register parser: allow names without the
     '%' prefix, allow '.' inside names, and accept pseudo registers.
     Restore all three afterwards (order matters: these are globals).  */
  saved_naked_reg = allow_naked_reg;
  allow_naked_reg = 1;
  saved_register_dot = register_chars['.'];
  register_chars['.'] = '.';
  allow_pseudo_reg = 1;
  expression_and_evaluate (exp);
  allow_pseudo_reg = 0;
  register_chars['.'] = saved_register_dot;
  allow_naked_reg = saved_naked_reg;

  if (exp->X_op == O_register && exp->X_add_number >= 0)
    {
      if ((addressT) exp->X_add_number < i386_regtab_size)
	{
	  /* Map the register-table index to the DWARF number for the
	     current mode; flag_code >> 1 selects 32- vs 64-bit.  */
	  exp->X_op = O_constant;
	  exp->X_add_number = i386_regtab[exp->X_add_number]
			      .dw2_regnum[flag_code >> 1];
	}
      else
	exp->X_op = O_illegal;
    }
}
/* Emit the initial CFI instructions for a frame: CFA is the stack
   pointer at the standard offset, and the return address lives at the
   CFA.  */

void
tc_x86_frame_initial_instructions (void)
{
  /* Cache the DWARF register number of the stack pointer per mode
     (index flag_code >> 1: 32- vs 64-bit), computed once by feeding
     the register name through the expression parser.  */
  static unsigned int sp_regno[2];

  if (!sp_regno[flag_code >> 1])
    {
      char *saved_input = input_line_pointer;
      char sp[][4] = {"esp", "rsp"};
      expressionS exp;

      /* Redirect the parser at the literal register name.  */
      input_line_pointer = sp[flag_code >> 1];
      tc_x86_parse_to_dw2regnum (&exp);
      gas_assert (exp.X_op == O_constant);
      sp_regno[flag_code >> 1] = exp.X_add_number;
      input_line_pointer = saved_input;
    }

  cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
  cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
}
15584 x86_dwarf2_addr_size (void)
15586 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
15587 if (x86_elf_abi
== X86_64_X32_ABI
)
15590 return bfd_arch_bits_per_address (stdoutput
) / 8;
/* Recognize x86-specific ELF section type names in .section directives:
   "unwind" maps to SHT_X86_64_UNWIND in 64-bit mode.  Returns the
   section type, or -1 if the name is not handled here.  */

int
i386_elf_section_type (const char *str, size_t len)
{
  if (flag_code == CODE_64BIT
      && len == sizeof ("unwind") - 1
      && startswith (str, "unwind"))
    return SHT_X86_64_UNWIND;

  return -1;
}
15606 i386_solaris_fix_up_eh_frame (segT sec
)
15608 if (flag_code
== CODE_64BIT
)
15609 elf_section_type (sec
) = SHT_X86_64_UNWIND
;
/* Emit a section-relative (secrel) reference to SYMBOL, SIZE bytes
   wide, as used for DWARF debug offsets on PE targets.  */

void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  /* Build an O_secrel expression by hand and let emit_expr generate
     the data plus its relocation.  */
  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
15626 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* For ELF on x86-64, add support for SHF_X86_64_LARGE.  */

/* Handle an otherwise-unrecognized flag letter in a .section directive.
   Returns the section flag for the letter, or -1 after pointing
   *PTR_MSG at a diagnostic listing the letters this target accepts.  */
bfd_vma
x86_64_section_letter (int letter, const char **ptr_msg)
{
  if (flag_code == CODE_64BIT)
    {
      /* NOTE(review): the test of LETTER was lost in extraction;
	 upstream accepts 'l' here — confirm.  */
      if (letter == 'l')
	return SHF_X86_64_LARGE;

      *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
    }
  else
    *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
  return -1;
}
15645 x86_64_section_word (char *str
, size_t len
)
15647 if (len
== 5 && flag_code
== CODE_64BIT
&& startswith (str
, "large"))
15648 return SHF_X86_64_LARGE
;
/* Handle the .largecomm pseudo-op.  Outside 64-bit mode it degrades to
   an ordinary .comm (with a warning); in 64-bit mode it places the
   symbol in the large common section, using a local .lbss section for
   local symbols.  */

static void
handle_large_common (int small ATTRIBUTE_UNUSED)
{
  if (flag_code != CODE_64BIT)
    {
      s_comm_internal (0, elf_common_parse);
      as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
    }
  else
    {
      static segT lbss_section;
      asection *saved_com_section_ptr = elf_com_section_ptr;
      asection *saved_bss_section = bss_section;

      /* Create .lbss lazily on first use.  */
      if (lbss_section == NULL)
	{
	  flagword applicable;
	  segT seg = now_seg;
	  subsegT subseg = now_subseg;

	  /* The .lbss section is for local .largecomm symbols.  */
	  lbss_section = subseg_new (".lbss", 0);
	  applicable = bfd_applicable_section_flags (stdoutput);
	  bfd_set_section_flags (lbss_section, applicable & SEC_ALLOC);
	  seg_info (lbss_section)->bss = 1;

	  /* subseg_new switched sections; switch back.  */
	  subseg_set (seg, subseg);
	}

      /* Temporarily point the common machinery at the large-common
	 section and .lbss, parse the directive, then restore.  */
      elf_com_section_ptr = &_bfd_elf_large_com_section;
      bss_section = lbss_section;

      s_comm_internal (0, elf_common_parse);

      elf_com_section_ptr = saved_com_section_ptr;
      bss_section = saved_bss_section;
    }
}
15691 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */