1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2022 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "gen-sframe.h"
35 #include "elf/x86-64.h"
36 #include "opcodes/i386-init.h"
39 #ifndef INFER_ADDR_PREFIX
40 #define INFER_ADDR_PREFIX 1
44 #define DEFAULT_ARCH "i386"
49 #define INLINE __inline__
55 /* Prefixes will be emitted in the order defined below.
56 WAIT_PREFIX must be the first prefix since FWAIT really is an
57 instruction, and so must come before any prefixes.
58 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
59 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
65 #define HLE_PREFIX REP_PREFIX
66 #define BND_PREFIX REP_PREFIX
68 #define REX_PREFIX 6 /* must come last. */
69 #define MAX_PREFIXES 7 /* max prefixes per opcode */
71 /* we define the syntax here (modulo base,index,scale syntax) */
72 #define REGISTER_PREFIX '%'
73 #define IMMEDIATE_PREFIX '$'
74 #define ABSOLUTE_PREFIX '*'
76 /* these are the instruction mnemonic suffixes in AT&T syntax or
77 memory operand size in Intel syntax. */
78 #define WORD_MNEM_SUFFIX 'w'
79 #define BYTE_MNEM_SUFFIX 'b'
80 #define SHORT_MNEM_SUFFIX 's'
81 #define LONG_MNEM_SUFFIX 'l'
82 #define QWORD_MNEM_SUFFIX 'q'
83 /* Intel Syntax. Use a non-ascii letter since it never appears
85 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
87 #define END_OF_INSN '\0'
89 /* This matches the C -> StaticRounding alias in the opcode table. */
90 #define commutative staticrounding
93 'templates' is for grouping together 'template' structures for opcodes
94 of the same name. This is only used for storing the insns in the grand
95 ole hash table of insns.
96 The templates themselves start at START and range up to (but not including)
101 const insn_template
*start
;
102 const insn_template
*end
;
106 /* 386 operand encoding bytes: see 386 book for details of this. */
109 unsigned int regmem
; /* codes register or memory operand */
110 unsigned int reg
; /* codes register operand (or extended opcode) */
111 unsigned int mode
; /* how to interpret regmem & reg */
115 /* x86-64 extension prefix. */
116 typedef int rex_byte
;
118 /* 386 opcode byte to code indirect addressing. */
127 /* x86 arch names, types and features */
130 const char *name
; /* arch name */
131 unsigned int len
:8; /* arch string length */
132 bool skip
:1; /* show_arch should skip this. */
133 enum processor_type type
; /* arch type */
134 i386_cpu_flags enable
; /* cpu feature enable flags */
135 i386_cpu_flags disable
; /* cpu feature disable flags */
139 static void update_code_flag (int, int);
140 static void set_code_flag (int);
141 static void set_16bit_gcc_code_flag (int);
142 static void set_intel_syntax (int);
143 static void set_intel_mnemonic (int);
144 static void set_allow_index_reg (int);
145 static void set_check (int);
146 static void set_cpu_arch (int);
148 static void pe_directive_secrel (int);
149 static void pe_directive_secidx (int);
151 static void signed_cons (int);
152 static char *output_invalid (int c
);
153 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
155 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
157 static int i386_att_operand (char *);
158 static int i386_intel_operand (char *, int);
159 static int i386_intel_simplify (expressionS
*);
160 static int i386_intel_parse_name (const char *, expressionS
*);
161 static const reg_entry
*parse_register (char *, char **);
162 static char *parse_insn (char *, char *);
163 static char *parse_operands (char *, const char *);
164 static void swap_operands (void);
165 static void swap_2_operands (unsigned int, unsigned int);
166 static enum flag_code
i386_addressing_mode (void);
167 static void optimize_imm (void);
168 static void optimize_disp (void);
169 static const insn_template
*match_template (char);
170 static int check_string (void);
171 static int process_suffix (void);
172 static int check_byte_reg (void);
173 static int check_long_reg (void);
174 static int check_qword_reg (void);
175 static int check_word_reg (void);
176 static int finalize_imm (void);
177 static int process_operands (void);
178 static const reg_entry
*build_modrm_byte (void);
179 static void output_insn (void);
180 static void output_imm (fragS
*, offsetT
);
181 static void output_disp (fragS
*, offsetT
);
183 static void s_bss (int);
185 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
186 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
188 /* GNU_PROPERTY_X86_ISA_1_USED. */
189 static unsigned int x86_isa_1_used
;
190 /* GNU_PROPERTY_X86_FEATURE_2_USED. */
191 static unsigned int x86_feature_2_used
;
192 /* Generate x86 used ISA and feature properties. */
193 static unsigned int x86_used_note
= DEFAULT_X86_USED_NOTE
;
196 static const char *default_arch
= DEFAULT_ARCH
;
198 /* parse_register() returns this when a register alias cannot be used. */
199 static const reg_entry bad_reg
= { "<bad>", OPERAND_TYPE_NONE
, 0, 0,
200 { Dw2Inval
, Dw2Inval
} };
202 static const reg_entry
*reg_eax
;
203 static const reg_entry
*reg_ds
;
204 static const reg_entry
*reg_es
;
205 static const reg_entry
*reg_ss
;
206 static const reg_entry
*reg_st0
;
207 static const reg_entry
*reg_k0
;
212 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
213 unsigned char bytes
[4];
215 /* Destination or source register specifier. */
216 const reg_entry
*register_specifier
;
219 /* 'md_assemble ()' gathers together information and puts it into a
226 const reg_entry
*regs
;
231 no_error
, /* Must be first. */
232 operand_size_mismatch
,
233 operand_type_mismatch
,
234 register_type_mismatch
,
235 number_of_operands_mismatch
,
236 invalid_instruction_suffix
,
238 unsupported_with_intel_mnemonic
,
242 invalid_vsib_address
,
243 invalid_vector_register_set
,
244 invalid_tmm_register_set
,
245 invalid_dest_and_src_register_set
,
246 unsupported_vector_index_register
,
247 unsupported_broadcast
,
250 mask_not_on_destination
,
253 invalid_register_operand
,
258 /* TM holds the template for the insn were currently assembling. */
261 /* SUFFIX holds the instruction size suffix for byte, word, dword
262 or qword, if given. */
265 /* OPCODE_LENGTH holds the number of base opcode bytes. */
266 unsigned char opcode_length
;
268 /* OPERANDS gives the number of given operands. */
269 unsigned int operands
;
271 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
272 of given register, displacement, memory operands and immediate
274 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
276 /* TYPES [i] is the type (see above #defines) which tells us how to
277 use OP[i] for the corresponding operand. */
278 i386_operand_type types
[MAX_OPERANDS
];
280 /* Displacement expression, immediate expression, or register for each
282 union i386_op op
[MAX_OPERANDS
];
284 /* Flags for operands. */
285 unsigned int flags
[MAX_OPERANDS
];
286 #define Operand_PCrel 1
287 #define Operand_Mem 2
289 /* Relocation type for operand */
290 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
292 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
293 the base index byte below. */
294 const reg_entry
*base_reg
;
295 const reg_entry
*index_reg
;
296 unsigned int log2_scale_factor
;
298 /* SEG gives the seg_entries of this insn. They are zero unless
299 explicit segment overrides are given. */
300 const reg_entry
*seg
[2];
302 /* Copied first memory operand string, for re-checking. */
305 /* PREFIX holds all the given prefix opcodes (usually null).
306 PREFIXES is the number of prefix opcodes. */
307 unsigned int prefixes
;
308 unsigned char prefix
[MAX_PREFIXES
];
310 /* Register is in low 3 bits of opcode. */
313 /* The operand to a branch insn indicates an absolute branch. */
316 /* There is a memory operand of (%dx) which should be only used
317 with input/output instructions. */
318 bool input_output_operand
;
320 /* Extended states. */
328 xstate_ymm
= 1 << 2 | xstate_xmm
,
330 xstate_zmm
= 1 << 3 | xstate_ymm
,
333 /* Use MASK state. */
337 /* Has GOTPC or TLS relocation. */
338 bool has_gotpc_tls_reloc
;
340 /* RM and SIB are the modrm byte and the sib byte where the
341 addressing modes of this insn are encoded. */
348 /* Masking attributes.
350 The struct describes masking, applied to OPERAND in the instruction.
351 REG is a pointer to the corresponding mask register. ZEROING tells
352 whether merging or zeroing mask is used. */
353 struct Mask_Operation
355 const reg_entry
*reg
;
356 unsigned int zeroing
;
357 /* The operand where this operation is associated. */
358 unsigned int operand
;
361 /* Rounding control and SAE attributes. */
373 /* In Intel syntax the operand modifier form is supposed to be used, but
374 we continue to accept the immediate forms as well. */
378 /* Broadcasting attributes.
380 The struct describes broadcasting, applied to OPERAND. TYPE
381 expresses the broadcast factor. */
382 struct Broadcast_Operation
384 /* Type of broadcast: {1to2}, {1to4}, {1to8}, {1to16} or {1to32}. */
387 /* Index of broadcasted operand. */
388 unsigned int operand
;
390 /* Number of bytes to broadcast. */
394 /* Compressed disp8*N attribute. */
395 unsigned int memshift
;
397 /* Prefer load or store in encoding. */
400 dir_encoding_default
= 0,
406 /* Prefer 8bit, 16bit, 32bit displacement in encoding. */
409 disp_encoding_default
= 0,
415 /* Prefer the REX byte in encoding. */
418 /* Disable instruction size optimization. */
421 /* How to encode vector instructions. */
424 vex_encoding_default
= 0,
432 const char *rep_prefix
;
435 const char *hle_prefix
;
437 /* Have BND prefix. */
438 const char *bnd_prefix
;
440 /* Have NOTRACK prefix. */
441 const char *notrack_prefix
;
444 enum i386_error error
;
447 typedef struct _i386_insn i386_insn
;
449 /* Link RC type with corresponding string, that'll be looked for in
458 static const struct RC_name RC_NamesTable
[] =
460 { rne
, STRING_COMMA_LEN ("rn-sae") },
461 { rd
, STRING_COMMA_LEN ("rd-sae") },
462 { ru
, STRING_COMMA_LEN ("ru-sae") },
463 { rz
, STRING_COMMA_LEN ("rz-sae") },
464 { saeonly
, STRING_COMMA_LEN ("sae") },
467 /* List of chars besides those in app.c:symbol_chars that can start an
468 operand. Used to prevent the scrubber eating vital white-space. */
469 const char extra_symbol_chars
[] = "*%-([{}"
478 #if ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
479 && !defined (TE_GNU) \
480 && !defined (TE_LINUX) \
481 && !defined (TE_Haiku) \
482 && !defined (TE_FreeBSD) \
483 && !defined (TE_DragonFly) \
484 && !defined (TE_NetBSD))
485 /* This array holds the chars that always start a comment. If the
486 pre-processor is disabled, these aren't very useful. The option
487 --divide will remove '/' from this list. */
488 const char *i386_comment_chars
= "#/";
489 #define SVR4_COMMENT_CHARS 1
490 #define PREFIX_SEPARATOR '\\'
493 const char *i386_comment_chars
= "#";
494 #define PREFIX_SEPARATOR '/'
497 /* This array holds the chars that only start a comment at the beginning of
498 a line. If the line seems to have the form '# 123 filename'
499 .line and .file directives will appear in the pre-processed output.
500 Note that input_file.c hand checks for '#' at the beginning of the
501 first line of the input file. This is because the compiler outputs
502 #NO_APP at the beginning of its output.
503 Also note that comments started like this one will always work if
504 '/' isn't otherwise defined. */
505 const char line_comment_chars
[] = "#/";
507 const char line_separator_chars
[] = ";";
509 /* Chars that can be used to separate mant from exp in floating point
511 const char EXP_CHARS
[] = "eE";
513 /* Chars that mean this number is a floating point constant
516 const char FLT_CHARS
[] = "fFdDxXhHbB";
518 /* Tables for lexical analysis. */
519 static char mnemonic_chars
[256];
520 static char register_chars
[256];
521 static char operand_chars
[256];
522 static char identifier_chars
[256];
524 /* Lexical macros. */
525 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
526 #define is_operand_char(x) (operand_chars[(unsigned char) x])
527 #define is_register_char(x) (register_chars[(unsigned char) x])
528 #define is_space_char(x) ((x) == ' ')
529 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
531 /* All non-digit non-letter characters that may occur in an operand. */
532 static char operand_special_chars
[] = "%$-+(,)*._~/<>|&^!:[@]";
534 /* md_assemble() always leaves the strings it's passed unaltered. To
535 effect this we maintain a stack of saved characters that we've smashed
536 with '\0's (indicating end of strings for various sub-fields of the
537 assembler instruction). */
538 static char save_stack
[32];
539 static char *save_stack_p
;
540 #define END_STRING_AND_SAVE(s) \
541 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
542 #define RESTORE_END_STRING(s) \
543 do { *(s) = *--save_stack_p; } while (0)
545 /* The instruction we're assembling. */
548 /* Possible templates for current insn. */
549 static const templates
*current_templates
;
551 /* Per instruction expressionS buffers: max displacements & immediates. */
552 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
553 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
555 /* Current operand we are working on. */
556 static int this_operand
= -1;
558 /* We support four different modes. FLAG_CODE variable is used to distinguish
566 static enum flag_code flag_code
;
567 static unsigned int object_64bit
;
568 static unsigned int disallow_64bit_reloc
;
569 static int use_rela_relocations
= 0;
570 /* __tls_get_addr/___tls_get_addr symbol for TLS. */
571 static const char *tls_get_addr
;
573 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
574 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
575 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
577 /* The ELF ABI to use. */
585 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
588 #if defined (TE_PE) || defined (TE_PEP)
589 /* Use big object file format. */
590 static int use_big_obj
= 0;
593 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
594 /* 1 if generating code for a shared library. */
595 static int shared
= 0;
597 unsigned int x86_sframe_cfa_sp_reg
;
598 /* The other CFA base register for SFrame unwind info. */
599 unsigned int x86_sframe_cfa_fp_reg
;
600 unsigned int x86_sframe_cfa_ra_reg
;
604 /* 1 for intel syntax,
606 static int intel_syntax
= 0;
608 static enum x86_64_isa
610 amd64
= 1, /* AMD64 ISA. */
611 intel64
/* Intel64 ISA. */
614 /* 1 for intel mnemonic,
615 0 if att mnemonic. */
616 static int intel_mnemonic
= !SYSV386_COMPAT
;
618 /* 1 if pseudo registers are permitted. */
619 static int allow_pseudo_reg
= 0;
621 /* 1 if register prefix % not required. */
622 static int allow_naked_reg
= 0;
624 /* 1 if the assembler should add BND prefix for all control-transferring
625 instructions supporting it, even if this prefix wasn't specified
627 static int add_bnd_prefix
= 0;
629 /* 1 if pseudo index register, eiz/riz, is allowed. */
630 static int allow_index_reg
= 0;
632 /* 1 if the assembler should ignore LOCK prefix, even if it was
633 specified explicitly. */
634 static int omit_lock_prefix
= 0;
636 /* 1 if the assembler should encode lfence, mfence, and sfence as
637 "lock addl $0, (%{re}sp)". */
638 static int avoid_fence
= 0;
640 /* 1 if lfence should be inserted after every load. */
641 static int lfence_after_load
= 0;
643 /* Non-zero if lfence should be inserted before indirect branch. */
644 static enum lfence_before_indirect_branch_kind
646 lfence_branch_none
= 0,
647 lfence_branch_register
,
648 lfence_branch_memory
,
651 lfence_before_indirect_branch
;
653 /* Non-zero if lfence should be inserted before ret. */
654 static enum lfence_before_ret_kind
656 lfence_before_ret_none
= 0,
657 lfence_before_ret_not
,
658 lfence_before_ret_or
,
659 lfence_before_ret_shl
663 /* Types of previous instruction is .byte or prefix. */
678 /* 1 if the assembler should generate relax relocations. */
680 static int generate_relax_relocations
681 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
;
683 static enum check_kind
689 sse_check
, operand_check
= check_warning
;
691 /* Non-zero if branches should be aligned within power of 2 boundary. */
692 static int align_branch_power
= 0;
694 /* Types of branches to align. */
695 enum align_branch_kind
697 align_branch_none
= 0,
698 align_branch_jcc
= 1,
699 align_branch_fused
= 2,
700 align_branch_jmp
= 3,
701 align_branch_call
= 4,
702 align_branch_indirect
= 5,
706 /* Type bits of branches to align. */
707 enum align_branch_bit
709 align_branch_jcc_bit
= 1 << align_branch_jcc
,
710 align_branch_fused_bit
= 1 << align_branch_fused
,
711 align_branch_jmp_bit
= 1 << align_branch_jmp
,
712 align_branch_call_bit
= 1 << align_branch_call
,
713 align_branch_indirect_bit
= 1 << align_branch_indirect
,
714 align_branch_ret_bit
= 1 << align_branch_ret
717 static unsigned int align_branch
= (align_branch_jcc_bit
718 | align_branch_fused_bit
719 | align_branch_jmp_bit
);
721 /* Types of condition jump used by macro-fusion. */
724 mf_jcc_jo
= 0, /* base opcode 0x70 */
725 mf_jcc_jc
, /* base opcode 0x72 */
726 mf_jcc_je
, /* base opcode 0x74 */
727 mf_jcc_jna
, /* base opcode 0x76 */
728 mf_jcc_js
, /* base opcode 0x78 */
729 mf_jcc_jp
, /* base opcode 0x7a */
730 mf_jcc_jl
, /* base opcode 0x7c */
731 mf_jcc_jle
, /* base opcode 0x7e */
734 /* Types of compare flag-modifying instructions used by macro-fusion. */
737 mf_cmp_test_and
, /* test/cmp */
738 mf_cmp_alu_cmp
, /* add/sub/cmp */
739 mf_cmp_incdec
/* inc/dec */
742 /* The maximum padding size for fused jcc. CMP like instruction can
743 be 9 bytes and jcc can be 6 bytes. Leave room just in case for
745 #define MAX_FUSED_JCC_PADDING_SIZE 20
747 /* The maximum number of prefixes added for an instruction. */
748 static unsigned int align_branch_prefix_size
= 5;
751 1. Clear the REX_W bit with register operand if possible.
752 2. Above plus use 128bit vector instruction to clear the full vector
755 static int optimize
= 0;
758 1. Clear the REX_W bit with register operand if possible.
759 2. Above plus use 128bit vector instruction to clear the full vector
761 3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to
764 static int optimize_for_space
= 0;
766 /* Register prefix used for error message. */
767 static const char *register_prefix
= "%";
769 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
770 leave, push, and pop instructions so that gcc has the same stack
771 frame as in 32 bit mode. */
772 static char stackop_size
= '\0';
774 /* Non-zero to optimize code alignment. */
775 int optimize_align_code
= 1;
777 /* Non-zero to quieten some warnings. */
778 static int quiet_warnings
= 0;
780 /* Guard to avoid repeated warnings about non-16-bit code on 16-bit CPUs. */
781 static bool pre_386_16bit_warned
;
784 static const char *cpu_arch_name
= NULL
;
785 static char *cpu_sub_arch_name
= NULL
;
787 /* CPU feature flags. */
788 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
790 /* If we have selected a cpu we are generating instructions for. */
791 static int cpu_arch_tune_set
= 0;
793 /* Cpu we are generating instructions for. */
794 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
796 /* CPU feature flags of cpu we are generating instructions for. */
797 static i386_cpu_flags cpu_arch_tune_flags
;
799 /* CPU instruction set architecture used. */
800 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
802 /* CPU feature flags of instruction set architecture used. */
803 i386_cpu_flags cpu_arch_isa_flags
;
805 /* If set, conditional jumps are not automatically promoted to handle
806 larger than a byte offset. */
807 static bool no_cond_jump_promotion
= false;
809 /* Encode SSE instructions with VEX prefix. */
810 static unsigned int sse2avx
;
812 /* Encode aligned vector move as unaligned vector move. */
813 static unsigned int use_unaligned_vector_move
;
815 /* Encode scalar AVX instructions with specific vector length. */
822 /* Encode VEX WIG instructions with specific vex.w. */
829 /* Encode scalar EVEX LIG instructions with specific vector length. */
837 /* Encode EVEX WIG instructions with specific evex.w. */
844 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
845 static enum rc_type evexrcig
= rne
;
847 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
848 static symbolS
*GOT_symbol
;
850 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
851 unsigned int x86_dwarf2_return_column
;
853 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
854 int x86_cie_data_alignment
;
856 /* Interface to relax_segment.
857 There are 3 major relax states for 386 jump insns because the
858 different types of jumps add different sizes to frags when we're
859 figuring out what sort of jump to choose to reach a given label.
861 BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING are used to align
862 branches which are handled by md_estimate_size_before_relax() and
863 i386_generic_table_relax_frag(). */
866 #define UNCOND_JUMP 0
868 #define COND_JUMP86 2
869 #define BRANCH_PADDING 3
870 #define BRANCH_PREFIX 4
871 #define FUSED_JCC_PADDING 5
876 #define SMALL16 (SMALL | CODE16)
878 #define BIG16 (BIG | CODE16)
882 #define INLINE __inline__
888 #define ENCODE_RELAX_STATE(type, size) \
889 ((relax_substateT) (((type) << 2) | (size)))
890 #define TYPE_FROM_RELAX_STATE(s) \
892 #define DISP_SIZE_FROM_RELAX_STATE(s) \
893 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
895 /* This table is used by relax_frag to promote short jumps to long
896 ones where necessary. SMALL (short) jumps may be promoted to BIG
897 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
898 don't allow a short jump in a 32 bit code segment to be promoted to
899 a 16 bit offset jump because it's slower (requires data size
900 prefix), and doesn't work, unless the destination is in the bottom
901 64k of the code segment (The top 16 bits of eip are zeroed). */
903 const relax_typeS md_relax_table
[] =
906 1) most positive reach of this state,
907 2) most negative reach of this state,
908 3) how many bytes this mode will have in the variable part of the frag
909 4) which index into the table to try if we can't fit into this one. */
911 /* UNCOND_JUMP states. */
912 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
913 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
914 /* dword jmp adds 4 bytes to frag:
915 0 extra opcode bytes, 4 displacement bytes. */
917 /* word jmp adds 2 bytes to frag:
918 0 extra opcode bytes, 2 displacement bytes. */
921 /* COND_JUMP states. */
922 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
923 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
924 /* dword conditionals adds 5 bytes to frag:
925 1 extra opcode byte, 4 displacement bytes. */
927 /* word conditionals add 3 bytes to frag:
928 1 extra opcode byte, 2 displacement bytes. */
931 /* COND_JUMP86 states. */
932 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
933 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
934 /* dword conditionals adds 5 bytes to frag:
935 1 extra opcode byte, 4 displacement bytes. */
937 /* word conditionals add 4 bytes to frag:
938 1 displacement byte and a 3 byte long branch insn. */
942 #define ARCH(n, t, f, s) \
943 { STRING_COMMA_LEN (#n), s, PROCESSOR_ ## t, CPU_ ## f ## _FLAGS, \
945 #define SUBARCH(n, e, d, s) \
946 { STRING_COMMA_LEN (#n), s, PROCESSOR_NONE, CPU_ ## e ## _FLAGS, \
947 CPU_ ## d ## _FLAGS }
949 static const arch_entry cpu_arch
[] =
951 /* Do not replace the first two entries - i386_target_format() and
952 set_cpu_arch() rely on them being there in this order. */
953 ARCH (generic32
, GENERIC32
, GENERIC32
, false),
954 ARCH (generic64
, GENERIC64
, GENERIC64
, false),
955 ARCH (i8086
, UNKNOWN
, NONE
, false),
956 ARCH (i186
, UNKNOWN
, I186
, false),
957 ARCH (i286
, UNKNOWN
, I286
, false),
958 ARCH (i386
, I386
, I386
, false),
959 ARCH (i486
, I486
, I486
, false),
960 ARCH (i586
, PENTIUM
, I586
, false),
961 ARCH (i686
, PENTIUMPRO
, I686
, false),
962 ARCH (pentium
, PENTIUM
, I586
, false),
963 ARCH (pentiumpro
, PENTIUMPRO
, PENTIUMPRO
, false),
964 ARCH (pentiumii
, PENTIUMPRO
, P2
, false),
965 ARCH (pentiumiii
, PENTIUMPRO
, P3
, false),
966 ARCH (pentium4
, PENTIUM4
, P4
, false),
967 ARCH (prescott
, NOCONA
, CORE
, false),
968 ARCH (nocona
, NOCONA
, NOCONA
, false),
969 ARCH (yonah
, CORE
, CORE
, true),
970 ARCH (core
, CORE
, CORE
, false),
971 ARCH (merom
, CORE2
, CORE2
, true),
972 ARCH (core2
, CORE2
, CORE2
, false),
973 ARCH (corei7
, COREI7
, COREI7
, false),
974 ARCH (iamcu
, IAMCU
, IAMCU
, false),
975 ARCH (k6
, K6
, K6
, false),
976 ARCH (k6_2
, K6
, K6_2
, false),
977 ARCH (athlon
, ATHLON
, ATHLON
, false),
978 ARCH (sledgehammer
, K8
, K8
, true),
979 ARCH (opteron
, K8
, K8
, false),
980 ARCH (k8
, K8
, K8
, false),
981 ARCH (amdfam10
, AMDFAM10
, AMDFAM10
, false),
982 ARCH (bdver1
, BD
, BDVER1
, false),
983 ARCH (bdver2
, BD
, BDVER2
, false),
984 ARCH (bdver3
, BD
, BDVER3
, false),
985 ARCH (bdver4
, BD
, BDVER4
, false),
986 ARCH (znver1
, ZNVER
, ZNVER1
, false),
987 ARCH (znver2
, ZNVER
, ZNVER2
, false),
988 ARCH (znver3
, ZNVER
, ZNVER3
, false),
989 ARCH (znver4
, ZNVER
, ZNVER4
, false),
990 ARCH (btver1
, BT
, BTVER1
, false),
991 ARCH (btver2
, BT
, BTVER2
, false),
993 SUBARCH (8087, 8087, ANY_X87
, false),
994 SUBARCH (87, NONE
, ANY_X87
, false), /* Disable only! */
995 SUBARCH (287, 287, ANY_287
, false),
996 SUBARCH (387, 387, ANY_387
, false),
997 SUBARCH (687, 687, ANY_687
, false),
998 SUBARCH (cmov
, CMOV
, ANY_CMOV
, false),
999 SUBARCH (fxsr
, FXSR
, ANY_FXSR
, false),
1000 SUBARCH (mmx
, MMX
, ANY_MMX
, false),
1001 SUBARCH (sse
, SSE
, ANY_SSE
, false),
1002 SUBARCH (sse2
, SSE2
, ANY_SSE2
, false),
1003 SUBARCH (sse3
, SSE3
, ANY_SSE3
, false),
1004 SUBARCH (sse4a
, SSE4A
, ANY_SSE4A
, false),
1005 SUBARCH (ssse3
, SSSE3
, ANY_SSSE3
, false),
1006 SUBARCH (sse4
.1
, SSE4_1
, ANY_SSE4_1
, false),
1007 SUBARCH (sse4
.2
, SSE4_2
, ANY_SSE4_2
, false),
1008 SUBARCH (sse4
, SSE4_2
, ANY_SSE4_1
, false),
1009 SUBARCH (avx
, AVX
, ANY_AVX
, false),
1010 SUBARCH (avx2
, AVX2
, ANY_AVX2
, false),
1011 SUBARCH (avx512f
, AVX512F
, ANY_AVX512F
, false),
1012 SUBARCH (avx512cd
, AVX512CD
, ANY_AVX512CD
, false),
1013 SUBARCH (avx512er
, AVX512ER
, ANY_AVX512ER
, false),
1014 SUBARCH (avx512pf
, AVX512PF
, ANY_AVX512PF
, false),
1015 SUBARCH (avx512dq
, AVX512DQ
, ANY_AVX512DQ
, false),
1016 SUBARCH (avx512bw
, AVX512BW
, ANY_AVX512BW
, false),
1017 SUBARCH (avx512vl
, AVX512VL
, ANY_AVX512VL
, false),
1018 SUBARCH (vmx
, VMX
, VMX
, false),
1019 SUBARCH (vmfunc
, VMFUNC
, VMFUNC
, false),
1020 SUBARCH (smx
, SMX
, SMX
, false),
1021 SUBARCH (xsave
, XSAVE
, XSAVE
, false),
1022 SUBARCH (xsaveopt
, XSAVEOPT
, XSAVEOPT
, false),
1023 SUBARCH (xsavec
, XSAVEC
, XSAVEC
, false),
1024 SUBARCH (xsaves
, XSAVES
, XSAVES
, false),
1025 SUBARCH (aes
, AES
, AES
, false),
1026 SUBARCH (pclmul
, PCLMUL
, PCLMUL
, false),
1027 SUBARCH (clmul
, PCLMUL
, PCLMUL
, true),
1028 SUBARCH (fsgsbase
, FSGSBASE
, FSGSBASE
, false),
1029 SUBARCH (rdrnd
, RDRND
, RDRND
, false),
1030 SUBARCH (f16c
, F16C
, F16C
, false),
1031 SUBARCH (bmi2
, BMI2
, BMI2
, false),
1032 SUBARCH (fma
, FMA
, FMA
, false),
1033 SUBARCH (fma4
, FMA4
, FMA4
, false),
1034 SUBARCH (xop
, XOP
, XOP
, false),
1035 SUBARCH (lwp
, LWP
, LWP
, false),
1036 SUBARCH (movbe
, MOVBE
, MOVBE
, false),
1037 SUBARCH (cx16
, CX16
, CX16
, false),
1038 SUBARCH (ept
, EPT
, EPT
, false),
1039 SUBARCH (lzcnt
, LZCNT
, LZCNT
, false),
1040 SUBARCH (popcnt
, POPCNT
, POPCNT
, false),
1041 SUBARCH (hle
, HLE
, HLE
, false),
1042 SUBARCH (rtm
, RTM
, RTM
, false),
1043 SUBARCH (invpcid
, INVPCID
, INVPCID
, false),
1044 SUBARCH (clflush
, CLFLUSH
, CLFLUSH
, false),
1045 SUBARCH (nop
, NOP
, NOP
, false),
1046 SUBARCH (syscall
, SYSCALL
, SYSCALL
, false),
1047 SUBARCH (rdtscp
, RDTSCP
, RDTSCP
, false),
1048 SUBARCH (3dnow
, 3DNOW
, 3DNOW
, false),
1049 SUBARCH (3dnowa
, 3DNOWA
, 3DNOWA
, false),
1050 SUBARCH (padlock
, PADLOCK
, PADLOCK
, false),
1051 SUBARCH (pacifica
, SVME
, SVME
, true),
1052 SUBARCH (svme
, SVME
, SVME
, false),
1053 SUBARCH (abm
, ABM
, ABM
, false),
1054 SUBARCH (bmi
, BMI
, BMI
, false),
1055 SUBARCH (tbm
, TBM
, TBM
, false),
1056 SUBARCH (adx
, ADX
, ADX
, false),
1057 SUBARCH (rdseed
, RDSEED
, RDSEED
, false),
1058 SUBARCH (prfchw
, PRFCHW
, PRFCHW
, false),
1059 SUBARCH (smap
, SMAP
, SMAP
, false),
1060 SUBARCH (mpx
, MPX
, MPX
, false),
1061 SUBARCH (sha
, SHA
, SHA
, false),
1062 SUBARCH (clflushopt
, CLFLUSHOPT
, CLFLUSHOPT
, false),
1063 SUBARCH (prefetchwt1
, PREFETCHWT1
, PREFETCHWT1
, false),
1064 SUBARCH (se1
, SE1
, SE1
, false),
1065 SUBARCH (clwb
, CLWB
, CLWB
, false),
1066 SUBARCH (avx512ifma
, AVX512IFMA
, ANY_AVX512IFMA
, false),
1067 SUBARCH (avx512vbmi
, AVX512VBMI
, ANY_AVX512VBMI
, false),
1068 SUBARCH (avx512_4fmaps
, AVX512_4FMAPS
, ANY_AVX512_4FMAPS
, false),
1069 SUBARCH (avx512_4vnniw
, AVX512_4VNNIW
, ANY_AVX512_4VNNIW
, false),
1070 SUBARCH (avx512_vpopcntdq
, AVX512_VPOPCNTDQ
, ANY_AVX512_VPOPCNTDQ
, false),
1071 SUBARCH (avx512_vbmi2
, AVX512_VBMI2
, ANY_AVX512_VBMI2
, false),
1072 SUBARCH (avx512_vnni
, AVX512_VNNI
, ANY_AVX512_VNNI
, false),
1073 SUBARCH (avx512_bitalg
, AVX512_BITALG
, ANY_AVX512_BITALG
, false),
1074 SUBARCH (avx_vnni
, AVX_VNNI
, ANY_AVX_VNNI
, false),
1075 SUBARCH (clzero
, CLZERO
, CLZERO
, false),
1076 SUBARCH (mwaitx
, MWAITX
, MWAITX
, false),
1077 SUBARCH (ospke
, OSPKE
, OSPKE
, false),
1078 SUBARCH (rdpid
, RDPID
, RDPID
, false),
1079 SUBARCH (ptwrite
, PTWRITE
, PTWRITE
, false),
1080 SUBARCH (ibt
, IBT
, ANY_IBT
, false),
1081 SUBARCH (shstk
, SHSTK
, ANY_SHSTK
, false),
1082 SUBARCH (gfni
, GFNI
, GFNI
, false),
1083 SUBARCH (vaes
, VAES
, VAES
, false),
1084 SUBARCH (vpclmulqdq
, VPCLMULQDQ
, VPCLMULQDQ
, false),
1085 SUBARCH (wbnoinvd
, WBNOINVD
, WBNOINVD
, false),
1086 SUBARCH (pconfig
, PCONFIG
, PCONFIG
, false),
1087 SUBARCH (waitpkg
, WAITPKG
, WAITPKG
, false),
1088 SUBARCH (cldemote
, CLDEMOTE
, CLDEMOTE
, false),
1089 SUBARCH (amx_int8
, AMX_INT8
, ANY_AMX_INT8
, false),
1090 SUBARCH (amx_bf16
, AMX_BF16
, ANY_AMX_BF16
, false),
1091 SUBARCH (amx_fp16
, AMX_FP16
, AMX_FP16
, false),
1092 SUBARCH (amx_tile
, AMX_TILE
, ANY_AMX_TILE
, false),
1093 SUBARCH (movdiri
, MOVDIRI
, ANY_MOVDIRI
, false),
1094 SUBARCH (movdir64b
, MOVDIR64B
, ANY_MOVDIR64B
, false),
1095 SUBARCH (avx512_bf16
, AVX512_BF16
, ANY_AVX512_BF16
, false),
1096 SUBARCH (avx512_vp2intersect
, AVX512_VP2INTERSECT
,
1097 ANY_AVX512_VP2INTERSECT
, false),
1098 SUBARCH (tdx
, TDX
, ANY_TDX
, false),
1099 SUBARCH (enqcmd
, ENQCMD
, ANY_ENQCMD
, false),
1100 SUBARCH (serialize
, SERIALIZE
, ANY_SERIALIZE
, false),
1101 SUBARCH (rdpru
, RDPRU
, RDPRU
, false),
1102 SUBARCH (mcommit
, MCOMMIT
, MCOMMIT
, false),
1103 SUBARCH (sev_es
, SEV_ES
, SEV_ES
, false),
1104 SUBARCH (tsxldtrk
, TSXLDTRK
, ANY_TSXLDTRK
, false),
1105 SUBARCH (kl
, KL
, ANY_KL
, false),
1106 SUBARCH (widekl
, WIDEKL
, ANY_WIDEKL
, false),
1107 SUBARCH (uintr
, UINTR
, ANY_UINTR
, false),
1108 SUBARCH (hreset
, HRESET
, ANY_HRESET
, false),
1109 SUBARCH (avx512_fp16
, AVX512_FP16
, ANY_AVX512_FP16
, false),
1110 SUBARCH (prefetchi
, PREFETCHI
, PREFETCHI
, false),
1111 SUBARCH (avx_ifma
, AVX_IFMA
, ANY_AVX_IFMA
, false),
1112 SUBARCH (avx_vnni_int8
, AVX_VNNI_INT8
, ANY_AVX_VNNI_INT8
, false),
1113 SUBARCH (cmpccxadd
, CMPCCXADD
, ANY_CMPCCXADD
, false),
1114 SUBARCH (wrmsrns
, WRMSRNS
, ANY_WRMSRNS
, false),
1115 SUBARCH (msrlist
, MSRLIST
, ANY_MSRLIST
, false),
1116 SUBARCH (avx_ne_convert
, AVX_NE_CONVERT
, ANY_AVX_NE_CONVERT
, false),
1117 SUBARCH (rao_int
, RAO_INT
, ANY_RAO_INT
, false),
1118 SUBARCH (rmpquery
, RMPQUERY
, RMPQUERY
, false),
1125 /* Like s_lcomm_internal in gas/read.c but the alignment string
1126 is allowed to be optional. */
1129 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
1136 && *input_line_pointer
== ',')
1138 align
= parse_align (needs_align
- 1);
1140 if (align
== (addressT
) -1)
1155 bss_alloc (symbolP
, size
, align
);
1160 pe_lcomm (int needs_align
)
1162 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
1166 const pseudo_typeS md_pseudo_table
[] =
1168 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1169 {"align", s_align_bytes
, 0},
1171 {"align", s_align_ptwo
, 0},
1173 {"arch", set_cpu_arch
, 0},
1177 {"lcomm", pe_lcomm
, 1},
1179 {"ffloat", float_cons
, 'f'},
1180 {"dfloat", float_cons
, 'd'},
1181 {"tfloat", float_cons
, 'x'},
1182 {"hfloat", float_cons
, 'h'},
1183 {"bfloat16", float_cons
, 'b'},
1185 {"slong", signed_cons
, 4},
1186 {"noopt", s_ignore
, 0},
1187 {"optim", s_ignore
, 0},
1188 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
1189 {"code16", set_code_flag
, CODE_16BIT
},
1190 {"code32", set_code_flag
, CODE_32BIT
},
1192 {"code64", set_code_flag
, CODE_64BIT
},
1194 {"intel_syntax", set_intel_syntax
, 1},
1195 {"att_syntax", set_intel_syntax
, 0},
1196 {"intel_mnemonic", set_intel_mnemonic
, 1},
1197 {"att_mnemonic", set_intel_mnemonic
, 0},
1198 {"allow_index_reg", set_allow_index_reg
, 1},
1199 {"disallow_index_reg", set_allow_index_reg
, 0},
1200 {"sse_check", set_check
, 0},
1201 {"operand_check", set_check
, 1},
1202 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1203 {"largecomm", handle_large_common
, 0},
1205 {"file", dwarf2_directive_file
, 0},
1206 {"loc", dwarf2_directive_loc
, 0},
1207 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
1210 {"secrel32", pe_directive_secrel
, 0},
1211 {"secidx", pe_directive_secidx
, 0},
1216 /* For interface with expression (). */
1217 extern char *input_line_pointer
;
1219 /* Hash table for instruction mnemonic lookup. */
1220 static htab_t op_hash
;
1222 /* Hash table for register lookup. */
1223 static htab_t reg_hash
;
1225 /* Various efficient no-op patterns for aligning code labels.
1226 Note: Don't try to assemble the instructions in the comments.
1227 0L and 0w are not legal. */
1228 static const unsigned char f32_1
[] =
1230 static const unsigned char f32_2
[] =
1231 {0x66,0x90}; /* xchg %ax,%ax */
1232 static const unsigned char f32_3
[] =
1233 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
1234 static const unsigned char f32_4
[] =
1235 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1236 static const unsigned char f32_6
[] =
1237 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
1238 static const unsigned char f32_7
[] =
1239 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1240 static const unsigned char f16_3
[] =
1241 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
1242 static const unsigned char f16_4
[] =
1243 {0x8d,0xb4,0x00,0x00}; /* lea 0W(%si),%si */
1244 static const unsigned char jump_disp8
[] =
1245 {0xeb}; /* jmp disp8 */
1246 static const unsigned char jump32_disp32
[] =
1247 {0xe9}; /* jmp disp32 */
1248 static const unsigned char jump16_disp32
[] =
1249 {0x66,0xe9}; /* jmp disp32 */
1250 /* 32-bit NOPs patterns. */
1251 static const unsigned char *const f32_patt
[] = {
1252 f32_1
, f32_2
, f32_3
, f32_4
, NULL
, f32_6
, f32_7
1254 /* 16-bit NOPs patterns. */
1255 static const unsigned char *const f16_patt
[] = {
1256 f32_1
, f32_2
, f16_3
, f16_4
1258 /* nopl (%[re]ax) */
1259 static const unsigned char alt_3
[] =
1261 /* nopl 0(%[re]ax) */
1262 static const unsigned char alt_4
[] =
1263 {0x0f,0x1f,0x40,0x00};
1264 /* nopl 0(%[re]ax,%[re]ax,1) */
1265 static const unsigned char alt_5
[] =
1266 {0x0f,0x1f,0x44,0x00,0x00};
1267 /* nopw 0(%[re]ax,%[re]ax,1) */
1268 static const unsigned char alt_6
[] =
1269 {0x66,0x0f,0x1f,0x44,0x00,0x00};
1270 /* nopl 0L(%[re]ax) */
1271 static const unsigned char alt_7
[] =
1272 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1273 /* nopl 0L(%[re]ax,%[re]ax,1) */
1274 static const unsigned char alt_8
[] =
1275 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1276 /* nopw 0L(%[re]ax,%[re]ax,1) */
1277 static const unsigned char alt_9
[] =
1278 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1279 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1280 static const unsigned char alt_10
[] =
1281 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1282 /* data16 nopw %cs:0L(%eax,%eax,1) */
1283 static const unsigned char alt_11
[] =
1284 {0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1285 /* 32-bit and 64-bit NOPs patterns. */
1286 static const unsigned char *const alt_patt
[] = {
1287 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1288 alt_9
, alt_10
, alt_11
/* Generate COUNT bytes of NOPs to WHERE from PATT with the maximum
   size of a single NOP instruction MAX_SINGLE_NOP_SIZE.  */
1295 i386_output_nops (char *where
, const unsigned char *const *patt
,
1296 int count
, int max_single_nop_size
)
1299 /* Place the longer NOP first. */
1302 const unsigned char *nops
;
1304 if (max_single_nop_size
< 1)
1306 as_fatal (_("i386_output_nops called to generate nops of at most %d bytes!"),
1307 max_single_nop_size
);
1311 nops
= patt
[max_single_nop_size
- 1];
/* Use the smaller one if the requested one isn't available.  */
1316 max_single_nop_size
--;
1317 nops
= patt
[max_single_nop_size
- 1];
1320 last
= count
% max_single_nop_size
;
1323 for (offset
= 0; offset
< count
; offset
+= max_single_nop_size
)
1324 memcpy (where
+ offset
, nops
, max_single_nop_size
);
1328 nops
= patt
[last
- 1];
1331 /* Use the smaller one plus one-byte NOP if the needed one
1334 nops
= patt
[last
- 1];
1335 memcpy (where
+ offset
, nops
, last
);
1336 where
[offset
+ last
] = *patt
[0];
1339 memcpy (where
+ offset
, nops
, last
);
1344 fits_in_imm7 (offsetT num
)
1346 return (num
& 0x7f) == num
;
1350 fits_in_imm31 (offsetT num
)
1352 return (num
& 0x7fffffff) == num
;
/* Generate COUNT bytes of NOPs to WHERE with the maximum size of a
   single NOP instruction LIMIT.  */
1359 i386_generate_nops (fragS
*fragP
, char *where
, offsetT count
, int limit
)
1361 const unsigned char *const *patt
= NULL
;
1362 int max_single_nop_size
;
1363 /* Maximum number of NOPs before switching to jump over NOPs. */
1364 int max_number_of_nops
;
1366 switch (fragP
->fr_type
)
1371 case rs_machine_dependent
:
1372 /* Allow NOP padding for jumps and calls. */
1373 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
1374 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
1381 /* We need to decide which NOP sequence to use for 32bit and
1382 64bit. When -mtune= is used:
1384 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1385 PROCESSOR_GENERIC32, f32_patt will be used.
1386 2. For the rest, alt_patt will be used.
1388 When -mtune= isn't used, alt_patt will be used if
1389 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1392 When -march= or .arch is used, we can't use anything beyond
1393 cpu_arch_isa_flags. */
1395 if (flag_code
== CODE_16BIT
)
1398 max_single_nop_size
= sizeof (f16_patt
) / sizeof (f16_patt
[0]);
1399 /* Limit number of NOPs to 2 in 16-bit mode. */
1400 max_number_of_nops
= 2;
1404 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1406 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1407 switch (cpu_arch_tune
)
1409 case PROCESSOR_UNKNOWN
:
1410 /* We use cpu_arch_isa_flags to check if we SHOULD
1411 optimize with nops. */
1412 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1417 case PROCESSOR_PENTIUM4
:
1418 case PROCESSOR_NOCONA
:
1419 case PROCESSOR_CORE
:
1420 case PROCESSOR_CORE2
:
1421 case PROCESSOR_COREI7
:
1422 case PROCESSOR_GENERIC64
:
1424 case PROCESSOR_ATHLON
:
1426 case PROCESSOR_AMDFAM10
:
1428 case PROCESSOR_ZNVER
:
1432 case PROCESSOR_I386
:
1433 case PROCESSOR_I486
:
1434 case PROCESSOR_PENTIUM
:
1435 case PROCESSOR_PENTIUMPRO
:
1436 case PROCESSOR_IAMCU
:
1437 case PROCESSOR_GENERIC32
:
1440 case PROCESSOR_NONE
:
1446 switch (fragP
->tc_frag_data
.tune
)
1448 case PROCESSOR_UNKNOWN
:
1449 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1450 PROCESSOR_UNKNOWN. */
1454 case PROCESSOR_I386
:
1455 case PROCESSOR_I486
:
1456 case PROCESSOR_PENTIUM
:
1457 case PROCESSOR_IAMCU
:
1459 case PROCESSOR_ATHLON
:
1461 case PROCESSOR_AMDFAM10
:
1463 case PROCESSOR_ZNVER
:
1465 case PROCESSOR_GENERIC32
:
1466 /* We use cpu_arch_isa_flags to check if we CAN optimize
1468 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1473 case PROCESSOR_PENTIUMPRO
:
1474 case PROCESSOR_PENTIUM4
:
1475 case PROCESSOR_NOCONA
:
1476 case PROCESSOR_CORE
:
1477 case PROCESSOR_CORE2
:
1478 case PROCESSOR_COREI7
:
1479 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1484 case PROCESSOR_GENERIC64
:
1487 case PROCESSOR_NONE
:
1492 if (patt
== f32_patt
)
1494 max_single_nop_size
= sizeof (f32_patt
) / sizeof (f32_patt
[0]);
1495 /* Limit number of NOPs to 2 for older processors. */
1496 max_number_of_nops
= 2;
1500 max_single_nop_size
= sizeof (alt_patt
) / sizeof (alt_patt
[0]);
1501 /* Limit number of NOPs to 7 for newer processors. */
1502 max_number_of_nops
= 7;
1507 limit
= max_single_nop_size
;
1509 if (fragP
->fr_type
== rs_fill_nop
)
1511 /* Output NOPs for .nop directive. */
1512 if (limit
> max_single_nop_size
)
1514 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1515 _("invalid single nop size: %d "
1516 "(expect within [0, %d])"),
1517 limit
, max_single_nop_size
);
1521 else if (fragP
->fr_type
!= rs_machine_dependent
)
1522 fragP
->fr_var
= count
;
1524 if ((count
/ max_single_nop_size
) > max_number_of_nops
)
1526 /* Generate jump over NOPs. */
1527 offsetT disp
= count
- 2;
1528 if (fits_in_imm7 (disp
))
1530 /* Use "jmp disp8" if possible. */
1532 where
[0] = jump_disp8
[0];
1538 unsigned int size_of_jump
;
1540 if (flag_code
== CODE_16BIT
)
1542 where
[0] = jump16_disp32
[0];
1543 where
[1] = jump16_disp32
[1];
1548 where
[0] = jump32_disp32
[0];
1552 count
-= size_of_jump
+ 4;
1553 if (!fits_in_imm31 (count
))
1555 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1556 _("jump over nop padding out of range"));
1560 md_number_to_chars (where
+ size_of_jump
, count
, 4);
1561 where
+= size_of_jump
+ 4;
1565 /* Generate multiple NOPs. */
1566 i386_output_nops (where
, patt
, count
, limit
);
1570 operand_type_all_zero (const union i386_operand_type
*x
)
1572 switch (ARRAY_SIZE(x
->array
))
1583 return !x
->array
[0];
1590 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1592 switch (ARRAY_SIZE(x
->array
))
1608 x
->bitfield
.class = ClassNone
;
1609 x
->bitfield
.instance
= InstanceNone
;
1613 operand_type_equal (const union i386_operand_type
*x
,
1614 const union i386_operand_type
*y
)
1616 switch (ARRAY_SIZE(x
->array
))
1619 if (x
->array
[2] != y
->array
[2])
1623 if (x
->array
[1] != y
->array
[1])
1627 return x
->array
[0] == y
->array
[0];
1635 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1637 switch (ARRAY_SIZE(x
->array
))
1656 return !x
->array
[0];
1663 cpu_flags_equal (const union i386_cpu_flags
*x
,
1664 const union i386_cpu_flags
*y
)
1666 switch (ARRAY_SIZE(x
->array
))
1669 if (x
->array
[4] != y
->array
[4])
1673 if (x
->array
[3] != y
->array
[3])
1677 if (x
->array
[2] != y
->array
[2])
1681 if (x
->array
[1] != y
->array
[1])
1685 return x
->array
[0] == y
->array
[0];
1693 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1695 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1696 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1699 static INLINE i386_cpu_flags
1700 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1702 switch (ARRAY_SIZE (x
.array
))
1705 x
.array
[4] &= y
.array
[4];
1708 x
.array
[3] &= y
.array
[3];
1711 x
.array
[2] &= y
.array
[2];
1714 x
.array
[1] &= y
.array
[1];
1717 x
.array
[0] &= y
.array
[0];
1725 static INLINE i386_cpu_flags
1726 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1728 switch (ARRAY_SIZE (x
.array
))
1731 x
.array
[4] |= y
.array
[4];
1734 x
.array
[3] |= y
.array
[3];
1737 x
.array
[2] |= y
.array
[2];
1740 x
.array
[1] |= y
.array
[1];
1743 x
.array
[0] |= y
.array
[0];
1751 static INLINE i386_cpu_flags
1752 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1754 switch (ARRAY_SIZE (x
.array
))
1757 x
.array
[4] &= ~y
.array
[4];
1760 x
.array
[3] &= ~y
.array
[3];
1763 x
.array
[2] &= ~y
.array
[2];
1766 x
.array
[1] &= ~y
.array
[1];
1769 x
.array
[0] &= ~y
.array
[0];
1777 static const i386_cpu_flags avx512
= CPU_ANY_AVX512F_FLAGS
;
1779 #define CPU_FLAGS_ARCH_MATCH 0x1
1780 #define CPU_FLAGS_64BIT_MATCH 0x2
1782 #define CPU_FLAGS_PERFECT_MATCH \
1783 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_64BIT_MATCH)
1785 /* Return CPU flags match bits. */
1788 cpu_flags_match (const insn_template
*t
)
1790 i386_cpu_flags x
= t
->cpu_flags
;
1791 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1793 x
.bitfield
.cpu64
= 0;
1794 x
.bitfield
.cpuno64
= 0;
1796 if (cpu_flags_all_zero (&x
))
1798 /* This instruction is available on all archs. */
1799 match
|= CPU_FLAGS_ARCH_MATCH
;
1803 /* This instruction is available only on some archs. */
1804 i386_cpu_flags cpu
= cpu_arch_flags
;
1806 /* AVX512VL is no standalone feature - match it and then strip it. */
1807 if (x
.bitfield
.cpuavx512vl
&& !cpu
.bitfield
.cpuavx512vl
)
1809 x
.bitfield
.cpuavx512vl
= 0;
1811 /* AVX and AVX2 present at the same time express an operand size
1812 dependency - strip AVX2 for the purposes here. The operand size
1813 dependent check occurs in check_vecOperands(). */
1814 if (x
.bitfield
.cpuavx
&& x
.bitfield
.cpuavx2
)
1815 x
.bitfield
.cpuavx2
= 0;
1817 cpu
= cpu_flags_and (x
, cpu
);
1818 if (!cpu_flags_all_zero (&cpu
))
1820 if (x
.bitfield
.cpuavx
)
1822 /* We need to check a few extra flags with AVX. */
1823 if (cpu
.bitfield
.cpuavx
1824 && (!t
->opcode_modifier
.sse2avx
1825 || (sse2avx
&& !i
.prefix
[DATA_PREFIX
]))
1826 && (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1827 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1828 && (!x
.bitfield
.cpupclmul
|| cpu
.bitfield
.cpupclmul
))
1829 match
|= CPU_FLAGS_ARCH_MATCH
;
1831 else if (x
.bitfield
.cpuavx512f
)
1833 /* We need to check a few extra flags with AVX512F. */
1834 if (cpu
.bitfield
.cpuavx512f
1835 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1836 && (!x
.bitfield
.cpuvaes
|| cpu
.bitfield
.cpuvaes
)
1837 && (!x
.bitfield
.cpuvpclmulqdq
|| cpu
.bitfield
.cpuvpclmulqdq
))
1838 match
|= CPU_FLAGS_ARCH_MATCH
;
1841 match
|= CPU_FLAGS_ARCH_MATCH
;
1847 static INLINE i386_operand_type
1848 operand_type_and (i386_operand_type x
, i386_operand_type y
)
1850 if (x
.bitfield
.class != y
.bitfield
.class)
1851 x
.bitfield
.class = ClassNone
;
1852 if (x
.bitfield
.instance
!= y
.bitfield
.instance
)
1853 x
.bitfield
.instance
= InstanceNone
;
1855 switch (ARRAY_SIZE (x
.array
))
1858 x
.array
[2] &= y
.array
[2];
1861 x
.array
[1] &= y
.array
[1];
1864 x
.array
[0] &= y
.array
[0];
1872 static INLINE i386_operand_type
1873 operand_type_and_not (i386_operand_type x
, i386_operand_type y
)
1875 gas_assert (y
.bitfield
.class == ClassNone
);
1876 gas_assert (y
.bitfield
.instance
== InstanceNone
);
1878 switch (ARRAY_SIZE (x
.array
))
1881 x
.array
[2] &= ~y
.array
[2];
1884 x
.array
[1] &= ~y
.array
[1];
1887 x
.array
[0] &= ~y
.array
[0];
1895 static INLINE i386_operand_type
1896 operand_type_or (i386_operand_type x
, i386_operand_type y
)
1898 gas_assert (x
.bitfield
.class == ClassNone
||
1899 y
.bitfield
.class == ClassNone
||
1900 x
.bitfield
.class == y
.bitfield
.class);
1901 gas_assert (x
.bitfield
.instance
== InstanceNone
||
1902 y
.bitfield
.instance
== InstanceNone
||
1903 x
.bitfield
.instance
== y
.bitfield
.instance
);
1905 switch (ARRAY_SIZE (x
.array
))
1908 x
.array
[2] |= y
.array
[2];
1911 x
.array
[1] |= y
.array
[1];
1914 x
.array
[0] |= y
.array
[0];
1922 static INLINE i386_operand_type
1923 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
1925 gas_assert (y
.bitfield
.class == ClassNone
);
1926 gas_assert (y
.bitfield
.instance
== InstanceNone
);
1928 switch (ARRAY_SIZE (x
.array
))
1931 x
.array
[2] ^= y
.array
[2];
1934 x
.array
[1] ^= y
.array
[1];
1937 x
.array
[0] ^= y
.array
[0];
1945 static const i386_operand_type disp16_32
= OPERAND_TYPE_DISP16_32
;
1946 static const i386_operand_type anydisp
= OPERAND_TYPE_ANYDISP
;
1947 static const i386_operand_type regxmm
= OPERAND_TYPE_REGXMM
;
1948 static const i386_operand_type imm16
= OPERAND_TYPE_IMM16
;
1949 static const i386_operand_type imm32
= OPERAND_TYPE_IMM32
;
1950 static const i386_operand_type imm32s
= OPERAND_TYPE_IMM32S
;
1951 static const i386_operand_type imm16_32
= OPERAND_TYPE_IMM16_32
;
1952 static const i386_operand_type imm16_32s
= OPERAND_TYPE_IMM16_32S
;
1953 static const i386_operand_type imm16_32_32s
= OPERAND_TYPE_IMM16_32_32S
;
1964 operand_type_check (i386_operand_type t
, enum operand_type c
)
1969 return t
.bitfield
.class == Reg
;
1972 return (t
.bitfield
.imm8
1976 || t
.bitfield
.imm32s
1977 || t
.bitfield
.imm64
);
1980 return (t
.bitfield
.disp8
1981 || t
.bitfield
.disp16
1982 || t
.bitfield
.disp32
1983 || t
.bitfield
.disp64
);
1986 return (t
.bitfield
.disp8
1987 || t
.bitfield
.disp16
1988 || t
.bitfield
.disp32
1989 || t
.bitfield
.disp64
1990 || t
.bitfield
.baseindex
);
/* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
   between operand GIVEN and operand WANTED for instruction template T.  */
2003 match_operand_size (const insn_template
*t
, unsigned int wanted
,
2006 return !((i
.types
[given
].bitfield
.byte
2007 && !t
->operand_types
[wanted
].bitfield
.byte
)
2008 || (i
.types
[given
].bitfield
.word
2009 && !t
->operand_types
[wanted
].bitfield
.word
)
2010 || (i
.types
[given
].bitfield
.dword
2011 && !t
->operand_types
[wanted
].bitfield
.dword
)
2012 || (i
.types
[given
].bitfield
.qword
2013 && !t
->operand_types
[wanted
].bitfield
.qword
)
2014 || (i
.types
[given
].bitfield
.tbyte
2015 && !t
->operand_types
[wanted
].bitfield
.tbyte
));
/* Return 1 if there is no conflict in SIMD register between operand
   GIVEN and operand WANTED for instruction template T.  */
2022 match_simd_size (const insn_template
*t
, unsigned int wanted
,
2025 return !((i
.types
[given
].bitfield
.xmmword
2026 && !t
->operand_types
[wanted
].bitfield
.xmmword
)
2027 || (i
.types
[given
].bitfield
.ymmword
2028 && !t
->operand_types
[wanted
].bitfield
.ymmword
)
2029 || (i
.types
[given
].bitfield
.zmmword
2030 && !t
->operand_types
[wanted
].bitfield
.zmmword
)
2031 || (i
.types
[given
].bitfield
.tmmword
2032 && !t
->operand_types
[wanted
].bitfield
.tmmword
));
/* Return 1 if there is no conflict in any size between operand GIVEN
   and operand WANTED for instruction template T.  */
2039 match_mem_size (const insn_template
*t
, unsigned int wanted
,
2042 return (match_operand_size (t
, wanted
, given
)
2043 && !((i
.types
[given
].bitfield
.unspecified
2044 && !i
.broadcast
.type
2045 && !i
.broadcast
.bytes
2046 && !t
->operand_types
[wanted
].bitfield
.unspecified
)
2047 || (i
.types
[given
].bitfield
.fword
2048 && !t
->operand_types
[wanted
].bitfield
.fword
)
2049 /* For scalar opcode templates to allow register and memory
2050 operands at the same time, some special casing is needed
2051 here. Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
2052 down-conversion vpmov*. */
2053 || ((t
->operand_types
[wanted
].bitfield
.class == RegSIMD
2054 && t
->operand_types
[wanted
].bitfield
.byte
2055 + t
->operand_types
[wanted
].bitfield
.word
2056 + t
->operand_types
[wanted
].bitfield
.dword
2057 + t
->operand_types
[wanted
].bitfield
.qword
2058 > !!t
->opcode_modifier
.broadcast
)
2059 ? (i
.types
[given
].bitfield
.xmmword
2060 || i
.types
[given
].bitfield
.ymmword
2061 || i
.types
[given
].bitfield
.zmmword
)
2062 : !match_simd_size(t
, wanted
, given
))));
2065 /* Return value has MATCH_STRAIGHT set if there is no size conflict on any
2066 operands for instruction template T, and it has MATCH_REVERSE set if there
2067 is no size conflict on any operands for the template with operands reversed
2068 (and the template allows for reversing in the first place). */
2070 #define MATCH_STRAIGHT 1
2071 #define MATCH_REVERSE 2
2073 static INLINE
unsigned int
2074 operand_size_match (const insn_template
*t
)
2076 unsigned int j
, match
= MATCH_STRAIGHT
;
2078 /* Don't check non-absolute jump instructions. */
2079 if (t
->opcode_modifier
.jump
2080 && t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
2083 /* Check memory and accumulator operand size. */
2084 for (j
= 0; j
< i
.operands
; j
++)
2086 if (i
.types
[j
].bitfield
.class != Reg
2087 && i
.types
[j
].bitfield
.class != RegSIMD
2088 && t
->opcode_modifier
.operandconstraint
== ANY_SIZE
)
2091 if (t
->operand_types
[j
].bitfield
.class == Reg
2092 && !match_operand_size (t
, j
, j
))
2098 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2099 && !match_simd_size (t
, j
, j
))
2105 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2106 && (!match_operand_size (t
, j
, j
) || !match_simd_size (t
, j
, j
)))
2112 if ((i
.flags
[j
] & Operand_Mem
) && !match_mem_size (t
, j
, j
))
2119 if (!t
->opcode_modifier
.d
)
2122 /* Check reverse. */
2123 gas_assert ((i
.operands
>= 2 && i
.operands
<= 3)
2124 || t
->opcode_modifier
.vexsources
);
2126 for (j
= 0; j
< i
.operands
; j
++)
2128 unsigned int given
= i
.operands
- j
- 1;
2130 /* For 4- and 5-operand insns VEX.W controls just the first two
2131 register operands. */
2132 if (t
->opcode_modifier
.vexsources
)
2133 given
= j
< 2 ? 1 - j
: j
;
2135 if (t
->operand_types
[j
].bitfield
.class == Reg
2136 && !match_operand_size (t
, j
, given
))
2139 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2140 && !match_simd_size (t
, j
, given
))
2143 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2144 && (!match_operand_size (t
, j
, given
)
2145 || !match_simd_size (t
, j
, given
)))
2148 if ((i
.flags
[given
] & Operand_Mem
) && !match_mem_size (t
, j
, given
))
2152 return match
| MATCH_REVERSE
;
2156 operand_type_match (i386_operand_type overlap
,
2157 i386_operand_type given
)
2159 i386_operand_type temp
= overlap
;
2161 temp
.bitfield
.unspecified
= 0;
2162 temp
.bitfield
.byte
= 0;
2163 temp
.bitfield
.word
= 0;
2164 temp
.bitfield
.dword
= 0;
2165 temp
.bitfield
.fword
= 0;
2166 temp
.bitfield
.qword
= 0;
2167 temp
.bitfield
.tbyte
= 0;
2168 temp
.bitfield
.xmmword
= 0;
2169 temp
.bitfield
.ymmword
= 0;
2170 temp
.bitfield
.zmmword
= 0;
2171 temp
.bitfield
.tmmword
= 0;
2172 if (operand_type_all_zero (&temp
))
2175 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
)
2179 i
.error
= operand_type_mismatch
;
2183 /* If given types g0 and g1 are registers they must be of the same type
2184 unless the expected operand type register overlap is null.
2185 Some Intel syntax memory operand size checking also happens here. */
2188 operand_type_register_match (i386_operand_type g0
,
2189 i386_operand_type t0
,
2190 i386_operand_type g1
,
2191 i386_operand_type t1
)
2193 if (g0
.bitfield
.class != Reg
2194 && g0
.bitfield
.class != RegSIMD
2195 && (!operand_type_check (g0
, anymem
)
2196 || g0
.bitfield
.unspecified
2197 || (t0
.bitfield
.class != Reg
2198 && t0
.bitfield
.class != RegSIMD
)))
2201 if (g1
.bitfield
.class != Reg
2202 && g1
.bitfield
.class != RegSIMD
2203 && (!operand_type_check (g1
, anymem
)
2204 || g1
.bitfield
.unspecified
2205 || (t1
.bitfield
.class != Reg
2206 && t1
.bitfield
.class != RegSIMD
)))
2209 if (g0
.bitfield
.byte
== g1
.bitfield
.byte
2210 && g0
.bitfield
.word
== g1
.bitfield
.word
2211 && g0
.bitfield
.dword
== g1
.bitfield
.dword
2212 && g0
.bitfield
.qword
== g1
.bitfield
.qword
2213 && g0
.bitfield
.xmmword
== g1
.bitfield
.xmmword
2214 && g0
.bitfield
.ymmword
== g1
.bitfield
.ymmword
2215 && g0
.bitfield
.zmmword
== g1
.bitfield
.zmmword
)
2218 /* If expectations overlap in no more than a single size, all is fine. */
2219 g0
= operand_type_and (t0
, t1
);
2220 if (g0
.bitfield
.byte
2224 + g0
.bitfield
.xmmword
2225 + g0
.bitfield
.ymmword
2226 + g0
.bitfield
.zmmword
<= 1)
2229 i
.error
= register_type_mismatch
;
2234 static INLINE
unsigned int
2235 register_number (const reg_entry
*r
)
2237 unsigned int nr
= r
->reg_num
;
2239 if (r
->reg_flags
& RegRex
)
2242 if (r
->reg_flags
& RegVRex
)
2248 static INLINE
unsigned int
2249 mode_from_disp_size (i386_operand_type t
)
2251 if (t
.bitfield
.disp8
)
2253 else if (t
.bitfield
.disp16
2254 || t
.bitfield
.disp32
)
2261 fits_in_signed_byte (addressT num
)
2263 return num
+ 0x80 <= 0xff;
2267 fits_in_unsigned_byte (addressT num
)
2273 fits_in_unsigned_word (addressT num
)
2275 return num
<= 0xffff;
2279 fits_in_signed_word (addressT num
)
2281 return num
+ 0x8000 <= 0xffff;
2285 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED
)
2290 return num
+ 0x80000000 <= 0xffffffff;
2292 } /* fits_in_signed_long() */
2295 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED
)
2300 return num
<= 0xffffffff;
2302 } /* fits_in_unsigned_long() */
2304 static INLINE valueT
extend_to_32bit_address (addressT num
)
2307 if (fits_in_unsigned_long(num
))
2308 return (num
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
2310 if (!fits_in_signed_long (num
))
2311 return num
& 0xffffffff;
2318 fits_in_disp8 (offsetT num
)
2320 int shift
= i
.memshift
;
2326 mask
= (1 << shift
) - 1;
2328 /* Return 0 if NUM isn't properly aligned. */
2332 /* Check if NUM will fit in 8bit after shift. */
2333 return fits_in_signed_byte (num
>> shift
);
2337 fits_in_imm4 (offsetT num
)
2339 return (num
& 0xf) == num
;
2342 static i386_operand_type
2343 smallest_imm_type (offsetT num
)
2345 i386_operand_type t
;
2347 operand_type_set (&t
, 0);
2348 t
.bitfield
.imm64
= 1;
2350 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
2352 /* This code is disabled on the 486 because all the Imm1 forms
2353 in the opcode table are slower on the i486. They're the
2354 versions with the implicitly specified single-position
2355 displacement, which has another syntax if you really want to
2357 t
.bitfield
.imm1
= 1;
2358 t
.bitfield
.imm8
= 1;
2359 t
.bitfield
.imm8s
= 1;
2360 t
.bitfield
.imm16
= 1;
2361 t
.bitfield
.imm32
= 1;
2362 t
.bitfield
.imm32s
= 1;
2364 else if (fits_in_signed_byte (num
))
2366 t
.bitfield
.imm8
= 1;
2367 t
.bitfield
.imm8s
= 1;
2368 t
.bitfield
.imm16
= 1;
2369 t
.bitfield
.imm32
= 1;
2370 t
.bitfield
.imm32s
= 1;
2372 else if (fits_in_unsigned_byte (num
))
2374 t
.bitfield
.imm8
= 1;
2375 t
.bitfield
.imm16
= 1;
2376 t
.bitfield
.imm32
= 1;
2377 t
.bitfield
.imm32s
= 1;
2379 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
2381 t
.bitfield
.imm16
= 1;
2382 t
.bitfield
.imm32
= 1;
2383 t
.bitfield
.imm32s
= 1;
2385 else if (fits_in_signed_long (num
))
2387 t
.bitfield
.imm32
= 1;
2388 t
.bitfield
.imm32s
= 1;
2390 else if (fits_in_unsigned_long (num
))
2391 t
.bitfield
.imm32
= 1;
2397 offset_in_range (offsetT val
, int size
)
2403 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
2404 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
2406 case 4: mask
= ((addressT
) 1 << 32) - 1; break;
2408 case sizeof (val
): return val
;
2412 if ((val
& ~mask
) != 0 && (-val
& ~mask
) != 0)
2413 as_warn (_("0x%" PRIx64
" shortened to 0x%" PRIx64
),
2414 (uint64_t) val
, (uint64_t) (val
& mask
));
2429 a. PREFIX_EXIST if attempting to add a prefix where one from the
2430 same class already exists.
2431 b. PREFIX_LOCK if lock prefix is added.
2432 c. PREFIX_REP if rep/repne prefix is added.
2433 d. PREFIX_DS if ds prefix is added.
2434 e. PREFIX_OTHER if other prefix is added.
2437 static enum PREFIX_GROUP
2438 add_prefix (unsigned int prefix
)
2440 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2443 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2444 && flag_code
== CODE_64BIT
)
2446 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2447 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_R
)
2448 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_X
)
2449 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_B
))
2460 case DS_PREFIX_OPCODE
:
2463 case CS_PREFIX_OPCODE
:
2464 case ES_PREFIX_OPCODE
:
2465 case FS_PREFIX_OPCODE
:
2466 case GS_PREFIX_OPCODE
:
2467 case SS_PREFIX_OPCODE
:
2471 case REPNE_PREFIX_OPCODE
:
2472 case REPE_PREFIX_OPCODE
:
2477 case LOCK_PREFIX_OPCODE
:
2486 case ADDR_PREFIX_OPCODE
:
2490 case DATA_PREFIX_OPCODE
:
2494 if (i
.prefix
[q
] != 0)
2502 i
.prefix
[q
] |= prefix
;
2505 as_bad (_("same type of prefix used twice"));
2511 update_code_flag (int value
, int check
)
2513 PRINTF_LIKE ((*as_error
));
2515 flag_code
= (enum flag_code
) value
;
2516 if (flag_code
== CODE_64BIT
)
2518 cpu_arch_flags
.bitfield
.cpu64
= 1;
2519 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2523 cpu_arch_flags
.bitfield
.cpu64
= 0;
2524 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2526 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2529 as_error
= as_fatal
;
2532 (*as_error
) (_("64bit mode not supported on `%s'."),
2533 cpu_arch_name
? cpu_arch_name
: default_arch
);
2535 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2538 as_error
= as_fatal
;
2541 (*as_error
) (_("32bit mode not supported on `%s'."),
2542 cpu_arch_name
? cpu_arch_name
: default_arch
);
2544 stackop_size
= '\0';
static void
set_code_flag (int value)
{
  /* Non-checking wrapper around update_code_flag for the .code16/32/64
     directives.  */
  update_code_flag (value, 0);
}
2554 set_16bit_gcc_code_flag (int new_code_flag
)
2556 flag_code
= (enum flag_code
) new_code_flag
;
2557 if (flag_code
!= CODE_16BIT
)
2559 cpu_arch_flags
.bitfield
.cpu64
= 0;
2560 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2561 stackop_size
= LONG_MNEM_SUFFIX
;
2565 set_intel_syntax (int syntax_flag
)
2567 /* Find out if register prefixing is specified. */
2568 int ask_naked_reg
= 0;
2571 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2574 int e
= get_symbol_name (&string
);
2576 if (strcmp (string
, "prefix") == 0)
2578 else if (strcmp (string
, "noprefix") == 0)
2581 as_bad (_("bad argument to syntax directive."));
2582 (void) restore_line_pointer (e
);
2584 demand_empty_rest_of_line ();
2586 intel_syntax
= syntax_flag
;
2588 if (ask_naked_reg
== 0)
2589 allow_naked_reg
= (intel_syntax
2590 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2592 allow_naked_reg
= (ask_naked_reg
< 0);
2594 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2596 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2597 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2598 register_prefix
= allow_naked_reg
? "" : "%";
2602 set_intel_mnemonic (int mnemonic_flag
)
2604 intel_mnemonic
= mnemonic_flag
;
2608 set_allow_index_reg (int flag
)
2610 allow_index_reg
= flag
;
2614 set_check (int what
)
2616 enum check_kind
*kind
;
2621 kind
= &operand_check
;
2632 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2635 int e
= get_symbol_name (&string
);
2637 if (strcmp (string
, "none") == 0)
2639 else if (strcmp (string
, "warning") == 0)
2640 *kind
= check_warning
;
2641 else if (strcmp (string
, "error") == 0)
2642 *kind
= check_error
;
2644 as_bad (_("bad argument to %s_check directive."), str
);
2645 (void) restore_line_pointer (e
);
2648 as_bad (_("missing argument for %s_check directive"), str
);
2650 demand_empty_rest_of_line ();
2654 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2655 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2657 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2658 static const char *arch
;
2660 /* Intel MCU is only supported on ELF. */
2666 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2667 use default_arch. */
2668 arch
= cpu_arch_name
;
2670 arch
= default_arch
;
2673 /* If we are targeting Intel MCU, we must enable it. */
2674 if ((get_elf_backend_data (stdoutput
)->elf_machine_code
== EM_IAMCU
)
2675 == new_flag
.bitfield
.cpuiamcu
)
2678 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2683 extend_cpu_sub_arch_name (const char *name
)
2685 if (cpu_sub_arch_name
)
2686 cpu_sub_arch_name
= reconcat (cpu_sub_arch_name
, cpu_sub_arch_name
,
2687 ".", name
, (const char *) NULL
);
2689 cpu_sub_arch_name
= concat (".", name
, (const char *) NULL
);
2693 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2695 typedef struct arch_stack_entry
2697 const struct arch_stack_entry
*prev
;
2700 i386_cpu_flags flags
;
2701 i386_cpu_flags isa_flags
;
2702 enum processor_type isa
;
2703 enum flag_code flag_code
;
2705 bool no_cond_jump_promotion
;
2707 static const arch_stack_entry
*arch_stack_top
;
2711 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2714 int e
= get_symbol_name (&s
);
2715 const char *string
= s
;
2717 i386_cpu_flags flags
;
2719 if (strcmp (string
, "default") == 0)
2721 if (strcmp (default_arch
, "iamcu") == 0)
2722 string
= default_arch
;
2725 static const i386_cpu_flags cpu_unknown_flags
= CPU_UNKNOWN_FLAGS
;
2727 cpu_arch_name
= NULL
;
2728 free (cpu_sub_arch_name
);
2729 cpu_sub_arch_name
= NULL
;
2730 cpu_arch_flags
= cpu_unknown_flags
;
2731 if (flag_code
== CODE_64BIT
)
2733 cpu_arch_flags
.bitfield
.cpu64
= 1;
2734 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2738 cpu_arch_flags
.bitfield
.cpu64
= 0;
2739 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2741 cpu_arch_isa
= PROCESSOR_UNKNOWN
;
2742 cpu_arch_isa_flags
= cpu_arch
[flag_code
== CODE_64BIT
].enable
;
2743 if (!cpu_arch_tune_set
)
2745 cpu_arch_tune
= cpu_arch_isa
;
2746 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2749 j
= ARRAY_SIZE (cpu_arch
) + 1;
2752 else if (strcmp (string
, "push") == 0)
2754 arch_stack_entry
*top
= XNEW (arch_stack_entry
);
2756 top
->name
= cpu_arch_name
;
2757 if (cpu_sub_arch_name
)
2758 top
->sub_name
= xstrdup (cpu_sub_arch_name
);
2760 top
->sub_name
= NULL
;
2761 top
->flags
= cpu_arch_flags
;
2762 top
->isa
= cpu_arch_isa
;
2763 top
->isa_flags
= cpu_arch_isa_flags
;
2764 top
->flag_code
= flag_code
;
2765 top
->stackop_size
= stackop_size
;
2766 top
->no_cond_jump_promotion
= no_cond_jump_promotion
;
2768 top
->prev
= arch_stack_top
;
2769 arch_stack_top
= top
;
2771 (void) restore_line_pointer (e
);
2772 demand_empty_rest_of_line ();
2775 else if (strcmp (string
, "pop") == 0)
2777 const arch_stack_entry
*top
= arch_stack_top
;
2780 as_bad (_(".arch stack is empty"));
2781 else if (top
->flag_code
!= flag_code
2782 || top
->stackop_size
!= stackop_size
)
2784 static const unsigned int bits
[] = {
2790 as_bad (_("this `.arch pop' requires `.code%u%s' to be in effect"),
2791 bits
[top
->flag_code
],
2792 top
->stackop_size
== LONG_MNEM_SUFFIX
? "gcc" : "");
2796 arch_stack_top
= top
->prev
;
2798 cpu_arch_name
= top
->name
;
2799 free (cpu_sub_arch_name
);
2800 cpu_sub_arch_name
= top
->sub_name
;
2801 cpu_arch_flags
= top
->flags
;
2802 cpu_arch_isa
= top
->isa
;
2803 cpu_arch_isa_flags
= top
->isa_flags
;
2804 no_cond_jump_promotion
= top
->no_cond_jump_promotion
;
2809 (void) restore_line_pointer (e
);
2810 demand_empty_rest_of_line ();
2814 for (; j
< ARRAY_SIZE (cpu_arch
); j
++)
2816 if (strcmp (string
+ (*string
== '.'), cpu_arch
[j
].name
) == 0
2817 && (*string
== '.') == (cpu_arch
[j
].type
== PROCESSOR_NONE
))
2821 check_cpu_arch_compatible (string
, cpu_arch
[j
].enable
);
2823 cpu_arch_name
= cpu_arch
[j
].name
;
2824 free (cpu_sub_arch_name
);
2825 cpu_sub_arch_name
= NULL
;
2826 cpu_arch_flags
= cpu_arch
[j
].enable
;
2827 if (flag_code
== CODE_64BIT
)
2829 cpu_arch_flags
.bitfield
.cpu64
= 1;
2830 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2834 cpu_arch_flags
.bitfield
.cpu64
= 0;
2835 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2837 cpu_arch_isa
= cpu_arch
[j
].type
;
2838 cpu_arch_isa_flags
= cpu_arch
[j
].enable
;
2839 if (!cpu_arch_tune_set
)
2841 cpu_arch_tune
= cpu_arch_isa
;
2842 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2844 pre_386_16bit_warned
= false;
2848 if (cpu_flags_all_zero (&cpu_arch
[j
].enable
))
2851 flags
= cpu_flags_or (cpu_arch_flags
,
2852 cpu_arch
[j
].enable
);
2854 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2856 extend_cpu_sub_arch_name (string
+ 1);
2857 cpu_arch_flags
= flags
;
2858 cpu_arch_isa_flags
= flags
;
2862 = cpu_flags_or (cpu_arch_isa_flags
,
2863 cpu_arch
[j
].enable
);
2864 (void) restore_line_pointer (e
);
2865 demand_empty_rest_of_line ();
2870 if (startswith (string
, ".no") && j
>= ARRAY_SIZE (cpu_arch
))
2872 /* Disable an ISA extension. */
2873 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2874 if (cpu_arch
[j
].type
== PROCESSOR_NONE
2875 && strcmp (string
+ 3, cpu_arch
[j
].name
) == 0)
2877 flags
= cpu_flags_and_not (cpu_arch_flags
,
2878 cpu_arch
[j
].disable
);
2879 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2881 extend_cpu_sub_arch_name (string
+ 1);
2882 cpu_arch_flags
= flags
;
2883 cpu_arch_isa_flags
= flags
;
2885 (void) restore_line_pointer (e
);
2886 demand_empty_rest_of_line ();
2891 if (j
== ARRAY_SIZE (cpu_arch
))
2892 as_bad (_("no such architecture: `%s'"), string
);
2894 *input_line_pointer
= e
;
2897 as_bad (_("missing cpu architecture"));
2899 no_cond_jump_promotion
= 0;
2900 if (*input_line_pointer
== ','
2901 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2906 ++input_line_pointer
;
2907 e
= get_symbol_name (&string
);
2909 if (strcmp (string
, "nojumps") == 0)
2910 no_cond_jump_promotion
= 1;
2911 else if (strcmp (string
, "jumps") == 0)
2914 as_bad (_("no such architecture modifier: `%s'"), string
);
2916 (void) restore_line_pointer (e
);
2919 demand_empty_rest_of_line ();
2922 enum bfd_architecture
2925 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2927 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2928 || flag_code
== CODE_64BIT
)
2929 as_fatal (_("Intel MCU is 32bit ELF only"));
2930 return bfd_arch_iamcu
;
2933 return bfd_arch_i386
;
2939 if (startswith (default_arch
, "x86_64"))
2941 if (default_arch
[6] == '\0')
2942 return bfd_mach_x86_64
;
2944 return bfd_mach_x64_32
;
2946 else if (!strcmp (default_arch
, "i386")
2947 || !strcmp (default_arch
, "iamcu"))
2949 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2951 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
)
2952 as_fatal (_("Intel MCU is 32bit ELF only"));
2953 return bfd_mach_i386_iamcu
;
2956 return bfd_mach_i386_i386
;
2959 as_fatal (_("unknown architecture"));
2965 /* Support pseudo prefixes like {disp32}. */
2966 lex_type
['{'] = LEX_BEGIN_NAME
;
2968 /* Initialize op_hash hash table. */
2969 op_hash
= str_htab_create ();
2972 const insn_template
*optab
;
2973 templates
*core_optab
;
2975 /* Setup for loop. */
2977 core_optab
= notes_alloc (sizeof (*core_optab
));
2978 core_optab
->start
= optab
;
2983 if (optab
->name
== NULL
2984 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
2986 /* different name --> ship out current template list;
2987 add to hash table; & begin anew. */
2988 core_optab
->end
= optab
;
2989 if (str_hash_insert (op_hash
, (optab
- 1)->name
, core_optab
, 0))
2990 as_fatal (_("duplicate %s"), (optab
- 1)->name
);
2992 if (optab
->name
== NULL
)
2994 core_optab
= notes_alloc (sizeof (*core_optab
));
2995 core_optab
->start
= optab
;
3000 /* Initialize reg_hash hash table. */
3001 reg_hash
= str_htab_create ();
3003 const reg_entry
*regtab
;
3004 unsigned int regtab_size
= i386_regtab_size
;
3006 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
3008 switch (regtab
->reg_type
.bitfield
.class)
3011 if (regtab
->reg_type
.bitfield
.dword
)
3013 if (regtab
->reg_type
.bitfield
.instance
== Accum
)
3016 else if (regtab
->reg_type
.bitfield
.tbyte
)
3018 /* There's no point inserting st(<N>) in the hash table, as
3019 parentheses aren't included in register_chars[] anyway. */
3020 if (regtab
->reg_type
.bitfield
.instance
!= Accum
)
3027 switch (regtab
->reg_num
)
3029 case 0: reg_es
= regtab
; break;
3030 case 2: reg_ss
= regtab
; break;
3031 case 3: reg_ds
= regtab
; break;
3036 if (!regtab
->reg_num
)
3041 if (str_hash_insert (reg_hash
, regtab
->reg_name
, regtab
, 0) != NULL
)
3042 as_fatal (_("duplicate %s"), regtab
->reg_name
);
3046 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
3051 for (c
= 0; c
< 256; c
++)
3053 if (ISDIGIT (c
) || ISLOWER (c
))
3055 mnemonic_chars
[c
] = c
;
3056 register_chars
[c
] = c
;
3057 operand_chars
[c
] = c
;
3059 else if (ISUPPER (c
))
3061 mnemonic_chars
[c
] = TOLOWER (c
);
3062 register_chars
[c
] = mnemonic_chars
[c
];
3063 operand_chars
[c
] = c
;
3065 else if (c
== '{' || c
== '}')
3067 mnemonic_chars
[c
] = c
;
3068 operand_chars
[c
] = c
;
3070 #ifdef SVR4_COMMENT_CHARS
3071 else if (c
== '\\' && strchr (i386_comment_chars
, '/'))
3072 operand_chars
[c
] = c
;
3075 if (ISALPHA (c
) || ISDIGIT (c
))
3076 identifier_chars
[c
] = c
;
3079 identifier_chars
[c
] = c
;
3080 operand_chars
[c
] = c
;
3085 identifier_chars
['@'] = '@';
3088 identifier_chars
['?'] = '?';
3089 operand_chars
['?'] = '?';
3091 mnemonic_chars
['_'] = '_';
3092 mnemonic_chars
['-'] = '-';
3093 mnemonic_chars
['.'] = '.';
3094 identifier_chars
['_'] = '_';
3095 identifier_chars
['.'] = '.';
3097 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
3098 operand_chars
[(unsigned char) *p
] = *p
;
3101 if (flag_code
== CODE_64BIT
)
3103 #if defined (OBJ_COFF) && defined (TE_PE)
3104 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
3107 x86_dwarf2_return_column
= 16;
3109 x86_cie_data_alignment
= -8;
3110 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3111 x86_sframe_cfa_sp_reg
= 7;
3112 x86_sframe_cfa_fp_reg
= 6;
3117 x86_dwarf2_return_column
= 8;
3118 x86_cie_data_alignment
= -4;
3121 /* NB: FUSED_JCC_PADDING frag must have sufficient room so that it
3122 can be turned into BRANCH_PREFIX frag. */
3123 if (align_branch_prefix_size
> MAX_FUSED_JCC_PADDING_SIZE
)
3128 i386_print_statistics (FILE *file
)
3130 htab_print_statistics (file
, "i386 opcode", op_hash
);
3131 htab_print_statistics (file
, "i386 register", reg_hash
);
3137 htab_delete (op_hash
);
3138 htab_delete (reg_hash
);
3143 /* Debugging routines for md_assemble. */
3144 static void pte (insn_template
*);
3145 static void pt (i386_operand_type
);
3146 static void pe (expressionS
*);
3147 static void ps (symbolS
*);
3150 pi (const char *line
, i386_insn
*x
)
3154 fprintf (stdout
, "%s: template ", line
);
3156 fprintf (stdout
, " address: base %s index %s scale %x\n",
3157 x
->base_reg
? x
->base_reg
->reg_name
: "none",
3158 x
->index_reg
? x
->index_reg
->reg_name
: "none",
3159 x
->log2_scale_factor
);
3160 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
3161 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
3162 fprintf (stdout
, " sib: base %x index %x scale %x\n",
3163 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
3164 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
3165 (x
->rex
& REX_W
) != 0,
3166 (x
->rex
& REX_R
) != 0,
3167 (x
->rex
& REX_X
) != 0,
3168 (x
->rex
& REX_B
) != 0);
3169 for (j
= 0; j
< x
->operands
; j
++)
3171 fprintf (stdout
, " #%d: ", j
+ 1);
3173 fprintf (stdout
, "\n");
3174 if (x
->types
[j
].bitfield
.class == Reg
3175 || x
->types
[j
].bitfield
.class == RegMMX
3176 || x
->types
[j
].bitfield
.class == RegSIMD
3177 || x
->types
[j
].bitfield
.class == RegMask
3178 || x
->types
[j
].bitfield
.class == SReg
3179 || x
->types
[j
].bitfield
.class == RegCR
3180 || x
->types
[j
].bitfield
.class == RegDR
3181 || x
->types
[j
].bitfield
.class == RegTR
3182 || x
->types
[j
].bitfield
.class == RegBND
)
3183 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
3184 if (operand_type_check (x
->types
[j
], imm
))
3186 if (operand_type_check (x
->types
[j
], disp
))
3187 pe (x
->op
[j
].disps
);
3192 pte (insn_template
*t
)
3194 static const unsigned char opc_pfx
[] = { 0, 0x66, 0xf3, 0xf2 };
3195 static const char *const opc_spc
[] = {
3196 NULL
, "0f", "0f38", "0f3a", NULL
, "evexmap5", "evexmap6", NULL
,
3197 "XOP08", "XOP09", "XOP0A",
3201 fprintf (stdout
, " %d operands ", t
->operands
);
3202 if (opc_pfx
[t
->opcode_modifier
.opcodeprefix
])
3203 fprintf (stdout
, "pfx %x ", opc_pfx
[t
->opcode_modifier
.opcodeprefix
]);
3204 if (opc_spc
[t
->opcode_modifier
.opcodespace
])
3205 fprintf (stdout
, "space %s ", opc_spc
[t
->opcode_modifier
.opcodespace
]);
3206 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
3207 if (t
->extension_opcode
!= None
)
3208 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
3209 if (t
->opcode_modifier
.d
)
3210 fprintf (stdout
, "D");
3211 if (t
->opcode_modifier
.w
)
3212 fprintf (stdout
, "W");
3213 fprintf (stdout
, "\n");
3214 for (j
= 0; j
< t
->operands
; j
++)
3216 fprintf (stdout
, " #%d type ", j
+ 1);
3217 pt (t
->operand_types
[j
]);
3218 fprintf (stdout
, "\n");
3225 fprintf (stdout
, " operation %d\n", e
->X_op
);
3226 fprintf (stdout
, " add_number %" PRId64
" (%" PRIx64
")\n",
3227 (int64_t) e
->X_add_number
, (uint64_t) (valueT
) e
->X_add_number
);
3228 if (e
->X_add_symbol
)
3230 fprintf (stdout
, " add_symbol ");
3231 ps (e
->X_add_symbol
);
3232 fprintf (stdout
, "\n");
3236 fprintf (stdout
, " op_symbol ");
3237 ps (e
->X_op_symbol
);
3238 fprintf (stdout
, "\n");
3245 fprintf (stdout
, "%s type %s%s",
3247 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
3248 segment_name (S_GET_SEGMENT (s
)));
3251 static struct type_name
3253 i386_operand_type mask
;
3256 const type_names
[] =
3258 { OPERAND_TYPE_REG8
, "r8" },
3259 { OPERAND_TYPE_REG16
, "r16" },
3260 { OPERAND_TYPE_REG32
, "r32" },
3261 { OPERAND_TYPE_REG64
, "r64" },
3262 { OPERAND_TYPE_ACC8
, "acc8" },
3263 { OPERAND_TYPE_ACC16
, "acc16" },
3264 { OPERAND_TYPE_ACC32
, "acc32" },
3265 { OPERAND_TYPE_ACC64
, "acc64" },
3266 { OPERAND_TYPE_IMM8
, "i8" },
3267 { OPERAND_TYPE_IMM8
, "i8s" },
3268 { OPERAND_TYPE_IMM16
, "i16" },
3269 { OPERAND_TYPE_IMM32
, "i32" },
3270 { OPERAND_TYPE_IMM32S
, "i32s" },
3271 { OPERAND_TYPE_IMM64
, "i64" },
3272 { OPERAND_TYPE_IMM1
, "i1" },
3273 { OPERAND_TYPE_BASEINDEX
, "BaseIndex" },
3274 { OPERAND_TYPE_DISP8
, "d8" },
3275 { OPERAND_TYPE_DISP16
, "d16" },
3276 { OPERAND_TYPE_DISP32
, "d32" },
3277 { OPERAND_TYPE_DISP64
, "d64" },
3278 { OPERAND_TYPE_INOUTPORTREG
, "InOutPortReg" },
3279 { OPERAND_TYPE_SHIFTCOUNT
, "ShiftCount" },
3280 { OPERAND_TYPE_CONTROL
, "control reg" },
3281 { OPERAND_TYPE_TEST
, "test reg" },
3282 { OPERAND_TYPE_DEBUG
, "debug reg" },
3283 { OPERAND_TYPE_FLOATREG
, "FReg" },
3284 { OPERAND_TYPE_FLOATACC
, "FAcc" },
3285 { OPERAND_TYPE_SREG
, "SReg" },
3286 { OPERAND_TYPE_REGMMX
, "rMMX" },
3287 { OPERAND_TYPE_REGXMM
, "rXMM" },
3288 { OPERAND_TYPE_REGYMM
, "rYMM" },
3289 { OPERAND_TYPE_REGZMM
, "rZMM" },
3290 { OPERAND_TYPE_REGTMM
, "rTMM" },
3291 { OPERAND_TYPE_REGMASK
, "Mask reg" },
3295 pt (i386_operand_type t
)
3298 i386_operand_type a
;
3300 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
3302 a
= operand_type_and (t
, type_names
[j
].mask
);
3303 if (operand_type_equal (&a
, &type_names
[j
].mask
))
3304 fprintf (stdout
, "%s, ", type_names
[j
].name
);
3309 #endif /* DEBUG386 */
3311 static bfd_reloc_code_real_type
3312 reloc (unsigned int size
,
3315 bfd_reloc_code_real_type other
)
3317 if (other
!= NO_RELOC
)
3319 reloc_howto_type
*rel
;
3324 case BFD_RELOC_X86_64_GOT32
:
3325 return BFD_RELOC_X86_64_GOT64
;
3327 case BFD_RELOC_X86_64_GOTPLT64
:
3328 return BFD_RELOC_X86_64_GOTPLT64
;
3330 case BFD_RELOC_X86_64_PLTOFF64
:
3331 return BFD_RELOC_X86_64_PLTOFF64
;
3333 case BFD_RELOC_X86_64_GOTPC32
:
3334 other
= BFD_RELOC_X86_64_GOTPC64
;
3336 case BFD_RELOC_X86_64_GOTPCREL
:
3337 other
= BFD_RELOC_X86_64_GOTPCREL64
;
3339 case BFD_RELOC_X86_64_TPOFF32
:
3340 other
= BFD_RELOC_X86_64_TPOFF64
;
3342 case BFD_RELOC_X86_64_DTPOFF32
:
3343 other
= BFD_RELOC_X86_64_DTPOFF64
;
3349 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3350 if (other
== BFD_RELOC_SIZE32
)
3353 other
= BFD_RELOC_SIZE64
;
3356 as_bad (_("there are no pc-relative size relocations"));
3362 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
3363 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
3366 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
3368 as_bad (_("unknown relocation (%u)"), other
);
3369 else if (size
!= bfd_get_reloc_size (rel
))
3370 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3371 bfd_get_reloc_size (rel
),
3373 else if (pcrel
&& !rel
->pc_relative
)
3374 as_bad (_("non-pc-relative relocation for pc-relative field"));
3375 else if ((rel
->complain_on_overflow
== complain_overflow_signed
3377 || (rel
->complain_on_overflow
== complain_overflow_unsigned
3379 as_bad (_("relocated field and relocation type differ in signedness"));
3388 as_bad (_("there are no unsigned pc-relative relocations"));
3391 case 1: return BFD_RELOC_8_PCREL
;
3392 case 2: return BFD_RELOC_16_PCREL
;
3393 case 4: return BFD_RELOC_32_PCREL
;
3394 case 8: return BFD_RELOC_64_PCREL
;
3396 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
3403 case 4: return BFD_RELOC_X86_64_32S
;
3408 case 1: return BFD_RELOC_8
;
3409 case 2: return BFD_RELOC_16
;
3410 case 4: return BFD_RELOC_32
;
3411 case 8: return BFD_RELOC_64
;
3413 as_bad (_("cannot do %s %u byte relocation"),
3414 sign
> 0 ? "signed" : "unsigned", size
);
3420 /* Here we decide which fixups can be adjusted to make them relative to
3421 the beginning of the section instead of the symbol. Basically we need
3422 to make sure that the dynamic relocations are done correctly, so in
3423 some cases we force the original symbol to be used. */
3426 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
3428 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3432 /* Don't adjust pc-relative references to merge sections in 64-bit
3434 if (use_rela_relocations
3435 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
3439 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3440 and changed later by validate_fix. */
3441 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
3442 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
3445 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3446 for size relocations. */
3447 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
3448 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
3449 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
3450 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
3451 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32X
3452 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
3453 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
3454 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
3455 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
3456 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
3457 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
3458 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
3459 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
3460 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
3461 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
3462 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
3463 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
3464 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCRELX
3465 || fixP
->fx_r_type
== BFD_RELOC_X86_64_REX_GOTPCRELX
3466 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
3467 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
3468 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
3469 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
3470 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
3471 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
3472 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
3473 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
3474 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
3475 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
3476 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
3477 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
3484 want_disp32 (const insn_template
*t
)
3486 return flag_code
!= CODE_64BIT
3487 || i
.prefix
[ADDR_PREFIX
]
3488 || (t
->base_opcode
== 0x8d
3489 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
3490 && (!i
.types
[1].bitfield
.qword
3491 || t
->opcode_modifier
.size
== SIZE32
));
3495 intel_float_operand (const char *mnemonic
)
3497 /* Note that the value returned is meaningful only for opcodes with (memory)
3498 operands, hence the code here is free to improperly handle opcodes that
3499 have no operands (for better performance and smaller code). */
3501 if (mnemonic
[0] != 'f')
3502 return 0; /* non-math */
3504 switch (mnemonic
[1])
3506 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
3507 the fs segment override prefix not currently handled because no
3508 call path can make opcodes without operands get here */
3510 return 2 /* integer op */;
3512 if (mnemonic
[2] == 'd' && (mnemonic
[3] == 'c' || mnemonic
[3] == 'e'))
3513 return 3; /* fldcw/fldenv */
3516 if (mnemonic
[2] != 'o' /* fnop */)
3517 return 3; /* non-waiting control op */
3520 if (mnemonic
[2] == 's')
3521 return 3; /* frstor/frstpm */
3524 if (mnemonic
[2] == 'a')
3525 return 3; /* fsave */
3526 if (mnemonic
[2] == 't')
3528 switch (mnemonic
[3])
3530 case 'c': /* fstcw */
3531 case 'd': /* fstdw */
3532 case 'e': /* fstenv */
3533 case 's': /* fsts[gw] */
3539 if (mnemonic
[2] == 'r' || mnemonic
[2] == 's')
3540 return 0; /* fxsave/fxrstor are not really math ops */
3548 install_template (const insn_template
*t
)
3554 /* Note that for pseudo prefixes this produces a length of 1. But for them
3555 the length isn't interesting at all. */
3556 for (l
= 1; l
< 4; ++l
)
3557 if (!(t
->base_opcode
>> (8 * l
)))
3560 i
.opcode_length
= l
;
3563 /* Build the VEX prefix. */
3566 build_vex_prefix (const insn_template
*t
)
3568 unsigned int register_specifier
;
3569 unsigned int vector_length
;
3572 /* Check register specifier. */
3573 if (i
.vex
.register_specifier
)
3575 register_specifier
=
3576 ~register_number (i
.vex
.register_specifier
) & 0xf;
3577 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
3580 register_specifier
= 0xf;
3582 /* Use 2-byte VEX prefix by swapping destination and source operand
3583 if there are more than 1 register operand. */
3584 if (i
.reg_operands
> 1
3585 && i
.vec_encoding
!= vex_encoding_vex3
3586 && i
.dir_encoding
== dir_encoding_default
3587 && i
.operands
== i
.reg_operands
3588 && operand_type_equal (&i
.types
[0], &i
.types
[i
.operands
- 1])
3589 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
3590 && (i
.tm
.opcode_modifier
.load
|| i
.tm
.opcode_modifier
.d
)
3593 unsigned int xchg
= i
.operands
- 1;
3594 union i386_op temp_op
;
3595 i386_operand_type temp_type
;
3597 temp_type
= i
.types
[xchg
];
3598 i
.types
[xchg
] = i
.types
[0];
3599 i
.types
[0] = temp_type
;
3600 temp_op
= i
.op
[xchg
];
3601 i
.op
[xchg
] = i
.op
[0];
3604 gas_assert (i
.rm
.mode
== 3);
3608 i
.rm
.regmem
= i
.rm
.reg
;
3611 if (i
.tm
.opcode_modifier
.d
)
3612 i
.tm
.base_opcode
^= (i
.tm
.base_opcode
& 0xee) != 0x6e
3613 ? Opcode_ExtD
: Opcode_SIMD_IntD
;
3614 else /* Use the next insn. */
3615 install_template (&t
[1]);
3618 /* Use 2-byte VEX prefix by swapping commutative source operands if there
3619 are no memory operands and at least 3 register ones. */
3620 if (i
.reg_operands
>= 3
3621 && i
.vec_encoding
!= vex_encoding_vex3
3622 && i
.reg_operands
== i
.operands
- i
.imm_operands
3623 && i
.tm
.opcode_modifier
.vex
3624 && i
.tm
.opcode_modifier
.commutative
3625 && (i
.tm
.opcode_modifier
.sse2avx
|| optimize
> 1)
3627 && i
.vex
.register_specifier
3628 && !(i
.vex
.register_specifier
->reg_flags
& RegRex
))
3630 unsigned int xchg
= i
.operands
- i
.reg_operands
;
3631 union i386_op temp_op
;
3632 i386_operand_type temp_type
;
3634 gas_assert (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
);
3635 gas_assert (!i
.tm
.opcode_modifier
.sae
);
3636 gas_assert (operand_type_equal (&i
.types
[i
.operands
- 2],
3637 &i
.types
[i
.operands
- 3]));
3638 gas_assert (i
.rm
.mode
== 3);
3640 temp_type
= i
.types
[xchg
];
3641 i
.types
[xchg
] = i
.types
[xchg
+ 1];
3642 i
.types
[xchg
+ 1] = temp_type
;
3643 temp_op
= i
.op
[xchg
];
3644 i
.op
[xchg
] = i
.op
[xchg
+ 1];
3645 i
.op
[xchg
+ 1] = temp_op
;
3648 xchg
= i
.rm
.regmem
| 8;
3649 i
.rm
.regmem
= ~register_specifier
& 0xf;
3650 gas_assert (!(i
.rm
.regmem
& 8));
3651 i
.vex
.register_specifier
+= xchg
- i
.rm
.regmem
;
3652 register_specifier
= ~xchg
& 0xf;
3655 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3656 vector_length
= avxscalar
;
3657 else if (i
.tm
.opcode_modifier
.vex
== VEX256
)
3663 /* Determine vector length from the last multi-length vector
3666 for (op
= t
->operands
; op
--;)
3667 if (t
->operand_types
[op
].bitfield
.xmmword
3668 && t
->operand_types
[op
].bitfield
.ymmword
3669 && i
.types
[op
].bitfield
.ymmword
)
3676 /* Check the REX.W bit and VEXW. */
3677 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3678 w
= (vexwig
== vexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3679 else if (i
.tm
.opcode_modifier
.vexw
)
3680 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3682 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: vexwig
== vexw1
) ? 1 : 0;
3684 /* Use 2-byte VEX prefix if possible. */
3686 && i
.vec_encoding
!= vex_encoding_vex3
3687 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
3688 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
3690 /* 2-byte VEX prefix. */
3694 i
.vex
.bytes
[0] = 0xc5;
3696 /* Check the REX.R bit. */
3697 r
= (i
.rex
& REX_R
) ? 0 : 1;
3698 i
.vex
.bytes
[1] = (r
<< 7
3699 | register_specifier
<< 3
3700 | vector_length
<< 2
3701 | i
.tm
.opcode_modifier
.opcodeprefix
);
3705 /* 3-byte VEX prefix. */
3708 switch (i
.tm
.opcode_modifier
.opcodespace
)
3713 i
.vex
.bytes
[0] = 0xc4;
3718 i
.vex
.bytes
[0] = 0x8f;
3724 /* The high 3 bits of the second VEX byte are 1's compliment
3725 of RXB bits from REX. */
3726 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | i
.tm
.opcode_modifier
.opcodespace
;
3728 i
.vex
.bytes
[2] = (w
<< 7
3729 | register_specifier
<< 3
3730 | vector_length
<< 2
3731 | i
.tm
.opcode_modifier
.opcodeprefix
);
3736 is_evex_encoding (const insn_template
*t
)
3738 return t
->opcode_modifier
.evex
|| t
->opcode_modifier
.disp8memshift
3739 || t
->opcode_modifier
.broadcast
|| t
->opcode_modifier
.masking
3740 || t
->opcode_modifier
.sae
;
3744 is_any_vex_encoding (const insn_template
*t
)
3746 return t
->opcode_modifier
.vex
|| is_evex_encoding (t
);
3750 get_broadcast_bytes (const insn_template
*t
, bool diag
)
3752 unsigned int op
, bytes
;
3753 const i386_operand_type
*types
;
3755 if (i
.broadcast
.type
)
3756 return i
.broadcast
.bytes
= ((1 << (t
->opcode_modifier
.broadcast
- 1))
3757 * i
.broadcast
.type
);
3759 gas_assert (intel_syntax
);
3761 for (op
= 0; op
< t
->operands
; ++op
)
3762 if (t
->operand_types
[op
].bitfield
.baseindex
)
3765 gas_assert (op
< t
->operands
);
3767 if (t
->opcode_modifier
.evex
3768 && t
->opcode_modifier
.evex
!= EVEXDYN
)
3769 switch (i
.broadcast
.bytes
)
3772 if (t
->operand_types
[op
].bitfield
.word
)
3776 if (t
->operand_types
[op
].bitfield
.dword
)
3780 if (t
->operand_types
[op
].bitfield
.qword
)
3784 if (t
->operand_types
[op
].bitfield
.xmmword
)
3786 if (t
->operand_types
[op
].bitfield
.ymmword
)
3788 if (t
->operand_types
[op
].bitfield
.zmmword
)
3795 gas_assert (op
+ 1 < t
->operands
);
3797 if (t
->operand_types
[op
+ 1].bitfield
.xmmword
3798 + t
->operand_types
[op
+ 1].bitfield
.ymmword
3799 + t
->operand_types
[op
+ 1].bitfield
.zmmword
> 1)
3801 types
= &i
.types
[op
+ 1];
3804 else /* Ambiguous - guess with a preference to non-AVX512VL forms. */
3805 types
= &t
->operand_types
[op
];
3807 if (types
->bitfield
.zmmword
)
3809 else if (types
->bitfield
.ymmword
)
3815 as_warn (_("ambiguous broadcast for `%s', using %u-bit form"),
3816 t
->name
, bytes
* 8);
3821 /* Build the EVEX prefix. */
3824 build_evex_prefix (void)
3826 unsigned int register_specifier
, w
;
3827 rex_byte vrex_used
= 0;
3829 /* Check register specifier. */
3830 if (i
.vex
.register_specifier
)
3832 gas_assert ((i
.vrex
& REX_X
) == 0);
3834 register_specifier
= i
.vex
.register_specifier
->reg_num
;
3835 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
3836 register_specifier
+= 8;
3837 /* The upper 16 registers are encoded in the fourth byte of the
3839 if (!(i
.vex
.register_specifier
->reg_flags
& RegVRex
))
3840 i
.vex
.bytes
[3] = 0x8;
3841 register_specifier
= ~register_specifier
& 0xf;
3845 register_specifier
= 0xf;
3847 /* Encode upper 16 vector index register in the fourth byte of
3849 if (!(i
.vrex
& REX_X
))
3850 i
.vex
.bytes
[3] = 0x8;
3855 /* 4 byte EVEX prefix. */
3857 i
.vex
.bytes
[0] = 0x62;
3859 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3861 gas_assert (i
.tm
.opcode_modifier
.opcodespace
>= SPACE_0F
);
3862 gas_assert (i
.tm
.opcode_modifier
.opcodespace
<= SPACE_EVEXMAP6
);
3863 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | i
.tm
.opcode_modifier
.opcodespace
;
3865 /* The fifth bit of the second EVEX byte is 1's compliment of the
3866 REX_R bit in VREX. */
3867 if (!(i
.vrex
& REX_R
))
3868 i
.vex
.bytes
[1] |= 0x10;
3872 if ((i
.reg_operands
+ i
.imm_operands
) == i
.operands
)
3874 /* When all operands are registers, the REX_X bit in REX is not
3875 used. We reuse it to encode the upper 16 registers, which is
3876 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3877 as 1's compliment. */
3878 if ((i
.vrex
& REX_B
))
3881 i
.vex
.bytes
[1] &= ~0x40;
3885 /* EVEX instructions shouldn't need the REX prefix. */
3886 i
.vrex
&= ~vrex_used
;
3887 gas_assert (i
.vrex
== 0);
3889 /* Check the REX.W bit and VEXW. */
3890 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3891 w
= (evexwig
== evexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3892 else if (i
.tm
.opcode_modifier
.vexw
)
3893 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3895 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: evexwig
== evexw1
) ? 1 : 0;
3897 /* The third byte of the EVEX prefix. */
3898 i
.vex
.bytes
[2] = ((w
<< 7)
3899 | (register_specifier
<< 3)
3900 | 4 /* Encode the U bit. */
3901 | i
.tm
.opcode_modifier
.opcodeprefix
);
3903 /* The fourth byte of the EVEX prefix. */
3904 /* The zeroing-masking bit. */
3905 if (i
.mask
.reg
&& i
.mask
.zeroing
)
3906 i
.vex
.bytes
[3] |= 0x80;
3908 /* Don't always set the broadcast bit if there is no RC. */
3909 if (i
.rounding
.type
== rc_none
)
3911 /* Encode the vector length. */
3912 unsigned int vec_length
;
3914 if (!i
.tm
.opcode_modifier
.evex
3915 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
3919 /* Determine vector length from the last multi-length vector
3921 for (op
= i
.operands
; op
--;)
3922 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
3923 + i
.tm
.operand_types
[op
].bitfield
.ymmword
3924 + i
.tm
.operand_types
[op
].bitfield
.zmmword
> 1)
3926 if (i
.types
[op
].bitfield
.zmmword
)
3928 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3931 else if (i
.types
[op
].bitfield
.ymmword
)
3933 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3936 else if (i
.types
[op
].bitfield
.xmmword
)
3938 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3941 else if (i
.broadcast
.bytes
&& op
== i
.broadcast
.operand
)
3943 switch (get_broadcast_bytes (&i
.tm
, true))
3946 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3949 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3952 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3961 if (op
>= MAX_OPERANDS
)
3965 switch (i
.tm
.opcode_modifier
.evex
)
3967 case EVEXLIG
: /* LL' is ignored */
3968 vec_length
= evexlig
<< 5;
3971 vec_length
= 0 << 5;
3974 vec_length
= 1 << 5;
3977 vec_length
= 2 << 5;
3983 i
.vex
.bytes
[3] |= vec_length
;
3984 /* Encode the broadcast bit. */
3985 if (i
.broadcast
.bytes
)
3986 i
.vex
.bytes
[3] |= 0x10;
3988 else if (i
.rounding
.type
!= saeonly
)
3989 i
.vex
.bytes
[3] |= 0x10 | (i
.rounding
.type
<< 5);
3991 i
.vex
.bytes
[3] |= 0x10 | (evexrcig
<< 5);
3994 i
.vex
.bytes
[3] |= i
.mask
.reg
->reg_num
;
3998 process_immext (void)
4002 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
4003 which is coded in the same place as an 8-bit immediate field
4004 would be. Here we fake an 8-bit immediate operand from the
4005 opcode suffix stored in tm.extension_opcode.
4007 AVX instructions also use this encoding, for some of
4008 3 argument instructions. */
4010 gas_assert (i
.imm_operands
<= 1
4012 || (is_any_vex_encoding (&i
.tm
)
4013 && i
.operands
<= 4)));
4015 exp
= &im_expressions
[i
.imm_operands
++];
4016 i
.op
[i
.operands
].imms
= exp
;
4017 i
.types
[i
.operands
].bitfield
.imm8
= 1;
4019 exp
->X_op
= O_constant
;
4020 exp
->X_add_number
= i
.tm
.extension_opcode
;
4021 i
.tm
.extension_opcode
= None
;
4028 switch (i
.tm
.opcode_modifier
.prefixok
)
4036 as_bad (_("invalid instruction `%s' after `%s'"),
4037 i
.tm
.name
, i
.hle_prefix
);
4040 if (i
.prefix
[LOCK_PREFIX
])
4042 as_bad (_("missing `lock' with `%s'"), i
.hle_prefix
);
4046 case PrefixHLERelease
:
4047 if (i
.prefix
[HLE_PREFIX
] != XRELEASE_PREFIX_OPCODE
)
4049 as_bad (_("instruction `%s' after `xacquire' not allowed"),
4053 if (i
.mem_operands
== 0 || !(i
.flags
[i
.operands
- 1] & Operand_Mem
))
4055 as_bad (_("memory destination needed for instruction `%s'"
4056 " after `xrelease'"), i
.tm
.name
);
4063 /* Encode aligned vector move as unaligned vector move. */
4066 encode_with_unaligned_vector_move (void)
4068 switch (i
.tm
.base_opcode
)
4070 case 0x28: /* Load instructions. */
4071 case 0x29: /* Store instructions. */
4072 /* movaps/movapd/vmovaps/vmovapd. */
4073 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4074 && i
.tm
.opcode_modifier
.opcodeprefix
<= PREFIX_0X66
)
4075 i
.tm
.base_opcode
= 0x10 | (i
.tm
.base_opcode
& 1);
4077 case 0x6f: /* Load instructions. */
4078 case 0x7f: /* Store instructions. */
4079 /* movdqa/vmovdqa/vmovdqa64/vmovdqa32. */
4080 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4081 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0X66
)
4082 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_0XF3
;
4089 /* Try the shortest encoding by shortening operand size. */
4092 optimize_encoding (void)
4096 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4097 && i
.tm
.base_opcode
== 0x8d)
4100 lea symbol, %rN -> mov $symbol, %rN
4101 lea (%rM), %rN -> mov %rM, %rN
4102 lea (,%rM,1), %rN -> mov %rM, %rN
4104 and in 32-bit mode for 16-bit addressing
4106 lea (%rM), %rN -> movzx %rM, %rN
4108 and in 64-bit mode zap 32-bit addressing in favor of using a
4109 32-bit (or less) destination.
4111 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
4113 if (!i
.op
[1].regs
->reg_type
.bitfield
.word
)
4114 i
.tm
.opcode_modifier
.size
= SIZE32
;
4115 i
.prefix
[ADDR_PREFIX
] = 0;
4118 if (!i
.index_reg
&& !i
.base_reg
)
4121 lea symbol, %rN -> mov $symbol, %rN
4123 if (flag_code
== CODE_64BIT
)
4125 /* Don't transform a relocation to a 16-bit one. */
4127 && i
.op
[0].disps
->X_op
!= O_constant
4128 && i
.op
[1].regs
->reg_type
.bitfield
.word
)
4131 if (!i
.op
[1].regs
->reg_type
.bitfield
.qword
4132 || i
.tm
.opcode_modifier
.size
== SIZE32
)
4134 i
.tm
.base_opcode
= 0xb8;
4135 i
.tm
.opcode_modifier
.modrm
= 0;
4136 if (!i
.op
[1].regs
->reg_type
.bitfield
.word
)
4137 i
.types
[0].bitfield
.imm32
= 1;
4140 i
.tm
.opcode_modifier
.size
= SIZE16
;
4141 i
.types
[0].bitfield
.imm16
= 1;
4146 /* Subject to further optimization below. */
4147 i
.tm
.base_opcode
= 0xc7;
4148 i
.tm
.extension_opcode
= 0;
4149 i
.types
[0].bitfield
.imm32s
= 1;
4150 i
.types
[0].bitfield
.baseindex
= 0;
4153 /* Outside of 64-bit mode address and operand sizes have to match if
4154 a relocation is involved, as otherwise we wouldn't (currently) or
4155 even couldn't express the relocation correctly. */
4156 else if (i
.op
[0].disps
4157 && i
.op
[0].disps
->X_op
!= O_constant
4158 && ((!i
.prefix
[ADDR_PREFIX
])
4159 != (flag_code
== CODE_32BIT
4160 ? i
.op
[1].regs
->reg_type
.bitfield
.dword
4161 : i
.op
[1].regs
->reg_type
.bitfield
.word
)))
4163 /* In 16-bit mode converting LEA with 16-bit addressing and a 32-bit
4164 destination is going to grow encoding size. */
4165 else if (flag_code
== CODE_16BIT
4166 && (optimize
<= 1 || optimize_for_space
)
4167 && !i
.prefix
[ADDR_PREFIX
]
4168 && i
.op
[1].regs
->reg_type
.bitfield
.dword
)
4172 i
.tm
.base_opcode
= 0xb8;
4173 i
.tm
.opcode_modifier
.modrm
= 0;
4174 if (i
.op
[1].regs
->reg_type
.bitfield
.dword
)
4175 i
.types
[0].bitfield
.imm32
= 1;
4177 i
.types
[0].bitfield
.imm16
= 1;
4180 && i
.op
[0].disps
->X_op
== O_constant
4181 && i
.op
[1].regs
->reg_type
.bitfield
.dword
4182 /* NB: Add () to !i.prefix[ADDR_PREFIX] to silence
4184 && (!i
.prefix
[ADDR_PREFIX
]) != (flag_code
== CODE_32BIT
))
4185 i
.op
[0].disps
->X_add_number
&= 0xffff;
4188 i
.tm
.operand_types
[0] = i
.types
[0];
4192 i
.op
[0].imms
= &im_expressions
[0];
4193 i
.op
[0].imms
->X_op
= O_absent
;
4196 else if (i
.op
[0].disps
4197 && (i
.op
[0].disps
->X_op
!= O_constant
4198 || i
.op
[0].disps
->X_add_number
))
4203 lea (%rM), %rN -> mov %rM, %rN
4204 lea (,%rM,1), %rN -> mov %rM, %rN
4205 lea (%rM), %rN -> movzx %rM, %rN
4207 const reg_entry
*addr_reg
;
4209 if (!i
.index_reg
&& i
.base_reg
->reg_num
!= RegIP
)
4210 addr_reg
= i
.base_reg
;
4211 else if (!i
.base_reg
4212 && i
.index_reg
->reg_num
!= RegIZ
4213 && !i
.log2_scale_factor
)
4214 addr_reg
= i
.index_reg
;
4218 if (addr_reg
->reg_type
.bitfield
.word
4219 && i
.op
[1].regs
->reg_type
.bitfield
.dword
)
4221 if (flag_code
!= CODE_32BIT
)
4223 i
.tm
.opcode_modifier
.opcodespace
= SPACE_0F
;
4224 i
.tm
.base_opcode
= 0xb7;
4227 i
.tm
.base_opcode
= 0x8b;
4229 if (addr_reg
->reg_type
.bitfield
.dword
4230 && i
.op
[1].regs
->reg_type
.bitfield
.qword
)
4231 i
.tm
.opcode_modifier
.size
= SIZE32
;
4233 i
.op
[0].regs
= addr_reg
;
4238 i
.disp_operands
= 0;
4239 i
.prefix
[ADDR_PREFIX
] = 0;
4240 i
.prefix
[SEG_PREFIX
] = 0;
4244 if (optimize_for_space
4245 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4246 && i
.reg_operands
== 1
4247 && i
.imm_operands
== 1
4248 && !i
.types
[1].bitfield
.byte
4249 && i
.op
[0].imms
->X_op
== O_constant
4250 && fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4251 && (i
.tm
.base_opcode
== 0xa8
4252 || (i
.tm
.base_opcode
== 0xf6
4253 && i
.tm
.extension_opcode
== 0x0)))
4256 test $imm7, %r64/%r32/%r16 -> test $imm7, %r8
4258 unsigned int base_regnum
= i
.op
[1].regs
->reg_num
;
4259 if (flag_code
== CODE_64BIT
|| base_regnum
< 4)
4261 i
.types
[1].bitfield
.byte
= 1;
4262 /* Ignore the suffix. */
4264 /* Convert to byte registers. */
4265 if (i
.types
[1].bitfield
.word
)
4267 else if (i
.types
[1].bitfield
.dword
)
4271 if (!(i
.op
[1].regs
->reg_flags
& RegRex
) && base_regnum
< 4)
4276 else if (flag_code
== CODE_64BIT
4277 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4278 && ((i
.types
[1].bitfield
.qword
4279 && i
.reg_operands
== 1
4280 && i
.imm_operands
== 1
4281 && i
.op
[0].imms
->X_op
== O_constant
4282 && ((i
.tm
.base_opcode
== 0xb8
4283 && i
.tm
.extension_opcode
== None
4284 && fits_in_unsigned_long (i
.op
[0].imms
->X_add_number
))
4285 || (fits_in_imm31 (i
.op
[0].imms
->X_add_number
)
4286 && ((i
.tm
.base_opcode
== 0x24
4287 || i
.tm
.base_opcode
== 0xa8)
4288 || (i
.tm
.base_opcode
== 0x80
4289 && i
.tm
.extension_opcode
== 0x4)
4290 || ((i
.tm
.base_opcode
== 0xf6
4291 || (i
.tm
.base_opcode
| 1) == 0xc7)
4292 && i
.tm
.extension_opcode
== 0x0)))
4293 || (fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4294 && i
.tm
.base_opcode
== 0x83
4295 && i
.tm
.extension_opcode
== 0x4)))
4296 || (i
.types
[0].bitfield
.qword
4297 && ((i
.reg_operands
== 2
4298 && i
.op
[0].regs
== i
.op
[1].regs
4299 && (i
.tm
.base_opcode
== 0x30
4300 || i
.tm
.base_opcode
== 0x28))
4301 || (i
.reg_operands
== 1
4303 && i
.tm
.base_opcode
== 0x30)))))
4306 andq $imm31, %r64 -> andl $imm31, %r32
4307 andq $imm7, %r64 -> andl $imm7, %r32
4308 testq $imm31, %r64 -> testl $imm31, %r32
4309 xorq %r64, %r64 -> xorl %r32, %r32
4310 subq %r64, %r64 -> subl %r32, %r32
4311 movq $imm31, %r64 -> movl $imm31, %r32
4312 movq $imm32, %r64 -> movl $imm32, %r32
4314 i
.tm
.opcode_modifier
.norex64
= 1;
4315 if (i
.tm
.base_opcode
== 0xb8 || (i
.tm
.base_opcode
| 1) == 0xc7)
4318 movq $imm31, %r64 -> movl $imm31, %r32
4319 movq $imm32, %r64 -> movl $imm32, %r32
4321 i
.tm
.operand_types
[0].bitfield
.imm32
= 1;
4322 i
.tm
.operand_types
[0].bitfield
.imm32s
= 0;
4323 i
.tm
.operand_types
[0].bitfield
.imm64
= 0;
4324 i
.types
[0].bitfield
.imm32
= 1;
4325 i
.types
[0].bitfield
.imm32s
= 0;
4326 i
.types
[0].bitfield
.imm64
= 0;
4327 i
.types
[1].bitfield
.dword
= 1;
4328 i
.types
[1].bitfield
.qword
= 0;
4329 if ((i
.tm
.base_opcode
| 1) == 0xc7)
4332 movq $imm31, %r64 -> movl $imm31, %r32
4334 i
.tm
.base_opcode
= 0xb8;
4335 i
.tm
.extension_opcode
= None
;
4336 i
.tm
.opcode_modifier
.w
= 0;
4337 i
.tm
.opcode_modifier
.modrm
= 0;
4341 else if (optimize
> 1
4342 && !optimize_for_space
4343 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4344 && i
.reg_operands
== 2
4345 && i
.op
[0].regs
== i
.op
[1].regs
4346 && ((i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x8
4347 || (i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x20)
4348 && (flag_code
!= CODE_64BIT
|| !i
.types
[0].bitfield
.dword
))
4351 andb %rN, %rN -> testb %rN, %rN
4352 andw %rN, %rN -> testw %rN, %rN
4353 andq %rN, %rN -> testq %rN, %rN
4354 orb %rN, %rN -> testb %rN, %rN
4355 orw %rN, %rN -> testw %rN, %rN
4356 orq %rN, %rN -> testq %rN, %rN
4358 and outside of 64-bit mode
4360 andl %rN, %rN -> testl %rN, %rN
4361 orl %rN, %rN -> testl %rN, %rN
4363 i
.tm
.base_opcode
= 0x84 | (i
.tm
.base_opcode
& 1);
4365 else if (i
.reg_operands
== 3
4366 && i
.op
[0].regs
== i
.op
[1].regs
4367 && !i
.types
[2].bitfield
.xmmword
4368 && (i
.tm
.opcode_modifier
.vex
4369 || ((!i
.mask
.reg
|| i
.mask
.zeroing
)
4370 && is_evex_encoding (&i
.tm
)
4371 && (i
.vec_encoding
!= vex_encoding_evex
4372 || cpu_arch_isa_flags
.bitfield
.cpuavx512vl
4373 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
4374 || (i
.tm
.operand_types
[2].bitfield
.zmmword
4375 && i
.types
[2].bitfield
.ymmword
))))
4376 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4377 && ((i
.tm
.base_opcode
| 2) == 0x57
4378 || i
.tm
.base_opcode
== 0xdf
4379 || i
.tm
.base_opcode
== 0xef
4380 || (i
.tm
.base_opcode
| 3) == 0xfb
4381 || i
.tm
.base_opcode
== 0x42
4382 || i
.tm
.base_opcode
== 0x47))
4385 VOP, one of vandnps, vandnpd, vxorps, vxorpd, vpsubb, vpsubd,
4387 EVEX VOP %zmmM, %zmmM, %zmmN
4388 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4389 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4390 EVEX VOP %ymmM, %ymmM, %ymmN
4391 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4392 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4393 VEX VOP %ymmM, %ymmM, %ymmN
4394 -> VEX VOP %xmmM, %xmmM, %xmmN
4395 VOP, one of vpandn and vpxor:
4396 VEX VOP %ymmM, %ymmM, %ymmN
4397 -> VEX VOP %xmmM, %xmmM, %xmmN
4398 VOP, one of vpandnd and vpandnq:
4399 EVEX VOP %zmmM, %zmmM, %zmmN
4400 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4401 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4402 EVEX VOP %ymmM, %ymmM, %ymmN
4403 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4404 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4405 VOP, one of vpxord and vpxorq:
4406 EVEX VOP %zmmM, %zmmM, %zmmN
4407 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4408 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4409 EVEX VOP %ymmM, %ymmM, %ymmN
4410 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4411 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4412 VOP, one of kxord and kxorq:
4413 VEX VOP %kM, %kM, %kN
4414 -> VEX kxorw %kM, %kM, %kN
4415 VOP, one of kandnd and kandnq:
4416 VEX VOP %kM, %kM, %kN
4417 -> VEX kandnw %kM, %kM, %kN
4419 if (is_evex_encoding (&i
.tm
))
4421 if (i
.vec_encoding
!= vex_encoding_evex
)
4423 i
.tm
.opcode_modifier
.vex
= VEX128
;
4424 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4425 i
.tm
.opcode_modifier
.evex
= 0;
4427 else if (optimize
> 1)
4428 i
.tm
.opcode_modifier
.evex
= EVEX128
;
4432 else if (i
.tm
.operand_types
[0].bitfield
.class == RegMask
)
4434 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_NONE
;
4435 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4438 i
.tm
.opcode_modifier
.vex
= VEX128
;
4440 if (i
.tm
.opcode_modifier
.vex
)
4441 for (j
= 0; j
< 3; j
++)
4443 i
.types
[j
].bitfield
.xmmword
= 1;
4444 i
.types
[j
].bitfield
.ymmword
= 0;
4447 else if (i
.vec_encoding
!= vex_encoding_evex
4448 && !i
.types
[0].bitfield
.zmmword
4449 && !i
.types
[1].bitfield
.zmmword
4451 && !i
.broadcast
.bytes
4452 && is_evex_encoding (&i
.tm
)
4453 && ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x6f
4454 || (i
.tm
.base_opcode
& ~4) == 0xdb
4455 || (i
.tm
.base_opcode
& ~4) == 0xeb)
4456 && i
.tm
.extension_opcode
== None
)
4459 VOP, one of vmovdqa32, vmovdqa64, vmovdqu8, vmovdqu16,
4460 vmovdqu32 and vmovdqu64:
4461 EVEX VOP %xmmM, %xmmN
4462 -> VEX vmovdqa|vmovdqu %xmmM, %xmmN (M and N < 16)
4463 EVEX VOP %ymmM, %ymmN
4464 -> VEX vmovdqa|vmovdqu %ymmM, %ymmN (M and N < 16)
4466 -> VEX vmovdqa|vmovdqu %xmmM, mem (M < 16)
4468 -> VEX vmovdqa|vmovdqu %ymmM, mem (M < 16)
4470 -> VEX mvmovdqa|vmovdquem, %xmmN (N < 16)
4472 -> VEX vmovdqa|vmovdqu mem, %ymmN (N < 16)
4473 VOP, one of vpand, vpandn, vpor, vpxor:
4474 EVEX VOP{d,q} %xmmL, %xmmM, %xmmN
4475 -> VEX VOP %xmmL, %xmmM, %xmmN (L, M, and N < 16)
4476 EVEX VOP{d,q} %ymmL, %ymmM, %ymmN
4477 -> VEX VOP %ymmL, %ymmM, %ymmN (L, M, and N < 16)
4478 EVEX VOP{d,q} mem, %xmmM, %xmmN
4479 -> VEX VOP mem, %xmmM, %xmmN (M and N < 16)
4480 EVEX VOP{d,q} mem, %ymmM, %ymmN
4481 -> VEX VOP mem, %ymmM, %ymmN (M and N < 16)
4483 for (j
= 0; j
< i
.operands
; j
++)
4484 if (operand_type_check (i
.types
[j
], disp
)
4485 && i
.op
[j
].disps
->X_op
== O_constant
)
4487 /* Since the VEX prefix has 2 or 3 bytes, the EVEX prefix
4488 has 4 bytes, EVEX Disp8 has 1 byte and VEX Disp32 has 4
4489 bytes, we choose EVEX Disp8 over VEX Disp32. */
4490 int evex_disp8
, vex_disp8
;
4491 unsigned int memshift
= i
.memshift
;
4492 offsetT n
= i
.op
[j
].disps
->X_add_number
;
4494 evex_disp8
= fits_in_disp8 (n
);
4496 vex_disp8
= fits_in_disp8 (n
);
4497 if (evex_disp8
!= vex_disp8
)
4499 i
.memshift
= memshift
;
4503 i
.types
[j
].bitfield
.disp8
= vex_disp8
;
4506 if ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x6f
4507 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
)
4508 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_0XF3
;
4509 i
.tm
.opcode_modifier
.vex
4510 = i
.types
[0].bitfield
.ymmword
? VEX256
: VEX128
;
4511 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4512 /* VPAND, VPOR, and VPXOR are commutative. */
4513 if (i
.reg_operands
== 3 && i
.tm
.base_opcode
!= 0xdf)
4514 i
.tm
.opcode_modifier
.commutative
= 1;
4515 i
.tm
.opcode_modifier
.evex
= 0;
4516 i
.tm
.opcode_modifier
.masking
= 0;
4517 i
.tm
.opcode_modifier
.broadcast
= 0;
4518 i
.tm
.opcode_modifier
.disp8memshift
= 0;
4521 i
.types
[j
].bitfield
.disp8
4522 = fits_in_disp8 (i
.op
[j
].disps
->X_add_number
);
4526 /* Return non-zero for load instruction. */
4532 int any_vex_p
= is_any_vex_encoding (&i
.tm
);
4533 unsigned int base_opcode
= i
.tm
.base_opcode
| 1;
4537 /* Anysize insns: lea, invlpg, clflush, prefetch*, bndmk, bndcl, bndcu,
4538 bndcn, bndstx, bndldx, clflushopt, clwb, cldemote. */
4539 if (i
.tm
.opcode_modifier
.operandconstraint
== ANY_SIZE
)
4543 if (strcmp (i
.tm
.name
, "pop") == 0)
4547 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
4550 if (i
.tm
.base_opcode
== 0x9d
4551 || i
.tm
.base_opcode
== 0x61)
4554 /* movs, cmps, lods, scas. */
4555 if ((i
.tm
.base_opcode
| 0xb) == 0xaf)
4559 if (base_opcode
== 0x6f
4560 || i
.tm
.base_opcode
== 0xd7)
4562 /* NB: For AMD-specific insns with implicit memory operands,
4563 they're intentionally not covered. */
4566 /* No memory operand. */
4567 if (!i
.mem_operands
)
4573 if (i
.tm
.base_opcode
== 0xae
4574 && i
.tm
.opcode_modifier
.vex
4575 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4576 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
4577 && i
.tm
.extension_opcode
== 2)
4580 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
4582 /* test, not, neg, mul, imul, div, idiv. */
4583 if ((i
.tm
.base_opcode
== 0xf6 || i
.tm
.base_opcode
== 0xf7)
4584 && i
.tm
.extension_opcode
!= 1)
4588 if (base_opcode
== 0xff && i
.tm
.extension_opcode
<= 1)
4591 /* add, or, adc, sbb, and, sub, xor, cmp. */
4592 if (i
.tm
.base_opcode
>= 0x80 && i
.tm
.base_opcode
<= 0x83)
4595 /* rol, ror, rcl, rcr, shl/sal, shr, sar. */
4596 if ((base_opcode
== 0xc1
4597 || (i
.tm
.base_opcode
>= 0xd0 && i
.tm
.base_opcode
<= 0xd3))
4598 && i
.tm
.extension_opcode
!= 6)
4601 /* Check for x87 instructions. */
4602 if (base_opcode
>= 0xd8 && base_opcode
<= 0xdf)
4604 /* Skip fst, fstp, fstenv, fstcw. */
4605 if (i
.tm
.base_opcode
== 0xd9
4606 && (i
.tm
.extension_opcode
== 2
4607 || i
.tm
.extension_opcode
== 3
4608 || i
.tm
.extension_opcode
== 6
4609 || i
.tm
.extension_opcode
== 7))
4612 /* Skip fisttp, fist, fistp, fstp. */
4613 if (i
.tm
.base_opcode
== 0xdb
4614 && (i
.tm
.extension_opcode
== 1
4615 || i
.tm
.extension_opcode
== 2
4616 || i
.tm
.extension_opcode
== 3
4617 || i
.tm
.extension_opcode
== 7))
4620 /* Skip fisttp, fst, fstp, fsave, fstsw. */
4621 if (i
.tm
.base_opcode
== 0xdd
4622 && (i
.tm
.extension_opcode
== 1
4623 || i
.tm
.extension_opcode
== 2
4624 || i
.tm
.extension_opcode
== 3
4625 || i
.tm
.extension_opcode
== 6
4626 || i
.tm
.extension_opcode
== 7))
4629 /* Skip fisttp, fist, fistp, fbstp, fistp. */
4630 if (i
.tm
.base_opcode
== 0xdf
4631 && (i
.tm
.extension_opcode
== 1
4632 || i
.tm
.extension_opcode
== 2
4633 || i
.tm
.extension_opcode
== 3
4634 || i
.tm
.extension_opcode
== 6
4635 || i
.tm
.extension_opcode
== 7))
4641 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
)
4643 /* bt, bts, btr, btc. */
4644 if (i
.tm
.base_opcode
== 0xba
4645 && (i
.tm
.extension_opcode
>= 4 && i
.tm
.extension_opcode
<= 7))
4648 /* cmpxchg8b, cmpxchg16b, xrstors, vmptrld. */
4649 if (i
.tm
.base_opcode
== 0xc7
4650 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
4651 && (i
.tm
.extension_opcode
== 1 || i
.tm
.extension_opcode
== 3
4652 || i
.tm
.extension_opcode
== 6))
4655 /* fxrstor, ldmxcsr, xrstor. */
4656 if (i
.tm
.base_opcode
== 0xae
4657 && (i
.tm
.extension_opcode
== 1
4658 || i
.tm
.extension_opcode
== 2
4659 || i
.tm
.extension_opcode
== 5))
4662 /* lgdt, lidt, lmsw. */
4663 if (i
.tm
.base_opcode
== 0x01
4664 && (i
.tm
.extension_opcode
== 2
4665 || i
.tm
.extension_opcode
== 3
4666 || i
.tm
.extension_opcode
== 6))
4670 dest
= i
.operands
- 1;
4672 /* Check fake imm8 operand and 3 source operands. */
4673 if ((i
.tm
.opcode_modifier
.immext
4674 || i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
4675 && i
.types
[dest
].bitfield
.imm8
)
4678 /* add, or, adc, sbb, and, sub, xor, cmp, test, xchg. */
4679 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4680 && (base_opcode
== 0x1
4681 || base_opcode
== 0x9
4682 || base_opcode
== 0x11
4683 || base_opcode
== 0x19
4684 || base_opcode
== 0x21
4685 || base_opcode
== 0x29
4686 || base_opcode
== 0x31
4687 || base_opcode
== 0x39
4688 || (base_opcode
| 2) == 0x87))
4692 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4693 && base_opcode
== 0xc1)
4696 /* Check for load instruction. */
4697 return (i
.types
[dest
].bitfield
.class != ClassNone
4698 || i
.types
[dest
].bitfield
.instance
== Accum
);
4701 /* Output lfence, 0xfaee8, after instruction. */
4704 insert_lfence_after (void)
4706 if (lfence_after_load
&& load_insn_p ())
4708 /* There are also two REP string instructions that require
4709 special treatment. Specifically, the compare string (CMPS)
4710 and scan string (SCAS) instructions set EFLAGS in a manner
4711 that depends on the data being compared/scanned. When used
4712 with a REP prefix, the number of iterations may therefore
4713 vary depending on this data. If the data is a program secret
4714 chosen by the adversary using an LVI method,
4715 then this data-dependent behavior may leak some aspect
4717 if (((i
.tm
.base_opcode
| 0x1) == 0xa7
4718 || (i
.tm
.base_opcode
| 0x1) == 0xaf)
4719 && i
.prefix
[REP_PREFIX
])
4721 as_warn (_("`%s` changes flags which would affect control flow behavior"),
4724 char *p
= frag_more (3);
4731 /* Output lfence, 0xfaee8, before instruction. */
4734 insert_lfence_before (void)
4738 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
4741 if (i
.tm
.base_opcode
== 0xff
4742 && (i
.tm
.extension_opcode
== 2 || i
.tm
.extension_opcode
== 4))
4744 /* Insert lfence before indirect branch if needed. */
4746 if (lfence_before_indirect_branch
== lfence_branch_none
)
4749 if (i
.operands
!= 1)
4752 if (i
.reg_operands
== 1)
4754 /* Indirect branch via register. Don't insert lfence with
4755 -mlfence-after-load=yes. */
4756 if (lfence_after_load
4757 || lfence_before_indirect_branch
== lfence_branch_memory
)
4760 else if (i
.mem_operands
== 1
4761 && lfence_before_indirect_branch
!= lfence_branch_register
)
4763 as_warn (_("indirect `%s` with memory operand should be avoided"),
4770 if (last_insn
.kind
!= last_insn_other
4771 && last_insn
.seg
== now_seg
)
4773 as_warn_where (last_insn
.file
, last_insn
.line
,
4774 _("`%s` skips -mlfence-before-indirect-branch on `%s`"),
4775 last_insn
.name
, i
.tm
.name
);
4786 /* Output or/not/shl and lfence before near ret. */
4787 if (lfence_before_ret
!= lfence_before_ret_none
4788 && (i
.tm
.base_opcode
== 0xc2
4789 || i
.tm
.base_opcode
== 0xc3))
4791 if (last_insn
.kind
!= last_insn_other
4792 && last_insn
.seg
== now_seg
)
4794 as_warn_where (last_insn
.file
, last_insn
.line
,
4795 _("`%s` skips -mlfence-before-ret on `%s`"),
4796 last_insn
.name
, i
.tm
.name
);
4800 /* Near ret ingore operand size override under CPU64. */
4801 char prefix
= flag_code
== CODE_64BIT
4803 : i
.prefix
[DATA_PREFIX
] ? 0x66 : 0x0;
4805 if (lfence_before_ret
== lfence_before_ret_not
)
4807 /* not: 0xf71424, may add prefix
4808 for operand size override or 64-bit code. */
4809 p
= frag_more ((prefix
? 2 : 0) + 6 + 3);
4823 p
= frag_more ((prefix
? 1 : 0) + 4 + 3);
4826 if (lfence_before_ret
== lfence_before_ret_or
)
4828 /* or: 0x830c2400, may add prefix
4829 for operand size override or 64-bit code. */
4835 /* shl: 0xc1242400, may add prefix
4836 for operand size override or 64-bit code. */
4851 /* This is the guts of the machine-dependent assembler. LINE points to a
4852 machine dependent instruction. This function is supposed to emit
4853 the frags/bytes it assembles to. */
4856 md_assemble (char *line
)
4859 char mnemonic
[MAX_MNEM_SIZE
], mnem_suffix
;
4860 const insn_template
*t
;
4862 /* Initialize globals. */
4863 memset (&i
, '\0', sizeof (i
));
4864 i
.rounding
.type
= rc_none
;
4865 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4866 i
.reloc
[j
] = NO_RELOC
;
4867 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
4868 memset (im_expressions
, '\0', sizeof (im_expressions
));
4869 save_stack_p
= save_stack
;
4871 /* First parse an instruction mnemonic & call i386_operand for the operands.
4872 We assume that the scrubber has arranged it so that line[0] is the valid
4873 start of a (possibly prefixed) mnemonic. */
4875 line
= parse_insn (line
, mnemonic
);
4878 mnem_suffix
= i
.suffix
;
4880 line
= parse_operands (line
, mnemonic
);
4882 xfree (i
.memop1_string
);
4883 i
.memop1_string
= NULL
;
4887 /* Now we've parsed the mnemonic into a set of templates, and have the
4888 operands at hand. */
4890 /* All Intel opcodes have reversed operands except for "bound", "enter",
4891 "invlpg*", "monitor*", "mwait*", "tpause", "umwait", "pvalidate",
4892 "rmpadjust", "rmpupdate", and "rmpquery". We also don't reverse
4893 intersegment "jmp" and "call" instructions with 2 immediate operands so
4894 that the immediate segment precedes the offset consistently in Intel and
4898 && (strcmp (mnemonic
, "bound") != 0)
4899 && (strncmp (mnemonic
, "invlpg", 6) != 0)
4900 && !startswith (mnemonic
, "monitor")
4901 && !startswith (mnemonic
, "mwait")
4902 && (strcmp (mnemonic
, "pvalidate") != 0)
4903 && !startswith (mnemonic
, "rmp")
4904 && (strcmp (mnemonic
, "tpause") != 0)
4905 && (strcmp (mnemonic
, "umwait") != 0)
4906 && !(i
.operands
== 2
4907 && operand_type_check (i
.types
[0], imm
)
4908 && operand_type_check (i
.types
[1], imm
)))
4911 /* The order of the immediates should be reversed
4912 for 2 immediates extrq and insertq instructions */
4913 if (i
.imm_operands
== 2
4914 && (strcmp (mnemonic
, "extrq") == 0
4915 || strcmp (mnemonic
, "insertq") == 0))
4916 swap_2_operands (0, 1);
4921 if (i
.disp_operands
&& !want_disp32 (current_templates
->start
)
4922 && (!current_templates
->start
->opcode_modifier
.jump
4923 || i
.jumpabsolute
|| i
.types
[0].bitfield
.baseindex
))
4925 for (j
= 0; j
< i
.operands
; ++j
)
4927 const expressionS
*exp
= i
.op
[j
].disps
;
4929 if (!operand_type_check (i
.types
[j
], disp
))
4932 if (exp
->X_op
!= O_constant
)
4935 /* Since displacement is signed extended to 64bit, don't allow
4936 disp32 if it is out of range. */
4937 if (fits_in_signed_long (exp
->X_add_number
))
4940 i
.types
[j
].bitfield
.disp32
= 0;
4941 if (i
.types
[j
].bitfield
.baseindex
)
4943 as_bad (_("0x%" PRIx64
" out of range of signed 32bit displacement"),
4944 (uint64_t) exp
->X_add_number
);
4950 /* Don't optimize displacement for movabs since it only takes 64bit
4953 && i
.disp_encoding
<= disp_encoding_8bit
4954 && (flag_code
!= CODE_64BIT
4955 || strcmp (mnemonic
, "movabs") != 0))
4958 /* Next, we find a template that matches the given insn,
4959 making sure the overlap of the given operands types is consistent
4960 with the template operand types. */
4962 if (!(t
= match_template (mnem_suffix
)))
4965 if (sse_check
!= check_none
4966 /* The opcode space check isn't strictly needed; it's there only to
4967 bypass the logic below when easily possible. */
4968 && t
->opcode_modifier
.opcodespace
>= SPACE_0F
4969 && t
->opcode_modifier
.opcodespace
<= SPACE_0F3A
4970 && !i
.tm
.cpu_flags
.bitfield
.cpusse4a
4971 && !is_any_vex_encoding (t
))
4975 for (j
= 0; j
< t
->operands
; ++j
)
4977 if (t
->operand_types
[j
].bitfield
.class == RegMMX
)
4979 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
)
4983 if (j
>= t
->operands
&& simd
)
4984 (sse_check
== check_warning
4986 : as_bad
) (_("SSE instruction `%s' is used"), i
.tm
.name
);
4989 if (i
.tm
.opcode_modifier
.fwait
)
4990 if (!add_prefix (FWAIT_OPCODE
))
4993 /* Check if REP prefix is OK. */
4994 if (i
.rep_prefix
&& i
.tm
.opcode_modifier
.prefixok
!= PrefixRep
)
4996 as_bad (_("invalid instruction `%s' after `%s'"),
4997 i
.tm
.name
, i
.rep_prefix
);
5001 /* Check for lock without a lockable instruction. Destination operand
5002 must be memory unless it is xchg (0x86). */
5003 if (i
.prefix
[LOCK_PREFIX
]
5004 && (i
.tm
.opcode_modifier
.prefixok
< PrefixLock
5005 || i
.mem_operands
== 0
5006 || (i
.tm
.base_opcode
!= 0x86
5007 && !(i
.flags
[i
.operands
- 1] & Operand_Mem
))))
5009 as_bad (_("expecting lockable instruction after `lock'"));
5013 /* Check for data size prefix on VEX/XOP/EVEX encoded and SIMD insns. */
5014 if (i
.prefix
[DATA_PREFIX
]
5015 && (is_any_vex_encoding (&i
.tm
)
5016 || i
.tm
.operand_types
[i
.imm_operands
].bitfield
.class >= RegMMX
5017 || i
.tm
.operand_types
[i
.imm_operands
+ 1].bitfield
.class >= RegMMX
))
5019 as_bad (_("data size prefix invalid with `%s'"), i
.tm
.name
);
5023 /* Check if HLE prefix is OK. */
5024 if (i
.hle_prefix
&& !check_hle ())
5027 /* Check BND prefix. */
5028 if (i
.bnd_prefix
&& !i
.tm
.opcode_modifier
.bndprefixok
)
5029 as_bad (_("expecting valid branch instruction after `bnd'"));
5031 /* Check NOTRACK prefix. */
5032 if (i
.notrack_prefix
&& i
.tm
.opcode_modifier
.prefixok
!= PrefixNoTrack
)
5033 as_bad (_("expecting indirect branch instruction after `notrack'"));
5035 if (i
.tm
.cpu_flags
.bitfield
.cpumpx
)
5037 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
5038 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
5039 else if (flag_code
!= CODE_16BIT
5040 ? i
.prefix
[ADDR_PREFIX
]
5041 : i
.mem_operands
&& !i
.prefix
[ADDR_PREFIX
])
5042 as_bad (_("16-bit address isn't allowed in MPX instructions"));
5045 /* Insert BND prefix. */
5046 if (add_bnd_prefix
&& i
.tm
.opcode_modifier
.bndprefixok
)
5048 if (!i
.prefix
[BND_PREFIX
])
5049 add_prefix (BND_PREFIX_OPCODE
);
5050 else if (i
.prefix
[BND_PREFIX
] != BND_PREFIX_OPCODE
)
5052 as_warn (_("replacing `rep'/`repe' prefix by `bnd'"));
5053 i
.prefix
[BND_PREFIX
] = BND_PREFIX_OPCODE
;
5057 /* Check string instruction segment overrides. */
5058 if (i
.tm
.opcode_modifier
.isstring
>= IS_STRING_ES_OP0
)
5060 gas_assert (i
.mem_operands
);
5061 if (!check_string ())
5063 i
.disp_operands
= 0;
5066 /* The memory operand of (%dx) should be only used with input/output
5067 instructions (base opcodes: 0x6c, 0x6e, 0xec, 0xee). */
5068 if (i
.input_output_operand
5069 && ((i
.tm
.base_opcode
| 0x82) != 0xee
5070 || i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
))
5072 as_bad (_("input/output port address isn't allowed with `%s'"),
5077 if (optimize
&& !i
.no_optimize
&& i
.tm
.opcode_modifier
.optimize
)
5078 optimize_encoding ();
5080 if (use_unaligned_vector_move
)
5081 encode_with_unaligned_vector_move ();
5083 if (!process_suffix ())
5086 /* Check if IP-relative addressing requirements can be satisfied. */
5087 if (i
.tm
.cpu_flags
.bitfield
.cpuprefetchi
5088 && !(i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
))
5089 as_warn (_("'%s' only supports RIP-relative address"), i
.tm
.name
);
5091 /* Update operand types and check extended states. */
5092 for (j
= 0; j
< i
.operands
; j
++)
5094 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
5095 switch (i
.tm
.operand_types
[j
].bitfield
.class)
5100 i
.xstate
|= xstate_mmx
;
5103 i
.xstate
|= xstate_mask
;
5106 if (i
.tm
.operand_types
[j
].bitfield
.tmmword
)
5107 i
.xstate
|= xstate_tmm
;
5108 else if (i
.tm
.operand_types
[j
].bitfield
.zmmword
)
5109 i
.xstate
|= xstate_zmm
;
5110 else if (i
.tm
.operand_types
[j
].bitfield
.ymmword
)
5111 i
.xstate
|= xstate_ymm
;
5112 else if (i
.tm
.operand_types
[j
].bitfield
.xmmword
)
5113 i
.xstate
|= xstate_xmm
;
5118 /* Make still unresolved immediate matches conform to size of immediate
5119 given in i.suffix. */
5120 if (!finalize_imm ())
5123 if (i
.types
[0].bitfield
.imm1
)
5124 i
.imm_operands
= 0; /* kludge for shift insns. */
5126 /* We only need to check those implicit registers for instructions
5127 with 3 operands or less. */
5128 if (i
.operands
<= 3)
5129 for (j
= 0; j
< i
.operands
; j
++)
5130 if (i
.types
[j
].bitfield
.instance
!= InstanceNone
5131 && !i
.types
[j
].bitfield
.xmmword
)
5134 /* For insns with operands there are more diddles to do to the opcode. */
5137 if (!process_operands ())
5140 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.operandconstraint
== UGH
)
5142 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
5143 as_warn (_("translating to `%sp'"), i
.tm
.name
);
5146 if (is_any_vex_encoding (&i
.tm
))
5148 if (!cpu_arch_flags
.bitfield
.cpui286
)
5150 as_bad (_("instruction `%s' isn't supported outside of protected mode."),
5155 /* Check for explicit REX prefix. */
5156 if (i
.prefix
[REX_PREFIX
] || i
.rex_encoding
)
5158 as_bad (_("REX prefix invalid with `%s'"), i
.tm
.name
);
5162 if (i
.tm
.opcode_modifier
.vex
)
5163 build_vex_prefix (t
);
5165 build_evex_prefix ();
5167 /* The individual REX.RXBW bits got consumed. */
5168 i
.rex
&= REX_OPCODE
;
5171 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
5172 instructions may define INT_OPCODE as well, so avoid this corner
5173 case for those instructions that use MODRM. */
5174 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
5175 && i
.tm
.base_opcode
== INT_OPCODE
5176 && !i
.tm
.opcode_modifier
.modrm
5177 && i
.op
[0].imms
->X_add_number
== 3)
5179 i
.tm
.base_opcode
= INT3_OPCODE
;
5183 if ((i
.tm
.opcode_modifier
.jump
== JUMP
5184 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
5185 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
5186 && i
.op
[0].disps
->X_op
== O_constant
)
5188 /* Convert "jmp constant" (and "call constant") to a jump (call) to
5189 the absolute address given by the constant. Since ix86 jumps and
5190 calls are pc relative, we need to generate a reloc. */
5191 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
5192 i
.op
[0].disps
->X_op
= O_symbol
;
5195 /* For 8 bit registers we need an empty rex prefix. Also if the
5196 instruction already has a prefix, we need to convert old
5197 registers to new ones. */
5199 if ((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
5200 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
5201 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
5202 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
5203 || (((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
)
5204 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
))
5209 i
.rex
|= REX_OPCODE
;
5210 for (x
= 0; x
< 2; x
++)
5212 /* Look for 8 bit operand that uses old registers. */
5213 if (i
.types
[x
].bitfield
.class == Reg
&& i
.types
[x
].bitfield
.byte
5214 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
5216 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
5217 /* In case it is "hi" register, give up. */
5218 if (i
.op
[x
].regs
->reg_num
> 3)
5219 as_bad (_("can't encode register '%s%s' in an "
5220 "instruction requiring REX prefix."),
5221 register_prefix
, i
.op
[x
].regs
->reg_name
);
5223 /* Otherwise it is equivalent to the extended register.
5224 Since the encoding doesn't change this is merely
5225 cosmetic cleanup for debug output. */
5227 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
5232 if (i
.rex
== 0 && i
.rex_encoding
)
5234 /* Check if we can add a REX_OPCODE byte. Look for 8 bit operand
5235 that uses legacy register. If it is "hi" register, don't add
5236 the REX_OPCODE byte. */
5238 for (x
= 0; x
< 2; x
++)
5239 if (i
.types
[x
].bitfield
.class == Reg
5240 && i
.types
[x
].bitfield
.byte
5241 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0
5242 && i
.op
[x
].regs
->reg_num
> 3)
5244 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
5245 i
.rex_encoding
= false;
5254 add_prefix (REX_OPCODE
| i
.rex
);
5256 insert_lfence_before ();
5258 /* We are ready to output the insn. */
5261 insert_lfence_after ();
5263 last_insn
.seg
= now_seg
;
5265 if (i
.tm
.opcode_modifier
.isprefix
)
5267 last_insn
.kind
= last_insn_prefix
;
5268 last_insn
.name
= i
.tm
.name
;
5269 last_insn
.file
= as_where (&last_insn
.line
);
5272 last_insn
.kind
= last_insn_other
;
5276 parse_insn (char *line
, char *mnemonic
)
5279 char *token_start
= l
;
5282 const insn_template
*t
;
5288 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
5293 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
5295 as_bad (_("no such instruction: `%s'"), token_start
);
5300 if (!is_space_char (*l
)
5301 && *l
!= END_OF_INSN
5303 || (*l
!= PREFIX_SEPARATOR
5306 as_bad (_("invalid character %s in mnemonic"),
5307 output_invalid (*l
));
5310 if (token_start
== l
)
5312 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
5313 as_bad (_("expecting prefix; got nothing"));
5315 as_bad (_("expecting mnemonic; got nothing"));
5319 /* Look up instruction (or prefix) via hash table. */
5320 current_templates
= (const templates
*) str_hash_find (op_hash
, mnemonic
);
5322 if (*l
!= END_OF_INSN
5323 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
5324 && current_templates
5325 && current_templates
->start
->opcode_modifier
.isprefix
)
5327 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
5329 as_bad ((flag_code
!= CODE_64BIT
5330 ? _("`%s' is only supported in 64-bit mode")
5331 : _("`%s' is not supported in 64-bit mode")),
5332 current_templates
->start
->name
);
5335 /* If we are in 16-bit mode, do not allow addr16 or data16.
5336 Similarly, in 32-bit mode, do not allow addr32 or data32. */
5337 if ((current_templates
->start
->opcode_modifier
.size
== SIZE16
5338 || current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5339 && flag_code
!= CODE_64BIT
5340 && ((current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5341 ^ (flag_code
== CODE_16BIT
)))
5343 as_bad (_("redundant %s prefix"),
5344 current_templates
->start
->name
);
5348 if (current_templates
->start
->base_opcode
== PSEUDO_PREFIX
)
5350 /* Handle pseudo prefixes. */
5351 switch (current_templates
->start
->extension_opcode
)
5355 i
.disp_encoding
= disp_encoding_8bit
;
5359 i
.disp_encoding
= disp_encoding_16bit
;
5363 i
.disp_encoding
= disp_encoding_32bit
;
5367 i
.dir_encoding
= dir_encoding_load
;
5371 i
.dir_encoding
= dir_encoding_store
;
5375 i
.vec_encoding
= vex_encoding_vex
;
5379 i
.vec_encoding
= vex_encoding_vex3
;
5383 i
.vec_encoding
= vex_encoding_evex
;
5387 i
.rex_encoding
= true;
5389 case Prefix_NoOptimize
:
5391 i
.no_optimize
= true;
5399 /* Add prefix, checking for repeated prefixes. */
5400 switch (add_prefix (current_templates
->start
->base_opcode
))
5405 if (current_templates
->start
->cpu_flags
.bitfield
.cpuibt
)
5406 i
.notrack_prefix
= current_templates
->start
->name
;
5409 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
5410 i
.hle_prefix
= current_templates
->start
->name
;
5411 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
5412 i
.bnd_prefix
= current_templates
->start
->name
;
5414 i
.rep_prefix
= current_templates
->start
->name
;
5420 /* Skip past PREFIX_SEPARATOR and reset token_start. */
5427 if (!current_templates
)
5429 /* Deprecated functionality (new code should use pseudo-prefixes instead):
5430 Check if we should swap operand or force 32bit displacement in
5432 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
5433 i
.dir_encoding
= dir_encoding_swap
;
5434 else if (mnem_p
- 3 == dot_p
5437 i
.disp_encoding
= disp_encoding_8bit
;
5438 else if (mnem_p
- 4 == dot_p
5442 i
.disp_encoding
= disp_encoding_32bit
;
5447 current_templates
= (const templates
*) str_hash_find (op_hash
, mnemonic
);
5450 if (!current_templates
)
5453 if (mnem_p
> mnemonic
)
5455 /* See if we can get a match by trimming off a suffix. */
5458 case WORD_MNEM_SUFFIX
:
5459 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
5460 i
.suffix
= SHORT_MNEM_SUFFIX
;
5463 case BYTE_MNEM_SUFFIX
:
5464 case QWORD_MNEM_SUFFIX
:
5465 i
.suffix
= mnem_p
[-1];
5468 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5470 case SHORT_MNEM_SUFFIX
:
5471 case LONG_MNEM_SUFFIX
:
5474 i
.suffix
= mnem_p
[-1];
5477 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5485 if (intel_float_operand (mnemonic
) == 1)
5486 i
.suffix
= SHORT_MNEM_SUFFIX
;
5488 i
.suffix
= LONG_MNEM_SUFFIX
;
5491 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5497 if (!current_templates
)
5499 as_bad (_("no such instruction: `%s'"), token_start
);
5504 if (current_templates
->start
->opcode_modifier
.jump
== JUMP
5505 || current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
)
5507 /* Check for a branch hint. We allow ",pt" and ",pn" for
5508 predict taken and predict not taken respectively.
5509 I'm not sure that branch hints actually do anything on loop
5510 and jcxz insns (JumpByte) for current Pentium4 chips. They
5511 may work in the future and it doesn't hurt to accept them
5513 if (l
[0] == ',' && l
[1] == 'p')
5517 if (!add_prefix (DS_PREFIX_OPCODE
))
5521 else if (l
[2] == 'n')
5523 if (!add_prefix (CS_PREFIX_OPCODE
))
5529 /* Any other comma loses. */
5532 as_bad (_("invalid character %s in mnemonic"),
5533 output_invalid (*l
));
5537 /* Check if instruction is supported on specified architecture. */
5539 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
5541 supported
|= cpu_flags_match (t
);
5542 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
5546 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
5547 as_bad (flag_code
== CODE_64BIT
5548 ? _("`%s' is not supported in 64-bit mode")
5549 : _("`%s' is only supported in 64-bit mode"),
5550 current_templates
->start
->name
);
5552 as_bad (_("`%s' is not supported on `%s%s'"),
5553 current_templates
->start
->name
,
5554 cpu_arch_name
? cpu_arch_name
: default_arch
,
5555 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
5561 parse_operands (char *l
, const char *mnemonic
)
5565 /* 1 if operand is pending after ','. */
5566 unsigned int expecting_operand
= 0;
5568 while (*l
!= END_OF_INSN
)
5570 /* Non-zero if operand parens not balanced. */
5571 unsigned int paren_not_balanced
= 0;
5572 /* True if inside double quotes. */
5573 bool in_quotes
= false;
5575 /* Skip optional white space before operand. */
5576 if (is_space_char (*l
))
5578 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
&& *l
!= '"')
5580 as_bad (_("invalid character %s before operand %d"),
5581 output_invalid (*l
),
5585 token_start
= l
; /* After white space. */
5586 while (in_quotes
|| paren_not_balanced
|| *l
!= ',')
5588 if (*l
== END_OF_INSN
)
5592 as_bad (_("unbalanced double quotes in operand %d."),
5596 if (paren_not_balanced
)
5598 know (!intel_syntax
);
5599 as_bad (_("unbalanced parenthesis in operand %d."),
5604 break; /* we are done */
5606 else if (*l
== '\\' && l
[1] == '"')
5609 in_quotes
= !in_quotes
;
5610 else if (!in_quotes
&& !is_operand_char (*l
) && !is_space_char (*l
))
5612 as_bad (_("invalid character %s in operand %d"),
5613 output_invalid (*l
),
5617 if (!intel_syntax
&& !in_quotes
)
5620 ++paren_not_balanced
;
5622 --paren_not_balanced
;
5626 if (l
!= token_start
)
5627 { /* Yes, we've read in another operand. */
5628 unsigned int operand_ok
;
5629 this_operand
= i
.operands
++;
5630 if (i
.operands
> MAX_OPERANDS
)
5632 as_bad (_("spurious operands; (%d operands/instruction max)"),
5636 i
.types
[this_operand
].bitfield
.unspecified
= 1;
5637 /* Now parse operand adding info to 'i' as we go along. */
5638 END_STRING_AND_SAVE (l
);
5640 if (i
.mem_operands
> 1)
5642 as_bad (_("too many memory references for `%s'"),
5649 i386_intel_operand (token_start
,
5650 intel_float_operand (mnemonic
));
5652 operand_ok
= i386_att_operand (token_start
);
5654 RESTORE_END_STRING (l
);
5660 if (expecting_operand
)
5662 expecting_operand_after_comma
:
5663 as_bad (_("expecting operand after ','; got nothing"));
5668 as_bad (_("expecting operand before ','; got nothing"));
5673 /* Now *l must be either ',' or END_OF_INSN. */
5676 if (*++l
== END_OF_INSN
)
5678 /* Just skip it, if it's \n complain. */
5679 goto expecting_operand_after_comma
;
5681 expecting_operand
= 1;
5688 swap_2_operands (unsigned int xchg1
, unsigned int xchg2
)
5690 union i386_op temp_op
;
5691 i386_operand_type temp_type
;
5692 unsigned int temp_flags
;
5693 enum bfd_reloc_code_real temp_reloc
;
5695 temp_type
= i
.types
[xchg2
];
5696 i
.types
[xchg2
] = i
.types
[xchg1
];
5697 i
.types
[xchg1
] = temp_type
;
5699 temp_flags
= i
.flags
[xchg2
];
5700 i
.flags
[xchg2
] = i
.flags
[xchg1
];
5701 i
.flags
[xchg1
] = temp_flags
;
5703 temp_op
= i
.op
[xchg2
];
5704 i
.op
[xchg2
] = i
.op
[xchg1
];
5705 i
.op
[xchg1
] = temp_op
;
5707 temp_reloc
= i
.reloc
[xchg2
];
5708 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
5709 i
.reloc
[xchg1
] = temp_reloc
;
5713 if (i
.mask
.operand
== xchg1
)
5714 i
.mask
.operand
= xchg2
;
5715 else if (i
.mask
.operand
== xchg2
)
5716 i
.mask
.operand
= xchg1
;
5718 if (i
.broadcast
.type
|| i
.broadcast
.bytes
)
5720 if (i
.broadcast
.operand
== xchg1
)
5721 i
.broadcast
.operand
= xchg2
;
5722 else if (i
.broadcast
.operand
== xchg2
)
5723 i
.broadcast
.operand
= xchg1
;
5728 swap_operands (void)
5734 swap_2_operands (1, i
.operands
- 2);
5738 swap_2_operands (0, i
.operands
- 1);
5744 if (i
.mem_operands
== 2)
5746 const reg_entry
*temp_seg
;
5747 temp_seg
= i
.seg
[0];
5748 i
.seg
[0] = i
.seg
[1];
5749 i
.seg
[1] = temp_seg
;
5753 /* Try to ensure constant immediates are represented in the smallest
5758 char guess_suffix
= 0;
5762 guess_suffix
= i
.suffix
;
5763 else if (i
.reg_operands
)
5765 /* Figure out a suffix from the last register operand specified.
5766 We can't do this properly yet, i.e. excluding special register
5767 instances, but the following works for instructions with
5768 immediates. In any case, we can't set i.suffix yet. */
5769 for (op
= i
.operands
; --op
>= 0;)
5770 if (i
.types
[op
].bitfield
.class != Reg
)
5772 else if (i
.types
[op
].bitfield
.byte
)
5774 guess_suffix
= BYTE_MNEM_SUFFIX
;
5777 else if (i
.types
[op
].bitfield
.word
)
5779 guess_suffix
= WORD_MNEM_SUFFIX
;
5782 else if (i
.types
[op
].bitfield
.dword
)
5784 guess_suffix
= LONG_MNEM_SUFFIX
;
5787 else if (i
.types
[op
].bitfield
.qword
)
5789 guess_suffix
= QWORD_MNEM_SUFFIX
;
5793 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
5794 guess_suffix
= WORD_MNEM_SUFFIX
;
5796 for (op
= i
.operands
; --op
>= 0;)
5797 if (operand_type_check (i
.types
[op
], imm
))
5799 switch (i
.op
[op
].imms
->X_op
)
5802 /* If a suffix is given, this operand may be shortened. */
5803 switch (guess_suffix
)
5805 case LONG_MNEM_SUFFIX
:
5806 i
.types
[op
].bitfield
.imm32
= 1;
5807 i
.types
[op
].bitfield
.imm64
= 1;
5809 case WORD_MNEM_SUFFIX
:
5810 i
.types
[op
].bitfield
.imm16
= 1;
5811 i
.types
[op
].bitfield
.imm32
= 1;
5812 i
.types
[op
].bitfield
.imm32s
= 1;
5813 i
.types
[op
].bitfield
.imm64
= 1;
5815 case BYTE_MNEM_SUFFIX
:
5816 i
.types
[op
].bitfield
.imm8
= 1;
5817 i
.types
[op
].bitfield
.imm8s
= 1;
5818 i
.types
[op
].bitfield
.imm16
= 1;
5819 i
.types
[op
].bitfield
.imm32
= 1;
5820 i
.types
[op
].bitfield
.imm32s
= 1;
5821 i
.types
[op
].bitfield
.imm64
= 1;
5825 /* If this operand is at most 16 bits, convert it
5826 to a signed 16 bit number before trying to see
5827 whether it will fit in an even smaller size.
5828 This allows a 16-bit operand such as $0xffe0 to
5829 be recognised as within Imm8S range. */
5830 if ((i
.types
[op
].bitfield
.imm16
)
5831 && fits_in_unsigned_word (i
.op
[op
].imms
->X_add_number
))
5833 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
5834 ^ 0x8000) - 0x8000);
5837 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
5838 if ((i
.types
[op
].bitfield
.imm32
)
5839 && fits_in_unsigned_long (i
.op
[op
].imms
->X_add_number
))
5841 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
5842 ^ ((offsetT
) 1 << 31))
5843 - ((offsetT
) 1 << 31));
5847 = operand_type_or (i
.types
[op
],
5848 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
5850 /* We must avoid matching of Imm32 templates when 64bit
5851 only immediate is available. */
5852 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
5853 i
.types
[op
].bitfield
.imm32
= 0;
5860 /* Symbols and expressions. */
5862 /* Convert symbolic operand to proper sizes for matching, but don't
5863 prevent matching a set of insns that only supports sizes other
5864 than those matching the insn suffix. */
5866 i386_operand_type mask
, allowed
;
5867 const insn_template
*t
= current_templates
->start
;
5869 operand_type_set (&mask
, 0);
5870 switch (guess_suffix
)
5872 case QWORD_MNEM_SUFFIX
:
5873 mask
.bitfield
.imm64
= 1;
5874 mask
.bitfield
.imm32s
= 1;
5876 case LONG_MNEM_SUFFIX
:
5877 mask
.bitfield
.imm32
= 1;
5879 case WORD_MNEM_SUFFIX
:
5880 mask
.bitfield
.imm16
= 1;
5882 case BYTE_MNEM_SUFFIX
:
5883 mask
.bitfield
.imm8
= 1;
5889 allowed
= operand_type_and (t
->operand_types
[op
], mask
);
5890 while (++t
< current_templates
->end
)
5892 allowed
= operand_type_or (allowed
, t
->operand_types
[op
]);
5893 allowed
= operand_type_and (allowed
, mask
);
5896 if (!operand_type_all_zero (&allowed
))
5897 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
5904 /* Try to use the smallest displacement type too. */
5906 optimize_disp (void)
5910 for (op
= i
.operands
; --op
>= 0;)
5911 if (operand_type_check (i
.types
[op
], disp
))
5913 if (i
.op
[op
].disps
->X_op
== O_constant
)
5915 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
5917 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
5919 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
5920 i
.op
[op
].disps
= NULL
;
5925 if (i
.types
[op
].bitfield
.disp16
5926 && fits_in_unsigned_word (op_disp
))
5928 /* If this operand is at most 16 bits, convert
5929 to a signed 16 bit number and don't use 64bit
5931 op_disp
= ((op_disp
^ 0x8000) - 0x8000);
5932 i
.types
[op
].bitfield
.disp64
= 0;
5936 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
5937 if ((flag_code
!= CODE_64BIT
5938 ? i
.types
[op
].bitfield
.disp32
5939 : want_disp32 (current_templates
->start
)
5940 && (!current_templates
->start
->opcode_modifier
.jump
5941 || i
.jumpabsolute
|| i
.types
[op
].bitfield
.baseindex
))
5942 && fits_in_unsigned_long (op_disp
))
5944 /* If this operand is at most 32 bits, convert
5945 to a signed 32 bit number and don't use 64bit
5947 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
5948 i
.types
[op
].bitfield
.disp64
= 0;
5949 i
.types
[op
].bitfield
.disp32
= 1;
5952 if (flag_code
== CODE_64BIT
&& fits_in_signed_long (op_disp
))
5954 i
.types
[op
].bitfield
.disp64
= 0;
5955 i
.types
[op
].bitfield
.disp32
= 1;
5958 if ((i
.types
[op
].bitfield
.disp32
5959 || i
.types
[op
].bitfield
.disp16
)
5960 && fits_in_disp8 (op_disp
))
5961 i
.types
[op
].bitfield
.disp8
= 1;
5963 i
.op
[op
].disps
->X_add_number
= op_disp
;
5965 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
5966 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
5968 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
5969 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
5970 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
5973 /* We only support 64bit displacement on constants. */
5974 i
.types
[op
].bitfield
.disp64
= 0;
5978 /* Return 1 if there is a match in broadcast bytes between operand
5979 GIVEN and instruction template T. */
5982 match_broadcast_size (const insn_template
*t
, unsigned int given
)
5984 return ((t
->opcode_modifier
.broadcast
== BYTE_BROADCAST
5985 && i
.types
[given
].bitfield
.byte
)
5986 || (t
->opcode_modifier
.broadcast
== WORD_BROADCAST
5987 && i
.types
[given
].bitfield
.word
)
5988 || (t
->opcode_modifier
.broadcast
== DWORD_BROADCAST
5989 && i
.types
[given
].bitfield
.dword
)
5990 || (t
->opcode_modifier
.broadcast
== QWORD_BROADCAST
5991 && i
.types
[given
].bitfield
.qword
));
5994 /* Check if operands are valid for the instruction. */
5997 check_VecOperands (const insn_template
*t
)
6002 /* Templates allowing for ZMMword as well as YMMword and/or XMMword for
6003 any one operand are implicity requiring AVX512VL support if the actual
6004 operand size is YMMword or XMMword. Since this function runs after
6005 template matching, there's no need to check for YMMword/XMMword in
6007 cpu
= cpu_flags_and (t
->cpu_flags
, avx512
);
6008 if (!cpu_flags_all_zero (&cpu
)
6009 && !t
->cpu_flags
.bitfield
.cpuavx512vl
6010 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
6012 for (op
= 0; op
< t
->operands
; ++op
)
6014 if (t
->operand_types
[op
].bitfield
.zmmword
6015 && (i
.types
[op
].bitfield
.ymmword
6016 || i
.types
[op
].bitfield
.xmmword
))
6018 i
.error
= unsupported
;
6024 /* Somewhat similarly, templates specifying both AVX and AVX2 are
6025 requiring AVX2 support if the actual operand size is YMMword. */
6026 if (t
->cpu_flags
.bitfield
.cpuavx
6027 && t
->cpu_flags
.bitfield
.cpuavx2
6028 && !cpu_arch_flags
.bitfield
.cpuavx2
)
6030 for (op
= 0; op
< t
->operands
; ++op
)
6032 if (t
->operand_types
[op
].bitfield
.xmmword
6033 && i
.types
[op
].bitfield
.ymmword
)
6035 i
.error
= unsupported
;
6041 /* Without VSIB byte, we can't have a vector register for index. */
6042 if (!t
->opcode_modifier
.sib
6044 && (i
.index_reg
->reg_type
.bitfield
.xmmword
6045 || i
.index_reg
->reg_type
.bitfield
.ymmword
6046 || i
.index_reg
->reg_type
.bitfield
.zmmword
))
6048 i
.error
= unsupported_vector_index_register
;
6052 /* Check if default mask is allowed. */
6053 if (t
->opcode_modifier
.operandconstraint
== NO_DEFAULT_MASK
6054 && (!i
.mask
.reg
|| i
.mask
.reg
->reg_num
== 0))
6056 i
.error
= no_default_mask
;
6060 /* For VSIB byte, we need a vector register for index, and all vector
6061 registers must be distinct. */
6062 if (t
->opcode_modifier
.sib
&& t
->opcode_modifier
.sib
!= SIBMEM
)
6065 || !((t
->opcode_modifier
.sib
== VECSIB128
6066 && i
.index_reg
->reg_type
.bitfield
.xmmword
)
6067 || (t
->opcode_modifier
.sib
== VECSIB256
6068 && i
.index_reg
->reg_type
.bitfield
.ymmword
)
6069 || (t
->opcode_modifier
.sib
== VECSIB512
6070 && i
.index_reg
->reg_type
.bitfield
.zmmword
)))
6072 i
.error
= invalid_vsib_address
;
6076 gas_assert (i
.reg_operands
== 2 || i
.mask
.reg
);
6077 if (i
.reg_operands
== 2 && !i
.mask
.reg
)
6079 gas_assert (i
.types
[0].bitfield
.class == RegSIMD
);
6080 gas_assert (i
.types
[0].bitfield
.xmmword
6081 || i
.types
[0].bitfield
.ymmword
);
6082 gas_assert (i
.types
[2].bitfield
.class == RegSIMD
);
6083 gas_assert (i
.types
[2].bitfield
.xmmword
6084 || i
.types
[2].bitfield
.ymmword
);
6085 if (operand_check
== check_none
)
6087 if (register_number (i
.op
[0].regs
)
6088 != register_number (i
.index_reg
)
6089 && register_number (i
.op
[2].regs
)
6090 != register_number (i
.index_reg
)
6091 && register_number (i
.op
[0].regs
)
6092 != register_number (i
.op
[2].regs
))
6094 if (operand_check
== check_error
)
6096 i
.error
= invalid_vector_register_set
;
6099 as_warn (_("mask, index, and destination registers should be distinct"));
6101 else if (i
.reg_operands
== 1 && i
.mask
.reg
)
6103 if (i
.types
[1].bitfield
.class == RegSIMD
6104 && (i
.types
[1].bitfield
.xmmword
6105 || i
.types
[1].bitfield
.ymmword
6106 || i
.types
[1].bitfield
.zmmword
)
6107 && (register_number (i
.op
[1].regs
)
6108 == register_number (i
.index_reg
)))
6110 if (operand_check
== check_error
)
6112 i
.error
= invalid_vector_register_set
;
6115 if (operand_check
!= check_none
)
6116 as_warn (_("index and destination registers should be distinct"));
6121 /* For AMX instructions with 3 TMM register operands, all operands
6122 must be distinct. */
6123 if (i
.reg_operands
== 3
6124 && t
->operand_types
[0].bitfield
.tmmword
6125 && (i
.op
[0].regs
== i
.op
[1].regs
6126 || i
.op
[0].regs
== i
.op
[2].regs
6127 || i
.op
[1].regs
== i
.op
[2].regs
))
6129 i
.error
= invalid_tmm_register_set
;
6133 /* For some special instructions require that destination must be distinct
6134 from source registers. */
6135 if (t
->opcode_modifier
.operandconstraint
== DISTINCT_DEST
)
6137 unsigned int dest_reg
= i
.operands
- 1;
6139 know (i
.operands
>= 3);
6141 /* #UD if dest_reg == src1_reg or dest_reg == src2_reg. */
6142 if (i
.op
[dest_reg
- 1].regs
== i
.op
[dest_reg
].regs
6143 || (i
.reg_operands
> 2
6144 && i
.op
[dest_reg
- 2].regs
== i
.op
[dest_reg
].regs
))
6146 i
.error
= invalid_dest_and_src_register_set
;
6151 /* Check if broadcast is supported by the instruction and is applied
6152 to the memory operand. */
6153 if (i
.broadcast
.type
|| i
.broadcast
.bytes
)
6155 i386_operand_type type
, overlap
;
6157 /* Check if specified broadcast is supported in this instruction,
6158 and its broadcast bytes match the memory operand. */
6159 op
= i
.broadcast
.operand
;
6160 if (!t
->opcode_modifier
.broadcast
6161 || !(i
.flags
[op
] & Operand_Mem
)
6162 || (!i
.types
[op
].bitfield
.unspecified
6163 && !match_broadcast_size (t
, op
)))
6166 i
.error
= unsupported_broadcast
;
6170 if (i
.broadcast
.type
)
6171 i
.broadcast
.bytes
= ((1 << (t
->opcode_modifier
.broadcast
- 1))
6172 * i
.broadcast
.type
);
6173 operand_type_set (&type
, 0);
6174 switch (get_broadcast_bytes (t
, false))
6177 type
.bitfield
.word
= 1;
6180 type
.bitfield
.dword
= 1;
6183 type
.bitfield
.qword
= 1;
6186 type
.bitfield
.xmmword
= 1;
6189 type
.bitfield
.ymmword
= 1;
6192 type
.bitfield
.zmmword
= 1;
6198 overlap
= operand_type_and (type
, t
->operand_types
[op
]);
6199 if (t
->operand_types
[op
].bitfield
.class == RegSIMD
6200 && t
->operand_types
[op
].bitfield
.byte
6201 + t
->operand_types
[op
].bitfield
.word
6202 + t
->operand_types
[op
].bitfield
.dword
6203 + t
->operand_types
[op
].bitfield
.qword
> 1)
6205 overlap
.bitfield
.xmmword
= 0;
6206 overlap
.bitfield
.ymmword
= 0;
6207 overlap
.bitfield
.zmmword
= 0;
6209 if (operand_type_all_zero (&overlap
))
6212 if (t
->opcode_modifier
.checkregsize
)
6216 type
.bitfield
.baseindex
= 1;
6217 for (j
= 0; j
< i
.operands
; ++j
)
6220 && !operand_type_register_match(i
.types
[j
],
6221 t
->operand_types
[j
],
6223 t
->operand_types
[op
]))
6228 /* If broadcast is supported in this instruction, we need to check if
6229 operand of one-element size isn't specified without broadcast. */
6230 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
6232 /* Find memory operand. */
6233 for (op
= 0; op
< i
.operands
; op
++)
6234 if (i
.flags
[op
] & Operand_Mem
)
6236 gas_assert (op
< i
.operands
);
6237 /* Check size of the memory operand. */
6238 if (match_broadcast_size (t
, op
))
6240 i
.error
= broadcast_needed
;
6245 op
= MAX_OPERANDS
- 1; /* Avoid uninitialized variable warning. */
6247 /* Check if requested masking is supported. */
6250 switch (t
->opcode_modifier
.masking
)
6254 case MERGING_MASKING
:
6258 i
.error
= unsupported_masking
;
6262 case DYNAMIC_MASKING
:
6263 /* Memory destinations allow only merging masking. */
6264 if (i
.mask
.zeroing
&& i
.mem_operands
)
6266 /* Find memory operand. */
6267 for (op
= 0; op
< i
.operands
; op
++)
6268 if (i
.flags
[op
] & Operand_Mem
)
6270 gas_assert (op
< i
.operands
);
6271 if (op
== i
.operands
- 1)
6273 i
.error
= unsupported_masking
;
6283 /* Check if masking is applied to dest operand. */
6284 if (i
.mask
.reg
&& (i
.mask
.operand
!= i
.operands
- 1))
6286 i
.error
= mask_not_on_destination
;
6291 if (i
.rounding
.type
!= rc_none
)
6293 if (!t
->opcode_modifier
.sae
6294 || ((i
.rounding
.type
!= saeonly
) != t
->opcode_modifier
.staticrounding
)
6297 i
.error
= unsupported_rc_sae
;
6301 /* Non-EVEX.LIG forms need to have a ZMM register as at least one
6303 if (t
->opcode_modifier
.evex
!= EVEXLIG
)
6305 for (op
= 0; op
< t
->operands
; ++op
)
6306 if (i
.types
[op
].bitfield
.zmmword
)
6308 if (op
>= t
->operands
)
6310 i
.error
= operand_size_mismatch
;
6316 /* Check the special Imm4 cases; must be the first operand. */
6317 if (t
->cpu_flags
.bitfield
.cpuxop
&& t
->operands
== 5)
6319 if (i
.op
[0].imms
->X_op
!= O_constant
6320 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
6326 /* Turn off Imm<N> so that update_imm won't complain. */
6327 operand_type_set (&i
.types
[0], 0);
6330 /* Check vector Disp8 operand. */
6331 if (t
->opcode_modifier
.disp8memshift
6332 && i
.disp_encoding
<= disp_encoding_8bit
)
6334 if (i
.broadcast
.bytes
)
6335 i
.memshift
= t
->opcode_modifier
.broadcast
- 1;
6336 else if (t
->opcode_modifier
.disp8memshift
!= DISP8_SHIFT_VL
)
6337 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
6340 const i386_operand_type
*type
= NULL
, *fallback
= NULL
;
6343 for (op
= 0; op
< i
.operands
; op
++)
6344 if (i
.flags
[op
] & Operand_Mem
)
6346 if (t
->opcode_modifier
.evex
== EVEXLIG
)
6347 i
.memshift
= 2 + (i
.suffix
== QWORD_MNEM_SUFFIX
);
6348 else if (t
->operand_types
[op
].bitfield
.xmmword
6349 + t
->operand_types
[op
].bitfield
.ymmword
6350 + t
->operand_types
[op
].bitfield
.zmmword
<= 1)
6351 type
= &t
->operand_types
[op
];
6352 else if (!i
.types
[op
].bitfield
.unspecified
)
6353 type
= &i
.types
[op
];
6354 else /* Ambiguities get resolved elsewhere. */
6355 fallback
= &t
->operand_types
[op
];
6357 else if (i
.types
[op
].bitfield
.class == RegSIMD
6358 && t
->opcode_modifier
.evex
!= EVEXLIG
)
6360 if (i
.types
[op
].bitfield
.zmmword
)
6362 else if (i
.types
[op
].bitfield
.ymmword
&& i
.memshift
< 5)
6364 else if (i
.types
[op
].bitfield
.xmmword
&& i
.memshift
< 4)
6368 if (!type
&& !i
.memshift
)
6372 if (type
->bitfield
.zmmword
)
6374 else if (type
->bitfield
.ymmword
)
6376 else if (type
->bitfield
.xmmword
)
6380 /* For the check in fits_in_disp8(). */
6381 if (i
.memshift
== 0)
6385 for (op
= 0; op
< i
.operands
; op
++)
6386 if (operand_type_check (i
.types
[op
], disp
)
6387 && i
.op
[op
].disps
->X_op
== O_constant
)
6389 if (fits_in_disp8 (i
.op
[op
].disps
->X_add_number
))
6391 i
.types
[op
].bitfield
.disp8
= 1;
6394 i
.types
[op
].bitfield
.disp8
= 0;
6403 /* Check if encoding requirements are met by the instruction. */
6406 VEX_check_encoding (const insn_template
*t
)
6408 if (i
.vec_encoding
== vex_encoding_error
)
6410 i
.error
= unsupported
;
6414 if (i
.vec_encoding
== vex_encoding_evex
)
6416 /* This instruction must be encoded with EVEX prefix. */
6417 if (!is_evex_encoding (t
))
6419 i
.error
= unsupported
;
6425 if (!t
->opcode_modifier
.vex
)
6427 /* This instruction template doesn't have VEX prefix. */
6428 if (i
.vec_encoding
!= vex_encoding_default
)
6430 i
.error
= unsupported
;
6439 /* Helper function for the progress() macro in match_template(). */
6440 static INLINE
enum i386_error
progress (enum i386_error
new,
6441 enum i386_error last
,
6442 unsigned int line
, unsigned int *line_p
)
6444 if (line
<= *line_p
)
6450 static const insn_template
*
6451 match_template (char mnem_suffix
)
6453 /* Points to template once we've found it. */
6454 const insn_template
*t
;
6455 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
6456 i386_operand_type overlap4
;
6457 unsigned int found_reverse_match
;
6458 i386_opcode_modifier suffix_check
;
6459 i386_operand_type operand_types
[MAX_OPERANDS
];
6460 int addr_prefix_disp
;
6461 unsigned int j
, size_match
, check_register
, errline
= __LINE__
;
6462 enum i386_error specific_error
= number_of_operands_mismatch
;
6463 #define progress(err) progress (err, specific_error, __LINE__, &errline)
6465 #if MAX_OPERANDS != 5
6466 # error "MAX_OPERANDS must be 5."
6469 found_reverse_match
= 0;
6470 addr_prefix_disp
= -1;
6472 /* Prepare for mnemonic suffix check. */
6473 memset (&suffix_check
, 0, sizeof (suffix_check
));
6474 switch (mnem_suffix
)
6476 case BYTE_MNEM_SUFFIX
:
6477 suffix_check
.no_bsuf
= 1;
6479 case WORD_MNEM_SUFFIX
:
6480 suffix_check
.no_wsuf
= 1;
6482 case SHORT_MNEM_SUFFIX
:
6483 suffix_check
.no_ssuf
= 1;
6485 case LONG_MNEM_SUFFIX
:
6486 suffix_check
.no_lsuf
= 1;
6488 case QWORD_MNEM_SUFFIX
:
6489 suffix_check
.no_qsuf
= 1;
6492 /* NB: In Intel syntax, normally we can check for memory operand
6493 size when there is no mnemonic suffix. But jmp and call have
6494 2 different encodings with Dword memory operand size, one with
6495 No_ldSuf and the other without. i.suffix is set to
6496 LONG_DOUBLE_MNEM_SUFFIX to skip the one with No_ldSuf. */
6497 if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
6498 suffix_check
.no_ldsuf
= 1;
6501 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
6503 addr_prefix_disp
= -1;
6504 found_reverse_match
= 0;
6506 /* Must have right number of operands. */
6507 if (i
.operands
!= t
->operands
)
6510 /* Check processor support. */
6511 specific_error
= progress (unsupported
);
6512 if (cpu_flags_match (t
) != CPU_FLAGS_PERFECT_MATCH
)
6515 /* Check AT&T mnemonic. */
6516 specific_error
= progress (unsupported_with_intel_mnemonic
);
6517 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
6520 /* Check AT&T/Intel syntax. */
6521 specific_error
= progress (unsupported_syntax
);
6522 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
6523 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
))
6526 /* Check Intel64/AMD64 ISA. */
6530 /* Default: Don't accept Intel64. */
6531 if (t
->opcode_modifier
.isa64
== INTEL64
)
6535 /* -mamd64: Don't accept Intel64 and Intel64 only. */
6536 if (t
->opcode_modifier
.isa64
>= INTEL64
)
6540 /* -mintel64: Don't accept AMD64. */
6541 if (t
->opcode_modifier
.isa64
== AMD64
&& flag_code
== CODE_64BIT
)
6546 /* Check the suffix. */
6547 specific_error
= progress (invalid_instruction_suffix
);
6548 if ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
6549 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
6550 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
6551 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
6552 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
6553 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
))
6556 specific_error
= progress (operand_size_mismatch
);
6557 size_match
= operand_size_match (t
);
6561 /* This is intentionally not
6563 if (i.jumpabsolute != (t->opcode_modifier.jump == JUMP_ABSOLUTE))
6565 as the case of a missing * on the operand is accepted (perhaps with
6566 a warning, issued further down). */
6567 specific_error
= progress (operand_type_mismatch
);
6568 if (i
.jumpabsolute
&& t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
6571 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6572 operand_types
[j
] = t
->operand_types
[j
];
6574 /* In general, don't allow
6575 - 64-bit operands outside of 64-bit mode,
6576 - 32-bit operands on pre-386. */
6577 specific_error
= progress (mnem_suffix
? invalid_instruction_suffix
6578 : operand_size_mismatch
);
6579 j
= i
.imm_operands
+ (t
->operands
> i
.imm_operands
+ 1);
6580 if (((i
.suffix
== QWORD_MNEM_SUFFIX
6581 && flag_code
!= CODE_64BIT
6582 && !(t
->opcode_modifier
.opcodespace
== SPACE_0F
6583 && t
->base_opcode
== 0xc7
6584 && t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
6585 && t
->extension_opcode
== 1) /* cmpxchg8b */)
6586 || (i
.suffix
== LONG_MNEM_SUFFIX
6587 && !cpu_arch_flags
.bitfield
.cpui386
))
6589 ? (t
->opcode_modifier
.mnemonicsize
!= IGNORESIZE
6590 && !intel_float_operand (t
->name
))
6591 : intel_float_operand (t
->name
) != 2)
6592 && (t
->operands
== i
.imm_operands
6593 || (operand_types
[i
.imm_operands
].bitfield
.class != RegMMX
6594 && operand_types
[i
.imm_operands
].bitfield
.class != RegSIMD
6595 && operand_types
[i
.imm_operands
].bitfield
.class != RegMask
)
6596 || (operand_types
[j
].bitfield
.class != RegMMX
6597 && operand_types
[j
].bitfield
.class != RegSIMD
6598 && operand_types
[j
].bitfield
.class != RegMask
))
6599 && !t
->opcode_modifier
.sib
)
6602 /* Do not verify operands when there are none. */
6605 if (VEX_check_encoding (t
))
6607 specific_error
= progress (i
.error
);
6611 /* We've found a match; break out of loop. */
6615 if (!t
->opcode_modifier
.jump
6616 || t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)
6618 /* There should be only one Disp operand. */
6619 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6620 if (operand_type_check (operand_types
[j
], disp
))
6622 if (j
< MAX_OPERANDS
)
6624 bool override
= (i
.prefix
[ADDR_PREFIX
] != 0);
6626 addr_prefix_disp
= j
;
6628 /* Address size prefix will turn Disp64 operand into Disp32 and
6629 Disp32/Disp16 one into Disp16/Disp32 respectively. */
6633 override
= !override
;
6636 if (operand_types
[j
].bitfield
.disp32
6637 && operand_types
[j
].bitfield
.disp16
)
6639 operand_types
[j
].bitfield
.disp16
= override
;
6640 operand_types
[j
].bitfield
.disp32
= !override
;
6642 gas_assert (!operand_types
[j
].bitfield
.disp64
);
6646 if (operand_types
[j
].bitfield
.disp64
)
6648 gas_assert (!operand_types
[j
].bitfield
.disp32
);
6649 operand_types
[j
].bitfield
.disp32
= override
;
6650 operand_types
[j
].bitfield
.disp64
= !override
;
6652 operand_types
[j
].bitfield
.disp16
= 0;
6660 case BFD_RELOC_386_GOT32
:
6661 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
6662 if (t
->base_opcode
== 0xa0
6663 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
)
6666 case BFD_RELOC_386_TLS_GOTIE
:
6667 case BFD_RELOC_386_TLS_LE_32
:
6668 case BFD_RELOC_X86_64_GOTTPOFF
:
6669 case BFD_RELOC_X86_64_TLSLD
:
6670 /* Don't allow KMOV in TLS code sequences. */
6671 if (t
->opcode_modifier
.vex
)
6678 /* We check register size if needed. */
6679 if (t
->opcode_modifier
.checkregsize
)
6681 check_register
= (1 << t
->operands
) - 1;
6682 if (i
.broadcast
.type
|| i
.broadcast
.bytes
)
6683 check_register
&= ~(1 << i
.broadcast
.operand
);
6688 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
6689 switch (t
->operands
)
6692 if (!operand_type_match (overlap0
, i
.types
[0]))
6696 /* xchg %eax, %eax is a special case. It is an alias for nop
6697 only in 32bit mode and we can use opcode 0x90. In 64bit
6698 mode, we can't use 0x90 for xchg %eax, %eax since it should
6699 zero-extend %eax to %rax. */
6700 if (flag_code
== CODE_64BIT
6701 && t
->base_opcode
== 0x90
6702 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
6703 && i
.types
[0].bitfield
.instance
== Accum
6704 && i
.types
[0].bitfield
.dword
6705 && i
.types
[1].bitfield
.instance
== Accum
6706 && i
.types
[1].bitfield
.dword
)
6708 /* xrelease mov %eax, <disp> is another special case. It must not
6709 match the accumulator-only encoding of mov. */
6710 if (flag_code
!= CODE_64BIT
6712 && t
->base_opcode
== 0xa0
6713 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
6714 && i
.types
[0].bitfield
.instance
== Accum
6715 && (i
.flags
[1] & Operand_Mem
))
6720 if (!(size_match
& MATCH_STRAIGHT
))
6722 /* Reverse direction of operands if swapping is possible in the first
6723 place (operands need to be symmetric) and
6724 - the load form is requested, and the template is a store form,
6725 - the store form is requested, and the template is a load form,
6726 - the non-default (swapped) form is requested. */
6727 overlap1
= operand_type_and (operand_types
[0], operand_types
[1]);
6728 if (t
->opcode_modifier
.d
&& i
.reg_operands
== i
.operands
6729 && !operand_type_all_zero (&overlap1
))
6730 switch (i
.dir_encoding
)
6732 case dir_encoding_load
:
6733 if (operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6734 || t
->opcode_modifier
.regmem
)
6738 case dir_encoding_store
:
6739 if (!operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6740 && !t
->opcode_modifier
.regmem
)
6744 case dir_encoding_swap
:
6747 case dir_encoding_default
:
6750 /* If we want store form, we skip the current load. */
6751 if ((i
.dir_encoding
== dir_encoding_store
6752 || i
.dir_encoding
== dir_encoding_swap
)
6753 && i
.mem_operands
== 0
6754 && t
->opcode_modifier
.load
)
6759 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
6760 if (!operand_type_match (overlap0
, i
.types
[0])
6761 || !operand_type_match (overlap1
, i
.types
[1])
6762 || ((check_register
& 3) == 3
6763 && !operand_type_register_match (i
.types
[0],
6768 specific_error
= progress (i
.error
);
6770 /* Check if other direction is valid ... */
6771 if (!t
->opcode_modifier
.d
)
6775 if (!(size_match
& MATCH_REVERSE
))
6777 /* Try reversing direction of operands. */
6778 j
= t
->opcode_modifier
.vexsources
? 1 : i
.operands
- 1;
6779 overlap0
= operand_type_and (i
.types
[0], operand_types
[j
]);
6780 overlap1
= operand_type_and (i
.types
[j
], operand_types
[0]);
6781 overlap2
= operand_type_and (i
.types
[1], operand_types
[1]);
6782 gas_assert (t
->operands
!= 3 || !check_register
);
6783 if (!operand_type_match (overlap0
, i
.types
[0])
6784 || !operand_type_match (overlap1
, i
.types
[j
])
6785 || (t
->operands
== 3
6786 && !operand_type_match (overlap2
, i
.types
[1]))
6788 && !operand_type_register_match (i
.types
[0],
6793 /* Does not match either direction. */
6794 specific_error
= progress (i
.error
);
6797 /* found_reverse_match holds which of D or FloatR
6799 if (!t
->opcode_modifier
.d
)
6800 found_reverse_match
= 0;
6801 else if (operand_types
[0].bitfield
.tbyte
)
6802 found_reverse_match
= Opcode_FloatD
;
6803 else if (t
->opcode_modifier
.vexsources
)
6805 found_reverse_match
= Opcode_VexW
;
6806 goto check_operands_345
;
6808 else if (t
->opcode_modifier
.opcodespace
!= SPACE_BASE
6809 && (t
->opcode_modifier
.opcodespace
!= SPACE_0F
6810 /* MOV to/from CR/DR/TR, as an exception, follow
6811 the base opcode space encoding model. */
6812 || (t
->base_opcode
| 7) != 0x27))
6813 found_reverse_match
= (t
->base_opcode
& 0xee) != 0x6e
6814 ? Opcode_ExtD
: Opcode_SIMD_IntD
;
6816 found_reverse_match
= Opcode_D
;
6817 if (t
->opcode_modifier
.floatr
)
6818 found_reverse_match
|= Opcode_FloatR
;
6822 /* Found a forward 2 operand match here. */
6824 switch (t
->operands
)
6827 overlap4
= operand_type_and (i
.types
[4], operand_types
[4]);
6828 if (!operand_type_match (overlap4
, i
.types
[4])
6829 || !operand_type_register_match (i
.types
[3],
6834 specific_error
= progress (i
.error
);
6839 overlap3
= operand_type_and (i
.types
[3], operand_types
[3]);
6840 if (!operand_type_match (overlap3
, i
.types
[3])
6841 || ((check_register
& 0xa) == 0xa
6842 && !operand_type_register_match (i
.types
[1],
6846 || ((check_register
& 0xc) == 0xc
6847 && !operand_type_register_match (i
.types
[2],
6852 specific_error
= progress (i
.error
);
6857 overlap2
= operand_type_and (i
.types
[2], operand_types
[2]);
6858 if (!operand_type_match (overlap2
, i
.types
[2])
6859 || ((check_register
& 5) == 5
6860 && !operand_type_register_match (i
.types
[0],
6864 || ((check_register
& 6) == 6
6865 && !operand_type_register_match (i
.types
[1],
6870 specific_error
= progress (i
.error
);
6876 /* Found either forward/reverse 2, 3 or 4 operand match here:
6877 slip through to break. */
6880 /* Check if VEX/EVEX encoding requirements can be satisfied. */
6881 if (VEX_check_encoding (t
))
6883 specific_error
= progress (i
.error
);
6887 /* Check if vector operands are valid. */
6888 if (check_VecOperands (t
))
6890 specific_error
= progress (i
.error
);
6894 /* We've found a match; break out of loop. */
6900 if (t
== current_templates
->end
)
6902 /* We found no match. */
6903 const char *err_msg
;
6904 switch (specific_error
)
6908 case operand_size_mismatch
:
6909 err_msg
= _("operand size mismatch");
6911 case operand_type_mismatch
:
6912 err_msg
= _("operand type mismatch");
6914 case register_type_mismatch
:
6915 err_msg
= _("register type mismatch");
6917 case number_of_operands_mismatch
:
6918 err_msg
= _("number of operands mismatch");
6920 case invalid_instruction_suffix
:
6921 err_msg
= _("invalid instruction suffix");
6924 err_msg
= _("constant doesn't fit in 4 bits");
6926 case unsupported_with_intel_mnemonic
:
6927 err_msg
= _("unsupported with Intel mnemonic");
6929 case unsupported_syntax
:
6930 err_msg
= _("unsupported syntax");
6933 as_bad (_("unsupported instruction `%s'"),
6934 current_templates
->start
->name
);
6936 case invalid_sib_address
:
6937 err_msg
= _("invalid SIB address");
6939 case invalid_vsib_address
:
6940 err_msg
= _("invalid VSIB address");
6942 case invalid_vector_register_set
:
6943 err_msg
= _("mask, index, and destination registers must be distinct");
6945 case invalid_tmm_register_set
:
6946 err_msg
= _("all tmm registers must be distinct");
6948 case invalid_dest_and_src_register_set
:
6949 err_msg
= _("destination and source registers must be distinct");
6951 case unsupported_vector_index_register
:
6952 err_msg
= _("unsupported vector index register");
6954 case unsupported_broadcast
:
6955 err_msg
= _("unsupported broadcast");
6957 case broadcast_needed
:
6958 err_msg
= _("broadcast is needed for operand of such type");
6960 case unsupported_masking
:
6961 err_msg
= _("unsupported masking");
6963 case mask_not_on_destination
:
6964 err_msg
= _("mask not on destination operand");
6966 case no_default_mask
:
6967 err_msg
= _("default mask isn't allowed");
6969 case unsupported_rc_sae
:
6970 err_msg
= _("unsupported static rounding/sae");
6972 case invalid_register_operand
:
6973 err_msg
= _("invalid register operand");
6976 as_bad (_("%s for `%s'"), err_msg
,
6977 current_templates
->start
->name
);
6981 if (!quiet_warnings
)
6984 && (i
.jumpabsolute
!= (t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)))
6985 as_warn (_("indirect %s without `*'"), t
->name
);
6987 if (t
->opcode_modifier
.isprefix
6988 && t
->opcode_modifier
.mnemonicsize
== IGNORESIZE
)
6990 /* Warn them that a data or address size prefix doesn't
6991 affect assembly of the next line of code. */
6992 as_warn (_("stand-alone `%s' prefix"), t
->name
);
6996 /* Copy the template we found. */
6997 install_template (t
);
6999 if (addr_prefix_disp
!= -1)
7000 i
.tm
.operand_types
[addr_prefix_disp
]
7001 = operand_types
[addr_prefix_disp
];
7003 switch (found_reverse_match
)
7009 /* If we found a reverse match we must alter the opcode direction
7010 bit and clear/flip the regmem modifier one. found_reverse_match
7011 holds bits to change (different for int & float insns). */
7013 i
.tm
.base_opcode
^= found_reverse_match
;
7015 i
.tm
.operand_types
[0] = operand_types
[i
.operands
- 1];
7016 i
.tm
.operand_types
[i
.operands
- 1] = operand_types
[0];
7018 /* Certain SIMD insns have their load forms specified in the opcode
7019 table, and hence we need to _set_ RegMem instead of clearing it.
7020 We need to avoid setting the bit though on insns like KMOVW. */
7021 i
.tm
.opcode_modifier
.regmem
7022 = i
.tm
.opcode_modifier
.modrm
&& i
.tm
.opcode_modifier
.d
7023 && i
.tm
.operands
> 2U - i
.tm
.opcode_modifier
.sse2avx
7024 && !i
.tm
.opcode_modifier
.regmem
;
7028 /* Only the first two register operands need reversing, alongside
7030 i
.tm
.opcode_modifier
.vexw
^= VEXW0
^ VEXW1
;
7032 j
= i
.tm
.operand_types
[0].bitfield
.imm8
;
7033 i
.tm
.operand_types
[j
] = operand_types
[j
+ 1];
7034 i
.tm
.operand_types
[j
+ 1] = operand_types
[j
];
7044 unsigned int es_op
= i
.tm
.opcode_modifier
.isstring
- IS_STRING_ES_OP0
;
7045 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.baseindex
? es_op
: 0;
7047 if (i
.seg
[op
] != NULL
&& i
.seg
[op
] != reg_es
)
7049 as_bad (_("`%s' operand %u must use `%ses' segment"),
7051 intel_syntax
? i
.tm
.operands
- es_op
: es_op
+ 1,
7056 /* There's only ever one segment override allowed per instruction.
7057 This instruction possibly has a legal segment override on the
7058 second operand, so copy the segment to where non-string
7059 instructions store it, allowing common code. */
7060 i
.seg
[op
] = i
.seg
[1];
7066 process_suffix (void)
7068 bool is_crc32
= false, is_movx
= false;
7070 /* If matched instruction specifies an explicit instruction mnemonic
7072 if (i
.tm
.opcode_modifier
.size
== SIZE16
)
7073 i
.suffix
= WORD_MNEM_SUFFIX
;
7074 else if (i
.tm
.opcode_modifier
.size
== SIZE32
)
7075 i
.suffix
= LONG_MNEM_SUFFIX
;
7076 else if (i
.tm
.opcode_modifier
.size
== SIZE64
)
7077 i
.suffix
= QWORD_MNEM_SUFFIX
;
7078 else if (i
.reg_operands
7079 && (i
.operands
> 1 || i
.types
[0].bitfield
.class == Reg
)
7080 && i
.tm
.opcode_modifier
.operandconstraint
!= ADDR_PREFIX_OP_REG
)
7082 unsigned int numop
= i
.operands
;
7085 is_movx
= (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
7086 && (i
.tm
.base_opcode
| 8) == 0xbe)
7087 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
7088 && i
.tm
.base_opcode
== 0x63
7089 && i
.tm
.cpu_flags
.bitfield
.cpu64
);
7092 is_crc32
= (i
.tm
.base_opcode
== 0xf0
7093 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
7094 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
);
7096 /* movsx/movzx want only their source operand considered here, for the
7097 ambiguity checking below. The suffix will be replaced afterwards
7098 to represent the destination (register). */
7099 if (is_movx
&& (i
.tm
.opcode_modifier
.w
|| i
.tm
.base_opcode
== 0x63))
7102 /* crc32 needs REX.W set regardless of suffix / source operand size. */
7103 if (is_crc32
&& i
.tm
.operand_types
[1].bitfield
.qword
)
7106 /* If there's no instruction mnemonic suffix we try to invent one
7107 based on GPR operands. */
7110 /* We take i.suffix from the last register operand specified,
7111 Destination register type is more significant than source
7112 register type. crc32 in SSE4.2 prefers source register
7114 unsigned int op
= is_crc32
? 1 : i
.operands
;
7117 if (i
.tm
.operand_types
[op
].bitfield
.instance
== InstanceNone
7118 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7120 if (i
.types
[op
].bitfield
.class != Reg
)
7122 if (i
.types
[op
].bitfield
.byte
)
7123 i
.suffix
= BYTE_MNEM_SUFFIX
;
7124 else if (i
.types
[op
].bitfield
.word
)
7125 i
.suffix
= WORD_MNEM_SUFFIX
;
7126 else if (i
.types
[op
].bitfield
.dword
)
7127 i
.suffix
= LONG_MNEM_SUFFIX
;
7128 else if (i
.types
[op
].bitfield
.qword
)
7129 i
.suffix
= QWORD_MNEM_SUFFIX
;
7135 /* As an exception, movsx/movzx silently default to a byte source
7137 if (is_movx
&& i
.tm
.opcode_modifier
.w
&& !i
.suffix
&& !intel_syntax
)
7138 i
.suffix
= BYTE_MNEM_SUFFIX
;
7140 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
7142 if (!check_byte_reg ())
7145 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
7147 if (!check_long_reg ())
7150 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
7152 if (!check_qword_reg ())
7155 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
7157 if (!check_word_reg ())
7160 else if (intel_syntax
7161 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
)
7162 /* Do nothing if the instruction is going to ignore the prefix. */
7167 /* Undo the movsx/movzx change done above. */
7170 else if (i
.tm
.opcode_modifier
.mnemonicsize
== DEFAULTSIZE
7173 i
.suffix
= stackop_size
;
7174 if (stackop_size
== LONG_MNEM_SUFFIX
)
7176 /* stackop_size is set to LONG_MNEM_SUFFIX for the
7177 .code16gcc directive to support 16-bit mode with
7178 32-bit address. For IRET without a suffix, generate
7179 16-bit IRET (opcode 0xcf) to return from an interrupt
7181 if (i
.tm
.base_opcode
== 0xcf)
7183 i
.suffix
= WORD_MNEM_SUFFIX
;
7184 as_warn (_("generating 16-bit `iret' for .code16gcc directive"));
7186 /* Warn about changed behavior for segment register push/pop. */
7187 else if ((i
.tm
.base_opcode
| 1) == 0x07)
7188 as_warn (_("generating 32-bit `%s', unlike earlier gas versions"),
7193 && (i
.tm
.opcode_modifier
.jump
== JUMP_ABSOLUTE
7194 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
7195 || i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
7196 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
7197 && i
.tm
.base_opcode
== 0x01 /* [ls][gi]dt */
7198 && i
.tm
.extension_opcode
<= 3)))
7203 if (!i
.tm
.opcode_modifier
.no_qsuf
)
7205 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
7206 || i
.tm
.opcode_modifier
.no_lsuf
)
7207 i
.suffix
= QWORD_MNEM_SUFFIX
;
7212 if (!i
.tm
.opcode_modifier
.no_lsuf
)
7213 i
.suffix
= LONG_MNEM_SUFFIX
;
7216 if (!i
.tm
.opcode_modifier
.no_wsuf
)
7217 i
.suffix
= WORD_MNEM_SUFFIX
;
7223 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
7224 /* Also cover lret/retf/iret in 64-bit mode. */
7225 || (flag_code
== CODE_64BIT
7226 && !i
.tm
.opcode_modifier
.no_lsuf
7227 && !i
.tm
.opcode_modifier
.no_qsuf
))
7228 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
7229 /* Explicit sizing prefixes are assumed to disambiguate insns. */
7230 && !i
.prefix
[DATA_PREFIX
] && !(i
.prefix
[REX_PREFIX
] & REX_W
)
7231 /* Accept FLDENV et al without suffix. */
7232 && (i
.tm
.opcode_modifier
.no_ssuf
|| i
.tm
.opcode_modifier
.floatmf
))
7234 unsigned int suffixes
, evex
= 0;
7236 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
7237 if (!i
.tm
.opcode_modifier
.no_wsuf
)
7239 if (!i
.tm
.opcode_modifier
.no_lsuf
)
7241 if (!i
.tm
.opcode_modifier
.no_ldsuf
)
7243 if (!i
.tm
.opcode_modifier
.no_ssuf
)
7245 if (flag_code
== CODE_64BIT
&& !i
.tm
.opcode_modifier
.no_qsuf
)
7248 /* For [XYZ]MMWORD operands inspect operand sizes. While generally
7249 also suitable for AT&T syntax mode, it was requested that this be
7250 restricted to just Intel syntax. */
7251 if (intel_syntax
&& is_any_vex_encoding (&i
.tm
)
7252 && !i
.broadcast
.type
&& !i
.broadcast
.bytes
)
7256 for (op
= 0; op
< i
.tm
.operands
; ++op
)
7258 if (is_evex_encoding (&i
.tm
)
7259 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
7261 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
7262 i
.tm
.operand_types
[op
].bitfield
.xmmword
= 0;
7263 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
7264 i
.tm
.operand_types
[op
].bitfield
.ymmword
= 0;
7265 if (!i
.tm
.opcode_modifier
.evex
7266 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
7267 i
.tm
.opcode_modifier
.evex
= EVEX512
;
7270 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
7271 + i
.tm
.operand_types
[op
].bitfield
.ymmword
7272 + i
.tm
.operand_types
[op
].bitfield
.zmmword
< 2)
7275 /* Any properly sized operand disambiguates the insn. */
7276 if (i
.types
[op
].bitfield
.xmmword
7277 || i
.types
[op
].bitfield
.ymmword
7278 || i
.types
[op
].bitfield
.zmmword
)
7280 suffixes
&= ~(7 << 6);
7285 if ((i
.flags
[op
] & Operand_Mem
)
7286 && i
.tm
.operand_types
[op
].bitfield
.unspecified
)
7288 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
)
7290 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
7292 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
7294 if (is_evex_encoding (&i
.tm
))
7300 /* Are multiple suffixes / operand sizes allowed? */
7301 if (suffixes
& (suffixes
- 1))
7304 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
7305 || operand_check
== check_error
))
7307 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
7310 if (operand_check
== check_error
)
7312 as_bad (_("no instruction mnemonic suffix given and "
7313 "no register operands; can't size `%s'"), i
.tm
.name
);
7316 if (operand_check
== check_warning
)
7317 as_warn (_("%s; using default for `%s'"),
7319 ? _("ambiguous operand size")
7320 : _("no instruction mnemonic suffix given and "
7321 "no register operands"),
7324 if (i
.tm
.opcode_modifier
.floatmf
)
7325 i
.suffix
= SHORT_MNEM_SUFFIX
;
7327 /* handled below */;
7329 i
.tm
.opcode_modifier
.evex
= evex
;
7330 else if (flag_code
== CODE_16BIT
)
7331 i
.suffix
= WORD_MNEM_SUFFIX
;
7332 else if (!i
.tm
.opcode_modifier
.no_lsuf
)
7333 i
.suffix
= LONG_MNEM_SUFFIX
;
7335 i
.suffix
= QWORD_MNEM_SUFFIX
;
7341 /* In Intel syntax, movsx/movzx must have a "suffix" (checked above).
7342 In AT&T syntax, if there is no suffix (warned about above), the default
7343 will be byte extension. */
7344 if (i
.tm
.opcode_modifier
.w
&& i
.suffix
&& i
.suffix
!= BYTE_MNEM_SUFFIX
)
7345 i
.tm
.base_opcode
|= 1;
7347 /* For further processing, the suffix should represent the destination
7348 (register). This is already the case when one was used with
7349 mov[sz][bw]*, but we need to replace it for mov[sz]x, or if there was
7350 no suffix to begin with. */
7351 if (i
.tm
.opcode_modifier
.w
|| i
.tm
.base_opcode
== 0x63 || !i
.suffix
)
7353 if (i
.types
[1].bitfield
.word
)
7354 i
.suffix
= WORD_MNEM_SUFFIX
;
7355 else if (i
.types
[1].bitfield
.qword
)
7356 i
.suffix
= QWORD_MNEM_SUFFIX
;
7358 i
.suffix
= LONG_MNEM_SUFFIX
;
7360 i
.tm
.opcode_modifier
.w
= 0;
7364 if (!i
.tm
.opcode_modifier
.modrm
&& i
.reg_operands
&& i
.tm
.operands
< 3)
7365 i
.short_form
= (i
.tm
.operand_types
[0].bitfield
.class == Reg
)
7366 != (i
.tm
.operand_types
[1].bitfield
.class == Reg
);
7368 /* Change the opcode based on the operand size given by i.suffix. */
7371 /* Size floating point instruction. */
7372 case LONG_MNEM_SUFFIX
:
7373 if (i
.tm
.opcode_modifier
.floatmf
)
7375 i
.tm
.base_opcode
^= 4;
7379 case WORD_MNEM_SUFFIX
:
7380 case QWORD_MNEM_SUFFIX
:
7381 /* It's not a byte, select word/dword operation. */
7382 if (i
.tm
.opcode_modifier
.w
)
7385 i
.tm
.base_opcode
|= 8;
7387 i
.tm
.base_opcode
|= 1;
7390 case SHORT_MNEM_SUFFIX
:
7391 /* Now select between word & dword operations via the operand
7392 size prefix, except for instructions that will ignore this
7394 if (i
.suffix
!= QWORD_MNEM_SUFFIX
7395 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
7396 && !i
.tm
.opcode_modifier
.floatmf
7397 && !is_any_vex_encoding (&i
.tm
)
7398 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
7399 || (flag_code
== CODE_64BIT
7400 && i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)))
7402 unsigned int prefix
= DATA_PREFIX_OPCODE
;
7404 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
) /* jcxz, loop */
7405 prefix
= ADDR_PREFIX_OPCODE
;
7407 if (!add_prefix (prefix
))
7411 /* Set mode64 for an operand. */
7412 if (i
.suffix
== QWORD_MNEM_SUFFIX
7413 && flag_code
== CODE_64BIT
7414 && !i
.tm
.opcode_modifier
.norex64
7415 && !i
.tm
.opcode_modifier
.vexw
7416 /* Special case for xchg %rax,%rax. It is NOP and doesn't
7418 && ! (i
.operands
== 2
7419 && i
.tm
.base_opcode
== 0x90
7420 && i
.tm
.extension_opcode
== None
7421 && i
.types
[0].bitfield
.instance
== Accum
7422 && i
.types
[0].bitfield
.qword
7423 && i
.types
[1].bitfield
.instance
== Accum
7424 && i
.types
[1].bitfield
.qword
))
7430 /* Select word/dword/qword operation with explicit data sizing prefix
7431 when there are no suitable register operands. */
7432 if (i
.tm
.opcode_modifier
.w
7433 && (i
.prefix
[DATA_PREFIX
] || (i
.prefix
[REX_PREFIX
] & REX_W
))
7435 || (i
.reg_operands
== 1
7437 && (i
.tm
.operand_types
[0].bitfield
.instance
== RegC
7439 || i
.tm
.operand_types
[0].bitfield
.instance
== RegD
7440 || i
.tm
.operand_types
[1].bitfield
.instance
== RegD
7443 i
.tm
.base_opcode
|= 1;
7447 if (i
.tm
.opcode_modifier
.operandconstraint
== ADDR_PREFIX_OP_REG
)
7449 gas_assert (!i
.suffix
);
7450 gas_assert (i
.reg_operands
);
7452 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7455 /* The address size override prefix changes the size of the
7457 if (flag_code
== CODE_64BIT
7458 && i
.op
[0].regs
->reg_type
.bitfield
.word
)
7460 as_bad (_("16-bit addressing unavailable for `%s'"),
7465 if ((flag_code
== CODE_32BIT
7466 ? i
.op
[0].regs
->reg_type
.bitfield
.word
7467 : i
.op
[0].regs
->reg_type
.bitfield
.dword
)
7468 && !add_prefix (ADDR_PREFIX_OPCODE
))
7473 /* Check invalid register operand when the address size override
7474 prefix changes the size of register operands. */
7476 enum { need_word
, need_dword
, need_qword
} need
;
7478 /* Check the register operand for the address size prefix if
7479 the memory operand has no real registers, like symbol, DISP
7480 or bogus (x32-only) symbol(%rip) when symbol(%eip) is meant. */
7481 if (i
.mem_operands
== 1
7482 && i
.reg_operands
== 1
7484 && i
.types
[1].bitfield
.class == Reg
7485 && (flag_code
== CODE_32BIT
7486 ? i
.op
[1].regs
->reg_type
.bitfield
.word
7487 : i
.op
[1].regs
->reg_type
.bitfield
.dword
)
7488 && ((i
.base_reg
== NULL
&& i
.index_reg
== NULL
)
7489 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7490 || (x86_elf_abi
== X86_64_X32_ABI
7492 && i
.base_reg
->reg_num
== RegIP
7493 && i
.base_reg
->reg_type
.bitfield
.qword
))
7497 && !add_prefix (ADDR_PREFIX_OPCODE
))
7500 if (flag_code
== CODE_32BIT
)
7501 need
= i
.prefix
[ADDR_PREFIX
] ? need_word
: need_dword
;
7502 else if (i
.prefix
[ADDR_PREFIX
])
7505 need
= flag_code
== CODE_64BIT
? need_qword
: need_word
;
7507 for (op
= 0; op
< i
.operands
; op
++)
7509 if (i
.types
[op
].bitfield
.class != Reg
)
7515 if (i
.op
[op
].regs
->reg_type
.bitfield
.word
)
7519 if (i
.op
[op
].regs
->reg_type
.bitfield
.dword
)
7523 if (i
.op
[op
].regs
->reg_type
.bitfield
.qword
)
7528 as_bad (_("invalid register operand size for `%s'"),
7539 check_byte_reg (void)
7543 for (op
= i
.operands
; --op
>= 0;)
7545 /* Skip non-register operands. */
7546 if (i
.types
[op
].bitfield
.class != Reg
)
7549 /* If this is an eight bit register, it's OK. If it's the 16 or
7550 32 bit version of an eight bit register, we will just use the
7551 low portion, and that's OK too. */
7552 if (i
.types
[op
].bitfield
.byte
)
7555 /* I/O port address operands are OK too. */
7556 if (i
.tm
.operand_types
[op
].bitfield
.instance
== RegD
7557 && i
.tm
.operand_types
[op
].bitfield
.word
)
7560 /* crc32 only wants its source operand checked here. */
7561 if (i
.tm
.base_opcode
== 0xf0
7562 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
7563 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
7567 /* Any other register is bad. */
7568 as_bad (_("`%s%s' not allowed with `%s%c'"),
7569 register_prefix
, i
.op
[op
].regs
->reg_name
,
7570 i
.tm
.name
, i
.suffix
);
7577 check_long_reg (void)
7581 for (op
= i
.operands
; --op
>= 0;)
7582 /* Skip non-register operands. */
7583 if (i
.types
[op
].bitfield
.class != Reg
)
7585 /* Reject eight bit registers, except where the template requires
7586 them. (eg. movzb) */
7587 else if (i
.types
[op
].bitfield
.byte
7588 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7589 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7590 && (i
.tm
.operand_types
[op
].bitfield
.word
7591 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7593 as_bad (_("`%s%s' not allowed with `%s%c'"),
7595 i
.op
[op
].regs
->reg_name
,
7600 /* Error if the e prefix on a general reg is missing. */
7601 else if (i
.types
[op
].bitfield
.word
7602 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7603 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7604 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7606 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7607 register_prefix
, i
.op
[op
].regs
->reg_name
,
7611 /* Warn if the r prefix on a general reg is present. */
7612 else if (i
.types
[op
].bitfield
.qword
7613 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7614 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7615 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7617 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7618 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
7625 check_qword_reg (void)
7629 for (op
= i
.operands
; --op
>= 0; )
7630 /* Skip non-register operands. */
7631 if (i
.types
[op
].bitfield
.class != Reg
)
7633 /* Reject eight bit registers, except where the template requires
7634 them. (eg. movzb) */
7635 else if (i
.types
[op
].bitfield
.byte
7636 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7637 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7638 && (i
.tm
.operand_types
[op
].bitfield
.word
7639 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7641 as_bad (_("`%s%s' not allowed with `%s%c'"),
7643 i
.op
[op
].regs
->reg_name
,
7648 /* Warn if the r prefix on a general reg is missing. */
7649 else if ((i
.types
[op
].bitfield
.word
7650 || i
.types
[op
].bitfield
.dword
)
7651 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7652 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7653 && i
.tm
.operand_types
[op
].bitfield
.qword
)
7655 /* Prohibit these changes in the 64bit mode, since the
7656 lowering is more complicated. */
7657 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7658 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
7665 check_word_reg (void)
7668 for (op
= i
.operands
; --op
>= 0;)
7669 /* Skip non-register operands. */
7670 if (i
.types
[op
].bitfield
.class != Reg
)
7672 /* Reject eight bit registers, except where the template requires
7673 them. (eg. movzb) */
7674 else if (i
.types
[op
].bitfield
.byte
7675 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7676 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7677 && (i
.tm
.operand_types
[op
].bitfield
.word
7678 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7680 as_bad (_("`%s%s' not allowed with `%s%c'"),
7682 i
.op
[op
].regs
->reg_name
,
7687 /* Error if the e or r prefix on a general reg is present. */
7688 else if ((i
.types
[op
].bitfield
.dword
7689 || i
.types
[op
].bitfield
.qword
)
7690 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7691 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7692 && i
.tm
.operand_types
[op
].bitfield
.word
)
7694 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7695 register_prefix
, i
.op
[op
].regs
->reg_name
,
7703 update_imm (unsigned int j
)
7705 i386_operand_type overlap
= i
.types
[j
];
7706 if (overlap
.bitfield
.imm8
7707 + overlap
.bitfield
.imm8s
7708 + overlap
.bitfield
.imm16
7709 + overlap
.bitfield
.imm32
7710 + overlap
.bitfield
.imm32s
7711 + overlap
.bitfield
.imm64
> 1)
7715 i386_operand_type temp
;
7717 operand_type_set (&temp
, 0);
7718 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
7720 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
7721 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
7723 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
7724 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
7725 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
7727 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
7728 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
7731 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
7734 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
7735 || operand_type_equal (&overlap
, &imm16_32
)
7736 || operand_type_equal (&overlap
, &imm16_32s
))
7738 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
7743 else if (i
.prefix
[REX_PREFIX
] & REX_W
)
7744 overlap
= operand_type_and (overlap
, imm32s
);
7745 else if (i
.prefix
[DATA_PREFIX
])
7746 overlap
= operand_type_and (overlap
,
7747 flag_code
!= CODE_16BIT
? imm16
: imm32
);
7748 if (overlap
.bitfield
.imm8
7749 + overlap
.bitfield
.imm8s
7750 + overlap
.bitfield
.imm16
7751 + overlap
.bitfield
.imm32
7752 + overlap
.bitfield
.imm32s
7753 + overlap
.bitfield
.imm64
!= 1)
7755 as_bad (_("no instruction mnemonic suffix given; "
7756 "can't determine immediate size"));
7760 i
.types
[j
] = overlap
;
7770 /* Update the first 2 immediate operands. */
7771 n
= i
.operands
> 2 ? 2 : i
.operands
;
7774 for (j
= 0; j
< n
; j
++)
7775 if (update_imm (j
) == 0)
7778 /* The 3rd operand can't be immediate operand. */
7779 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
7786 process_operands (void)
7788 /* Default segment register this instruction will use for memory
7789 accesses. 0 means unknown. This is only for optimizing out
7790 unnecessary segment overrides. */
7791 const reg_entry
*default_seg
= NULL
;
7793 if (i
.tm
.opcode_modifier
.sse2avx
)
7795 /* Legacy encoded insns allow explicit REX prefixes, so these prefixes
7797 i
.rex
|= i
.prefix
[REX_PREFIX
] & (REX_W
| REX_R
| REX_X
| REX_B
);
7798 i
.prefix
[REX_PREFIX
] = 0;
7801 /* ImmExt should be processed after SSE2AVX. */
7802 else if (i
.tm
.opcode_modifier
.immext
)
7805 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
7807 unsigned int dupl
= i
.operands
;
7808 unsigned int dest
= dupl
- 1;
7811 /* The destination must be an xmm register. */
7812 gas_assert (i
.reg_operands
7813 && MAX_OPERANDS
> dupl
7814 && operand_type_equal (&i
.types
[dest
], ®xmm
));
7816 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7817 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7819 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
7821 /* Keep xmm0 for instructions with VEX prefix and 3
7823 i
.tm
.operand_types
[0].bitfield
.instance
= InstanceNone
;
7824 i
.tm
.operand_types
[0].bitfield
.class = RegSIMD
;
7829 /* We remove the first xmm0 and keep the number of
7830 operands unchanged, which in fact duplicates the
7832 for (j
= 1; j
< i
.operands
; j
++)
7834 i
.op
[j
- 1] = i
.op
[j
];
7835 i
.types
[j
- 1] = i
.types
[j
];
7836 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7837 i
.flags
[j
- 1] = i
.flags
[j
];
7841 else if (i
.tm
.opcode_modifier
.operandconstraint
== IMPLICIT_1ST_XMM0
)
7843 gas_assert ((MAX_OPERANDS
- 1) > dupl
7844 && (i
.tm
.opcode_modifier
.vexsources
7847 /* Add the implicit xmm0 for instructions with VEX prefix
7849 for (j
= i
.operands
; j
> 0; j
--)
7851 i
.op
[j
] = i
.op
[j
- 1];
7852 i
.types
[j
] = i
.types
[j
- 1];
7853 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
7854 i
.flags
[j
] = i
.flags
[j
- 1];
7857 = (const reg_entry
*) str_hash_find (reg_hash
, "xmm0");
7858 i
.types
[0] = regxmm
;
7859 i
.tm
.operand_types
[0] = regxmm
;
7862 i
.reg_operands
+= 2;
7867 i
.op
[dupl
] = i
.op
[dest
];
7868 i
.types
[dupl
] = i
.types
[dest
];
7869 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7870 i
.flags
[dupl
] = i
.flags
[dest
];
7879 i
.op
[dupl
] = i
.op
[dest
];
7880 i
.types
[dupl
] = i
.types
[dest
];
7881 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7882 i
.flags
[dupl
] = i
.flags
[dest
];
7885 if (i
.tm
.opcode_modifier
.immext
)
7888 else if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7889 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7893 for (j
= 1; j
< i
.operands
; j
++)
7895 i
.op
[j
- 1] = i
.op
[j
];
7896 i
.types
[j
- 1] = i
.types
[j
];
7898 /* We need to adjust fields in i.tm since they are used by
7899 build_modrm_byte. */
7900 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7902 i
.flags
[j
- 1] = i
.flags
[j
];
7909 else if (i
.tm
.opcode_modifier
.operandconstraint
== IMPLICIT_QUAD_GROUP
)
7911 unsigned int regnum
, first_reg_in_group
, last_reg_in_group
;
7913 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
7914 gas_assert (i
.operands
>= 2 && i
.types
[1].bitfield
.class == RegSIMD
);
7915 regnum
= register_number (i
.op
[1].regs
);
7916 first_reg_in_group
= regnum
& ~3;
7917 last_reg_in_group
= first_reg_in_group
+ 3;
7918 if (regnum
!= first_reg_in_group
)
7919 as_warn (_("source register `%s%s' implicitly denotes"
7920 " `%s%.3s%u' to `%s%.3s%u' source group in `%s'"),
7921 register_prefix
, i
.op
[1].regs
->reg_name
,
7922 register_prefix
, i
.op
[1].regs
->reg_name
, first_reg_in_group
,
7923 register_prefix
, i
.op
[1].regs
->reg_name
, last_reg_in_group
,
7926 else if (i
.tm
.opcode_modifier
.operandconstraint
== REG_KLUDGE
)
7928 /* The imul $imm, %reg instruction is converted into
7929 imul $imm, %reg, %reg, and the clr %reg instruction
7930 is converted into xor %reg, %reg. */
7932 unsigned int first_reg_op
;
7934 if (operand_type_check (i
.types
[0], reg
))
7938 /* Pretend we saw the extra register operand. */
7939 gas_assert (i
.reg_operands
== 1
7940 && i
.op
[first_reg_op
+ 1].regs
== 0);
7941 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
7942 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
7947 if (i
.tm
.opcode_modifier
.modrm
)
7949 /* The opcode is completed (modulo i.tm.extension_opcode which
7950 must be put into the modrm byte). Now, we make the modrm and
7951 index base bytes based on all the info we've collected. */
7953 default_seg
= build_modrm_byte ();
7955 else if (i
.types
[0].bitfield
.class == SReg
)
7957 if (flag_code
!= CODE_64BIT
7958 ? i
.tm
.base_opcode
== POP_SEG_SHORT
7959 && i
.op
[0].regs
->reg_num
== 1
7960 : (i
.tm
.base_opcode
| 1) == (POP_SEG386_SHORT
& 0xff)
7961 && i
.op
[0].regs
->reg_num
< 4)
7963 as_bad (_("you can't `%s %s%s'"),
7964 i
.tm
.name
, register_prefix
, i
.op
[0].regs
->reg_name
);
7967 if (i
.op
[0].regs
->reg_num
> 3
7968 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
7970 i
.tm
.base_opcode
^= (POP_SEG_SHORT
^ POP_SEG386_SHORT
) & 0xff;
7971 i
.tm
.opcode_modifier
.opcodespace
= SPACE_0F
;
7973 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
7975 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
7976 && (i
.tm
.base_opcode
& ~3) == MOV_AX_DISP32
)
7978 default_seg
= reg_ds
;
7980 else if (i
.tm
.opcode_modifier
.isstring
)
7982 /* For the string instructions that allow a segment override
7983 on one of their operands, the default segment is ds. */
7984 default_seg
= reg_ds
;
7986 else if (i
.short_form
)
7988 /* The register or float register operand is in operand
7990 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.class != Reg
;
7992 /* Register goes in low 3 bits of opcode. */
7993 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
7994 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7996 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.operandconstraint
== UGH
)
7998 /* Warn about some common errors, but press on regardless.
7999 The first case can be generated by gcc (<= 2.8.1). */
8000 if (i
.operands
== 2)
8002 /* Reversed arguments on faddp, fsubp, etc. */
8003 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
8004 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
8005 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
8009 /* Extraneous `l' suffix on fp insn. */
8010 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
8011 register_prefix
, i
.op
[0].regs
->reg_name
);
8016 if ((i
.seg
[0] || i
.prefix
[SEG_PREFIX
])
8017 && i
.tm
.base_opcode
== 0x8d /* lea */
8018 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
8019 && !is_any_vex_encoding(&i
.tm
))
8021 if (!quiet_warnings
)
8022 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
8026 i
.prefix
[SEG_PREFIX
] = 0;
8030 /* If a segment was explicitly specified, and the specified segment
8031 is neither the default nor the one already recorded from a prefix,
8032 use an opcode prefix to select it. If we never figured out what
8033 the default segment is, then default_seg will be zero at this
8034 point, and the specified segment prefix will always be used. */
8036 && i
.seg
[0] != default_seg
8037 && i386_seg_prefixes
[i
.seg
[0]->reg_num
] != i
.prefix
[SEG_PREFIX
])
8039 if (!add_prefix (i386_seg_prefixes
[i
.seg
[0]->reg_num
]))
8045 static INLINE
void set_rex_vrex (const reg_entry
*r
, unsigned int rex_bit
,
8048 if (r
->reg_flags
& RegRex
)
8050 if (i
.rex
& rex_bit
)
8051 as_bad (_("same type of prefix used twice"));
8054 else if (do_sse2avx
&& (i
.rex
& rex_bit
) && i
.vex
.register_specifier
)
8056 gas_assert (i
.vex
.register_specifier
== r
);
8057 i
.vex
.register_specifier
+= 8;
8060 if (r
->reg_flags
& RegVRex
)
8064 static const reg_entry
*
8065 build_modrm_byte (void)
8067 const reg_entry
*default_seg
= NULL
;
8068 unsigned int source
, dest
;
8071 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
8074 unsigned int nds
, reg_slot
;
8077 dest
= i
.operands
- 1;
8080 /* There are 2 kinds of instructions:
8081 1. 5 operands: 4 register operands or 3 register operands
8082 plus 1 memory operand plus one Imm4 operand, VexXDS, and
8083 VexW0 or VexW1. The destination must be either XMM, YMM or
8085 2. 4 operands: 4 register operands or 3 register operands
8086 plus 1 memory operand, with VexXDS. */
8087 gas_assert ((i
.reg_operands
== 4
8088 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
8089 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
8090 && i
.tm
.opcode_modifier
.vexw
8091 && i
.tm
.operand_types
[dest
].bitfield
.class == RegSIMD
);
8093 /* If VexW1 is set, the first non-immediate operand is the source and
8094 the second non-immediate one is encoded in the immediate operand. */
8095 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
8097 source
= i
.imm_operands
;
8098 reg_slot
= i
.imm_operands
+ 1;
8102 source
= i
.imm_operands
+ 1;
8103 reg_slot
= i
.imm_operands
;
8106 if (i
.imm_operands
== 0)
8108 /* When there is no immediate operand, generate an 8bit
8109 immediate operand to encode the first operand. */
8110 exp
= &im_expressions
[i
.imm_operands
++];
8111 i
.op
[i
.operands
].imms
= exp
;
8112 i
.types
[i
.operands
].bitfield
.imm8
= 1;
8115 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
8116 exp
->X_op
= O_constant
;
8117 exp
->X_add_number
= register_number (i
.op
[reg_slot
].regs
) << 4;
8118 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
8122 gas_assert (i
.imm_operands
== 1);
8123 gas_assert (fits_in_imm4 (i
.op
[0].imms
->X_add_number
));
8124 gas_assert (!i
.tm
.opcode_modifier
.immext
);
8126 /* Turn on Imm8 again so that output_imm will generate it. */
8127 i
.types
[0].bitfield
.imm8
= 1;
8129 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
8130 i
.op
[0].imms
->X_add_number
8131 |= register_number (i
.op
[reg_slot
].regs
) << 4;
8132 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
8135 gas_assert (i
.tm
.operand_types
[nds
].bitfield
.class == RegSIMD
);
8136 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
8141 /* i.reg_operands MUST be the number of real register operands;
8142 implicit registers do not count. If there are 3 register
8143 operands, it must be a instruction with VexNDS. For a
8144 instruction with VexNDD, the destination register is encoded
8145 in VEX prefix. If there are 4 register operands, it must be
8146 a instruction with VEX prefix and 3 sources. */
8147 if (i
.mem_operands
== 0
8148 && ((i
.reg_operands
== 2
8149 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
8150 || (i
.reg_operands
== 3
8151 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8152 || (i
.reg_operands
== 4 && vex_3_sources
)))
8160 /* When there are 3 operands, one of them may be immediate,
8161 which may be the first or the last operand. Otherwise,
8162 the first operand must be shift count register (cl) or it
8163 is an instruction with VexNDS. */
8164 gas_assert (i
.imm_operands
== 1
8165 || (i
.imm_operands
== 0
8166 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
8167 || (i
.types
[0].bitfield
.instance
== RegC
8168 && i
.types
[0].bitfield
.byte
))));
8169 if (operand_type_check (i
.types
[0], imm
)
8170 || (i
.types
[0].bitfield
.instance
== RegC
8171 && i
.types
[0].bitfield
.byte
))
8177 /* When there are 4 operands, the first two must be 8bit
8178 immediate operands. The source operand will be the 3rd
8181 For instructions with VexNDS, if the first operand
8182 an imm8, the source operand is the 2nd one. If the last
8183 operand is imm8, the source operand is the first one. */
8184 gas_assert ((i
.imm_operands
== 2
8185 && i
.types
[0].bitfield
.imm8
8186 && i
.types
[1].bitfield
.imm8
)
8187 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
8188 && i
.imm_operands
== 1
8189 && (i
.types
[0].bitfield
.imm8
8190 || i
.types
[i
.operands
- 1].bitfield
.imm8
)));
8191 if (i
.imm_operands
== 2)
8195 if (i
.types
[0].bitfield
.imm8
)
8202 gas_assert (!is_evex_encoding (&i
.tm
));
8203 gas_assert (i
.imm_operands
== 1 && vex_3_sources
);
8213 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8215 /* For instructions with VexNDS, the register-only source
8216 operand must be a 32/64bit integer, XMM, YMM, ZMM, or mask
8217 register. It is encoded in VEX prefix. */
8219 i386_operand_type op
;
8222 /* Swap two source operands if needed. */
8223 if (i
.tm
.opcode_modifier
.operandconstraint
== SWAP_SOURCES
)
8231 op
= i
.tm
.operand_types
[vvvv
];
8232 if ((dest
+ 1) >= i
.operands
8233 || ((op
.bitfield
.class != Reg
8234 || (!op
.bitfield
.dword
&& !op
.bitfield
.qword
))
8235 && op
.bitfield
.class != RegSIMD
8236 && op
.bitfield
.class != RegMask
))
8238 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
8244 /* One of the register operands will be encoded in the i.rm.reg
8245 field, the other in the combined i.rm.mode and i.rm.regmem
8246 fields. If no form of this instruction supports a memory
8247 destination operand, then we assume the source operand may
8248 sometimes be a memory operand and so we need to store the
8249 destination in the i.rm.reg field. */
8250 if (!i
.tm
.opcode_modifier
.regmem
8251 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
8253 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
8254 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
8255 set_rex_vrex (i
.op
[dest
].regs
, REX_R
, i
.tm
.opcode_modifier
.sse2avx
);
8256 set_rex_vrex (i
.op
[source
].regs
, REX_B
, false);
8260 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
8261 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
8262 set_rex_vrex (i
.op
[dest
].regs
, REX_B
, i
.tm
.opcode_modifier
.sse2avx
);
8263 set_rex_vrex (i
.op
[source
].regs
, REX_R
, false);
8265 if (flag_code
!= CODE_64BIT
&& (i
.rex
& REX_R
))
8267 if (i
.types
[!i
.tm
.opcode_modifier
.regmem
].bitfield
.class != RegCR
)
8270 add_prefix (LOCK_PREFIX_OPCODE
);
8274 { /* If it's not 2 reg operands... */
8279 unsigned int fake_zero_displacement
= 0;
8282 for (op
= 0; op
< i
.operands
; op
++)
8283 if (i
.flags
[op
] & Operand_Mem
)
8285 gas_assert (op
< i
.operands
);
8287 if (i
.tm
.opcode_modifier
.sib
)
8289 /* The index register of VSIB shouldn't be RegIZ. */
8290 if (i
.tm
.opcode_modifier
.sib
!= SIBMEM
8291 && i
.index_reg
->reg_num
== RegIZ
)
8294 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8297 i
.sib
.base
= NO_BASE_REGISTER
;
8298 i
.sib
.scale
= i
.log2_scale_factor
;
8299 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
8300 i
.types
[op
].bitfield
.disp32
= 1;
8303 /* Since the mandatory SIB always has index register, so
8304 the code logic remains unchanged. The non-mandatory SIB
8305 without index register is allowed and will be handled
8309 if (i
.index_reg
->reg_num
== RegIZ
)
8310 i
.sib
.index
= NO_INDEX_REGISTER
;
8312 i
.sib
.index
= i
.index_reg
->reg_num
;
8313 set_rex_vrex (i
.index_reg
, REX_X
, false);
8317 default_seg
= reg_ds
;
8319 if (i
.base_reg
== 0)
8322 if (!i
.disp_operands
)
8323 fake_zero_displacement
= 1;
8324 if (i
.index_reg
== 0)
8326 /* Both check for VSIB and mandatory non-vector SIB. */
8327 gas_assert (!i
.tm
.opcode_modifier
.sib
8328 || i
.tm
.opcode_modifier
.sib
== SIBMEM
);
8329 /* Operand is just <disp> */
8330 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
8331 if (flag_code
== CODE_64BIT
)
8333 /* 64bit mode overwrites the 32bit absolute
8334 addressing by RIP relative addressing and
8335 absolute addressing is encoded by one of the
8336 redundant SIB forms. */
8337 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8338 i
.sib
.base
= NO_BASE_REGISTER
;
8339 i
.sib
.index
= NO_INDEX_REGISTER
;
8340 i
.types
[op
].bitfield
.disp32
= 1;
8342 else if ((flag_code
== CODE_16BIT
)
8343 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
8345 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
8346 i
.types
[op
].bitfield
.disp16
= 1;
8350 i
.rm
.regmem
= NO_BASE_REGISTER
;
8351 i
.types
[op
].bitfield
.disp32
= 1;
8354 else if (!i
.tm
.opcode_modifier
.sib
)
8356 /* !i.base_reg && i.index_reg */
8357 if (i
.index_reg
->reg_num
== RegIZ
)
8358 i
.sib
.index
= NO_INDEX_REGISTER
;
8360 i
.sib
.index
= i
.index_reg
->reg_num
;
8361 i
.sib
.base
= NO_BASE_REGISTER
;
8362 i
.sib
.scale
= i
.log2_scale_factor
;
8363 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8364 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
8365 i
.types
[op
].bitfield
.disp32
= 1;
8366 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
8370 /* RIP addressing for 64bit mode. */
8371 else if (i
.base_reg
->reg_num
== RegIP
)
8373 gas_assert (!i
.tm
.opcode_modifier
.sib
);
8374 i
.rm
.regmem
= NO_BASE_REGISTER
;
8375 i
.types
[op
].bitfield
.disp8
= 0;
8376 i
.types
[op
].bitfield
.disp16
= 0;
8377 i
.types
[op
].bitfield
.disp32
= 1;
8378 i
.types
[op
].bitfield
.disp64
= 0;
8379 i
.flags
[op
] |= Operand_PCrel
;
8380 if (! i
.disp_operands
)
8381 fake_zero_displacement
= 1;
8383 else if (i
.base_reg
->reg_type
.bitfield
.word
)
8385 gas_assert (!i
.tm
.opcode_modifier
.sib
);
8386 switch (i
.base_reg
->reg_num
)
8389 if (i
.index_reg
== 0)
8391 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
8392 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
8395 default_seg
= reg_ss
;
8396 if (i
.index_reg
== 0)
8399 if (operand_type_check (i
.types
[op
], disp
) == 0)
8401 /* fake (%bp) into 0(%bp) */
8402 if (i
.disp_encoding
== disp_encoding_16bit
)
8403 i
.types
[op
].bitfield
.disp16
= 1;
8405 i
.types
[op
].bitfield
.disp8
= 1;
8406 fake_zero_displacement
= 1;
8409 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
8410 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
8412 default: /* (%si) -> 4 or (%di) -> 5 */
8413 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
8415 if (!fake_zero_displacement
8419 fake_zero_displacement
= 1;
8420 if (i
.disp_encoding
== disp_encoding_8bit
)
8421 i
.types
[op
].bitfield
.disp8
= 1;
8423 i
.types
[op
].bitfield
.disp16
= 1;
8425 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8427 else /* i.base_reg and 32/64 bit mode */
8429 if (operand_type_check (i
.types
[op
], disp
))
8431 i
.types
[op
].bitfield
.disp16
= 0;
8432 i
.types
[op
].bitfield
.disp64
= 0;
8433 i
.types
[op
].bitfield
.disp32
= 1;
8436 if (!i
.tm
.opcode_modifier
.sib
)
8437 i
.rm
.regmem
= i
.base_reg
->reg_num
;
8438 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
8440 i
.sib
.base
= i
.base_reg
->reg_num
;
8441 /* x86-64 ignores REX prefix bit here to avoid decoder
8443 if (!(i
.base_reg
->reg_flags
& RegRex
)
8444 && (i
.base_reg
->reg_num
== EBP_REG_NUM
8445 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
8446 default_seg
= reg_ss
;
8447 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
8449 fake_zero_displacement
= 1;
8450 if (i
.disp_encoding
== disp_encoding_32bit
)
8451 i
.types
[op
].bitfield
.disp32
= 1;
8453 i
.types
[op
].bitfield
.disp8
= 1;
8455 i
.sib
.scale
= i
.log2_scale_factor
;
8456 if (i
.index_reg
== 0)
8458 /* Only check for VSIB. */
8459 gas_assert (i
.tm
.opcode_modifier
.sib
!= VECSIB128
8460 && i
.tm
.opcode_modifier
.sib
!= VECSIB256
8461 && i
.tm
.opcode_modifier
.sib
!= VECSIB512
);
8463 /* <disp>(%esp) becomes two byte modrm with no index
8464 register. We've already stored the code for esp
8465 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
8466 Any base register besides %esp will not use the
8467 extra modrm byte. */
8468 i
.sib
.index
= NO_INDEX_REGISTER
;
8470 else if (!i
.tm
.opcode_modifier
.sib
)
8472 if (i
.index_reg
->reg_num
== RegIZ
)
8473 i
.sib
.index
= NO_INDEX_REGISTER
;
8475 i
.sib
.index
= i
.index_reg
->reg_num
;
8476 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8477 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
8482 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
8483 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
8487 if (!fake_zero_displacement
8491 fake_zero_displacement
= 1;
8492 if (i
.disp_encoding
== disp_encoding_8bit
)
8493 i
.types
[op
].bitfield
.disp8
= 1;
8495 i
.types
[op
].bitfield
.disp32
= 1;
8497 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8501 if (fake_zero_displacement
)
8503 /* Fakes a zero displacement assuming that i.types[op]
8504 holds the correct displacement size. */
8507 gas_assert (i
.op
[op
].disps
== 0);
8508 exp
= &disp_expressions
[i
.disp_operands
++];
8509 i
.op
[op
].disps
= exp
;
8510 exp
->X_op
= O_constant
;
8511 exp
->X_add_number
= 0;
8512 exp
->X_add_symbol
= (symbolS
*) 0;
8513 exp
->X_op_symbol
= (symbolS
*) 0;
8521 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
8523 if (operand_type_check (i
.types
[0], imm
))
8524 i
.vex
.register_specifier
= NULL
;
8527 /* VEX.vvvv encodes one of the sources when the first
8528 operand is not an immediate. */
8529 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
8530 i
.vex
.register_specifier
= i
.op
[0].regs
;
8532 i
.vex
.register_specifier
= i
.op
[1].regs
;
8535 /* Destination is a XMM register encoded in the ModRM.reg
8537 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
8538 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
8541 /* ModRM.rm and VEX.B encodes the other source. */
8542 if (!i
.mem_operands
)
8546 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
8547 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
8549 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
8551 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
8555 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
8557 i
.vex
.register_specifier
= i
.op
[2].regs
;
8558 if (!i
.mem_operands
)
8561 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
8562 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
8566 /* Fill in i.rm.reg or i.rm.regmem field with register operand
8567 (if any) based on i.tm.extension_opcode. Again, we must be
8568 careful to make sure that segment/control/debug/test/MMX
8569 registers are coded into the i.rm.reg field. */
8570 else if (i
.reg_operands
)
8573 unsigned int vex_reg
= ~0;
8575 for (op
= 0; op
< i
.operands
; op
++)
8576 if (i
.types
[op
].bitfield
.class == Reg
8577 || i
.types
[op
].bitfield
.class == RegBND
8578 || i
.types
[op
].bitfield
.class == RegMask
8579 || i
.types
[op
].bitfield
.class == SReg
8580 || i
.types
[op
].bitfield
.class == RegCR
8581 || i
.types
[op
].bitfield
.class == RegDR
8582 || i
.types
[op
].bitfield
.class == RegTR
8583 || i
.types
[op
].bitfield
.class == RegSIMD
8584 || i
.types
[op
].bitfield
.class == RegMMX
)
8589 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8591 /* For instructions with VexNDS, the register-only
8592 source operand is encoded in VEX prefix. */
8593 gas_assert (mem
!= (unsigned int) ~0);
8595 if (op
> mem
|| i
.tm
.cpu_flags
.bitfield
.cpucmpccxadd
)
8598 gas_assert (op
< i
.operands
);
8602 /* Check register-only source operand when two source
8603 operands are swapped. */
8604 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
8605 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
8609 gas_assert (mem
== (vex_reg
+ 1)
8610 && op
< i
.operands
);
8615 gas_assert (vex_reg
< i
.operands
);
8619 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
8621 /* For instructions with VexNDD, the register destination
8622 is encoded in VEX prefix. */
8623 if (i
.mem_operands
== 0)
8625 /* There is no memory operand. */
8626 gas_assert ((op
+ 2) == i
.operands
);
8631 /* There are only 2 non-immediate operands. */
8632 gas_assert (op
< i
.imm_operands
+ 2
8633 && i
.operands
== i
.imm_operands
+ 2);
8634 vex_reg
= i
.imm_operands
+ 1;
8638 gas_assert (op
< i
.operands
);
8640 if (vex_reg
!= (unsigned int) ~0)
8642 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
8644 if ((type
->bitfield
.class != Reg
8645 || (!type
->bitfield
.dword
&& !type
->bitfield
.qword
))
8646 && type
->bitfield
.class != RegSIMD
8647 && type
->bitfield
.class != RegMask
)
8650 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
8653 /* Don't set OP operand twice. */
8656 /* If there is an extension opcode to put here, the
8657 register number must be put into the regmem field. */
8658 if (i
.tm
.extension_opcode
!= None
)
8660 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
8661 set_rex_vrex (i
.op
[op
].regs
, REX_B
,
8662 i
.tm
.opcode_modifier
.sse2avx
);
8666 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
8667 set_rex_vrex (i
.op
[op
].regs
, REX_R
,
8668 i
.tm
.opcode_modifier
.sse2avx
);
8672 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
8673 must set it to 3 to indicate this is a register operand
8674 in the regmem field. */
8675 if (!i
.mem_operands
)
8679 /* Fill in i.rm.reg field with extension opcode (if any). */
8680 if (i
.tm
.extension_opcode
!= None
)
8681 i
.rm
.reg
= i
.tm
.extension_opcode
;
8687 frag_opcode_byte (unsigned char byte
)
8689 if (now_seg
!= absolute_section
)
8690 FRAG_APPEND_1_CHAR (byte
);
8692 ++abs_section_offset
;
8696 flip_code16 (unsigned int code16
)
8698 gas_assert (i
.tm
.operands
== 1);
8700 return !(i
.prefix
[REX_PREFIX
] & REX_W
)
8701 && (code16
? i
.tm
.operand_types
[0].bitfield
.disp32
8702 : i
.tm
.operand_types
[0].bitfield
.disp16
)
8707 output_branch (void)
8713 relax_substateT subtype
;
8717 if (now_seg
== absolute_section
)
8719 as_bad (_("relaxable branches not supported in absolute section"));
8723 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
8724 size
= i
.disp_encoding
> disp_encoding_8bit
? BIG
: SMALL
;
8727 if (i
.prefix
[DATA_PREFIX
] != 0)
8731 code16
^= flip_code16(code16
);
8733 /* Pentium4 branch hints. */
8734 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8735 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8740 if (i
.prefix
[REX_PREFIX
] != 0)
8746 /* BND prefixed jump. */
8747 if (i
.prefix
[BND_PREFIX
] != 0)
8753 if (i
.prefixes
!= 0)
8754 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8756 /* It's always a symbol; End frag & setup for relax.
8757 Make sure there is enough room in this frag for the largest
8758 instruction we may generate in md_convert_frag. This is 2
8759 bytes for the opcode and room for the prefix and largest
8761 frag_grow (prefix
+ 2 + 4);
8762 /* Prefix and 1 opcode byte go in fr_fix. */
8763 p
= frag_more (prefix
+ 1);
8764 if (i
.prefix
[DATA_PREFIX
] != 0)
8765 *p
++ = DATA_PREFIX_OPCODE
;
8766 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
8767 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
8768 *p
++ = i
.prefix
[SEG_PREFIX
];
8769 if (i
.prefix
[BND_PREFIX
] != 0)
8770 *p
++ = BND_PREFIX_OPCODE
;
8771 if (i
.prefix
[REX_PREFIX
] != 0)
8772 *p
++ = i
.prefix
[REX_PREFIX
];
8773 *p
= i
.tm
.base_opcode
;
8775 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
8776 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
8777 else if (cpu_arch_flags
.bitfield
.cpui386
)
8778 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
8780 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
8783 sym
= i
.op
[0].disps
->X_add_symbol
;
8784 off
= i
.op
[0].disps
->X_add_number
;
8786 if (i
.op
[0].disps
->X_op
!= O_constant
8787 && i
.op
[0].disps
->X_op
!= O_symbol
)
8789 /* Handle complex expressions. */
8790 sym
= make_expr_symbol (i
.op
[0].disps
);
8794 frag_now
->tc_frag_data
.code64
= flag_code
== CODE_64BIT
;
8796 /* 1 possible extra opcode + 4 byte displacement go in var part.
8797 Pass reloc in fr_var. */
8798 frag_var (rs_machine_dependent
, 5, i
.reloc
[0], subtype
, sym
, off
, p
);
8801 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8802 /* Return TRUE iff PLT32 relocation should be used for branching to
8806 need_plt32_p (symbolS
*s
)
8808 /* PLT32 relocation is ELF only. */
8813 /* Don't emit PLT32 relocation on Solaris: neither native linker nor
8814 krtld support it. */
8818 /* Since there is no need to prepare for PLT branch on x86-64, we
8819 can generate R_X86_64_PLT32, instead of R_X86_64_PC32, which can
8820 be used as a marker for 32-bit PC-relative branches. */
8827 /* Weak or undefined symbol need PLT32 relocation. */
8828 if (S_IS_WEAK (s
) || !S_IS_DEFINED (s
))
8831 /* Non-global symbol doesn't need PLT32 relocation. */
8832 if (! S_IS_EXTERNAL (s
))
8835 /* Other global symbols need PLT32 relocation. NB: Symbol with
8836 non-default visibilities are treated as normal global symbol
8837 so that PLT32 relocation can be used as a marker for 32-bit
8838 PC-relative branches. It is useful for linker relaxation. */
8849 bfd_reloc_code_real_type jump_reloc
= i
.reloc
[0];
8851 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)
8853 /* This is a loop or jecxz type instruction. */
8855 if (i
.prefix
[ADDR_PREFIX
] != 0)
8857 frag_opcode_byte (ADDR_PREFIX_OPCODE
);
8860 /* Pentium4 branch hints. */
8861 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8862 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8864 frag_opcode_byte (i
.prefix
[SEG_PREFIX
]);
8873 if (flag_code
== CODE_16BIT
)
8876 if (i
.prefix
[DATA_PREFIX
] != 0)
8878 frag_opcode_byte (DATA_PREFIX_OPCODE
);
8880 code16
^= flip_code16(code16
);
8888 /* BND prefixed jump. */
8889 if (i
.prefix
[BND_PREFIX
] != 0)
8891 frag_opcode_byte (i
.prefix
[BND_PREFIX
]);
8895 if (i
.prefix
[REX_PREFIX
] != 0)
8897 frag_opcode_byte (i
.prefix
[REX_PREFIX
]);
8901 if (i
.prefixes
!= 0)
8902 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8904 if (now_seg
== absolute_section
)
8906 abs_section_offset
+= i
.opcode_length
+ size
;
8910 p
= frag_more (i
.opcode_length
+ size
);
8911 switch (i
.opcode_length
)
8914 *p
++ = i
.tm
.base_opcode
>> 8;
8917 *p
++ = i
.tm
.base_opcode
;
8923 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8924 if (flag_code
== CODE_64BIT
&& size
== 4
8925 && jump_reloc
== NO_RELOC
&& i
.op
[0].disps
->X_add_number
== 0
8926 && need_plt32_p (i
.op
[0].disps
->X_add_symbol
))
8927 jump_reloc
= BFD_RELOC_X86_64_PLT32
;
8930 jump_reloc
= reloc (size
, 1, 1, jump_reloc
);
8932 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8933 i
.op
[0].disps
, 1, jump_reloc
);
8935 /* All jumps handled here are signed, but don't unconditionally use a
8936 signed limit check for 32 and 16 bit jumps as we want to allow wrap
8937 around at 4G (outside of 64-bit mode) and 64k (except for XBEGIN)
8942 fixP
->fx_signed
= 1;
8946 if (i
.tm
.base_opcode
== 0xc7f8)
8947 fixP
->fx_signed
= 1;
8951 if (flag_code
== CODE_64BIT
)
8952 fixP
->fx_signed
= 1;
8958 output_interseg_jump (void)
8966 if (flag_code
== CODE_16BIT
)
8970 if (i
.prefix
[DATA_PREFIX
] != 0)
8977 gas_assert (!i
.prefix
[REX_PREFIX
]);
8983 if (i
.prefixes
!= 0)
8984 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8986 if (now_seg
== absolute_section
)
8988 abs_section_offset
+= prefix
+ 1 + 2 + size
;
8992 /* 1 opcode; 2 segment; offset */
8993 p
= frag_more (prefix
+ 1 + 2 + size
);
8995 if (i
.prefix
[DATA_PREFIX
] != 0)
8996 *p
++ = DATA_PREFIX_OPCODE
;
8998 if (i
.prefix
[REX_PREFIX
] != 0)
8999 *p
++ = i
.prefix
[REX_PREFIX
];
9001 *p
++ = i
.tm
.base_opcode
;
9002 if (i
.op
[1].imms
->X_op
== O_constant
)
9004 offsetT n
= i
.op
[1].imms
->X_add_number
;
9007 && !fits_in_unsigned_word (n
)
9008 && !fits_in_signed_word (n
))
9010 as_bad (_("16-bit jump out of range"));
9013 md_number_to_chars (p
, n
, size
);
9016 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
9017 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
9020 if (i
.op
[0].imms
->X_op
== O_constant
)
9021 md_number_to_chars (p
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
9023 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, 2,
9024 i
.op
[0].imms
, 0, reloc (2, 0, 0, i
.reloc
[0]));
9027 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9032 asection
*seg
= now_seg
;
9033 subsegT subseg
= now_subseg
;
9035 unsigned int alignment
, align_size_1
;
9036 unsigned int isa_1_descsz
, feature_2_descsz
, descsz
;
9037 unsigned int isa_1_descsz_raw
, feature_2_descsz_raw
;
9038 unsigned int padding
;
9040 if (!IS_ELF
|| !x86_used_note
)
9043 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X86
;
9045 /* The .note.gnu.property section layout:
9047 Field Length Contents
9050 n_descsz 4 The note descriptor size
9051 n_type 4 NT_GNU_PROPERTY_TYPE_0
9053 n_desc n_descsz The program property array
9057 /* Create the .note.gnu.property section. */
9058 sec
= subseg_new (NOTE_GNU_PROPERTY_SECTION_NAME
, 0);
9059 bfd_set_section_flags (sec
,
9066 if (get_elf_backend_data (stdoutput
)->s
->elfclass
== ELFCLASS64
)
9077 bfd_set_section_alignment (sec
, alignment
);
9078 elf_section_type (sec
) = SHT_NOTE
;
9080 /* GNU_PROPERTY_X86_ISA_1_USED: 4-byte type + 4-byte data size
9082 isa_1_descsz_raw
= 4 + 4 + 4;
9083 /* Align GNU_PROPERTY_X86_ISA_1_USED. */
9084 isa_1_descsz
= (isa_1_descsz_raw
+ align_size_1
) & ~align_size_1
;
9086 feature_2_descsz_raw
= isa_1_descsz
;
9087 /* GNU_PROPERTY_X86_FEATURE_2_USED: 4-byte type + 4-byte data size
9089 feature_2_descsz_raw
+= 4 + 4 + 4;
9090 /* Align GNU_PROPERTY_X86_FEATURE_2_USED. */
9091 feature_2_descsz
= ((feature_2_descsz_raw
+ align_size_1
)
9094 descsz
= feature_2_descsz
;
9095 /* Section size: n_namsz + n_descsz + n_type + n_name + n_descsz. */
9096 p
= frag_more (4 + 4 + 4 + 4 + descsz
);
9098 /* Write n_namsz. */
9099 md_number_to_chars (p
, (valueT
) 4, 4);
9101 /* Write n_descsz. */
9102 md_number_to_chars (p
+ 4, (valueT
) descsz
, 4);
9105 md_number_to_chars (p
+ 4 * 2, (valueT
) NT_GNU_PROPERTY_TYPE_0
, 4);
9108 memcpy (p
+ 4 * 3, "GNU", 4);
9110 /* Write 4-byte type. */
9111 md_number_to_chars (p
+ 4 * 4,
9112 (valueT
) GNU_PROPERTY_X86_ISA_1_USED
, 4);
9114 /* Write 4-byte data size. */
9115 md_number_to_chars (p
+ 4 * 5, (valueT
) 4, 4);
9117 /* Write 4-byte data. */
9118 md_number_to_chars (p
+ 4 * 6, (valueT
) x86_isa_1_used
, 4);
9120 /* Zero out paddings. */
9121 padding
= isa_1_descsz
- isa_1_descsz_raw
;
9123 memset (p
+ 4 * 7, 0, padding
);
9125 /* Write 4-byte type. */
9126 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 4,
9127 (valueT
) GNU_PROPERTY_X86_FEATURE_2_USED
, 4);
9129 /* Write 4-byte data size. */
9130 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 5, (valueT
) 4, 4);
9132 /* Write 4-byte data. */
9133 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 6,
9134 (valueT
) x86_feature_2_used
, 4);
9136 /* Zero out paddings. */
9137 padding
= feature_2_descsz
- feature_2_descsz_raw
;
9139 memset (p
+ isa_1_descsz
+ 4 * 7, 0, padding
);
9141 /* We probably can't restore the current segment, for there likely
9144 subseg_set (seg
, subseg
);
9148 x86_support_sframe_p (void)
9150 /* At this time, SFrame unwind is supported for AMD64 ABI only. */
9151 return (x86_elf_abi
== X86_64_ABI
);
9155 x86_sframe_ra_tracking_p (void)
9157 /* In AMD64, return address is always stored on the stack at a fixed offset
9158 from the CFA (provided via x86_sframe_cfa_ra_offset ()).
9159 Do not track explicitly via an SFrame Frame Row Entry. */
9164 x86_sframe_cfa_ra_offset (void)
9166 gas_assert (x86_elf_abi
== X86_64_ABI
);
9167 return (offsetT
) -8;
9171 x86_sframe_get_abi_arch (void)
9173 unsigned char sframe_abi_arch
= 0;
9175 if (x86_support_sframe_p ())
9177 gas_assert (!target_big_endian
);
9178 sframe_abi_arch
= SFRAME_ABI_AMD64_ENDIAN_LITTLE
;
9181 return sframe_abi_arch
;
9187 encoding_length (const fragS
*start_frag
, offsetT start_off
,
9188 const char *frag_now_ptr
)
9190 unsigned int len
= 0;
9192 if (start_frag
!= frag_now
)
9194 const fragS
*fr
= start_frag
;
9199 } while (fr
&& fr
!= frag_now
);
9202 return len
- start_off
+ (frag_now_ptr
- frag_now
->fr_literal
);
9205 /* Return 1 for test, and, cmp, add, sub, inc and dec which may
9206 be macro-fused with conditional jumps.
9207 NB: If TEST/AND/CMP/ADD/SUB/INC/DEC is of RIP relative address,
9208 or is one of the following format:
9221 maybe_fused_with_jcc_p (enum mf_cmp_kind
* mf_cmp_p
)
9223 /* No RIP address. */
9224 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
9227 /* No opcodes outside of base encoding space. */
9228 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
9231 /* add, sub without add/sub m, imm. */
9232 if (i
.tm
.base_opcode
<= 5
9233 || (i
.tm
.base_opcode
>= 0x28 && i
.tm
.base_opcode
<= 0x2d)
9234 || ((i
.tm
.base_opcode
| 3) == 0x83
9235 && (i
.tm
.extension_opcode
== 0x5
9236 || i
.tm
.extension_opcode
== 0x0)))
9238 *mf_cmp_p
= mf_cmp_alu_cmp
;
9239 return !(i
.mem_operands
&& i
.imm_operands
);
9242 /* and without and m, imm. */
9243 if ((i
.tm
.base_opcode
>= 0x20 && i
.tm
.base_opcode
<= 0x25)
9244 || ((i
.tm
.base_opcode
| 3) == 0x83
9245 && i
.tm
.extension_opcode
== 0x4))
9247 *mf_cmp_p
= mf_cmp_test_and
;
9248 return !(i
.mem_operands
&& i
.imm_operands
);
9251 /* test without test m imm. */
9252 if ((i
.tm
.base_opcode
| 1) == 0x85
9253 || (i
.tm
.base_opcode
| 1) == 0xa9
9254 || ((i
.tm
.base_opcode
| 1) == 0xf7
9255 && i
.tm
.extension_opcode
== 0))
9257 *mf_cmp_p
= mf_cmp_test_and
;
9258 return !(i
.mem_operands
&& i
.imm_operands
);
9261 /* cmp without cmp m, imm. */
9262 if ((i
.tm
.base_opcode
>= 0x38 && i
.tm
.base_opcode
<= 0x3d)
9263 || ((i
.tm
.base_opcode
| 3) == 0x83
9264 && (i
.tm
.extension_opcode
== 0x7)))
9266 *mf_cmp_p
= mf_cmp_alu_cmp
;
9267 return !(i
.mem_operands
&& i
.imm_operands
);
9270 /* inc, dec without inc/dec m. */
9271 if ((i
.tm
.cpu_flags
.bitfield
.cpuno64
9272 && (i
.tm
.base_opcode
| 0xf) == 0x4f)
9273 || ((i
.tm
.base_opcode
| 1) == 0xff
9274 && i
.tm
.extension_opcode
<= 0x1))
9276 *mf_cmp_p
= mf_cmp_incdec
;
9277 return !i
.mem_operands
;
9283 /* Return 1 if a FUSED_JCC_PADDING frag should be generated. */
9286 add_fused_jcc_padding_frag_p (enum mf_cmp_kind
* mf_cmp_p
)
9288 /* NB: Don't work with COND_JUMP86 without i386. */
9289 if (!align_branch_power
9290 || now_seg
== absolute_section
9291 || !cpu_arch_flags
.bitfield
.cpui386
9292 || !(align_branch
& align_branch_fused_bit
))
9295 if (maybe_fused_with_jcc_p (mf_cmp_p
))
9297 if (last_insn
.kind
== last_insn_other
9298 || last_insn
.seg
!= now_seg
)
9301 as_warn_where (last_insn
.file
, last_insn
.line
,
9302 _("`%s` skips -malign-branch-boundary on `%s`"),
9303 last_insn
.name
, i
.tm
.name
);
9309 /* Return 1 if a BRANCH_PREFIX frag should be generated. */
9312 add_branch_prefix_frag_p (void)
9314 /* NB: Don't work with COND_JUMP86 without i386. Don't add prefix
9315 to PadLock instructions since they include prefixes in opcode. */
9316 if (!align_branch_power
9317 || !align_branch_prefix_size
9318 || now_seg
== absolute_section
9319 || i
.tm
.cpu_flags
.bitfield
.cpupadlock
9320 || !cpu_arch_flags
.bitfield
.cpui386
)
9323 /* Don't add prefix if it is a prefix or there is no operand in case
9324 that segment prefix is special. */
9325 if (!i
.operands
|| i
.tm
.opcode_modifier
.isprefix
)
9328 if (last_insn
.kind
== last_insn_other
9329 || last_insn
.seg
!= now_seg
)
9333 as_warn_where (last_insn
.file
, last_insn
.line
,
9334 _("`%s` skips -malign-branch-boundary on `%s`"),
9335 last_insn
.name
, i
.tm
.name
);
9340 /* Return 1 if a BRANCH_PADDING frag should be generated. */
9343 add_branch_padding_frag_p (enum align_branch_kind
*branch_p
,
9344 enum mf_jcc_kind
*mf_jcc_p
)
9348 /* NB: Don't work with COND_JUMP86 without i386. */
9349 if (!align_branch_power
9350 || now_seg
== absolute_section
9351 || !cpu_arch_flags
.bitfield
.cpui386
9352 || i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
9357 /* Check for jcc and direct jmp. */
9358 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
9360 if (i
.tm
.base_opcode
== JUMP_PC_RELATIVE
)
9362 *branch_p
= align_branch_jmp
;
9363 add_padding
= align_branch
& align_branch_jmp_bit
;
9367 /* Because J<cc> and JN<cc> share same group in macro-fusible table,
9368 igore the lowest bit. */
9369 *mf_jcc_p
= (i
.tm
.base_opcode
& 0x0e) >> 1;
9370 *branch_p
= align_branch_jcc
;
9371 if ((align_branch
& align_branch_jcc_bit
))
9375 else if ((i
.tm
.base_opcode
| 1) == 0xc3)
9378 *branch_p
= align_branch_ret
;
9379 if ((align_branch
& align_branch_ret_bit
))
9384 /* Check for indirect jmp, direct and indirect calls. */
9385 if (i
.tm
.base_opcode
== 0xe8)
9388 *branch_p
= align_branch_call
;
9389 if ((align_branch
& align_branch_call_bit
))
9392 else if (i
.tm
.base_opcode
== 0xff
9393 && (i
.tm
.extension_opcode
== 2
9394 || i
.tm
.extension_opcode
== 4))
9396 /* Indirect call and jmp. */
9397 *branch_p
= align_branch_indirect
;
9398 if ((align_branch
& align_branch_indirect_bit
))
9405 && (i
.op
[0].disps
->X_op
== O_symbol
9406 || (i
.op
[0].disps
->X_op
== O_subtract
9407 && i
.op
[0].disps
->X_op_symbol
== GOT_symbol
)))
9409 symbolS
*s
= i
.op
[0].disps
->X_add_symbol
;
9410 /* No padding to call to global or undefined tls_get_addr. */
9411 if ((S_IS_EXTERNAL (s
) || !S_IS_DEFINED (s
))
9412 && strcmp (S_GET_NAME (s
), tls_get_addr
) == 0)
9418 && last_insn
.kind
!= last_insn_other
9419 && last_insn
.seg
== now_seg
)
9422 as_warn_where (last_insn
.file
, last_insn
.line
,
9423 _("`%s` skips -malign-branch-boundary on `%s`"),
9424 last_insn
.name
, i
.tm
.name
);
9434 fragS
*insn_start_frag
;
9435 offsetT insn_start_off
;
9436 fragS
*fragP
= NULL
;
9437 enum align_branch_kind branch
= align_branch_none
;
9438 /* The initializer is arbitrary just to avoid uninitialized error.
9439 it's actually either assigned in add_branch_padding_frag_p
9440 or never be used. */
9441 enum mf_jcc_kind mf_jcc
= mf_jcc_jo
;
9443 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9444 if (IS_ELF
&& x86_used_note
&& now_seg
!= absolute_section
)
9446 if ((i
.xstate
& xstate_tmm
) == xstate_tmm
9447 || i
.tm
.cpu_flags
.bitfield
.cpuamx_tile
)
9448 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_TMM
;
9450 if (i
.tm
.cpu_flags
.bitfield
.cpu8087
9451 || i
.tm
.cpu_flags
.bitfield
.cpu287
9452 || i
.tm
.cpu_flags
.bitfield
.cpu387
9453 || i
.tm
.cpu_flags
.bitfield
.cpu687
9454 || i
.tm
.cpu_flags
.bitfield
.cpufisttp
)
9455 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X87
;
9457 if ((i
.xstate
& xstate_mmx
)
9458 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
9459 && !is_any_vex_encoding (&i
.tm
)
9460 && (i
.tm
.base_opcode
== 0x77 /* emms */
9461 || i
.tm
.base_opcode
== 0x0e /* femms */)))
9462 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MMX
;
9466 if (i
.index_reg
->reg_type
.bitfield
.zmmword
)
9467 i
.xstate
|= xstate_zmm
;
9468 else if (i
.index_reg
->reg_type
.bitfield
.ymmword
)
9469 i
.xstate
|= xstate_ymm
;
9470 else if (i
.index_reg
->reg_type
.bitfield
.xmmword
)
9471 i
.xstate
|= xstate_xmm
;
9474 /* vzeroall / vzeroupper */
9475 if (i
.tm
.base_opcode
== 0x77 && i
.tm
.cpu_flags
.bitfield
.cpuavx
)
9476 i
.xstate
|= xstate_ymm
;
9478 if ((i
.xstate
& xstate_xmm
)
9479 /* ldmxcsr / stmxcsr / vldmxcsr / vstmxcsr */
9480 || (i
.tm
.base_opcode
== 0xae
9481 && (i
.tm
.cpu_flags
.bitfield
.cpusse
9482 || i
.tm
.cpu_flags
.bitfield
.cpuavx
))
9483 || i
.tm
.cpu_flags
.bitfield
.cpuwidekl
9484 || i
.tm
.cpu_flags
.bitfield
.cpukl
)
9485 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XMM
;
9487 if ((i
.xstate
& xstate_ymm
) == xstate_ymm
)
9488 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_YMM
;
9489 if ((i
.xstate
& xstate_zmm
) == xstate_zmm
)
9490 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_ZMM
;
9491 if (i
.mask
.reg
|| (i
.xstate
& xstate_mask
) == xstate_mask
)
9492 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MASK
;
9493 if (i
.tm
.cpu_flags
.bitfield
.cpufxsr
)
9494 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_FXSR
;
9495 if (i
.tm
.cpu_flags
.bitfield
.cpuxsave
)
9496 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVE
;
9497 if (i
.tm
.cpu_flags
.bitfield
.cpuxsaveopt
)
9498 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
;
9499 if (i
.tm
.cpu_flags
.bitfield
.cpuxsavec
)
9500 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEC
;
9502 if (x86_feature_2_used
9503 || i
.tm
.cpu_flags
.bitfield
.cpucmov
9504 || i
.tm
.cpu_flags
.bitfield
.cpusyscall
9505 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
9506 && i
.tm
.base_opcode
== 0xc7
9507 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
9508 && i
.tm
.extension_opcode
== 1) /* cmpxchg8b */)
9509 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_BASELINE
;
9510 if (i
.tm
.cpu_flags
.bitfield
.cpusse3
9511 || i
.tm
.cpu_flags
.bitfield
.cpussse3
9512 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
9513 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
9514 || i
.tm
.cpu_flags
.bitfield
.cpucx16
9515 || i
.tm
.cpu_flags
.bitfield
.cpupopcnt
9516 /* LAHF-SAHF insns in 64-bit mode. */
9517 || (flag_code
== CODE_64BIT
9518 && (i
.tm
.base_opcode
| 1) == 0x9f
9519 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
))
9520 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V2
;
9521 if (i
.tm
.cpu_flags
.bitfield
.cpuavx
9522 || i
.tm
.cpu_flags
.bitfield
.cpuavx2
9523 /* Any VEX encoded insns execpt for AVX512F, AVX512BW, AVX512DQ,
9524 XOP, FMA4, LPW, TBM, and AMX. */
9525 || (i
.tm
.opcode_modifier
.vex
9526 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512f
9527 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
9528 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
9529 && !i
.tm
.cpu_flags
.bitfield
.cpuxop
9530 && !i
.tm
.cpu_flags
.bitfield
.cpufma4
9531 && !i
.tm
.cpu_flags
.bitfield
.cpulwp
9532 && !i
.tm
.cpu_flags
.bitfield
.cputbm
9533 && !(x86_feature_2_used
& GNU_PROPERTY_X86_FEATURE_2_TMM
))
9534 || i
.tm
.cpu_flags
.bitfield
.cpuf16c
9535 || i
.tm
.cpu_flags
.bitfield
.cpufma
9536 || i
.tm
.cpu_flags
.bitfield
.cpulzcnt
9537 || i
.tm
.cpu_flags
.bitfield
.cpumovbe
9538 || i
.tm
.cpu_flags
.bitfield
.cpuxsaves
9539 || (x86_feature_2_used
9540 & (GNU_PROPERTY_X86_FEATURE_2_XSAVE
9541 | GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
9542 | GNU_PROPERTY_X86_FEATURE_2_XSAVEC
)) != 0)
9543 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V3
;
9544 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512f
9545 || i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
9546 || i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
9547 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
9548 /* Any EVEX encoded insns except for AVX512ER, AVX512PF,
9549 AVX512-4FMAPS, and AVX512-4VNNIW. */
9550 || (i
.tm
.opcode_modifier
.evex
9551 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512er
9552 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512pf
9553 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512_4fmaps
9554 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512_4vnniw
))
9555 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V4
;
9559 /* Tie dwarf2 debug info to the address at the start of the insn.
9560 We can't do this after the insn has been output as the current
9561 frag may have been closed off. eg. by frag_var. */
9562 dwarf2_emit_insn (0);
9564 insn_start_frag
= frag_now
;
9565 insn_start_off
= frag_now_fix ();
9567 if (add_branch_padding_frag_p (&branch
, &mf_jcc
))
9570 /* Branch can be 8 bytes. Leave some room for prefixes. */
9571 unsigned int max_branch_padding_size
= 14;
9573 /* Align section to boundary. */
9574 record_alignment (now_seg
, align_branch_power
);
9576 /* Make room for padding. */
9577 frag_grow (max_branch_padding_size
);
9579 /* Start of the padding. */
9584 frag_var (rs_machine_dependent
, max_branch_padding_size
, 0,
9585 ENCODE_RELAX_STATE (BRANCH_PADDING
, 0),
9588 fragP
->tc_frag_data
.mf_type
= mf_jcc
;
9589 fragP
->tc_frag_data
.branch_type
= branch
;
9590 fragP
->tc_frag_data
.max_bytes
= max_branch_padding_size
;
9593 if (!cpu_arch_flags
.bitfield
.cpui386
&& (flag_code
!= CODE_16BIT
)
9594 && !pre_386_16bit_warned
)
9596 as_warn (_("use .code16 to ensure correct addressing mode"));
9597 pre_386_16bit_warned
= true;
9601 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
9603 else if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
9604 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
9606 else if (i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
)
9607 output_interseg_jump ();
9610 /* Output normal instructions here. */
9614 enum mf_cmp_kind mf_cmp
;
9617 && (i
.tm
.base_opcode
== 0xaee8
9618 || i
.tm
.base_opcode
== 0xaef0
9619 || i
.tm
.base_opcode
== 0xaef8))
9621 /* Encode lfence, mfence, and sfence as
9622 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
9623 if (flag_code
== CODE_16BIT
)
9624 as_bad (_("Cannot convert `%s' in 16-bit mode"), i
.tm
.name
);
9625 else if (omit_lock_prefix
)
9626 as_bad (_("Cannot convert `%s' with `-momit-lock-prefix=yes' in effect"),
9628 else if (now_seg
!= absolute_section
)
9630 offsetT val
= 0x240483f0ULL
;
9633 md_number_to_chars (p
, val
, 5);
9636 abs_section_offset
+= 5;
9640 /* Some processors fail on LOCK prefix. This options makes
9641 assembler ignore LOCK prefix and serves as a workaround. */
9642 if (omit_lock_prefix
)
9644 if (i
.tm
.base_opcode
== LOCK_PREFIX_OPCODE
9645 && i
.tm
.opcode_modifier
.isprefix
)
9647 i
.prefix
[LOCK_PREFIX
] = 0;
9651 /* Skip if this is a branch. */
9653 else if (add_fused_jcc_padding_frag_p (&mf_cmp
))
9655 /* Make room for padding. */
9656 frag_grow (MAX_FUSED_JCC_PADDING_SIZE
);
9661 frag_var (rs_machine_dependent
, MAX_FUSED_JCC_PADDING_SIZE
, 0,
9662 ENCODE_RELAX_STATE (FUSED_JCC_PADDING
, 0),
9665 fragP
->tc_frag_data
.mf_type
= mf_cmp
;
9666 fragP
->tc_frag_data
.branch_type
= align_branch_fused
;
9667 fragP
->tc_frag_data
.max_bytes
= MAX_FUSED_JCC_PADDING_SIZE
;
9669 else if (add_branch_prefix_frag_p ())
9671 unsigned int max_prefix_size
= align_branch_prefix_size
;
9673 /* Make room for padding. */
9674 frag_grow (max_prefix_size
);
9679 frag_var (rs_machine_dependent
, max_prefix_size
, 0,
9680 ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0),
9683 fragP
->tc_frag_data
.max_bytes
= max_prefix_size
;
9686 /* Since the VEX/EVEX prefix contains the implicit prefix, we
9687 don't need the explicit prefix. */
9688 if (!is_any_vex_encoding (&i
.tm
))
9690 switch (i
.tm
.opcode_modifier
.opcodeprefix
)
9699 if (!i
.tm
.cpu_flags
.bitfield
.cpupadlock
9700 || (i
.prefix
[REP_PREFIX
] != 0xf3))
9704 switch (i
.opcode_length
)
9709 /* Check for pseudo prefixes. */
9710 if (!i
.tm
.opcode_modifier
.isprefix
|| i
.tm
.base_opcode
)
9712 as_bad_where (insn_start_frag
->fr_file
,
9713 insn_start_frag
->fr_line
,
9714 _("pseudo prefix without instruction"));
9724 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9725 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
9726 R_X86_64_GOTTPOFF relocation so that linker can safely
9727 perform IE->LE optimization. A dummy REX_OPCODE prefix
9728 is also needed for lea with R_X86_64_GOTPC32_TLSDESC
9729 relocation for GDesc -> IE/LE optimization. */
9730 if (x86_elf_abi
== X86_64_X32_ABI
9732 && (i
.reloc
[0] == BFD_RELOC_X86_64_GOTTPOFF
9733 || i
.reloc
[0] == BFD_RELOC_X86_64_GOTPC32_TLSDESC
)
9734 && i
.prefix
[REX_PREFIX
] == 0)
9735 add_prefix (REX_OPCODE
);
9738 /* The prefix bytes. */
9739 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
9741 frag_opcode_byte (*q
);
9745 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
9751 frag_opcode_byte (*q
);
9754 /* There should be no other prefixes for instructions
9759 /* For EVEX instructions i.vrex should become 0 after
9760 build_evex_prefix. For VEX instructions upper 16 registers
9761 aren't available, so VREX should be 0. */
9764 /* Now the VEX prefix. */
9765 if (now_seg
!= absolute_section
)
9767 p
= frag_more (i
.vex
.length
);
9768 for (j
= 0; j
< i
.vex
.length
; j
++)
9769 p
[j
] = i
.vex
.bytes
[j
];
9772 abs_section_offset
+= i
.vex
.length
;
9775 /* Now the opcode; be careful about word order here! */
9776 j
= i
.opcode_length
;
9778 switch (i
.tm
.opcode_modifier
.opcodespace
)
9793 if (now_seg
== absolute_section
)
9794 abs_section_offset
+= j
;
9797 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
9803 && i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
9806 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_0F
)
9807 *p
++ = i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
9811 switch (i
.opcode_length
)
9814 /* Put out high byte first: can't use md_number_to_chars! */
9815 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
9818 *p
= i
.tm
.base_opcode
& 0xff;
9827 /* Now the modrm byte and sib byte (if present). */
9828 if (i
.tm
.opcode_modifier
.modrm
)
9830 frag_opcode_byte ((i
.rm
.regmem
<< 0)
9832 | (i
.rm
.mode
<< 6));
9833 /* If i.rm.regmem == ESP (4)
9834 && i.rm.mode != (Register mode)
9836 ==> need second modrm byte. */
9837 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
9839 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.word
))
9840 frag_opcode_byte ((i
.sib
.base
<< 0)
9841 | (i
.sib
.index
<< 3)
9842 | (i
.sib
.scale
<< 6));
9845 if (i
.disp_operands
)
9846 output_disp (insn_start_frag
, insn_start_off
);
9849 output_imm (insn_start_frag
, insn_start_off
);
9852 * frag_now_fix () returning plain abs_section_offset when we're in the
9853 * absolute section, and abs_section_offset not getting updated as data
9854 * gets added to the frag breaks the logic below.
9856 if (now_seg
!= absolute_section
)
9858 j
= encoding_length (insn_start_frag
, insn_start_off
, frag_more (0));
9860 as_warn (_("instruction length of %u bytes exceeds the limit of 15"),
9864 /* NB: Don't add prefix with GOTPC relocation since
9865 output_disp() above depends on the fixed encoding
9866 length. Can't add prefix with TLS relocation since
9867 it breaks TLS linker optimization. */
9868 unsigned int max
= i
.has_gotpc_tls_reloc
? 0 : 15 - j
;
9869 /* Prefix count on the current instruction. */
9870 unsigned int count
= i
.vex
.length
;
9872 for (k
= 0; k
< ARRAY_SIZE (i
.prefix
); k
++)
9873 /* REX byte is encoded in VEX/EVEX prefix. */
9874 if (i
.prefix
[k
] && (k
!= REX_PREFIX
|| !i
.vex
.length
))
9877 /* Count prefixes for extended opcode maps. */
9879 switch (i
.tm
.opcode_modifier
.opcodespace
)
9894 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
9897 /* Set the maximum prefix size in BRANCH_PREFIX
9899 if (fragP
->tc_frag_data
.max_bytes
> max
)
9900 fragP
->tc_frag_data
.max_bytes
= max
;
9901 if (fragP
->tc_frag_data
.max_bytes
> count
)
9902 fragP
->tc_frag_data
.max_bytes
-= count
;
9904 fragP
->tc_frag_data
.max_bytes
= 0;
9908 /* Remember the maximum prefix size in FUSED_JCC_PADDING
9910 unsigned int max_prefix_size
;
9911 if (align_branch_prefix_size
> max
)
9912 max_prefix_size
= max
;
9914 max_prefix_size
= align_branch_prefix_size
;
9915 if (max_prefix_size
> count
)
9916 fragP
->tc_frag_data
.max_prefix_length
9917 = max_prefix_size
- count
;
9920 /* Use existing segment prefix if possible. Use CS
9921 segment prefix in 64-bit mode. In 32-bit mode, use SS
9922 segment prefix with ESP/EBP base register and use DS
9923 segment prefix without ESP/EBP base register. */
9924 if (i
.prefix
[SEG_PREFIX
])
9925 fragP
->tc_frag_data
.default_prefix
= i
.prefix
[SEG_PREFIX
];
9926 else if (flag_code
== CODE_64BIT
)
9927 fragP
->tc_frag_data
.default_prefix
= CS_PREFIX_OPCODE
;
9929 && (i
.base_reg
->reg_num
== 4
9930 || i
.base_reg
->reg_num
== 5))
9931 fragP
->tc_frag_data
.default_prefix
= SS_PREFIX_OPCODE
;
9933 fragP
->tc_frag_data
.default_prefix
= DS_PREFIX_OPCODE
;
9938 /* NB: Don't work with COND_JUMP86 without i386. */
9939 if (align_branch_power
9940 && now_seg
!= absolute_section
9941 && cpu_arch_flags
.bitfield
.cpui386
)
9943 /* Terminate each frag so that we can add prefix and check for
9945 frag_wane (frag_now
);
9952 pi ("" /*line*/, &i
);
9954 #endif /* DEBUG386 */
9957 /* Return the size of the displacement operand N. */
9960 disp_size (unsigned int n
)
9964 if (i
.types
[n
].bitfield
.disp64
)
9966 else if (i
.types
[n
].bitfield
.disp8
)
9968 else if (i
.types
[n
].bitfield
.disp16
)
9973 /* Return the size of the immediate operand N. */
9976 imm_size (unsigned int n
)
9979 if (i
.types
[n
].bitfield
.imm64
)
9981 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
9983 else if (i
.types
[n
].bitfield
.imm16
)
9989 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
9994 for (n
= 0; n
< i
.operands
; n
++)
9996 if (operand_type_check (i
.types
[n
], disp
))
9998 int size
= disp_size (n
);
10000 if (now_seg
== absolute_section
)
10001 abs_section_offset
+= size
;
10002 else if (i
.op
[n
].disps
->X_op
== O_constant
)
10004 offsetT val
= i
.op
[n
].disps
->X_add_number
;
10006 val
= offset_in_range (val
>> (size
== 1 ? i
.memshift
: 0),
10008 p
= frag_more (size
);
10009 md_number_to_chars (p
, val
, size
);
10013 enum bfd_reloc_code_real reloc_type
;
10014 bool pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
10015 bool sign
= (flag_code
== CODE_64BIT
&& size
== 4
10016 && (!want_disp32 (&i
.tm
)
10017 || (i
.tm
.opcode_modifier
.jump
&& !i
.jumpabsolute
10018 && !i
.types
[n
].bitfield
.baseindex
)))
10022 /* We can't have 8 bit displacement here. */
10023 gas_assert (!i
.types
[n
].bitfield
.disp8
);
10025 /* The PC relative address is computed relative
10026 to the instruction boundary, so in case immediate
10027 fields follows, we need to adjust the value. */
10028 if (pcrel
&& i
.imm_operands
)
10033 for (n1
= 0; n1
< i
.operands
; n1
++)
10034 if (operand_type_check (i
.types
[n1
], imm
))
10036 /* Only one immediate is allowed for PC
10037 relative address. */
10038 gas_assert (sz
== 0);
10039 sz
= imm_size (n1
);
10040 i
.op
[n
].disps
->X_add_number
-= sz
;
10042 /* We should find the immediate. */
10043 gas_assert (sz
!= 0);
10046 p
= frag_more (size
);
10047 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
10049 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
10050 && (((reloc_type
== BFD_RELOC_32
10051 || reloc_type
== BFD_RELOC_X86_64_32S
10052 || (reloc_type
== BFD_RELOC_64
10054 && (i
.op
[n
].disps
->X_op
== O_symbol
10055 || (i
.op
[n
].disps
->X_op
== O_add
10056 && ((symbol_get_value_expression
10057 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
10059 || reloc_type
== BFD_RELOC_32_PCREL
))
10063 reloc_type
= BFD_RELOC_386_GOTPC
;
10064 i
.has_gotpc_tls_reloc
= true;
10065 i
.op
[n
].disps
->X_add_number
+=
10066 encoding_length (insn_start_frag
, insn_start_off
, p
);
10068 else if (reloc_type
== BFD_RELOC_64
)
10069 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
10071 /* Don't do the adjustment for x86-64, as there
10072 the pcrel addressing is relative to the _next_
10073 insn, and that is taken care of in other code. */
10074 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
10076 else if (align_branch_power
)
10078 switch (reloc_type
)
10080 case BFD_RELOC_386_TLS_GD
:
10081 case BFD_RELOC_386_TLS_LDM
:
10082 case BFD_RELOC_386_TLS_IE
:
10083 case BFD_RELOC_386_TLS_IE_32
:
10084 case BFD_RELOC_386_TLS_GOTIE
:
10085 case BFD_RELOC_386_TLS_GOTDESC
:
10086 case BFD_RELOC_386_TLS_DESC_CALL
:
10087 case BFD_RELOC_X86_64_TLSGD
:
10088 case BFD_RELOC_X86_64_TLSLD
:
10089 case BFD_RELOC_X86_64_GOTTPOFF
:
10090 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
10091 case BFD_RELOC_X86_64_TLSDESC_CALL
:
10092 i
.has_gotpc_tls_reloc
= true;
10097 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
,
10098 size
, i
.op
[n
].disps
, pcrel
,
10101 if (flag_code
== CODE_64BIT
&& size
== 4 && pcrel
10102 && !i
.prefix
[ADDR_PREFIX
])
10103 fixP
->fx_signed
= 1;
10105 /* Check for "call/jmp *mem", "mov mem, %reg",
10106 "test %reg, mem" and "binop mem, %reg" where binop
10107 is one of adc, add, and, cmp, or, sbb, sub, xor
10108 instructions without data prefix. Always generate
10109 R_386_GOT32X for "sym*GOT" operand in 32-bit mode. */
10110 if (i
.prefix
[DATA_PREFIX
] == 0
10111 && (generate_relax_relocations
10114 && i
.rm
.regmem
== 5))
10116 || (i
.rm
.mode
== 0 && i
.rm
.regmem
== 5))
10117 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
10118 && ((i
.operands
== 1
10119 && i
.tm
.base_opcode
== 0xff
10120 && (i
.rm
.reg
== 2 || i
.rm
.reg
== 4))
10121 || (i
.operands
== 2
10122 && (i
.tm
.base_opcode
== 0x8b
10123 || i
.tm
.base_opcode
== 0x85
10124 || (i
.tm
.base_opcode
& ~0x38) == 0x03))))
10128 fixP
->fx_tcbit
= i
.rex
!= 0;
10130 && (i
.base_reg
->reg_num
== RegIP
))
10131 fixP
->fx_tcbit2
= 1;
10134 fixP
->fx_tcbit2
= 1;
10142 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
10147 for (n
= 0; n
< i
.operands
; n
++)
10149 if (operand_type_check (i
.types
[n
], imm
))
10151 int size
= imm_size (n
);
10153 if (now_seg
== absolute_section
)
10154 abs_section_offset
+= size
;
10155 else if (i
.op
[n
].imms
->X_op
== O_constant
)
10159 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
10161 p
= frag_more (size
);
10162 md_number_to_chars (p
, val
, size
);
10166 /* Not absolute_section.
10167 Need a 32-bit fixup (don't support 8bit
10168 non-absolute imms). Try to support other
10170 enum bfd_reloc_code_real reloc_type
;
10173 if (i
.types
[n
].bitfield
.imm32s
10174 && (i
.suffix
== QWORD_MNEM_SUFFIX
10175 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)))
10180 p
= frag_more (size
);
10181 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
10183 /* This is tough to explain. We end up with this one if we
10184 * have operands that look like
10185 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
10186 * obtain the absolute address of the GOT, and it is strongly
10187 * preferable from a performance point of view to avoid using
10188 * a runtime relocation for this. The actual sequence of
10189 * instructions often look something like:
10194 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
10196 * The call and pop essentially return the absolute address
10197 * of the label .L66 and store it in %ebx. The linker itself
10198 * will ultimately change the first operand of the addl so
10199 * that %ebx points to the GOT, but to keep things simple, the
10200 * .o file must have this operand set so that it generates not
10201 * the absolute address of .L66, but the absolute address of
10202 * itself. This allows the linker itself simply treat a GOTPC
10203 * relocation as asking for a pcrel offset to the GOT to be
10204 * added in, and the addend of the relocation is stored in the
10205 * operand field for the instruction itself.
10207 * Our job here is to fix the operand so that it would add
10208 * the correct offset so that %ebx would point to itself. The
10209 * thing that is tricky is that .-.L66 will point to the
10210 * beginning of the instruction, so we need to further modify
10211 * the operand so that it will point to itself. There are
10212 * other cases where you have something like:
10214 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
10216 * and here no correction would be required. Internally in
10217 * the assembler we treat operands of this form as not being
10218 * pcrel since the '.' is explicitly mentioned, and I wonder
10219 * whether it would simplify matters to do it this way. Who
10220 * knows. In earlier versions of the PIC patches, the
10221 * pcrel_adjust field was used to store the correction, but
10222 * since the expression is not pcrel, I felt it would be
10223 * confusing to do it this way. */
10225 if ((reloc_type
== BFD_RELOC_32
10226 || reloc_type
== BFD_RELOC_X86_64_32S
10227 || reloc_type
== BFD_RELOC_64
)
10229 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
10230 && (i
.op
[n
].imms
->X_op
== O_symbol
10231 || (i
.op
[n
].imms
->X_op
== O_add
10232 && ((symbol_get_value_expression
10233 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
10237 reloc_type
= BFD_RELOC_386_GOTPC
;
10238 else if (size
== 4)
10239 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
10240 else if (size
== 8)
10241 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
10242 i
.has_gotpc_tls_reloc
= true;
10243 i
.op
[n
].imms
->X_add_number
+=
10244 encoding_length (insn_start_frag
, insn_start_off
, p
);
10246 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
10247 i
.op
[n
].imms
, 0, reloc_type
);
10253 /* x86_cons_fix_new is called via the expression parsing code when a
10254 reloc is needed. We use this hook to get the correct .got reloc. */
10255 static int cons_sign
= -1;
10258 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
10259 expressionS
*exp
, bfd_reloc_code_real_type r
)
10261 r
= reloc (len
, 0, cons_sign
, r
);
10264 if (exp
->X_op
== O_secrel
)
10266 exp
->X_op
= O_symbol
;
10267 r
= BFD_RELOC_32_SECREL
;
10269 else if (exp
->X_op
== O_secidx
)
10270 r
= BFD_RELOC_16_SECIDX
;
10273 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
10276 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
10277 purpose of the `.dc.a' internal pseudo-op. */
10280 x86_address_bytes (void)
10282 if ((stdoutput
->arch_info
->mach
& bfd_mach_x64_32
))
10284 return stdoutput
->arch_info
->bits_per_address
/ 8;
#if (!(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
     || defined (LEX_AT)) && !defined (TE_PE)
# define lex_got(reloc, adjust, types) NULL
#else
/* Parse operands of the form
   <symbol>@GOTOFF+<nnn>
   and similar .plt or .got references.

   If we find one, set up the correct relocation in RELOC and copy the
   input string, minus the `@GOTOFF' into a malloc'd buffer for
   parsing by the calling routine.  Return this buffer, and if ADJUST
   is non-null set it to the length of the string we removed from the
   input line.  Otherwise return NULL.  */
static char *
lex_got (enum bfd_reloc_code_real *rel,
	 int *adjust,
	 i386_operand_type *types)
{
  /* Some of the relocations depend on the size of what field is to
     be relocated.  But in our callers i386_immediate and i386_displacement
     we don't yet know the operand size (this will be set by insn
     matching).  Hence we record the word32 relocation here,
     and adjust the reloc according to the real size in reloc().  */
  static const struct
  {
    const char *str;
    int len;
    const enum bfd_reloc_code_real rel[2];	/* [0]=32-bit, [1]=64-bit.  */
    const i386_operand_type types64;
    bool need_GOT_symbol;
  }
    gotrel[] =
  {
#ifndef TE_PE
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    { STRING_COMMA_LEN ("SIZE"),      { BFD_RELOC_SIZE32,
					BFD_RELOC_SIZE32 },
      OPERAND_TYPE_IMM32_64, false },
#endif
    { STRING_COMMA_LEN ("PLTOFF"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_PLTOFF64 },
      OPERAND_TYPE_IMM64, true },
    { STRING_COMMA_LEN ("PLT"),      { BFD_RELOC_386_PLT32,
				       BFD_RELOC_X86_64_PLT32 },
      OPERAND_TYPE_IMM32_32S_DISP32, false },
    { STRING_COMMA_LEN ("GOTPLT"),   { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPLT64 },
      OPERAND_TYPE_IMM64_DISP64, true },
    { STRING_COMMA_LEN ("GOTOFF"),   { BFD_RELOC_386_GOTOFF,
				       BFD_RELOC_X86_64_GOTOFF64 },
      OPERAND_TYPE_IMM64_DISP64, true },
    { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_GOTPCREL },
      OPERAND_TYPE_IMM32_32S_DISP32, true },
    { STRING_COMMA_LEN ("TLSGD"),    { BFD_RELOC_386_TLS_GD,
				       BFD_RELOC_X86_64_TLSGD },
      OPERAND_TYPE_IMM32_32S_DISP32, true },
    { STRING_COMMA_LEN ("TLSLDM"),   { BFD_RELOC_386_TLS_LDM,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE, true },
    { STRING_COMMA_LEN ("TLSLD"),    { _dummy_first_bfd_reloc_code_real,
				       BFD_RELOC_X86_64_TLSLD },
      OPERAND_TYPE_IMM32_32S_DISP32, true },
    { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
				       BFD_RELOC_X86_64_GOTTPOFF },
      OPERAND_TYPE_IMM32_32S_DISP32, true },
    { STRING_COMMA_LEN ("TPOFF"),    { BFD_RELOC_386_TLS_LE_32,
				       BFD_RELOC_X86_64_TPOFF32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64, true },
    { STRING_COMMA_LEN ("NTPOFF"),   { BFD_RELOC_386_TLS_LE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE, true },
    { STRING_COMMA_LEN ("DTPOFF"),   { BFD_RELOC_386_TLS_LDO_32,
				       BFD_RELOC_X86_64_DTPOFF32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64, true },
    { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE, true },
    { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
				       _dummy_first_bfd_reloc_code_real },
      OPERAND_TYPE_NONE, true },
    { STRING_COMMA_LEN ("GOT"),      { BFD_RELOC_386_GOT32,
				       BFD_RELOC_X86_64_GOT32 },
      OPERAND_TYPE_IMM32_32S_64_DISP32, true },
    { STRING_COMMA_LEN ("TLSDESC"),  { BFD_RELOC_386_TLS_GOTDESC,
				       BFD_RELOC_X86_64_GOTPC32_TLSDESC },
      OPERAND_TYPE_IMM32_32S_DISP32, true },
    { STRING_COMMA_LEN ("TLSCALL"),  { BFD_RELOC_386_TLS_DESC_CALL,
				       BFD_RELOC_X86_64_TLSDESC_CALL },
      OPERAND_TYPE_IMM32_32S_DISP32, true },
#else /* TE_PE */
    { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
				       BFD_RELOC_32_SECREL },
      OPERAND_TYPE_IMM32_32S_64_DISP32_64, false },
#endif
  };
  char *cp;
  unsigned int j;

#if defined (OBJ_MAYBE_ELF) && !defined (TE_PE)
  if (!IS_ELF)
    return NULL;
#endif

  /* Find the '@' introducing a reloc operator, if any, before end of
     line or a comma.  */
  for (cp = input_line_pointer; *cp != '@'; cp++)
    if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
      return NULL;

  for (j = 0; j < ARRAY_SIZE (gotrel); j++)
    {
      int len = gotrel[j].len;
      if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
	{
	  if (gotrel[j].rel[object_64bit] != 0)
	    {
	      int first, second;
	      char *tmpbuf, *past_reloc;

	      *rel = gotrel[j].rel[object_64bit];

	      if (types)
		{
		  if (flag_code != CODE_64BIT)
		    {
		      types->bitfield.imm32 = 1;
		      types->bitfield.disp32 = 1;
		    }
		  else
		    *types = gotrel[j].types64;
		}

	      if (gotrel[j].need_GOT_symbol && GOT_symbol == NULL)
		GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);

	      /* The length of the first part of our input line.  */
	      first = cp - input_line_pointer;

	      /* The second part goes from after the reloc token until
		 (and including) an end_of_line char or comma.  */
	      past_reloc = cp + 1 + len;
	      cp = past_reloc;
	      while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
		++cp;
	      second = cp + 1 - past_reloc;

	      /* Allocate and copy string.  The trailing NUL shouldn't
		 be necessary, but be safe.  */
	      tmpbuf = XNEWVEC (char, first + second + 2);
	      memcpy (tmpbuf, input_line_pointer, first);
	      if (second != 0 && *past_reloc != ' ')
		/* Replace the relocation token with ' ', so that
		   errors like foo@GOTOFF1 will be detected.  */
		tmpbuf[first++] = ' ';
	      else
		/* Increment length by 1 if the relocation token is
		   removed.  */
		len++;
	      if (adjust)
		*adjust = len;
	      memcpy (tmpbuf + first, past_reloc, second);
	      tmpbuf[first + second] = '\0';
	      return tmpbuf;
	    }

	  as_bad (_("@%s reloc is not supported with %d-bit output format"),
		  gotrel[j].str, 1 << (5 + object_64bit));
	  return NULL;
	}
    }

  /* Might be a symbol version string.  Don't as_bad here.  */
  return NULL;
}
#endif
10462 bfd_reloc_code_real_type
10463 x86_cons (expressionS
*exp
, int size
)
10465 bfd_reloc_code_real_type got_reloc
= NO_RELOC
;
10467 #if ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
10468 && !defined (LEX_AT)) \
10470 intel_syntax
= -intel_syntax
;
10473 if (size
== 4 || (object_64bit
&& size
== 8))
10475 /* Handle @GOTOFF and the like in an expression. */
10477 char *gotfree_input_line
;
10480 save
= input_line_pointer
;
10481 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
);
10482 if (gotfree_input_line
)
10483 input_line_pointer
= gotfree_input_line
;
10487 if (gotfree_input_line
)
10489 /* expression () has merrily parsed up to the end of line,
10490 or a comma - in the wrong buffer. Transfer how far
10491 input_line_pointer has moved to the right buffer. */
10492 input_line_pointer
= (save
10493 + (input_line_pointer
- gotfree_input_line
)
10495 free (gotfree_input_line
);
10496 if (exp
->X_op
== O_constant
10497 || exp
->X_op
== O_absent
10498 || exp
->X_op
== O_illegal
10499 || exp
->X_op
== O_register
10500 || exp
->X_op
== O_big
)
10502 char c
= *input_line_pointer
;
10503 *input_line_pointer
= 0;
10504 as_bad (_("missing or invalid expression `%s'"), save
);
10505 *input_line_pointer
= c
;
10507 else if ((got_reloc
== BFD_RELOC_386_PLT32
10508 || got_reloc
== BFD_RELOC_X86_64_PLT32
)
10509 && exp
->X_op
!= O_symbol
)
10511 char c
= *input_line_pointer
;
10512 *input_line_pointer
= 0;
10513 as_bad (_("invalid PLT expression `%s'"), save
);
10514 *input_line_pointer
= c
;
10521 intel_syntax
= -intel_syntax
;
10524 i386_intel_simplify (exp
);
10529 /* If not 64bit, massage value, to account for wraparound when !BFD64. */
10530 if (size
== 4 && exp
->X_op
== O_constant
&& !object_64bit
)
10531 exp
->X_add_number
= extend_to_32bit_address (exp
->X_add_number
);
10537 signed_cons (int size
)
10547 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
10554 if (exp
.X_op
== O_symbol
)
10555 exp
.X_op
= O_secrel
;
10557 emit_expr (&exp
, 4);
10559 while (*input_line_pointer
++ == ',');
10561 input_line_pointer
--;
10562 demand_empty_rest_of_line ();
10566 pe_directive_secidx (int dummy ATTRIBUTE_UNUSED
)
10573 if (exp
.X_op
== O_symbol
)
10574 exp
.X_op
= O_secidx
;
10576 emit_expr (&exp
, 2);
10578 while (*input_line_pointer
++ == ',');
10580 input_line_pointer
--;
10581 demand_empty_rest_of_line ();
10585 /* Handle Rounding Control / SAE specifiers. */
10588 RC_SAE_specifier (const char *pstr
)
10592 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); j
++)
10594 if (!strncmp (pstr
, RC_NamesTable
[j
].name
, RC_NamesTable
[j
].len
))
10596 if (i
.rounding
.type
!= rc_none
)
10598 as_bad (_("duplicated `{%s}'"), RC_NamesTable
[j
].name
);
10602 i
.rounding
.type
= RC_NamesTable
[j
].type
;
10604 return (char *)(pstr
+ RC_NamesTable
[j
].len
);
10611 /* Handle Vector operations. */
10614 check_VecOperations (char *op_string
)
10616 const reg_entry
*mask
;
10623 if (*op_string
== '{')
10627 /* Check broadcasts. */
10628 if (startswith (op_string
, "1to"))
10630 unsigned int bcst_type
;
10632 if (i
.broadcast
.type
)
10633 goto duplicated_vec_op
;
10636 if (*op_string
== '8')
10638 else if (*op_string
== '4')
10640 else if (*op_string
== '2')
10642 else if (*op_string
== '1'
10643 && *(op_string
+1) == '6')
10648 else if (*op_string
== '3'
10649 && *(op_string
+1) == '2')
10656 as_bad (_("Unsupported broadcast: `%s'"), saved
);
10661 i
.broadcast
.type
= bcst_type
;
10662 i
.broadcast
.operand
= this_operand
;
10664 /* Check masking operation. */
10665 else if ((mask
= parse_register (op_string
, &end_op
)) != NULL
)
10667 if (mask
== &bad_reg
)
10670 /* k0 can't be used for write mask. */
10671 if (mask
->reg_type
.bitfield
.class != RegMask
|| !mask
->reg_num
)
10673 as_bad (_("`%s%s' can't be used for write mask"),
10674 register_prefix
, mask
->reg_name
);
10681 i
.mask
.operand
= this_operand
;
10683 else if (i
.mask
.reg
->reg_num
)
10684 goto duplicated_vec_op
;
10689 /* Only "{z}" is allowed here. No need to check
10690 zeroing mask explicitly. */
10691 if (i
.mask
.operand
!= (unsigned int) this_operand
)
10693 as_bad (_("invalid write mask `%s'"), saved
);
10698 op_string
= end_op
;
10700 /* Check zeroing-flag for masking operation. */
10701 else if (*op_string
== 'z')
10705 i
.mask
.reg
= reg_k0
;
10706 i
.mask
.zeroing
= 1;
10707 i
.mask
.operand
= this_operand
;
10711 if (i
.mask
.zeroing
)
10714 as_bad (_("duplicated `%s'"), saved
);
10718 i
.mask
.zeroing
= 1;
10720 /* Only "{%k}" is allowed here. No need to check mask
10721 register explicitly. */
10722 if (i
.mask
.operand
!= (unsigned int) this_operand
)
10724 as_bad (_("invalid zeroing-masking `%s'"),
10732 else if (intel_syntax
10733 && (op_string
= RC_SAE_specifier (op_string
)) != NULL
)
10734 i
.rounding
.modifier
= true;
10736 goto unknown_vec_op
;
10738 if (*op_string
!= '}')
10740 as_bad (_("missing `}' in `%s'"), saved
);
10745 /* Strip whitespace since the addition of pseudo prefixes
10746 changed how the scrubber treats '{'. */
10747 if (is_space_char (*op_string
))
10753 /* We don't know this one. */
10754 as_bad (_("unknown vector operation: `%s'"), saved
);
10758 if (i
.mask
.reg
&& i
.mask
.zeroing
&& !i
.mask
.reg
->reg_num
)
10760 as_bad (_("zeroing-masking only allowed with write mask"));
10768 i386_immediate (char *imm_start
)
10770 char *save_input_line_pointer
;
10771 char *gotfree_input_line
;
10774 i386_operand_type types
;
10776 operand_type_set (&types
, ~0);
10778 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
10780 as_bad (_("at most %d immediate operands are allowed"),
10781 MAX_IMMEDIATE_OPERANDS
);
10785 exp
= &im_expressions
[i
.imm_operands
++];
10786 i
.op
[this_operand
].imms
= exp
;
10788 if (is_space_char (*imm_start
))
10791 save_input_line_pointer
= input_line_pointer
;
10792 input_line_pointer
= imm_start
;
10794 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
10795 if (gotfree_input_line
)
10796 input_line_pointer
= gotfree_input_line
;
10798 exp_seg
= expression (exp
);
10800 SKIP_WHITESPACE ();
10801 if (*input_line_pointer
)
10802 as_bad (_("junk `%s' after expression"), input_line_pointer
);
10804 input_line_pointer
= save_input_line_pointer
;
10805 if (gotfree_input_line
)
10807 free (gotfree_input_line
);
10809 if (exp
->X_op
== O_constant
)
10810 exp
->X_op
= O_illegal
;
10813 if (exp_seg
== reg_section
)
10815 as_bad (_("illegal immediate register operand %s"), imm_start
);
10819 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
10823 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
10824 i386_operand_type types
, const char *imm_start
)
10826 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
10829 as_bad (_("missing or invalid immediate expression `%s'"),
10833 else if (exp
->X_op
== O_constant
)
10835 /* Size it properly later. */
10836 i
.types
[this_operand
].bitfield
.imm64
= 1;
10838 /* If not 64bit, sign/zero extend val, to account for wraparound
10840 if (flag_code
!= CODE_64BIT
)
10841 exp
->X_add_number
= extend_to_32bit_address (exp
->X_add_number
);
10843 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10844 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
10845 && exp_seg
!= absolute_section
10846 && exp_seg
!= text_section
10847 && exp_seg
!= data_section
10848 && exp_seg
!= bss_section
10849 && exp_seg
!= undefined_section
10850 && !bfd_is_com_section (exp_seg
))
10852 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
10858 /* This is an address. The size of the address will be
10859 determined later, depending on destination register,
10860 suffix, or the default for the section. */
10861 i
.types
[this_operand
].bitfield
.imm8
= 1;
10862 i
.types
[this_operand
].bitfield
.imm16
= 1;
10863 i
.types
[this_operand
].bitfield
.imm32
= 1;
10864 i
.types
[this_operand
].bitfield
.imm32s
= 1;
10865 i
.types
[this_operand
].bitfield
.imm64
= 1;
10866 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
10874 i386_scale (char *scale
)
10877 char *save
= input_line_pointer
;
10879 input_line_pointer
= scale
;
10880 val
= get_absolute_expression ();
10885 i
.log2_scale_factor
= 0;
10888 i
.log2_scale_factor
= 1;
10891 i
.log2_scale_factor
= 2;
10894 i
.log2_scale_factor
= 3;
10898 char sep
= *input_line_pointer
;
10900 *input_line_pointer
= '\0';
10901 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
10903 *input_line_pointer
= sep
;
10904 input_line_pointer
= save
;
10908 if (i
.log2_scale_factor
!= 0 && i
.index_reg
== 0)
10910 as_warn (_("scale factor of %d without an index register"),
10911 1 << i
.log2_scale_factor
);
10912 i
.log2_scale_factor
= 0;
10914 scale
= input_line_pointer
;
10915 input_line_pointer
= save
;
10920 i386_displacement (char *disp_start
, char *disp_end
)
10924 char *save_input_line_pointer
;
10925 char *gotfree_input_line
;
10927 i386_operand_type bigdisp
, types
= anydisp
;
10930 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
10932 as_bad (_("at most %d displacement operands are allowed"),
10933 MAX_MEMORY_OPERANDS
);
10937 operand_type_set (&bigdisp
, 0);
10939 || i
.types
[this_operand
].bitfield
.baseindex
10940 || (current_templates
->start
->opcode_modifier
.jump
!= JUMP
10941 && current_templates
->start
->opcode_modifier
.jump
!= JUMP_DWORD
))
10943 i386_addressing_mode ();
10944 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
10945 if (flag_code
== CODE_64BIT
)
10947 bigdisp
.bitfield
.disp32
= 1;
10949 bigdisp
.bitfield
.disp64
= 1;
10951 else if ((flag_code
== CODE_16BIT
) ^ override
)
10952 bigdisp
.bitfield
.disp16
= 1;
10954 bigdisp
.bitfield
.disp32
= 1;
10958 /* For PC-relative branches, the width of the displacement may be
10959 dependent upon data size, but is never dependent upon address size.
10960 Also make sure to not unintentionally match against a non-PC-relative
10961 branch template. */
10962 static templates aux_templates
;
10963 const insn_template
*t
= current_templates
->start
;
10964 bool has_intel64
= false;
10966 aux_templates
.start
= t
;
10967 while (++t
< current_templates
->end
)
10969 if (t
->opcode_modifier
.jump
10970 != current_templates
->start
->opcode_modifier
.jump
)
10972 if ((t
->opcode_modifier
.isa64
>= INTEL64
))
10973 has_intel64
= true;
10975 if (t
< current_templates
->end
)
10977 aux_templates
.end
= t
;
10978 current_templates
= &aux_templates
;
10981 override
= (i
.prefix
[DATA_PREFIX
] != 0);
10982 if (flag_code
== CODE_64BIT
)
10984 if ((override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
10985 && (!intel64
|| !has_intel64
))
10986 bigdisp
.bitfield
.disp16
= 1;
10988 bigdisp
.bitfield
.disp32
= 1;
10993 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
10995 : LONG_MNEM_SUFFIX
));
10996 bigdisp
.bitfield
.disp32
= 1;
10997 if ((flag_code
== CODE_16BIT
) ^ override
)
10999 bigdisp
.bitfield
.disp32
= 0;
11000 bigdisp
.bitfield
.disp16
= 1;
11004 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
11007 exp
= &disp_expressions
[i
.disp_operands
];
11008 i
.op
[this_operand
].disps
= exp
;
11010 save_input_line_pointer
= input_line_pointer
;
11011 input_line_pointer
= disp_start
;
11012 END_STRING_AND_SAVE (disp_end
);
11014 #ifndef GCC_ASM_O_HACK
11015 #define GCC_ASM_O_HACK 0
11018 END_STRING_AND_SAVE (disp_end
+ 1);
11019 if (i
.types
[this_operand
].bitfield
.baseIndex
11020 && displacement_string_end
[-1] == '+')
11022 /* This hack is to avoid a warning when using the "o"
11023 constraint within gcc asm statements.
11026 #define _set_tssldt_desc(n,addr,limit,type) \
11027 __asm__ __volatile__ ( \
11028 "movw %w2,%0\n\t" \
11029 "movw %w1,2+%0\n\t" \
11030 "rorl $16,%1\n\t" \
11031 "movb %b1,4+%0\n\t" \
11032 "movb %4,5+%0\n\t" \
11033 "movb $0,6+%0\n\t" \
11034 "movb %h1,7+%0\n\t" \
11036 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
11038 This works great except that the output assembler ends
11039 up looking a bit weird if it turns out that there is
11040 no offset. You end up producing code that looks like:
11053 So here we provide the missing zero. */
11055 *displacement_string_end
= '0';
11058 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
11059 if (gotfree_input_line
)
11060 input_line_pointer
= gotfree_input_line
;
11062 exp_seg
= expression (exp
);
11064 SKIP_WHITESPACE ();
11065 if (*input_line_pointer
)
11066 as_bad (_("junk `%s' after expression"), input_line_pointer
);
11068 RESTORE_END_STRING (disp_end
+ 1);
11070 input_line_pointer
= save_input_line_pointer
;
11071 if (gotfree_input_line
)
11073 free (gotfree_input_line
);
11075 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
11076 exp
->X_op
= O_illegal
;
11079 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
11081 RESTORE_END_STRING (disp_end
);
11087 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
11088 i386_operand_type types
, const char *disp_start
)
11092 /* We do this to make sure that the section symbol is in
11093 the symbol table. We will ultimately change the relocation
11094 to be relative to the beginning of the section. */
11095 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
11096 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
11097 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
11099 if (exp
->X_op
!= O_symbol
)
11102 if (S_IS_LOCAL (exp
->X_add_symbol
)
11103 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
11104 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
11105 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
11106 exp
->X_op
= O_subtract
;
11107 exp
->X_op_symbol
= GOT_symbol
;
11108 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
11109 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
11110 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
11111 i
.reloc
[this_operand
] = BFD_RELOC_64
;
11113 i
.reloc
[this_operand
] = BFD_RELOC_32
;
11116 else if (exp
->X_op
== O_absent
11117 || exp
->X_op
== O_illegal
11118 || exp
->X_op
== O_big
)
11121 as_bad (_("missing or invalid displacement expression `%s'"),
11126 else if (exp
->X_op
== O_constant
)
11128 /* Sizing gets taken care of by optimize_disp().
11130 If not 64bit, sign/zero extend val, to account for wraparound
11132 if (flag_code
!= CODE_64BIT
)
11133 exp
->X_add_number
= extend_to_32bit_address (exp
->X_add_number
);
11136 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
11137 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
11138 && exp_seg
!= absolute_section
11139 && exp_seg
!= text_section
11140 && exp_seg
!= data_section
11141 && exp_seg
!= bss_section
11142 && exp_seg
!= undefined_section
11143 && !bfd_is_com_section (exp_seg
))
11145 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
11150 else if (current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
)
11151 i
.types
[this_operand
].bitfield
.disp8
= 1;
11153 /* Check if this is a displacement only operand. */
11154 if (!i
.types
[this_operand
].bitfield
.baseindex
)
11155 i
.types
[this_operand
] =
11156 operand_type_or (operand_type_and_not (i
.types
[this_operand
], anydisp
),
11157 operand_type_and (i
.types
[this_operand
], types
));
11162 /* Return the active addressing mode, taking address override and
11163 registers forming the address into consideration. Update the
11164 address override prefix if necessary. */
11166 static enum flag_code
11167 i386_addressing_mode (void)
11169 enum flag_code addr_mode
;
11171 if (i
.prefix
[ADDR_PREFIX
])
11172 addr_mode
= flag_code
== CODE_32BIT
? CODE_16BIT
: CODE_32BIT
;
11173 else if (flag_code
== CODE_16BIT
11174 && current_templates
->start
->cpu_flags
.bitfield
.cpumpx
11175 /* Avoid replacing the "16-bit addressing not allowed" diagnostic
11176 from md_assemble() by "is not a valid base/index expression"
11177 when there is a base and/or index. */
11178 && !i
.types
[this_operand
].bitfield
.baseindex
)
11180 /* MPX insn memory operands with neither base nor index must be forced
11181 to use 32-bit addressing in 16-bit mode. */
11182 addr_mode
= CODE_32BIT
;
11183 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
11185 gas_assert (!i
.types
[this_operand
].bitfield
.disp16
);
11186 gas_assert (!i
.types
[this_operand
].bitfield
.disp32
);
11190 addr_mode
= flag_code
;
11192 #if INFER_ADDR_PREFIX
11193 if (i
.mem_operands
== 0)
11195 /* Infer address prefix from the first memory operand. */
11196 const reg_entry
*addr_reg
= i
.base_reg
;
11198 if (addr_reg
== NULL
)
11199 addr_reg
= i
.index_reg
;
11203 if (addr_reg
->reg_type
.bitfield
.dword
)
11204 addr_mode
= CODE_32BIT
;
11205 else if (flag_code
!= CODE_64BIT
11206 && addr_reg
->reg_type
.bitfield
.word
)
11207 addr_mode
= CODE_16BIT
;
11209 if (addr_mode
!= flag_code
)
11211 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
11213 /* Change the size of any displacement too. At most one
11214 of Disp16 or Disp32 is set.
11215 FIXME. There doesn't seem to be any real need for
11216 separate Disp16 and Disp32 flags. The same goes for
11217 Imm16 and Imm32. Removing them would probably clean
11218 up the code quite a lot. */
11219 if (flag_code
!= CODE_64BIT
11220 && (i
.types
[this_operand
].bitfield
.disp16
11221 || i
.types
[this_operand
].bitfield
.disp32
))
11222 i
.types
[this_operand
]
11223 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
11233 /* Make sure the memory operand we've been dealt is valid.
11234 Return 1 on success, 0 on a failure. */
11237 i386_index_check (const char *operand_string
)
11239 const char *kind
= "base/index";
11240 enum flag_code addr_mode
= i386_addressing_mode ();
11241 const insn_template
*t
= current_templates
->start
;
11243 if (t
->opcode_modifier
.isstring
11244 && (current_templates
->end
[-1].opcode_modifier
.isstring
11245 || i
.mem_operands
))
11247 /* Memory operands of string insns are special in that they only allow
11248 a single register (rDI, rSI, or rBX) as their memory address. */
11249 const reg_entry
*expected_reg
;
11250 static const char *di_si
[][2] =
11256 static const char *bx
[] = { "ebx", "bx", "rbx" };
11258 kind
= "string address";
11260 if (t
->opcode_modifier
.prefixok
== PrefixRep
)
11262 int es_op
= current_templates
->end
[-1].opcode_modifier
.isstring
11263 - IS_STRING_ES_OP0
;
11266 if (!current_templates
->end
[-1].operand_types
[0].bitfield
.baseindex
11267 || ((!i
.mem_operands
!= !intel_syntax
)
11268 && current_templates
->end
[-1].operand_types
[1]
11269 .bitfield
.baseindex
))
11272 = (const reg_entry
*) str_hash_find (reg_hash
,
11273 di_si
[addr_mode
][op
== es_op
]);
11277 = (const reg_entry
*)str_hash_find (reg_hash
, bx
[addr_mode
]);
11279 if (i
.base_reg
!= expected_reg
11281 || operand_type_check (i
.types
[this_operand
], disp
))
11283 /* The second memory operand must have the same size as
11287 && !((addr_mode
== CODE_64BIT
11288 && i
.base_reg
->reg_type
.bitfield
.qword
)
11289 || (addr_mode
== CODE_32BIT
11290 ? i
.base_reg
->reg_type
.bitfield
.dword
11291 : i
.base_reg
->reg_type
.bitfield
.word
)))
11294 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
11296 intel_syntax
? '[' : '(',
11298 expected_reg
->reg_name
,
11299 intel_syntax
? ']' : ')');
11306 as_bad (_("`%s' is not a valid %s expression"),
11307 operand_string
, kind
);
11312 if (addr_mode
!= CODE_16BIT
)
11314 /* 32-bit/64-bit checks. */
11315 if (i
.disp_encoding
== disp_encoding_16bit
)
11318 as_bad (_("invalid `%s' prefix"),
11319 addr_mode
== CODE_16BIT
? "{disp32}" : "{disp16}");
11324 && ((addr_mode
== CODE_64BIT
11325 ? !i
.base_reg
->reg_type
.bitfield
.qword
11326 : !i
.base_reg
->reg_type
.bitfield
.dword
)
11327 || (i
.index_reg
&& i
.base_reg
->reg_num
== RegIP
)
11328 || i
.base_reg
->reg_num
== RegIZ
))
11330 && !i
.index_reg
->reg_type
.bitfield
.xmmword
11331 && !i
.index_reg
->reg_type
.bitfield
.ymmword
11332 && !i
.index_reg
->reg_type
.bitfield
.zmmword
11333 && ((addr_mode
== CODE_64BIT
11334 ? !i
.index_reg
->reg_type
.bitfield
.qword
11335 : !i
.index_reg
->reg_type
.bitfield
.dword
)
11336 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
11339 /* bndmk, bndldx, bndstx and mandatory non-vector SIB have special restrictions. */
11340 if ((t
->opcode_modifier
.opcodeprefix
== PREFIX_0XF3
11341 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11342 && t
->base_opcode
== 0x1b)
11343 || (t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
11344 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11345 && (t
->base_opcode
& ~1) == 0x1a)
11346 || t
->opcode_modifier
.sib
== SIBMEM
)
11348 /* They cannot use RIP-relative addressing. */
11349 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
11351 as_bad (_("`%s' cannot be used here"), operand_string
);
11355 /* bndldx and bndstx ignore their scale factor. */
11356 if (t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
11357 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11358 && (t
->base_opcode
& ~1) == 0x1a
11359 && i
.log2_scale_factor
)
11360 as_warn (_("register scaling is being ignored here"));
11365 /* 16-bit checks. */
11366 if (i
.disp_encoding
== disp_encoding_32bit
)
11370 && (!i
.base_reg
->reg_type
.bitfield
.word
11371 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
11373 && (!i
.index_reg
->reg_type
.bitfield
.word
11374 || !i
.index_reg
->reg_type
.bitfield
.baseindex
11376 && i
.base_reg
->reg_num
< 6
11377 && i
.index_reg
->reg_num
>= 6
11378 && i
.log2_scale_factor
== 0))))
11385 /* Handle vector immediates. */
11388 RC_SAE_immediate (const char *imm_start
)
11390 const char *pstr
= imm_start
;
11395 pstr
= RC_SAE_specifier (pstr
+ 1);
11399 if (*pstr
++ != '}')
11401 as_bad (_("Missing '}': '%s'"), imm_start
);
11404 /* RC/SAE immediate string should contain nothing more. */;
11407 as_bad (_("Junk after '}': '%s'"), imm_start
);
11411 /* Internally this doesn't count as an operand. */
11417 /* Only string instructions can have a second memory operand, so
11418 reduce current_templates to just those if it contains any. */
11420 maybe_adjust_templates (void)
11422 const insn_template
*t
;
11424 gas_assert (i
.mem_operands
== 1);
11426 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
11427 if (t
->opcode_modifier
.isstring
)
11430 if (t
< current_templates
->end
)
11432 static templates aux_templates
;
11435 aux_templates
.start
= t
;
11436 for (; t
< current_templates
->end
; ++t
)
11437 if (!t
->opcode_modifier
.isstring
)
11439 aux_templates
.end
= t
;
11441 /* Determine whether to re-check the first memory operand. */
11442 recheck
= (aux_templates
.start
!= current_templates
->start
11443 || t
!= current_templates
->end
);
11445 current_templates
= &aux_templates
;
11449 i
.mem_operands
= 0;
11450 if (i
.memop1_string
!= NULL
11451 && i386_index_check (i
.memop1_string
) == 0)
11453 i
.mem_operands
= 1;
11460 static INLINE
bool starts_memory_operand (char c
)
11463 || is_identifier_char (c
)
11464 || strchr ("([\"+-!~", c
);
11467 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
11471 i386_att_operand (char *operand_string
)
11473 const reg_entry
*r
;
11475 char *op_string
= operand_string
;
11477 if (is_space_char (*op_string
))
11480 /* We check for an absolute prefix (differentiating,
11481 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
11482 if (*op_string
== ABSOLUTE_PREFIX
)
11485 if (is_space_char (*op_string
))
11487 i
.jumpabsolute
= true;
11490 /* Check if operand is a register. */
11491 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
11493 i386_operand_type temp
;
11498 /* Check for a segment override by searching for ':' after a
11499 segment register. */
11500 op_string
= end_op
;
11501 if (is_space_char (*op_string
))
11503 if (*op_string
== ':' && r
->reg_type
.bitfield
.class == SReg
)
11505 i
.seg
[i
.mem_operands
] = r
;
11507 /* Skip the ':' and whitespace. */
11509 if (is_space_char (*op_string
))
11512 /* Handle case of %es:*foo. */
11513 if (!i
.jumpabsolute
&& *op_string
== ABSOLUTE_PREFIX
)
11516 if (is_space_char (*op_string
))
11518 i
.jumpabsolute
= true;
11521 if (!starts_memory_operand (*op_string
))
11523 as_bad (_("bad memory operand `%s'"), op_string
);
11526 goto do_memory_reference
;
11529 /* Handle vector operations. */
11530 if (*op_string
== '{')
11532 op_string
= check_VecOperations (op_string
);
11533 if (op_string
== NULL
)
11539 as_bad (_("junk `%s' after register"), op_string
);
11542 temp
= r
->reg_type
;
11543 temp
.bitfield
.baseindex
= 0;
11544 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
11546 i
.types
[this_operand
].bitfield
.unspecified
= 0;
11547 i
.op
[this_operand
].regs
= r
;
11550 /* A GPR may follow an RC or SAE immediate only if a (vector) register
11551 operand was also present earlier on. */
11552 if (i
.rounding
.type
!= rc_none
&& temp
.bitfield
.class == Reg
11553 && i
.reg_operands
== 1)
11557 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); ++j
)
11558 if (i
.rounding
.type
== RC_NamesTable
[j
].type
)
11560 as_bad (_("`%s': misplaced `{%s}'"),
11561 current_templates
->start
->name
, RC_NamesTable
[j
].name
);
11565 else if (*op_string
== REGISTER_PREFIX
)
11567 as_bad (_("bad register name `%s'"), op_string
);
11570 else if (*op_string
== IMMEDIATE_PREFIX
)
11573 if (i
.jumpabsolute
)
11575 as_bad (_("immediate operand illegal with absolute jump"));
11578 if (!i386_immediate (op_string
))
11580 if (i
.rounding
.type
!= rc_none
)
11582 as_bad (_("`%s': RC/SAE operand must follow immediate operands"),
11583 current_templates
->start
->name
);
11587 else if (RC_SAE_immediate (operand_string
))
11589 /* If it is a RC or SAE immediate, do the necessary placement check:
11590 Only another immediate or a GPR may precede it. */
11591 if (i
.mem_operands
|| i
.reg_operands
+ i
.imm_operands
> 1
11592 || (i
.reg_operands
== 1
11593 && i
.op
[0].regs
->reg_type
.bitfield
.class != Reg
))
11595 as_bad (_("`%s': misplaced `%s'"),
11596 current_templates
->start
->name
, operand_string
);
11600 else if (starts_memory_operand (*op_string
))
11602 /* This is a memory reference of some sort. */
11605 /* Start and end of displacement string expression (if found). */
11606 char *displacement_string_start
;
11607 char *displacement_string_end
;
11609 do_memory_reference
:
11610 if (i
.mem_operands
== 1 && !maybe_adjust_templates ())
11612 if ((i
.mem_operands
== 1
11613 && !current_templates
->start
->opcode_modifier
.isstring
)
11614 || i
.mem_operands
== 2)
11616 as_bad (_("too many memory references for `%s'"),
11617 current_templates
->start
->name
);
11621 /* Check for base index form. We detect the base index form by
11622 looking for an ')' at the end of the operand, searching
11623 for the '(' matching it, and finding a REGISTER_PREFIX or ','
11625 base_string
= op_string
+ strlen (op_string
);
11627 /* Handle vector operations. */
11629 if (is_space_char (*base_string
))
11632 if (*base_string
== '}')
11634 char *vop_start
= NULL
;
11636 while (base_string
-- > op_string
)
11638 if (*base_string
== '"')
11640 if (*base_string
!= '{')
11643 vop_start
= base_string
;
11646 if (is_space_char (*base_string
))
11649 if (*base_string
!= '}')
11657 as_bad (_("unbalanced figure braces"));
11661 if (check_VecOperations (vop_start
) == NULL
)
11665 /* If we only have a displacement, set-up for it to be parsed later. */
11666 displacement_string_start
= op_string
;
11667 displacement_string_end
= base_string
+ 1;
11669 if (*base_string
== ')')
11672 unsigned int parens_not_balanced
= 0;
11673 bool in_quotes
= false;
11675 /* We've already checked that the number of left & right ()'s are
11676 equal, and that there's a matching set of double quotes. */
11677 end_op
= base_string
;
11678 for (temp_string
= op_string
; temp_string
< end_op
; temp_string
++)
11680 if (*temp_string
== '\\' && temp_string
[1] == '"')
11682 else if (*temp_string
== '"')
11683 in_quotes
= !in_quotes
;
11684 else if (!in_quotes
)
11686 if (*temp_string
== '(' && !parens_not_balanced
++)
11687 base_string
= temp_string
;
11688 if (*temp_string
== ')')
11689 --parens_not_balanced
;
11693 temp_string
= base_string
;
11695 /* Skip past '(' and whitespace. */
11696 gas_assert (*base_string
== '(');
11698 if (is_space_char (*base_string
))
11701 if (*base_string
== ','
11702 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
11705 displacement_string_end
= temp_string
;
11707 i
.types
[this_operand
].bitfield
.baseindex
= 1;
11711 if (i
.base_reg
== &bad_reg
)
11713 base_string
= end_op
;
11714 if (is_space_char (*base_string
))
11718 /* There may be an index reg or scale factor here. */
11719 if (*base_string
== ',')
11722 if (is_space_char (*base_string
))
11725 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
11728 if (i
.index_reg
== &bad_reg
)
11730 base_string
= end_op
;
11731 if (is_space_char (*base_string
))
11733 if (*base_string
== ',')
11736 if (is_space_char (*base_string
))
11739 else if (*base_string
!= ')')
11741 as_bad (_("expecting `,' or `)' "
11742 "after index register in `%s'"),
11747 else if (*base_string
== REGISTER_PREFIX
)
11749 end_op
= strchr (base_string
, ',');
11752 as_bad (_("bad register name `%s'"), base_string
);
11756 /* Check for scale factor. */
11757 if (*base_string
!= ')')
11759 char *end_scale
= i386_scale (base_string
);
11764 base_string
= end_scale
;
11765 if (is_space_char (*base_string
))
11767 if (*base_string
!= ')')
11769 as_bad (_("expecting `)' "
11770 "after scale factor in `%s'"),
11775 else if (!i
.index_reg
)
11777 as_bad (_("expecting index register or scale factor "
11778 "after `,'; got '%c'"),
11783 else if (*base_string
!= ')')
11785 as_bad (_("expecting `,' or `)' "
11786 "after base register in `%s'"),
11791 else if (*base_string
== REGISTER_PREFIX
)
11793 end_op
= strchr (base_string
, ',');
11796 as_bad (_("bad register name `%s'"), base_string
);
11801 /* If there's an expression beginning the operand, parse it,
11802 assuming displacement_string_start and
11803 displacement_string_end are meaningful. */
11804 if (displacement_string_start
!= displacement_string_end
)
11806 if (!i386_displacement (displacement_string_start
,
11807 displacement_string_end
))
11811 /* Special case for (%dx) while doing input/output op. */
11813 && i
.base_reg
->reg_type
.bitfield
.instance
== RegD
11814 && i
.base_reg
->reg_type
.bitfield
.word
11815 && i
.index_reg
== 0
11816 && i
.log2_scale_factor
== 0
11817 && i
.seg
[i
.mem_operands
] == 0
11818 && !operand_type_check (i
.types
[this_operand
], disp
))
11820 i
.types
[this_operand
] = i
.base_reg
->reg_type
;
11821 i
.input_output_operand
= true;
11825 if (i386_index_check (operand_string
) == 0)
11827 i
.flags
[this_operand
] |= Operand_Mem
;
11828 if (i
.mem_operands
== 0)
11829 i
.memop1_string
= xstrdup (operand_string
);
11834 /* It's not a memory operand; argh! */
11835 as_bad (_("invalid char %s beginning operand %d `%s'"),
11836 output_invalid (*op_string
),
11841 return 1; /* Normal return. */
11844 /* Calculate the maximum variable size (i.e., excluding fr_fix)
11845 that an rs_machine_dependent frag may reach. */
11848 i386_frag_max_var (fragS
*frag
)
11850 /* The only relaxable frags are for jumps.
11851 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
11852 gas_assert (frag
->fr_type
== rs_machine_dependent
);
11853 return TYPE_FROM_RELAX_STATE (frag
->fr_subtype
) == UNCOND_JUMP
? 4 : 5;
11856 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11858 elf_symbol_resolved_in_segment_p (symbolS
*fr_symbol
, offsetT fr_var
)
11860 /* STT_GNU_IFUNC symbol must go through PLT. */
11861 if ((symbol_get_bfdsym (fr_symbol
)->flags
11862 & BSF_GNU_INDIRECT_FUNCTION
) != 0)
11865 if (!S_IS_EXTERNAL (fr_symbol
))
11866 /* Symbol may be weak or local. */
11867 return !S_IS_WEAK (fr_symbol
);
11869 /* Global symbols with non-default visibility can't be preempted. */
11870 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol
)) != STV_DEFAULT
)
11873 if (fr_var
!= NO_RELOC
)
11874 switch ((enum bfd_reloc_code_real
) fr_var
)
11876 case BFD_RELOC_386_PLT32
:
11877 case BFD_RELOC_X86_64_PLT32
:
11878 /* Symbol with PLT relocation may be preempted. */
11884 /* Global symbols with default visibility in a shared library may be
11885 preempted by another definition. */
11890 /* Table 3-2. Macro-Fusible Instructions in Haswell Microarchitecture
11891 Note also work for Skylake and Cascadelake.
11892 ---------------------------------------------------------------------
11893 | JCC | ADD/SUB/CMP | INC/DEC | TEST/AND |
11894 | ------ | ----------- | ------- | -------- |
11896 | Jno | N | N | Y |
11897 | Jc/Jb | Y | N | Y |
11898 | Jae/Jnb | Y | N | Y |
11899 | Je/Jz | Y | Y | Y |
11900 | Jne/Jnz | Y | Y | Y |
11901 | Jna/Jbe | Y | N | Y |
11902 | Ja/Jnbe | Y | N | Y |
11904 | Jns | N | N | Y |
11905 | Jp/Jpe | N | N | Y |
11906 | Jnp/Jpo | N | N | Y |
11907 | Jl/Jnge | Y | Y | Y |
11908 | Jge/Jnl | Y | Y | Y |
11909 | Jle/Jng | Y | Y | Y |
11910 | Jg/Jnle | Y | Y | Y |
11911 --------------------------------------------------------------------- */
11913 i386_macro_fusible_p (enum mf_cmp_kind mf_cmp
, enum mf_jcc_kind mf_jcc
)
11915 if (mf_cmp
== mf_cmp_alu_cmp
)
11916 return ((mf_jcc
>= mf_jcc_jc
&& mf_jcc
<= mf_jcc_jna
)
11917 || mf_jcc
== mf_jcc_jl
|| mf_jcc
== mf_jcc_jle
);
11918 if (mf_cmp
== mf_cmp_incdec
)
11919 return (mf_jcc
== mf_jcc_je
|| mf_jcc
== mf_jcc_jl
11920 || mf_jcc
== mf_jcc_jle
);
11921 if (mf_cmp
== mf_cmp_test_and
)
11926 /* Return the next non-empty frag. */
11929 i386_next_non_empty_frag (fragS
*fragP
)
11931 /* There may be a frag with a ".fill 0" when there is no room in
11932 the current frag for frag_grow in output_insn. */
11933 for (fragP
= fragP
->fr_next
;
11935 && fragP
->fr_type
== rs_fill
11936 && fragP
->fr_fix
== 0);
11937 fragP
= fragP
->fr_next
)
11942 /* Return the next jcc frag after BRANCH_PADDING. */
11945 i386_next_fusible_jcc_frag (fragS
*maybe_cmp_fragP
, fragS
*pad_fragP
)
11947 fragS
*branch_fragP
;
11951 if (pad_fragP
->fr_type
== rs_machine_dependent
11952 && (TYPE_FROM_RELAX_STATE (pad_fragP
->fr_subtype
)
11953 == BRANCH_PADDING
))
11955 branch_fragP
= i386_next_non_empty_frag (pad_fragP
);
11956 if (branch_fragP
->fr_type
!= rs_machine_dependent
)
11958 if (TYPE_FROM_RELAX_STATE (branch_fragP
->fr_subtype
) == COND_JUMP
11959 && i386_macro_fusible_p (maybe_cmp_fragP
->tc_frag_data
.mf_type
,
11960 pad_fragP
->tc_frag_data
.mf_type
))
11961 return branch_fragP
;
11967 /* Classify BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags. */
11970 i386_classify_machine_dependent_frag (fragS
*fragP
)
11974 fragS
*branch_fragP
;
11976 unsigned int max_prefix_length
;
11978 if (fragP
->tc_frag_data
.classified
)
11981 /* First scan for BRANCH_PADDING and FUSED_JCC_PADDING. Convert
11982 FUSED_JCC_PADDING and merge BRANCH_PADDING. */
11983 for (next_fragP
= fragP
;
11984 next_fragP
!= NULL
;
11985 next_fragP
= next_fragP
->fr_next
)
11987 next_fragP
->tc_frag_data
.classified
= 1;
11988 if (next_fragP
->fr_type
== rs_machine_dependent
)
11989 switch (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
))
11991 case BRANCH_PADDING
:
11992 /* The BRANCH_PADDING frag must be followed by a branch
11994 branch_fragP
= i386_next_non_empty_frag (next_fragP
);
11995 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
11997 case FUSED_JCC_PADDING
:
11998 /* Check if this is a fused jcc:
12000 CMP like instruction
12004 cmp_fragP
= i386_next_non_empty_frag (next_fragP
);
12005 pad_fragP
= i386_next_non_empty_frag (cmp_fragP
);
12006 branch_fragP
= i386_next_fusible_jcc_frag (next_fragP
, pad_fragP
);
12009 /* The BRANCH_PADDING frag is merged with the
12010 FUSED_JCC_PADDING frag. */
12011 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
12012 /* CMP like instruction size. */
12013 next_fragP
->tc_frag_data
.cmp_size
= cmp_fragP
->fr_fix
;
12014 frag_wane (pad_fragP
);
12015 /* Skip to branch_fragP. */
12016 next_fragP
= branch_fragP
;
12018 else if (next_fragP
->tc_frag_data
.max_prefix_length
)
12020 /* Turn FUSED_JCC_PADDING into BRANCH_PREFIX if it isn't
12022 next_fragP
->fr_subtype
12023 = ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0);
12024 next_fragP
->tc_frag_data
.max_bytes
12025 = next_fragP
->tc_frag_data
.max_prefix_length
;
12026 /* This will be updated in the BRANCH_PREFIX scan. */
12027 next_fragP
->tc_frag_data
.max_prefix_length
= 0;
12030 frag_wane (next_fragP
);
12035 /* Stop if there is no BRANCH_PREFIX. */
12036 if (!align_branch_prefix_size
)
12039 /* Scan for BRANCH_PREFIX. */
12040 for (; fragP
!= NULL
; fragP
= fragP
->fr_next
)
12042 if (fragP
->fr_type
!= rs_machine_dependent
12043 || (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
12047 /* Count all BRANCH_PREFIX frags before BRANCH_PADDING and
12048 COND_JUMP_PREFIX. */
12049 max_prefix_length
= 0;
12050 for (next_fragP
= fragP
;
12051 next_fragP
!= NULL
;
12052 next_fragP
= next_fragP
->fr_next
)
12054 if (next_fragP
->fr_type
== rs_fill
)
12055 /* Skip rs_fill frags. */
12057 else if (next_fragP
->fr_type
!= rs_machine_dependent
)
12058 /* Stop for all other frags. */
12061 /* rs_machine_dependent frags. */
12062 if (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12065 /* Count BRANCH_PREFIX frags. */
12066 if (max_prefix_length
>= MAX_FUSED_JCC_PADDING_SIZE
)
12068 max_prefix_length
= MAX_FUSED_JCC_PADDING_SIZE
;
12069 frag_wane (next_fragP
);
12073 += next_fragP
->tc_frag_data
.max_bytes
;
12075 else if ((TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12077 || (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12078 == FUSED_JCC_PADDING
))
12080 /* Stop at BRANCH_PADDING and FUSED_JCC_PADDING. */
12081 fragP
->tc_frag_data
.u
.padding_fragP
= next_fragP
;
12085 /* Stop for other rs_machine_dependent frags. */
12089 fragP
->tc_frag_data
.max_prefix_length
= max_prefix_length
;
12091 /* Skip to the next frag. */
12092 fragP
= next_fragP
;
12096 /* Compute padding size for
12099 CMP like instruction
12101 COND_JUMP/UNCOND_JUMP
12106 COND_JUMP/UNCOND_JUMP
12110 i386_branch_padding_size (fragS
*fragP
, offsetT address
)
12112 unsigned int offset
, size
, padding_size
;
12113 fragS
*branch_fragP
= fragP
->tc_frag_data
.u
.branch_fragP
;
12115 /* The start address of the BRANCH_PADDING or FUSED_JCC_PADDING frag. */
12117 address
= fragP
->fr_address
;
12118 address
+= fragP
->fr_fix
;
12120 /* CMP like instrunction size. */
12121 size
= fragP
->tc_frag_data
.cmp_size
;
12123 /* The base size of the branch frag. */
12124 size
+= branch_fragP
->fr_fix
;
12126 /* Add opcode and displacement bytes for the rs_machine_dependent
12128 if (branch_fragP
->fr_type
== rs_machine_dependent
)
12129 size
+= md_relax_table
[branch_fragP
->fr_subtype
].rlx_length
;
12131 /* Check if branch is within boundary and doesn't end at the last
12133 offset
= address
& ((1U << align_branch_power
) - 1);
12134 if ((offset
+ size
) >= (1U << align_branch_power
))
12135 /* Padding needed to avoid crossing boundary. */
12136 padding_size
= (1U << align_branch_power
) - offset
;
12138 /* No padding needed. */
12141 /* The return value may be saved in tc_frag_data.length which is
12143 if (!fits_in_unsigned_byte (padding_size
))
12146 return padding_size
;
12149 /* i386_generic_table_relax_frag()
12151 Handle BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags to
12152 grow/shrink padding to align branch frags. Hand others to
12156 i386_generic_table_relax_frag (segT segment
, fragS
*fragP
, long stretch
)
12158 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
12159 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
12161 long padding_size
= i386_branch_padding_size (fragP
, 0);
12162 long grow
= padding_size
- fragP
->tc_frag_data
.length
;
12164 /* When the BRANCH_PREFIX frag is used, the computed address
12165 must match the actual address and there should be no padding. */
12166 if (fragP
->tc_frag_data
.padding_address
12167 && (fragP
->tc_frag_data
.padding_address
!= fragP
->fr_address
12171 /* Update the padding size. */
12173 fragP
->tc_frag_data
.length
= padding_size
;
12177 else if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12179 fragS
*padding_fragP
, *next_fragP
;
12180 long padding_size
, left_size
, last_size
;
12182 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
12183 if (!padding_fragP
)
12184 /* Use the padding set by the leading BRANCH_PREFIX frag. */
12185 return (fragP
->tc_frag_data
.length
12186 - fragP
->tc_frag_data
.last_length
);
12188 /* Compute the relative address of the padding frag in the very
12189 first time where the BRANCH_PREFIX frag sizes are zero. */
12190 if (!fragP
->tc_frag_data
.padding_address
)
12191 fragP
->tc_frag_data
.padding_address
12192 = padding_fragP
->fr_address
- (fragP
->fr_address
- stretch
);
12194 /* First update the last length from the previous interation. */
12195 left_size
= fragP
->tc_frag_data
.prefix_length
;
12196 for (next_fragP
= fragP
;
12197 next_fragP
!= padding_fragP
;
12198 next_fragP
= next_fragP
->fr_next
)
12199 if (next_fragP
->fr_type
== rs_machine_dependent
12200 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12205 int max
= next_fragP
->tc_frag_data
.max_bytes
;
12209 if (max
> left_size
)
12214 next_fragP
->tc_frag_data
.last_length
= size
;
12218 next_fragP
->tc_frag_data
.last_length
= 0;
12221 /* Check the padding size for the padding frag. */
12222 padding_size
= i386_branch_padding_size
12223 (padding_fragP
, (fragP
->fr_address
12224 + fragP
->tc_frag_data
.padding_address
));
12226 last_size
= fragP
->tc_frag_data
.prefix_length
;
12227 /* Check if there is change from the last interation. */
12228 if (padding_size
== last_size
)
12230 /* Update the expected address of the padding frag. */
12231 padding_fragP
->tc_frag_data
.padding_address
12232 = (fragP
->fr_address
+ padding_size
12233 + fragP
->tc_frag_data
.padding_address
);
12237 if (padding_size
> fragP
->tc_frag_data
.max_prefix_length
)
12239 /* No padding if there is no sufficient room. Clear the
12240 expected address of the padding frag. */
12241 padding_fragP
->tc_frag_data
.padding_address
= 0;
12245 /* Store the expected address of the padding frag. */
12246 padding_fragP
->tc_frag_data
.padding_address
12247 = (fragP
->fr_address
+ padding_size
12248 + fragP
->tc_frag_data
.padding_address
);
12250 fragP
->tc_frag_data
.prefix_length
= padding_size
;
12252 /* Update the length for the current interation. */
12253 left_size
= padding_size
;
12254 for (next_fragP
= fragP
;
12255 next_fragP
!= padding_fragP
;
12256 next_fragP
= next_fragP
->fr_next
)
12257 if (next_fragP
->fr_type
== rs_machine_dependent
12258 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12263 int max
= next_fragP
->tc_frag_data
.max_bytes
;
12267 if (max
> left_size
)
12272 next_fragP
->tc_frag_data
.length
= size
;
12276 next_fragP
->tc_frag_data
.length
= 0;
12279 return (fragP
->tc_frag_data
.length
12280 - fragP
->tc_frag_data
.last_length
);
12282 return relax_frag (segment
, fragP
, stretch
);
12285 /* md_estimate_size_before_relax()
12287 Called just before relax() for rs_machine_dependent frags. The x86
12288 assembler uses these frags to handle variable size jump
12291 Any symbol that is now undefined will not become defined.
12292 Return the correct fr_subtype in the frag.
12293 Return the initial "guess for variable size of frag" to caller.
12294 The guess is actually the growth beyond the fixed part. Whatever
12295 we do to grow the fixed or variable part contributes to our
12299 md_estimate_size_before_relax (fragS
*fragP
, segT segment
)
12301 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
12302 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
12303 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
12305 i386_classify_machine_dependent_frag (fragP
);
12306 return fragP
->tc_frag_data
.length
;
12309 /* We've already got fragP->fr_subtype right; all we have to do is
12310 check for un-relaxable symbols. On an ELF system, we can't relax
12311 an externally visible symbol, because it may be overridden by a
12313 if (S_GET_SEGMENT (fragP
->fr_symbol
) != segment
12314 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12316 && !elf_symbol_resolved_in_segment_p (fragP
->fr_symbol
,
12319 #if defined (OBJ_COFF) && defined (TE_PE)
12320 || (OUTPUT_FLAVOR
== bfd_target_coff_flavour
12321 && S_IS_WEAK (fragP
->fr_symbol
))
12325 /* Symbol is undefined in this segment, or we need to keep a
12326 reloc so that weak symbols can be overridden. */
12327 int size
= (fragP
->fr_subtype
& CODE16
) ? 2 : 4;
12328 enum bfd_reloc_code_real reloc_type
;
12329 unsigned char *opcode
;
12333 if (fragP
->fr_var
!= NO_RELOC
)
12334 reloc_type
= (enum bfd_reloc_code_real
) fragP
->fr_var
;
12335 else if (size
== 2)
12336 reloc_type
= BFD_RELOC_16_PCREL
;
12337 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12338 else if (fragP
->tc_frag_data
.code64
&& fragP
->fr_offset
== 0
12339 && need_plt32_p (fragP
->fr_symbol
))
12340 reloc_type
= BFD_RELOC_X86_64_PLT32
;
12343 reloc_type
= BFD_RELOC_32_PCREL
;
12345 old_fr_fix
= fragP
->fr_fix
;
12346 opcode
= (unsigned char *) fragP
->fr_opcode
;
12348 switch (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
))
12351 /* Make jmp (0xeb) a (d)word displacement jump. */
12353 fragP
->fr_fix
+= size
;
12354 fixP
= fix_new (fragP
, old_fr_fix
, size
,
12356 fragP
->fr_offset
, 1,
12362 && (!no_cond_jump_promotion
|| fragP
->fr_var
!= NO_RELOC
))
12364 /* Negate the condition, and branch past an
12365 unconditional jump. */
12368 /* Insert an unconditional jump. */
12370 /* We added two extra opcode bytes, and have a two byte
12372 fragP
->fr_fix
+= 2 + 2;
12373 fix_new (fragP
, old_fr_fix
+ 2, 2,
12375 fragP
->fr_offset
, 1,
12379 /* Fall through. */
12382 if (no_cond_jump_promotion
&& fragP
->fr_var
== NO_RELOC
)
12384 fragP
->fr_fix
+= 1;
12385 fixP
= fix_new (fragP
, old_fr_fix
, 1,
12387 fragP
->fr_offset
, 1,
12388 BFD_RELOC_8_PCREL
);
12389 fixP
->fx_signed
= 1;
12393 /* This changes the byte-displacement jump 0x7N
12394 to the (d)word-displacement jump 0x0f,0x8N. */
12395 opcode
[1] = opcode
[0] + 0x10;
12396 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12397 /* We've added an opcode byte. */
12398 fragP
->fr_fix
+= 1 + size
;
12399 fixP
= fix_new (fragP
, old_fr_fix
+ 1, size
,
12401 fragP
->fr_offset
, 1,
12406 BAD_CASE (fragP
->fr_subtype
);
12410 /* All jumps handled here are signed, but don't unconditionally use a
12411 signed limit check for 32 and 16 bit jumps as we want to allow wrap
12412 around at 4G (outside of 64-bit mode) and 64k. */
12413 if (size
== 4 && flag_code
== CODE_64BIT
)
12414 fixP
->fx_signed
= 1;
12417 return fragP
->fr_fix
- old_fr_fix
;
12420 /* Guess size depending on current relax state. Initially the relax
12421 state will correspond to a short jump and we return 1, because
12422 the variable part of the frag (the branch offset) is one byte
12423 long. However, we can relax a section more than once and in that
12424 case we must either set fr_subtype back to the unrelaxed state,
12425 or return the value for the appropriate branch. */
12426 return md_relax_table
[fragP
->fr_subtype
].rlx_length
;
12429 /* Called after relax() is finished.
12431 In: Address of frag.
12432 fr_type == rs_machine_dependent.
12433 fr_subtype is what the address relaxed to.
12435 Out: Any fixSs and constants are set up.
12436 Caller will turn frag into a ".space 0". */
12439 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
12442 unsigned char *opcode
;
12443 unsigned char *where_to_put_displacement
= NULL
;
12444 offsetT target_address
;
12445 offsetT opcode_address
;
12446 unsigned int extension
= 0;
12447 offsetT displacement_from_opcode_start
;
12449 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
12450 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
12451 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12453 /* Generate nop padding. */
12454 unsigned int size
= fragP
->tc_frag_data
.length
;
12457 if (size
> fragP
->tc_frag_data
.max_bytes
)
12463 const char *branch
= "branch";
12464 const char *prefix
= "";
12465 fragS
*padding_fragP
;
12466 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
12469 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
12470 switch (fragP
->tc_frag_data
.default_prefix
)
12475 case CS_PREFIX_OPCODE
:
12478 case DS_PREFIX_OPCODE
:
12481 case ES_PREFIX_OPCODE
:
12484 case FS_PREFIX_OPCODE
:
12487 case GS_PREFIX_OPCODE
:
12490 case SS_PREFIX_OPCODE
:
12495 msg
= _("%s:%u: add %d%s at 0x%llx to align "
12496 "%s within %d-byte boundary\n");
12498 msg
= _("%s:%u: add additional %d%s at 0x%llx to "
12499 "align %s within %d-byte boundary\n");
12503 padding_fragP
= fragP
;
12504 msg
= _("%s:%u: add %d%s-byte nop at 0x%llx to align "
12505 "%s within %d-byte boundary\n");
12509 switch (padding_fragP
->tc_frag_data
.branch_type
)
12511 case align_branch_jcc
:
12514 case align_branch_fused
:
12515 branch
= "fused jcc";
12517 case align_branch_jmp
:
12520 case align_branch_call
:
12523 case align_branch_indirect
:
12524 branch
= "indiret branch";
12526 case align_branch_ret
:
12533 fprintf (stdout
, msg
,
12534 fragP
->fr_file
, fragP
->fr_line
, size
, prefix
,
12535 (long long) fragP
->fr_address
, branch
,
12536 1 << align_branch_power
);
12538 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12539 memset (fragP
->fr_opcode
,
12540 fragP
->tc_frag_data
.default_prefix
, size
);
12542 i386_generate_nops (fragP
, (char *) fragP
->fr_opcode
,
12544 fragP
->fr_fix
+= size
;
12549 opcode
= (unsigned char *) fragP
->fr_opcode
;
12551 /* Address we want to reach in file space. */
12552 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
12554 /* Address opcode resides at in file space. */
12555 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
12557 /* Displacement from opcode start to fill into instruction. */
12558 displacement_from_opcode_start
= target_address
- opcode_address
;
12560 if ((fragP
->fr_subtype
& BIG
) == 0)
12562 /* Don't have to change opcode. */
12563 extension
= 1; /* 1 opcode + 1 displacement */
12564 where_to_put_displacement
= &opcode
[1];
12568 if (no_cond_jump_promotion
12569 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
12570 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
12571 _("long jump required"));
12573 switch (fragP
->fr_subtype
)
12575 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
12576 extension
= 4; /* 1 opcode + 4 displacement */
12578 where_to_put_displacement
= &opcode
[1];
12581 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
12582 extension
= 2; /* 1 opcode + 2 displacement */
12584 where_to_put_displacement
= &opcode
[1];
12587 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
12588 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
12589 extension
= 5; /* 2 opcode + 4 displacement */
12590 opcode
[1] = opcode
[0] + 0x10;
12591 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12592 where_to_put_displacement
= &opcode
[2];
12595 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
12596 extension
= 3; /* 2 opcode + 2 displacement */
12597 opcode
[1] = opcode
[0] + 0x10;
12598 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12599 where_to_put_displacement
= &opcode
[2];
12602 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
12607 where_to_put_displacement
= &opcode
[3];
12611 BAD_CASE (fragP
->fr_subtype
);
12616 /* If size if less then four we are sure that the operand fits,
12617 but if it's 4, then it could be that the displacement is larger
12619 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
12621 && ((addressT
) (displacement_from_opcode_start
- extension
12622 + ((addressT
) 1 << 31))
12623 > (((addressT
) 2 << 31) - 1)))
12625 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
12626 _("jump target out of range"));
12627 /* Make us emit 0. */
12628 displacement_from_opcode_start
= extension
;
12630 /* Now put displacement after opcode. */
12631 md_number_to_chars ((char *) where_to_put_displacement
,
12632 (valueT
) (displacement_from_opcode_start
- extension
),
12633 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
12634 fragP
->fr_fix
+= extension
;
12637 /* Apply a fixup (fixP) to segment data, once it has been determined
12638 by our caller that we have all the info we need to fix it up.
12640 Parameter valP is the pointer to the value of the bits.
12642 On the 386, immediates, displacements, and data pointers are all in
12643 the same (little-endian) format, so we don't need to care about which
12644 we are handling. */
12647 md_apply_fix (fixS
*fixP
, valueT
*valP
, segT seg ATTRIBUTE_UNUSED
)
12649 char *p
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
12650 valueT value
= *valP
;
12652 #if !defined (TE_Mach)
12653 if (fixP
->fx_pcrel
)
12655 switch (fixP
->fx_r_type
)
12661 fixP
->fx_r_type
= BFD_RELOC_64_PCREL
;
12664 case BFD_RELOC_X86_64_32S
:
12665 fixP
->fx_r_type
= BFD_RELOC_32_PCREL
;
12668 fixP
->fx_r_type
= BFD_RELOC_16_PCREL
;
12671 fixP
->fx_r_type
= BFD_RELOC_8_PCREL
;
12676 if (fixP
->fx_addsy
!= NULL
12677 && (fixP
->fx_r_type
== BFD_RELOC_32_PCREL
12678 || fixP
->fx_r_type
== BFD_RELOC_64_PCREL
12679 || fixP
->fx_r_type
== BFD_RELOC_16_PCREL
12680 || fixP
->fx_r_type
== BFD_RELOC_8_PCREL
)
12681 && !use_rela_relocations
)
12683 /* This is a hack. There should be a better way to handle this.
12684 This covers for the fact that bfd_install_relocation will
12685 subtract the current location (for partial_inplace, PC relative
12686 relocations); see more below. */
12690 || OUTPUT_FLAVOR
== bfd_target_coff_flavour
12693 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
12695 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12698 segT sym_seg
= S_GET_SEGMENT (fixP
->fx_addsy
);
12700 if ((sym_seg
== seg
12701 || (symbol_section_p (fixP
->fx_addsy
)
12702 && sym_seg
!= absolute_section
))
12703 && !generic_force_reloc (fixP
))
12705 /* Yes, we add the values in twice. This is because
12706 bfd_install_relocation subtracts them out again. I think
12707 bfd_install_relocation is broken, but I don't dare change
12709 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
12713 #if defined (OBJ_COFF) && defined (TE_PE)
12714 /* For some reason, the PE format does not store a
12715 section address offset for a PC relative symbol. */
12716 if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
12717 || S_IS_WEAK (fixP
->fx_addsy
))
12718 value
+= md_pcrel_from (fixP
);
12721 #if defined (OBJ_COFF) && defined (TE_PE)
12722 if (fixP
->fx_addsy
!= NULL
12723 && S_IS_WEAK (fixP
->fx_addsy
)
12724 /* PR 16858: Do not modify weak function references. */
12725 && ! fixP
->fx_pcrel
)
12727 #if !defined (TE_PEP)
12728 /* For x86 PE weak function symbols are neither PC-relative
12729 nor do they set S_IS_FUNCTION. So the only reliable way
12730 to detect them is to check the flags of their containing
12732 if (S_GET_SEGMENT (fixP
->fx_addsy
) != NULL
12733 && S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_CODE
)
12737 value
-= S_GET_VALUE (fixP
->fx_addsy
);
12741 /* Fix a few things - the dynamic linker expects certain values here,
12742 and we must not disappoint it. */
12743 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12744 if (IS_ELF
&& fixP
->fx_addsy
)
12745 switch (fixP
->fx_r_type
)
12747 case BFD_RELOC_386_PLT32
:
12748 case BFD_RELOC_X86_64_PLT32
:
12749 /* Make the jump instruction point to the address of the operand.
12750 At runtime we merely add the offset to the actual PLT entry.
12751 NB: Subtract the offset size only for jump instructions. */
12752 if (fixP
->fx_pcrel
)
12756 case BFD_RELOC_386_TLS_GD
:
12757 case BFD_RELOC_386_TLS_LDM
:
12758 case BFD_RELOC_386_TLS_IE_32
:
12759 case BFD_RELOC_386_TLS_IE
:
12760 case BFD_RELOC_386_TLS_GOTIE
:
12761 case BFD_RELOC_386_TLS_GOTDESC
:
12762 case BFD_RELOC_X86_64_TLSGD
:
12763 case BFD_RELOC_X86_64_TLSLD
:
12764 case BFD_RELOC_X86_64_GOTTPOFF
:
12765 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
12766 value
= 0; /* Fully resolved at runtime. No addend. */
12768 case BFD_RELOC_386_TLS_LE
:
12769 case BFD_RELOC_386_TLS_LDO_32
:
12770 case BFD_RELOC_386_TLS_LE_32
:
12771 case BFD_RELOC_X86_64_DTPOFF32
:
12772 case BFD_RELOC_X86_64_DTPOFF64
:
12773 case BFD_RELOC_X86_64_TPOFF32
:
12774 case BFD_RELOC_X86_64_TPOFF64
:
12775 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
12778 case BFD_RELOC_386_TLS_DESC_CALL
:
12779 case BFD_RELOC_X86_64_TLSDESC_CALL
:
12780 value
= 0; /* Fully resolved at runtime. No addend. */
12781 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
12785 case BFD_RELOC_VTABLE_INHERIT
:
12786 case BFD_RELOC_VTABLE_ENTRY
:
12793 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
12795 /* If not 64bit, massage value, to account for wraparound when !BFD64. */
12797 value
= extend_to_32bit_address (value
);
12800 #endif /* !defined (TE_Mach) */
12802 /* Are we finished with this relocation now? */
12803 if (fixP
->fx_addsy
== NULL
)
12806 switch (fixP
->fx_r_type
)
12808 case BFD_RELOC_X86_64_32S
:
12809 fixP
->fx_signed
= 1;
12816 #if defined (OBJ_COFF) && defined (TE_PE)
12817 else if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
12820 /* Remember value for tc_gen_reloc. */
12821 fixP
->fx_addnumber
= value
;
12822 /* Clear out the frag for now. */
12826 else if (use_rela_relocations
)
12828 if (!disallow_64bit_reloc
|| fixP
->fx_r_type
== NO_RELOC
)
12829 fixP
->fx_no_overflow
= 1;
12830 /* Remember value for tc_gen_reloc. */
12831 fixP
->fx_addnumber
= value
;
12835 md_number_to_chars (p
, value
, fixP
->fx_size
);
12839 md_atof (int type
, char *litP
, int *sizeP
)
12841 /* This outputs the LITTLENUMs in REVERSE order;
12842 in accord with the bigendian 386. */
12843 return ieee_md_atof (type
, litP
, sizeP
, false);
/* Scratch buffer for output_invalid: room for "'c'" or "(0xNN)" plus NUL.  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Format an invalid input character C for diagnostics: printable
   characters render as 'c', others as (0xNN).  Returns a pointer to a
   static buffer that is overwritten by the next call (not reentrant).
   NOTE(review): reconstructed from a garbled source dump -- diff against
   upstream binutils before committing.  */
static char *
output_invalid (int c)
{
  if (ISPRINT (c))
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "'%c'", c);
  else
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "(0x%x)", (unsigned char) c);
  return output_invalid_buf;
}
12860 /* Verify that @r can be used in the current context. */
12862 static bool check_register (const reg_entry
*r
)
12864 if (allow_pseudo_reg
)
12867 if (operand_type_all_zero (&r
->reg_type
))
12870 if ((r
->reg_type
.bitfield
.dword
12871 || (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
> 3)
12872 || r
->reg_type
.bitfield
.class == RegCR
12873 || r
->reg_type
.bitfield
.class == RegDR
)
12874 && !cpu_arch_flags
.bitfield
.cpui386
)
12877 if (r
->reg_type
.bitfield
.class == RegTR
12878 && (flag_code
== CODE_64BIT
12879 || !cpu_arch_flags
.bitfield
.cpui386
12880 || cpu_arch_isa_flags
.bitfield
.cpui586
12881 || cpu_arch_isa_flags
.bitfield
.cpui686
))
12884 if (r
->reg_type
.bitfield
.class == RegMMX
&& !cpu_arch_flags
.bitfield
.cpummx
)
12887 if (!cpu_arch_flags
.bitfield
.cpuavx512f
)
12889 if (r
->reg_type
.bitfield
.zmmword
12890 || r
->reg_type
.bitfield
.class == RegMask
)
12893 if (!cpu_arch_flags
.bitfield
.cpuavx
)
12895 if (r
->reg_type
.bitfield
.ymmword
)
12898 if (!cpu_arch_flags
.bitfield
.cpusse
&& r
->reg_type
.bitfield
.xmmword
)
12903 if (r
->reg_type
.bitfield
.tmmword
12904 && (!cpu_arch_flags
.bitfield
.cpuamx_tile
12905 || flag_code
!= CODE_64BIT
))
12908 if (r
->reg_type
.bitfield
.class == RegBND
&& !cpu_arch_flags
.bitfield
.cpumpx
)
12911 /* Don't allow fake index register unless allow_index_reg isn't 0. */
12912 if (!allow_index_reg
&& r
->reg_num
== RegIZ
)
12915 /* Upper 16 vector registers are only available with VREX in 64bit
12916 mode, and require EVEX encoding. */
12917 if (r
->reg_flags
& RegVRex
)
12919 if (!cpu_arch_flags
.bitfield
.cpuavx512f
12920 || flag_code
!= CODE_64BIT
)
12923 if (i
.vec_encoding
== vex_encoding_default
)
12924 i
.vec_encoding
= vex_encoding_evex
;
12925 else if (i
.vec_encoding
!= vex_encoding_evex
)
12926 i
.vec_encoding
= vex_encoding_error
;
12929 if (((r
->reg_flags
& (RegRex64
| RegRex
)) || r
->reg_type
.bitfield
.qword
)
12930 && (!cpu_arch_flags
.bitfield
.cpulm
|| r
->reg_type
.bitfield
.class != RegCR
)
12931 && flag_code
!= CODE_64BIT
)
12934 if (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
== RegFlat
12941 /* REG_STRING starts *before* REGISTER_PREFIX. */
12943 static const reg_entry
*
12944 parse_real_register (char *reg_string
, char **end_op
)
12946 char *s
= reg_string
;
12948 char reg_name_given
[MAX_REG_NAME_SIZE
+ 1];
12949 const reg_entry
*r
;
12951 /* Skip possible REGISTER_PREFIX and possible whitespace. */
12952 if (*s
== REGISTER_PREFIX
)
12955 if (is_space_char (*s
))
12958 p
= reg_name_given
;
12959 while ((*p
++ = register_chars
[(unsigned char) *s
]) != '\0')
12961 if (p
>= reg_name_given
+ MAX_REG_NAME_SIZE
)
12962 return (const reg_entry
*) NULL
;
12966 /* For naked regs, make sure that we are not dealing with an identifier.
12967 This prevents confusing an identifier like `eax_var' with register
12969 if (allow_naked_reg
&& identifier_chars
[(unsigned char) *s
])
12970 return (const reg_entry
*) NULL
;
12974 r
= (const reg_entry
*) str_hash_find (reg_hash
, reg_name_given
);
12976 /* Handle floating point regs, allowing spaces in the (i) part. */
12979 if (!cpu_arch_flags
.bitfield
.cpu8087
12980 && !cpu_arch_flags
.bitfield
.cpu287
12981 && !cpu_arch_flags
.bitfield
.cpu387
12982 && !allow_pseudo_reg
)
12983 return (const reg_entry
*) NULL
;
12985 if (is_space_char (*s
))
12990 if (is_space_char (*s
))
12992 if (*s
>= '0' && *s
<= '7')
12994 int fpr
= *s
- '0';
12996 if (is_space_char (*s
))
13001 know (r
[fpr
].reg_num
== fpr
);
13005 /* We have "%st(" then garbage. */
13006 return (const reg_entry
*) NULL
;
13010 return r
&& check_register (r
) ? r
: NULL
;
13013 /* REG_STRING starts *before* REGISTER_PREFIX. */
13015 static const reg_entry
*
13016 parse_register (char *reg_string
, char **end_op
)
13018 const reg_entry
*r
;
13020 if (*reg_string
== REGISTER_PREFIX
|| allow_naked_reg
)
13021 r
= parse_real_register (reg_string
, end_op
);
13026 char *save
= input_line_pointer
;
13030 input_line_pointer
= reg_string
;
13031 c
= get_symbol_name (®_string
);
13032 symbolP
= symbol_find (reg_string
);
13033 while (symbolP
&& S_GET_SEGMENT (symbolP
) != reg_section
)
13035 const expressionS
*e
= symbol_get_value_expression(symbolP
);
13037 if (e
->X_op
!= O_symbol
|| e
->X_add_number
)
13039 symbolP
= e
->X_add_symbol
;
13041 if (symbolP
&& S_GET_SEGMENT (symbolP
) == reg_section
)
13043 const expressionS
*e
= symbol_get_value_expression (symbolP
);
13045 know (e
->X_op
== O_register
);
13046 know (e
->X_add_number
>= 0
13047 && (valueT
) e
->X_add_number
< i386_regtab_size
);
13048 r
= i386_regtab
+ e
->X_add_number
;
13049 if (!check_register (r
))
13051 as_bad (_("register '%s%s' cannot be used here"),
13052 register_prefix
, r
->reg_name
);
13055 *end_op
= input_line_pointer
;
13057 *input_line_pointer
= c
;
13058 input_line_pointer
= save
;
13064 i386_parse_name (char *name
, expressionS
*e
, char *nextcharP
)
13066 const reg_entry
*r
= NULL
;
13067 char *end
= input_line_pointer
;
13070 if (*name
== REGISTER_PREFIX
|| allow_naked_reg
)
13071 r
= parse_real_register (name
, &input_line_pointer
);
13072 if (r
&& end
<= input_line_pointer
)
13074 *nextcharP
= *input_line_pointer
;
13075 *input_line_pointer
= 0;
13078 e
->X_op
= O_register
;
13079 e
->X_add_number
= r
- i386_regtab
;
13082 e
->X_op
= O_illegal
;
13085 input_line_pointer
= end
;
13087 return intel_syntax
? i386_intel_parse_name (name
, e
) : 0;
13091 md_operand (expressionS
*e
)
13094 const reg_entry
*r
;
13096 switch (*input_line_pointer
)
13098 case REGISTER_PREFIX
:
13099 r
= parse_real_register (input_line_pointer
, &end
);
13102 e
->X_op
= O_register
;
13103 e
->X_add_number
= r
- i386_regtab
;
13104 input_line_pointer
= end
;
13109 gas_assert (intel_syntax
);
13110 end
= input_line_pointer
++;
13112 if (*input_line_pointer
== ']')
13114 ++input_line_pointer
;
13115 e
->X_op_symbol
= make_expr_symbol (e
);
13116 e
->X_add_symbol
= NULL
;
13117 e
->X_add_number
= 0;
13122 e
->X_op
= O_absent
;
13123 input_line_pointer
= end
;
13130 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13131 const char *md_shortopts
= "kVQ:sqnO::";
13133 const char *md_shortopts
= "qnO::";
13136 #define OPTION_32 (OPTION_MD_BASE + 0)
13137 #define OPTION_64 (OPTION_MD_BASE + 1)
13138 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
13139 #define OPTION_MARCH (OPTION_MD_BASE + 3)
13140 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
13141 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
13142 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
13143 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
13144 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
13145 #define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 9)
13146 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
13147 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
13148 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
13149 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
13150 #define OPTION_X32 (OPTION_MD_BASE + 14)
13151 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
13152 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
13153 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
13154 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
13155 #define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
13156 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
13157 #define OPTION_MSHARED (OPTION_MD_BASE + 21)
13158 #define OPTION_MAMD64 (OPTION_MD_BASE + 22)
13159 #define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
13160 #define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
13161 #define OPTION_X86_USED_NOTE (OPTION_MD_BASE + 25)
13162 #define OPTION_MVEXWIG (OPTION_MD_BASE + 26)
13163 #define OPTION_MALIGN_BRANCH_BOUNDARY (OPTION_MD_BASE + 27)
13164 #define OPTION_MALIGN_BRANCH_PREFIX_SIZE (OPTION_MD_BASE + 28)
13165 #define OPTION_MALIGN_BRANCH (OPTION_MD_BASE + 29)
13166 #define OPTION_MBRANCHES_WITH_32B_BOUNDARIES (OPTION_MD_BASE + 30)
13167 #define OPTION_MLFENCE_AFTER_LOAD (OPTION_MD_BASE + 31)
13168 #define OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH (OPTION_MD_BASE + 32)
13169 #define OPTION_MLFENCE_BEFORE_RET (OPTION_MD_BASE + 33)
13170 #define OPTION_MUSE_UNALIGNED_VECTOR_MOVE (OPTION_MD_BASE + 34)
13172 struct option md_longopts
[] =
13174 {"32", no_argument
, NULL
, OPTION_32
},
13175 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13176 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
13177 {"64", no_argument
, NULL
, OPTION_64
},
13179 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13180 {"x32", no_argument
, NULL
, OPTION_X32
},
13181 {"mshared", no_argument
, NULL
, OPTION_MSHARED
},
13182 {"mx86-used-note", required_argument
, NULL
, OPTION_X86_USED_NOTE
},
13184 {"divide", no_argument
, NULL
, OPTION_DIVIDE
},
13185 {"march", required_argument
, NULL
, OPTION_MARCH
},
13186 {"mtune", required_argument
, NULL
, OPTION_MTUNE
},
13187 {"mmnemonic", required_argument
, NULL
, OPTION_MMNEMONIC
},
13188 {"msyntax", required_argument
, NULL
, OPTION_MSYNTAX
},
13189 {"mindex-reg", no_argument
, NULL
, OPTION_MINDEX_REG
},
13190 {"mnaked-reg", no_argument
, NULL
, OPTION_MNAKED_REG
},
13191 {"msse2avx", no_argument
, NULL
, OPTION_MSSE2AVX
},
13192 {"muse-unaligned-vector-move", no_argument
, NULL
, OPTION_MUSE_UNALIGNED_VECTOR_MOVE
},
13193 {"msse-check", required_argument
, NULL
, OPTION_MSSE_CHECK
},
13194 {"moperand-check", required_argument
, NULL
, OPTION_MOPERAND_CHECK
},
13195 {"mavxscalar", required_argument
, NULL
, OPTION_MAVXSCALAR
},
13196 {"mvexwig", required_argument
, NULL
, OPTION_MVEXWIG
},
13197 {"madd-bnd-prefix", no_argument
, NULL
, OPTION_MADD_BND_PREFIX
},
13198 {"mevexlig", required_argument
, NULL
, OPTION_MEVEXLIG
},
13199 {"mevexwig", required_argument
, NULL
, OPTION_MEVEXWIG
},
13200 # if defined (TE_PE) || defined (TE_PEP)
13201 {"mbig-obj", no_argument
, NULL
, OPTION_MBIG_OBJ
},
13203 {"momit-lock-prefix", required_argument
, NULL
, OPTION_MOMIT_LOCK_PREFIX
},
13204 {"mfence-as-lock-add", required_argument
, NULL
, OPTION_MFENCE_AS_LOCK_ADD
},
13205 {"mrelax-relocations", required_argument
, NULL
, OPTION_MRELAX_RELOCATIONS
},
13206 {"mevexrcig", required_argument
, NULL
, OPTION_MEVEXRCIG
},
13207 {"malign-branch-boundary", required_argument
, NULL
, OPTION_MALIGN_BRANCH_BOUNDARY
},
13208 {"malign-branch-prefix-size", required_argument
, NULL
, OPTION_MALIGN_BRANCH_PREFIX_SIZE
},
13209 {"malign-branch", required_argument
, NULL
, OPTION_MALIGN_BRANCH
},
13210 {"mbranches-within-32B-boundaries", no_argument
, NULL
, OPTION_MBRANCHES_WITH_32B_BOUNDARIES
},
13211 {"mlfence-after-load", required_argument
, NULL
, OPTION_MLFENCE_AFTER_LOAD
},
13212 {"mlfence-before-indirect-branch", required_argument
, NULL
,
13213 OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
},
13214 {"mlfence-before-ret", required_argument
, NULL
, OPTION_MLFENCE_BEFORE_RET
},
13215 {"mamd64", no_argument
, NULL
, OPTION_MAMD64
},
13216 {"mintel64", no_argument
, NULL
, OPTION_MINTEL64
},
13217 {NULL
, no_argument
, NULL
, 0}
13219 size_t md_longopts_size
= sizeof (md_longopts
);
13222 md_parse_option (int c
, const char *arg
)
13225 char *arch
, *next
, *saved
, *type
;
13230 optimize_align_code
= 0;
13234 quiet_warnings
= 1;
13237 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13238 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
13239 should be emitted or not. FIXME: Not implemented. */
13241 if ((arg
[0] != 'y' && arg
[0] != 'n') || arg
[1])
13245 /* -V: SVR4 argument to print version ID. */
13247 print_version_id ();
13250 /* -k: Ignore for FreeBSD compatibility. */
13255 /* -s: On i386 Solaris, this tells the native assembler to use
13256 .stab instead of .stab.excl. We always use .stab anyhow. */
13259 case OPTION_MSHARED
:
13263 case OPTION_X86_USED_NOTE
:
13264 if (strcasecmp (arg
, "yes") == 0)
13266 else if (strcasecmp (arg
, "no") == 0)
13269 as_fatal (_("invalid -mx86-used-note= option: `%s'"), arg
);
13274 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13275 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
13278 const char **list
, **l
;
13280 list
= bfd_target_list ();
13281 for (l
= list
; *l
!= NULL
; l
++)
13282 if (startswith (*l
, "elf64-x86-64")
13283 || strcmp (*l
, "coff-x86-64") == 0
13284 || strcmp (*l
, "pe-x86-64") == 0
13285 || strcmp (*l
, "pei-x86-64") == 0
13286 || strcmp (*l
, "mach-o-x86-64") == 0)
13288 default_arch
= "x86_64";
13292 as_fatal (_("no compiled in support for x86_64"));
13298 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13302 const char **list
, **l
;
13304 list
= bfd_target_list ();
13305 for (l
= list
; *l
!= NULL
; l
++)
13306 if (startswith (*l
, "elf32-x86-64"))
13308 default_arch
= "x86_64:32";
13312 as_fatal (_("no compiled in support for 32bit x86_64"));
13316 as_fatal (_("32bit x86_64 is only supported for ELF"));
13321 default_arch
= "i386";
13324 case OPTION_DIVIDE
:
13325 #ifdef SVR4_COMMENT_CHARS
13330 n
= XNEWVEC (char, strlen (i386_comment_chars
) + 1);
13332 for (s
= i386_comment_chars
; *s
!= '\0'; s
++)
13336 i386_comment_chars
= n
;
13342 saved
= xstrdup (arg
);
13344 /* Allow -march=+nosse. */
13350 as_fatal (_("invalid -march= option: `%s'"), arg
);
13351 next
= strchr (arch
, '+');
13354 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13356 if (arch
== saved
&& cpu_arch
[j
].type
!= PROCESSOR_NONE
13357 && strcmp (arch
, cpu_arch
[j
].name
) == 0)
13360 if (! cpu_arch
[j
].enable
.bitfield
.cpui386
)
13363 cpu_arch_name
= cpu_arch
[j
].name
;
13364 free (cpu_sub_arch_name
);
13365 cpu_sub_arch_name
= NULL
;
13366 cpu_arch_flags
= cpu_arch
[j
].enable
;
13367 cpu_arch_isa
= cpu_arch
[j
].type
;
13368 cpu_arch_isa_flags
= cpu_arch
[j
].enable
;
13369 if (!cpu_arch_tune_set
)
13371 cpu_arch_tune
= cpu_arch_isa
;
13372 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
13376 else if (cpu_arch
[j
].type
== PROCESSOR_NONE
13377 && strcmp (arch
, cpu_arch
[j
].name
) == 0
13378 && !cpu_flags_all_zero (&cpu_arch
[j
].enable
))
13380 /* ISA extension. */
13381 i386_cpu_flags flags
;
13383 flags
= cpu_flags_or (cpu_arch_flags
,
13384 cpu_arch
[j
].enable
);
13386 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
13388 extend_cpu_sub_arch_name (arch
);
13389 cpu_arch_flags
= flags
;
13390 cpu_arch_isa_flags
= flags
;
13394 = cpu_flags_or (cpu_arch_isa_flags
,
13395 cpu_arch
[j
].enable
);
13400 if (j
>= ARRAY_SIZE (cpu_arch
) && startswith (arch
, "no"))
13402 /* Disable an ISA extension. */
13403 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13404 if (cpu_arch
[j
].type
== PROCESSOR_NONE
13405 && strcmp (arch
+ 2, cpu_arch
[j
].name
) == 0)
13407 i386_cpu_flags flags
;
13409 flags
= cpu_flags_and_not (cpu_arch_flags
,
13410 cpu_arch
[j
].disable
);
13411 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
13413 extend_cpu_sub_arch_name (arch
);
13414 cpu_arch_flags
= flags
;
13415 cpu_arch_isa_flags
= flags
;
13421 if (j
>= ARRAY_SIZE (cpu_arch
))
13422 as_fatal (_("invalid -march= option: `%s'"), arg
);
13426 while (next
!= NULL
);
13432 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
13433 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13435 if (cpu_arch
[j
].type
!= PROCESSOR_NONE
13436 && strcmp (arg
, cpu_arch
[j
].name
) == 0)
13438 cpu_arch_tune_set
= 1;
13439 cpu_arch_tune
= cpu_arch
[j
].type
;
13440 cpu_arch_tune_flags
= cpu_arch
[j
].enable
;
13444 if (j
>= ARRAY_SIZE (cpu_arch
))
13445 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
13448 case OPTION_MMNEMONIC
:
13449 if (strcasecmp (arg
, "att") == 0)
13450 intel_mnemonic
= 0;
13451 else if (strcasecmp (arg
, "intel") == 0)
13452 intel_mnemonic
= 1;
13454 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg
);
13457 case OPTION_MSYNTAX
:
13458 if (strcasecmp (arg
, "att") == 0)
13460 else if (strcasecmp (arg
, "intel") == 0)
13463 as_fatal (_("invalid -msyntax= option: `%s'"), arg
);
13466 case OPTION_MINDEX_REG
:
13467 allow_index_reg
= 1;
13470 case OPTION_MNAKED_REG
:
13471 allow_naked_reg
= 1;
13474 case OPTION_MSSE2AVX
:
13478 case OPTION_MUSE_UNALIGNED_VECTOR_MOVE
:
13479 use_unaligned_vector_move
= 1;
13482 case OPTION_MSSE_CHECK
:
13483 if (strcasecmp (arg
, "error") == 0)
13484 sse_check
= check_error
;
13485 else if (strcasecmp (arg
, "warning") == 0)
13486 sse_check
= check_warning
;
13487 else if (strcasecmp (arg
, "none") == 0)
13488 sse_check
= check_none
;
13490 as_fatal (_("invalid -msse-check= option: `%s'"), arg
);
13493 case OPTION_MOPERAND_CHECK
:
13494 if (strcasecmp (arg
, "error") == 0)
13495 operand_check
= check_error
;
13496 else if (strcasecmp (arg
, "warning") == 0)
13497 operand_check
= check_warning
;
13498 else if (strcasecmp (arg
, "none") == 0)
13499 operand_check
= check_none
;
13501 as_fatal (_("invalid -moperand-check= option: `%s'"), arg
);
13504 case OPTION_MAVXSCALAR
:
13505 if (strcasecmp (arg
, "128") == 0)
13506 avxscalar
= vex128
;
13507 else if (strcasecmp (arg
, "256") == 0)
13508 avxscalar
= vex256
;
13510 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg
);
13513 case OPTION_MVEXWIG
:
13514 if (strcmp (arg
, "0") == 0)
13516 else if (strcmp (arg
, "1") == 0)
13519 as_fatal (_("invalid -mvexwig= option: `%s'"), arg
);
13522 case OPTION_MADD_BND_PREFIX
:
13523 add_bnd_prefix
= 1;
13526 case OPTION_MEVEXLIG
:
13527 if (strcmp (arg
, "128") == 0)
13528 evexlig
= evexl128
;
13529 else if (strcmp (arg
, "256") == 0)
13530 evexlig
= evexl256
;
13531 else if (strcmp (arg
, "512") == 0)
13532 evexlig
= evexl512
;
13534 as_fatal (_("invalid -mevexlig= option: `%s'"), arg
);
13537 case OPTION_MEVEXRCIG
:
13538 if (strcmp (arg
, "rne") == 0)
13540 else if (strcmp (arg
, "rd") == 0)
13542 else if (strcmp (arg
, "ru") == 0)
13544 else if (strcmp (arg
, "rz") == 0)
13547 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg
);
13550 case OPTION_MEVEXWIG
:
13551 if (strcmp (arg
, "0") == 0)
13553 else if (strcmp (arg
, "1") == 0)
13556 as_fatal (_("invalid -mevexwig= option: `%s'"), arg
);
13559 # if defined (TE_PE) || defined (TE_PEP)
13560 case OPTION_MBIG_OBJ
:
13565 case OPTION_MOMIT_LOCK_PREFIX
:
13566 if (strcasecmp (arg
, "yes") == 0)
13567 omit_lock_prefix
= 1;
13568 else if (strcasecmp (arg
, "no") == 0)
13569 omit_lock_prefix
= 0;
13571 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg
);
13574 case OPTION_MFENCE_AS_LOCK_ADD
:
13575 if (strcasecmp (arg
, "yes") == 0)
13577 else if (strcasecmp (arg
, "no") == 0)
13580 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg
);
13583 case OPTION_MLFENCE_AFTER_LOAD
:
13584 if (strcasecmp (arg
, "yes") == 0)
13585 lfence_after_load
= 1;
13586 else if (strcasecmp (arg
, "no") == 0)
13587 lfence_after_load
= 0;
13589 as_fatal (_("invalid -mlfence-after-load= option: `%s'"), arg
);
13592 case OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
:
13593 if (strcasecmp (arg
, "all") == 0)
13595 lfence_before_indirect_branch
= lfence_branch_all
;
13596 if (lfence_before_ret
== lfence_before_ret_none
)
13597 lfence_before_ret
= lfence_before_ret_shl
;
13599 else if (strcasecmp (arg
, "memory") == 0)
13600 lfence_before_indirect_branch
= lfence_branch_memory
;
13601 else if (strcasecmp (arg
, "register") == 0)
13602 lfence_before_indirect_branch
= lfence_branch_register
;
13603 else if (strcasecmp (arg
, "none") == 0)
13604 lfence_before_indirect_branch
= lfence_branch_none
;
13606 as_fatal (_("invalid -mlfence-before-indirect-branch= option: `%s'"),
13610 case OPTION_MLFENCE_BEFORE_RET
:
13611 if (strcasecmp (arg
, "or") == 0)
13612 lfence_before_ret
= lfence_before_ret_or
;
13613 else if (strcasecmp (arg
, "not") == 0)
13614 lfence_before_ret
= lfence_before_ret_not
;
13615 else if (strcasecmp (arg
, "shl") == 0 || strcasecmp (arg
, "yes") == 0)
13616 lfence_before_ret
= lfence_before_ret_shl
;
13617 else if (strcasecmp (arg
, "none") == 0)
13618 lfence_before_ret
= lfence_before_ret_none
;
13620 as_fatal (_("invalid -mlfence-before-ret= option: `%s'"),
13624 case OPTION_MRELAX_RELOCATIONS
:
13625 if (strcasecmp (arg
, "yes") == 0)
13626 generate_relax_relocations
= 1;
13627 else if (strcasecmp (arg
, "no") == 0)
13628 generate_relax_relocations
= 0;
13630 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg
);
13633 case OPTION_MALIGN_BRANCH_BOUNDARY
:
13636 long int align
= strtoul (arg
, &end
, 0);
13641 align_branch_power
= 0;
13644 else if (align
>= 16)
13647 for (align_power
= 0;
13649 align
>>= 1, align_power
++)
13651 /* Limit alignment power to 31. */
13652 if (align
== 1 && align_power
< 32)
13654 align_branch_power
= align_power
;
13659 as_fatal (_("invalid -malign-branch-boundary= value: %s"), arg
);
13663 case OPTION_MALIGN_BRANCH_PREFIX_SIZE
:
13666 int align
= strtoul (arg
, &end
, 0);
13667 /* Some processors only support 5 prefixes. */
13668 if (*end
== '\0' && align
>= 0 && align
< 6)
13670 align_branch_prefix_size
= align
;
13673 as_fatal (_("invalid -malign-branch-prefix-size= value: %s"),
13678 case OPTION_MALIGN_BRANCH
:
13680 saved
= xstrdup (arg
);
13684 next
= strchr (type
, '+');
13687 if (strcasecmp (type
, "jcc") == 0)
13688 align_branch
|= align_branch_jcc_bit
;
13689 else if (strcasecmp (type
, "fused") == 0)
13690 align_branch
|= align_branch_fused_bit
;
13691 else if (strcasecmp (type
, "jmp") == 0)
13692 align_branch
|= align_branch_jmp_bit
;
13693 else if (strcasecmp (type
, "call") == 0)
13694 align_branch
|= align_branch_call_bit
;
13695 else if (strcasecmp (type
, "ret") == 0)
13696 align_branch
|= align_branch_ret_bit
;
13697 else if (strcasecmp (type
, "indirect") == 0)
13698 align_branch
|= align_branch_indirect_bit
;
13700 as_fatal (_("invalid -malign-branch= option: `%s'"), arg
);
13703 while (next
!= NULL
);
13707 case OPTION_MBRANCHES_WITH_32B_BOUNDARIES
:
13708 align_branch_power
= 5;
13709 align_branch_prefix_size
= 5;
13710 align_branch
= (align_branch_jcc_bit
13711 | align_branch_fused_bit
13712 | align_branch_jmp_bit
);
13715 case OPTION_MAMD64
:
13719 case OPTION_MINTEL64
:
13727 /* Turn off -Os. */
13728 optimize_for_space
= 0;
13730 else if (*arg
== 's')
13732 optimize_for_space
= 1;
13733 /* Turn on all encoding optimizations. */
13734 optimize
= INT_MAX
;
13738 optimize
= atoi (arg
);
13739 /* Turn off -Os. */
13740 optimize_for_space
= 0;
/* Fixed-width line template used by show_arch for wrapped output.  */
#define MESSAGE_TEMPLATE \
"                                                                                "

/* Append NAME (LEN bytes) to the line buffer MESSAGE at position P,
   comma-separating entries; when the line would overflow, flush it to
   STREAM and restart at START.  *LEFT_P tracks remaining room.  Returns
   the new write position.
   NOTE(review): reconstructed from a garbled source dump -- diff against
   upstream binutils before committing.  */
static char *
output_message (FILE *stream, char *p, char *message, char *start,
		int *left_p, const char *name, int len)
{
  int size = sizeof (MESSAGE_TEMPLATE);
  int left = *left_p;

  /* Reserve 2 spaces for ", " or ",\0" */
  left -= len + 2;

  /* Check if there is any room.  */
  if (left >= 0)
    {
      if (p != start)
	{
	  *p++ = ',';
	  *p++ = ' ';
	}
      p = mempcpy (p, name, len);
    }
  else
    {
      /* Output the current message now and start a new one.  */
      *p++ = ',';
      *p = '\0';
      fprintf (stream, "%s\n", message);
      p = start;
      left = size - (start - message) - len - 2;

      gas_assert (left >= 0);

      p = mempcpy (p, name, len);
    }

  *left_p = left;
  return p;
}
13792 show_arch (FILE *stream
, int ext
, int check
)
13794 static char message
[] = MESSAGE_TEMPLATE
;
13795 char *start
= message
+ 27;
13797 int size
= sizeof (MESSAGE_TEMPLATE
);
13804 left
= size
- (start
- message
);
13808 p
= output_message (stream
, p
, message
, start
, &left
,
13809 STRING_COMMA_LEN ("default"));
13810 p
= output_message (stream
, p
, message
, start
, &left
,
13811 STRING_COMMA_LEN ("push"));
13812 p
= output_message (stream
, p
, message
, start
, &left
,
13813 STRING_COMMA_LEN ("pop"));
13816 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13818 /* Should it be skipped? */
13819 if (cpu_arch
[j
].skip
)
13822 name
= cpu_arch
[j
].name
;
13823 len
= cpu_arch
[j
].len
;
13824 if (cpu_arch
[j
].type
== PROCESSOR_NONE
)
13826 /* It is an extension. Skip if we aren't asked to show it. */
13827 if (!ext
|| cpu_flags_all_zero (&cpu_arch
[j
].enable
))
13832 /* It is an processor. Skip if we show only extension. */
13835 else if (check
&& ! cpu_arch
[j
].enable
.bitfield
.cpui386
)
13837 /* It is an impossible processor - skip. */
13841 p
= output_message (stream
, p
, message
, start
, &left
, name
, len
);
13844 /* Display disabled extensions. */
13846 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13850 if (cpu_arch
[j
].type
!= PROCESSOR_NONE
13851 || !cpu_flags_all_zero (&cpu_arch
[j
].enable
))
13853 str
= xasprintf ("no%s", cpu_arch
[j
].name
);
13854 p
= output_message (stream
, p
, message
, start
, &left
, str
,
13860 fprintf (stream
, "%s\n", message
);
13864 md_show_usage (FILE *stream
)
13866 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13867 fprintf (stream
, _("\
13868 -Qy, -Qn ignored\n\
13869 -V print assembler version number\n\
13872 fprintf (stream
, _("\
13873 -n do not optimize code alignment\n\
13874 -O{012s} attempt some code optimizations\n\
13875 -q quieten some warnings\n"));
13876 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13877 fprintf (stream
, _("\
13881 # if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13882 fprintf (stream
, _("\
13883 --32/--64/--x32 generate 32bit/64bit/x32 object\n"));
13884 # elif defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O)
13885 fprintf (stream
, _("\
13886 --32/--64 generate 32bit/64bit object\n"));
13889 #ifdef SVR4_COMMENT_CHARS
13890 fprintf (stream
, _("\
13891 --divide do not treat `/' as a comment character\n"));
13893 fprintf (stream
, _("\
13894 --divide ignored\n"));
13896 fprintf (stream
, _("\
13897 -march=CPU[,+EXTENSION...]\n\
13898 generate code for CPU and EXTENSION, CPU is one of:\n"));
13899 show_arch (stream
, 0, 1);
13900 fprintf (stream
, _("\
13901 EXTENSION is combination of (possibly \"no\"-prefixed):\n"));
13902 show_arch (stream
, 1, 0);
13903 fprintf (stream
, _("\
13904 -mtune=CPU optimize for CPU, CPU is one of:\n"));
13905 show_arch (stream
, 0, 0);
13906 fprintf (stream
, _("\
13907 -msse2avx encode SSE instructions with VEX prefix\n"));
13908 fprintf (stream
, _("\
13909 -muse-unaligned-vector-move\n\
13910 encode aligned vector move as unaligned vector move\n"));
13911 fprintf (stream
, _("\
13912 -msse-check=[none|error|warning] (default: warning)\n\
13913 check SSE instructions\n"));
13914 fprintf (stream
, _("\
13915 -moperand-check=[none|error|warning] (default: warning)\n\
13916 check operand combinations for validity\n"));
13917 fprintf (stream
, _("\
13918 -mavxscalar=[128|256] (default: 128)\n\
13919 encode scalar AVX instructions with specific vector\n\
13921 fprintf (stream
, _("\
13922 -mvexwig=[0|1] (default: 0)\n\
13923 encode VEX instructions with specific VEX.W value\n\
13924 for VEX.W bit ignored instructions\n"));
13925 fprintf (stream
, _("\
13926 -mevexlig=[128|256|512] (default: 128)\n\
13927 encode scalar EVEX instructions with specific vector\n\
13929 fprintf (stream
, _("\
13930 -mevexwig=[0|1] (default: 0)\n\
13931 encode EVEX instructions with specific EVEX.W value\n\
13932 for EVEX.W bit ignored instructions\n"));
13933 fprintf (stream
, _("\
13934 -mevexrcig=[rne|rd|ru|rz] (default: rne)\n\
13935 encode EVEX instructions with specific EVEX.RC value\n\
13936 for SAE-only ignored instructions\n"));
13937 fprintf (stream
, _("\
13938 -mmnemonic=[att|intel] "));
13939 if (SYSV386_COMPAT
)
13940 fprintf (stream
, _("(default: att)\n"));
13942 fprintf (stream
, _("(default: intel)\n"));
13943 fprintf (stream
, _("\
13944 use AT&T/Intel mnemonic\n"));
13945 fprintf (stream
, _("\
13946 -msyntax=[att|intel] (default: att)\n\
13947 use AT&T/Intel syntax\n"));
13948 fprintf (stream
, _("\
13949 -mindex-reg support pseudo index registers\n"));
13950 fprintf (stream
, _("\
13951 -mnaked-reg don't require `%%' prefix for registers\n"));
13952 fprintf (stream
, _("\
13953 -madd-bnd-prefix add BND prefix for all valid branches\n"));
13954 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13955 fprintf (stream
, _("\
13956 -mshared disable branch optimization for shared code\n"));
13957 fprintf (stream
, _("\
13958 -mx86-used-note=[no|yes] "));
13959 if (DEFAULT_X86_USED_NOTE
)
13960 fprintf (stream
, _("(default: yes)\n"));
13962 fprintf (stream
, _("(default: no)\n"));
13963 fprintf (stream
, _("\
13964 generate x86 used ISA and feature properties\n"));
13966 #if defined (TE_PE) || defined (TE_PEP)
13967 fprintf (stream
, _("\
13968 -mbig-obj generate big object files\n"));
13970 fprintf (stream
, _("\
13971 -momit-lock-prefix=[no|yes] (default: no)\n\
13972 strip all lock prefixes\n"));
13973 fprintf (stream
, _("\
13974 -mfence-as-lock-add=[no|yes] (default: no)\n\
13975 encode lfence, mfence and sfence as\n\
13976 lock addl $0x0, (%%{re}sp)\n"));
13977 fprintf (stream
, _("\
13978 -mrelax-relocations=[no|yes] "));
13979 if (DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
)
13980 fprintf (stream
, _("(default: yes)\n"));
13982 fprintf (stream
, _("(default: no)\n"));
13983 fprintf (stream
, _("\
13984 generate relax relocations\n"));
13985 fprintf (stream
, _("\
13986 -malign-branch-boundary=NUM (default: 0)\n\
13987 align branches within NUM byte boundary\n"));
13988 fprintf (stream
, _("\
13989 -malign-branch=TYPE[+TYPE...] (default: jcc+fused+jmp)\n\
13990 TYPE is combination of jcc, fused, jmp, call, ret,\n\
13992 specify types of branches to align\n"));
13993 fprintf (stream
, _("\
13994 -malign-branch-prefix-size=NUM (default: 5)\n\
13995 align branches with NUM prefixes per instruction\n"));
13996 fprintf (stream
, _("\
13997 -mbranches-within-32B-boundaries\n\
13998 align branches within 32 byte boundary\n"));
13999 fprintf (stream
, _("\
14000 -mlfence-after-load=[no|yes] (default: no)\n\
14001 generate lfence after load\n"));
14002 fprintf (stream
, _("\
14003 -mlfence-before-indirect-branch=[none|all|register|memory] (default: none)\n\
14004 generate lfence before indirect near branch\n"));
14005 fprintf (stream
, _("\
14006 -mlfence-before-ret=[none|or|not|shl|yes] (default: none)\n\
14007 generate lfence before ret\n"));
14008 fprintf (stream
, _("\
14009 -mamd64 accept only AMD64 ISA [default]\n"));
14010 fprintf (stream
, _("\
14011 -mintel64 accept only Intel64 ISA\n"));
#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
     || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))

/* Pick the target format to use.  Returns the BFD target name for the
   configured default architecture / ABI, and as a side effect fixes up
   the code-size flag, the default CPU flag sets, and the relocation
   style (Rel vs Rela) globals.  */

const char *
i386_target_format (void)
{
  if (startswith (default_arch, "x86_64"))
    {
      update_code_flag (CODE_64BIT, 1);
      /* Plain "x86_64" is the LP64 ABI; a longer spelling (e.g. with a
	 trailing ":32") selects the ILP32 x32 ABI.  */
      if (default_arch[6] == '\0')
	x86_elf_abi = X86_64_ABI;
      else
	x86_elf_abi = X86_64_X32_ABI;
      /* NOTE(review): upstream also sets object_64bit here via the ELF
	 case below; confirm against the full file.  */
    }
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else if (!strcmp (default_arch, "iamcu"))
    {
      update_code_flag (CODE_32BIT, 1);
      if (cpu_arch_isa == PROCESSOR_UNKNOWN)
	{
	  /* No explicit -march=: default every CPU setting to IAMCU.  */
	  static const i386_cpu_flags iamcu_flags = CPU_IAMCU_FLAGS;

	  cpu_arch_name = "iamcu";
	  free (cpu_sub_arch_name);
	  cpu_sub_arch_name = NULL;
	  cpu_arch_flags = iamcu_flags;
	  cpu_arch_isa = PROCESSOR_IAMCU;
	  cpu_arch_isa_flags = iamcu_flags;
	  if (!cpu_arch_tune_set)
	    {
	      cpu_arch_tune = cpu_arch_isa;
	      cpu_arch_tune_flags = cpu_arch_isa_flags;
	    }
	}
      else if (cpu_arch_isa != PROCESSOR_IAMCU)
	as_fatal (_("Intel MCU doesn't support `%s' architecture"),
		  cpu_arch_name);
    }
  else
    as_fatal (_("unknown architecture"));

  /* Fall back to the generic per-mode defaults when no ISA/tuning flags
     were established above or by command line options.  */
  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].enable;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].enable;

  switch (OUTPUT_FLAVOR)
    {
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#endif
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      if (flag_code == CODE_64BIT)
	{
	  object_64bit = 1;
	  return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
	}
      return use_big_obj ? "pe-bigobj-i386" : "pe-i386";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
# else
    case bfd_target_coff_flavour:
      return "coff-i386";
# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
      {
	const char *format;

	switch (x86_elf_abi)
	  {
	  default:
	    /* Plain 32-bit ELF (i386).  */
	    format = ELF_TARGET_FORMAT;
	    tls_get_addr = "___tls_get_addr";
	    break;
	  case X86_64_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    tls_get_addr = "__tls_get_addr";
	    format = ELF_TARGET_FORMAT64;
	    break;
	  case X86_64_X32_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    tls_get_addr = "__tls_get_addr";
	    /* x32 objects cannot carry 64-bit relocations.  */
	    disallow_64bit_reloc = 1;
	    format = ELF_TARGET_FORMAT32;
	    break;
	  }
	if (cpu_arch_isa == PROCESSOR_IAMCU)
	  {
	    if (x86_elf_abi != I386_ABI)
	      as_fatal (_("Intel MCU is 32bit only"));
	    return ELF_TARGET_IAMCU_FORMAT;
	  }
	else
	  return format;
      }
#endif
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      if (flag_code == CODE_64BIT)
	{
	  use_rela_relocations = 1;
	  object_64bit = 1;
	  return "mach-o-x86-64";
	}
      else
	return "mach-o-i386";
#endif
    default:
      abort ();
      return NULL;
    }
}

#endif /* OBJ_MAYBE_ more than one */
14147 md_undefined_symbol (char *name
)
14149 if (name
[0] == GLOBAL_OFFSET_TABLE_NAME
[0]
14150 && name
[1] == GLOBAL_OFFSET_TABLE_NAME
[1]
14151 && name
[2] == GLOBAL_OFFSET_TABLE_NAME
[2]
14152 && strcmp (name
, GLOBAL_OFFSET_TABLE_NAME
) == 0)
14156 if (symbol_find (name
))
14157 as_bad (_("GOT already in symbol table"));
14158 GOT_symbol
= symbol_new (name
, undefined_section
,
14159 &zero_address_frag
, 0);
14166 /* Round up a section size to the appropriate boundary. */
14169 md_section_align (segT segment ATTRIBUTE_UNUSED
, valueT size
)
14171 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
14172 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
14174 /* For a.out, force the section size to be aligned. If we don't do
14175 this, BFD will align it for us, but it will not write out the
14176 final bytes of the section. This may be a bug in BFD, but it is
14177 easier to fix it here since that is how the other a.out targets
14181 align
= bfd_section_alignment (segment
);
14182 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
14189 /* On the i386, PC-relative offsets are relative to the start of the
14190 next instruction. That is, the address of the offset, plus its
14191 size, since the offset is always the last part of the insn. */
14194 md_pcrel_from (fixS
*fixP
)
14196 return fixP
->fx_size
+ fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
14202 s_bss (int ignore ATTRIBUTE_UNUSED
)
14206 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14208 obj_elf_section_change_hook ();
14210 temp
= get_absolute_expression ();
14211 subseg_set (bss_section
, (subsegT
) temp
);
14212 demand_empty_rest_of_line ();
14217 /* Remember constant directive. */
14220 i386_cons_align (int ignore ATTRIBUTE_UNUSED
)
14222 if (last_insn
.kind
!= last_insn_directive
14223 && (bfd_section_flags (now_seg
) & SEC_CODE
))
14225 last_insn
.seg
= now_seg
;
14226 last_insn
.kind
= last_insn_directive
;
14227 last_insn
.name
= "constant directive";
14228 last_insn
.file
= as_where (&last_insn
.line
);
14229 if (lfence_before_ret
!= lfence_before_ret_none
)
14231 if (lfence_before_indirect_branch
!= lfence_branch_none
)
14232 as_warn (_("constant directive skips -mlfence-before-ret "
14233 "and -mlfence-before-indirect-branch"));
14235 as_warn (_("constant directive skips -mlfence-before-ret"));
14237 else if (lfence_before_indirect_branch
!= lfence_branch_none
)
14238 as_warn (_("constant directive skips -mlfence-before-indirect-branch"));
14243 i386_validate_fix (fixS
*fixp
)
14245 if (fixp
->fx_addsy
&& S_GET_SEGMENT(fixp
->fx_addsy
) == reg_section
)
14247 reloc_howto_type
*howto
;
14249 howto
= bfd_reloc_type_lookup (stdoutput
, fixp
->fx_r_type
);
14250 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14251 _("invalid %s relocation against register"),
14252 howto
? howto
->name
: "<unknown>");
14256 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14257 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
14258 || fixp
->fx_r_type
== BFD_RELOC_SIZE64
)
14259 return IS_ELF
&& fixp
->fx_addsy
14260 && (!S_IS_DEFINED (fixp
->fx_addsy
)
14261 || S_IS_EXTERNAL (fixp
->fx_addsy
));
14264 if (fixp
->fx_subsy
)
14266 if (fixp
->fx_subsy
== GOT_symbol
)
14268 if (fixp
->fx_r_type
== BFD_RELOC_32_PCREL
)
14272 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14273 if (fixp
->fx_tcbit2
)
14274 fixp
->fx_r_type
= (fixp
->fx_tcbit
14275 ? BFD_RELOC_X86_64_REX_GOTPCRELX
14276 : BFD_RELOC_X86_64_GOTPCRELX
);
14279 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTPCREL
;
14284 fixp
->fx_r_type
= BFD_RELOC_386_GOTOFF
;
14286 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTOFF64
;
14288 fixp
->fx_subsy
= 0;
14291 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14294 /* NB: Commit 292676c1 resolved PLT32 reloc aganst local symbol
14295 to section. Since PLT32 relocation must be against symbols,
14296 turn such PLT32 relocation into PC32 relocation. */
14298 && (fixp
->fx_r_type
== BFD_RELOC_386_PLT32
14299 || fixp
->fx_r_type
== BFD_RELOC_X86_64_PLT32
)
14300 && symbol_section_p (fixp
->fx_addsy
))
14301 fixp
->fx_r_type
= BFD_RELOC_32_PCREL
;
14304 if (fixp
->fx_r_type
== BFD_RELOC_386_GOT32
14305 && fixp
->fx_tcbit2
)
14306 fixp
->fx_r_type
= BFD_RELOC_386_GOT32X
;
14315 tc_gen_reloc (asection
*section ATTRIBUTE_UNUSED
, fixS
*fixp
)
14318 bfd_reloc_code_real_type code
;
14320 switch (fixp
->fx_r_type
)
14322 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14325 case BFD_RELOC_SIZE32
:
14326 case BFD_RELOC_SIZE64
:
14328 && !bfd_is_abs_section (S_GET_SEGMENT (fixp
->fx_addsy
))
14329 && (!fixp
->fx_subsy
14330 || bfd_is_abs_section (S_GET_SEGMENT (fixp
->fx_subsy
))))
14331 sym
= fixp
->fx_addsy
;
14332 else if (fixp
->fx_subsy
14333 && !bfd_is_abs_section (S_GET_SEGMENT (fixp
->fx_subsy
))
14334 && (!fixp
->fx_addsy
14335 || bfd_is_abs_section (S_GET_SEGMENT (fixp
->fx_addsy
))))
14336 sym
= fixp
->fx_subsy
;
14339 if (IS_ELF
&& sym
&& S_IS_DEFINED (sym
) && !S_IS_EXTERNAL (sym
))
14341 /* Resolve size relocation against local symbol to size of
14342 the symbol plus addend. */
14343 valueT value
= S_GET_SIZE (sym
);
14345 if (symbol_get_bfdsym (sym
)->flags
& BSF_SECTION_SYM
)
14346 value
= bfd_section_size (S_GET_SEGMENT (sym
));
14347 if (sym
== fixp
->fx_subsy
)
14350 if (fixp
->fx_addsy
)
14351 value
+= S_GET_VALUE (fixp
->fx_addsy
);
14353 else if (fixp
->fx_subsy
)
14354 value
-= S_GET_VALUE (fixp
->fx_subsy
);
14355 value
+= fixp
->fx_offset
;
14356 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
14358 && !fits_in_unsigned_long (value
))
14359 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14360 _("symbol size computation overflow"));
14361 fixp
->fx_addsy
= NULL
;
14362 fixp
->fx_subsy
= NULL
;
14363 md_apply_fix (fixp
, (valueT
*) &value
, NULL
);
14366 if (!fixp
->fx_addsy
|| fixp
->fx_subsy
)
14368 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14369 "unsupported expression involving @size");
14373 /* Fall through. */
14375 case BFD_RELOC_X86_64_PLT32
:
14376 case BFD_RELOC_X86_64_GOT32
:
14377 case BFD_RELOC_X86_64_GOTPCREL
:
14378 case BFD_RELOC_X86_64_GOTPCRELX
:
14379 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
14380 case BFD_RELOC_386_PLT32
:
14381 case BFD_RELOC_386_GOT32
:
14382 case BFD_RELOC_386_GOT32X
:
14383 case BFD_RELOC_386_GOTOFF
:
14384 case BFD_RELOC_386_GOTPC
:
14385 case BFD_RELOC_386_TLS_GD
:
14386 case BFD_RELOC_386_TLS_LDM
:
14387 case BFD_RELOC_386_TLS_LDO_32
:
14388 case BFD_RELOC_386_TLS_IE_32
:
14389 case BFD_RELOC_386_TLS_IE
:
14390 case BFD_RELOC_386_TLS_GOTIE
:
14391 case BFD_RELOC_386_TLS_LE_32
:
14392 case BFD_RELOC_386_TLS_LE
:
14393 case BFD_RELOC_386_TLS_GOTDESC
:
14394 case BFD_RELOC_386_TLS_DESC_CALL
:
14395 case BFD_RELOC_X86_64_TLSGD
:
14396 case BFD_RELOC_X86_64_TLSLD
:
14397 case BFD_RELOC_X86_64_DTPOFF32
:
14398 case BFD_RELOC_X86_64_DTPOFF64
:
14399 case BFD_RELOC_X86_64_GOTTPOFF
:
14400 case BFD_RELOC_X86_64_TPOFF32
:
14401 case BFD_RELOC_X86_64_TPOFF64
:
14402 case BFD_RELOC_X86_64_GOTOFF64
:
14403 case BFD_RELOC_X86_64_GOTPC32
:
14404 case BFD_RELOC_X86_64_GOT64
:
14405 case BFD_RELOC_X86_64_GOTPCREL64
:
14406 case BFD_RELOC_X86_64_GOTPC64
:
14407 case BFD_RELOC_X86_64_GOTPLT64
:
14408 case BFD_RELOC_X86_64_PLTOFF64
:
14409 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
14410 case BFD_RELOC_X86_64_TLSDESC_CALL
:
14411 case BFD_RELOC_RVA
:
14412 case BFD_RELOC_VTABLE_ENTRY
:
14413 case BFD_RELOC_VTABLE_INHERIT
:
14415 case BFD_RELOC_32_SECREL
:
14416 case BFD_RELOC_16_SECIDX
:
14418 code
= fixp
->fx_r_type
;
14420 case BFD_RELOC_X86_64_32S
:
14421 if (!fixp
->fx_pcrel
)
14423 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
14424 code
= fixp
->fx_r_type
;
14427 /* Fall through. */
14429 if (fixp
->fx_pcrel
)
14431 switch (fixp
->fx_size
)
14434 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14435 _("can not do %d byte pc-relative relocation"),
14437 code
= BFD_RELOC_32_PCREL
;
14439 case 1: code
= BFD_RELOC_8_PCREL
; break;
14440 case 2: code
= BFD_RELOC_16_PCREL
; break;
14441 case 4: code
= BFD_RELOC_32_PCREL
; break;
14443 case 8: code
= BFD_RELOC_64_PCREL
; break;
14449 switch (fixp
->fx_size
)
14452 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14453 _("can not do %d byte relocation"),
14455 code
= BFD_RELOC_32
;
14457 case 1: code
= BFD_RELOC_8
; break;
14458 case 2: code
= BFD_RELOC_16
; break;
14459 case 4: code
= BFD_RELOC_32
; break;
14461 case 8: code
= BFD_RELOC_64
; break;
14468 if ((code
== BFD_RELOC_32
14469 || code
== BFD_RELOC_32_PCREL
14470 || code
== BFD_RELOC_X86_64_32S
)
14472 && fixp
->fx_addsy
== GOT_symbol
)
14475 code
= BFD_RELOC_386_GOTPC
;
14477 code
= BFD_RELOC_X86_64_GOTPC32
;
14479 if ((code
== BFD_RELOC_64
|| code
== BFD_RELOC_64_PCREL
)
14481 && fixp
->fx_addsy
== GOT_symbol
)
14483 code
= BFD_RELOC_X86_64_GOTPC64
;
14486 rel
= XNEW (arelent
);
14487 rel
->sym_ptr_ptr
= XNEW (asymbol
*);
14488 *rel
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
14490 rel
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
14492 if (!use_rela_relocations
)
14494 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
14495 vtable entry to be used in the relocation's section offset. */
14496 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
14497 rel
->address
= fixp
->fx_offset
;
14498 #if defined (OBJ_COFF) && defined (TE_PE)
14499 else if (fixp
->fx_addsy
&& S_IS_WEAK (fixp
->fx_addsy
))
14500 rel
->addend
= fixp
->fx_addnumber
- (S_GET_VALUE (fixp
->fx_addsy
) * 2);
14505 /* Use the rela in 64bit mode. */
14508 if (disallow_64bit_reloc
)
14511 case BFD_RELOC_X86_64_DTPOFF64
:
14512 case BFD_RELOC_X86_64_TPOFF64
:
14513 case BFD_RELOC_64_PCREL
:
14514 case BFD_RELOC_X86_64_GOTOFF64
:
14515 case BFD_RELOC_X86_64_GOT64
:
14516 case BFD_RELOC_X86_64_GOTPCREL64
:
14517 case BFD_RELOC_X86_64_GOTPC64
:
14518 case BFD_RELOC_X86_64_GOTPLT64
:
14519 case BFD_RELOC_X86_64_PLTOFF64
:
14520 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14521 _("cannot represent relocation type %s in x32 mode"),
14522 bfd_get_reloc_code_name (code
));
14528 if (!fixp
->fx_pcrel
)
14529 rel
->addend
= fixp
->fx_offset
;
14533 case BFD_RELOC_X86_64_PLT32
:
14534 case BFD_RELOC_X86_64_GOT32
:
14535 case BFD_RELOC_X86_64_GOTPCREL
:
14536 case BFD_RELOC_X86_64_GOTPCRELX
:
14537 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
14538 case BFD_RELOC_X86_64_TLSGD
:
14539 case BFD_RELOC_X86_64_TLSLD
:
14540 case BFD_RELOC_X86_64_GOTTPOFF
:
14541 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
14542 case BFD_RELOC_X86_64_TLSDESC_CALL
:
14543 rel
->addend
= fixp
->fx_offset
- fixp
->fx_size
;
14546 rel
->addend
= (section
->vma
14548 + fixp
->fx_addnumber
14549 + md_pcrel_from (fixp
));
14554 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
14555 if (rel
->howto
== NULL
)
14557 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14558 _("cannot represent relocation type %s"),
14559 bfd_get_reloc_code_name (code
));
14560 /* Set howto to a garbage value so that we can keep going. */
14561 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, BFD_RELOC_32
);
14562 gas_assert (rel
->howto
!= NULL
);
14568 #include "tc-i386-intel.c"
14571 tc_x86_parse_to_dw2regnum (expressionS
*exp
)
14573 int saved_naked_reg
;
14574 char saved_register_dot
;
14576 saved_naked_reg
= allow_naked_reg
;
14577 allow_naked_reg
= 1;
14578 saved_register_dot
= register_chars
['.'];
14579 register_chars
['.'] = '.';
14580 allow_pseudo_reg
= 1;
14581 expression_and_evaluate (exp
);
14582 allow_pseudo_reg
= 0;
14583 register_chars
['.'] = saved_register_dot
;
14584 allow_naked_reg
= saved_naked_reg
;
14586 if (exp
->X_op
== O_register
&& exp
->X_add_number
>= 0)
14588 if ((addressT
) exp
->X_add_number
< i386_regtab_size
)
14590 exp
->X_op
= O_constant
;
14591 exp
->X_add_number
= i386_regtab
[exp
->X_add_number
]
14592 .dw2_regnum
[flag_code
>> 1];
14595 exp
->X_op
= O_illegal
;
14600 tc_x86_frame_initial_instructions (void)
14602 static unsigned int sp_regno
[2];
14604 if (!sp_regno
[flag_code
>> 1])
14606 char *saved_input
= input_line_pointer
;
14607 char sp
[][4] = {"esp", "rsp"};
14610 input_line_pointer
= sp
[flag_code
>> 1];
14611 tc_x86_parse_to_dw2regnum (&exp
);
14612 gas_assert (exp
.X_op
== O_constant
);
14613 sp_regno
[flag_code
>> 1] = exp
.X_add_number
;
14614 input_line_pointer
= saved_input
;
14617 cfi_add_CFA_def_cfa (sp_regno
[flag_code
>> 1], -x86_cie_data_alignment
);
14618 cfi_add_CFA_offset (x86_dwarf2_return_column
, x86_cie_data_alignment
);
14622 x86_dwarf2_addr_size (void)
14624 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
14625 if (x86_elf_abi
== X86_64_X32_ABI
)
14628 return bfd_arch_bits_per_address (stdoutput
) / 8;
14632 i386_elf_section_type (const char *str
, size_t len
)
14634 if (flag_code
== CODE_64BIT
14635 && len
== sizeof ("unwind") - 1
14636 && startswith (str
, "unwind"))
14637 return SHT_X86_64_UNWIND
;
14644 i386_solaris_fix_up_eh_frame (segT sec
)
14646 if (flag_code
== CODE_64BIT
)
14647 elf_section_type (sec
) = SHT_X86_64_UNWIND
;
14653 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
14657 exp
.X_op
= O_secrel
;
14658 exp
.X_add_symbol
= symbol
;
14659 exp
.X_add_number
= 0;
14660 emit_expr (&exp
, size
);
14664 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14665 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
14668 x86_64_section_letter (int letter
, const char **ptr_msg
)
14670 if (flag_code
== CODE_64BIT
)
14673 return SHF_X86_64_LARGE
;
14675 *ptr_msg
= _("bad .section directive: want a,l,w,x,M,S,G,T in string");
14678 *ptr_msg
= _("bad .section directive: want a,w,x,M,S,G,T in string");
14683 x86_64_section_word (char *str
, size_t len
)
14685 if (len
== 5 && flag_code
== CODE_64BIT
&& startswith (str
, "large"))
14686 return SHF_X86_64_LARGE
;
14692 handle_large_common (int small ATTRIBUTE_UNUSED
)
14694 if (flag_code
!= CODE_64BIT
)
14696 s_comm_internal (0, elf_common_parse
);
14697 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
14701 static segT lbss_section
;
14702 asection
*saved_com_section_ptr
= elf_com_section_ptr
;
14703 asection
*saved_bss_section
= bss_section
;
14705 if (lbss_section
== NULL
)
14707 flagword applicable
;
14708 segT seg
= now_seg
;
14709 subsegT subseg
= now_subseg
;
14711 /* The .lbss section is for local .largecomm symbols. */
14712 lbss_section
= subseg_new (".lbss", 0);
14713 applicable
= bfd_applicable_section_flags (stdoutput
);
14714 bfd_set_section_flags (lbss_section
, applicable
& SEC_ALLOC
);
14715 seg_info (lbss_section
)->bss
= 1;
14717 subseg_set (seg
, subseg
);
14720 elf_com_section_ptr
= &_bfd_elf_large_com_section
;
14721 bss_section
= lbss_section
;
14723 s_comm_internal (0, elf_common_parse
);
14725 elf_com_section_ptr
= saved_com_section_ptr
;
14726 bss_section
= saved_bss_section
;
14729 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */