1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2022 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
37 #ifndef INFER_ADDR_PREFIX
38 #define INFER_ADDR_PREFIX 1
42 #define DEFAULT_ARCH "i386"
47 #define INLINE __inline__
53 /* Prefixes will be emitted in the order defined below.
54 WAIT_PREFIX must be the first prefix since FWAIT really is an
55 instruction, and so must come before any prefixes.
56 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
57 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
63 #define HLE_PREFIX REP_PREFIX
64 #define BND_PREFIX REP_PREFIX
66 #define REX_PREFIX 6 /* must come last. */
67 #define MAX_PREFIXES 7 /* max prefixes per opcode */
69 /* we define the syntax here (modulo base,index,scale syntax) */
70 #define REGISTER_PREFIX '%'
71 #define IMMEDIATE_PREFIX '$'
72 #define ABSOLUTE_PREFIX '*'
74 /* these are the instruction mnemonic suffixes in AT&T syntax or
75 memory operand size in Intel syntax. */
76 #define WORD_MNEM_SUFFIX 'w'
77 #define BYTE_MNEM_SUFFIX 'b'
78 #define SHORT_MNEM_SUFFIX 's'
79 #define LONG_MNEM_SUFFIX 'l'
80 #define QWORD_MNEM_SUFFIX 'q'
81 /* Intel Syntax.  Use a non-ascii letter since it never appears
83 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
85 #define END_OF_INSN '\0'
87 /* This matches the C -> StaticRounding alias in the opcode table. */
88 #define commutative staticrounding
91 'templates' is for grouping together 'template' structures for opcodes
92 of the same name. This is only used for storing the insns in the grand
93 ole hash table of insns.
94 The templates themselves start at START and range up to (but not including)
99 const insn_template
*start
;
100 const insn_template
*end
;
104 /* 386 operand encoding bytes: see 386 book for details of this. */
107 unsigned int regmem
; /* codes register or memory operand */
108 unsigned int reg
; /* codes register operand (or extended opcode) */
109 unsigned int mode
; /* how to interpret regmem & reg */
113 /* x86-64 extension prefix. */
114 typedef int rex_byte
;
116 /* 386 opcode byte to code indirect addressing. */
125 /* x86 arch names, types and features */
128 const char *name
; /* arch name */
129 unsigned int len
; /* arch string length */
130 enum processor_type type
; /* arch type */
131 i386_cpu_flags flags
; /* cpu feature flags */
132 unsigned int skip
; /* show_arch should skip this. */
136 /* Used to turn off indicated flags. */
139 const char *name
; /* arch name */
140 unsigned int len
; /* arch string length */
141 i386_cpu_flags flags
; /* cpu feature flags */
145 static void update_code_flag (int, int);
146 static void set_code_flag (int);
147 static void set_16bit_gcc_code_flag (int);
148 static void set_intel_syntax (int);
149 static void set_intel_mnemonic (int);
150 static void set_allow_index_reg (int);
151 static void set_check (int);
152 static void set_cpu_arch (int);
154 static void pe_directive_secrel (int);
156 static void signed_cons (int);
157 static char *output_invalid (int c
);
158 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
160 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
162 static int i386_att_operand (char *);
163 static int i386_intel_operand (char *, int);
164 static int i386_intel_simplify (expressionS
*);
165 static int i386_intel_parse_name (const char *, expressionS
*);
166 static const reg_entry
*parse_register (char *, char **);
167 static char *parse_insn (char *, char *);
168 static char *parse_operands (char *, const char *);
169 static void swap_operands (void);
170 static void swap_2_operands (unsigned int, unsigned int);
171 static enum flag_code
i386_addressing_mode (void);
172 static void optimize_imm (void);
173 static void optimize_disp (void);
174 static const insn_template
*match_template (char);
175 static int check_string (void);
176 static int process_suffix (void);
177 static int check_byte_reg (void);
178 static int check_long_reg (void);
179 static int check_qword_reg (void);
180 static int check_word_reg (void);
181 static int finalize_imm (void);
182 static int process_operands (void);
183 static const reg_entry
*build_modrm_byte (void);
184 static void output_insn (void);
185 static void output_imm (fragS
*, offsetT
);
186 static void output_disp (fragS
*, offsetT
);
188 static void s_bss (int);
190 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
191 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
193 /* GNU_PROPERTY_X86_ISA_1_USED. */
194 static unsigned int x86_isa_1_used
;
195 /* GNU_PROPERTY_X86_FEATURE_2_USED. */
196 static unsigned int x86_feature_2_used
;
197 /* Generate x86 used ISA and feature properties. */
198 static unsigned int x86_used_note
= DEFAULT_X86_USED_NOTE
;
201 static const char *default_arch
= DEFAULT_ARCH
;
/* parse_register() returns this when a register alias cannot be used.
   The sentinel carries an invalid name and empty operand type so callers
   can detect it by address rather than by inspecting fields.  */
static const reg_entry bad_reg = { "<bad>", OPERAND_TYPE_NONE, 0, 0,
				   { Dw2Inval, Dw2Inval } };

/* Pointers to frequently referenced registers (judging by the names:
   %eax/%rax, %ds, %es, %ss, %st(0), %k0).
   NOTE(review): assumed to be filled in during assembler initialization
   (e.g. md_begin) — all NULL until then; confirm before relying on them.  */
static const reg_entry *reg_eax;
static const reg_entry *reg_ds;
static const reg_entry *reg_es;
static const reg_entry *reg_ss;
static const reg_entry *reg_st0;
static const reg_entry *reg_k0;
217 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
218 unsigned char bytes
[4];
220 /* Destination or source register specifier. */
221 const reg_entry
*register_specifier
;
224 /* 'md_assemble ()' gathers together information and puts it into a
231 const reg_entry
*regs
;
236 operand_size_mismatch
,
237 operand_type_mismatch
,
238 register_type_mismatch
,
239 number_of_operands_mismatch
,
240 invalid_instruction_suffix
,
242 unsupported_with_intel_mnemonic
,
246 invalid_vsib_address
,
247 invalid_vector_register_set
,
248 invalid_tmm_register_set
,
249 invalid_dest_and_src_register_set
,
250 unsupported_vector_index_register
,
251 unsupported_broadcast
,
254 mask_not_on_destination
,
257 rc_sae_operand_not_last_imm
,
258 invalid_register_operand
,
263 /* TM holds the template for the insn were currently assembling. */
266 /* SUFFIX holds the instruction size suffix for byte, word, dword
267 or qword, if given. */
270 /* OPCODE_LENGTH holds the number of base opcode bytes. */
271 unsigned char opcode_length
;
273 /* OPERANDS gives the number of given operands. */
274 unsigned int operands
;
276 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
277 of given register, displacement, memory operands and immediate
279 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
281 /* TYPES [i] is the type (see above #defines) which tells us how to
282 use OP[i] for the corresponding operand. */
283 i386_operand_type types
[MAX_OPERANDS
];
285 /* Displacement expression, immediate expression, or register for each
287 union i386_op op
[MAX_OPERANDS
];
289 /* Flags for operands. */
290 unsigned int flags
[MAX_OPERANDS
];
291 #define Operand_PCrel 1
292 #define Operand_Mem 2
294 /* Relocation type for operand */
295 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
297 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
298 the base index byte below. */
299 const reg_entry
*base_reg
;
300 const reg_entry
*index_reg
;
301 unsigned int log2_scale_factor
;
303 /* SEG gives the seg_entries of this insn. They are zero unless
304 explicit segment overrides are given. */
305 const reg_entry
*seg
[2];
307 /* Copied first memory operand string, for re-checking. */
310 /* PREFIX holds all the given prefix opcodes (usually null).
311 PREFIXES is the number of prefix opcodes. */
312 unsigned int prefixes
;
313 unsigned char prefix
[MAX_PREFIXES
];
315 /* Register is in low 3 bits of opcode. */
318 /* The operand to a branch insn indicates an absolute branch. */
321 /* Extended states. */
329 xstate_ymm
= 1 << 2 | xstate_xmm
,
331 xstate_zmm
= 1 << 3 | xstate_ymm
,
334 /* Use MASK state. */
338 /* Has GOTPC or TLS relocation. */
339 bool has_gotpc_tls_reloc
;
341 /* RM and SIB are the modrm byte and the sib byte where the
342 addressing modes of this insn are encoded. */
349 /* Masking attributes.
351 The struct describes masking, applied to OPERAND in the instruction.
352 REG is a pointer to the corresponding mask register. ZEROING tells
353 whether merging or zeroing mask is used. */
354 struct Mask_Operation
356 const reg_entry
*reg
;
357 unsigned int zeroing
;
358 /* The operand where this operation is associated. */
359 unsigned int operand
;
362 /* Rounding control and SAE attributes. */
375 unsigned int operand
;
378 /* Broadcasting attributes.
380 The struct describes broadcasting, applied to OPERAND.  TYPE
381 expresses the broadcast factor.
382 struct Broadcast_Operation
384 /* Type of broadcast: {1to2}, {1to4}, {1to8}, {1to16} or {1to32}. */
387 /* Index of broadcasted operand. */
388 unsigned int operand
;
390 /* Number of bytes to broadcast. */
394 /* Compressed disp8*N attribute. */
395 unsigned int memshift
;
397 /* Prefer load or store in encoding. */
400 dir_encoding_default
= 0,
406 /* Prefer 8bit, 16bit, 32bit displacement in encoding. */
409 disp_encoding_default
= 0,
415 /* Prefer the REX byte in encoding. */
418 /* Disable instruction size optimization. */
421 /* How to encode vector instructions. */
424 vex_encoding_default
= 0,
432 const char *rep_prefix
;
435 const char *hle_prefix
;
437 /* Have BND prefix. */
438 const char *bnd_prefix
;
440 /* Have NOTRACK prefix. */
441 const char *notrack_prefix
;
444 enum i386_error error
;
447 typedef struct _i386_insn i386_insn
;
449 /* Link RC type with corresponding string, that'll be looked for in
458 static const struct RC_name RC_NamesTable
[] =
460 { rne
, STRING_COMMA_LEN ("rn-sae") },
461 { rd
, STRING_COMMA_LEN ("rd-sae") },
462 { ru
, STRING_COMMA_LEN ("ru-sae") },
463 { rz
, STRING_COMMA_LEN ("rz-sae") },
464 { saeonly
, STRING_COMMA_LEN ("sae") },
467 /* List of chars besides those in app.c:symbol_chars that can start an
468 operand. Used to prevent the scrubber eating vital white-space. */
469 const char extra_symbol_chars
[] = "*%-([{}"
478 #if ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
479 && !defined (TE_GNU) \
480 && !defined (TE_LINUX) \
481 && !defined (TE_Haiku) \
482 && !defined (TE_FreeBSD) \
483 && !defined (TE_DragonFly) \
484 && !defined (TE_NetBSD))
485 /* This array holds the chars that always start a comment. If the
486 pre-processor is disabled, these aren't very useful. The option
487 --divide will remove '/' from this list. */
488 const char *i386_comment_chars
= "#/";
489 #define SVR4_COMMENT_CHARS 1
490 #define PREFIX_SEPARATOR '\\'
493 const char *i386_comment_chars
= "#";
494 #define PREFIX_SEPARATOR '/'
497 /* This array holds the chars that only start a comment at the beginning of
498 a line. If the line seems to have the form '# 123 filename'
499 .line and .file directives will appear in the pre-processed output.
500 Note that input_file.c hand checks for '#' at the beginning of the
501 first line of the input file. This is because the compiler outputs
502 #NO_APP at the beginning of its output.
503 Also note that comments started like this one will always work if
504 '/' isn't otherwise defined. */
505 const char line_comment_chars
[] = "#/";
507 const char line_separator_chars
[] = ";";
509 /* Chars that can be used to separate mant from exp in floating point
511 const char EXP_CHARS
[] = "eE";
513 /* Chars that mean this number is a floating point constant
516 const char FLT_CHARS
[] = "fFdDxXhHbB";
/* Tables for lexical analysis.  Each table is indexed by an unsigned
   character value and is non-zero when that character belongs to the
   corresponding class.
   NOTE(review): presumably populated during assembler initialization —
   all entries are zero until then; confirm in md_begin.  */
static char mnemonic_chars[256];
static char register_chars[256];
static char operand_chars[256];
static char identifier_chars[256];

/* Lexical macros.  The parameter is parenthesized BEFORE the cast so
   that expression arguments (e.g. *p + 1) are evaluated as a whole and
   then narrowed; without the inner parentheses the cast would bind only
   to the first operand of the expression, indexing the wrong slot (and
   potentially out of bounds).  */
#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) (x)])
#define is_operand_char(x) (operand_chars[(unsigned char) (x)])
#define is_register_char(x) (register_chars[(unsigned char) (x)])
#define is_space_char(x) ((x) == ' ')
#define is_identifier_char(x) (identifier_chars[(unsigned char) (x)])
531 /* All non-digit non-letter characters that may occur in an operand. */
532 static char operand_special_chars
[] = "%$-+(,)*._~/<>|&^!:[@]";
/* md_assemble() always leaves the strings it's passed unaltered.  To
   effect this we maintain a stack of saved characters that we've smashed
   with '\0's (indicating end of strings for various sub-fields of the
   assembler instruction).  */
static char save_stack[32];
/* Top-of-stack pointer into save_stack.
   NOTE(review): no overflow/underflow checking — nesting depth is
   presumably bounded well below 32 by the parser; confirm.  */
static char *save_stack_p;
/* Push the character at S onto the save stack and terminate the string
   there with '\0'.  */
#define END_STRING_AND_SAVE(s) \
	do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
/* Undo the most recent END_STRING_AND_SAVE on S, restoring the smashed
   character.  Calls must pair in strict LIFO order.  */
#define RESTORE_END_STRING(s) \
	do { *(s) = *--save_stack_p; } while (0)
545 /* The instruction we're assembling. */
/* Possible templates for current insn.  */
static const templates *current_templates;

/* Per instruction expressionS buffers: max displacements & immediates.  */
static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];

/* Current operand we are working on; -1 means no operand is being
   parsed.  */
static int this_operand = -1;

/* We support four different modes.  FLAG_CODE variable is used to
   distinguish.  */
static enum flag_code flag_code;
/* Non-zero when emitting a 64-bit object.  */
static unsigned int object_64bit;
/* Non-zero when 64-bit relocations must be rejected.  */
static unsigned int disallow_64bit_reloc;
/* Non-zero when the target uses RELA-style relocations.  */
static int use_rela_relocations = 0;
/* __tls_get_addr/___tls_get_addr symbol for TLS.  */
static const char *tls_get_addr;
573 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
574 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
575 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
577 /* The ELF ABI to use. */
585 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
588 #if defined (TE_PE) || defined (TE_PEP)
589 /* Use big object file format. */
590 static int use_big_obj
= 0;
593 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
594 /* 1 if generating code for a shared library. */
595 static int shared
= 0;
598 /* 1 for intel syntax,
600 static int intel_syntax
= 0;
602 static enum x86_64_isa
604 amd64
= 1, /* AMD64 ISA. */
605 intel64
/* Intel64 ISA. */
608 /* 1 for intel mnemonic,
609 0 if att mnemonic. */
610 static int intel_mnemonic
= !SYSV386_COMPAT
;
612 /* 1 if pseudo registers are permitted. */
613 static int allow_pseudo_reg
= 0;
615 /* 1 if register prefix % not required. */
616 static int allow_naked_reg
= 0;
618 /* 1 if the assembler should add BND prefix for all control-transferring
619 instructions supporting it, even if this prefix wasn't specified
621 static int add_bnd_prefix
= 0;
623 /* 1 if pseudo index register, eiz/riz, is allowed.  */
624 static int allow_index_reg
= 0;
626 /* 1 if the assembler should ignore LOCK prefix, even if it was
627 specified explicitly. */
628 static int omit_lock_prefix
= 0;
630 /* 1 if the assembler should encode lfence, mfence, and sfence as
631 "lock addl $0, (%{re}sp)". */
632 static int avoid_fence
= 0;
634 /* 1 if lfence should be inserted after every load. */
635 static int lfence_after_load
= 0;
637 /* Non-zero if lfence should be inserted before indirect branch. */
638 static enum lfence_before_indirect_branch_kind
640 lfence_branch_none
= 0,
641 lfence_branch_register
,
642 lfence_branch_memory
,
645 lfence_before_indirect_branch
;
647 /* Non-zero if lfence should be inserted before ret. */
648 static enum lfence_before_ret_kind
650 lfence_before_ret_none
= 0,
651 lfence_before_ret_not
,
652 lfence_before_ret_or
,
653 lfence_before_ret_shl
657 /* Types of previous instruction: .byte or prefix.  */
672 /* 1 if the assembler should generate relax relocations. */
674 static int generate_relax_relocations
675 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
;
677 static enum check_kind
683 sse_check
, operand_check
= check_warning
;
685 /* Non-zero if branches should be aligned within power of 2 boundary. */
686 static int align_branch_power
= 0;
688 /* Types of branches to align. */
689 enum align_branch_kind
691 align_branch_none
= 0,
692 align_branch_jcc
= 1,
693 align_branch_fused
= 2,
694 align_branch_jmp
= 3,
695 align_branch_call
= 4,
696 align_branch_indirect
= 5,
700 /* Type bits of branches to align. */
701 enum align_branch_bit
703 align_branch_jcc_bit
= 1 << align_branch_jcc
,
704 align_branch_fused_bit
= 1 << align_branch_fused
,
705 align_branch_jmp_bit
= 1 << align_branch_jmp
,
706 align_branch_call_bit
= 1 << align_branch_call
,
707 align_branch_indirect_bit
= 1 << align_branch_indirect
,
708 align_branch_ret_bit
= 1 << align_branch_ret
711 static unsigned int align_branch
= (align_branch_jcc_bit
712 | align_branch_fused_bit
713 | align_branch_jmp_bit
);
715 /* Types of condition jump used by macro-fusion. */
718 mf_jcc_jo
= 0, /* base opcode 0x70 */
719 mf_jcc_jc
, /* base opcode 0x72 */
720 mf_jcc_je
, /* base opcode 0x74 */
721 mf_jcc_jna
, /* base opcode 0x76 */
722 mf_jcc_js
, /* base opcode 0x78 */
723 mf_jcc_jp
, /* base opcode 0x7a */
724 mf_jcc_jl
, /* base opcode 0x7c */
725 mf_jcc_jle
, /* base opcode 0x7e */
728 /* Types of compare flag-modifying instructions used by macro-fusion.  */
731 mf_cmp_test_and
, /* test/cmp */
732 mf_cmp_alu_cmp
, /* add/sub/cmp */
733 mf_cmp_incdec
/* inc/dec */
736 /* The maximum padding size for fused jcc. CMP like instruction can
737 be 9 bytes and jcc can be 6 bytes. Leave room just in case for
739 #define MAX_FUSED_JCC_PADDING_SIZE 20
741 /* The maximum number of prefixes added for an instruction. */
742 static unsigned int align_branch_prefix_size
= 5;
745 1. Clear the REX_W bit with register operand if possible.
746 2. Above plus use 128bit vector instruction to clear the full vector
749 static int optimize
= 0;
752 1. Clear the REX_W bit with register operand if possible.
753 2. Above plus use 128bit vector instruction to clear the full vector
755 3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to
758 static int optimize_for_space
= 0;
/* Register prefix used for error message.  "%" in AT&T syntax.  */
static const char *register_prefix = "%";

/* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
   leave, push, and pop instructions so that gcc has the same stack
   frame as in 32 bit mode.  '\0' means no forced suffix.  */
static char stackop_size = '\0';

/* Non-zero to optimize code alignment.  */
int optimize_align_code = 1;

/* Non-zero to quieten some warnings.  */
static int quiet_warnings = 0;
775 static const char *cpu_arch_name
= NULL
;
776 static char *cpu_sub_arch_name
= NULL
;
778 /* CPU feature flags. */
779 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
781 /* If we have selected a cpu we are generating instructions for. */
782 static int cpu_arch_tune_set
= 0;
784 /* Cpu we are generating instructions for. */
785 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
787 /* CPU feature flags of cpu we are generating instructions for. */
788 static i386_cpu_flags cpu_arch_tune_flags
;
790 /* CPU instruction set architecture used. */
791 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
793 /* CPU feature flags of instruction set architecture used. */
794 i386_cpu_flags cpu_arch_isa_flags
;
796 /* If set, conditional jumps are not automatically promoted to handle
797 larger than a byte offset. */
798 static unsigned int no_cond_jump_promotion
= 0;
800 /* Encode SSE instructions with VEX prefix. */
801 static unsigned int sse2avx
;
803 /* Encode aligned vector move as unaligned vector move. */
804 static unsigned int use_unaligned_vector_move
;
806 /* Encode scalar AVX instructions with specific vector length. */
813 /* Encode VEX WIG instructions with specific vex.w. */
820 /* Encode scalar EVEX LIG instructions with specific vector length. */
828 /* Encode EVEX WIG instructions with specific evex.w. */
835 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
836 static enum rc_type evexrcig
= rne
;
838 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
839 static symbolS
*GOT_symbol
;
841 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
842 unsigned int x86_dwarf2_return_column
;
844 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
845 int x86_cie_data_alignment
;
847 /* Interface to relax_segment.
848 There are 3 major relax states for 386 jump insns because the
849 different types of jumps add different sizes to frags when we're
850 figuring out what sort of jump to choose to reach a given label.
852 BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING are used to align
853 branches which are handled by md_estimate_size_before_relax() and
854 i386_generic_table_relax_frag(). */
857 #define UNCOND_JUMP 0
859 #define COND_JUMP86 2
860 #define BRANCH_PADDING 3
861 #define BRANCH_PREFIX 4
862 #define FUSED_JCC_PADDING 5
867 #define SMALL16 (SMALL | CODE16)
869 #define BIG16 (BIG | CODE16)
873 #define INLINE __inline__
879 #define ENCODE_RELAX_STATE(type, size) \
880 ((relax_substateT) (((type) << 2) | (size)))
881 #define TYPE_FROM_RELAX_STATE(s) \
883 #define DISP_SIZE_FROM_RELAX_STATE(s) \
884 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
886 /* This table is used by relax_frag to promote short jumps to long
887 ones where necessary. SMALL (short) jumps may be promoted to BIG
888 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
889 don't allow a short jump in a 32 bit code segment to be promoted to
890 a 16 bit offset jump because it's slower (requires data size
891 prefix), and doesn't work, unless the destination is in the bottom
892 64k of the code segment (The top 16 bits of eip are zeroed). */
894 const relax_typeS md_relax_table
[] =
897 1) most positive reach of this state,
898 2) most negative reach of this state,
899 3) how many bytes this mode will have in the variable part of the frag
900 4) which index into the table to try if we can't fit into this one. */
902 /* UNCOND_JUMP states. */
903 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
904 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
905 /* dword jmp adds 4 bytes to frag:
906 0 extra opcode bytes, 4 displacement bytes. */
908 /* word jmp adds 2 bytes to frag:
909 0 extra opcode bytes, 2 displacement bytes. */
912 /* COND_JUMP states. */
913 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
914 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
915 /* dword conditionals adds 5 bytes to frag:
916 1 extra opcode byte, 4 displacement bytes. */
918 /* word conditionals add 3 bytes to frag:
919 1 extra opcode byte, 2 displacement bytes. */
922 /* COND_JUMP86 states. */
923 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
924 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
925 /* dword conditionals adds 5 bytes to frag:
926 1 extra opcode byte, 4 displacement bytes. */
928 /* word conditionals add 4 bytes to frag:
929 1 displacement byte and a 3 byte long branch insn. */
933 static const arch_entry cpu_arch
[] =
935 /* Do not replace the first two entries - i386_target_format()
936 relies on them being there in this order. */
937 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32
,
938 CPU_GENERIC32_FLAGS
, 0 },
939 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64
,
940 CPU_GENERIC64_FLAGS
, 0 },
941 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN
,
943 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN
,
945 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN
,
947 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386
,
949 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486
,
951 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM
,
953 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO
,
955 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM
,
957 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO
,
958 CPU_PENTIUMPRO_FLAGS
, 0 },
959 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO
,
961 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO
,
963 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4
,
965 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA
,
967 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA
,
968 CPU_NOCONA_FLAGS
, 0 },
969 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE
,
971 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE
,
973 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2
,
974 CPU_CORE2_FLAGS
, 1 },
975 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2
,
976 CPU_CORE2_FLAGS
, 0 },
977 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7
,
978 CPU_COREI7_FLAGS
, 0 },
979 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM
,
981 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM
,
983 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU
,
984 CPU_IAMCU_FLAGS
, 0 },
985 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6
,
987 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6
,
989 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON
,
990 CPU_ATHLON_FLAGS
, 0 },
991 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8
,
993 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8
,
995 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8
,
997 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10
,
998 CPU_AMDFAM10_FLAGS
, 0 },
999 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD
,
1000 CPU_BDVER1_FLAGS
, 0 },
1001 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD
,
1002 CPU_BDVER2_FLAGS
, 0 },
1003 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD
,
1004 CPU_BDVER3_FLAGS
, 0 },
1005 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD
,
1006 CPU_BDVER4_FLAGS
, 0 },
1007 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER
,
1008 CPU_ZNVER1_FLAGS
, 0 },
1009 { STRING_COMMA_LEN ("znver2"), PROCESSOR_ZNVER
,
1010 CPU_ZNVER2_FLAGS
, 0 },
1011 { STRING_COMMA_LEN ("znver3"), PROCESSOR_ZNVER
,
1012 CPU_ZNVER3_FLAGS
, 0 },
1013 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT
,
1014 CPU_BTVER1_FLAGS
, 0 },
1015 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT
,
1016 CPU_BTVER2_FLAGS
, 0 },
1017 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN
,
1018 CPU_8087_FLAGS
, 0 },
1019 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN
,
1021 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN
,
1023 { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN
,
1025 { STRING_COMMA_LEN (".cmov"), PROCESSOR_UNKNOWN
,
1026 CPU_CMOV_FLAGS
, 0 },
1027 { STRING_COMMA_LEN (".fxsr"), PROCESSOR_UNKNOWN
,
1028 CPU_FXSR_FLAGS
, 0 },
1029 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN
,
1031 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN
,
1033 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN
,
1034 CPU_SSE2_FLAGS
, 0 },
1035 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN
,
1036 CPU_SSE3_FLAGS
, 0 },
1037 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1038 CPU_SSE4A_FLAGS
, 0 },
1039 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN
,
1040 CPU_SSSE3_FLAGS
, 0 },
1041 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN
,
1042 CPU_SSE4_1_FLAGS
, 0 },
1043 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN
,
1044 CPU_SSE4_2_FLAGS
, 0 },
1045 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN
,
1046 CPU_SSE4_2_FLAGS
, 0 },
1047 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN
,
1049 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN
,
1050 CPU_AVX2_FLAGS
, 0 },
1051 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN
,
1052 CPU_AVX512F_FLAGS
, 0 },
1053 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN
,
1054 CPU_AVX512CD_FLAGS
, 0 },
1055 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN
,
1056 CPU_AVX512ER_FLAGS
, 0 },
1057 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN
,
1058 CPU_AVX512PF_FLAGS
, 0 },
1059 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN
,
1060 CPU_AVX512DQ_FLAGS
, 0 },
1061 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN
,
1062 CPU_AVX512BW_FLAGS
, 0 },
1063 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN
,
1064 CPU_AVX512VL_FLAGS
, 0 },
1065 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN
,
1067 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN
,
1068 CPU_VMFUNC_FLAGS
, 0 },
1069 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN
,
1071 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN
,
1072 CPU_XSAVE_FLAGS
, 0 },
1073 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN
,
1074 CPU_XSAVEOPT_FLAGS
, 0 },
1075 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN
,
1076 CPU_XSAVEC_FLAGS
, 0 },
1077 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN
,
1078 CPU_XSAVES_FLAGS
, 0 },
1079 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN
,
1081 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN
,
1082 CPU_PCLMUL_FLAGS
, 0 },
1083 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN
,
1084 CPU_PCLMUL_FLAGS
, 1 },
1085 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN
,
1086 CPU_FSGSBASE_FLAGS
, 0 },
1087 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN
,
1088 CPU_RDRND_FLAGS
, 0 },
1089 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN
,
1090 CPU_F16C_FLAGS
, 0 },
1091 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN
,
1092 CPU_BMI2_FLAGS
, 0 },
1093 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN
,
1095 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN
,
1096 CPU_FMA4_FLAGS
, 0 },
1097 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN
,
1099 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN
,
1101 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN
,
1102 CPU_MOVBE_FLAGS
, 0 },
1103 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN
,
1104 CPU_CX16_FLAGS
, 0 },
1105 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN
,
1107 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN
,
1108 CPU_LZCNT_FLAGS
, 0 },
1109 { STRING_COMMA_LEN (".popcnt"), PROCESSOR_UNKNOWN
,
1110 CPU_POPCNT_FLAGS
, 0 },
1111 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN
,
1113 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN
,
1115 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN
,
1116 CPU_INVPCID_FLAGS
, 0 },
1117 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN
,
1118 CPU_CLFLUSH_FLAGS
, 0 },
1119 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN
,
1121 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN
,
1122 CPU_SYSCALL_FLAGS
, 0 },
1123 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN
,
1124 CPU_RDTSCP_FLAGS
, 0 },
1125 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN
,
1126 CPU_3DNOW_FLAGS
, 0 },
1127 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN
,
1128 CPU_3DNOWA_FLAGS
, 0 },
1129 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN
,
1130 CPU_PADLOCK_FLAGS
, 0 },
1131 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN
,
1132 CPU_SVME_FLAGS
, 1 },
1133 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN
,
1134 CPU_SVME_FLAGS
, 0 },
1135 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1136 CPU_SSE4A_FLAGS
, 0 },
1137 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN
,
1139 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN
,
1141 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN
,
1143 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN
,
1145 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN
,
1146 CPU_RDSEED_FLAGS
, 0 },
1147 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN
,
1148 CPU_PRFCHW_FLAGS
, 0 },
1149 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN
,
1150 CPU_SMAP_FLAGS
, 0 },
1151 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN
,
1153 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN
,
1155 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN
,
1156 CPU_CLFLUSHOPT_FLAGS
, 0 },
1157 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN
,
1158 CPU_PREFETCHWT1_FLAGS
, 0 },
1159 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN
,
1161 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN
,
1162 CPU_CLWB_FLAGS
, 0 },
1163 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN
,
1164 CPU_AVX512IFMA_FLAGS
, 0 },
1165 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN
,
1166 CPU_AVX512VBMI_FLAGS
, 0 },
1167 { STRING_COMMA_LEN (".avx512_4fmaps"), PROCESSOR_UNKNOWN
,
1168 CPU_AVX512_4FMAPS_FLAGS
, 0 },
1169 { STRING_COMMA_LEN (".avx512_4vnniw"), PROCESSOR_UNKNOWN
,
1170 CPU_AVX512_4VNNIW_FLAGS
, 0 },
1171 { STRING_COMMA_LEN (".avx512_vpopcntdq"), PROCESSOR_UNKNOWN
,
1172 CPU_AVX512_VPOPCNTDQ_FLAGS
, 0 },
1173 { STRING_COMMA_LEN (".avx512_vbmi2"), PROCESSOR_UNKNOWN
,
1174 CPU_AVX512_VBMI2_FLAGS
, 0 },
1175 { STRING_COMMA_LEN (".avx512_vnni"), PROCESSOR_UNKNOWN
,
1176 CPU_AVX512_VNNI_FLAGS
, 0 },
1177 { STRING_COMMA_LEN (".avx512_bitalg"), PROCESSOR_UNKNOWN
,
1178 CPU_AVX512_BITALG_FLAGS
, 0 },
1179 { STRING_COMMA_LEN (".avx_vnni"), PROCESSOR_UNKNOWN
,
1180 CPU_AVX_VNNI_FLAGS
, 0 },
1181 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN
,
1182 CPU_CLZERO_FLAGS
, 0 },
1183 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN
,
1184 CPU_MWAITX_FLAGS
, 0 },
1185 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN
,
1186 CPU_OSPKE_FLAGS
, 0 },
1187 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN
,
1188 CPU_RDPID_FLAGS
, 0 },
1189 { STRING_COMMA_LEN (".ptwrite"), PROCESSOR_UNKNOWN
,
1190 CPU_PTWRITE_FLAGS
, 0 },
1191 { STRING_COMMA_LEN (".ibt"), PROCESSOR_UNKNOWN
,
1193 { STRING_COMMA_LEN (".shstk"), PROCESSOR_UNKNOWN
,
1194 CPU_SHSTK_FLAGS
, 0 },
1195 { STRING_COMMA_LEN (".gfni"), PROCESSOR_UNKNOWN
,
1196 CPU_GFNI_FLAGS
, 0 },
1197 { STRING_COMMA_LEN (".vaes"), PROCESSOR_UNKNOWN
,
1198 CPU_VAES_FLAGS
, 0 },
1199 { STRING_COMMA_LEN (".vpclmulqdq"), PROCESSOR_UNKNOWN
,
1200 CPU_VPCLMULQDQ_FLAGS
, 0 },
1201 { STRING_COMMA_LEN (".wbnoinvd"), PROCESSOR_UNKNOWN
,
1202 CPU_WBNOINVD_FLAGS
, 0 },
1203 { STRING_COMMA_LEN (".pconfig"), PROCESSOR_UNKNOWN
,
1204 CPU_PCONFIG_FLAGS
, 0 },
1205 { STRING_COMMA_LEN (".waitpkg"), PROCESSOR_UNKNOWN
,
1206 CPU_WAITPKG_FLAGS
, 0 },
1207 { STRING_COMMA_LEN (".cldemote"), PROCESSOR_UNKNOWN
,
1208 CPU_CLDEMOTE_FLAGS
, 0 },
1209 { STRING_COMMA_LEN (".amx_int8"), PROCESSOR_UNKNOWN
,
1210 CPU_AMX_INT8_FLAGS
, 0 },
1211 { STRING_COMMA_LEN (".amx_bf16"), PROCESSOR_UNKNOWN
,
1212 CPU_AMX_BF16_FLAGS
, 0 },
1213 { STRING_COMMA_LEN (".amx_tile"), PROCESSOR_UNKNOWN
,
1214 CPU_AMX_TILE_FLAGS
, 0 },
1215 { STRING_COMMA_LEN (".movdiri"), PROCESSOR_UNKNOWN
,
1216 CPU_MOVDIRI_FLAGS
, 0 },
1217 { STRING_COMMA_LEN (".movdir64b"), PROCESSOR_UNKNOWN
,
1218 CPU_MOVDIR64B_FLAGS
, 0 },
1219 { STRING_COMMA_LEN (".avx512_bf16"), PROCESSOR_UNKNOWN
,
1220 CPU_AVX512_BF16_FLAGS
, 0 },
1221 { STRING_COMMA_LEN (".avx512_vp2intersect"), PROCESSOR_UNKNOWN
,
1222 CPU_AVX512_VP2INTERSECT_FLAGS
, 0 },
1223 { STRING_COMMA_LEN (".tdx"), PROCESSOR_UNKNOWN
,
1225 { STRING_COMMA_LEN (".enqcmd"), PROCESSOR_UNKNOWN
,
1226 CPU_ENQCMD_FLAGS
, 0 },
1227 { STRING_COMMA_LEN (".serialize"), PROCESSOR_UNKNOWN
,
1228 CPU_SERIALIZE_FLAGS
, 0 },
1229 { STRING_COMMA_LEN (".rdpru"), PROCESSOR_UNKNOWN
,
1230 CPU_RDPRU_FLAGS
, 0 },
1231 { STRING_COMMA_LEN (".mcommit"), PROCESSOR_UNKNOWN
,
1232 CPU_MCOMMIT_FLAGS
, 0 },
1233 { STRING_COMMA_LEN (".sev_es"), PROCESSOR_UNKNOWN
,
1234 CPU_SEV_ES_FLAGS
, 0 },
1235 { STRING_COMMA_LEN (".tsxldtrk"), PROCESSOR_UNKNOWN
,
1236 CPU_TSXLDTRK_FLAGS
, 0 },
1237 { STRING_COMMA_LEN (".kl"), PROCESSOR_UNKNOWN
,
1239 { STRING_COMMA_LEN (".widekl"), PROCESSOR_UNKNOWN
,
1240 CPU_WIDEKL_FLAGS
, 0 },
1241 { STRING_COMMA_LEN (".uintr"), PROCESSOR_UNKNOWN
,
1242 CPU_UINTR_FLAGS
, 0 },
1243 { STRING_COMMA_LEN (".hreset"), PROCESSOR_UNKNOWN
,
1244 CPU_HRESET_FLAGS
, 0 },
1245 { STRING_COMMA_LEN (".avx512_fp16"), PROCESSOR_UNKNOWN
,
1246 CPU_AVX512_FP16_FLAGS
, 0 },
1249 static const noarch_entry cpu_noarch
[] =
1251 { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS
},
1252 { STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS
},
1253 { STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS
},
1254 { STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS
},
1255 { STRING_COMMA_LEN ("nocmov"), CPU_ANY_CMOV_FLAGS
},
1256 { STRING_COMMA_LEN ("nofxsr"), CPU_ANY_FXSR_FLAGS
},
1257 { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS
},
1258 { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS
},
1259 { STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS
},
1260 { STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS
},
1261 { STRING_COMMA_LEN ("nosse4a"), CPU_ANY_SSE4A_FLAGS
},
1262 { STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS
},
1263 { STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS
},
1264 { STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS
},
1265 { STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS
},
1266 { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS
},
1267 { STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS
},
1268 { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS
},
1269 { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS
},
1270 { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS
},
1271 { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS
},
1272 { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS
},
1273 { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS
},
1274 { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS
},
1275 { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS
},
1276 { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS
},
1277 { STRING_COMMA_LEN ("noavx512_4fmaps"), CPU_ANY_AVX512_4FMAPS_FLAGS
},
1278 { STRING_COMMA_LEN ("noavx512_4vnniw"), CPU_ANY_AVX512_4VNNIW_FLAGS
},
1279 { STRING_COMMA_LEN ("noavx512_vpopcntdq"), CPU_ANY_AVX512_VPOPCNTDQ_FLAGS
},
1280 { STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS
},
1281 { STRING_COMMA_LEN ("noavx512_vnni"), CPU_ANY_AVX512_VNNI_FLAGS
},
1282 { STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS
},
1283 { STRING_COMMA_LEN ("noavx_vnni"), CPU_ANY_AVX_VNNI_FLAGS
},
1284 { STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS
},
1285 { STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS
},
1286 { STRING_COMMA_LEN ("noamx_int8"), CPU_ANY_AMX_INT8_FLAGS
},
1287 { STRING_COMMA_LEN ("noamx_bf16"), CPU_ANY_AMX_BF16_FLAGS
},
1288 { STRING_COMMA_LEN ("noamx_tile"), CPU_ANY_AMX_TILE_FLAGS
},
1289 { STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS
},
1290 { STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS
},
1291 { STRING_COMMA_LEN ("noavx512_bf16"), CPU_ANY_AVX512_BF16_FLAGS
},
1292 { STRING_COMMA_LEN ("noavx512_vp2intersect"),
1293 CPU_ANY_AVX512_VP2INTERSECT_FLAGS
},
1294 { STRING_COMMA_LEN ("notdx"), CPU_ANY_TDX_FLAGS
},
1295 { STRING_COMMA_LEN ("noenqcmd"), CPU_ANY_ENQCMD_FLAGS
},
1296 { STRING_COMMA_LEN ("noserialize"), CPU_ANY_SERIALIZE_FLAGS
},
1297 { STRING_COMMA_LEN ("notsxldtrk"), CPU_ANY_TSXLDTRK_FLAGS
},
1298 { STRING_COMMA_LEN ("nokl"), CPU_ANY_KL_FLAGS
},
1299 { STRING_COMMA_LEN ("nowidekl"), CPU_ANY_WIDEKL_FLAGS
},
1300 { STRING_COMMA_LEN ("nouintr"), CPU_ANY_UINTR_FLAGS
},
1301 { STRING_COMMA_LEN ("nohreset"), CPU_ANY_HRESET_FLAGS
},
1302 { STRING_COMMA_LEN ("noavx512_fp16"), CPU_ANY_AVX512_FP16_FLAGS
},
1306 /* Like s_lcomm_internal in gas/read.c but the alignment string
1307 is allowed to be optional. */
1310 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
1317 && *input_line_pointer
== ',')
1319 align
= parse_align (needs_align
- 1);
1321 if (align
== (addressT
) -1)
1336 bss_alloc (symbolP
, size
, align
);
1341 pe_lcomm (int needs_align
)
1343 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
1347 const pseudo_typeS md_pseudo_table
[] =
1349 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1350 {"align", s_align_bytes
, 0},
1352 {"align", s_align_ptwo
, 0},
1354 {"arch", set_cpu_arch
, 0},
1358 {"lcomm", pe_lcomm
, 1},
1360 {"ffloat", float_cons
, 'f'},
1361 {"dfloat", float_cons
, 'd'},
1362 {"tfloat", float_cons
, 'x'},
1363 {"hfloat", float_cons
, 'h'},
1364 {"bfloat16", float_cons
, 'b'},
1366 {"slong", signed_cons
, 4},
1367 {"noopt", s_ignore
, 0},
1368 {"optim", s_ignore
, 0},
1369 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
1370 {"code16", set_code_flag
, CODE_16BIT
},
1371 {"code32", set_code_flag
, CODE_32BIT
},
1373 {"code64", set_code_flag
, CODE_64BIT
},
1375 {"intel_syntax", set_intel_syntax
, 1},
1376 {"att_syntax", set_intel_syntax
, 0},
1377 {"intel_mnemonic", set_intel_mnemonic
, 1},
1378 {"att_mnemonic", set_intel_mnemonic
, 0},
1379 {"allow_index_reg", set_allow_index_reg
, 1},
1380 {"disallow_index_reg", set_allow_index_reg
, 0},
1381 {"sse_check", set_check
, 0},
1382 {"operand_check", set_check
, 1},
1383 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1384 {"largecomm", handle_large_common
, 0},
1386 {"file", dwarf2_directive_file
, 0},
1387 {"loc", dwarf2_directive_loc
, 0},
1388 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
1391 {"secrel32", pe_directive_secrel
, 0},
1396 /* For interface with expression (). */
1397 extern char *input_line_pointer
;
1399 /* Hash table for instruction mnemonic lookup. */
1400 static htab_t op_hash
;
1402 /* Hash table for register lookup. */
1403 static htab_t reg_hash
;
/* Various efficient no-op patterns for aligning code labels.
   Note: Don't try to assemble the instructions in the comments.
   0L and 0w are not legal.  */
static const unsigned char f32_1[] =
  { 0x90 };				/* nop */
/* Multi-byte filler sequences used when padding with NOPs: the f32_*
   arrays are 32-bit forms, the f16_* arrays 16-bit forms.  Each array
   holds exactly as many bytes as its numeric suffix indicates.  */
static const unsigned char f32_2[] =
  { 0x66, 0x90 };			/* xchg %ax,%ax */
static const unsigned char f32_3[] =
  { 0x8d, 0x76, 0x00 };			/* leal 0(%esi),%esi */
static const unsigned char f32_4[] =
  { 0x8d, 0x74, 0x26, 0x00 };		/* leal 0(%esi,1),%esi */
static const unsigned char f32_6[] =
  { 0x8d, 0xb6, 0x00, 0x00, 0x00, 0x00 };	/* leal 0L(%esi),%esi */
static const unsigned char f32_7[] =
  { 0x8d, 0xb4, 0x26, 0x00, 0x00, 0x00, 0x00 };	/* leal 0L(%esi,1),%esi */
static const unsigned char f16_3[] =
  { 0x8d, 0x74, 0x00 };			/* lea 0(%si),%si */
static const unsigned char f16_4[] =
  { 0x8d, 0xb4, 0x00, 0x00 };		/* lea 0W(%si),%si */
/* Opcode bytes for the jumps emitted to skip over long NOP padding.  */
static const unsigned char jump_disp8[] =
  { 0xeb };				/* jmp disp8 */
static const unsigned char jump32_disp32[] =
  { 0xe9 };				/* jmp disp32 */
static const unsigned char jump16_disp32[] =
  { 0x66, 0xe9 };			/* jmp disp32 */
1430 /* 32-bit NOPs patterns. */
1431 static const unsigned char *const f32_patt
[] = {
1432 f32_1
, f32_2
, f32_3
, f32_4
, NULL
, f32_6
, f32_7
1434 /* 16-bit NOPs patterns. */
1435 static const unsigned char *const f16_patt
[] = {
1436 f32_1
, f32_2
, f16_3
, f16_4
/* Alternative (long-form 0F 1F) NOP encodings, usable in 32-bit and
   64-bit code on processors that support them.  */
/* nopl (%[re]ax) */
static const unsigned char alt_3[] =
  { 0x0f, 0x1f, 0x00 };
/* nopl 0(%[re]ax) */
static const unsigned char alt_4[] =
  { 0x0f, 0x1f, 0x40, 0x00 };
/* nopl 0(%[re]ax,%[re]ax,1) */
static const unsigned char alt_5[] =
  { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
/* nopw 0(%[re]ax,%[re]ax,1) */
static const unsigned char alt_6[] =
  { 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 };
/* nopl 0L(%[re]ax) */
static const unsigned char alt_7[] =
  { 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00 };
/* nopl 0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_8[] =
  { 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 };
/* nopw 0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_9[] =
  { 0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 };
/* nopw %cs:0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_10[] =
  { 0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 };
/* data16 nopw %cs:0L(%eax,%eax,1) */
static const unsigned char alt_11[] =
  { 0x66, 0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 };
1465 /* 32-bit and 64-bit NOPs patterns. */
1466 static const unsigned char *const alt_patt
[] = {
1467 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1468 alt_9
, alt_10
, alt_11
1471 /* Genenerate COUNT bytes of NOPs to WHERE from PATT with the maximum
1472 size of a single NOP instruction MAX_SINGLE_NOP_SIZE. */
1475 i386_output_nops (char *where
, const unsigned char *const *patt
,
1476 int count
, int max_single_nop_size
)
1479 /* Place the longer NOP first. */
1482 const unsigned char *nops
;
1484 if (max_single_nop_size
< 1)
1486 as_fatal (_("i386_output_nops called to generate nops of at most %d bytes!"),
1487 max_single_nop_size
);
1491 nops
= patt
[max_single_nop_size
- 1];
1493 /* Use the smaller one if the requsted one isn't available. */
1496 max_single_nop_size
--;
1497 nops
= patt
[max_single_nop_size
- 1];
1500 last
= count
% max_single_nop_size
;
1503 for (offset
= 0; offset
< count
; offset
+= max_single_nop_size
)
1504 memcpy (where
+ offset
, nops
, max_single_nop_size
);
1508 nops
= patt
[last
- 1];
1511 /* Use the smaller one plus one-byte NOP if the needed one
1514 nops
= patt
[last
- 1];
1515 memcpy (where
+ offset
, nops
, last
);
1516 where
[offset
+ last
] = *patt
[0];
1519 memcpy (where
+ offset
, nops
, last
);
1524 fits_in_imm7 (offsetT num
)
1526 return (num
& 0x7f) == num
;
1530 fits_in_imm31 (offsetT num
)
1532 return (num
& 0x7fffffff) == num
;
1535 /* Genenerate COUNT bytes of NOPs to WHERE with the maximum size of a
1536 single NOP instruction LIMIT. */
1539 i386_generate_nops (fragS
*fragP
, char *where
, offsetT count
, int limit
)
1541 const unsigned char *const *patt
= NULL
;
1542 int max_single_nop_size
;
1543 /* Maximum number of NOPs before switching to jump over NOPs. */
1544 int max_number_of_nops
;
1546 switch (fragP
->fr_type
)
1551 case rs_machine_dependent
:
1552 /* Allow NOP padding for jumps and calls. */
1553 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
1554 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
1561 /* We need to decide which NOP sequence to use for 32bit and
1562 64bit. When -mtune= is used:
1564 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1565 PROCESSOR_GENERIC32, f32_patt will be used.
1566 2. For the rest, alt_patt will be used.
1568 When -mtune= isn't used, alt_patt will be used if
1569 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1572 When -march= or .arch is used, we can't use anything beyond
1573 cpu_arch_isa_flags. */
1575 if (flag_code
== CODE_16BIT
)
1578 max_single_nop_size
= sizeof (f16_patt
) / sizeof (f16_patt
[0]);
1579 /* Limit number of NOPs to 2 in 16-bit mode. */
1580 max_number_of_nops
= 2;
1584 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1586 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1587 switch (cpu_arch_tune
)
1589 case PROCESSOR_UNKNOWN
:
1590 /* We use cpu_arch_isa_flags to check if we SHOULD
1591 optimize with nops. */
1592 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1597 case PROCESSOR_PENTIUM4
:
1598 case PROCESSOR_NOCONA
:
1599 case PROCESSOR_CORE
:
1600 case PROCESSOR_CORE2
:
1601 case PROCESSOR_COREI7
:
1602 case PROCESSOR_L1OM
:
1603 case PROCESSOR_K1OM
:
1604 case PROCESSOR_GENERIC64
:
1606 case PROCESSOR_ATHLON
:
1608 case PROCESSOR_AMDFAM10
:
1610 case PROCESSOR_ZNVER
:
1614 case PROCESSOR_I386
:
1615 case PROCESSOR_I486
:
1616 case PROCESSOR_PENTIUM
:
1617 case PROCESSOR_PENTIUMPRO
:
1618 case PROCESSOR_IAMCU
:
1619 case PROCESSOR_GENERIC32
:
1626 switch (fragP
->tc_frag_data
.tune
)
1628 case PROCESSOR_UNKNOWN
:
1629 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1630 PROCESSOR_UNKNOWN. */
1634 case PROCESSOR_I386
:
1635 case PROCESSOR_I486
:
1636 case PROCESSOR_PENTIUM
:
1637 case PROCESSOR_IAMCU
:
1639 case PROCESSOR_ATHLON
:
1641 case PROCESSOR_AMDFAM10
:
1643 case PROCESSOR_ZNVER
:
1645 case PROCESSOR_GENERIC32
:
1646 /* We use cpu_arch_isa_flags to check if we CAN optimize
1648 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1653 case PROCESSOR_PENTIUMPRO
:
1654 case PROCESSOR_PENTIUM4
:
1655 case PROCESSOR_NOCONA
:
1656 case PROCESSOR_CORE
:
1657 case PROCESSOR_CORE2
:
1658 case PROCESSOR_COREI7
:
1659 case PROCESSOR_L1OM
:
1660 case PROCESSOR_K1OM
:
1661 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1666 case PROCESSOR_GENERIC64
:
1672 if (patt
== f32_patt
)
1674 max_single_nop_size
= sizeof (f32_patt
) / sizeof (f32_patt
[0]);
1675 /* Limit number of NOPs to 2 for older processors. */
1676 max_number_of_nops
= 2;
1680 max_single_nop_size
= sizeof (alt_patt
) / sizeof (alt_patt
[0]);
1681 /* Limit number of NOPs to 7 for newer processors. */
1682 max_number_of_nops
= 7;
1687 limit
= max_single_nop_size
;
1689 if (fragP
->fr_type
== rs_fill_nop
)
1691 /* Output NOPs for .nop directive. */
1692 if (limit
> max_single_nop_size
)
1694 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1695 _("invalid single nop size: %d "
1696 "(expect within [0, %d])"),
1697 limit
, max_single_nop_size
);
1701 else if (fragP
->fr_type
!= rs_machine_dependent
)
1702 fragP
->fr_var
= count
;
1704 if ((count
/ max_single_nop_size
) > max_number_of_nops
)
1706 /* Generate jump over NOPs. */
1707 offsetT disp
= count
- 2;
1708 if (fits_in_imm7 (disp
))
1710 /* Use "jmp disp8" if possible. */
1712 where
[0] = jump_disp8
[0];
1718 unsigned int size_of_jump
;
1720 if (flag_code
== CODE_16BIT
)
1722 where
[0] = jump16_disp32
[0];
1723 where
[1] = jump16_disp32
[1];
1728 where
[0] = jump32_disp32
[0];
1732 count
-= size_of_jump
+ 4;
1733 if (!fits_in_imm31 (count
))
1735 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1736 _("jump over nop padding out of range"));
1740 md_number_to_chars (where
+ size_of_jump
, count
, 4);
1741 where
+= size_of_jump
+ 4;
1745 /* Generate multiple NOPs. */
1746 i386_output_nops (where
, patt
, count
, limit
);
1750 operand_type_all_zero (const union i386_operand_type
*x
)
1752 switch (ARRAY_SIZE(x
->array
))
1763 return !x
->array
[0];
1770 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1772 switch (ARRAY_SIZE(x
->array
))
1788 x
->bitfield
.class = ClassNone
;
1789 x
->bitfield
.instance
= InstanceNone
;
1793 operand_type_equal (const union i386_operand_type
*x
,
1794 const union i386_operand_type
*y
)
1796 switch (ARRAY_SIZE(x
->array
))
1799 if (x
->array
[2] != y
->array
[2])
1803 if (x
->array
[1] != y
->array
[1])
1807 return x
->array
[0] == y
->array
[0];
1815 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1817 switch (ARRAY_SIZE(x
->array
))
1832 return !x
->array
[0];
1839 cpu_flags_equal (const union i386_cpu_flags
*x
,
1840 const union i386_cpu_flags
*y
)
1842 switch (ARRAY_SIZE(x
->array
))
1845 if (x
->array
[3] != y
->array
[3])
1849 if (x
->array
[2] != y
->array
[2])
1853 if (x
->array
[1] != y
->array
[1])
1857 return x
->array
[0] == y
->array
[0];
1865 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1867 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1868 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1871 static INLINE i386_cpu_flags
1872 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1874 switch (ARRAY_SIZE (x
.array
))
1877 x
.array
[3] &= y
.array
[3];
1880 x
.array
[2] &= y
.array
[2];
1883 x
.array
[1] &= y
.array
[1];
1886 x
.array
[0] &= y
.array
[0];
1894 static INLINE i386_cpu_flags
1895 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1897 switch (ARRAY_SIZE (x
.array
))
1900 x
.array
[3] |= y
.array
[3];
1903 x
.array
[2] |= y
.array
[2];
1906 x
.array
[1] |= y
.array
[1];
1909 x
.array
[0] |= y
.array
[0];
1917 static INLINE i386_cpu_flags
1918 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1920 switch (ARRAY_SIZE (x
.array
))
1923 x
.array
[3] &= ~y
.array
[3];
1926 x
.array
[2] &= ~y
.array
[2];
1929 x
.array
[1] &= ~y
.array
[1];
1932 x
.array
[0] &= ~y
.array
[0];
1940 static const i386_cpu_flags avx512
= CPU_ANY_AVX512F_FLAGS
;
1942 #define CPU_FLAGS_ARCH_MATCH 0x1
1943 #define CPU_FLAGS_64BIT_MATCH 0x2
1945 #define CPU_FLAGS_PERFECT_MATCH \
1946 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_64BIT_MATCH)
1948 /* Return CPU flags match bits. */
1951 cpu_flags_match (const insn_template
*t
)
1953 i386_cpu_flags x
= t
->cpu_flags
;
1954 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1956 x
.bitfield
.cpu64
= 0;
1957 x
.bitfield
.cpuno64
= 0;
1959 if (cpu_flags_all_zero (&x
))
1961 /* This instruction is available on all archs. */
1962 match
|= CPU_FLAGS_ARCH_MATCH
;
1966 /* This instruction is available only on some archs. */
1967 i386_cpu_flags cpu
= cpu_arch_flags
;
1969 /* AVX512VL is no standalone feature - match it and then strip it. */
1970 if (x
.bitfield
.cpuavx512vl
&& !cpu
.bitfield
.cpuavx512vl
)
1972 x
.bitfield
.cpuavx512vl
= 0;
1974 cpu
= cpu_flags_and (x
, cpu
);
1975 if (!cpu_flags_all_zero (&cpu
))
1977 if (x
.bitfield
.cpuavx
)
1979 /* We need to check a few extra flags with AVX. */
1980 if (cpu
.bitfield
.cpuavx
1981 && (!t
->opcode_modifier
.sse2avx
1982 || (sse2avx
&& !i
.prefix
[DATA_PREFIX
]))
1983 && (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1984 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1985 && (!x
.bitfield
.cpupclmul
|| cpu
.bitfield
.cpupclmul
))
1986 match
|= CPU_FLAGS_ARCH_MATCH
;
1988 else if (x
.bitfield
.cpuavx512f
)
1990 /* We need to check a few extra flags with AVX512F. */
1991 if (cpu
.bitfield
.cpuavx512f
1992 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1993 && (!x
.bitfield
.cpuvaes
|| cpu
.bitfield
.cpuvaes
)
1994 && (!x
.bitfield
.cpuvpclmulqdq
|| cpu
.bitfield
.cpuvpclmulqdq
))
1995 match
|= CPU_FLAGS_ARCH_MATCH
;
1998 match
|= CPU_FLAGS_ARCH_MATCH
;
2004 static INLINE i386_operand_type
2005 operand_type_and (i386_operand_type x
, i386_operand_type y
)
2007 if (x
.bitfield
.class != y
.bitfield
.class)
2008 x
.bitfield
.class = ClassNone
;
2009 if (x
.bitfield
.instance
!= y
.bitfield
.instance
)
2010 x
.bitfield
.instance
= InstanceNone
;
2012 switch (ARRAY_SIZE (x
.array
))
2015 x
.array
[2] &= y
.array
[2];
2018 x
.array
[1] &= y
.array
[1];
2021 x
.array
[0] &= y
.array
[0];
2029 static INLINE i386_operand_type
2030 operand_type_and_not (i386_operand_type x
, i386_operand_type y
)
2032 gas_assert (y
.bitfield
.class == ClassNone
);
2033 gas_assert (y
.bitfield
.instance
== InstanceNone
);
2035 switch (ARRAY_SIZE (x
.array
))
2038 x
.array
[2] &= ~y
.array
[2];
2041 x
.array
[1] &= ~y
.array
[1];
2044 x
.array
[0] &= ~y
.array
[0];
2052 static INLINE i386_operand_type
2053 operand_type_or (i386_operand_type x
, i386_operand_type y
)
2055 gas_assert (x
.bitfield
.class == ClassNone
||
2056 y
.bitfield
.class == ClassNone
||
2057 x
.bitfield
.class == y
.bitfield
.class);
2058 gas_assert (x
.bitfield
.instance
== InstanceNone
||
2059 y
.bitfield
.instance
== InstanceNone
||
2060 x
.bitfield
.instance
== y
.bitfield
.instance
);
2062 switch (ARRAY_SIZE (x
.array
))
2065 x
.array
[2] |= y
.array
[2];
2068 x
.array
[1] |= y
.array
[1];
2071 x
.array
[0] |= y
.array
[0];
2079 static INLINE i386_operand_type
2080 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
2082 gas_assert (y
.bitfield
.class == ClassNone
);
2083 gas_assert (y
.bitfield
.instance
== InstanceNone
);
2085 switch (ARRAY_SIZE (x
.array
))
2088 x
.array
[2] ^= y
.array
[2];
2091 x
.array
[1] ^= y
.array
[1];
2094 x
.array
[0] ^= y
.array
[0];
2102 static const i386_operand_type disp16_32
= OPERAND_TYPE_DISP16_32
;
2103 static const i386_operand_type anydisp
= OPERAND_TYPE_ANYDISP
;
2104 static const i386_operand_type anyimm
= OPERAND_TYPE_ANYIMM
;
2105 static const i386_operand_type regxmm
= OPERAND_TYPE_REGXMM
;
2106 static const i386_operand_type regmask
= OPERAND_TYPE_REGMASK
;
2107 static const i386_operand_type imm8
= OPERAND_TYPE_IMM8
;
2108 static const i386_operand_type imm8s
= OPERAND_TYPE_IMM8S
;
2109 static const i386_operand_type imm16
= OPERAND_TYPE_IMM16
;
2110 static const i386_operand_type imm32
= OPERAND_TYPE_IMM32
;
2111 static const i386_operand_type imm32s
= OPERAND_TYPE_IMM32S
;
2112 static const i386_operand_type imm64
= OPERAND_TYPE_IMM64
;
2113 static const i386_operand_type imm16_32
= OPERAND_TYPE_IMM16_32
;
2114 static const i386_operand_type imm16_32s
= OPERAND_TYPE_IMM16_32S
;
2115 static const i386_operand_type imm16_32_32s
= OPERAND_TYPE_IMM16_32_32S
;
2126 operand_type_check (i386_operand_type t
, enum operand_type c
)
2131 return t
.bitfield
.class == Reg
;
2134 return (t
.bitfield
.imm8
2138 || t
.bitfield
.imm32s
2139 || t
.bitfield
.imm64
);
2142 return (t
.bitfield
.disp8
2143 || t
.bitfield
.disp16
2144 || t
.bitfield
.disp32
2145 || t
.bitfield
.disp32s
2146 || t
.bitfield
.disp64
);
2149 return (t
.bitfield
.disp8
2150 || t
.bitfield
.disp16
2151 || t
.bitfield
.disp32
2152 || t
.bitfield
.disp32s
2153 || t
.bitfield
.disp64
2154 || t
.bitfield
.baseindex
);
2163 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
2164 between operand GIVEN and opeand WANTED for instruction template T. */
2167 match_operand_size (const insn_template
*t
, unsigned int wanted
,
2170 return !((i
.types
[given
].bitfield
.byte
2171 && !t
->operand_types
[wanted
].bitfield
.byte
)
2172 || (i
.types
[given
].bitfield
.word
2173 && !t
->operand_types
[wanted
].bitfield
.word
)
2174 || (i
.types
[given
].bitfield
.dword
2175 && !t
->operand_types
[wanted
].bitfield
.dword
)
2176 || (i
.types
[given
].bitfield
.qword
2177 && !t
->operand_types
[wanted
].bitfield
.qword
)
2178 || (i
.types
[given
].bitfield
.tbyte
2179 && !t
->operand_types
[wanted
].bitfield
.tbyte
));
2182 /* Return 1 if there is no conflict in SIMD register between operand
2183 GIVEN and opeand WANTED for instruction template T. */
2186 match_simd_size (const insn_template
*t
, unsigned int wanted
,
2189 return !((i
.types
[given
].bitfield
.xmmword
2190 && !t
->operand_types
[wanted
].bitfield
.xmmword
)
2191 || (i
.types
[given
].bitfield
.ymmword
2192 && !t
->operand_types
[wanted
].bitfield
.ymmword
)
2193 || (i
.types
[given
].bitfield
.zmmword
2194 && !t
->operand_types
[wanted
].bitfield
.zmmword
)
2195 || (i
.types
[given
].bitfield
.tmmword
2196 && !t
->operand_types
[wanted
].bitfield
.tmmword
));
2199 /* Return 1 if there is no conflict in any size between operand GIVEN
2200 and opeand WANTED for instruction template T. */
2203 match_mem_size (const insn_template
*t
, unsigned int wanted
,
2206 return (match_operand_size (t
, wanted
, given
)
2207 && !((i
.types
[given
].bitfield
.unspecified
2208 && !i
.broadcast
.type
2209 && !t
->operand_types
[wanted
].bitfield
.unspecified
)
2210 || (i
.types
[given
].bitfield
.fword
2211 && !t
->operand_types
[wanted
].bitfield
.fword
)
2212 /* For scalar opcode templates to allow register and memory
2213 operands at the same time, some special casing is needed
2214 here. Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
2215 down-conversion vpmov*. */
2216 || ((t
->operand_types
[wanted
].bitfield
.class == RegSIMD
2217 && t
->operand_types
[wanted
].bitfield
.byte
2218 + t
->operand_types
[wanted
].bitfield
.word
2219 + t
->operand_types
[wanted
].bitfield
.dword
2220 + t
->operand_types
[wanted
].bitfield
.qword
2221 > !!t
->opcode_modifier
.broadcast
)
2222 ? (i
.types
[given
].bitfield
.xmmword
2223 || i
.types
[given
].bitfield
.ymmword
2224 || i
.types
[given
].bitfield
.zmmword
)
2225 : !match_simd_size(t
, wanted
, given
))));
2228 /* Return value has MATCH_STRAIGHT set if there is no size conflict on any
2229 operands for instruction template T, and it has MATCH_REVERSE set if there
2230 is no size conflict on any operands for the template with operands reversed
2231 (and the template allows for reversing in the first place). */
2233 #define MATCH_STRAIGHT 1
2234 #define MATCH_REVERSE 2
2236 static INLINE
unsigned int
2237 operand_size_match (const insn_template
*t
)
2239 unsigned int j
, match
= MATCH_STRAIGHT
;
2241 /* Don't check non-absolute jump instructions. */
2242 if (t
->opcode_modifier
.jump
2243 && t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
2246 /* Check memory and accumulator operand size. */
2247 for (j
= 0; j
< i
.operands
; j
++)
2249 if (i
.types
[j
].bitfield
.class != Reg
2250 && i
.types
[j
].bitfield
.class != RegSIMD
2251 && t
->opcode_modifier
.anysize
)
2254 if (t
->operand_types
[j
].bitfield
.class == Reg
2255 && !match_operand_size (t
, j
, j
))
2261 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2262 && !match_simd_size (t
, j
, j
))
2268 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2269 && (!match_operand_size (t
, j
, j
) || !match_simd_size (t
, j
, j
)))
2275 if ((i
.flags
[j
] & Operand_Mem
) && !match_mem_size (t
, j
, j
))
2282 if (!t
->opcode_modifier
.d
)
2286 i
.error
= operand_size_mismatch
;
2290 /* Check reverse. */
2291 gas_assert (i
.operands
>= 2 && i
.operands
<= 3);
2293 for (j
= 0; j
< i
.operands
; j
++)
2295 unsigned int given
= i
.operands
- j
- 1;
2297 if (t
->operand_types
[j
].bitfield
.class == Reg
2298 && !match_operand_size (t
, j
, given
))
2301 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2302 && !match_simd_size (t
, j
, given
))
2305 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2306 && (!match_operand_size (t
, j
, given
)
2307 || !match_simd_size (t
, j
, given
)))
2310 if ((i
.flags
[given
] & Operand_Mem
) && !match_mem_size (t
, j
, given
))
2314 return match
| MATCH_REVERSE
;
2318 operand_type_match (i386_operand_type overlap
,
2319 i386_operand_type given
)
2321 i386_operand_type temp
= overlap
;
2323 temp
.bitfield
.unspecified
= 0;
2324 temp
.bitfield
.byte
= 0;
2325 temp
.bitfield
.word
= 0;
2326 temp
.bitfield
.dword
= 0;
2327 temp
.bitfield
.fword
= 0;
2328 temp
.bitfield
.qword
= 0;
2329 temp
.bitfield
.tbyte
= 0;
2330 temp
.bitfield
.xmmword
= 0;
2331 temp
.bitfield
.ymmword
= 0;
2332 temp
.bitfield
.zmmword
= 0;
2333 temp
.bitfield
.tmmword
= 0;
2334 if (operand_type_all_zero (&temp
))
2337 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
)
2341 i
.error
= operand_type_mismatch
;
2345 /* If given types g0 and g1 are registers they must be of the same type
2346 unless the expected operand type register overlap is null.
2347 Some Intel syntax memory operand size checking also happens here. */
2350 operand_type_register_match (i386_operand_type g0
,
2351 i386_operand_type t0
,
2352 i386_operand_type g1
,
2353 i386_operand_type t1
)
2355 if (g0
.bitfield
.class != Reg
2356 && g0
.bitfield
.class != RegSIMD
2357 && (!operand_type_check (g0
, anymem
)
2358 || g0
.bitfield
.unspecified
2359 || (t0
.bitfield
.class != Reg
2360 && t0
.bitfield
.class != RegSIMD
)))
2363 if (g1
.bitfield
.class != Reg
2364 && g1
.bitfield
.class != RegSIMD
2365 && (!operand_type_check (g1
, anymem
)
2366 || g1
.bitfield
.unspecified
2367 || (t1
.bitfield
.class != Reg
2368 && t1
.bitfield
.class != RegSIMD
)))
2371 if (g0
.bitfield
.byte
== g1
.bitfield
.byte
2372 && g0
.bitfield
.word
== g1
.bitfield
.word
2373 && g0
.bitfield
.dword
== g1
.bitfield
.dword
2374 && g0
.bitfield
.qword
== g1
.bitfield
.qword
2375 && g0
.bitfield
.xmmword
== g1
.bitfield
.xmmword
2376 && g0
.bitfield
.ymmword
== g1
.bitfield
.ymmword
2377 && g0
.bitfield
.zmmword
== g1
.bitfield
.zmmword
)
2380 if (!(t0
.bitfield
.byte
& t1
.bitfield
.byte
)
2381 && !(t0
.bitfield
.word
& t1
.bitfield
.word
)
2382 && !(t0
.bitfield
.dword
& t1
.bitfield
.dword
)
2383 && !(t0
.bitfield
.qword
& t1
.bitfield
.qword
)
2384 && !(t0
.bitfield
.xmmword
& t1
.bitfield
.xmmword
)
2385 && !(t0
.bitfield
.ymmword
& t1
.bitfield
.ymmword
)
2386 && !(t0
.bitfield
.zmmword
& t1
.bitfield
.zmmword
))
2389 i
.error
= register_type_mismatch
;
2394 static INLINE
unsigned int
2395 register_number (const reg_entry
*r
)
2397 unsigned int nr
= r
->reg_num
;
2399 if (r
->reg_flags
& RegRex
)
2402 if (r
->reg_flags
& RegVRex
)
2408 static INLINE
unsigned int
2409 mode_from_disp_size (i386_operand_type t
)
2411 if (t
.bitfield
.disp8
)
2413 else if (t
.bitfield
.disp16
2414 || t
.bitfield
.disp32
2415 || t
.bitfield
.disp32s
)
2422 fits_in_signed_byte (addressT num
)
2424 return num
+ 0x80 <= 0xff;
2428 fits_in_unsigned_byte (addressT num
)
2434 fits_in_unsigned_word (addressT num
)
2436 return num
<= 0xffff;
2440 fits_in_signed_word (addressT num
)
2442 return num
+ 0x8000 <= 0xffff;
2446 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED
)
2451 return num
+ 0x80000000 <= 0xffffffff;
2453 } /* fits_in_signed_long() */
2456 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED
)
2461 return num
<= 0xffffffff;
2463 } /* fits_in_unsigned_long() */
2465 static INLINE valueT
extend_to_32bit_address (addressT num
)
2468 if (fits_in_unsigned_long(num
))
2469 return (num
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
2471 if (!fits_in_signed_long (num
))
2472 return num
& 0xffffffff;
2479 fits_in_disp8 (offsetT num
)
2481 int shift
= i
.memshift
;
2487 mask
= (1 << shift
) - 1;
2489 /* Return 0 if NUM isn't properly aligned. */
2493 /* Check if NUM will fit in 8bit after shift. */
2494 return fits_in_signed_byte (num
>> shift
);
2498 fits_in_imm4 (offsetT num
)
2500 return (num
& 0xf) == num
;
2503 static i386_operand_type
2504 smallest_imm_type (offsetT num
)
2506 i386_operand_type t
;
2508 operand_type_set (&t
, 0);
2509 t
.bitfield
.imm64
= 1;
2511 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
2513 /* This code is disabled on the 486 because all the Imm1 forms
2514 in the opcode table are slower on the i486. They're the
2515 versions with the implicitly specified single-position
2516 displacement, which has another syntax if you really want to
2518 t
.bitfield
.imm1
= 1;
2519 t
.bitfield
.imm8
= 1;
2520 t
.bitfield
.imm8s
= 1;
2521 t
.bitfield
.imm16
= 1;
2522 t
.bitfield
.imm32
= 1;
2523 t
.bitfield
.imm32s
= 1;
2525 else if (fits_in_signed_byte (num
))
2527 t
.bitfield
.imm8
= 1;
2528 t
.bitfield
.imm8s
= 1;
2529 t
.bitfield
.imm16
= 1;
2530 t
.bitfield
.imm32
= 1;
2531 t
.bitfield
.imm32s
= 1;
2533 else if (fits_in_unsigned_byte (num
))
2535 t
.bitfield
.imm8
= 1;
2536 t
.bitfield
.imm16
= 1;
2537 t
.bitfield
.imm32
= 1;
2538 t
.bitfield
.imm32s
= 1;
2540 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
2542 t
.bitfield
.imm16
= 1;
2543 t
.bitfield
.imm32
= 1;
2544 t
.bitfield
.imm32s
= 1;
2546 else if (fits_in_signed_long (num
))
2548 t
.bitfield
.imm32
= 1;
2549 t
.bitfield
.imm32s
= 1;
2551 else if (fits_in_unsigned_long (num
))
2552 t
.bitfield
.imm32
= 1;
2558 offset_in_range (offsetT val
, int size
)
2564 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
2565 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
2567 case 4: mask
= ((addressT
) 1 << 32) - 1; break;
2569 case sizeof (val
): return val
;
2573 if ((val
& ~mask
) != 0 && (-val
& ~mask
) != 0)
2576 char masked_buf
[128];
2578 /* Coded this way in order to ease translation. */
2579 sprintf_vma (val_buf
, val
);
2580 sprintf_vma (masked_buf
, val
& mask
);
2581 as_warn (_("0x%s shortened to 0x%s"), val_buf
, masked_buf
);
2597 a. PREFIX_EXIST if attempting to add a prefix where one from the
2598 same class already exists.
2599 b. PREFIX_LOCK if lock prefix is added.
2600 c. PREFIX_REP if rep/repne prefix is added.
2601 d. PREFIX_DS if ds prefix is added.
2602 e. PREFIX_OTHER if other prefix is added.
2605 static enum PREFIX_GROUP
2606 add_prefix (unsigned int prefix
)
2608 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2611 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2612 && flag_code
== CODE_64BIT
)
2614 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2615 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_R
)
2616 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_X
)
2617 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_B
))
2628 case DS_PREFIX_OPCODE
:
2631 case CS_PREFIX_OPCODE
:
2632 case ES_PREFIX_OPCODE
:
2633 case FS_PREFIX_OPCODE
:
2634 case GS_PREFIX_OPCODE
:
2635 case SS_PREFIX_OPCODE
:
2639 case REPNE_PREFIX_OPCODE
:
2640 case REPE_PREFIX_OPCODE
:
2645 case LOCK_PREFIX_OPCODE
:
2654 case ADDR_PREFIX_OPCODE
:
2658 case DATA_PREFIX_OPCODE
:
2662 if (i
.prefix
[q
] != 0)
2670 i
.prefix
[q
] |= prefix
;
2673 as_bad (_("same type of prefix used twice"));
2679 update_code_flag (int value
, int check
)
2681 PRINTF_LIKE ((*as_error
));
2683 flag_code
= (enum flag_code
) value
;
2684 if (flag_code
== CODE_64BIT
)
2686 cpu_arch_flags
.bitfield
.cpu64
= 1;
2687 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2691 cpu_arch_flags
.bitfield
.cpu64
= 0;
2692 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2694 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2697 as_error
= as_fatal
;
2700 (*as_error
) (_("64bit mode not supported on `%s'."),
2701 cpu_arch_name
? cpu_arch_name
: default_arch
);
2703 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2706 as_error
= as_fatal
;
2709 (*as_error
) (_("32bit mode not supported on `%s'."),
2710 cpu_arch_name
? cpu_arch_name
: default_arch
);
2712 stackop_size
= '\0';
/* Non-checking wrapper around update_code_flag for .code directives.  */

static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2722 set_16bit_gcc_code_flag (int new_code_flag
)
2724 flag_code
= (enum flag_code
) new_code_flag
;
2725 if (flag_code
!= CODE_16BIT
)
2727 cpu_arch_flags
.bitfield
.cpu64
= 0;
2728 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2729 stackop_size
= LONG_MNEM_SUFFIX
;
2733 set_intel_syntax (int syntax_flag
)
2735 /* Find out if register prefixing is specified. */
2736 int ask_naked_reg
= 0;
2739 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2742 int e
= get_symbol_name (&string
);
2744 if (strcmp (string
, "prefix") == 0)
2746 else if (strcmp (string
, "noprefix") == 0)
2749 as_bad (_("bad argument to syntax directive."));
2750 (void) restore_line_pointer (e
);
2752 demand_empty_rest_of_line ();
2754 intel_syntax
= syntax_flag
;
2756 if (ask_naked_reg
== 0)
2757 allow_naked_reg
= (intel_syntax
2758 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2760 allow_naked_reg
= (ask_naked_reg
< 0);
2762 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2764 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2765 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2766 register_prefix
= allow_naked_reg
? "" : "%";
2770 set_intel_mnemonic (int mnemonic_flag
)
2772 intel_mnemonic
= mnemonic_flag
;
2776 set_allow_index_reg (int flag
)
2778 allow_index_reg
= flag
;
2782 set_check (int what
)
2784 enum check_kind
*kind
;
2789 kind
= &operand_check
;
2800 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2803 int e
= get_symbol_name (&string
);
2805 if (strcmp (string
, "none") == 0)
2807 else if (strcmp (string
, "warning") == 0)
2808 *kind
= check_warning
;
2809 else if (strcmp (string
, "error") == 0)
2810 *kind
= check_error
;
2812 as_bad (_("bad argument to %s_check directive."), str
);
2813 (void) restore_line_pointer (e
);
2816 as_bad (_("missing argument for %s_check directive"), str
);
2818 demand_empty_rest_of_line ();
2822 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2823 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2825 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2826 static const char *arch
;
2828 /* Intel LIOM is only supported on ELF. */
2834 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2835 use default_arch. */
2836 arch
= cpu_arch_name
;
2838 arch
= default_arch
;
2841 /* If we are targeting Intel MCU, we must enable it. */
2842 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_IAMCU
2843 || new_flag
.bitfield
.cpuiamcu
)
2846 /* If we are targeting Intel L1OM, we must enable it. */
2847 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_L1OM
2848 || new_flag
.bitfield
.cpul1om
)
2851 /* If we are targeting Intel K1OM, we must enable it. */
2852 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_K1OM
2853 || new_flag
.bitfield
.cpuk1om
)
2856 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2861 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2865 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2868 int e
= get_symbol_name (&string
);
2870 i386_cpu_flags flags
;
2872 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2874 if (strcmp (string
, cpu_arch
[j
].name
) == 0)
2876 check_cpu_arch_compatible (string
, cpu_arch
[j
].flags
);
2880 cpu_arch_name
= cpu_arch
[j
].name
;
2881 cpu_sub_arch_name
= NULL
;
2882 cpu_arch_flags
= cpu_arch
[j
].flags
;
2883 if (flag_code
== CODE_64BIT
)
2885 cpu_arch_flags
.bitfield
.cpu64
= 1;
2886 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2890 cpu_arch_flags
.bitfield
.cpu64
= 0;
2891 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2893 cpu_arch_isa
= cpu_arch
[j
].type
;
2894 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
2895 if (!cpu_arch_tune_set
)
2897 cpu_arch_tune
= cpu_arch_isa
;
2898 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2903 flags
= cpu_flags_or (cpu_arch_flags
,
2906 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2908 if (cpu_sub_arch_name
)
2910 char *name
= cpu_sub_arch_name
;
2911 cpu_sub_arch_name
= concat (name
,
2913 (const char *) NULL
);
2917 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
2918 cpu_arch_flags
= flags
;
2919 cpu_arch_isa_flags
= flags
;
2923 = cpu_flags_or (cpu_arch_isa_flags
,
2925 (void) restore_line_pointer (e
);
2926 demand_empty_rest_of_line ();
2931 if (*string
== '.' && j
>= ARRAY_SIZE (cpu_arch
))
2933 /* Disable an ISA extension. */
2934 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
2935 if (strcmp (string
+ 1, cpu_noarch
[j
].name
) == 0)
2937 flags
= cpu_flags_and_not (cpu_arch_flags
,
2938 cpu_noarch
[j
].flags
);
2939 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2941 if (cpu_sub_arch_name
)
2943 char *name
= cpu_sub_arch_name
;
2944 cpu_sub_arch_name
= concat (name
, string
,
2945 (const char *) NULL
);
2949 cpu_sub_arch_name
= xstrdup (string
);
2950 cpu_arch_flags
= flags
;
2951 cpu_arch_isa_flags
= flags
;
2953 (void) restore_line_pointer (e
);
2954 demand_empty_rest_of_line ();
2958 j
= ARRAY_SIZE (cpu_arch
);
2961 if (j
>= ARRAY_SIZE (cpu_arch
))
2962 as_bad (_("no such architecture: `%s'"), string
);
2964 *input_line_pointer
= e
;
2967 as_bad (_("missing cpu architecture"));
2969 no_cond_jump_promotion
= 0;
2970 if (*input_line_pointer
== ','
2971 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2976 ++input_line_pointer
;
2977 e
= get_symbol_name (&string
);
2979 if (strcmp (string
, "nojumps") == 0)
2980 no_cond_jump_promotion
= 1;
2981 else if (strcmp (string
, "jumps") == 0)
2984 as_bad (_("no such architecture modifier: `%s'"), string
);
2986 (void) restore_line_pointer (e
);
2989 demand_empty_rest_of_line ();
2992 enum bfd_architecture
2995 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2997 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2998 || flag_code
!= CODE_64BIT
)
2999 as_fatal (_("Intel L1OM is 64bit ELF only"));
3000 return bfd_arch_l1om
;
3002 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
3004 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
3005 || flag_code
!= CODE_64BIT
)
3006 as_fatal (_("Intel K1OM is 64bit ELF only"));
3007 return bfd_arch_k1om
;
3009 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
3011 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
3012 || flag_code
== CODE_64BIT
)
3013 as_fatal (_("Intel MCU is 32bit ELF only"));
3014 return bfd_arch_iamcu
;
3017 return bfd_arch_i386
;
3023 if (startswith (default_arch
, "x86_64"))
3025 if (cpu_arch_isa
== PROCESSOR_L1OM
)
3027 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
3028 || default_arch
[6] != '\0')
3029 as_fatal (_("Intel L1OM is 64bit ELF only"));
3030 return bfd_mach_l1om
;
3032 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
3034 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
3035 || default_arch
[6] != '\0')
3036 as_fatal (_("Intel K1OM is 64bit ELF only"));
3037 return bfd_mach_k1om
;
3039 else if (default_arch
[6] == '\0')
3040 return bfd_mach_x86_64
;
3042 return bfd_mach_x64_32
;
3044 else if (!strcmp (default_arch
, "i386")
3045 || !strcmp (default_arch
, "iamcu"))
3047 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
3049 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
)
3050 as_fatal (_("Intel MCU is 32bit ELF only"));
3051 return bfd_mach_i386_iamcu
;
3054 return bfd_mach_i386_i386
;
3057 as_fatal (_("unknown architecture"));
3063 /* Support pseudo prefixes like {disp32}. */
3064 lex_type
['{'] = LEX_BEGIN_NAME
;
3066 /* Initialize op_hash hash table. */
3067 op_hash
= str_htab_create ();
3070 const insn_template
*optab
;
3071 templates
*core_optab
;
3073 /* Setup for loop. */
3075 core_optab
= XNEW (templates
);
3076 core_optab
->start
= optab
;
3081 if (optab
->name
== NULL
3082 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
3084 /* different name --> ship out current template list;
3085 add to hash table; & begin anew. */
3086 core_optab
->end
= optab
;
3087 if (str_hash_insert (op_hash
, (optab
- 1)->name
, core_optab
, 0))
3088 as_fatal (_("duplicate %s"), (optab
- 1)->name
);
3090 if (optab
->name
== NULL
)
3092 core_optab
= XNEW (templates
);
3093 core_optab
->start
= optab
;
3098 /* Initialize reg_hash hash table. */
3099 reg_hash
= str_htab_create ();
3101 const reg_entry
*regtab
;
3102 unsigned int regtab_size
= i386_regtab_size
;
3104 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
3106 switch (regtab
->reg_type
.bitfield
.class)
3109 if (regtab
->reg_type
.bitfield
.dword
)
3111 if (regtab
->reg_type
.bitfield
.instance
== Accum
)
3114 else if (regtab
->reg_type
.bitfield
.tbyte
)
3116 /* There's no point inserting st(<N>) in the hash table, as
3117 parentheses aren't included in register_chars[] anyway. */
3118 if (regtab
->reg_type
.bitfield
.instance
!= Accum
)
3125 switch (regtab
->reg_num
)
3127 case 0: reg_es
= regtab
; break;
3128 case 2: reg_ss
= regtab
; break;
3129 case 3: reg_ds
= regtab
; break;
3134 if (!regtab
->reg_num
)
3139 if (str_hash_insert (reg_hash
, regtab
->reg_name
, regtab
, 0) != NULL
)
3140 as_fatal (_("duplicate %s"), regtab
->reg_name
);
3144 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
3149 for (c
= 0; c
< 256; c
++)
3151 if (ISDIGIT (c
) || ISLOWER (c
))
3153 mnemonic_chars
[c
] = c
;
3154 register_chars
[c
] = c
;
3155 operand_chars
[c
] = c
;
3157 else if (ISUPPER (c
))
3159 mnemonic_chars
[c
] = TOLOWER (c
);
3160 register_chars
[c
] = mnemonic_chars
[c
];
3161 operand_chars
[c
] = c
;
3163 else if (c
== '{' || c
== '}')
3165 mnemonic_chars
[c
] = c
;
3166 operand_chars
[c
] = c
;
3168 #ifdef SVR4_COMMENT_CHARS
3169 else if (c
== '\\' && strchr (i386_comment_chars
, '/'))
3170 operand_chars
[c
] = c
;
3173 if (ISALPHA (c
) || ISDIGIT (c
))
3174 identifier_chars
[c
] = c
;
3177 identifier_chars
[c
] = c
;
3178 operand_chars
[c
] = c
;
3183 identifier_chars
['@'] = '@';
3186 identifier_chars
['?'] = '?';
3187 operand_chars
['?'] = '?';
3189 mnemonic_chars
['_'] = '_';
3190 mnemonic_chars
['-'] = '-';
3191 mnemonic_chars
['.'] = '.';
3192 identifier_chars
['_'] = '_';
3193 identifier_chars
['.'] = '.';
3195 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
3196 operand_chars
[(unsigned char) *p
] = *p
;
3199 if (flag_code
== CODE_64BIT
)
3201 #if defined (OBJ_COFF) && defined (TE_PE)
3202 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
3205 x86_dwarf2_return_column
= 16;
3207 x86_cie_data_alignment
= -8;
3211 x86_dwarf2_return_column
= 8;
3212 x86_cie_data_alignment
= -4;
3215 /* NB: FUSED_JCC_PADDING frag must have sufficient room so that it
3216 can be turned into BRANCH_PREFIX frag. */
3217 if (align_branch_prefix_size
> MAX_FUSED_JCC_PADDING_SIZE
)
3222 i386_print_statistics (FILE *file
)
3224 htab_print_statistics (file
, "i386 opcode", op_hash
);
3225 htab_print_statistics (file
, "i386 register", reg_hash
);
3230 /* Debugging routines for md_assemble. */
3231 static void pte (insn_template
*);
3232 static void pt (i386_operand_type
);
3233 static void pe (expressionS
*);
3234 static void ps (symbolS
*);
3237 pi (const char *line
, i386_insn
*x
)
3241 fprintf (stdout
, "%s: template ", line
);
3243 fprintf (stdout
, " address: base %s index %s scale %x\n",
3244 x
->base_reg
? x
->base_reg
->reg_name
: "none",
3245 x
->index_reg
? x
->index_reg
->reg_name
: "none",
3246 x
->log2_scale_factor
);
3247 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
3248 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
3249 fprintf (stdout
, " sib: base %x index %x scale %x\n",
3250 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
3251 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
3252 (x
->rex
& REX_W
) != 0,
3253 (x
->rex
& REX_R
) != 0,
3254 (x
->rex
& REX_X
) != 0,
3255 (x
->rex
& REX_B
) != 0);
3256 for (j
= 0; j
< x
->operands
; j
++)
3258 fprintf (stdout
, " #%d: ", j
+ 1);
3260 fprintf (stdout
, "\n");
3261 if (x
->types
[j
].bitfield
.class == Reg
3262 || x
->types
[j
].bitfield
.class == RegMMX
3263 || x
->types
[j
].bitfield
.class == RegSIMD
3264 || x
->types
[j
].bitfield
.class == RegMask
3265 || x
->types
[j
].bitfield
.class == SReg
3266 || x
->types
[j
].bitfield
.class == RegCR
3267 || x
->types
[j
].bitfield
.class == RegDR
3268 || x
->types
[j
].bitfield
.class == RegTR
3269 || x
->types
[j
].bitfield
.class == RegBND
)
3270 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
3271 if (operand_type_check (x
->types
[j
], imm
))
3273 if (operand_type_check (x
->types
[j
], disp
))
3274 pe (x
->op
[j
].disps
);
3279 pte (insn_template
*t
)
3281 static const unsigned char opc_pfx
[] = { 0, 0x66, 0xf3, 0xf2 };
3282 static const char *const opc_spc
[] = {
3283 NULL
, "0f", "0f38", "0f3a", NULL
, "evexmap5", "evexmap6", NULL
,
3284 "XOP08", "XOP09", "XOP0A",
3288 fprintf (stdout
, " %d operands ", t
->operands
);
3289 if (opc_pfx
[t
->opcode_modifier
.opcodeprefix
])
3290 fprintf (stdout
, "pfx %x ", opc_pfx
[t
->opcode_modifier
.opcodeprefix
]);
3291 if (opc_spc
[t
->opcode_modifier
.opcodespace
])
3292 fprintf (stdout
, "space %s ", opc_spc
[t
->opcode_modifier
.opcodespace
]);
3293 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
3294 if (t
->extension_opcode
!= None
)
3295 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
3296 if (t
->opcode_modifier
.d
)
3297 fprintf (stdout
, "D");
3298 if (t
->opcode_modifier
.w
)
3299 fprintf (stdout
, "W");
3300 fprintf (stdout
, "\n");
3301 for (j
= 0; j
< t
->operands
; j
++)
3303 fprintf (stdout
, " #%d type ", j
+ 1);
3304 pt (t
->operand_types
[j
]);
3305 fprintf (stdout
, "\n");
3312 fprintf (stdout
, " operation %d\n", e
->X_op
);
3313 fprintf (stdout
, " add_number %" BFD_VMA_FMT
"d (%" BFD_VMA_FMT
"x)\n",
3314 e
->X_add_number
, e
->X_add_number
);
3315 if (e
->X_add_symbol
)
3317 fprintf (stdout
, " add_symbol ");
3318 ps (e
->X_add_symbol
);
3319 fprintf (stdout
, "\n");
3323 fprintf (stdout
, " op_symbol ");
3324 ps (e
->X_op_symbol
);
3325 fprintf (stdout
, "\n");
3332 fprintf (stdout
, "%s type %s%s",
3334 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
3335 segment_name (S_GET_SEGMENT (s
)));
3338 static struct type_name
3340 i386_operand_type mask
;
3343 const type_names
[] =
3345 { OPERAND_TYPE_REG8
, "r8" },
3346 { OPERAND_TYPE_REG16
, "r16" },
3347 { OPERAND_TYPE_REG32
, "r32" },
3348 { OPERAND_TYPE_REG64
, "r64" },
3349 { OPERAND_TYPE_ACC8
, "acc8" },
3350 { OPERAND_TYPE_ACC16
, "acc16" },
3351 { OPERAND_TYPE_ACC32
, "acc32" },
3352 { OPERAND_TYPE_ACC64
, "acc64" },
3353 { OPERAND_TYPE_IMM8
, "i8" },
3354 { OPERAND_TYPE_IMM8
, "i8s" },
3355 { OPERAND_TYPE_IMM16
, "i16" },
3356 { OPERAND_TYPE_IMM32
, "i32" },
3357 { OPERAND_TYPE_IMM32S
, "i32s" },
3358 { OPERAND_TYPE_IMM64
, "i64" },
3359 { OPERAND_TYPE_IMM1
, "i1" },
3360 { OPERAND_TYPE_BASEINDEX
, "BaseIndex" },
3361 { OPERAND_TYPE_DISP8
, "d8" },
3362 { OPERAND_TYPE_DISP16
, "d16" },
3363 { OPERAND_TYPE_DISP32
, "d32" },
3364 { OPERAND_TYPE_DISP32S
, "d32s" },
3365 { OPERAND_TYPE_DISP64
, "d64" },
3366 { OPERAND_TYPE_INOUTPORTREG
, "InOutPortReg" },
3367 { OPERAND_TYPE_SHIFTCOUNT
, "ShiftCount" },
3368 { OPERAND_TYPE_CONTROL
, "control reg" },
3369 { OPERAND_TYPE_TEST
, "test reg" },
3370 { OPERAND_TYPE_DEBUG
, "debug reg" },
3371 { OPERAND_TYPE_FLOATREG
, "FReg" },
3372 { OPERAND_TYPE_FLOATACC
, "FAcc" },
3373 { OPERAND_TYPE_SREG
, "SReg" },
3374 { OPERAND_TYPE_REGMMX
, "rMMX" },
3375 { OPERAND_TYPE_REGXMM
, "rXMM" },
3376 { OPERAND_TYPE_REGYMM
, "rYMM" },
3377 { OPERAND_TYPE_REGZMM
, "rZMM" },
3378 { OPERAND_TYPE_REGTMM
, "rTMM" },
3379 { OPERAND_TYPE_REGMASK
, "Mask reg" },
3383 pt (i386_operand_type t
)
3386 i386_operand_type a
;
3388 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
3390 a
= operand_type_and (t
, type_names
[j
].mask
);
3391 if (operand_type_equal (&a
, &type_names
[j
].mask
))
3392 fprintf (stdout
, "%s, ", type_names
[j
].name
);
3397 #endif /* DEBUG386 */
3399 static bfd_reloc_code_real_type
3400 reloc (unsigned int size
,
3403 bfd_reloc_code_real_type other
)
3405 if (other
!= NO_RELOC
)
3407 reloc_howto_type
*rel
;
3412 case BFD_RELOC_X86_64_GOT32
:
3413 return BFD_RELOC_X86_64_GOT64
;
3415 case BFD_RELOC_X86_64_GOTPLT64
:
3416 return BFD_RELOC_X86_64_GOTPLT64
;
3418 case BFD_RELOC_X86_64_PLTOFF64
:
3419 return BFD_RELOC_X86_64_PLTOFF64
;
3421 case BFD_RELOC_X86_64_GOTPC32
:
3422 other
= BFD_RELOC_X86_64_GOTPC64
;
3424 case BFD_RELOC_X86_64_GOTPCREL
:
3425 other
= BFD_RELOC_X86_64_GOTPCREL64
;
3427 case BFD_RELOC_X86_64_TPOFF32
:
3428 other
= BFD_RELOC_X86_64_TPOFF64
;
3430 case BFD_RELOC_X86_64_DTPOFF32
:
3431 other
= BFD_RELOC_X86_64_DTPOFF64
;
3437 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3438 if (other
== BFD_RELOC_SIZE32
)
3441 other
= BFD_RELOC_SIZE64
;
3444 as_bad (_("there are no pc-relative size relocations"));
3450 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
3451 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
3454 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
3456 as_bad (_("unknown relocation (%u)"), other
);
3457 else if (size
!= bfd_get_reloc_size (rel
))
3458 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3459 bfd_get_reloc_size (rel
),
3461 else if (pcrel
&& !rel
->pc_relative
)
3462 as_bad (_("non-pc-relative relocation for pc-relative field"));
3463 else if ((rel
->complain_on_overflow
== complain_overflow_signed
3465 || (rel
->complain_on_overflow
== complain_overflow_unsigned
3467 as_bad (_("relocated field and relocation type differ in signedness"));
3476 as_bad (_("there are no unsigned pc-relative relocations"));
3479 case 1: return BFD_RELOC_8_PCREL
;
3480 case 2: return BFD_RELOC_16_PCREL
;
3481 case 4: return BFD_RELOC_32_PCREL
;
3482 case 8: return BFD_RELOC_64_PCREL
;
3484 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
3491 case 4: return BFD_RELOC_X86_64_32S
;
3496 case 1: return BFD_RELOC_8
;
3497 case 2: return BFD_RELOC_16
;
3498 case 4: return BFD_RELOC_32
;
3499 case 8: return BFD_RELOC_64
;
3501 as_bad (_("cannot do %s %u byte relocation"),
3502 sign
> 0 ? "signed" : "unsigned", size
);
3508 /* Here we decide which fixups can be adjusted to make them relative to
3509 the beginning of the section instead of the symbol. Basically we need
3510 to make sure that the dynamic relocations are done correctly, so in
3511 some cases we force the original symbol to be used. */
3514 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
3516 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3520 /* Don't adjust pc-relative references to merge sections in 64-bit
3522 if (use_rela_relocations
3523 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
3527 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3528 and changed later by validate_fix. */
3529 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
3530 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
3533 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3534 for size relocations. */
3535 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
3536 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
3537 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
3538 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
3539 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32X
3540 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
3541 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
3542 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
3543 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
3544 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
3545 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
3546 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
3547 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
3548 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
3549 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
3550 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
3551 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
3552 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCRELX
3553 || fixP
->fx_r_type
== BFD_RELOC_X86_64_REX_GOTPCRELX
3554 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
3555 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
3556 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
3557 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
3558 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
3559 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
3560 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
3561 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
3562 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
3563 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
3564 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
3565 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
3572 want_disp32 (const insn_template
*t
)
3574 return flag_code
!= CODE_64BIT
3575 || i
.prefix
[ADDR_PREFIX
]
3576 || (t
->base_opcode
== 0x8d
3577 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
3578 && (!i
.types
[1].bitfield
.qword
3579 || t
->opcode_modifier
.size
== SIZE32
));
/* Classify x87 MNEMONIC for Intel syntax memory operand sizing:
   0 = not a math op, 1 = plain FP op, 2 = integer op, 3 = control op.  */

static int
intel_float_operand (const char *mnemonic)
{
  /* Note that the value returned is meaningful only for opcodes with (memory)
     operands, hence the code here is free to improperly handle opcodes that
     have no operands (for better performance and smaller code). */

  if (mnemonic[0] != 'f')
    return 0; /* non-math */

  switch (mnemonic[1])
    {
    /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
       the fs segment override prefix not currently handled because no
       call path can make opcodes without operands get here */
    case 'i':
      return 2 /* integer op */;
    case 'l':
      if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
	return 3; /* fldcw/fldenv */
      break;
    case 'n':
      if (mnemonic[2] != 'o' /* fnop */)
	return 3; /* non-waiting control op */
      break;
    case 'r':
      if (mnemonic[2] == 's')
	return 3; /* frstor/frstpm */
      break;
    case 's':
      if (mnemonic[2] == 'a')
	return 3; /* fsave */
      if (mnemonic[2] == 't')
	{
	  switch (mnemonic[3])
	    {
	    case 'c': /* fstcw */
	    case 'd': /* fstdw */
	    case 'e': /* fstenv */
	    case 's': /* fsts[gw] */
	      return 3;
	    }
	}
      break;
    case 'x':
      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
	return 0; /* fxsave/fxrstor are not really math ops */
      break;
    }

  return 1;
}
3636 install_template (const insn_template
*t
)
3642 /* Note that for pseudo prefixes this produces a length of 1. But for them
3643 the length isn't interesting at all. */
3644 for (l
= 1; l
< 4; ++l
)
3645 if (!(t
->base_opcode
>> (8 * l
)))
3648 i
.opcode_length
= l
;
3651 /* Build the VEX prefix. */
3654 build_vex_prefix (const insn_template
*t
)
3656 unsigned int register_specifier
;
3657 unsigned int vector_length
;
3660 /* Check register specifier. */
3661 if (i
.vex
.register_specifier
)
3663 register_specifier
=
3664 ~register_number (i
.vex
.register_specifier
) & 0xf;
3665 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
3668 register_specifier
= 0xf;
3670 /* Use 2-byte VEX prefix by swapping destination and source operand
3671 if there are more than 1 register operand. */
3672 if (i
.reg_operands
> 1
3673 && i
.vec_encoding
!= vex_encoding_vex3
3674 && i
.dir_encoding
== dir_encoding_default
3675 && i
.operands
== i
.reg_operands
3676 && operand_type_equal (&i
.types
[0], &i
.types
[i
.operands
- 1])
3677 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
3678 && (i
.tm
.opcode_modifier
.load
|| i
.tm
.opcode_modifier
.d
)
3681 unsigned int xchg
= i
.operands
- 1;
3682 union i386_op temp_op
;
3683 i386_operand_type temp_type
;
3685 temp_type
= i
.types
[xchg
];
3686 i
.types
[xchg
] = i
.types
[0];
3687 i
.types
[0] = temp_type
;
3688 temp_op
= i
.op
[xchg
];
3689 i
.op
[xchg
] = i
.op
[0];
3692 gas_assert (i
.rm
.mode
== 3);
3696 i
.rm
.regmem
= i
.rm
.reg
;
3699 if (i
.tm
.opcode_modifier
.d
)
3700 i
.tm
.base_opcode
^= (i
.tm
.base_opcode
& 0xee) != 0x6e
3701 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
3702 else /* Use the next insn. */
3703 install_template (&t
[1]);
3706 /* Use 2-byte VEX prefix by swapping commutative source operands if there
3707 are no memory operands and at least 3 register ones. */
3708 if (i
.reg_operands
>= 3
3709 && i
.vec_encoding
!= vex_encoding_vex3
3710 && i
.reg_operands
== i
.operands
- i
.imm_operands
3711 && i
.tm
.opcode_modifier
.vex
3712 && i
.tm
.opcode_modifier
.commutative
3713 && (i
.tm
.opcode_modifier
.sse2avx
|| optimize
> 1)
3715 && i
.vex
.register_specifier
3716 && !(i
.vex
.register_specifier
->reg_flags
& RegRex
))
3718 unsigned int xchg
= i
.operands
- i
.reg_operands
;
3719 union i386_op temp_op
;
3720 i386_operand_type temp_type
;
3722 gas_assert (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
);
3723 gas_assert (!i
.tm
.opcode_modifier
.sae
);
3724 gas_assert (operand_type_equal (&i
.types
[i
.operands
- 2],
3725 &i
.types
[i
.operands
- 3]));
3726 gas_assert (i
.rm
.mode
== 3);
3728 temp_type
= i
.types
[xchg
];
3729 i
.types
[xchg
] = i
.types
[xchg
+ 1];
3730 i
.types
[xchg
+ 1] = temp_type
;
3731 temp_op
= i
.op
[xchg
];
3732 i
.op
[xchg
] = i
.op
[xchg
+ 1];
3733 i
.op
[xchg
+ 1] = temp_op
;
3736 xchg
= i
.rm
.regmem
| 8;
3737 i
.rm
.regmem
= ~register_specifier
& 0xf;
3738 gas_assert (!(i
.rm
.regmem
& 8));
3739 i
.vex
.register_specifier
+= xchg
- i
.rm
.regmem
;
3740 register_specifier
= ~xchg
& 0xf;
3743 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3744 vector_length
= avxscalar
;
3745 else if (i
.tm
.opcode_modifier
.vex
== VEX256
)
3751 /* Determine vector length from the last multi-length vector
3754 for (op
= t
->operands
; op
--;)
3755 if (t
->operand_types
[op
].bitfield
.xmmword
3756 && t
->operand_types
[op
].bitfield
.ymmword
3757 && i
.types
[op
].bitfield
.ymmword
)
3764 /* Check the REX.W bit and VEXW. */
3765 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3766 w
= (vexwig
== vexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3767 else if (i
.tm
.opcode_modifier
.vexw
)
3768 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3770 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: vexwig
== vexw1
) ? 1 : 0;
3772 /* Use 2-byte VEX prefix if possible. */
3774 && i
.vec_encoding
!= vex_encoding_vex3
3775 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
3776 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
3778 /* 2-byte VEX prefix. */
3782 i
.vex
.bytes
[0] = 0xc5;
3784 /* Check the REX.R bit. */
3785 r
= (i
.rex
& REX_R
) ? 0 : 1;
3786 i
.vex
.bytes
[1] = (r
<< 7
3787 | register_specifier
<< 3
3788 | vector_length
<< 2
3789 | i
.tm
.opcode_modifier
.opcodeprefix
);
3793 /* 3-byte VEX prefix. */
3796 switch (i
.tm
.opcode_modifier
.opcodespace
)
3801 i
.vex
.bytes
[0] = 0xc4;
3806 i
.vex
.bytes
[0] = 0x8f;
3812 /* The high 3 bits of the second VEX byte are 1's compliment
3813 of RXB bits from REX. */
3814 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | i
.tm
.opcode_modifier
.opcodespace
;
3816 i
.vex
.bytes
[2] = (w
<< 7
3817 | register_specifier
<< 3
3818 | vector_length
<< 2
3819 | i
.tm
.opcode_modifier
.opcodeprefix
);
3824 is_evex_encoding (const insn_template
*t
)
3826 return t
->opcode_modifier
.evex
|| t
->opcode_modifier
.disp8memshift
3827 || t
->opcode_modifier
.broadcast
|| t
->opcode_modifier
.masking
3828 || t
->opcode_modifier
.sae
;
3832 is_any_vex_encoding (const insn_template
*t
)
3834 return t
->opcode_modifier
.vex
|| is_evex_encoding (t
);
3837 /* Build the EVEX prefix. */
3840 build_evex_prefix (void)
3842 unsigned int register_specifier
, w
;
3843 rex_byte vrex_used
= 0;
3845 /* Check register specifier. */
3846 if (i
.vex
.register_specifier
)
3848 gas_assert ((i
.vrex
& REX_X
) == 0);
3850 register_specifier
= i
.vex
.register_specifier
->reg_num
;
3851 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
3852 register_specifier
+= 8;
3853 /* The upper 16 registers are encoded in the fourth byte of the
3855 if (!(i
.vex
.register_specifier
->reg_flags
& RegVRex
))
3856 i
.vex
.bytes
[3] = 0x8;
3857 register_specifier
= ~register_specifier
& 0xf;
3861 register_specifier
= 0xf;
3863 /* Encode upper 16 vector index register in the fourth byte of
3865 if (!(i
.vrex
& REX_X
))
3866 i
.vex
.bytes
[3] = 0x8;
3871 /* 4 byte EVEX prefix. */
3873 i
.vex
.bytes
[0] = 0x62;
3875 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3877 gas_assert (i
.tm
.opcode_modifier
.opcodespace
>= SPACE_0F
);
3878 gas_assert (i
.tm
.opcode_modifier
.opcodespace
<= SPACE_EVEXMAP6
);
3879 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | i
.tm
.opcode_modifier
.opcodespace
;
3881 /* The fifth bit of the second EVEX byte is 1's compliment of the
3882 REX_R bit in VREX. */
3883 if (!(i
.vrex
& REX_R
))
3884 i
.vex
.bytes
[1] |= 0x10;
3888 if ((i
.reg_operands
+ i
.imm_operands
) == i
.operands
)
3890 /* When all operands are registers, the REX_X bit in REX is not
3891 used. We reuse it to encode the upper 16 registers, which is
3892 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3893 as 1's compliment. */
3894 if ((i
.vrex
& REX_B
))
3897 i
.vex
.bytes
[1] &= ~0x40;
3901 /* EVEX instructions shouldn't need the REX prefix. */
3902 i
.vrex
&= ~vrex_used
;
3903 gas_assert (i
.vrex
== 0);
3905 /* Check the REX.W bit and VEXW. */
3906 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3907 w
= (evexwig
== evexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3908 else if (i
.tm
.opcode_modifier
.vexw
)
3909 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3911 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: evexwig
== evexw1
) ? 1 : 0;
3913 /* The third byte of the EVEX prefix. */
3914 i
.vex
.bytes
[2] = ((w
<< 7)
3915 | (register_specifier
<< 3)
3916 | 4 /* Encode the U bit. */
3917 | i
.tm
.opcode_modifier
.opcodeprefix
);
3919 /* The fourth byte of the EVEX prefix. */
3920 /* The zeroing-masking bit. */
3921 if (i
.mask
.reg
&& i
.mask
.zeroing
)
3922 i
.vex
.bytes
[3] |= 0x80;
3924 /* Don't always set the broadcast bit if there is no RC. */
3925 if (i
.rounding
.type
== rc_none
)
3927 /* Encode the vector length. */
3928 unsigned int vec_length
;
3930 if (!i
.tm
.opcode_modifier
.evex
3931 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
3935 /* Determine vector length from the last multi-length vector
3937 for (op
= i
.operands
; op
--;)
3938 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
3939 + i
.tm
.operand_types
[op
].bitfield
.ymmword
3940 + i
.tm
.operand_types
[op
].bitfield
.zmmword
> 1)
3942 if (i
.types
[op
].bitfield
.zmmword
)
3944 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3947 else if (i
.types
[op
].bitfield
.ymmword
)
3949 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3952 else if (i
.types
[op
].bitfield
.xmmword
)
3954 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3957 else if (i
.broadcast
.type
&& op
== i
.broadcast
.operand
)
3959 switch (i
.broadcast
.bytes
)
3962 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3965 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3968 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3977 if (op
>= MAX_OPERANDS
)
3981 switch (i
.tm
.opcode_modifier
.evex
)
3983 case EVEXLIG
: /* LL' is ignored */
3984 vec_length
= evexlig
<< 5;
3987 vec_length
= 0 << 5;
3990 vec_length
= 1 << 5;
3993 vec_length
= 2 << 5;
3999 i
.vex
.bytes
[3] |= vec_length
;
4000 /* Encode the broadcast bit. */
4001 if (i
.broadcast
.type
)
4002 i
.vex
.bytes
[3] |= 0x10;
4004 else if (i
.rounding
.type
!= saeonly
)
4005 i
.vex
.bytes
[3] |= 0x10 | (i
.rounding
.type
<< 5);
4007 i
.vex
.bytes
[3] |= 0x10 | (evexrcig
<< 5);
4010 i
.vex
.bytes
[3] |= i
.mask
.reg
->reg_num
;
4014 process_immext (void)
4018 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
4019 which is coded in the same place as an 8-bit immediate field
4020 would be. Here we fake an 8-bit immediate operand from the
4021 opcode suffix stored in tm.extension_opcode.
4023 AVX instructions also use this encoding, for some of
4024 3 argument instructions. */
4026 gas_assert (i
.imm_operands
<= 1
4028 || (is_any_vex_encoding (&i
.tm
)
4029 && i
.operands
<= 4)));
4031 exp
= &im_expressions
[i
.imm_operands
++];
4032 i
.op
[i
.operands
].imms
= exp
;
4033 i
.types
[i
.operands
] = imm8
;
4035 exp
->X_op
= O_constant
;
4036 exp
->X_add_number
= i
.tm
.extension_opcode
;
4037 i
.tm
.extension_opcode
= None
;
4044 switch (i
.tm
.opcode_modifier
.prefixok
)
4052 as_bad (_("invalid instruction `%s' after `%s'"),
4053 i
.tm
.name
, i
.hle_prefix
);
4056 if (i
.prefix
[LOCK_PREFIX
])
4058 as_bad (_("missing `lock' with `%s'"), i
.hle_prefix
);
4062 case PrefixHLERelease
:
4063 if (i
.prefix
[HLE_PREFIX
] != XRELEASE_PREFIX_OPCODE
)
4065 as_bad (_("instruction `%s' after `xacquire' not allowed"),
4069 if (i
.mem_operands
== 0 || !(i
.flags
[i
.operands
- 1] & Operand_Mem
))
4071 as_bad (_("memory destination needed for instruction `%s'"
4072 " after `xrelease'"), i
.tm
.name
);
4079 /* Encode aligned vector move as unaligned vector move. */
4082 encode_with_unaligned_vector_move (void)
4084 switch (i
.tm
.base_opcode
)
4086 case 0x28: /* Load instructions. */
4087 case 0x29: /* Store instructions. */
4088 /* movaps/movapd/vmovaps/vmovapd. */
4089 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4090 && i
.tm
.opcode_modifier
.opcodeprefix
<= PREFIX_0X66
)
4091 i
.tm
.base_opcode
= 0x10 | (i
.tm
.base_opcode
& 1);
4093 case 0x6f: /* Load instructions. */
4094 case 0x7f: /* Store instructions. */
4095 /* movdqa/vmovdqa/vmovdqa64/vmovdqa32. */
4096 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4097 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0X66
)
4098 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_0XF3
;
4105 /* Try the shortest encoding by shortening operand size. */
4108 optimize_encoding (void)
4112 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4113 && i
.tm
.base_opcode
== 0x8d)
4116 lea symbol, %rN -> mov $symbol, %rN
4117 lea (%rM), %rN -> mov %rM, %rN
4118 lea (,%rM,1), %rN -> mov %rM, %rN
4120 and in 32-bit mode for 16-bit addressing
4122 lea (%rM), %rN -> movzx %rM, %rN
4124 and in 64-bit mode zap 32-bit addressing in favor of using a
4125 32-bit (or less) destination.
4127 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
4129 if (!i
.op
[1].regs
->reg_type
.bitfield
.word
)
4130 i
.tm
.opcode_modifier
.size
= SIZE32
;
4131 i
.prefix
[ADDR_PREFIX
] = 0;
4134 if (!i
.index_reg
&& !i
.base_reg
)
4137 lea symbol, %rN -> mov $symbol, %rN
4139 if (flag_code
== CODE_64BIT
)
4141 /* Don't transform a relocation to a 16-bit one. */
4143 && i
.op
[0].disps
->X_op
!= O_constant
4144 && i
.op
[1].regs
->reg_type
.bitfield
.word
)
4147 if (!i
.op
[1].regs
->reg_type
.bitfield
.qword
4148 || i
.tm
.opcode_modifier
.size
== SIZE32
)
4150 i
.tm
.base_opcode
= 0xb8;
4151 i
.tm
.opcode_modifier
.modrm
= 0;
4152 if (!i
.op
[1].regs
->reg_type
.bitfield
.word
)
4153 i
.types
[0].bitfield
.imm32
= 1;
4156 i
.tm
.opcode_modifier
.size
= SIZE16
;
4157 i
.types
[0].bitfield
.imm16
= 1;
4162 /* Subject to further optimization below. */
4163 i
.tm
.base_opcode
= 0xc7;
4164 i
.tm
.extension_opcode
= 0;
4165 i
.types
[0].bitfield
.imm32s
= 1;
4166 i
.types
[0].bitfield
.baseindex
= 0;
4169 /* Outside of 64-bit mode address and operand sizes have to match if
4170 a relocation is involved, as otherwise we wouldn't (currently) or
4171 even couldn't express the relocation correctly. */
4172 else if (i
.op
[0].disps
4173 && i
.op
[0].disps
->X_op
!= O_constant
4174 && ((!i
.prefix
[ADDR_PREFIX
])
4175 != (flag_code
== CODE_32BIT
4176 ? i
.op
[1].regs
->reg_type
.bitfield
.dword
4177 : i
.op
[1].regs
->reg_type
.bitfield
.word
)))
4179 /* In 16-bit mode converting LEA with 16-bit addressing and a 32-bit
4180 destination is going to grow encoding size. */
4181 else if (flag_code
== CODE_16BIT
4182 && (optimize
<= 1 || optimize_for_space
)
4183 && !i
.prefix
[ADDR_PREFIX
]
4184 && i
.op
[1].regs
->reg_type
.bitfield
.dword
)
4188 i
.tm
.base_opcode
= 0xb8;
4189 i
.tm
.opcode_modifier
.modrm
= 0;
4190 if (i
.op
[1].regs
->reg_type
.bitfield
.dword
)
4191 i
.types
[0].bitfield
.imm32
= 1;
4193 i
.types
[0].bitfield
.imm16
= 1;
4196 && i
.op
[0].disps
->X_op
== O_constant
4197 && i
.op
[1].regs
->reg_type
.bitfield
.dword
4198 /* NB: Add () to !i.prefix[ADDR_PREFIX] to silence
4200 && (!i
.prefix
[ADDR_PREFIX
]) != (flag_code
== CODE_32BIT
))
4201 i
.op
[0].disps
->X_add_number
&= 0xffff;
4204 i
.tm
.operand_types
[0] = i
.types
[0];
4208 i
.op
[0].imms
= &im_expressions
[0];
4209 i
.op
[0].imms
->X_op
= O_absent
;
4212 else if (i
.op
[0].disps
4213 && (i
.op
[0].disps
->X_op
!= O_constant
4214 || i
.op
[0].disps
->X_add_number
))
4219 lea (%rM), %rN -> mov %rM, %rN
4220 lea (,%rM,1), %rN -> mov %rM, %rN
4221 lea (%rM), %rN -> movzx %rM, %rN
4223 const reg_entry
*addr_reg
;
4225 if (!i
.index_reg
&& i
.base_reg
->reg_num
!= RegIP
)
4226 addr_reg
= i
.base_reg
;
4227 else if (!i
.base_reg
4228 && i
.index_reg
->reg_num
!= RegIZ
4229 && !i
.log2_scale_factor
)
4230 addr_reg
= i
.index_reg
;
4234 if (addr_reg
->reg_type
.bitfield
.word
4235 && i
.op
[1].regs
->reg_type
.bitfield
.dword
)
4237 if (flag_code
!= CODE_32BIT
)
4239 i
.tm
.opcode_modifier
.opcodespace
= SPACE_0F
;
4240 i
.tm
.base_opcode
= 0xb7;
4243 i
.tm
.base_opcode
= 0x8b;
4245 if (addr_reg
->reg_type
.bitfield
.dword
4246 && i
.op
[1].regs
->reg_type
.bitfield
.qword
)
4247 i
.tm
.opcode_modifier
.size
= SIZE32
;
4249 i
.op
[0].regs
= addr_reg
;
4254 i
.disp_operands
= 0;
4255 i
.prefix
[ADDR_PREFIX
] = 0;
4256 i
.prefix
[SEG_PREFIX
] = 0;
4260 if (optimize_for_space
4261 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4262 && i
.reg_operands
== 1
4263 && i
.imm_operands
== 1
4264 && !i
.types
[1].bitfield
.byte
4265 && i
.op
[0].imms
->X_op
== O_constant
4266 && fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4267 && (i
.tm
.base_opcode
== 0xa8
4268 || (i
.tm
.base_opcode
== 0xf6
4269 && i
.tm
.extension_opcode
== 0x0)))
4272 test $imm7, %r64/%r32/%r16 -> test $imm7, %r8
4274 unsigned int base_regnum
= i
.op
[1].regs
->reg_num
;
4275 if (flag_code
== CODE_64BIT
|| base_regnum
< 4)
4277 i
.types
[1].bitfield
.byte
= 1;
4278 /* Ignore the suffix. */
4280 /* Convert to byte registers. */
4281 if (i
.types
[1].bitfield
.word
)
4283 else if (i
.types
[1].bitfield
.dword
)
4287 if (!(i
.op
[1].regs
->reg_flags
& RegRex
) && base_regnum
< 4)
4292 else if (flag_code
== CODE_64BIT
4293 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4294 && ((i
.types
[1].bitfield
.qword
4295 && i
.reg_operands
== 1
4296 && i
.imm_operands
== 1
4297 && i
.op
[0].imms
->X_op
== O_constant
4298 && ((i
.tm
.base_opcode
== 0xb8
4299 && i
.tm
.extension_opcode
== None
4300 && fits_in_unsigned_long (i
.op
[0].imms
->X_add_number
))
4301 || (fits_in_imm31 (i
.op
[0].imms
->X_add_number
)
4302 && ((i
.tm
.base_opcode
== 0x24
4303 || i
.tm
.base_opcode
== 0xa8)
4304 || (i
.tm
.base_opcode
== 0x80
4305 && i
.tm
.extension_opcode
== 0x4)
4306 || ((i
.tm
.base_opcode
== 0xf6
4307 || (i
.tm
.base_opcode
| 1) == 0xc7)
4308 && i
.tm
.extension_opcode
== 0x0)))
4309 || (fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4310 && i
.tm
.base_opcode
== 0x83
4311 && i
.tm
.extension_opcode
== 0x4)))
4312 || (i
.types
[0].bitfield
.qword
4313 && ((i
.reg_operands
== 2
4314 && i
.op
[0].regs
== i
.op
[1].regs
4315 && (i
.tm
.base_opcode
== 0x30
4316 || i
.tm
.base_opcode
== 0x28))
4317 || (i
.reg_operands
== 1
4319 && i
.tm
.base_opcode
== 0x30)))))
4322 andq $imm31, %r64 -> andl $imm31, %r32
4323 andq $imm7, %r64 -> andl $imm7, %r32
4324 testq $imm31, %r64 -> testl $imm31, %r32
4325 xorq %r64, %r64 -> xorl %r32, %r32
4326 subq %r64, %r64 -> subl %r32, %r32
4327 movq $imm31, %r64 -> movl $imm31, %r32
4328 movq $imm32, %r64 -> movl $imm32, %r32
4330 i
.tm
.opcode_modifier
.norex64
= 1;
4331 if (i
.tm
.base_opcode
== 0xb8 || (i
.tm
.base_opcode
| 1) == 0xc7)
4334 movq $imm31, %r64 -> movl $imm31, %r32
4335 movq $imm32, %r64 -> movl $imm32, %r32
4337 i
.tm
.operand_types
[0].bitfield
.imm32
= 1;
4338 i
.tm
.operand_types
[0].bitfield
.imm32s
= 0;
4339 i
.tm
.operand_types
[0].bitfield
.imm64
= 0;
4340 i
.types
[0].bitfield
.imm32
= 1;
4341 i
.types
[0].bitfield
.imm32s
= 0;
4342 i
.types
[0].bitfield
.imm64
= 0;
4343 i
.types
[1].bitfield
.dword
= 1;
4344 i
.types
[1].bitfield
.qword
= 0;
4345 if ((i
.tm
.base_opcode
| 1) == 0xc7)
4348 movq $imm31, %r64 -> movl $imm31, %r32
4350 i
.tm
.base_opcode
= 0xb8;
4351 i
.tm
.extension_opcode
= None
;
4352 i
.tm
.opcode_modifier
.w
= 0;
4353 i
.tm
.opcode_modifier
.modrm
= 0;
4357 else if (optimize
> 1
4358 && !optimize_for_space
4359 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4360 && i
.reg_operands
== 2
4361 && i
.op
[0].regs
== i
.op
[1].regs
4362 && ((i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x8
4363 || (i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x20)
4364 && (flag_code
!= CODE_64BIT
|| !i
.types
[0].bitfield
.dword
))
4367 andb %rN, %rN -> testb %rN, %rN
4368 andw %rN, %rN -> testw %rN, %rN
4369 andq %rN, %rN -> testq %rN, %rN
4370 orb %rN, %rN -> testb %rN, %rN
4371 orw %rN, %rN -> testw %rN, %rN
4372 orq %rN, %rN -> testq %rN, %rN
4374 and outside of 64-bit mode
4376 andl %rN, %rN -> testl %rN, %rN
4377 orl %rN, %rN -> testl %rN, %rN
4379 i
.tm
.base_opcode
= 0x84 | (i
.tm
.base_opcode
& 1);
4381 else if (i
.reg_operands
== 3
4382 && i
.op
[0].regs
== i
.op
[1].regs
4383 && !i
.types
[2].bitfield
.xmmword
4384 && (i
.tm
.opcode_modifier
.vex
4385 || ((!i
.mask
.reg
|| i
.mask
.zeroing
)
4386 && i
.rounding
.type
== rc_none
4387 && is_evex_encoding (&i
.tm
)
4388 && (i
.vec_encoding
!= vex_encoding_evex
4389 || cpu_arch_isa_flags
.bitfield
.cpuavx512vl
4390 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
4391 || (i
.tm
.operand_types
[2].bitfield
.zmmword
4392 && i
.types
[2].bitfield
.ymmword
))))
4393 && ((i
.tm
.base_opcode
== 0x55
4394 || i
.tm
.base_opcode
== 0x57
4395 || i
.tm
.base_opcode
== 0xdf
4396 || i
.tm
.base_opcode
== 0xef
4397 || i
.tm
.base_opcode
== 0xf8
4398 || i
.tm
.base_opcode
== 0xf9
4399 || i
.tm
.base_opcode
== 0xfa
4400 || i
.tm
.base_opcode
== 0xfb
4401 || i
.tm
.base_opcode
== 0x42
4402 || i
.tm
.base_opcode
== 0x47)
4403 && i
.tm
.extension_opcode
== None
))
4406 VOP, one of vandnps, vandnpd, vxorps, vxorpd, vpsubb, vpsubd,
4408 EVEX VOP %zmmM, %zmmM, %zmmN
4409 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4410 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4411 EVEX VOP %ymmM, %ymmM, %ymmN
4412 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4413 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4414 VEX VOP %ymmM, %ymmM, %ymmN
4415 -> VEX VOP %xmmM, %xmmM, %xmmN
4416 VOP, one of vpandn and vpxor:
4417 VEX VOP %ymmM, %ymmM, %ymmN
4418 -> VEX VOP %xmmM, %xmmM, %xmmN
4419 VOP, one of vpandnd and vpandnq:
4420 EVEX VOP %zmmM, %zmmM, %zmmN
4421 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4422 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4423 EVEX VOP %ymmM, %ymmM, %ymmN
4424 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4425 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4426 VOP, one of vpxord and vpxorq:
4427 EVEX VOP %zmmM, %zmmM, %zmmN
4428 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4429 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4430 EVEX VOP %ymmM, %ymmM, %ymmN
4431 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4432 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4433 VOP, one of kxord and kxorq:
4434 VEX VOP %kM, %kM, %kN
4435 -> VEX kxorw %kM, %kM, %kN
4436 VOP, one of kandnd and kandnq:
4437 VEX VOP %kM, %kM, %kN
4438 -> VEX kandnw %kM, %kM, %kN
4440 if (is_evex_encoding (&i
.tm
))
4442 if (i
.vec_encoding
!= vex_encoding_evex
)
4444 i
.tm
.opcode_modifier
.vex
= VEX128
;
4445 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4446 i
.tm
.opcode_modifier
.evex
= 0;
4448 else if (optimize
> 1)
4449 i
.tm
.opcode_modifier
.evex
= EVEX128
;
4453 else if (i
.tm
.operand_types
[0].bitfield
.class == RegMask
)
4455 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_NONE
;
4456 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4459 i
.tm
.opcode_modifier
.vex
= VEX128
;
4461 if (i
.tm
.opcode_modifier
.vex
)
4462 for (j
= 0; j
< 3; j
++)
4464 i
.types
[j
].bitfield
.xmmword
= 1;
4465 i
.types
[j
].bitfield
.ymmword
= 0;
4468 else if (i
.vec_encoding
!= vex_encoding_evex
4469 && !i
.types
[0].bitfield
.zmmword
4470 && !i
.types
[1].bitfield
.zmmword
4472 && !i
.broadcast
.type
4473 && is_evex_encoding (&i
.tm
)
4474 && ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x6f
4475 || (i
.tm
.base_opcode
& ~4) == 0xdb
4476 || (i
.tm
.base_opcode
& ~4) == 0xeb)
4477 && i
.tm
.extension_opcode
== None
)
4480 VOP, one of vmovdqa32, vmovdqa64, vmovdqu8, vmovdqu16,
4481 vmovdqu32 and vmovdqu64:
4482 EVEX VOP %xmmM, %xmmN
4483 -> VEX vmovdqa|vmovdqu %xmmM, %xmmN (M and N < 16)
4484 EVEX VOP %ymmM, %ymmN
4485 -> VEX vmovdqa|vmovdqu %ymmM, %ymmN (M and N < 16)
4487 -> VEX vmovdqa|vmovdqu %xmmM, mem (M < 16)
4489 -> VEX vmovdqa|vmovdqu %ymmM, mem (M < 16)
4491 -> VEX mvmovdqa|vmovdquem, %xmmN (N < 16)
4493 -> VEX vmovdqa|vmovdqu mem, %ymmN (N < 16)
4494 VOP, one of vpand, vpandn, vpor, vpxor:
4495 EVEX VOP{d,q} %xmmL, %xmmM, %xmmN
4496 -> VEX VOP %xmmL, %xmmM, %xmmN (L, M, and N < 16)
4497 EVEX VOP{d,q} %ymmL, %ymmM, %ymmN
4498 -> VEX VOP %ymmL, %ymmM, %ymmN (L, M, and N < 16)
4499 EVEX VOP{d,q} mem, %xmmM, %xmmN
4500 -> VEX VOP mem, %xmmM, %xmmN (M and N < 16)
4501 EVEX VOP{d,q} mem, %ymmM, %ymmN
4502 -> VEX VOP mem, %ymmM, %ymmN (M and N < 16)
4504 for (j
= 0; j
< i
.operands
; j
++)
4505 if (operand_type_check (i
.types
[j
], disp
)
4506 && i
.op
[j
].disps
->X_op
== O_constant
)
4508 /* Since the VEX prefix has 2 or 3 bytes, the EVEX prefix
4509 has 4 bytes, EVEX Disp8 has 1 byte and VEX Disp32 has 4
4510 bytes, we choose EVEX Disp8 over VEX Disp32. */
4511 int evex_disp8
, vex_disp8
;
4512 unsigned int memshift
= i
.memshift
;
4513 offsetT n
= i
.op
[j
].disps
->X_add_number
;
4515 evex_disp8
= fits_in_disp8 (n
);
4517 vex_disp8
= fits_in_disp8 (n
);
4518 if (evex_disp8
!= vex_disp8
)
4520 i
.memshift
= memshift
;
4524 i
.types
[j
].bitfield
.disp8
= vex_disp8
;
4527 if ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x6f
4528 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
)
4529 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_0XF3
;
4530 i
.tm
.opcode_modifier
.vex
4531 = i
.types
[0].bitfield
.ymmword
? VEX256
: VEX128
;
4532 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4533 /* VPAND, VPOR, and VPXOR are commutative. */
4534 if (i
.reg_operands
== 3 && i
.tm
.base_opcode
!= 0xdf)
4535 i
.tm
.opcode_modifier
.commutative
= 1;
4536 i
.tm
.opcode_modifier
.evex
= 0;
4537 i
.tm
.opcode_modifier
.masking
= 0;
4538 i
.tm
.opcode_modifier
.broadcast
= 0;
4539 i
.tm
.opcode_modifier
.disp8memshift
= 0;
4542 i
.types
[j
].bitfield
.disp8
4543 = fits_in_disp8 (i
.op
[j
].disps
->X_add_number
);
4547 /* Return non-zero for load instruction. */
4553 int any_vex_p
= is_any_vex_encoding (&i
.tm
);
4554 unsigned int base_opcode
= i
.tm
.base_opcode
| 1;
4558 /* Anysize insns: lea, invlpg, clflush, prefetchnta, prefetcht0,
4559 prefetcht1, prefetcht2, prefetchtw, bndmk, bndcl, bndcu, bndcn,
4560 bndstx, bndldx, prefetchwt1, clflushopt, clwb, cldemote. */
4561 if (i
.tm
.opcode_modifier
.anysize
)
4565 if (strcmp (i
.tm
.name
, "pop") == 0)
4569 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
4572 if (i
.tm
.base_opcode
== 0x9d
4573 || i
.tm
.base_opcode
== 0x61)
4576 /* movs, cmps, lods, scas. */
4577 if ((i
.tm
.base_opcode
| 0xb) == 0xaf)
4581 if (base_opcode
== 0x6f
4582 || i
.tm
.base_opcode
== 0xd7)
4584 /* NB: For AMD-specific insns with implicit memory operands,
4585 they're intentionally not covered. */
4588 /* No memory operand. */
4589 if (!i
.mem_operands
)
4595 if (i
.tm
.base_opcode
== 0xae
4596 && i
.tm
.opcode_modifier
.vex
4597 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4598 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
4599 && i
.tm
.extension_opcode
== 2)
4602 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
4604 /* test, not, neg, mul, imul, div, idiv. */
4605 if ((i
.tm
.base_opcode
== 0xf6 || i
.tm
.base_opcode
== 0xf7)
4606 && i
.tm
.extension_opcode
!= 1)
4610 if (base_opcode
== 0xff && i
.tm
.extension_opcode
<= 1)
4613 /* add, or, adc, sbb, and, sub, xor, cmp. */
4614 if (i
.tm
.base_opcode
>= 0x80 && i
.tm
.base_opcode
<= 0x83)
4617 /* rol, ror, rcl, rcr, shl/sal, shr, sar. */
4618 if ((base_opcode
== 0xc1
4619 || (i
.tm
.base_opcode
>= 0xd0 && i
.tm
.base_opcode
<= 0xd3))
4620 && i
.tm
.extension_opcode
!= 6)
4623 /* Check for x87 instructions. */
4624 if (base_opcode
>= 0xd8 && base_opcode
<= 0xdf)
4626 /* Skip fst, fstp, fstenv, fstcw. */
4627 if (i
.tm
.base_opcode
== 0xd9
4628 && (i
.tm
.extension_opcode
== 2
4629 || i
.tm
.extension_opcode
== 3
4630 || i
.tm
.extension_opcode
== 6
4631 || i
.tm
.extension_opcode
== 7))
4634 /* Skip fisttp, fist, fistp, fstp. */
4635 if (i
.tm
.base_opcode
== 0xdb
4636 && (i
.tm
.extension_opcode
== 1
4637 || i
.tm
.extension_opcode
== 2
4638 || i
.tm
.extension_opcode
== 3
4639 || i
.tm
.extension_opcode
== 7))
4642 /* Skip fisttp, fst, fstp, fsave, fstsw. */
4643 if (i
.tm
.base_opcode
== 0xdd
4644 && (i
.tm
.extension_opcode
== 1
4645 || i
.tm
.extension_opcode
== 2
4646 || i
.tm
.extension_opcode
== 3
4647 || i
.tm
.extension_opcode
== 6
4648 || i
.tm
.extension_opcode
== 7))
4651 /* Skip fisttp, fist, fistp, fbstp, fistp. */
4652 if (i
.tm
.base_opcode
== 0xdf
4653 && (i
.tm
.extension_opcode
== 1
4654 || i
.tm
.extension_opcode
== 2
4655 || i
.tm
.extension_opcode
== 3
4656 || i
.tm
.extension_opcode
== 6
4657 || i
.tm
.extension_opcode
== 7))
4663 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
)
4665 /* bt, bts, btr, btc. */
4666 if (i
.tm
.base_opcode
== 0xba
4667 && (i
.tm
.extension_opcode
>= 4 && i
.tm
.extension_opcode
<= 7))
4670 /* cmpxchg8b, cmpxchg16b, xrstors, vmptrld. */
4671 if (i
.tm
.base_opcode
== 0xc7
4672 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
4673 && (i
.tm
.extension_opcode
== 1 || i
.tm
.extension_opcode
== 3
4674 || i
.tm
.extension_opcode
== 6))
4677 /* fxrstor, ldmxcsr, xrstor. */
4678 if (i
.tm
.base_opcode
== 0xae
4679 && (i
.tm
.extension_opcode
== 1
4680 || i
.tm
.extension_opcode
== 2
4681 || i
.tm
.extension_opcode
== 5))
4684 /* lgdt, lidt, lmsw. */
4685 if (i
.tm
.base_opcode
== 0x01
4686 && (i
.tm
.extension_opcode
== 2
4687 || i
.tm
.extension_opcode
== 3
4688 || i
.tm
.extension_opcode
== 6))
4692 dest
= i
.operands
- 1;
4694 /* Check fake imm8 operand and 3 source operands. */
4695 if ((i
.tm
.opcode_modifier
.immext
4696 || i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
4697 && i
.types
[dest
].bitfield
.imm8
)
4700 /* add, or, adc, sbb, and, sub, xor, cmp, test, xchg. */
4701 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4702 && (base_opcode
== 0x1
4703 || base_opcode
== 0x9
4704 || base_opcode
== 0x11
4705 || base_opcode
== 0x19
4706 || base_opcode
== 0x21
4707 || base_opcode
== 0x29
4708 || base_opcode
== 0x31
4709 || base_opcode
== 0x39
4710 || (base_opcode
| 2) == 0x87))
4714 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4715 && base_opcode
== 0xc1)
4718 /* Check for load instruction. */
4719 return (i
.types
[dest
].bitfield
.class != ClassNone
4720 || i
.types
[dest
].bitfield
.instance
== Accum
);
4723 /* Output lfence, 0xfaee8, after instruction. */
4726 insert_lfence_after (void)
4728 if (lfence_after_load
&& load_insn_p ())
4730 /* There are also two REP string instructions that require
4731 special treatment. Specifically, the compare string (CMPS)
4732 and scan string (SCAS) instructions set EFLAGS in a manner
4733 that depends on the data being compared/scanned. When used
4734 with a REP prefix, the number of iterations may therefore
4735 vary depending on this data. If the data is a program secret
4736 chosen by the adversary using an LVI method,
4737 then this data-dependent behavior may leak some aspect
4739 if (((i
.tm
.base_opcode
| 0x1) == 0xa7
4740 || (i
.tm
.base_opcode
| 0x1) == 0xaf)
4741 && i
.prefix
[REP_PREFIX
])
4743 as_warn (_("`%s` changes flags which would affect control flow behavior"),
4746 char *p
= frag_more (3);
4753 /* Output lfence, 0xfaee8, before instruction. */
4756 insert_lfence_before (void)
4760 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
4763 if (i
.tm
.base_opcode
== 0xff
4764 && (i
.tm
.extension_opcode
== 2 || i
.tm
.extension_opcode
== 4))
4766 /* Insert lfence before indirect branch if needed. */
4768 if (lfence_before_indirect_branch
== lfence_branch_none
)
4771 if (i
.operands
!= 1)
4774 if (i
.reg_operands
== 1)
4776 /* Indirect branch via register. Don't insert lfence with
4777 -mlfence-after-load=yes. */
4778 if (lfence_after_load
4779 || lfence_before_indirect_branch
== lfence_branch_memory
)
4782 else if (i
.mem_operands
== 1
4783 && lfence_before_indirect_branch
!= lfence_branch_register
)
4785 as_warn (_("indirect `%s` with memory operand should be avoided"),
4792 if (last_insn
.kind
!= last_insn_other
4793 && last_insn
.seg
== now_seg
)
4795 as_warn_where (last_insn
.file
, last_insn
.line
,
4796 _("`%s` skips -mlfence-before-indirect-branch on `%s`"),
4797 last_insn
.name
, i
.tm
.name
);
4808 /* Output or/not/shl and lfence before near ret. */
4809 if (lfence_before_ret
!= lfence_before_ret_none
4810 && (i
.tm
.base_opcode
== 0xc2
4811 || i
.tm
.base_opcode
== 0xc3))
4813 if (last_insn
.kind
!= last_insn_other
4814 && last_insn
.seg
== now_seg
)
4816 as_warn_where (last_insn
.file
, last_insn
.line
,
4817 _("`%s` skips -mlfence-before-ret on `%s`"),
4818 last_insn
.name
, i
.tm
.name
);
4822 /* Near ret ingore operand size override under CPU64. */
4823 char prefix
= flag_code
== CODE_64BIT
4825 : i
.prefix
[DATA_PREFIX
] ? 0x66 : 0x0;
4827 if (lfence_before_ret
== lfence_before_ret_not
)
4829 /* not: 0xf71424, may add prefix
4830 for operand size override or 64-bit code. */
4831 p
= frag_more ((prefix
? 2 : 0) + 6 + 3);
4845 p
= frag_more ((prefix
? 1 : 0) + 4 + 3);
4848 if (lfence_before_ret
== lfence_before_ret_or
)
4850 /* or: 0x830c2400, may add prefix
4851 for operand size override or 64-bit code. */
4857 /* shl: 0xc1242400, may add prefix
4858 for operand size override or 64-bit code. */
4873 /* This is the guts of the machine-dependent assembler. LINE points to a
4874 machine dependent instruction. This function is supposed to emit
4875 the frags/bytes it assembles to. */
4878 md_assemble (char *line
)
4881 char mnemonic
[MAX_MNEM_SIZE
], mnem_suffix
;
4882 const insn_template
*t
;
4884 /* Initialize globals. */
4885 memset (&i
, '\0', sizeof (i
));
4886 i
.rounding
.type
= rc_none
;
4887 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4888 i
.reloc
[j
] = NO_RELOC
;
4889 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
4890 memset (im_expressions
, '\0', sizeof (im_expressions
));
4891 save_stack_p
= save_stack
;
4893 /* First parse an instruction mnemonic & call i386_operand for the operands.
4894 We assume that the scrubber has arranged it so that line[0] is the valid
4895 start of a (possibly prefixed) mnemonic. */
4897 line
= parse_insn (line
, mnemonic
);
4900 mnem_suffix
= i
.suffix
;
4902 line
= parse_operands (line
, mnemonic
);
4904 xfree (i
.memop1_string
);
4905 i
.memop1_string
= NULL
;
4909 /* Now we've parsed the mnemonic into a set of templates, and have the
4910 operands at hand. */
4912 /* All Intel opcodes have reversed operands except for "bound", "enter",
4913 "invlpg*", "monitor*", "mwait*", "tpause", "umwait", "pvalidate",
4914 "rmpadjust", and "rmpupdate". We also don't reverse intersegment "jmp"
4915 and "call" instructions with 2 immediate operands so that the immediate
4916 segment precedes the offset consistently in Intel and AT&T modes. */
4919 && (strcmp (mnemonic
, "bound") != 0)
4920 && (strncmp (mnemonic
, "invlpg", 6) != 0)
4921 && !startswith (mnemonic
, "monitor")
4922 && !startswith (mnemonic
, "mwait")
4923 && (strcmp (mnemonic
, "pvalidate") != 0)
4924 && !startswith (mnemonic
, "rmp")
4925 && (strcmp (mnemonic
, "tpause") != 0)
4926 && (strcmp (mnemonic
, "umwait") != 0)
4927 && !(operand_type_check (i
.types
[0], imm
)
4928 && operand_type_check (i
.types
[1], imm
)))
4931 /* The order of the immediates should be reversed
4932 for 2 immediates extrq and insertq instructions */
4933 if (i
.imm_operands
== 2
4934 && (strcmp (mnemonic
, "extrq") == 0
4935 || strcmp (mnemonic
, "insertq") == 0))
4936 swap_2_operands (0, 1);
4941 if (i
.disp_operands
&& !want_disp32 (current_templates
->start
))
4943 for (j
= 0; j
< i
.operands
; ++j
)
4945 const expressionS
*exp
= i
.op
[j
].disps
;
4947 if (!operand_type_check (i
.types
[j
], disp
))
4950 if (exp
->X_op
!= O_constant
)
4953 /* Since displacement is signed extended to 64bit, don't allow
4954 disp32 and turn off disp32s if they are out of range. */
4955 i
.types
[j
].bitfield
.disp32
= 0;
4956 if (fits_in_signed_long (exp
->X_add_number
))
4959 i
.types
[j
].bitfield
.disp32s
= 0;
4960 if (i
.types
[j
].bitfield
.baseindex
)
4962 char number_buf
[128];
4964 /* Coded this way in order to allow for ease of translation. */
4965 sprintf_vma (number_buf
, exp
->X_add_number
);
4966 as_bad (_("0x%s out of range of signed 32bit displacement"),
4973 /* Don't optimize displacement for movabs since it only takes 64bit
4976 && i
.disp_encoding
!= disp_encoding_32bit
4977 && (flag_code
!= CODE_64BIT
4978 || strcmp (mnemonic
, "movabs") != 0))
4981 /* Next, we find a template that matches the given insn,
4982 making sure the overlap of the given operands types is consistent
4983 with the template operand types. */
4985 if (!(t
= match_template (mnem_suffix
)))
4988 if (sse_check
!= check_none
4989 && !i
.tm
.opcode_modifier
.noavx
4990 && !i
.tm
.cpu_flags
.bitfield
.cpuavx
4991 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512f
4992 && (i
.tm
.cpu_flags
.bitfield
.cpusse
4993 || i
.tm
.cpu_flags
.bitfield
.cpusse2
4994 || i
.tm
.cpu_flags
.bitfield
.cpusse3
4995 || i
.tm
.cpu_flags
.bitfield
.cpussse3
4996 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
4997 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
4998 || i
.tm
.cpu_flags
.bitfield
.cpupclmul
4999 || i
.tm
.cpu_flags
.bitfield
.cpuaes
5000 || i
.tm
.cpu_flags
.bitfield
.cpusha
5001 || i
.tm
.cpu_flags
.bitfield
.cpugfni
))
5003 (sse_check
== check_warning
5005 : as_bad
) (_("SSE instruction `%s' is used"), i
.tm
.name
);
5008 if (i
.tm
.opcode_modifier
.fwait
)
5009 if (!add_prefix (FWAIT_OPCODE
))
5012 /* Check if REP prefix is OK. */
5013 if (i
.rep_prefix
&& i
.tm
.opcode_modifier
.prefixok
!= PrefixRep
)
5015 as_bad (_("invalid instruction `%s' after `%s'"),
5016 i
.tm
.name
, i
.rep_prefix
);
5020 /* Check for lock without a lockable instruction. Destination operand
5021 must be memory unless it is xchg (0x86). */
5022 if (i
.prefix
[LOCK_PREFIX
]
5023 && (i
.tm
.opcode_modifier
.prefixok
< PrefixLock
5024 || i
.mem_operands
== 0
5025 || (i
.tm
.base_opcode
!= 0x86
5026 && !(i
.flags
[i
.operands
- 1] & Operand_Mem
))))
5028 as_bad (_("expecting lockable instruction after `lock'"));
5032 /* Check for data size prefix on VEX/XOP/EVEX encoded and SIMD insns. */
5033 if (i
.prefix
[DATA_PREFIX
]
5034 && (is_any_vex_encoding (&i
.tm
)
5035 || i
.tm
.operand_types
[i
.imm_operands
].bitfield
.class >= RegMMX
5036 || i
.tm
.operand_types
[i
.imm_operands
+ 1].bitfield
.class >= RegMMX
))
5038 as_bad (_("data size prefix invalid with `%s'"), i
.tm
.name
);
5042 /* Check if HLE prefix is OK. */
5043 if (i
.hle_prefix
&& !check_hle ())
5046 /* Check BND prefix. */
5047 if (i
.bnd_prefix
&& !i
.tm
.opcode_modifier
.bndprefixok
)
5048 as_bad (_("expecting valid branch instruction after `bnd'"));
5050 /* Check NOTRACK prefix. */
5051 if (i
.notrack_prefix
&& i
.tm
.opcode_modifier
.prefixok
!= PrefixNoTrack
)
5052 as_bad (_("expecting indirect branch instruction after `notrack'"));
5054 if (i
.tm
.cpu_flags
.bitfield
.cpumpx
)
5056 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
5057 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
5058 else if (flag_code
!= CODE_16BIT
5059 ? i
.prefix
[ADDR_PREFIX
]
5060 : i
.mem_operands
&& !i
.prefix
[ADDR_PREFIX
])
5061 as_bad (_("16-bit address isn't allowed in MPX instructions"));
5064 /* Insert BND prefix. */
5065 if (add_bnd_prefix
&& i
.tm
.opcode_modifier
.bndprefixok
)
5067 if (!i
.prefix
[BND_PREFIX
])
5068 add_prefix (BND_PREFIX_OPCODE
);
5069 else if (i
.prefix
[BND_PREFIX
] != BND_PREFIX_OPCODE
)
5071 as_warn (_("replacing `rep'/`repe' prefix by `bnd'"));
5072 i
.prefix
[BND_PREFIX
] = BND_PREFIX_OPCODE
;
5076 /* Check string instruction segment overrides. */
5077 if (i
.tm
.opcode_modifier
.isstring
>= IS_STRING_ES_OP0
)
5079 gas_assert (i
.mem_operands
);
5080 if (!check_string ())
5082 i
.disp_operands
= 0;
5085 if (optimize
&& !i
.no_optimize
&& i
.tm
.opcode_modifier
.optimize
)
5086 optimize_encoding ();
5088 if (use_unaligned_vector_move
)
5089 encode_with_unaligned_vector_move ();
5091 if (!process_suffix ())
5094 /* Update operand types and check extended states. */
5095 for (j
= 0; j
< i
.operands
; j
++)
5097 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
5098 switch (i
.tm
.operand_types
[j
].bitfield
.class)
5103 i
.xstate
|= xstate_mmx
;
5106 i
.xstate
|= xstate_mask
;
5109 if (i
.tm
.operand_types
[j
].bitfield
.tmmword
)
5110 i
.xstate
|= xstate_tmm
;
5111 else if (i
.tm
.operand_types
[j
].bitfield
.zmmword
)
5112 i
.xstate
|= xstate_zmm
;
5113 else if (i
.tm
.operand_types
[j
].bitfield
.ymmword
)
5114 i
.xstate
|= xstate_ymm
;
5115 else if (i
.tm
.operand_types
[j
].bitfield
.xmmword
)
5116 i
.xstate
|= xstate_xmm
;
5121 /* Make still unresolved immediate matches conform to size of immediate
5122 given in i.suffix. */
5123 if (!finalize_imm ())
5126 if (i
.types
[0].bitfield
.imm1
)
5127 i
.imm_operands
= 0; /* kludge for shift insns. */
5129 /* We only need to check those implicit registers for instructions
5130 with 3 operands or less. */
5131 if (i
.operands
<= 3)
5132 for (j
= 0; j
< i
.operands
; j
++)
5133 if (i
.types
[j
].bitfield
.instance
!= InstanceNone
5134 && !i
.types
[j
].bitfield
.xmmword
)
5137 /* For insns with operands there are more diddles to do to the opcode. */
5140 if (!process_operands ())
5143 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
5145 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
5146 as_warn (_("translating to `%sp'"), i
.tm
.name
);
5149 if (is_any_vex_encoding (&i
.tm
))
5151 if (!cpu_arch_flags
.bitfield
.cpui286
)
5153 as_bad (_("instruction `%s' isn't supported outside of protected mode."),
5158 /* Check for explicit REX prefix. */
5159 if (i
.prefix
[REX_PREFIX
] || i
.rex_encoding
)
5161 as_bad (_("REX prefix invalid with `%s'"), i
.tm
.name
);
5165 if (i
.tm
.opcode_modifier
.vex
)
5166 build_vex_prefix (t
);
5168 build_evex_prefix ();
5170 /* The individual REX.RXBW bits got consumed. */
5171 i
.rex
&= REX_OPCODE
;
5174 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
5175 instructions may define INT_OPCODE as well, so avoid this corner
5176 case for those instructions that use MODRM. */
5177 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
5178 && i
.tm
.base_opcode
== INT_OPCODE
5179 && !i
.tm
.opcode_modifier
.modrm
5180 && i
.op
[0].imms
->X_add_number
== 3)
5182 i
.tm
.base_opcode
= INT3_OPCODE
;
5186 if ((i
.tm
.opcode_modifier
.jump
== JUMP
5187 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
5188 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
5189 && i
.op
[0].disps
->X_op
== O_constant
)
5191 /* Convert "jmp constant" (and "call constant") to a jump (call) to
5192 the absolute address given by the constant. Since ix86 jumps and
5193 calls are pc relative, we need to generate a reloc. */
5194 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
5195 i
.op
[0].disps
->X_op
= O_symbol
;
5198 /* For 8 bit registers we need an empty rex prefix. Also if the
5199 instruction already has a prefix, we need to convert old
5200 registers to new ones. */
5202 if ((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
5203 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
5204 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
5205 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
5206 || (((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
)
5207 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
))
5212 i
.rex
|= REX_OPCODE
;
5213 for (x
= 0; x
< 2; x
++)
5215 /* Look for 8 bit operand that uses old registers. */
5216 if (i
.types
[x
].bitfield
.class == Reg
&& i
.types
[x
].bitfield
.byte
5217 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
5219 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
5220 /* In case it is "hi" register, give up. */
5221 if (i
.op
[x
].regs
->reg_num
> 3)
5222 as_bad (_("can't encode register '%s%s' in an "
5223 "instruction requiring REX prefix."),
5224 register_prefix
, i
.op
[x
].regs
->reg_name
);
5226 /* Otherwise it is equivalent to the extended register.
5227 Since the encoding doesn't change this is merely
5228 cosmetic cleanup for debug output. */
5230 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
5235 if (i
.rex
== 0 && i
.rex_encoding
)
5237 /* Check if we can add a REX_OPCODE byte. Look for 8 bit operand
5238 that uses legacy register. If it is "hi" register, don't add
5239 the REX_OPCODE byte. */
5241 for (x
= 0; x
< 2; x
++)
5242 if (i
.types
[x
].bitfield
.class == Reg
5243 && i
.types
[x
].bitfield
.byte
5244 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0
5245 && i
.op
[x
].regs
->reg_num
> 3)
5247 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
5248 i
.rex_encoding
= false;
5257 add_prefix (REX_OPCODE
| i
.rex
);
5259 insert_lfence_before ();
5261 /* We are ready to output the insn. */
5264 insert_lfence_after ();
5266 last_insn
.seg
= now_seg
;
5268 if (i
.tm
.opcode_modifier
.isprefix
)
5270 last_insn
.kind
= last_insn_prefix
;
5271 last_insn
.name
= i
.tm
.name
;
5272 last_insn
.file
= as_where (&last_insn
.line
);
5275 last_insn
.kind
= last_insn_other
;
5279 parse_insn (char *line
, char *mnemonic
)
5282 char *token_start
= l
;
5285 const insn_template
*t
;
5291 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
5296 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
5298 as_bad (_("no such instruction: `%s'"), token_start
);
5303 if (!is_space_char (*l
)
5304 && *l
!= END_OF_INSN
5306 || (*l
!= PREFIX_SEPARATOR
5309 as_bad (_("invalid character %s in mnemonic"),
5310 output_invalid (*l
));
5313 if (token_start
== l
)
5315 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
5316 as_bad (_("expecting prefix; got nothing"));
5318 as_bad (_("expecting mnemonic; got nothing"));
5322 /* Look up instruction (or prefix) via hash table. */
5323 current_templates
= (const templates
*) str_hash_find (op_hash
, mnemonic
);
5325 if (*l
!= END_OF_INSN
5326 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
5327 && current_templates
5328 && current_templates
->start
->opcode_modifier
.isprefix
)
5330 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
5332 as_bad ((flag_code
!= CODE_64BIT
5333 ? _("`%s' is only supported in 64-bit mode")
5334 : _("`%s' is not supported in 64-bit mode")),
5335 current_templates
->start
->name
);
5338 /* If we are in 16-bit mode, do not allow addr16 or data16.
5339 Similarly, in 32-bit mode, do not allow addr32 or data32. */
5340 if ((current_templates
->start
->opcode_modifier
.size
== SIZE16
5341 || current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5342 && flag_code
!= CODE_64BIT
5343 && ((current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5344 ^ (flag_code
== CODE_16BIT
)))
5346 as_bad (_("redundant %s prefix"),
5347 current_templates
->start
->name
);
5351 if (current_templates
->start
->base_opcode
== PSEUDO_PREFIX
)
5353 /* Handle pseudo prefixes. */
5354 switch (current_templates
->start
->extension_opcode
)
5358 i
.disp_encoding
= disp_encoding_8bit
;
5362 i
.disp_encoding
= disp_encoding_16bit
;
5366 i
.disp_encoding
= disp_encoding_32bit
;
5370 i
.dir_encoding
= dir_encoding_load
;
5374 i
.dir_encoding
= dir_encoding_store
;
5378 i
.vec_encoding
= vex_encoding_vex
;
5382 i
.vec_encoding
= vex_encoding_vex3
;
5386 i
.vec_encoding
= vex_encoding_evex
;
5390 i
.rex_encoding
= true;
5392 case Prefix_NoOptimize
:
5394 i
.no_optimize
= true;
5402 /* Add prefix, checking for repeated prefixes. */
5403 switch (add_prefix (current_templates
->start
->base_opcode
))
5408 if (current_templates
->start
->cpu_flags
.bitfield
.cpuibt
)
5409 i
.notrack_prefix
= current_templates
->start
->name
;
5412 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
5413 i
.hle_prefix
= current_templates
->start
->name
;
5414 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
5415 i
.bnd_prefix
= current_templates
->start
->name
;
5417 i
.rep_prefix
= current_templates
->start
->name
;
5423 /* Skip past PREFIX_SEPARATOR and reset token_start. */
5430 if (!current_templates
)
5432 /* Deprecated functionality (new code should use pseudo-prefixes instead):
5433 Check if we should swap operand or force 32bit displacement in
5435 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
5436 i
.dir_encoding
= dir_encoding_swap
;
5437 else if (mnem_p
- 3 == dot_p
5440 i
.disp_encoding
= disp_encoding_8bit
;
5441 else if (mnem_p
- 4 == dot_p
5445 i
.disp_encoding
= disp_encoding_32bit
;
5450 current_templates
= (const templates
*) str_hash_find (op_hash
, mnemonic
);
5453 if (!current_templates
)
5456 if (mnem_p
> mnemonic
)
5458 /* See if we can get a match by trimming off a suffix. */
5461 case WORD_MNEM_SUFFIX
:
5462 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
5463 i
.suffix
= SHORT_MNEM_SUFFIX
;
5466 case BYTE_MNEM_SUFFIX
:
5467 case QWORD_MNEM_SUFFIX
:
5468 i
.suffix
= mnem_p
[-1];
5471 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5473 case SHORT_MNEM_SUFFIX
:
5474 case LONG_MNEM_SUFFIX
:
5477 i
.suffix
= mnem_p
[-1];
5480 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5488 if (intel_float_operand (mnemonic
) == 1)
5489 i
.suffix
= SHORT_MNEM_SUFFIX
;
5491 i
.suffix
= LONG_MNEM_SUFFIX
;
5494 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5500 if (!current_templates
)
5502 as_bad (_("no such instruction: `%s'"), token_start
);
5507 if (current_templates
->start
->opcode_modifier
.jump
== JUMP
5508 || current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
)
5510 /* Check for a branch hint. We allow ",pt" and ",pn" for
5511 predict taken and predict not taken respectively.
5512 I'm not sure that branch hints actually do anything on loop
5513 and jcxz insns (JumpByte) for current Pentium4 chips. They
5514 may work in the future and it doesn't hurt to accept them
5516 if (l
[0] == ',' && l
[1] == 'p')
5520 if (!add_prefix (DS_PREFIX_OPCODE
))
5524 else if (l
[2] == 'n')
5526 if (!add_prefix (CS_PREFIX_OPCODE
))
5532 /* Any other comma loses. */
5535 as_bad (_("invalid character %s in mnemonic"),
5536 output_invalid (*l
));
5540 /* Check if instruction is supported on specified architecture. */
5542 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
5544 supported
|= cpu_flags_match (t
);
5545 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
5547 if (!cpu_arch_flags
.bitfield
.cpui386
&& (flag_code
!= CODE_16BIT
))
5548 as_warn (_("use .code16 to ensure correct addressing mode"));
5554 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
5555 as_bad (flag_code
== CODE_64BIT
5556 ? _("`%s' is not supported in 64-bit mode")
5557 : _("`%s' is only supported in 64-bit mode"),
5558 current_templates
->start
->name
);
5560 as_bad (_("`%s' is not supported on `%s%s'"),
5561 current_templates
->start
->name
,
5562 cpu_arch_name
? cpu_arch_name
: default_arch
,
5563 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
5569 parse_operands (char *l
, const char *mnemonic
)
5573 /* 1 if operand is pending after ','. */
5574 unsigned int expecting_operand
= 0;
5576 while (*l
!= END_OF_INSN
)
5578 /* Non-zero if operand parens not balanced. */
5579 unsigned int paren_not_balanced
= 0;
5580 /* True if inside double quotes. */
5581 bool in_quotes
= false;
5583 /* Skip optional white space before operand. */
5584 if (is_space_char (*l
))
5586 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
&& *l
!= '"')
5588 as_bad (_("invalid character %s before operand %d"),
5589 output_invalid (*l
),
5593 token_start
= l
; /* After white space. */
5594 while (in_quotes
|| paren_not_balanced
|| *l
!= ',')
5596 if (*l
== END_OF_INSN
)
5600 as_bad (_("unbalanced double quotes in operand %d."),
5604 if (paren_not_balanced
)
5606 know (!intel_syntax
);
5607 as_bad (_("unbalanced parenthesis in operand %d."),
5612 break; /* we are done */
5614 else if (*l
== '\\' && l
[1] == '"')
5617 in_quotes
= !in_quotes
;
5618 else if (!in_quotes
&& !is_operand_char (*l
) && !is_space_char (*l
))
5620 as_bad (_("invalid character %s in operand %d"),
5621 output_invalid (*l
),
5625 if (!intel_syntax
&& !in_quotes
)
5628 ++paren_not_balanced
;
5630 --paren_not_balanced
;
5634 if (l
!= token_start
)
5635 { /* Yes, we've read in another operand. */
5636 unsigned int operand_ok
;
5637 this_operand
= i
.operands
++;
5638 if (i
.operands
> MAX_OPERANDS
)
5640 as_bad (_("spurious operands; (%d operands/instruction max)"),
5644 i
.types
[this_operand
].bitfield
.unspecified
= 1;
5645 /* Now parse operand adding info to 'i' as we go along. */
5646 END_STRING_AND_SAVE (l
);
5648 if (i
.mem_operands
> 1)
5650 as_bad (_("too many memory references for `%s'"),
5657 i386_intel_operand (token_start
,
5658 intel_float_operand (mnemonic
));
5660 operand_ok
= i386_att_operand (token_start
);
5662 RESTORE_END_STRING (l
);
5668 if (expecting_operand
)
5670 expecting_operand_after_comma
:
5671 as_bad (_("expecting operand after ','; got nothing"));
5676 as_bad (_("expecting operand before ','; got nothing"));
5681 /* Now *l must be either ',' or END_OF_INSN. */
5684 if (*++l
== END_OF_INSN
)
5686 /* Just skip it, if it's \n complain. */
5687 goto expecting_operand_after_comma
;
5689 expecting_operand
= 1;
5696 swap_2_operands (unsigned int xchg1
, unsigned int xchg2
)
5698 union i386_op temp_op
;
5699 i386_operand_type temp_type
;
5700 unsigned int temp_flags
;
5701 enum bfd_reloc_code_real temp_reloc
;
5703 temp_type
= i
.types
[xchg2
];
5704 i
.types
[xchg2
] = i
.types
[xchg1
];
5705 i
.types
[xchg1
] = temp_type
;
5707 temp_flags
= i
.flags
[xchg2
];
5708 i
.flags
[xchg2
] = i
.flags
[xchg1
];
5709 i
.flags
[xchg1
] = temp_flags
;
5711 temp_op
= i
.op
[xchg2
];
5712 i
.op
[xchg2
] = i
.op
[xchg1
];
5713 i
.op
[xchg1
] = temp_op
;
5715 temp_reloc
= i
.reloc
[xchg2
];
5716 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
5717 i
.reloc
[xchg1
] = temp_reloc
;
5721 if (i
.mask
.operand
== xchg1
)
5722 i
.mask
.operand
= xchg2
;
5723 else if (i
.mask
.operand
== xchg2
)
5724 i
.mask
.operand
= xchg1
;
5726 if (i
.broadcast
.type
)
5728 if (i
.broadcast
.operand
== xchg1
)
5729 i
.broadcast
.operand
= xchg2
;
5730 else if (i
.broadcast
.operand
== xchg2
)
5731 i
.broadcast
.operand
= xchg1
;
5733 if (i
.rounding
.type
!= rc_none
)
5735 if (i
.rounding
.operand
== xchg1
)
5736 i
.rounding
.operand
= xchg2
;
5737 else if (i
.rounding
.operand
== xchg2
)
5738 i
.rounding
.operand
= xchg1
;
5743 swap_operands (void)
5749 swap_2_operands (1, i
.operands
- 2);
5753 swap_2_operands (0, i
.operands
- 1);
5759 if (i
.mem_operands
== 2)
5761 const reg_entry
*temp_seg
;
5762 temp_seg
= i
.seg
[0];
5763 i
.seg
[0] = i
.seg
[1];
5764 i
.seg
[1] = temp_seg
;
5768 /* Try to ensure constant immediates are represented in the smallest
5773 char guess_suffix
= 0;
5777 guess_suffix
= i
.suffix
;
5778 else if (i
.reg_operands
)
5780 /* Figure out a suffix from the last register operand specified.
5781 We can't do this properly yet, i.e. excluding special register
5782 instances, but the following works for instructions with
5783 immediates. In any case, we can't set i.suffix yet. */
5784 for (op
= i
.operands
; --op
>= 0;)
5785 if (i
.types
[op
].bitfield
.class != Reg
)
5787 else if (i
.types
[op
].bitfield
.byte
)
5789 guess_suffix
= BYTE_MNEM_SUFFIX
;
5792 else if (i
.types
[op
].bitfield
.word
)
5794 guess_suffix
= WORD_MNEM_SUFFIX
;
5797 else if (i
.types
[op
].bitfield
.dword
)
5799 guess_suffix
= LONG_MNEM_SUFFIX
;
5802 else if (i
.types
[op
].bitfield
.qword
)
5804 guess_suffix
= QWORD_MNEM_SUFFIX
;
5808 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
5809 guess_suffix
= WORD_MNEM_SUFFIX
;
5811 for (op
= i
.operands
; --op
>= 0;)
5812 if (operand_type_check (i
.types
[op
], imm
))
5814 switch (i
.op
[op
].imms
->X_op
)
5817 /* If a suffix is given, this operand may be shortened. */
5818 switch (guess_suffix
)
5820 case LONG_MNEM_SUFFIX
:
5821 i
.types
[op
].bitfield
.imm32
= 1;
5822 i
.types
[op
].bitfield
.imm64
= 1;
5824 case WORD_MNEM_SUFFIX
:
5825 i
.types
[op
].bitfield
.imm16
= 1;
5826 i
.types
[op
].bitfield
.imm32
= 1;
5827 i
.types
[op
].bitfield
.imm32s
= 1;
5828 i
.types
[op
].bitfield
.imm64
= 1;
5830 case BYTE_MNEM_SUFFIX
:
5831 i
.types
[op
].bitfield
.imm8
= 1;
5832 i
.types
[op
].bitfield
.imm8s
= 1;
5833 i
.types
[op
].bitfield
.imm16
= 1;
5834 i
.types
[op
].bitfield
.imm32
= 1;
5835 i
.types
[op
].bitfield
.imm32s
= 1;
5836 i
.types
[op
].bitfield
.imm64
= 1;
5840 /* If this operand is at most 16 bits, convert it
5841 to a signed 16 bit number before trying to see
5842 whether it will fit in an even smaller size.
5843 This allows a 16-bit operand such as $0xffe0 to
5844 be recognised as within Imm8S range. */
5845 if ((i
.types
[op
].bitfield
.imm16
)
5846 && fits_in_unsigned_word (i
.op
[op
].imms
->X_add_number
))
5848 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
5849 ^ 0x8000) - 0x8000);
5852 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
5853 if ((i
.types
[op
].bitfield
.imm32
)
5854 && fits_in_unsigned_long (i
.op
[op
].imms
->X_add_number
))
5856 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
5857 ^ ((offsetT
) 1 << 31))
5858 - ((offsetT
) 1 << 31));
5862 = operand_type_or (i
.types
[op
],
5863 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
5865 /* We must avoid matching of Imm32 templates when 64bit
5866 only immediate is available. */
5867 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
5868 i
.types
[op
].bitfield
.imm32
= 0;
5875 /* Symbols and expressions. */
5877 /* Convert symbolic operand to proper sizes for matching, but don't
5878 prevent matching a set of insns that only supports sizes other
5879 than those matching the insn suffix. */
5881 i386_operand_type mask
, allowed
;
5882 const insn_template
*t
= current_templates
->start
;
5884 operand_type_set (&mask
, 0);
5885 allowed
= t
->operand_types
[op
];
5887 while (++t
< current_templates
->end
)
5889 allowed
= operand_type_and (allowed
, anyimm
);
5890 allowed
= operand_type_or (allowed
, t
->operand_types
[op
]);
5892 switch (guess_suffix
)
5894 case QWORD_MNEM_SUFFIX
:
5895 mask
.bitfield
.imm64
= 1;
5896 mask
.bitfield
.imm32s
= 1;
5898 case LONG_MNEM_SUFFIX
:
5899 mask
.bitfield
.imm32
= 1;
5901 case WORD_MNEM_SUFFIX
:
5902 mask
.bitfield
.imm16
= 1;
5904 case BYTE_MNEM_SUFFIX
:
5905 mask
.bitfield
.imm8
= 1;
5910 allowed
= operand_type_and (mask
, allowed
);
5911 if (!operand_type_all_zero (&allowed
))
5912 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
5919 /* Try to use the smallest displacement type too. */
5921 optimize_disp (void)
5925 for (op
= i
.operands
; --op
>= 0;)
5926 if (operand_type_check (i
.types
[op
], disp
))
5928 if (i
.op
[op
].disps
->X_op
== O_constant
)
5930 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
5932 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
5934 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
5935 i
.op
[op
].disps
= NULL
;
5940 if (i
.types
[op
].bitfield
.disp16
5941 && fits_in_unsigned_word (op_disp
))
5943 /* If this operand is at most 16 bits, convert
5944 to a signed 16 bit number and don't use 64bit
5946 op_disp
= ((op_disp
^ 0x8000) - 0x8000);
5947 i
.types
[op
].bitfield
.disp64
= 0;
5951 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
5952 if ((i
.types
[op
].bitfield
.disp32
5953 || (flag_code
== CODE_64BIT
5954 && want_disp32 (current_templates
->start
)))
5955 && fits_in_unsigned_long (op_disp
))
5957 /* If this operand is at most 32 bits, convert
5958 to a signed 32 bit number and don't use 64bit
5960 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
5961 i
.types
[op
].bitfield
.disp64
= 0;
5962 i
.types
[op
].bitfield
.disp32
= 1;
5965 if (flag_code
== CODE_64BIT
&& fits_in_signed_long (op_disp
))
5967 i
.types
[op
].bitfield
.disp64
= 0;
5968 i
.types
[op
].bitfield
.disp32s
= 1;
5971 if ((i
.types
[op
].bitfield
.disp32
5972 || i
.types
[op
].bitfield
.disp32s
5973 || i
.types
[op
].bitfield
.disp16
)
5974 && fits_in_disp8 (op_disp
))
5975 i
.types
[op
].bitfield
.disp8
= 1;
5977 i
.op
[op
].disps
->X_add_number
= op_disp
;
5979 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
5980 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
5982 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
5983 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
5984 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
5987 /* We only support 64bit displacement on constants. */
5988 i
.types
[op
].bitfield
.disp64
= 0;
5992 /* Return 1 if there is a match in broadcast bytes between operand
5993 GIVEN and instruction template T. */
5996 match_broadcast_size (const insn_template
*t
, unsigned int given
)
5998 return ((t
->opcode_modifier
.broadcast
== BYTE_BROADCAST
5999 && i
.types
[given
].bitfield
.byte
)
6000 || (t
->opcode_modifier
.broadcast
== WORD_BROADCAST
6001 && i
.types
[given
].bitfield
.word
)
6002 || (t
->opcode_modifier
.broadcast
== DWORD_BROADCAST
6003 && i
.types
[given
].bitfield
.dword
)
6004 || (t
->opcode_modifier
.broadcast
== QWORD_BROADCAST
6005 && i
.types
[given
].bitfield
.qword
));
6008 /* Check if operands are valid for the instruction. */
6011 check_VecOperands (const insn_template
*t
)
6016 /* Templates allowing for ZMMword as well as YMMword and/or XMMword for
6017 any one operand are implicity requiring AVX512VL support if the actual
6018 operand size is YMMword or XMMword. Since this function runs after
6019 template matching, there's no need to check for YMMword/XMMword in
6021 cpu
= cpu_flags_and (t
->cpu_flags
, avx512
);
6022 if (!cpu_flags_all_zero (&cpu
)
6023 && !t
->cpu_flags
.bitfield
.cpuavx512vl
6024 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
6026 for (op
= 0; op
< t
->operands
; ++op
)
6028 if (t
->operand_types
[op
].bitfield
.zmmword
6029 && (i
.types
[op
].bitfield
.ymmword
6030 || i
.types
[op
].bitfield
.xmmword
))
6032 i
.error
= unsupported
;
6038 /* Without VSIB byte, we can't have a vector register for index. */
6039 if (!t
->opcode_modifier
.sib
6041 && (i
.index_reg
->reg_type
.bitfield
.xmmword
6042 || i
.index_reg
->reg_type
.bitfield
.ymmword
6043 || i
.index_reg
->reg_type
.bitfield
.zmmword
))
6045 i
.error
= unsupported_vector_index_register
;
6049 /* Check if default mask is allowed. */
6050 if (t
->opcode_modifier
.nodefmask
6051 && (!i
.mask
.reg
|| i
.mask
.reg
->reg_num
== 0))
6053 i
.error
= no_default_mask
;
6057 /* For VSIB byte, we need a vector register for index, and all vector
6058 registers must be distinct. */
6059 if (t
->opcode_modifier
.sib
&& t
->opcode_modifier
.sib
!= SIBMEM
)
6062 || !((t
->opcode_modifier
.sib
== VECSIB128
6063 && i
.index_reg
->reg_type
.bitfield
.xmmword
)
6064 || (t
->opcode_modifier
.sib
== VECSIB256
6065 && i
.index_reg
->reg_type
.bitfield
.ymmword
)
6066 || (t
->opcode_modifier
.sib
== VECSIB512
6067 && i
.index_reg
->reg_type
.bitfield
.zmmword
)))
6069 i
.error
= invalid_vsib_address
;
6073 gas_assert (i
.reg_operands
== 2 || i
.mask
.reg
);
6074 if (i
.reg_operands
== 2 && !i
.mask
.reg
)
6076 gas_assert (i
.types
[0].bitfield
.class == RegSIMD
);
6077 gas_assert (i
.types
[0].bitfield
.xmmword
6078 || i
.types
[0].bitfield
.ymmword
);
6079 gas_assert (i
.types
[2].bitfield
.class == RegSIMD
);
6080 gas_assert (i
.types
[2].bitfield
.xmmword
6081 || i
.types
[2].bitfield
.ymmword
);
6082 if (operand_check
== check_none
)
6084 if (register_number (i
.op
[0].regs
)
6085 != register_number (i
.index_reg
)
6086 && register_number (i
.op
[2].regs
)
6087 != register_number (i
.index_reg
)
6088 && register_number (i
.op
[0].regs
)
6089 != register_number (i
.op
[2].regs
))
6091 if (operand_check
== check_error
)
6093 i
.error
= invalid_vector_register_set
;
6096 as_warn (_("mask, index, and destination registers should be distinct"));
6098 else if (i
.reg_operands
== 1 && i
.mask
.reg
)
6100 if (i
.types
[1].bitfield
.class == RegSIMD
6101 && (i
.types
[1].bitfield
.xmmword
6102 || i
.types
[1].bitfield
.ymmword
6103 || i
.types
[1].bitfield
.zmmword
)
6104 && (register_number (i
.op
[1].regs
)
6105 == register_number (i
.index_reg
)))
6107 if (operand_check
== check_error
)
6109 i
.error
= invalid_vector_register_set
;
6112 if (operand_check
!= check_none
)
6113 as_warn (_("index and destination registers should be distinct"));
6118 /* For AMX instructions with 3 TMM register operands, all operands
6119 must be distinct. */
6120 if (i
.reg_operands
== 3
6121 && t
->operand_types
[0].bitfield
.tmmword
6122 && (i
.op
[0].regs
== i
.op
[1].regs
6123 || i
.op
[0].regs
== i
.op
[2].regs
6124 || i
.op
[1].regs
== i
.op
[2].regs
))
6126 i
.error
= invalid_tmm_register_set
;
6130 /* For some special instructions require that destination must be distinct
6131 from source registers. */
6132 if (t
->opcode_modifier
.distinctdest
)
6134 unsigned int dest_reg
= i
.operands
- 1;
6136 know (i
.operands
>= 3);
6138 /* #UD if dest_reg == src1_reg or dest_reg == src2_reg. */
6139 if (i
.op
[dest_reg
- 1].regs
== i
.op
[dest_reg
].regs
6140 || (i
.reg_operands
> 2
6141 && i
.op
[dest_reg
- 2].regs
== i
.op
[dest_reg
].regs
))
6143 i
.error
= invalid_dest_and_src_register_set
;
6148 /* Check if broadcast is supported by the instruction and is applied
6149 to the memory operand. */
6150 if (i
.broadcast
.type
)
6152 i386_operand_type type
, overlap
;
6154 /* Check if specified broadcast is supported in this instruction,
6155 and its broadcast bytes match the memory operand. */
6156 op
= i
.broadcast
.operand
;
6157 if (!t
->opcode_modifier
.broadcast
6158 || !(i
.flags
[op
] & Operand_Mem
)
6159 || (!i
.types
[op
].bitfield
.unspecified
6160 && !match_broadcast_size (t
, op
)))
6163 i
.error
= unsupported_broadcast
;
6167 i
.broadcast
.bytes
= ((1 << (t
->opcode_modifier
.broadcast
- 1))
6168 * i
.broadcast
.type
);
6169 operand_type_set (&type
, 0);
6170 switch (i
.broadcast
.bytes
)
6173 type
.bitfield
.word
= 1;
6176 type
.bitfield
.dword
= 1;
6179 type
.bitfield
.qword
= 1;
6182 type
.bitfield
.xmmword
= 1;
6185 type
.bitfield
.ymmword
= 1;
6188 type
.bitfield
.zmmword
= 1;
6194 overlap
= operand_type_and (type
, t
->operand_types
[op
]);
6195 if (t
->operand_types
[op
].bitfield
.class == RegSIMD
6196 && t
->operand_types
[op
].bitfield
.byte
6197 + t
->operand_types
[op
].bitfield
.word
6198 + t
->operand_types
[op
].bitfield
.dword
6199 + t
->operand_types
[op
].bitfield
.qword
> 1)
6201 overlap
.bitfield
.xmmword
= 0;
6202 overlap
.bitfield
.ymmword
= 0;
6203 overlap
.bitfield
.zmmword
= 0;
6205 if (operand_type_all_zero (&overlap
))
6208 if (t
->opcode_modifier
.checkregsize
)
6212 type
.bitfield
.baseindex
= 1;
6213 for (j
= 0; j
< i
.operands
; ++j
)
6216 && !operand_type_register_match(i
.types
[j
],
6217 t
->operand_types
[j
],
6219 t
->operand_types
[op
]))
6224 /* If broadcast is supported in this instruction, we need to check if
6225 operand of one-element size isn't specified without broadcast. */
6226 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
6228 /* Find memory operand. */
6229 for (op
= 0; op
< i
.operands
; op
++)
6230 if (i
.flags
[op
] & Operand_Mem
)
6232 gas_assert (op
< i
.operands
);
6233 /* Check size of the memory operand. */
6234 if (match_broadcast_size (t
, op
))
6236 i
.error
= broadcast_needed
;
6241 op
= MAX_OPERANDS
- 1; /* Avoid uninitialized variable warning. */
6243 /* Check if requested masking is supported. */
6246 switch (t
->opcode_modifier
.masking
)
6250 case MERGING_MASKING
:
6254 i
.error
= unsupported_masking
;
6258 case DYNAMIC_MASKING
:
6259 /* Memory destinations allow only merging masking. */
6260 if (i
.mask
.zeroing
&& i
.mem_operands
)
6262 /* Find memory operand. */
6263 for (op
= 0; op
< i
.operands
; op
++)
6264 if (i
.flags
[op
] & Operand_Mem
)
6266 gas_assert (op
< i
.operands
);
6267 if (op
== i
.operands
- 1)
6269 i
.error
= unsupported_masking
;
6279 /* Check if masking is applied to dest operand. */
6280 if (i
.mask
.reg
&& (i
.mask
.operand
!= i
.operands
- 1))
6282 i
.error
= mask_not_on_destination
;
6287 if (i
.rounding
.type
!= rc_none
)
6289 if (!t
->opcode_modifier
.sae
6290 || (i
.rounding
.type
!= saeonly
&& !t
->opcode_modifier
.staticrounding
))
6292 i
.error
= unsupported_rc_sae
;
6295 /* If the instruction has several immediate operands and one of
6296 them is rounding, the rounding operand should be the last
6297 immediate operand. */
6298 if (i
.imm_operands
> 1
6299 && i
.rounding
.operand
!= i
.imm_operands
- 1)
6301 i
.error
= rc_sae_operand_not_last_imm
;
6306 /* Check the special Imm4 cases; must be the first operand. */
6307 if (t
->cpu_flags
.bitfield
.cpuxop
&& t
->operands
== 5)
6309 if (i
.op
[0].imms
->X_op
!= O_constant
6310 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
6316 /* Turn off Imm<N> so that update_imm won't complain. */
6317 operand_type_set (&i
.types
[0], 0);
6320 /* Check vector Disp8 operand. */
6321 if (t
->opcode_modifier
.disp8memshift
6322 && i
.disp_encoding
!= disp_encoding_32bit
)
6324 if (i
.broadcast
.type
)
6325 i
.memshift
= t
->opcode_modifier
.broadcast
- 1;
6326 else if (t
->opcode_modifier
.disp8memshift
!= DISP8_SHIFT_VL
)
6327 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
6330 const i386_operand_type
*type
= NULL
;
6333 for (op
= 0; op
< i
.operands
; op
++)
6334 if (i
.flags
[op
] & Operand_Mem
)
6336 if (t
->opcode_modifier
.evex
== EVEXLIG
)
6337 i
.memshift
= 2 + (i
.suffix
== QWORD_MNEM_SUFFIX
);
6338 else if (t
->operand_types
[op
].bitfield
.xmmword
6339 + t
->operand_types
[op
].bitfield
.ymmword
6340 + t
->operand_types
[op
].bitfield
.zmmword
<= 1)
6341 type
= &t
->operand_types
[op
];
6342 else if (!i
.types
[op
].bitfield
.unspecified
)
6343 type
= &i
.types
[op
];
6345 else if (i
.types
[op
].bitfield
.class == RegSIMD
6346 && t
->opcode_modifier
.evex
!= EVEXLIG
)
6348 if (i
.types
[op
].bitfield
.zmmword
)
6350 else if (i
.types
[op
].bitfield
.ymmword
&& i
.memshift
< 5)
6352 else if (i
.types
[op
].bitfield
.xmmword
&& i
.memshift
< 4)
6358 if (type
->bitfield
.zmmword
)
6360 else if (type
->bitfield
.ymmword
)
6362 else if (type
->bitfield
.xmmword
)
6366 /* For the check in fits_in_disp8(). */
6367 if (i
.memshift
== 0)
6371 for (op
= 0; op
< i
.operands
; op
++)
6372 if (operand_type_check (i
.types
[op
], disp
)
6373 && i
.op
[op
].disps
->X_op
== O_constant
)
6375 if (fits_in_disp8 (i
.op
[op
].disps
->X_add_number
))
6377 i
.types
[op
].bitfield
.disp8
= 1;
6380 i
.types
[op
].bitfield
.disp8
= 0;
6389 /* Check if encoding requirements are met by the instruction. */
6392 VEX_check_encoding (const insn_template
*t
)
6394 if (i
.vec_encoding
== vex_encoding_error
)
6396 i
.error
= unsupported
;
6400 if (i
.vec_encoding
== vex_encoding_evex
)
6402 /* This instruction must be encoded with EVEX prefix. */
6403 if (!is_evex_encoding (t
))
6405 i
.error
= unsupported
;
6411 if (!t
->opcode_modifier
.vex
)
6413 /* This instruction template doesn't have VEX prefix. */
6414 if (i
.vec_encoding
!= vex_encoding_default
)
6416 i
.error
= unsupported
;
6425 static const insn_template
*
6426 match_template (char mnem_suffix
)
6428 /* Points to template once we've found it. */
6429 const insn_template
*t
;
6430 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
6431 i386_operand_type overlap4
;
6432 unsigned int found_reverse_match
;
6433 i386_opcode_modifier suffix_check
;
6434 i386_operand_type operand_types
[MAX_OPERANDS
];
6435 int addr_prefix_disp
;
6436 unsigned int j
, size_match
, check_register
;
6437 enum i386_error specific_error
= 0;
6439 #if MAX_OPERANDS != 5
6440 # error "MAX_OPERANDS must be 5."
6443 found_reverse_match
= 0;
6444 addr_prefix_disp
= -1;
6446 /* Prepare for mnemonic suffix check. */
6447 memset (&suffix_check
, 0, sizeof (suffix_check
));
6448 switch (mnem_suffix
)
6450 case BYTE_MNEM_SUFFIX
:
6451 suffix_check
.no_bsuf
= 1;
6453 case WORD_MNEM_SUFFIX
:
6454 suffix_check
.no_wsuf
= 1;
6456 case SHORT_MNEM_SUFFIX
:
6457 suffix_check
.no_ssuf
= 1;
6459 case LONG_MNEM_SUFFIX
:
6460 suffix_check
.no_lsuf
= 1;
6462 case QWORD_MNEM_SUFFIX
:
6463 suffix_check
.no_qsuf
= 1;
6466 /* NB: In Intel syntax, normally we can check for memory operand
6467 size when there is no mnemonic suffix. But jmp and call have
6468 2 different encodings with Dword memory operand size, one with
6469 No_ldSuf and the other without. i.suffix is set to
6470 LONG_DOUBLE_MNEM_SUFFIX to skip the one with No_ldSuf. */
6471 if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
6472 suffix_check
.no_ldsuf
= 1;
6475 /* Must have right number of operands. */
6476 i
.error
= number_of_operands_mismatch
;
6478 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
6480 addr_prefix_disp
= -1;
6481 found_reverse_match
= 0;
6483 if (i
.operands
!= t
->operands
)
6486 /* Check processor support. */
6487 i
.error
= unsupported
;
6488 if (cpu_flags_match (t
) != CPU_FLAGS_PERFECT_MATCH
)
6491 /* Check Pseudo Prefix. */
6492 i
.error
= unsupported
;
6493 if (t
->opcode_modifier
.pseudovexprefix
6494 && !(i
.vec_encoding
== vex_encoding_vex
6495 || i
.vec_encoding
== vex_encoding_vex3
))
6498 /* Check AT&T mnemonic. */
6499 i
.error
= unsupported_with_intel_mnemonic
;
6500 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
6503 /* Check AT&T/Intel syntax. */
6504 i
.error
= unsupported_syntax
;
6505 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
6506 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
))
6509 /* Check Intel64/AMD64 ISA. */
6513 /* Default: Don't accept Intel64. */
6514 if (t
->opcode_modifier
.isa64
== INTEL64
)
6518 /* -mamd64: Don't accept Intel64 and Intel64 only. */
6519 if (t
->opcode_modifier
.isa64
>= INTEL64
)
6523 /* -mintel64: Don't accept AMD64. */
6524 if (t
->opcode_modifier
.isa64
== AMD64
&& flag_code
== CODE_64BIT
)
6529 /* Check the suffix. */
6530 i
.error
= invalid_instruction_suffix
;
6531 if ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
6532 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
6533 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
6534 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
6535 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
6536 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
))
6539 size_match
= operand_size_match (t
);
6543 /* This is intentionally not
6545 if (i.jumpabsolute != (t->opcode_modifier.jump == JUMP_ABSOLUTE))
6547 as the case of a missing * on the operand is accepted (perhaps with
6548 a warning, issued further down). */
6549 if (i
.jumpabsolute
&& t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
6551 i
.error
= operand_type_mismatch
;
6555 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6556 operand_types
[j
] = t
->operand_types
[j
];
6558 /* In general, don't allow
6559 - 64-bit operands outside of 64-bit mode,
6560 - 32-bit operands on pre-386. */
6561 j
= i
.imm_operands
+ (t
->operands
> i
.imm_operands
+ 1);
6562 if (((i
.suffix
== QWORD_MNEM_SUFFIX
6563 && flag_code
!= CODE_64BIT
6564 && !(t
->opcode_modifier
.opcodespace
== SPACE_0F
6565 && t
->base_opcode
== 0xc7
6566 && t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
6567 && t
->extension_opcode
== 1) /* cmpxchg8b */)
6568 || (i
.suffix
== LONG_MNEM_SUFFIX
6569 && !cpu_arch_flags
.bitfield
.cpui386
))
6571 ? (t
->opcode_modifier
.mnemonicsize
!= IGNORESIZE
6572 && !intel_float_operand (t
->name
))
6573 : intel_float_operand (t
->name
) != 2)
6574 && (t
->operands
== i
.imm_operands
6575 || (operand_types
[i
.imm_operands
].bitfield
.class != RegMMX
6576 && operand_types
[i
.imm_operands
].bitfield
.class != RegSIMD
6577 && operand_types
[i
.imm_operands
].bitfield
.class != RegMask
)
6578 || (operand_types
[j
].bitfield
.class != RegMMX
6579 && operand_types
[j
].bitfield
.class != RegSIMD
6580 && operand_types
[j
].bitfield
.class != RegMask
))
6581 && !t
->opcode_modifier
.sib
)
6584 /* Do not verify operands when there are none. */
6587 if (VEX_check_encoding (t
))
6589 specific_error
= i
.error
;
6593 /* We've found a match; break out of loop. */
6597 if (!t
->opcode_modifier
.jump
6598 || t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)
6600 /* There should be only one Disp operand. */
6601 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6602 if (operand_type_check (operand_types
[j
], disp
))
6604 if (j
< MAX_OPERANDS
)
6606 bool override
= (i
.prefix
[ADDR_PREFIX
] != 0);
6608 addr_prefix_disp
= j
;
6610 /* Address size prefix will turn Disp64/Disp32S/Disp32/Disp16
6611 operand into Disp32/Disp32/Disp16/Disp32 operand. */
6615 override
= !override
;
6618 if (operand_types
[j
].bitfield
.disp32
6619 && operand_types
[j
].bitfield
.disp16
)
6621 operand_types
[j
].bitfield
.disp16
= override
;
6622 operand_types
[j
].bitfield
.disp32
= !override
;
6624 operand_types
[j
].bitfield
.disp32s
= 0;
6625 operand_types
[j
].bitfield
.disp64
= 0;
6629 if (operand_types
[j
].bitfield
.disp32s
6630 || operand_types
[j
].bitfield
.disp64
)
6632 operand_types
[j
].bitfield
.disp64
&= !override
;
6633 operand_types
[j
].bitfield
.disp32s
&= !override
;
6634 operand_types
[j
].bitfield
.disp32
= override
;
6636 operand_types
[j
].bitfield
.disp16
= 0;
6644 case BFD_RELOC_386_GOT32
:
6645 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
6646 if (t
->base_opcode
== 0xa0
6647 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
)
6650 case BFD_RELOC_386_TLS_GOTIE
:
6651 case BFD_RELOC_386_TLS_LE_32
:
6652 case BFD_RELOC_X86_64_GOTTPOFF
:
6653 case BFD_RELOC_X86_64_TLSLD
:
6654 /* Don't allow KMOV in TLS code sequences. */
6655 if (t
->opcode_modifier
.vex
)
6662 /* We check register size if needed. */
6663 if (t
->opcode_modifier
.checkregsize
)
6665 check_register
= (1 << t
->operands
) - 1;
6666 if (i
.broadcast
.type
)
6667 check_register
&= ~(1 << i
.broadcast
.operand
);
6672 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
6673 switch (t
->operands
)
6676 if (!operand_type_match (overlap0
, i
.types
[0]))
6680 /* xchg %eax, %eax is a special case. It is an alias for nop
6681 only in 32bit mode and we can use opcode 0x90. In 64bit
6682 mode, we can't use 0x90 for xchg %eax, %eax since it should
6683 zero-extend %eax to %rax. */
6684 if (flag_code
== CODE_64BIT
6685 && t
->base_opcode
== 0x90
6686 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
6687 && i
.types
[0].bitfield
.instance
== Accum
6688 && i
.types
[0].bitfield
.dword
6689 && i
.types
[1].bitfield
.instance
== Accum
6690 && i
.types
[1].bitfield
.dword
)
6692 /* xrelease mov %eax, <disp> is another special case. It must not
6693 match the accumulator-only encoding of mov. */
6694 if (flag_code
!= CODE_64BIT
6696 && t
->base_opcode
== 0xa0
6697 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
6698 && i
.types
[0].bitfield
.instance
== Accum
6699 && (i
.flags
[1] & Operand_Mem
))
6704 if (!(size_match
& MATCH_STRAIGHT
))
6706 /* Reverse direction of operands if swapping is possible in the first
6707 place (operands need to be symmetric) and
6708 - the load form is requested, and the template is a store form,
6709 - the store form is requested, and the template is a load form,
6710 - the non-default (swapped) form is requested. */
6711 overlap1
= operand_type_and (operand_types
[0], operand_types
[1]);
6712 if (t
->opcode_modifier
.d
&& i
.reg_operands
== i
.operands
6713 && !operand_type_all_zero (&overlap1
))
6714 switch (i
.dir_encoding
)
6716 case dir_encoding_load
:
6717 if (operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6718 || t
->opcode_modifier
.regmem
)
6722 case dir_encoding_store
:
6723 if (!operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6724 && !t
->opcode_modifier
.regmem
)
6728 case dir_encoding_swap
:
6731 case dir_encoding_default
:
6734 /* If we want store form, we skip the current load. */
6735 if ((i
.dir_encoding
== dir_encoding_store
6736 || i
.dir_encoding
== dir_encoding_swap
)
6737 && i
.mem_operands
== 0
6738 && t
->opcode_modifier
.load
)
6743 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
6744 if (!operand_type_match (overlap0
, i
.types
[0])
6745 || !operand_type_match (overlap1
, i
.types
[1])
6746 || ((check_register
& 3) == 3
6747 && !operand_type_register_match (i
.types
[0],
6752 /* Check if other direction is valid ... */
6753 if (!t
->opcode_modifier
.d
)
6757 if (!(size_match
& MATCH_REVERSE
))
6759 /* Try reversing direction of operands. */
6760 overlap0
= operand_type_and (i
.types
[0], operand_types
[i
.operands
- 1]);
6761 overlap1
= operand_type_and (i
.types
[i
.operands
- 1], operand_types
[0]);
6762 if (!operand_type_match (overlap0
, i
.types
[0])
6763 || !operand_type_match (overlap1
, i
.types
[i
.operands
- 1])
6765 && !operand_type_register_match (i
.types
[0],
6766 operand_types
[i
.operands
- 1],
6767 i
.types
[i
.operands
- 1],
6770 /* Does not match either direction. */
6773 /* found_reverse_match holds which of D or FloatR
6775 if (!t
->opcode_modifier
.d
)
6776 found_reverse_match
= 0;
6777 else if (operand_types
[0].bitfield
.tbyte
)
6778 found_reverse_match
= Opcode_FloatD
;
6779 else if (operand_types
[0].bitfield
.xmmword
6780 || operand_types
[i
.operands
- 1].bitfield
.xmmword
6781 || operand_types
[0].bitfield
.class == RegMMX
6782 || operand_types
[i
.operands
- 1].bitfield
.class == RegMMX
6783 || is_any_vex_encoding(t
))
6784 found_reverse_match
= (t
->base_opcode
& 0xee) != 0x6e
6785 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
6787 found_reverse_match
= Opcode_D
;
6788 if (t
->opcode_modifier
.floatr
)
6789 found_reverse_match
|= Opcode_FloatR
;
6793 /* Found a forward 2 operand match here. */
6794 switch (t
->operands
)
6797 overlap4
= operand_type_and (i
.types
[4],
6801 overlap3
= operand_type_and (i
.types
[3],
6805 overlap2
= operand_type_and (i
.types
[2],
6810 switch (t
->operands
)
6813 if (!operand_type_match (overlap4
, i
.types
[4])
6814 || !operand_type_register_match (i
.types
[3],
6821 if (!operand_type_match (overlap3
, i
.types
[3])
6822 || ((check_register
& 0xa) == 0xa
6823 && !operand_type_register_match (i
.types
[1],
6827 || ((check_register
& 0xc) == 0xc
6828 && !operand_type_register_match (i
.types
[2],
6835 /* Here we make use of the fact that there are no
6836 reverse match 3 operand instructions. */
6837 if (!operand_type_match (overlap2
, i
.types
[2])
6838 || ((check_register
& 5) == 5
6839 && !operand_type_register_match (i
.types
[0],
6843 || ((check_register
& 6) == 6
6844 && !operand_type_register_match (i
.types
[1],
6852 /* Found either forward/reverse 2, 3 or 4 operand match here:
6853 slip through to break. */
6856 /* Check if vector operands are valid. */
6857 if (check_VecOperands (t
))
6859 specific_error
= i
.error
;
6863 /* Check if VEX/EVEX encoding requirements can be satisfied. */
6864 if (VEX_check_encoding (t
))
6866 specific_error
= i
.error
;
6870 /* We've found a match; break out of loop. */
6874 if (t
== current_templates
->end
)
6876 /* We found no match. */
6877 const char *err_msg
;
6878 switch (specific_error
? specific_error
: i
.error
)
6882 case operand_size_mismatch
:
6883 err_msg
= _("operand size mismatch");
6885 case operand_type_mismatch
:
6886 err_msg
= _("operand type mismatch");
6888 case register_type_mismatch
:
6889 err_msg
= _("register type mismatch");
6891 case number_of_operands_mismatch
:
6892 err_msg
= _("number of operands mismatch");
6894 case invalid_instruction_suffix
:
6895 err_msg
= _("invalid instruction suffix");
6898 err_msg
= _("constant doesn't fit in 4 bits");
6900 case unsupported_with_intel_mnemonic
:
6901 err_msg
= _("unsupported with Intel mnemonic");
6903 case unsupported_syntax
:
6904 err_msg
= _("unsupported syntax");
6907 as_bad (_("unsupported instruction `%s'"),
6908 current_templates
->start
->name
);
6910 case invalid_sib_address
:
6911 err_msg
= _("invalid SIB address");
6913 case invalid_vsib_address
:
6914 err_msg
= _("invalid VSIB address");
6916 case invalid_vector_register_set
:
6917 err_msg
= _("mask, index, and destination registers must be distinct");
6919 case invalid_tmm_register_set
:
6920 err_msg
= _("all tmm registers must be distinct");
6922 case invalid_dest_and_src_register_set
:
6923 err_msg
= _("destination and source registers must be distinct");
6925 case unsupported_vector_index_register
:
6926 err_msg
= _("unsupported vector index register");
6928 case unsupported_broadcast
:
6929 err_msg
= _("unsupported broadcast");
6931 case broadcast_needed
:
6932 err_msg
= _("broadcast is needed for operand of such type");
6934 case unsupported_masking
:
6935 err_msg
= _("unsupported masking");
6937 case mask_not_on_destination
:
6938 err_msg
= _("mask not on destination operand");
6940 case no_default_mask
:
6941 err_msg
= _("default mask isn't allowed");
6943 case unsupported_rc_sae
:
6944 err_msg
= _("unsupported static rounding/sae");
6946 case rc_sae_operand_not_last_imm
:
6948 err_msg
= _("RC/SAE operand must precede immediate operands");
6950 err_msg
= _("RC/SAE operand must follow immediate operands");
6952 case invalid_register_operand
:
6953 err_msg
= _("invalid register operand");
6956 as_bad (_("%s for `%s'"), err_msg
,
6957 current_templates
->start
->name
);
6961 if (!quiet_warnings
)
6964 && (i
.jumpabsolute
!= (t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)))
6965 as_warn (_("indirect %s without `*'"), t
->name
);
6967 if (t
->opcode_modifier
.isprefix
6968 && t
->opcode_modifier
.mnemonicsize
== IGNORESIZE
)
6970 /* Warn them that a data or address size prefix doesn't
6971 affect assembly of the next line of code. */
6972 as_warn (_("stand-alone `%s' prefix"), t
->name
);
6976 /* Copy the template we found. */
6977 install_template (t
);
6979 if (addr_prefix_disp
!= -1)
6980 i
.tm
.operand_types
[addr_prefix_disp
]
6981 = operand_types
[addr_prefix_disp
];
6983 if (found_reverse_match
)
6985 /* If we found a reverse match we must alter the opcode direction
6986 bit and clear/flip the regmem modifier one. found_reverse_match
6987 holds bits to change (different for int & float insns). */
6989 i
.tm
.base_opcode
^= found_reverse_match
;
6991 i
.tm
.operand_types
[0] = operand_types
[i
.operands
- 1];
6992 i
.tm
.operand_types
[i
.operands
- 1] = operand_types
[0];
6994 /* Certain SIMD insns have their load forms specified in the opcode
6995 table, and hence we need to _set_ RegMem instead of clearing it.
6996 We need to avoid setting the bit though on insns like KMOVW. */
6997 i
.tm
.opcode_modifier
.regmem
6998 = i
.tm
.opcode_modifier
.modrm
&& i
.tm
.opcode_modifier
.d
6999 && i
.tm
.operands
> 2U - i
.tm
.opcode_modifier
.sse2avx
7000 && !i
.tm
.opcode_modifier
.regmem
;
7009 unsigned int es_op
= i
.tm
.opcode_modifier
.isstring
- IS_STRING_ES_OP0
;
7010 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.baseindex
? es_op
: 0;
7012 if (i
.seg
[op
] != NULL
&& i
.seg
[op
] != reg_es
)
7014 as_bad (_("`%s' operand %u must use `%ses' segment"),
7016 intel_syntax
? i
.tm
.operands
- es_op
: es_op
+ 1,
7021 /* There's only ever one segment override allowed per instruction.
7022 This instruction possibly has a legal segment override on the
7023 second operand, so copy the segment to where non-string
7024 instructions store it, allowing common code. */
7025 i
.seg
[op
] = i
.seg
[1];
7031 process_suffix (void)
7033 bool is_crc32
= false, is_movx
= false;
7035 /* If matched instruction specifies an explicit instruction mnemonic
7037 if (i
.tm
.opcode_modifier
.size
== SIZE16
)
7038 i
.suffix
= WORD_MNEM_SUFFIX
;
7039 else if (i
.tm
.opcode_modifier
.size
== SIZE32
)
7040 i
.suffix
= LONG_MNEM_SUFFIX
;
7041 else if (i
.tm
.opcode_modifier
.size
== SIZE64
)
7042 i
.suffix
= QWORD_MNEM_SUFFIX
;
7043 else if (i
.reg_operands
7044 && (i
.operands
> 1 || i
.types
[0].bitfield
.class == Reg
)
7045 && !i
.tm
.opcode_modifier
.addrprefixopreg
)
7047 unsigned int numop
= i
.operands
;
7050 is_movx
= (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
7051 && (i
.tm
.base_opcode
| 8) == 0xbe)
7052 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
7053 && i
.tm
.base_opcode
== 0x63
7054 && i
.tm
.cpu_flags
.bitfield
.cpu64
);
7057 is_crc32
= (i
.tm
.base_opcode
== 0xf0
7058 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
7059 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
);
7061 /* movsx/movzx want only their source operand considered here, for the
7062 ambiguity checking below. The suffix will be replaced afterwards
7063 to represent the destination (register). */
7064 if (is_movx
&& (i
.tm
.opcode_modifier
.w
|| i
.tm
.base_opcode
== 0x63))
7067 /* crc32 needs REX.W set regardless of suffix / source operand size. */
7068 if (is_crc32
&& i
.tm
.operand_types
[1].bitfield
.qword
)
7071 /* If there's no instruction mnemonic suffix we try to invent one
7072 based on GPR operands. */
7075 /* We take i.suffix from the last register operand specified,
7076 Destination register type is more significant than source
7077 register type. crc32 in SSE4.2 prefers source register
7079 unsigned int op
= is_crc32
? 1 : i
.operands
;
7082 if (i
.tm
.operand_types
[op
].bitfield
.instance
== InstanceNone
7083 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7085 if (i
.types
[op
].bitfield
.class != Reg
)
7087 if (i
.types
[op
].bitfield
.byte
)
7088 i
.suffix
= BYTE_MNEM_SUFFIX
;
7089 else if (i
.types
[op
].bitfield
.word
)
7090 i
.suffix
= WORD_MNEM_SUFFIX
;
7091 else if (i
.types
[op
].bitfield
.dword
)
7092 i
.suffix
= LONG_MNEM_SUFFIX
;
7093 else if (i
.types
[op
].bitfield
.qword
)
7094 i
.suffix
= QWORD_MNEM_SUFFIX
;
7100 /* As an exception, movsx/movzx silently default to a byte source
7102 if (is_movx
&& i
.tm
.opcode_modifier
.w
&& !i
.suffix
&& !intel_syntax
)
7103 i
.suffix
= BYTE_MNEM_SUFFIX
;
7105 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
7108 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
7109 && i
.tm
.opcode_modifier
.no_bsuf
)
7111 else if (!check_byte_reg ())
7114 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
7117 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
7118 && i
.tm
.opcode_modifier
.no_lsuf
7119 && !i
.tm
.opcode_modifier
.todword
7120 && !i
.tm
.opcode_modifier
.toqword
)
7122 else if (!check_long_reg ())
7125 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
7128 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
7129 && i
.tm
.opcode_modifier
.no_qsuf
7130 && !i
.tm
.opcode_modifier
.todword
7131 && !i
.tm
.opcode_modifier
.toqword
)
7133 else if (!check_qword_reg ())
7136 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
7139 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
7140 && i
.tm
.opcode_modifier
.no_wsuf
)
7142 else if (!check_word_reg ())
7145 else if (intel_syntax
7146 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
)
7147 /* Do nothing if the instruction is going to ignore the prefix. */
7152 /* Undo the movsx/movzx change done above. */
7155 else if (i
.tm
.opcode_modifier
.mnemonicsize
== DEFAULTSIZE
7158 i
.suffix
= stackop_size
;
7159 if (stackop_size
== LONG_MNEM_SUFFIX
)
7161 /* stackop_size is set to LONG_MNEM_SUFFIX for the
7162 .code16gcc directive to support 16-bit mode with
7163 32-bit address. For IRET without a suffix, generate
7164 16-bit IRET (opcode 0xcf) to return from an interrupt
7166 if (i
.tm
.base_opcode
== 0xcf)
7168 i
.suffix
= WORD_MNEM_SUFFIX
;
7169 as_warn (_("generating 16-bit `iret' for .code16gcc directive"));
7171 /* Warn about changed behavior for segment register push/pop. */
7172 else if ((i
.tm
.base_opcode
| 1) == 0x07)
7173 as_warn (_("generating 32-bit `%s', unlike earlier gas versions"),
7178 && (i
.tm
.opcode_modifier
.jump
== JUMP_ABSOLUTE
7179 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
7180 || i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
7181 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
7182 && i
.tm
.base_opcode
== 0x01 /* [ls][gi]dt */
7183 && i
.tm
.extension_opcode
<= 3)))
7188 if (!i
.tm
.opcode_modifier
.no_qsuf
)
7190 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
7191 || i
.tm
.opcode_modifier
.no_lsuf
)
7192 i
.suffix
= QWORD_MNEM_SUFFIX
;
7197 if (!i
.tm
.opcode_modifier
.no_lsuf
)
7198 i
.suffix
= LONG_MNEM_SUFFIX
;
7201 if (!i
.tm
.opcode_modifier
.no_wsuf
)
7202 i
.suffix
= WORD_MNEM_SUFFIX
;
7208 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
7209 /* Also cover lret/retf/iret in 64-bit mode. */
7210 || (flag_code
== CODE_64BIT
7211 && !i
.tm
.opcode_modifier
.no_lsuf
7212 && !i
.tm
.opcode_modifier
.no_qsuf
))
7213 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
7214 /* Explicit sizing prefixes are assumed to disambiguate insns. */
7215 && !i
.prefix
[DATA_PREFIX
] && !(i
.prefix
[REX_PREFIX
] & REX_W
)
7216 /* Accept FLDENV et al without suffix. */
7217 && (i
.tm
.opcode_modifier
.no_ssuf
|| i
.tm
.opcode_modifier
.floatmf
))
7219 unsigned int suffixes
, evex
= 0;
7221 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
7222 if (!i
.tm
.opcode_modifier
.no_wsuf
)
7224 if (!i
.tm
.opcode_modifier
.no_lsuf
)
7226 if (!i
.tm
.opcode_modifier
.no_ldsuf
)
7228 if (!i
.tm
.opcode_modifier
.no_ssuf
)
7230 if (flag_code
== CODE_64BIT
&& !i
.tm
.opcode_modifier
.no_qsuf
)
7233 /* For [XYZ]MMWORD operands inspect operand sizes. While generally
7234 also suitable for AT&T syntax mode, it was requested that this be
7235 restricted to just Intel syntax. */
7236 if (intel_syntax
&& is_any_vex_encoding (&i
.tm
) && !i
.broadcast
.type
)
7240 for (op
= 0; op
< i
.tm
.operands
; ++op
)
7242 if (is_evex_encoding (&i
.tm
)
7243 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
7245 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
7246 i
.tm
.operand_types
[op
].bitfield
.xmmword
= 0;
7247 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
7248 i
.tm
.operand_types
[op
].bitfield
.ymmword
= 0;
7249 if (!i
.tm
.opcode_modifier
.evex
7250 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
7251 i
.tm
.opcode_modifier
.evex
= EVEX512
;
7254 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
7255 + i
.tm
.operand_types
[op
].bitfield
.ymmword
7256 + i
.tm
.operand_types
[op
].bitfield
.zmmword
< 2)
7259 /* Any properly sized operand disambiguates the insn. */
7260 if (i
.types
[op
].bitfield
.xmmword
7261 || i
.types
[op
].bitfield
.ymmword
7262 || i
.types
[op
].bitfield
.zmmword
)
7264 suffixes
&= ~(7 << 6);
7269 if ((i
.flags
[op
] & Operand_Mem
)
7270 && i
.tm
.operand_types
[op
].bitfield
.unspecified
)
7272 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
)
7274 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
7276 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
7278 if (is_evex_encoding (&i
.tm
))
7284 /* Are multiple suffixes / operand sizes allowed? */
7285 if (suffixes
& (suffixes
- 1))
7288 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
7289 || operand_check
== check_error
))
7291 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
7294 if (operand_check
== check_error
)
7296 as_bad (_("no instruction mnemonic suffix given and "
7297 "no register operands; can't size `%s'"), i
.tm
.name
);
7300 if (operand_check
== check_warning
)
7301 as_warn (_("%s; using default for `%s'"),
7303 ? _("ambiguous operand size")
7304 : _("no instruction mnemonic suffix given and "
7305 "no register operands"),
7308 if (i
.tm
.opcode_modifier
.floatmf
)
7309 i
.suffix
= SHORT_MNEM_SUFFIX
;
7311 /* handled below */;
7313 i
.tm
.opcode_modifier
.evex
= evex
;
7314 else if (flag_code
== CODE_16BIT
)
7315 i
.suffix
= WORD_MNEM_SUFFIX
;
7316 else if (!i
.tm
.opcode_modifier
.no_lsuf
)
7317 i
.suffix
= LONG_MNEM_SUFFIX
;
7319 i
.suffix
= QWORD_MNEM_SUFFIX
;
7325 /* In Intel syntax, movsx/movzx must have a "suffix" (checked above).
7326 In AT&T syntax, if there is no suffix (warned about above), the default
7327 will be byte extension. */
7328 if (i
.tm
.opcode_modifier
.w
&& i
.suffix
&& i
.suffix
!= BYTE_MNEM_SUFFIX
)
7329 i
.tm
.base_opcode
|= 1;
7331 /* For further processing, the suffix should represent the destination
7332 (register). This is already the case when one was used with
7333 mov[sz][bw]*, but we need to replace it for mov[sz]x, or if there was
7334 no suffix to begin with. */
7335 if (i
.tm
.opcode_modifier
.w
|| i
.tm
.base_opcode
== 0x63 || !i
.suffix
)
7337 if (i
.types
[1].bitfield
.word
)
7338 i
.suffix
= WORD_MNEM_SUFFIX
;
7339 else if (i
.types
[1].bitfield
.qword
)
7340 i
.suffix
= QWORD_MNEM_SUFFIX
;
7342 i
.suffix
= LONG_MNEM_SUFFIX
;
7344 i
.tm
.opcode_modifier
.w
= 0;
7348 if (!i
.tm
.opcode_modifier
.modrm
&& i
.reg_operands
&& i
.tm
.operands
< 3)
7349 i
.short_form
= (i
.tm
.operand_types
[0].bitfield
.class == Reg
)
7350 != (i
.tm
.operand_types
[1].bitfield
.class == Reg
);
7352 /* Change the opcode based on the operand size given by i.suffix. */
7355 /* Size floating point instruction. */
7356 case LONG_MNEM_SUFFIX
:
7357 if (i
.tm
.opcode_modifier
.floatmf
)
7359 i
.tm
.base_opcode
^= 4;
7363 case WORD_MNEM_SUFFIX
:
7364 case QWORD_MNEM_SUFFIX
:
7365 /* It's not a byte, select word/dword operation. */
7366 if (i
.tm
.opcode_modifier
.w
)
7369 i
.tm
.base_opcode
|= 8;
7371 i
.tm
.base_opcode
|= 1;
7374 case SHORT_MNEM_SUFFIX
:
7375 /* Now select between word & dword operations via the operand
7376 size prefix, except for instructions that will ignore this
7378 if (i
.suffix
!= QWORD_MNEM_SUFFIX
7379 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
7380 && !i
.tm
.opcode_modifier
.floatmf
7381 && !is_any_vex_encoding (&i
.tm
)
7382 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
7383 || (flag_code
== CODE_64BIT
7384 && i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)))
7386 unsigned int prefix
= DATA_PREFIX_OPCODE
;
7388 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
) /* jcxz, loop */
7389 prefix
= ADDR_PREFIX_OPCODE
;
7391 if (!add_prefix (prefix
))
7395 /* Set mode64 for an operand. */
7396 if (i
.suffix
== QWORD_MNEM_SUFFIX
7397 && flag_code
== CODE_64BIT
7398 && !i
.tm
.opcode_modifier
.norex64
7399 && !i
.tm
.opcode_modifier
.vexw
7400 /* Special case for xchg %rax,%rax. It is NOP and doesn't
7402 && ! (i
.operands
== 2
7403 && i
.tm
.base_opcode
== 0x90
7404 && i
.tm
.extension_opcode
== None
7405 && i
.types
[0].bitfield
.instance
== Accum
7406 && i
.types
[0].bitfield
.qword
7407 && i
.types
[1].bitfield
.instance
== Accum
7408 && i
.types
[1].bitfield
.qword
))
7414 /* Select word/dword/qword operation with explicit data sizing prefix
7415 when there are no suitable register operands. */
7416 if (i
.tm
.opcode_modifier
.w
7417 && (i
.prefix
[DATA_PREFIX
] || (i
.prefix
[REX_PREFIX
] & REX_W
))
7419 || (i
.reg_operands
== 1
7421 && (i
.tm
.operand_types
[0].bitfield
.instance
== RegC
7423 || i
.tm
.operand_types
[0].bitfield
.instance
== RegD
7424 || i
.tm
.operand_types
[1].bitfield
.instance
== RegD
7427 i
.tm
.base_opcode
|= 1;
7431 if (i
.tm
.opcode_modifier
.addrprefixopreg
)
7433 gas_assert (!i
.suffix
);
7434 gas_assert (i
.reg_operands
);
7436 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7439 /* The address size override prefix changes the size of the
7441 if (flag_code
== CODE_64BIT
7442 && i
.op
[0].regs
->reg_type
.bitfield
.word
)
7444 as_bad (_("16-bit addressing unavailable for `%s'"),
7449 if ((flag_code
== CODE_32BIT
7450 ? i
.op
[0].regs
->reg_type
.bitfield
.word
7451 : i
.op
[0].regs
->reg_type
.bitfield
.dword
)
7452 && !add_prefix (ADDR_PREFIX_OPCODE
))
7457 /* Check invalid register operand when the address size override
7458 prefix changes the size of register operands. */
7460 enum { need_word
, need_dword
, need_qword
} need
;
7462 /* Check the register operand for the address size prefix if
7463 the memory operand has no real registers, like symbol, DISP
7464 or bogus (x32-only) symbol(%rip) when symbol(%eip) is meant. */
7465 if (i
.mem_operands
== 1
7466 && i
.reg_operands
== 1
7468 && i
.types
[1].bitfield
.class == Reg
7469 && (flag_code
== CODE_32BIT
7470 ? i
.op
[1].regs
->reg_type
.bitfield
.word
7471 : i
.op
[1].regs
->reg_type
.bitfield
.dword
)
7472 && ((i
.base_reg
== NULL
&& i
.index_reg
== NULL
)
7473 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7474 || (x86_elf_abi
== X86_64_X32_ABI
7476 && i
.base_reg
->reg_num
== RegIP
7477 && i
.base_reg
->reg_type
.bitfield
.qword
))
7481 && !add_prefix (ADDR_PREFIX_OPCODE
))
7484 if (flag_code
== CODE_32BIT
)
7485 need
= i
.prefix
[ADDR_PREFIX
] ? need_word
: need_dword
;
7486 else if (i
.prefix
[ADDR_PREFIX
])
7489 need
= flag_code
== CODE_64BIT
? need_qword
: need_word
;
7491 for (op
= 0; op
< i
.operands
; op
++)
7493 if (i
.types
[op
].bitfield
.class != Reg
)
7499 if (i
.op
[op
].regs
->reg_type
.bitfield
.word
)
7503 if (i
.op
[op
].regs
->reg_type
.bitfield
.dword
)
7507 if (i
.op
[op
].regs
->reg_type
.bitfield
.qword
)
7512 as_bad (_("invalid register operand size for `%s'"),
7523 check_byte_reg (void)
7527 for (op
= i
.operands
; --op
>= 0;)
7529 /* Skip non-register operands. */
7530 if (i
.types
[op
].bitfield
.class != Reg
)
7533 /* If this is an eight bit register, it's OK. If it's the 16 or
7534 32 bit version of an eight bit register, we will just use the
7535 low portion, and that's OK too. */
7536 if (i
.types
[op
].bitfield
.byte
)
7539 /* I/O port address operands are OK too. */
7540 if (i
.tm
.operand_types
[op
].bitfield
.instance
== RegD
7541 && i
.tm
.operand_types
[op
].bitfield
.word
)
7544 /* crc32 only wants its source operand checked here. */
7545 if (i
.tm
.base_opcode
== 0xf0
7546 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
7547 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
7551 /* Any other register is bad. */
7552 as_bad (_("`%s%s' not allowed with `%s%c'"),
7553 register_prefix
, i
.op
[op
].regs
->reg_name
,
7554 i
.tm
.name
, i
.suffix
);
7561 check_long_reg (void)
7565 for (op
= i
.operands
; --op
>= 0;)
7566 /* Skip non-register operands. */
7567 if (i
.types
[op
].bitfield
.class != Reg
)
7569 /* Reject eight bit registers, except where the template requires
7570 them. (eg. movzb) */
7571 else if (i
.types
[op
].bitfield
.byte
7572 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7573 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7574 && (i
.tm
.operand_types
[op
].bitfield
.word
7575 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7577 as_bad (_("`%s%s' not allowed with `%s%c'"),
7579 i
.op
[op
].regs
->reg_name
,
7584 /* Error if the e prefix on a general reg is missing. */
7585 else if (i
.types
[op
].bitfield
.word
7586 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7587 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7588 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7590 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7591 register_prefix
, i
.op
[op
].regs
->reg_name
,
7595 /* Warn if the r prefix on a general reg is present. */
7596 else if (i
.types
[op
].bitfield
.qword
7597 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7598 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7599 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7602 && i
.tm
.opcode_modifier
.toqword
7603 && i
.types
[0].bitfield
.class != RegSIMD
)
7605 /* Convert to QWORD. We want REX byte. */
7606 i
.suffix
= QWORD_MNEM_SUFFIX
;
7610 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7611 register_prefix
, i
.op
[op
].regs
->reg_name
,
7620 check_qword_reg (void)
7624 for (op
= i
.operands
; --op
>= 0; )
7625 /* Skip non-register operands. */
7626 if (i
.types
[op
].bitfield
.class != Reg
)
7628 /* Reject eight bit registers, except where the template requires
7629 them. (eg. movzb) */
7630 else if (i
.types
[op
].bitfield
.byte
7631 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7632 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7633 && (i
.tm
.operand_types
[op
].bitfield
.word
7634 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7636 as_bad (_("`%s%s' not allowed with `%s%c'"),
7638 i
.op
[op
].regs
->reg_name
,
7643 /* Warn if the r prefix on a general reg is missing. */
7644 else if ((i
.types
[op
].bitfield
.word
7645 || i
.types
[op
].bitfield
.dword
)
7646 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7647 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7648 && i
.tm
.operand_types
[op
].bitfield
.qword
)
7650 /* Prohibit these changes in the 64bit mode, since the
7651 lowering is more complicated. */
7653 && i
.tm
.opcode_modifier
.todword
7654 && i
.types
[0].bitfield
.class != RegSIMD
)
7656 /* Convert to DWORD. We don't want REX byte. */
7657 i
.suffix
= LONG_MNEM_SUFFIX
;
7661 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7662 register_prefix
, i
.op
[op
].regs
->reg_name
,
7671 check_word_reg (void)
7674 for (op
= i
.operands
; --op
>= 0;)
7675 /* Skip non-register operands. */
7676 if (i
.types
[op
].bitfield
.class != Reg
)
7678 /* Reject eight bit registers, except where the template requires
7679 them. (eg. movzb) */
7680 else if (i
.types
[op
].bitfield
.byte
7681 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7682 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7683 && (i
.tm
.operand_types
[op
].bitfield
.word
7684 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7686 as_bad (_("`%s%s' not allowed with `%s%c'"),
7688 i
.op
[op
].regs
->reg_name
,
7693 /* Error if the e or r prefix on a general reg is present. */
7694 else if ((i
.types
[op
].bitfield
.dword
7695 || i
.types
[op
].bitfield
.qword
)
7696 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7697 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7698 && i
.tm
.operand_types
[op
].bitfield
.word
)
7700 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7701 register_prefix
, i
.op
[op
].regs
->reg_name
,
7705 /* For some instructions need encode as EVEX.W=1 without explicit VexW1. */
7706 else if (i
.types
[op
].bitfield
.qword
7708 && i
.tm
.opcode_modifier
.toqword
)
7710 /* Convert to QWORD. We want EVEX.W byte. */
7711 i
.suffix
= QWORD_MNEM_SUFFIX
;
7717 update_imm (unsigned int j
)
7719 i386_operand_type overlap
= i
.types
[j
];
7720 if ((overlap
.bitfield
.imm8
7721 || overlap
.bitfield
.imm8s
7722 || overlap
.bitfield
.imm16
7723 || overlap
.bitfield
.imm32
7724 || overlap
.bitfield
.imm32s
7725 || overlap
.bitfield
.imm64
)
7726 && !operand_type_equal (&overlap
, &imm8
)
7727 && !operand_type_equal (&overlap
, &imm8s
)
7728 && !operand_type_equal (&overlap
, &imm16
)
7729 && !operand_type_equal (&overlap
, &imm32
)
7730 && !operand_type_equal (&overlap
, &imm32s
)
7731 && !operand_type_equal (&overlap
, &imm64
))
7735 i386_operand_type temp
;
7737 operand_type_set (&temp
, 0);
7738 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
7740 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
7741 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
7743 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
7744 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
7745 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
7747 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
7748 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
7751 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
7754 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
7755 || operand_type_equal (&overlap
, &imm16_32
)
7756 || operand_type_equal (&overlap
, &imm16_32s
))
7758 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
7763 else if (i
.prefix
[REX_PREFIX
] & REX_W
)
7764 overlap
= operand_type_and (overlap
, imm32s
);
7765 else if (i
.prefix
[DATA_PREFIX
])
7766 overlap
= operand_type_and (overlap
,
7767 flag_code
!= CODE_16BIT
? imm16
: imm32
);
7768 if (!operand_type_equal (&overlap
, &imm8
)
7769 && !operand_type_equal (&overlap
, &imm8s
)
7770 && !operand_type_equal (&overlap
, &imm16
)
7771 && !operand_type_equal (&overlap
, &imm32
)
7772 && !operand_type_equal (&overlap
, &imm32s
)
7773 && !operand_type_equal (&overlap
, &imm64
))
7775 as_bad (_("no instruction mnemonic suffix given; "
7776 "can't determine immediate size"));
7780 i
.types
[j
] = overlap
;
7790 /* Update the first 2 immediate operands. */
7791 n
= i
.operands
> 2 ? 2 : i
.operands
;
7794 for (j
= 0; j
< n
; j
++)
7795 if (update_imm (j
) == 0)
7798 /* The 3rd operand can't be immediate operand. */
7799 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
7806 process_operands (void)
7808 /* Default segment register this instruction will use for memory
7809 accesses. 0 means unknown. This is only for optimizing out
7810 unnecessary segment overrides. */
7811 const reg_entry
*default_seg
= NULL
;
7813 if (i
.tm
.opcode_modifier
.sse2avx
)
7815 /* Legacy encoded insns allow explicit REX prefixes, so these prefixes
7817 i
.rex
|= i
.prefix
[REX_PREFIX
] & (REX_W
| REX_R
| REX_X
| REX_B
);
7818 i
.prefix
[REX_PREFIX
] = 0;
7821 /* ImmExt should be processed after SSE2AVX. */
7822 else if (i
.tm
.opcode_modifier
.immext
)
7825 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
7827 unsigned int dupl
= i
.operands
;
7828 unsigned int dest
= dupl
- 1;
7831 /* The destination must be an xmm register. */
7832 gas_assert (i
.reg_operands
7833 && MAX_OPERANDS
> dupl
7834 && operand_type_equal (&i
.types
[dest
], ®xmm
));
7836 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7837 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7839 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
7841 /* Keep xmm0 for instructions with VEX prefix and 3
7843 i
.tm
.operand_types
[0].bitfield
.instance
= InstanceNone
;
7844 i
.tm
.operand_types
[0].bitfield
.class = RegSIMD
;
7849 /* We remove the first xmm0 and keep the number of
7850 operands unchanged, which in fact duplicates the
7852 for (j
= 1; j
< i
.operands
; j
++)
7854 i
.op
[j
- 1] = i
.op
[j
];
7855 i
.types
[j
- 1] = i
.types
[j
];
7856 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7857 i
.flags
[j
- 1] = i
.flags
[j
];
7861 else if (i
.tm
.opcode_modifier
.implicit1stxmm0
)
7863 gas_assert ((MAX_OPERANDS
- 1) > dupl
7864 && (i
.tm
.opcode_modifier
.vexsources
7867 /* Add the implicit xmm0 for instructions with VEX prefix
7869 for (j
= i
.operands
; j
> 0; j
--)
7871 i
.op
[j
] = i
.op
[j
- 1];
7872 i
.types
[j
] = i
.types
[j
- 1];
7873 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
7874 i
.flags
[j
] = i
.flags
[j
- 1];
7877 = (const reg_entry
*) str_hash_find (reg_hash
, "xmm0");
7878 i
.types
[0] = regxmm
;
7879 i
.tm
.operand_types
[0] = regxmm
;
7882 i
.reg_operands
+= 2;
7887 i
.op
[dupl
] = i
.op
[dest
];
7888 i
.types
[dupl
] = i
.types
[dest
];
7889 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7890 i
.flags
[dupl
] = i
.flags
[dest
];
7899 i
.op
[dupl
] = i
.op
[dest
];
7900 i
.types
[dupl
] = i
.types
[dest
];
7901 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7902 i
.flags
[dupl
] = i
.flags
[dest
];
7905 if (i
.tm
.opcode_modifier
.immext
)
7908 else if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7909 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7913 for (j
= 1; j
< i
.operands
; j
++)
7915 i
.op
[j
- 1] = i
.op
[j
];
7916 i
.types
[j
- 1] = i
.types
[j
];
7918 /* We need to adjust fields in i.tm since they are used by
7919 build_modrm_byte. */
7920 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7922 i
.flags
[j
- 1] = i
.flags
[j
];
7929 else if (i
.tm
.opcode_modifier
.implicitquadgroup
)
7931 unsigned int regnum
, first_reg_in_group
, last_reg_in_group
;
7933 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
7934 gas_assert (i
.operands
>= 2 && i
.types
[1].bitfield
.class == RegSIMD
);
7935 regnum
= register_number (i
.op
[1].regs
);
7936 first_reg_in_group
= regnum
& ~3;
7937 last_reg_in_group
= first_reg_in_group
+ 3;
7938 if (regnum
!= first_reg_in_group
)
7939 as_warn (_("source register `%s%s' implicitly denotes"
7940 " `%s%.3s%u' to `%s%.3s%u' source group in `%s'"),
7941 register_prefix
, i
.op
[1].regs
->reg_name
,
7942 register_prefix
, i
.op
[1].regs
->reg_name
, first_reg_in_group
,
7943 register_prefix
, i
.op
[1].regs
->reg_name
, last_reg_in_group
,
7946 else if (i
.tm
.opcode_modifier
.regkludge
)
7948 /* The imul $imm, %reg instruction is converted into
7949 imul $imm, %reg, %reg, and the clr %reg instruction
7950 is converted into xor %reg, %reg. */
7952 unsigned int first_reg_op
;
7954 if (operand_type_check (i
.types
[0], reg
))
7958 /* Pretend we saw the extra register operand. */
7959 gas_assert (i
.reg_operands
== 1
7960 && i
.op
[first_reg_op
+ 1].regs
== 0);
7961 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
7962 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
7967 if (i
.tm
.opcode_modifier
.modrm
)
7969 /* The opcode is completed (modulo i.tm.extension_opcode which
7970 must be put into the modrm byte). Now, we make the modrm and
7971 index base bytes based on all the info we've collected. */
7973 default_seg
= build_modrm_byte ();
7975 else if (i
.types
[0].bitfield
.class == SReg
)
7977 if (flag_code
!= CODE_64BIT
7978 ? i
.tm
.base_opcode
== POP_SEG_SHORT
7979 && i
.op
[0].regs
->reg_num
== 1
7980 : (i
.tm
.base_opcode
| 1) == (POP_SEG386_SHORT
& 0xff)
7981 && i
.op
[0].regs
->reg_num
< 4)
7983 as_bad (_("you can't `%s %s%s'"),
7984 i
.tm
.name
, register_prefix
, i
.op
[0].regs
->reg_name
);
7987 if (i
.op
[0].regs
->reg_num
> 3
7988 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
7990 i
.tm
.base_opcode
^= (POP_SEG_SHORT
^ POP_SEG386_SHORT
) & 0xff;
7991 i
.tm
.opcode_modifier
.opcodespace
= SPACE_0F
;
7993 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
7995 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
7996 && (i
.tm
.base_opcode
& ~3) == MOV_AX_DISP32
)
7998 default_seg
= reg_ds
;
8000 else if (i
.tm
.opcode_modifier
.isstring
)
8002 /* For the string instructions that allow a segment override
8003 on one of their operands, the default segment is ds. */
8004 default_seg
= reg_ds
;
8006 else if (i
.short_form
)
8008 /* The register or float register operand is in operand
8010 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.class != Reg
;
8012 /* Register goes in low 3 bits of opcode. */
8013 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
8014 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
8016 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
8018 /* Warn about some common errors, but press on regardless.
8019 The first case can be generated by gcc (<= 2.8.1). */
8020 if (i
.operands
== 2)
8022 /* Reversed arguments on faddp, fsubp, etc. */
8023 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
8024 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
8025 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
8029 /* Extraneous `l' suffix on fp insn. */
8030 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
8031 register_prefix
, i
.op
[0].regs
->reg_name
);
8036 if ((i
.seg
[0] || i
.prefix
[SEG_PREFIX
])
8037 && i
.tm
.base_opcode
== 0x8d /* lea */
8038 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
8039 && !is_any_vex_encoding(&i
.tm
))
8041 if (!quiet_warnings
)
8042 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
8046 i
.prefix
[SEG_PREFIX
] = 0;
8050 /* If a segment was explicitly specified, and the specified segment
8051 is neither the default nor the one already recorded from a prefix,
8052 use an opcode prefix to select it. If we never figured out what
8053 the default segment is, then default_seg will be zero at this
8054 point, and the specified segment prefix will always be used. */
8056 && i
.seg
[0] != default_seg
8057 && i386_seg_prefixes
[i
.seg
[0]->reg_num
] != i
.prefix
[SEG_PREFIX
])
8059 if (!add_prefix (i386_seg_prefixes
[i
.seg
[0]->reg_num
]))
8065 static INLINE
void set_rex_vrex (const reg_entry
*r
, unsigned int rex_bit
,
8068 if (r
->reg_flags
& RegRex
)
8070 if (i
.rex
& rex_bit
)
8071 as_bad (_("same type of prefix used twice"));
8074 else if (do_sse2avx
&& (i
.rex
& rex_bit
) && i
.vex
.register_specifier
)
8076 gas_assert (i
.vex
.register_specifier
== r
);
8077 i
.vex
.register_specifier
+= 8;
8080 if (r
->reg_flags
& RegVRex
)
8084 static const reg_entry
*
8085 build_modrm_byte (void)
8087 const reg_entry
*default_seg
= NULL
;
8088 unsigned int source
, dest
;
8091 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
8094 unsigned int nds
, reg_slot
;
8097 dest
= i
.operands
- 1;
8100 /* There are 2 kinds of instructions:
8101 1. 5 operands: 4 register operands or 3 register operands
8102 plus 1 memory operand plus one Imm4 operand, VexXDS, and
8103 VexW0 or VexW1. The destination must be either XMM, YMM or
8105 2. 4 operands: 4 register operands or 3 register operands
8106 plus 1 memory operand, with VexXDS. */
8107 gas_assert ((i
.reg_operands
== 4
8108 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
8109 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
8110 && i
.tm
.opcode_modifier
.vexw
8111 && i
.tm
.operand_types
[dest
].bitfield
.class == RegSIMD
);
8113 /* If VexW1 is set, the first non-immediate operand is the source and
8114 the second non-immediate one is encoded in the immediate operand. */
8115 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
8117 source
= i
.imm_operands
;
8118 reg_slot
= i
.imm_operands
+ 1;
8122 source
= i
.imm_operands
+ 1;
8123 reg_slot
= i
.imm_operands
;
8126 if (i
.imm_operands
== 0)
8128 /* When there is no immediate operand, generate an 8bit
8129 immediate operand to encode the first operand. */
8130 exp
= &im_expressions
[i
.imm_operands
++];
8131 i
.op
[i
.operands
].imms
= exp
;
8132 i
.types
[i
.operands
] = imm8
;
8135 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
8136 exp
->X_op
= O_constant
;
8137 exp
->X_add_number
= register_number (i
.op
[reg_slot
].regs
) << 4;
8138 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
8142 gas_assert (i
.imm_operands
== 1);
8143 gas_assert (fits_in_imm4 (i
.op
[0].imms
->X_add_number
));
8144 gas_assert (!i
.tm
.opcode_modifier
.immext
);
8146 /* Turn on Imm8 again so that output_imm will generate it. */
8147 i
.types
[0].bitfield
.imm8
= 1;
8149 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
8150 i
.op
[0].imms
->X_add_number
8151 |= register_number (i
.op
[reg_slot
].regs
) << 4;
8152 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
8155 gas_assert (i
.tm
.operand_types
[nds
].bitfield
.class == RegSIMD
);
8156 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
8161 /* i.reg_operands MUST be the number of real register operands;
8162 implicit registers do not count. If there are 3 register
8163 operands, it must be a instruction with VexNDS. For a
8164 instruction with VexNDD, the destination register is encoded
8165 in VEX prefix. If there are 4 register operands, it must be
8166 a instruction with VEX prefix and 3 sources. */
8167 if (i
.mem_operands
== 0
8168 && ((i
.reg_operands
== 2
8169 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
8170 || (i
.reg_operands
== 3
8171 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8172 || (i
.reg_operands
== 4 && vex_3_sources
)))
8180 /* When there are 3 operands, one of them may be immediate,
8181 which may be the first or the last operand. Otherwise,
8182 the first operand must be shift count register (cl) or it
8183 is an instruction with VexNDS. */
8184 gas_assert (i
.imm_operands
== 1
8185 || (i
.imm_operands
== 0
8186 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
8187 || (i
.types
[0].bitfield
.instance
== RegC
8188 && i
.types
[0].bitfield
.byte
))));
8189 if (operand_type_check (i
.types
[0], imm
)
8190 || (i
.types
[0].bitfield
.instance
== RegC
8191 && i
.types
[0].bitfield
.byte
))
8197 /* When there are 4 operands, the first two must be 8bit
8198 immediate operands. The source operand will be the 3rd
8201 For instructions with VexNDS, if the first operand
8202 an imm8, the source operand is the 2nd one. If the last
8203 operand is imm8, the source operand is the first one. */
8204 gas_assert ((i
.imm_operands
== 2
8205 && i
.types
[0].bitfield
.imm8
8206 && i
.types
[1].bitfield
.imm8
)
8207 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
8208 && i
.imm_operands
== 1
8209 && (i
.types
[0].bitfield
.imm8
8210 || i
.types
[i
.operands
- 1].bitfield
.imm8
8211 || i
.rounding
.type
!= rc_none
)));
8212 if (i
.imm_operands
== 2)
8216 if (i
.types
[0].bitfield
.imm8
)
8223 if (is_evex_encoding (&i
.tm
))
8225 /* For EVEX instructions, when there are 5 operands, the
8226 first one must be immediate operand. If the second one
8227 is immediate operand, the source operand is the 3th
8228 one. If the last one is immediate operand, the source
8229 operand is the 2nd one. */
8230 gas_assert (i
.imm_operands
== 2
8231 && i
.tm
.opcode_modifier
.sae
8232 && operand_type_check (i
.types
[0], imm
));
8233 if (operand_type_check (i
.types
[1], imm
))
8235 else if (operand_type_check (i
.types
[4], imm
))
8249 /* RC/SAE operand could be between DEST and SRC. That happens
8250 when one operand is GPR and the other one is XMM/YMM/ZMM
8252 if (i
.rounding
.type
!= rc_none
&& i
.rounding
.operand
== dest
)
8255 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8257 /* For instructions with VexNDS, the register-only source
8258 operand must be a 32/64bit integer, XMM, YMM, ZMM, or mask
8259 register. It is encoded in VEX prefix. */
8261 i386_operand_type op
;
8264 /* Swap two source operands if needed. */
8265 if (i
.tm
.opcode_modifier
.swapsources
)
8273 op
= i
.tm
.operand_types
[vvvv
];
8274 if ((dest
+ 1) >= i
.operands
8275 || ((op
.bitfield
.class != Reg
8276 || (!op
.bitfield
.dword
&& !op
.bitfield
.qword
))
8277 && op
.bitfield
.class != RegSIMD
8278 && !operand_type_equal (&op
, ®mask
)))
8280 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
8286 /* One of the register operands will be encoded in the i.rm.reg
8287 field, the other in the combined i.rm.mode and i.rm.regmem
8288 fields. If no form of this instruction supports a memory
8289 destination operand, then we assume the source operand may
8290 sometimes be a memory operand and so we need to store the
8291 destination in the i.rm.reg field. */
8292 if (!i
.tm
.opcode_modifier
.regmem
8293 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
8295 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
8296 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
8297 set_rex_vrex (i
.op
[dest
].regs
, REX_R
, i
.tm
.opcode_modifier
.sse2avx
);
8298 set_rex_vrex (i
.op
[source
].regs
, REX_B
, false);
8302 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
8303 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
8304 set_rex_vrex (i
.op
[dest
].regs
, REX_B
, i
.tm
.opcode_modifier
.sse2avx
);
8305 set_rex_vrex (i
.op
[source
].regs
, REX_R
, false);
8307 if (flag_code
!= CODE_64BIT
&& (i
.rex
& REX_R
))
8309 if (i
.types
[!i
.tm
.opcode_modifier
.regmem
].bitfield
.class != RegCR
)
8312 add_prefix (LOCK_PREFIX_OPCODE
);
8316 { /* If it's not 2 reg operands... */
8321 unsigned int fake_zero_displacement
= 0;
8324 for (op
= 0; op
< i
.operands
; op
++)
8325 if (i
.flags
[op
] & Operand_Mem
)
8327 gas_assert (op
< i
.operands
);
8329 if (i
.tm
.opcode_modifier
.sib
)
8331 /* The index register of VSIB shouldn't be RegIZ. */
8332 if (i
.tm
.opcode_modifier
.sib
!= SIBMEM
8333 && i
.index_reg
->reg_num
== RegIZ
)
8336 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8339 i
.sib
.base
= NO_BASE_REGISTER
;
8340 i
.sib
.scale
= i
.log2_scale_factor
;
8341 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
8342 if (want_disp32 (&i
.tm
))
8343 i
.types
[op
].bitfield
.disp32
= 1;
8345 i
.types
[op
].bitfield
.disp32s
= 1;
8348 /* Since the mandatory SIB always has index register, so
8349 the code logic remains unchanged. The non-mandatory SIB
8350 without index register is allowed and will be handled
8354 if (i
.index_reg
->reg_num
== RegIZ
)
8355 i
.sib
.index
= NO_INDEX_REGISTER
;
8357 i
.sib
.index
= i
.index_reg
->reg_num
;
8358 set_rex_vrex (i
.index_reg
, REX_X
, false);
8362 default_seg
= reg_ds
;
8364 if (i
.base_reg
== 0)
8367 if (!i
.disp_operands
)
8368 fake_zero_displacement
= 1;
8369 if (i
.index_reg
== 0)
8371 /* Both check for VSIB and mandatory non-vector SIB. */
8372 gas_assert (!i
.tm
.opcode_modifier
.sib
8373 || i
.tm
.opcode_modifier
.sib
== SIBMEM
);
8374 /* Operand is just <disp> */
8375 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
8376 if (flag_code
== CODE_64BIT
)
8378 /* 64bit mode overwrites the 32bit absolute
8379 addressing by RIP relative addressing and
8380 absolute addressing is encoded by one of the
8381 redundant SIB forms. */
8382 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8383 i
.sib
.base
= NO_BASE_REGISTER
;
8384 i
.sib
.index
= NO_INDEX_REGISTER
;
8385 if (want_disp32 (&i
.tm
))
8386 i
.types
[op
].bitfield
.disp32
= 1;
8388 i
.types
[op
].bitfield
.disp32s
= 1;
8390 else if ((flag_code
== CODE_16BIT
)
8391 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
8393 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
8394 i
.types
[op
].bitfield
.disp16
= 1;
8398 i
.rm
.regmem
= NO_BASE_REGISTER
;
8399 i
.types
[op
].bitfield
.disp32
= 1;
8402 else if (!i
.tm
.opcode_modifier
.sib
)
8404 /* !i.base_reg && i.index_reg */
8405 if (i
.index_reg
->reg_num
== RegIZ
)
8406 i
.sib
.index
= NO_INDEX_REGISTER
;
8408 i
.sib
.index
= i
.index_reg
->reg_num
;
8409 i
.sib
.base
= NO_BASE_REGISTER
;
8410 i
.sib
.scale
= i
.log2_scale_factor
;
8411 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8412 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
8413 if (want_disp32 (&i
.tm
))
8414 i
.types
[op
].bitfield
.disp32
= 1;
8416 i
.types
[op
].bitfield
.disp32s
= 1;
8417 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
8421 /* RIP addressing for 64bit mode. */
8422 else if (i
.base_reg
->reg_num
== RegIP
)
8424 gas_assert (!i
.tm
.opcode_modifier
.sib
);
8425 i
.rm
.regmem
= NO_BASE_REGISTER
;
8426 i
.types
[op
].bitfield
.disp8
= 0;
8427 i
.types
[op
].bitfield
.disp16
= 0;
8428 i
.types
[op
].bitfield
.disp32
= 0;
8429 i
.types
[op
].bitfield
.disp32s
= 1;
8430 i
.types
[op
].bitfield
.disp64
= 0;
8431 i
.flags
[op
] |= Operand_PCrel
;
8432 if (! i
.disp_operands
)
8433 fake_zero_displacement
= 1;
8435 else if (i
.base_reg
->reg_type
.bitfield
.word
)
8437 gas_assert (!i
.tm
.opcode_modifier
.sib
);
8438 switch (i
.base_reg
->reg_num
)
8441 if (i
.index_reg
== 0)
8443 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
8444 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
8447 default_seg
= reg_ss
;
8448 if (i
.index_reg
== 0)
8451 if (operand_type_check (i
.types
[op
], disp
) == 0)
8453 /* fake (%bp) into 0(%bp) */
8454 if (i
.disp_encoding
== disp_encoding_16bit
)
8455 i
.types
[op
].bitfield
.disp16
= 1;
8457 i
.types
[op
].bitfield
.disp8
= 1;
8458 fake_zero_displacement
= 1;
8461 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
8462 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
8464 default: /* (%si) -> 4 or (%di) -> 5 */
8465 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
8467 if (!fake_zero_displacement
8471 fake_zero_displacement
= 1;
8472 if (i
.disp_encoding
== disp_encoding_8bit
)
8473 i
.types
[op
].bitfield
.disp8
= 1;
8475 i
.types
[op
].bitfield
.disp16
= 1;
8477 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8479 else /* i.base_reg and 32/64 bit mode */
8481 if (operand_type_check (i
.types
[op
], disp
))
8483 i
.types
[op
].bitfield
.disp16
= 0;
8484 i
.types
[op
].bitfield
.disp64
= 0;
8485 if (!want_disp32 (&i
.tm
))
8487 i
.types
[op
].bitfield
.disp32
= 0;
8488 i
.types
[op
].bitfield
.disp32s
= 1;
8492 i
.types
[op
].bitfield
.disp32
= 1;
8493 i
.types
[op
].bitfield
.disp32s
= 0;
8497 if (!i
.tm
.opcode_modifier
.sib
)
8498 i
.rm
.regmem
= i
.base_reg
->reg_num
;
8499 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
8501 i
.sib
.base
= i
.base_reg
->reg_num
;
8502 /* x86-64 ignores REX prefix bit here to avoid decoder
8504 if (!(i
.base_reg
->reg_flags
& RegRex
)
8505 && (i
.base_reg
->reg_num
== EBP_REG_NUM
8506 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
8507 default_seg
= reg_ss
;
8508 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
8510 fake_zero_displacement
= 1;
8511 if (i
.disp_encoding
== disp_encoding_32bit
)
8512 i
.types
[op
].bitfield
.disp32
= 1;
8514 i
.types
[op
].bitfield
.disp8
= 1;
8516 i
.sib
.scale
= i
.log2_scale_factor
;
8517 if (i
.index_reg
== 0)
8519 /* Only check for VSIB. */
8520 gas_assert (i
.tm
.opcode_modifier
.sib
!= VECSIB128
8521 && i
.tm
.opcode_modifier
.sib
!= VECSIB256
8522 && i
.tm
.opcode_modifier
.sib
!= VECSIB512
);
8524 /* <disp>(%esp) becomes two byte modrm with no index
8525 register. We've already stored the code for esp
8526 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
8527 Any base register besides %esp will not use the
8528 extra modrm byte. */
8529 i
.sib
.index
= NO_INDEX_REGISTER
;
8531 else if (!i
.tm
.opcode_modifier
.sib
)
8533 if (i
.index_reg
->reg_num
== RegIZ
)
8534 i
.sib
.index
= NO_INDEX_REGISTER
;
8536 i
.sib
.index
= i
.index_reg
->reg_num
;
8537 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8538 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
8543 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
8544 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
8548 if (!fake_zero_displacement
8552 fake_zero_displacement
= 1;
8553 if (i
.disp_encoding
== disp_encoding_8bit
)
8554 i
.types
[op
].bitfield
.disp8
= 1;
8556 i
.types
[op
].bitfield
.disp32
= 1;
8558 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8562 if (fake_zero_displacement
)
8564 /* Fakes a zero displacement assuming that i.types[op]
8565 holds the correct displacement size. */
8568 gas_assert (i
.op
[op
].disps
== 0);
8569 exp
= &disp_expressions
[i
.disp_operands
++];
8570 i
.op
[op
].disps
= exp
;
8571 exp
->X_op
= O_constant
;
8572 exp
->X_add_number
= 0;
8573 exp
->X_add_symbol
= (symbolS
*) 0;
8574 exp
->X_op_symbol
= (symbolS
*) 0;
8582 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
8584 if (operand_type_check (i
.types
[0], imm
))
8585 i
.vex
.register_specifier
= NULL
;
8588 /* VEX.vvvv encodes one of the sources when the first
8589 operand is not an immediate. */
8590 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
8591 i
.vex
.register_specifier
= i
.op
[0].regs
;
8593 i
.vex
.register_specifier
= i
.op
[1].regs
;
8596 /* Destination is a XMM register encoded in the ModRM.reg
8598 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
8599 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
8602 /* ModRM.rm and VEX.B encodes the other source. */
8603 if (!i
.mem_operands
)
8607 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
8608 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
8610 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
8612 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
8616 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
8618 i
.vex
.register_specifier
= i
.op
[2].regs
;
8619 if (!i
.mem_operands
)
8622 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
8623 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
8627 /* Fill in i.rm.reg or i.rm.regmem field with register operand
8628 (if any) based on i.tm.extension_opcode. Again, we must be
8629 careful to make sure that segment/control/debug/test/MMX
8630 registers are coded into the i.rm.reg field. */
8631 else if (i
.reg_operands
)
8634 unsigned int vex_reg
= ~0;
8636 for (op
= 0; op
< i
.operands
; op
++)
8637 if (i
.types
[op
].bitfield
.class == Reg
8638 || i
.types
[op
].bitfield
.class == RegBND
8639 || i
.types
[op
].bitfield
.class == RegMask
8640 || i
.types
[op
].bitfield
.class == SReg
8641 || i
.types
[op
].bitfield
.class == RegCR
8642 || i
.types
[op
].bitfield
.class == RegDR
8643 || i
.types
[op
].bitfield
.class == RegTR
8644 || i
.types
[op
].bitfield
.class == RegSIMD
8645 || i
.types
[op
].bitfield
.class == RegMMX
)
8650 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8652 /* For instructions with VexNDS, the register-only
8653 source operand is encoded in VEX prefix. */
8654 gas_assert (mem
!= (unsigned int) ~0);
8659 gas_assert (op
< i
.operands
);
8663 /* Check register-only source operand when two source
8664 operands are swapped. */
8665 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
8666 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
8670 gas_assert (mem
== (vex_reg
+ 1)
8671 && op
< i
.operands
);
8676 gas_assert (vex_reg
< i
.operands
);
8680 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
8682 /* For instructions with VexNDD, the register destination
8683 is encoded in VEX prefix. */
8684 if (i
.mem_operands
== 0)
8686 /* There is no memory operand. */
8687 gas_assert ((op
+ 2) == i
.operands
);
8692 /* There are only 2 non-immediate operands. */
8693 gas_assert (op
< i
.imm_operands
+ 2
8694 && i
.operands
== i
.imm_operands
+ 2);
8695 vex_reg
= i
.imm_operands
+ 1;
8699 gas_assert (op
< i
.operands
);
8701 if (vex_reg
!= (unsigned int) ~0)
8703 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
8705 if ((type
->bitfield
.class != Reg
8706 || (!type
->bitfield
.dword
&& !type
->bitfield
.qword
))
8707 && type
->bitfield
.class != RegSIMD
8708 && !operand_type_equal (type
, ®mask
))
8711 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
8714 /* Don't set OP operand twice. */
8717 /* If there is an extension opcode to put here, the
8718 register number must be put into the regmem field. */
8719 if (i
.tm
.extension_opcode
!= None
)
8721 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
8722 set_rex_vrex (i
.op
[op
].regs
, REX_B
,
8723 i
.tm
.opcode_modifier
.sse2avx
);
8727 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
8728 set_rex_vrex (i
.op
[op
].regs
, REX_R
,
8729 i
.tm
.opcode_modifier
.sse2avx
);
8733 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
8734 must set it to 3 to indicate this is a register operand
8735 in the regmem field. */
8736 if (!i
.mem_operands
)
8740 /* Fill in i.rm.reg field with extension opcode (if any). */
8741 if (i
.tm
.extension_opcode
!= None
)
8742 i
.rm
.reg
= i
.tm
.extension_opcode
;
8748 frag_opcode_byte (unsigned char byte
)
8750 if (now_seg
!= absolute_section
)
8751 FRAG_APPEND_1_CHAR (byte
);
8753 ++abs_section_offset
;
8757 flip_code16 (unsigned int code16
)
8759 gas_assert (i
.tm
.operands
== 1);
8761 return !(i
.prefix
[REX_PREFIX
] & REX_W
)
8762 && (code16
? i
.tm
.operand_types
[0].bitfield
.disp32
8763 || i
.tm
.operand_types
[0].bitfield
.disp32s
8764 : i
.tm
.operand_types
[0].bitfield
.disp16
)
/* Emit a relaxable (conditional or unconditional) branch: write the
   prefixes plus one opcode byte into the fixed part of the frag, then
   close the frag with frag_var so md_convert_frag can later pick the
   final (short/near) encoding.
   NOTE(review): many interior lines (braces, local declarations such as
   `code16`, `size`, `prefix`, `p`, `sym`, `off`, and several statement
   bodies) are missing from this extraction fragment; the annotations
   below only describe what the visible lines establish.  */
8769 output_branch (void)
8775 relax_substateT subtype
;
/* Relaxation needs a real section to grow frags in.  */
8779 if (now_seg
== absolute_section
)
8781 as_bad (_("relaxable branches not supported in absolute section"));
/* Track whether a 16-bit displacement encoding is in effect, and the
   initial relax size (BIG forces a 32-bit displacement encoding).  */
8785 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
8786 size
= i
.disp_encoding
== disp_encoding_32bit
? BIG
: SMALL
;
/* A data-size prefix may toggle the 16/32-bit displacement state.  */
8789 if (i
.prefix
[DATA_PREFIX
] != 0)
8793 code16
^= flip_code16(code16
);
8795 /* Pentium4 branch hints. */
8796 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8797 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8802 if (i
.prefix
[REX_PREFIX
] != 0)
8808 /* BND prefixed jump. */
8809 if (i
.prefix
[BND_PREFIX
] != 0)
/* Any prefixes still unaccounted for are dropped with a warning.  */
8815 if (i
.prefixes
!= 0)
8816 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8818 /* It's always a symbol; End frag & setup for relax.
8819 Make sure there is enough room in this frag for the largest
8820 instruction we may generate in md_convert_frag. This is 2
8821 bytes for the opcode and room for the prefix and largest
8823 frag_grow (prefix
+ 2 + 4);
8824 /* Prefix and 1 opcode byte go in fr_fix. */
8825 p
= frag_more (prefix
+ 1);
8826 if (i
.prefix
[DATA_PREFIX
] != 0)
8827 *p
++ = DATA_PREFIX_OPCODE
;
8828 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
8829 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
8830 *p
++ = i
.prefix
[SEG_PREFIX
];
8831 if (i
.prefix
[BND_PREFIX
] != 0)
8832 *p
++ = BND_PREFIX_OPCODE
;
8833 if (i
.prefix
[REX_PREFIX
] != 0)
8834 *p
++ = i
.prefix
[REX_PREFIX
];
8835 *p
= i
.tm
.base_opcode
;
/* Classify the relax state: unconditional jmp vs. conditional jump,
   with a separate COND_JUMP86 flavor when i386 features are absent.  */
8837 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
8838 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
8839 else if (cpu_arch_flags
.bitfield
.cpui386
)
8840 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
8842 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
/* Target symbol and addend come from the first (displacement)
   operand.  */
8845 sym
= i
.op
[0].disps
->X_add_symbol
;
8846 off
= i
.op
[0].disps
->X_add_number
;
8848 if (i
.op
[0].disps
->X_op
!= O_constant
8849 && i
.op
[0].disps
->X_op
!= O_symbol
)
8851 /* Handle complex expressions. */
8852 sym
= make_expr_symbol (i
.op
[0].disps
);
8856 /* 1 possible extra opcode + 4 byte displacement go in var part.
8857 Pass reloc in fr_var. */
8858 frag_var (rs_machine_dependent
, 5, i
.reloc
[0], subtype
, sym
, off
, p
);
8861 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8862 /* Return TRUE iff PLT32 relocation should be used for branching to
/* NOTE(review): this extraction fragment has dropped the function's
   return-type line, braces, and every `return` statement; only the
   classifying comments and conditions remain.  The visible logic
   classifies the branch-target symbol S via S_IS_WEAK / S_IS_DEFINED /
   S_IS_EXTERNAL.  */
8866 need_plt32_p (symbolS
*s
)
8868 /* PLT32 relocation is ELF only. */
8873 /* Don't emit PLT32 relocation on Solaris: neither native linker nor
8874 krtld support it. */
8878 /* Since there is no need to prepare for PLT branch on x86-64, we
8879 can generate R_X86_64_PLT32, instead of R_X86_64_PC32, which can
8880 be used as a marker for 32-bit PC-relative branches. */
8887 /* Weak or undefined symbol need PLT32 relocation. */
8888 if (S_IS_WEAK (s
) || !S_IS_DEFINED (s
))
8891 /* Non-global symbol doesn't need PLT32 relocation. */
8892 if (! S_IS_EXTERNAL (s
))
8895 /* Other global symbols need PLT32 relocation. NB: Symbol with
8896 non-default visibilities are treated as normal global symbol
8897 so that PLT32 relocation can be used as a marker for 32-bit
8898 PC-relative branches. It is useful for linker relaxation. */
8909 bfd_reloc_code_real_type jump_reloc
= i
.reloc
[0];
8911 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)
8913 /* This is a loop or jecxz type instruction. */
8915 if (i
.prefix
[ADDR_PREFIX
] != 0)
8917 frag_opcode_byte (ADDR_PREFIX_OPCODE
);
8920 /* Pentium4 branch hints. */
8921 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8922 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8924 frag_opcode_byte (i
.prefix
[SEG_PREFIX
]);
8933 if (flag_code
== CODE_16BIT
)
8936 if (i
.prefix
[DATA_PREFIX
] != 0)
8938 frag_opcode_byte (DATA_PREFIX_OPCODE
);
8940 code16
^= flip_code16(code16
);
8948 /* BND prefixed jump. */
8949 if (i
.prefix
[BND_PREFIX
] != 0)
8951 frag_opcode_byte (i
.prefix
[BND_PREFIX
]);
8955 if (i
.prefix
[REX_PREFIX
] != 0)
8957 frag_opcode_byte (i
.prefix
[REX_PREFIX
]);
8961 if (i
.prefixes
!= 0)
8962 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8964 if (now_seg
== absolute_section
)
8966 abs_section_offset
+= i
.opcode_length
+ size
;
8970 p
= frag_more (i
.opcode_length
+ size
);
8971 switch (i
.opcode_length
)
8974 *p
++ = i
.tm
.base_opcode
>> 8;
8977 *p
++ = i
.tm
.base_opcode
;
8983 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8985 && jump_reloc
== NO_RELOC
8986 && need_plt32_p (i
.op
[0].disps
->X_add_symbol
))
8987 jump_reloc
= BFD_RELOC_X86_64_PLT32
;
8990 jump_reloc
= reloc (size
, 1, 1, jump_reloc
);
8992 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8993 i
.op
[0].disps
, 1, jump_reloc
);
8995 /* All jumps handled here are signed, but don't unconditionally use a
8996 signed limit check for 32 and 16 bit jumps as we want to allow wrap
8997 around at 4G (outside of 64-bit mode) and 64k (except for XBEGIN)
9002 fixP
->fx_signed
= 1;
9006 if (i
.tm
.base_opcode
== 0xc7f8)
9007 fixP
->fx_signed
= 1;
9011 if (flag_code
== CODE_64BIT
)
9012 fixP
->fx_signed
= 1;
/* Emit a direct inter-segment (far) jump/call: optional prefixes, one
   opcode byte, then the offset (operand 1) followed by the 2-byte
   segment selector (operand 0).
   NOTE(review): local declarations (`size`, `prefix`, `p`), braces and
   several statement bodies are missing from this extraction fragment.  */
9018 output_interseg_jump (void)
/* In 16-bit code the offset is 2 bytes unless a data-size prefix
   widens it (the adjusting statements are among the dropped lines).  */
9026 if (flag_code
== CODE_16BIT
)
9030 if (i
.prefix
[DATA_PREFIX
] != 0)
/* Far jumps never carry a REX prefix.  */
9037 gas_assert (!i
.prefix
[REX_PREFIX
]);
9043 if (i
.prefixes
!= 0)
9044 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
/* Absolute section: account for the bytes without emitting them.  */
9046 if (now_seg
== absolute_section
)
9048 abs_section_offset
+= prefix
+ 1 + 2 + size
;
9052 /* 1 opcode; 2 segment; offset */
9053 p
= frag_more (prefix
+ 1 + 2 + size
);
9055 if (i
.prefix
[DATA_PREFIX
] != 0)
9056 *p
++ = DATA_PREFIX_OPCODE
;
9058 if (i
.prefix
[REX_PREFIX
] != 0)
9059 *p
++ = i
.prefix
[REX_PREFIX
];
9061 *p
++ = i
.tm
.base_opcode
;
/* Offset part: emit a constant directly (with a range check for
   16-bit offsets), otherwise leave a fixup for the relocation.  */
9062 if (i
.op
[1].imms
->X_op
== O_constant
)
9064 offsetT n
= i
.op
[1].imms
->X_add_number
;
9067 && !fits_in_unsigned_word (n
)
9068 && !fits_in_signed_word (n
))
9070 as_bad (_("16-bit jump out of range"));
9073 md_number_to_chars (p
, n
, size
);
9076 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
9077 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
/* Segment-selector part: constant or fixup, always 2 bytes.  */
9080 if (i
.op
[0].imms
->X_op
== O_constant
)
9081 md_number_to_chars (p
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
9083 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, 2,
9084 i
.op
[0].imms
, 0, reloc (2, 0, 0, i
.reloc
[0]));
9087 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9092 asection
*seg
= now_seg
;
9093 subsegT subseg
= now_subseg
;
9095 unsigned int alignment
, align_size_1
;
9096 unsigned int isa_1_descsz
, feature_2_descsz
, descsz
;
9097 unsigned int isa_1_descsz_raw
, feature_2_descsz_raw
;
9098 unsigned int padding
;
9100 if (!IS_ELF
|| !x86_used_note
)
9103 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X86
;
9105 /* The .note.gnu.property section layout:
9107 Field Length Contents
9110 n_descsz 4 The note descriptor size
9111 n_type 4 NT_GNU_PROPERTY_TYPE_0
9113 n_desc n_descsz The program property array
9117 /* Create the .note.gnu.property section. */
9118 sec
= subseg_new (NOTE_GNU_PROPERTY_SECTION_NAME
, 0);
9119 bfd_set_section_flags (sec
,
9126 if (get_elf_backend_data (stdoutput
)->s
->elfclass
== ELFCLASS64
)
9137 bfd_set_section_alignment (sec
, alignment
);
9138 elf_section_type (sec
) = SHT_NOTE
;
9140 /* GNU_PROPERTY_X86_ISA_1_USED: 4-byte type + 4-byte data size
9142 isa_1_descsz_raw
= 4 + 4 + 4;
9143 /* Align GNU_PROPERTY_X86_ISA_1_USED. */
9144 isa_1_descsz
= (isa_1_descsz_raw
+ align_size_1
) & ~align_size_1
;
9146 feature_2_descsz_raw
= isa_1_descsz
;
9147 /* GNU_PROPERTY_X86_FEATURE_2_USED: 4-byte type + 4-byte data size
9149 feature_2_descsz_raw
+= 4 + 4 + 4;
9150 /* Align GNU_PROPERTY_X86_FEATURE_2_USED. */
9151 feature_2_descsz
= ((feature_2_descsz_raw
+ align_size_1
)
9154 descsz
= feature_2_descsz
;
9155 /* Section size: n_namsz + n_descsz + n_type + n_name + n_descsz. */
9156 p
= frag_more (4 + 4 + 4 + 4 + descsz
);
9158 /* Write n_namsz. */
9159 md_number_to_chars (p
, (valueT
) 4, 4);
9161 /* Write n_descsz. */
9162 md_number_to_chars (p
+ 4, (valueT
) descsz
, 4);
9165 md_number_to_chars (p
+ 4 * 2, (valueT
) NT_GNU_PROPERTY_TYPE_0
, 4);
9168 memcpy (p
+ 4 * 3, "GNU", 4);
9170 /* Write 4-byte type. */
9171 md_number_to_chars (p
+ 4 * 4,
9172 (valueT
) GNU_PROPERTY_X86_ISA_1_USED
, 4);
9174 /* Write 4-byte data size. */
9175 md_number_to_chars (p
+ 4 * 5, (valueT
) 4, 4);
9177 /* Write 4-byte data. */
9178 md_number_to_chars (p
+ 4 * 6, (valueT
) x86_isa_1_used
, 4);
9180 /* Zero out paddings. */
9181 padding
= isa_1_descsz
- isa_1_descsz_raw
;
9183 memset (p
+ 4 * 7, 0, padding
);
9185 /* Write 4-byte type. */
9186 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 4,
9187 (valueT
) GNU_PROPERTY_X86_FEATURE_2_USED
, 4);
9189 /* Write 4-byte data size. */
9190 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 5, (valueT
) 4, 4);
9192 /* Write 4-byte data. */
9193 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 6,
9194 (valueT
) x86_feature_2_used
, 4);
9196 /* Zero out paddings. */
9197 padding
= feature_2_descsz
- feature_2_descsz_raw
;
9199 memset (p
+ isa_1_descsz
+ 4 * 7, 0, padding
);
9201 /* We probably can't restore the current segment, for there likely
9204 subseg_set (seg
, subseg
);
/* Return the number of bytes emitted for the current instruction so
   far, measured from START_OFF inside START_FRAG up to FRAG_NOW_PTR in
   the current frag.  Frags closed since the instruction began are
   summed via the fr_next chain.
   NOTE(review): the return-type line, braces, and the body of the
   do/while loop (the statements accumulating into `len` and advancing
   `fr`) are missing from this extraction fragment.  */
9209 encoding_length (const fragS
*start_frag
, offsetT start_off
,
9210 const char *frag_now_ptr
)
9212 unsigned int len
= 0;
/* Walk intermediate frags only when the instruction spans frags.  */
9214 if (start_frag
!= frag_now
)
9216 const fragS
*fr
= start_frag
;
9221 } while (fr
&& fr
!= frag_now
);
9224 return len
- start_off
+ (frag_now_ptr
- frag_now
->fr_literal
);
9227 /* Return 1 for test, and, cmp, add, sub, inc and dec which may
9228 be macro-fused with conditional jumps.
9229 NB: If TEST/AND/CMP/ADD/SUB/INC/DEC is of RIP relative address,
9230 or is one of the following format:
/* NOTE(review): the return type, braces, and the early `return 0`
   statements after the guard conditions are missing from this
   extraction fragment.  Each visible branch classifies the template by
   base_opcode/extension_opcode, stores the macro-fusion kind through
   MF_CMP_P, and returns nonzero unless the operand mix (memory +
   immediate, or memory for inc/dec) disables fusion.  */
9243 maybe_fused_with_jcc_p (enum mf_cmp_kind
* mf_cmp_p
)
9245 /* No RIP address. */
9246 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
9249 /* No opcodes outside of base encoding space. */
9250 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
9253 /* add, sub without add/sub m, imm. */
9254 if (i
.tm
.base_opcode
<= 5
9255 || (i
.tm
.base_opcode
>= 0x28 && i
.tm
.base_opcode
<= 0x2d)
9256 || ((i
.tm
.base_opcode
| 3) == 0x83
9257 && (i
.tm
.extension_opcode
== 0x5
9258 || i
.tm
.extension_opcode
== 0x0)))
9260 *mf_cmp_p
= mf_cmp_alu_cmp
;
9261 return !(i
.mem_operands
&& i
.imm_operands
);
9264 /* and without and m, imm. */
9265 if ((i
.tm
.base_opcode
>= 0x20 && i
.tm
.base_opcode
<= 0x25)
9266 || ((i
.tm
.base_opcode
| 3) == 0x83
9267 && i
.tm
.extension_opcode
== 0x4))
9269 *mf_cmp_p
= mf_cmp_test_and
;
9270 return !(i
.mem_operands
&& i
.imm_operands
);
9273 /* test without test m imm. */
9274 if ((i
.tm
.base_opcode
| 1) == 0x85
9275 || (i
.tm
.base_opcode
| 1) == 0xa9
9276 || ((i
.tm
.base_opcode
| 1) == 0xf7
9277 && i
.tm
.extension_opcode
== 0))
9279 *mf_cmp_p
= mf_cmp_test_and
;
9280 return !(i
.mem_operands
&& i
.imm_operands
);
9283 /* cmp without cmp m, imm. */
9284 if ((i
.tm
.base_opcode
>= 0x38 && i
.tm
.base_opcode
<= 0x3d)
9285 || ((i
.tm
.base_opcode
| 3) == 0x83
9286 && (i
.tm
.extension_opcode
== 0x7)))
9288 *mf_cmp_p
= mf_cmp_alu_cmp
;
9289 return !(i
.mem_operands
&& i
.imm_operands
);
9292 /* inc, dec without inc/dec m. */
9293 if ((i
.tm
.cpu_flags
.bitfield
.cpuno64
9294 && (i
.tm
.base_opcode
| 0xf) == 0x4f)
9295 || ((i
.tm
.base_opcode
| 1) == 0xff
9296 && i
.tm
.extension_opcode
<= 0x1))
9298 *mf_cmp_p
= mf_cmp_incdec
;
9299 return !i
.mem_operands
;
9305 /* Return 1 if a FUSED_JCC_PADDING frag should be generated. */
/* NOTE(review): the return-type line, braces, and the `return`
   statements are missing from this extraction fragment.  The visible
   logic: bail out unless -malign-branch-boundary fusion alignment is
   active, then consult maybe_fused_with_jcc_p and suppress padding
   (with a warning) when the previous insn pins the boundary.  */
9308 add_fused_jcc_padding_frag_p (enum mf_cmp_kind
* mf_cmp_p
)
9310 /* NB: Don't work with COND_JUMP86 without i386. */
9311 if (!align_branch_power
9312 || now_seg
== absolute_section
9313 || !cpu_arch_flags
.bitfield
.cpui386
9314 || !(align_branch
& align_branch_fused_bit
))
9317 if (maybe_fused_with_jcc_p (mf_cmp_p
))
9319 if (last_insn
.kind
== last_insn_other
9320 || last_insn
.seg
!= now_seg
)
9323 as_warn_where (last_insn
.file
, last_insn
.line
,
9324 _("`%s` skips -malign-branch-boundary on `%s`"),
9325 last_insn
.name
, i
.tm
.name
);
9331 /* Return 1 if a BRANCH_PREFIX frag should be generated. */
/* NOTE(review): the return-type line, braces, and the `return`
   statements are missing from this extraction fragment.  The visible
   logic mirrors add_fused_jcc_padding_frag_p: check the alignment
   options and instruction class, then warn and suppress when the
   previous instruction forces the boundary decision.  */
9334 add_branch_prefix_frag_p (void)
9336 /* NB: Don't work with COND_JUMP86 without i386. Don't add prefix
9337 to PadLock instructions since they include prefixes in opcode. */
9338 if (!align_branch_power
9339 || !align_branch_prefix_size
9340 || now_seg
== absolute_section
9341 || i
.tm
.cpu_flags
.bitfield
.cpupadlock
9342 || !cpu_arch_flags
.bitfield
.cpui386
)
9345 /* Don't add prefix if it is a prefix or there is no operand in case
9346 that segment prefix is special. */
9347 if (!i
.operands
|| i
.tm
.opcode_modifier
.isprefix
)
9350 if (last_insn
.kind
== last_insn_other
9351 || last_insn
.seg
!= now_seg
)
9355 as_warn_where (last_insn
.file
, last_insn
.line
,
9356 _("`%s` skips -malign-branch-boundary on `%s`"),
9357 last_insn
.name
, i
.tm
.name
);
9362 /* Return 1 if a BRANCH_PADDING frag should be generated. */
/* NOTE(review): the return-type line, braces, the `add_padding` local
   declaration, and the `return` statements are missing from this
   extraction fragment.  The visible logic classifies the current
   template (jcc, direct jmp, ret, call, indirect call/jmp) into
   *BRANCH_P, checks the matching align_branch_* option bit, special
   cases calls to tls_get_addr, and warns when the previous insn pins
   the boundary.  */
9365 add_branch_padding_frag_p (enum align_branch_kind
*branch_p
,
9366 enum mf_jcc_kind
*mf_jcc_p
)
9370 /* NB: Don't work with COND_JUMP86 without i386. */
9371 if (!align_branch_power
9372 || now_seg
== absolute_section
9373 || !cpu_arch_flags
.bitfield
.cpui386
9374 || i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
9379 /* Check for jcc and direct jmp. */
9380 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
9382 if (i
.tm
.base_opcode
== JUMP_PC_RELATIVE
)
9384 *branch_p
= align_branch_jmp
;
9385 add_padding
= align_branch
& align_branch_jmp_bit
;
9389 /* Because J<cc> and JN<cc> share same group in macro-fusible table,
9390 ignore the lowest bit. */
9391 *mf_jcc_p
= (i
.tm
.base_opcode
& 0x0e) >> 1;
9392 *branch_p
= align_branch_jcc
;
9393 if ((align_branch
& align_branch_jcc_bit
))
9397 else if ((i
.tm
.base_opcode
| 1) == 0xc3)
9400 *branch_p
= align_branch_ret
;
9401 if ((align_branch
& align_branch_ret_bit
))
9406 /* Check for indirect jmp, direct and indirect calls. */
9407 if (i
.tm
.base_opcode
== 0xe8)
9410 *branch_p
= align_branch_call
;
9411 if ((align_branch
& align_branch_call_bit
))
9414 else if (i
.tm
.base_opcode
== 0xff
9415 && (i
.tm
.extension_opcode
== 2
9416 || i
.tm
.extension_opcode
== 4))
9418 /* Indirect call and jmp. */
9419 *branch_p
= align_branch_indirect
;
9420 if ((align_branch
& align_branch_indirect_bit
))
9427 && (i
.op
[0].disps
->X_op
== O_symbol
9428 || (i
.op
[0].disps
->X_op
== O_subtract
9429 && i
.op
[0].disps
->X_op_symbol
== GOT_symbol
)))
9431 symbolS
*s
= i
.op
[0].disps
->X_add_symbol
;
9432 /* No padding to call to global or undefined tls_get_addr. */
9433 if ((S_IS_EXTERNAL (s
) || !S_IS_DEFINED (s
))
9434 && strcmp (S_GET_NAME (s
), tls_get_addr
) == 0)
9440 && last_insn
.kind
!= last_insn_other
9441 && last_insn
.seg
== now_seg
)
9444 as_warn_where (last_insn
.file
, last_insn
.line
,
9445 _("`%s` skips -malign-branch-boundary on `%s`"),
9446 last_insn
.name
, i
.tm
.name
);
9456 fragS
*insn_start_frag
;
9457 offsetT insn_start_off
;
9458 fragS
*fragP
= NULL
;
9459 enum align_branch_kind branch
= align_branch_none
;
9460 /* The initializer is arbitrary just to avoid uninitialized error.
9461 it's actually either assigned in add_branch_padding_frag_p
9462 or never be used. */
9463 enum mf_jcc_kind mf_jcc
= mf_jcc_jo
;
9465 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9466 if (IS_ELF
&& x86_used_note
&& now_seg
!= absolute_section
)
9468 if ((i
.xstate
& xstate_tmm
) == xstate_tmm
9469 || i
.tm
.cpu_flags
.bitfield
.cpuamx_tile
)
9470 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_TMM
;
9472 if (i
.tm
.cpu_flags
.bitfield
.cpu8087
9473 || i
.tm
.cpu_flags
.bitfield
.cpu287
9474 || i
.tm
.cpu_flags
.bitfield
.cpu387
9475 || i
.tm
.cpu_flags
.bitfield
.cpu687
9476 || i
.tm
.cpu_flags
.bitfield
.cpufisttp
)
9477 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X87
;
9479 if ((i
.xstate
& xstate_mmx
)
9480 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
9481 && !is_any_vex_encoding (&i
.tm
)
9482 && (i
.tm
.base_opcode
== 0x77 /* emms */
9483 || i
.tm
.base_opcode
== 0x0e /* femms */)))
9484 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MMX
;
9488 if (i
.index_reg
->reg_type
.bitfield
.zmmword
)
9489 i
.xstate
|= xstate_zmm
;
9490 else if (i
.index_reg
->reg_type
.bitfield
.ymmword
)
9491 i
.xstate
|= xstate_ymm
;
9492 else if (i
.index_reg
->reg_type
.bitfield
.xmmword
)
9493 i
.xstate
|= xstate_xmm
;
9496 /* vzeroall / vzeroupper */
9497 if (i
.tm
.base_opcode
== 0x77 && i
.tm
.cpu_flags
.bitfield
.cpuavx
)
9498 i
.xstate
|= xstate_ymm
;
9500 if ((i
.xstate
& xstate_xmm
)
9501 /* ldmxcsr / stmxcsr / vldmxcsr / vstmxcsr */
9502 || (i
.tm
.base_opcode
== 0xae
9503 && (i
.tm
.cpu_flags
.bitfield
.cpusse
9504 || i
.tm
.cpu_flags
.bitfield
.cpuavx
))
9505 || i
.tm
.cpu_flags
.bitfield
.cpuwidekl
9506 || i
.tm
.cpu_flags
.bitfield
.cpukl
)
9507 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XMM
;
9509 if ((i
.xstate
& xstate_ymm
) == xstate_ymm
)
9510 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_YMM
;
9511 if ((i
.xstate
& xstate_zmm
) == xstate_zmm
)
9512 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_ZMM
;
9513 if (i
.mask
.reg
|| (i
.xstate
& xstate_mask
) == xstate_mask
)
9514 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MASK
;
9515 if (i
.tm
.cpu_flags
.bitfield
.cpufxsr
)
9516 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_FXSR
;
9517 if (i
.tm
.cpu_flags
.bitfield
.cpuxsave
)
9518 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVE
;
9519 if (i
.tm
.cpu_flags
.bitfield
.cpuxsaveopt
)
9520 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
;
9521 if (i
.tm
.cpu_flags
.bitfield
.cpuxsavec
)
9522 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEC
;
9524 if (x86_feature_2_used
9525 || i
.tm
.cpu_flags
.bitfield
.cpucmov
9526 || i
.tm
.cpu_flags
.bitfield
.cpusyscall
9527 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
9528 && i
.tm
.base_opcode
== 0xc7
9529 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
9530 && i
.tm
.extension_opcode
== 1) /* cmpxchg8b */)
9531 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_BASELINE
;
9532 if (i
.tm
.cpu_flags
.bitfield
.cpusse3
9533 || i
.tm
.cpu_flags
.bitfield
.cpussse3
9534 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
9535 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
9536 || i
.tm
.cpu_flags
.bitfield
.cpucx16
9537 || i
.tm
.cpu_flags
.bitfield
.cpupopcnt
9538 /* LAHF-SAHF insns in 64-bit mode. */
9539 || (flag_code
== CODE_64BIT
9540 && (i
.tm
.base_opcode
| 1) == 0x9f
9541 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
))
9542 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V2
;
9543 if (i
.tm
.cpu_flags
.bitfield
.cpuavx
9544 || i
.tm
.cpu_flags
.bitfield
.cpuavx2
9545 /* Any VEX encoded insns except for CpuAVX512F, CpuAVX512BW,
9546 CpuAVX512DQ, LPW, TBM and AMX. */
9547 || (i
.tm
.opcode_modifier
.vex
9548 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512f
9549 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
9550 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
9551 && !i
.tm
.cpu_flags
.bitfield
.cpulwp
9552 && !i
.tm
.cpu_flags
.bitfield
.cputbm
9553 && !(x86_feature_2_used
& GNU_PROPERTY_X86_FEATURE_2_TMM
))
9554 || i
.tm
.cpu_flags
.bitfield
.cpuf16c
9555 || i
.tm
.cpu_flags
.bitfield
.cpufma
9556 || i
.tm
.cpu_flags
.bitfield
.cpulzcnt
9557 || i
.tm
.cpu_flags
.bitfield
.cpumovbe
9558 || i
.tm
.cpu_flags
.bitfield
.cpuxsaves
9559 || (x86_feature_2_used
9560 & (GNU_PROPERTY_X86_FEATURE_2_XSAVE
9561 | GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
9562 | GNU_PROPERTY_X86_FEATURE_2_XSAVEC
)) != 0)
9563 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V3
;
9564 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512f
9565 || i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
9566 || i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
9567 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
9568 /* Any EVEX encoded insns except for AVX512ER, AVX512PF and
9570 || (i
.tm
.opcode_modifier
.evex
9571 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512er
9572 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512pf
9573 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512_4vnniw
))
9574 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V4
;
9578 /* Tie dwarf2 debug info to the address at the start of the insn.
9579 We can't do this after the insn has been output as the current
9580 frag may have been closed off. eg. by frag_var. */
9581 dwarf2_emit_insn (0);
9583 insn_start_frag
= frag_now
;
9584 insn_start_off
= frag_now_fix ();
9586 if (add_branch_padding_frag_p (&branch
, &mf_jcc
))
9589 /* Branch can be 8 bytes. Leave some room for prefixes. */
9590 unsigned int max_branch_padding_size
= 14;
9592 /* Align section to boundary. */
9593 record_alignment (now_seg
, align_branch_power
);
9595 /* Make room for padding. */
9596 frag_grow (max_branch_padding_size
);
9598 /* Start of the padding. */
9603 frag_var (rs_machine_dependent
, max_branch_padding_size
, 0,
9604 ENCODE_RELAX_STATE (BRANCH_PADDING
, 0),
9607 fragP
->tc_frag_data
.mf_type
= mf_jcc
;
9608 fragP
->tc_frag_data
.branch_type
= branch
;
9609 fragP
->tc_frag_data
.max_bytes
= max_branch_padding_size
;
9613 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
9615 else if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
9616 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
9618 else if (i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
)
9619 output_interseg_jump ();
9622 /* Output normal instructions here. */
9626 enum mf_cmp_kind mf_cmp
;
9629 && (i
.tm
.base_opcode
== 0xaee8
9630 || i
.tm
.base_opcode
== 0xaef0
9631 || i
.tm
.base_opcode
== 0xaef8))
9633 /* Encode lfence, mfence, and sfence as
9634 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
9635 if (flag_code
== CODE_16BIT
)
9636 as_bad (_("Cannot convert `%s' in 16-bit mode"), i
.tm
.name
);
9637 else if (omit_lock_prefix
)
9638 as_bad (_("Cannot convert `%s' with `-momit-lock-prefix=yes' in effect"),
9640 else if (now_seg
!= absolute_section
)
9642 offsetT val
= 0x240483f0ULL
;
9645 md_number_to_chars (p
, val
, 5);
9648 abs_section_offset
+= 5;
9652 /* Some processors fail on LOCK prefix. This options makes
9653 assembler ignore LOCK prefix and serves as a workaround. */
9654 if (omit_lock_prefix
)
9656 if (i
.tm
.base_opcode
== LOCK_PREFIX_OPCODE
9657 && i
.tm
.opcode_modifier
.isprefix
)
9659 i
.prefix
[LOCK_PREFIX
] = 0;
9663 /* Skip if this is a branch. */
9665 else if (add_fused_jcc_padding_frag_p (&mf_cmp
))
9667 /* Make room for padding. */
9668 frag_grow (MAX_FUSED_JCC_PADDING_SIZE
);
9673 frag_var (rs_machine_dependent
, MAX_FUSED_JCC_PADDING_SIZE
, 0,
9674 ENCODE_RELAX_STATE (FUSED_JCC_PADDING
, 0),
9677 fragP
->tc_frag_data
.mf_type
= mf_cmp
;
9678 fragP
->tc_frag_data
.branch_type
= align_branch_fused
;
9679 fragP
->tc_frag_data
.max_bytes
= MAX_FUSED_JCC_PADDING_SIZE
;
9681 else if (add_branch_prefix_frag_p ())
9683 unsigned int max_prefix_size
= align_branch_prefix_size
;
9685 /* Make room for padding. */
9686 frag_grow (max_prefix_size
);
9691 frag_var (rs_machine_dependent
, max_prefix_size
, 0,
9692 ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0),
9695 fragP
->tc_frag_data
.max_bytes
= max_prefix_size
;
9698 /* Since the VEX/EVEX prefix contains the implicit prefix, we
9699 don't need the explicit prefix. */
9700 if (!i
.tm
.opcode_modifier
.vex
&& !i
.tm
.opcode_modifier
.evex
)
9702 switch (i
.tm
.opcode_modifier
.opcodeprefix
)
9711 if (!i
.tm
.cpu_flags
.bitfield
.cpupadlock
9712 || (i
.prefix
[REP_PREFIX
] != 0xf3))
9716 switch (i
.opcode_length
)
9721 /* Check for pseudo prefixes. */
9722 if (!i
.tm
.opcode_modifier
.isprefix
|| i
.tm
.base_opcode
)
9724 as_bad_where (insn_start_frag
->fr_file
,
9725 insn_start_frag
->fr_line
,
9726 _("pseudo prefix without instruction"));
9736 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9737 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
9738 R_X86_64_GOTTPOFF relocation so that linker can safely
9739 perform IE->LE optimization. A dummy REX_OPCODE prefix
9740 is also needed for lea with R_X86_64_GOTPC32_TLSDESC
9741 relocation for GDesc -> IE/LE optimization. */
9742 if (x86_elf_abi
== X86_64_X32_ABI
9744 && (i
.reloc
[0] == BFD_RELOC_X86_64_GOTTPOFF
9745 || i
.reloc
[0] == BFD_RELOC_X86_64_GOTPC32_TLSDESC
)
9746 && i
.prefix
[REX_PREFIX
] == 0)
9747 add_prefix (REX_OPCODE
);
9750 /* The prefix bytes. */
9751 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
9753 frag_opcode_byte (*q
);
9757 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
9763 frag_opcode_byte (*q
);
9766 /* There should be no other prefixes for instructions
9771 /* For EVEX instructions i.vrex should become 0 after
9772 build_evex_prefix. For VEX instructions upper 16 registers
9773 aren't available, so VREX should be 0. */
9776 /* Now the VEX prefix. */
9777 if (now_seg
!= absolute_section
)
9779 p
= frag_more (i
.vex
.length
);
9780 for (j
= 0; j
< i
.vex
.length
; j
++)
9781 p
[j
] = i
.vex
.bytes
[j
];
9784 abs_section_offset
+= i
.vex
.length
;
9787 /* Now the opcode; be careful about word order here! */
9788 j
= i
.opcode_length
;
9790 switch (i
.tm
.opcode_modifier
.opcodespace
)
9805 if (now_seg
== absolute_section
)
9806 abs_section_offset
+= j
;
9809 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
9815 && i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
9818 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_0F
)
9819 *p
++ = i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
9823 switch (i
.opcode_length
)
9826 /* Put out high byte first: can't use md_number_to_chars! */
9827 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
9830 *p
= i
.tm
.base_opcode
& 0xff;
9839 /* Now the modrm byte and sib byte (if present). */
9840 if (i
.tm
.opcode_modifier
.modrm
)
9842 frag_opcode_byte ((i
.rm
.regmem
<< 0)
9844 | (i
.rm
.mode
<< 6));
9845 /* If i.rm.regmem == ESP (4)
9846 && i.rm.mode != (Register mode)
9848 ==> need second modrm byte. */
9849 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
9851 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.word
))
9852 frag_opcode_byte ((i
.sib
.base
<< 0)
9853 | (i
.sib
.index
<< 3)
9854 | (i
.sib
.scale
<< 6));
9857 if (i
.disp_operands
)
9858 output_disp (insn_start_frag
, insn_start_off
);
9861 output_imm (insn_start_frag
, insn_start_off
);
9864 * frag_now_fix () returning plain abs_section_offset when we're in the
9865 * absolute section, and abs_section_offset not getting updated as data
9866 * gets added to the frag breaks the logic below.
9868 if (now_seg
!= absolute_section
)
9870 j
= encoding_length (insn_start_frag
, insn_start_off
, frag_more (0));
9872 as_warn (_("instruction length of %u bytes exceeds the limit of 15"),
9876 /* NB: Don't add prefix with GOTPC relocation since
9877 output_disp() above depends on the fixed encoding
9878 length. Can't add prefix with TLS relocation since
9879 it breaks TLS linker optimization. */
9880 unsigned int max
= i
.has_gotpc_tls_reloc
? 0 : 15 - j
;
9881 /* Prefix count on the current instruction. */
9882 unsigned int count
= i
.vex
.length
;
9884 for (k
= 0; k
< ARRAY_SIZE (i
.prefix
); k
++)
9885 /* REX byte is encoded in VEX/EVEX prefix. */
9886 if (i
.prefix
[k
] && (k
!= REX_PREFIX
|| !i
.vex
.length
))
9889 /* Count prefixes for extended opcode maps. */
9891 switch (i
.tm
.opcode_modifier
.opcodespace
)
9906 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
9909 /* Set the maximum prefix size in BRANCH_PREFIX
9911 if (fragP
->tc_frag_data
.max_bytes
> max
)
9912 fragP
->tc_frag_data
.max_bytes
= max
;
9913 if (fragP
->tc_frag_data
.max_bytes
> count
)
9914 fragP
->tc_frag_data
.max_bytes
-= count
;
9916 fragP
->tc_frag_data
.max_bytes
= 0;
9920 /* Remember the maximum prefix size in FUSED_JCC_PADDING
9922 unsigned int max_prefix_size
;
9923 if (align_branch_prefix_size
> max
)
9924 max_prefix_size
= max
;
9926 max_prefix_size
= align_branch_prefix_size
;
9927 if (max_prefix_size
> count
)
9928 fragP
->tc_frag_data
.max_prefix_length
9929 = max_prefix_size
- count
;
9932 /* Use existing segment prefix if possible. Use CS
9933 segment prefix in 64-bit mode. In 32-bit mode, use SS
9934 segment prefix with ESP/EBP base register and use DS
9935 segment prefix without ESP/EBP base register. */
9936 if (i
.prefix
[SEG_PREFIX
])
9937 fragP
->tc_frag_data
.default_prefix
= i
.prefix
[SEG_PREFIX
];
9938 else if (flag_code
== CODE_64BIT
)
9939 fragP
->tc_frag_data
.default_prefix
= CS_PREFIX_OPCODE
;
9941 && (i
.base_reg
->reg_num
== 4
9942 || i
.base_reg
->reg_num
== 5))
9943 fragP
->tc_frag_data
.default_prefix
= SS_PREFIX_OPCODE
;
9945 fragP
->tc_frag_data
.default_prefix
= DS_PREFIX_OPCODE
;
9950 /* NB: Don't work with COND_JUMP86 without i386. */
9951 if (align_branch_power
9952 && now_seg
!= absolute_section
9953 && cpu_arch_flags
.bitfield
.cpui386
)
9955 /* Terminate each frag so that we can add prefix and check for
9957 frag_wane (frag_now
);
9964 pi ("" /*line*/, &i
);
9966 #endif /* DEBUG386 */
9969 /* Return the size of the displacement operand N. */
/* NOTE(review): the return-type line, braces, the `size` local with its
   default value, and the assignments in each branch are missing from
   this extraction fragment; only the width-classifying conditions on
   i.types[n] remain visible.  */
9972 disp_size (unsigned int n
)
9976 if (i
.types
[n
].bitfield
.disp64
)
9978 else if (i
.types
[n
].bitfield
.disp8
)
9980 else if (i
.types
[n
].bitfield
.disp16
)
9985 /* Return the size of the immediate operand N. */
/* NOTE(review): the return-type line, braces, the `size` local with its
   default value, and the assignments in each branch are missing from
   this extraction fragment; only the width-classifying conditions on
   i.types[n] remain visible.  imm8 and imm8s are both one byte.  */
9988 imm_size (unsigned int n
)
9991 if (i
.types
[n
].bitfield
.imm64
)
9993 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
9995 else if (i
.types
[n
].bitfield
.imm16
)
10001 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
10006 for (n
= 0; n
< i
.operands
; n
++)
10008 if (operand_type_check (i
.types
[n
], disp
))
10010 int size
= disp_size (n
);
10012 if (now_seg
== absolute_section
)
10013 abs_section_offset
+= size
;
10014 else if (i
.op
[n
].disps
->X_op
== O_constant
)
10016 offsetT val
= i
.op
[n
].disps
->X_add_number
;
10018 val
= offset_in_range (val
>> (size
== 1 ? i
.memshift
: 0),
10020 p
= frag_more (size
);
10021 md_number_to_chars (p
, val
, size
);
10025 enum bfd_reloc_code_real reloc_type
;
10026 int sign
= i
.types
[n
].bitfield
.disp32s
;
10027 int pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
10030 /* We can't have 8 bit displacement here. */
10031 gas_assert (!i
.types
[n
].bitfield
.disp8
);
10033 /* The PC relative address is computed relative
10034 to the instruction boundary, so in case immediate
10035 fields follows, we need to adjust the value. */
10036 if (pcrel
&& i
.imm_operands
)
10041 for (n1
= 0; n1
< i
.operands
; n1
++)
10042 if (operand_type_check (i
.types
[n1
], imm
))
10044 /* Only one immediate is allowed for PC
10045 relative address. */
10046 gas_assert (sz
== 0);
10047 sz
= imm_size (n1
);
10048 i
.op
[n
].disps
->X_add_number
-= sz
;
10050 /* We should find the immediate. */
10051 gas_assert (sz
!= 0);
10054 p
= frag_more (size
);
10055 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
10057 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
10058 && (((reloc_type
== BFD_RELOC_32
10059 || reloc_type
== BFD_RELOC_X86_64_32S
10060 || (reloc_type
== BFD_RELOC_64
10062 && (i
.op
[n
].disps
->X_op
== O_symbol
10063 || (i
.op
[n
].disps
->X_op
== O_add
10064 && ((symbol_get_value_expression
10065 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
10067 || reloc_type
== BFD_RELOC_32_PCREL
))
10071 reloc_type
= BFD_RELOC_386_GOTPC
;
10072 i
.has_gotpc_tls_reloc
= true;
10073 i
.op
[n
].disps
->X_add_number
+=
10074 encoding_length (insn_start_frag
, insn_start_off
, p
);
10076 else if (reloc_type
== BFD_RELOC_64
)
10077 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
10079 /* Don't do the adjustment for x86-64, as there
10080 the pcrel addressing is relative to the _next_
10081 insn, and that is taken care of in other code. */
10082 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
10084 else if (align_branch_power
)
10086 switch (reloc_type
)
10088 case BFD_RELOC_386_TLS_GD
:
10089 case BFD_RELOC_386_TLS_LDM
:
10090 case BFD_RELOC_386_TLS_IE
:
10091 case BFD_RELOC_386_TLS_IE_32
:
10092 case BFD_RELOC_386_TLS_GOTIE
:
10093 case BFD_RELOC_386_TLS_GOTDESC
:
10094 case BFD_RELOC_386_TLS_DESC_CALL
:
10095 case BFD_RELOC_X86_64_TLSGD
:
10096 case BFD_RELOC_X86_64_TLSLD
:
10097 case BFD_RELOC_X86_64_GOTTPOFF
:
10098 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
10099 case BFD_RELOC_X86_64_TLSDESC_CALL
:
10100 i
.has_gotpc_tls_reloc
= true;
10105 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
,
10106 size
, i
.op
[n
].disps
, pcrel
,
10109 if (flag_code
== CODE_64BIT
&& size
== 4 && pcrel
10110 && !i
.prefix
[ADDR_PREFIX
])
10111 fixP
->fx_signed
= 1;
10113 /* Check for "call/jmp *mem", "mov mem, %reg",
10114 "test %reg, mem" and "binop mem, %reg" where binop
10115 is one of adc, add, and, cmp, or, sbb, sub, xor
10116 instructions without data prefix. Always generate
10117 R_386_GOT32X for "sym*GOT" operand in 32-bit mode. */
10118 if (i
.prefix
[DATA_PREFIX
] == 0
10119 && (generate_relax_relocations
10122 && i
.rm
.regmem
== 5))
10124 || (i
.rm
.mode
== 0 && i
.rm
.regmem
== 5))
10125 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
10126 && ((i
.operands
== 1
10127 && i
.tm
.base_opcode
== 0xff
10128 && (i
.rm
.reg
== 2 || i
.rm
.reg
== 4))
10129 || (i
.operands
== 2
10130 && (i
.tm
.base_opcode
== 0x8b
10131 || i
.tm
.base_opcode
== 0x85
10132 || (i
.tm
.base_opcode
& ~0x38) == 0x03))))
10136 fixP
->fx_tcbit
= i
.rex
!= 0;
10138 && (i
.base_reg
->reg_num
== RegIP
))
10139 fixP
->fx_tcbit2
= 1;
10142 fixP
->fx_tcbit2
= 1;
10150 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
10155 for (n
= 0; n
< i
.operands
; n
++)
10157 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
10158 if (i
.rounding
.type
!= rc_none
&& n
== i
.rounding
.operand
)
10161 if (operand_type_check (i
.types
[n
], imm
))
10163 int size
= imm_size (n
);
10165 if (now_seg
== absolute_section
)
10166 abs_section_offset
+= size
;
10167 else if (i
.op
[n
].imms
->X_op
== O_constant
)
10171 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
10173 p
= frag_more (size
);
10174 md_number_to_chars (p
, val
, size
);
10178 /* Not absolute_section.
10179 Need a 32-bit fixup (don't support 8bit
10180 non-absolute imms). Try to support other
10182 enum bfd_reloc_code_real reloc_type
;
10185 if (i
.types
[n
].bitfield
.imm32s
10186 && (i
.suffix
== QWORD_MNEM_SUFFIX
10187 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)))
10192 p
= frag_more (size
);
10193 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
10195 /* This is tough to explain. We end up with this one if we
10196 * have operands that look like
10197 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
10198 * obtain the absolute address of the GOT, and it is strongly
10199 * preferable from a performance point of view to avoid using
10200 * a runtime relocation for this. The actual sequence of
10201 * instructions often look something like:
10206 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
10208 * The call and pop essentially return the absolute address
10209 * of the label .L66 and store it in %ebx. The linker itself
10210 * will ultimately change the first operand of the addl so
10211 * that %ebx points to the GOT, but to keep things simple, the
10212 * .o file must have this operand set so that it generates not
10213 * the absolute address of .L66, but the absolute address of
10214 * itself. This allows the linker itself simply treat a GOTPC
10215 * relocation as asking for a pcrel offset to the GOT to be
10216 * added in, and the addend of the relocation is stored in the
10217 * operand field for the instruction itself.
10219 * Our job here is to fix the operand so that it would add
10220 * the correct offset so that %ebx would point to itself. The
10221 * thing that is tricky is that .-.L66 will point to the
10222 * beginning of the instruction, so we need to further modify
10223 * the operand so that it will point to itself. There are
10224 * other cases where you have something like:
10226 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
10228 * and here no correction would be required. Internally in
10229 * the assembler we treat operands of this form as not being
10230 * pcrel since the '.' is explicitly mentioned, and I wonder
10231 * whether it would simplify matters to do it this way. Who
10232 * knows. In earlier versions of the PIC patches, the
10233 * pcrel_adjust field was used to store the correction, but
10234 * since the expression is not pcrel, I felt it would be
10235 * confusing to do it this way. */
10237 if ((reloc_type
== BFD_RELOC_32
10238 || reloc_type
== BFD_RELOC_X86_64_32S
10239 || reloc_type
== BFD_RELOC_64
)
10241 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
10242 && (i
.op
[n
].imms
->X_op
== O_symbol
10243 || (i
.op
[n
].imms
->X_op
== O_add
10244 && ((symbol_get_value_expression
10245 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
10249 reloc_type
= BFD_RELOC_386_GOTPC
;
10250 else if (size
== 4)
10251 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
10252 else if (size
== 8)
10253 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
10254 i
.has_gotpc_tls_reloc
= true;
10255 i
.op
[n
].imms
->X_add_number
+=
10256 encoding_length (insn_start_frag
, insn_start_off
, p
);
10258 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
10259 i
.op
[n
].imms
, 0, reloc_type
);
10265 /* x86_cons_fix_new is called via the expression parsing code when a
10266 reloc is needed. We use this hook to get the correct .got reloc. */
10267 static int cons_sign
= -1;
10270 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
10271 expressionS
*exp
, bfd_reloc_code_real_type r
)
10273 r
= reloc (len
, 0, cons_sign
, r
);
10276 if (exp
->X_op
== O_secrel
)
10278 exp
->X_op
= O_symbol
;
10279 r
= BFD_RELOC_32_SECREL
;
10283 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
10286 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
10287 purpose of the `.dc.a' internal pseudo-op. */
10290 x86_address_bytes (void)
10292 if ((stdoutput
->arch_info
->mach
& bfd_mach_x64_32
))
10294 return stdoutput
->arch_info
->bits_per_address
/ 8;
10297 #if (!(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
10298 || defined (LEX_AT)) && !defined (TE_PE)
10299 # define lex_got(reloc, adjust, types) NULL
10301 /* Parse operands of the form
10302 <symbol>@GOTOFF+<nnn>
10303 and similar .plt or .got references.
10305 If we find one, set up the correct relocation in RELOC and copy the
10306 input string, minus the `@GOTOFF' into a malloc'd buffer for
10307 parsing by the calling routine. Return this buffer, and if ADJUST
10308 is non-null set it to the length of the string we removed from the
10309 input line. Otherwise return NULL. */
10311 lex_got (enum bfd_reloc_code_real
*rel
,
10313 i386_operand_type
*types
)
10315 /* Some of the relocations depend on the size of what field is to
10316 be relocated. But in our callers i386_immediate and i386_displacement
10317 we don't yet know the operand size (this will be set by insn
10318 matching). Hence we record the word32 relocation here,
10319 and adjust the reloc according to the real size in reloc(). */
10320 static const struct {
10323 const enum bfd_reloc_code_real rel
[2];
10324 const i386_operand_type types64
;
10325 bool need_GOT_symbol
;
10328 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10329 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32
,
10330 BFD_RELOC_SIZE32
},
10331 OPERAND_TYPE_IMM32_64
, false },
10333 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real
,
10334 BFD_RELOC_X86_64_PLTOFF64
},
10335 OPERAND_TYPE_IMM64
, true },
10336 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32
,
10337 BFD_RELOC_X86_64_PLT32
},
10338 OPERAND_TYPE_IMM32_32S_DISP32
, false },
10339 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real
,
10340 BFD_RELOC_X86_64_GOTPLT64
},
10341 OPERAND_TYPE_IMM64_DISP64
, true },
10342 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF
,
10343 BFD_RELOC_X86_64_GOTOFF64
},
10344 OPERAND_TYPE_IMM64_DISP64
, true },
10345 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real
,
10346 BFD_RELOC_X86_64_GOTPCREL
},
10347 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10348 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD
,
10349 BFD_RELOC_X86_64_TLSGD
},
10350 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10351 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM
,
10352 _dummy_first_bfd_reloc_code_real
},
10353 OPERAND_TYPE_NONE
, true },
10354 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real
,
10355 BFD_RELOC_X86_64_TLSLD
},
10356 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10357 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32
,
10358 BFD_RELOC_X86_64_GOTTPOFF
},
10359 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10360 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32
,
10361 BFD_RELOC_X86_64_TPOFF32
},
10362 OPERAND_TYPE_IMM32_32S_64_DISP32_64
, true },
10363 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE
,
10364 _dummy_first_bfd_reloc_code_real
},
10365 OPERAND_TYPE_NONE
, true },
10366 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32
,
10367 BFD_RELOC_X86_64_DTPOFF32
},
10368 OPERAND_TYPE_IMM32_32S_64_DISP32_64
, true },
10369 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE
,
10370 _dummy_first_bfd_reloc_code_real
},
10371 OPERAND_TYPE_NONE
, true },
10372 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE
,
10373 _dummy_first_bfd_reloc_code_real
},
10374 OPERAND_TYPE_NONE
, true },
10375 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32
,
10376 BFD_RELOC_X86_64_GOT32
},
10377 OPERAND_TYPE_IMM32_32S_64_DISP32
, true },
10378 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC
,
10379 BFD_RELOC_X86_64_GOTPC32_TLSDESC
},
10380 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10381 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL
,
10382 BFD_RELOC_X86_64_TLSDESC_CALL
},
10383 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10385 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL
,
10386 BFD_RELOC_32_SECREL
},
10387 OPERAND_TYPE_IMM32_32S_64_DISP32_64
, false },
10393 #if defined (OBJ_MAYBE_ELF) && !defined (TE_PE)
10398 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
10399 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
10402 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
10404 int len
= gotrel
[j
].len
;
10405 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
10407 if (gotrel
[j
].rel
[object_64bit
] != 0)
10410 char *tmpbuf
, *past_reloc
;
10412 *rel
= gotrel
[j
].rel
[object_64bit
];
10416 if (flag_code
!= CODE_64BIT
)
10418 types
->bitfield
.imm32
= 1;
10419 types
->bitfield
.disp32
= 1;
10422 *types
= gotrel
[j
].types64
;
10425 if (gotrel
[j
].need_GOT_symbol
&& GOT_symbol
== NULL
)
10426 GOT_symbol
= symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME
);
10428 /* The length of the first part of our input line. */
10429 first
= cp
- input_line_pointer
;
10431 /* The second part goes from after the reloc token until
10432 (and including) an end_of_line char or comma. */
10433 past_reloc
= cp
+ 1 + len
;
10435 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
10437 second
= cp
+ 1 - past_reloc
;
10439 /* Allocate and copy string. The trailing NUL shouldn't
10440 be necessary, but be safe. */
10441 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
10442 memcpy (tmpbuf
, input_line_pointer
, first
);
10443 if (second
!= 0 && *past_reloc
!= ' ')
10444 /* Replace the relocation token with ' ', so that
10445 errors like foo@GOTOFF1 will be detected. */
10446 tmpbuf
[first
++] = ' ';
10448 /* Increment length by 1 if the relocation token is
10453 memcpy (tmpbuf
+ first
, past_reloc
, second
);
10454 tmpbuf
[first
+ second
] = '\0';
10458 as_bad (_("@%s reloc is not supported with %d-bit output format"),
10459 gotrel
[j
].str
, 1 << (5 + object_64bit
));
10464 /* Might be a symbol version string. Don't as_bad here. */
10469 bfd_reloc_code_real_type
10470 x86_cons (expressionS
*exp
, int size
)
10472 bfd_reloc_code_real_type got_reloc
= NO_RELOC
;
10474 #if ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
10475 && !defined (LEX_AT)) \
10477 intel_syntax
= -intel_syntax
;
10480 if (size
== 4 || (object_64bit
&& size
== 8))
10482 /* Handle @GOTOFF and the like in an expression. */
10484 char *gotfree_input_line
;
10487 save
= input_line_pointer
;
10488 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
);
10489 if (gotfree_input_line
)
10490 input_line_pointer
= gotfree_input_line
;
10494 if (gotfree_input_line
)
10496 /* expression () has merrily parsed up to the end of line,
10497 or a comma - in the wrong buffer. Transfer how far
10498 input_line_pointer has moved to the right buffer. */
10499 input_line_pointer
= (save
10500 + (input_line_pointer
- gotfree_input_line
)
10502 free (gotfree_input_line
);
10503 if (exp
->X_op
== O_constant
10504 || exp
->X_op
== O_absent
10505 || exp
->X_op
== O_illegal
10506 || exp
->X_op
== O_register
10507 || exp
->X_op
== O_big
)
10509 char c
= *input_line_pointer
;
10510 *input_line_pointer
= 0;
10511 as_bad (_("missing or invalid expression `%s'"), save
);
10512 *input_line_pointer
= c
;
10514 else if ((got_reloc
== BFD_RELOC_386_PLT32
10515 || got_reloc
== BFD_RELOC_X86_64_PLT32
)
10516 && exp
->X_op
!= O_symbol
)
10518 char c
= *input_line_pointer
;
10519 *input_line_pointer
= 0;
10520 as_bad (_("invalid PLT expression `%s'"), save
);
10521 *input_line_pointer
= c
;
10528 intel_syntax
= -intel_syntax
;
10531 i386_intel_simplify (exp
);
10536 /* If not 64bit, massage value, to account for wraparound when !BFD64. */
10537 if (size
== 4 && exp
->X_op
== O_constant
&& !object_64bit
)
10538 exp
->X_add_number
= extend_to_32bit_address (exp
->X_add_number
);
10544 signed_cons (int size
)
10554 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
10561 if (exp
.X_op
== O_symbol
)
10562 exp
.X_op
= O_secrel
;
10564 emit_expr (&exp
, 4);
10566 while (*input_line_pointer
++ == ',');
10568 input_line_pointer
--;
10569 demand_empty_rest_of_line ();
10573 /* Handle Vector operations. */
10576 check_VecOperations (char *op_string
)
10578 const reg_entry
*mask
;
10585 if (*op_string
== '{')
10589 /* Check broadcasts. */
10590 if (startswith (op_string
, "1to"))
10592 unsigned int bcst_type
;
10594 if (i
.broadcast
.type
)
10595 goto duplicated_vec_op
;
10598 if (*op_string
== '8')
10600 else if (*op_string
== '4')
10602 else if (*op_string
== '2')
10604 else if (*op_string
== '1'
10605 && *(op_string
+1) == '6')
10610 else if (*op_string
== '3'
10611 && *(op_string
+1) == '2')
10618 as_bad (_("Unsupported broadcast: `%s'"), saved
);
10623 i
.broadcast
.type
= bcst_type
;
10624 i
.broadcast
.operand
= this_operand
;
10626 /* Check masking operation. */
10627 else if ((mask
= parse_register (op_string
, &end_op
)) != NULL
)
10629 if (mask
== &bad_reg
)
10632 /* k0 can't be used for write mask. */
10633 if (mask
->reg_type
.bitfield
.class != RegMask
|| !mask
->reg_num
)
10635 as_bad (_("`%s%s' can't be used for write mask"),
10636 register_prefix
, mask
->reg_name
);
10643 i
.mask
.operand
= this_operand
;
10645 else if (i
.mask
.reg
->reg_num
)
10646 goto duplicated_vec_op
;
10651 /* Only "{z}" is allowed here. No need to check
10652 zeroing mask explicitly. */
10653 if (i
.mask
.operand
!= (unsigned int) this_operand
)
10655 as_bad (_("invalid write mask `%s'"), saved
);
10660 op_string
= end_op
;
10662 /* Check zeroing-flag for masking operation. */
10663 else if (*op_string
== 'z')
10667 i
.mask
.reg
= reg_k0
;
10668 i
.mask
.zeroing
= 1;
10669 i
.mask
.operand
= this_operand
;
10673 if (i
.mask
.zeroing
)
10676 as_bad (_("duplicated `%s'"), saved
);
10680 i
.mask
.zeroing
= 1;
10682 /* Only "{%k}" is allowed here. No need to check mask
10683 register explicitly. */
10684 if (i
.mask
.operand
!= (unsigned int) this_operand
)
10686 as_bad (_("invalid zeroing-masking `%s'"),
10695 goto unknown_vec_op
;
10697 if (*op_string
!= '}')
10699 as_bad (_("missing `}' in `%s'"), saved
);
10704 /* Strip whitespace since the addition of pseudo prefixes
10705 changed how the scrubber treats '{'. */
10706 if (is_space_char (*op_string
))
10712 /* We don't know this one. */
10713 as_bad (_("unknown vector operation: `%s'"), saved
);
10717 if (i
.mask
.reg
&& i
.mask
.zeroing
&& !i
.mask
.reg
->reg_num
)
10719 as_bad (_("zeroing-masking only allowed with write mask"));
10727 i386_immediate (char *imm_start
)
10729 char *save_input_line_pointer
;
10730 char *gotfree_input_line
;
10733 i386_operand_type types
;
10735 operand_type_set (&types
, ~0);
10737 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
10739 as_bad (_("at most %d immediate operands are allowed"),
10740 MAX_IMMEDIATE_OPERANDS
);
10744 exp
= &im_expressions
[i
.imm_operands
++];
10745 i
.op
[this_operand
].imms
= exp
;
10747 if (is_space_char (*imm_start
))
10750 save_input_line_pointer
= input_line_pointer
;
10751 input_line_pointer
= imm_start
;
10753 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
10754 if (gotfree_input_line
)
10755 input_line_pointer
= gotfree_input_line
;
10757 exp_seg
= expression (exp
);
10759 SKIP_WHITESPACE ();
10760 if (*input_line_pointer
)
10761 as_bad (_("junk `%s' after expression"), input_line_pointer
);
10763 input_line_pointer
= save_input_line_pointer
;
10764 if (gotfree_input_line
)
10766 free (gotfree_input_line
);
10768 if (exp
->X_op
== O_constant
)
10769 exp
->X_op
= O_illegal
;
10772 if (exp_seg
== reg_section
)
10774 as_bad (_("illegal immediate register operand %s"), imm_start
);
10778 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
10782 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
10783 i386_operand_type types
, const char *imm_start
)
10785 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
10788 as_bad (_("missing or invalid immediate expression `%s'"),
10792 else if (exp
->X_op
== O_constant
)
10794 /* Size it properly later. */
10795 i
.types
[this_operand
].bitfield
.imm64
= 1;
10797 /* If not 64bit, sign/zero extend val, to account for wraparound
10799 if (flag_code
!= CODE_64BIT
)
10800 exp
->X_add_number
= extend_to_32bit_address (exp
->X_add_number
);
10802 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10803 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
10804 && exp_seg
!= absolute_section
10805 && exp_seg
!= text_section
10806 && exp_seg
!= data_section
10807 && exp_seg
!= bss_section
10808 && exp_seg
!= undefined_section
10809 && !bfd_is_com_section (exp_seg
))
10811 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
10817 /* This is an address. The size of the address will be
10818 determined later, depending on destination register,
10819 suffix, or the default for the section. */
10820 i
.types
[this_operand
].bitfield
.imm8
= 1;
10821 i
.types
[this_operand
].bitfield
.imm16
= 1;
10822 i
.types
[this_operand
].bitfield
.imm32
= 1;
10823 i
.types
[this_operand
].bitfield
.imm32s
= 1;
10824 i
.types
[this_operand
].bitfield
.imm64
= 1;
10825 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
10833 i386_scale (char *scale
)
10836 char *save
= input_line_pointer
;
10838 input_line_pointer
= scale
;
10839 val
= get_absolute_expression ();
10844 i
.log2_scale_factor
= 0;
10847 i
.log2_scale_factor
= 1;
10850 i
.log2_scale_factor
= 2;
10853 i
.log2_scale_factor
= 3;
10857 char sep
= *input_line_pointer
;
10859 *input_line_pointer
= '\0';
10860 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
10862 *input_line_pointer
= sep
;
10863 input_line_pointer
= save
;
10867 if (i
.log2_scale_factor
!= 0 && i
.index_reg
== 0)
10869 as_warn (_("scale factor of %d without an index register"),
10870 1 << i
.log2_scale_factor
);
10871 i
.log2_scale_factor
= 0;
10873 scale
= input_line_pointer
;
10874 input_line_pointer
= save
;
10879 i386_displacement (char *disp_start
, char *disp_end
)
10883 char *save_input_line_pointer
;
10884 char *gotfree_input_line
;
10886 i386_operand_type bigdisp
, types
= anydisp
;
10889 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
10891 as_bad (_("at most %d displacement operands are allowed"),
10892 MAX_MEMORY_OPERANDS
);
10896 operand_type_set (&bigdisp
, 0);
10898 || i
.types
[this_operand
].bitfield
.baseindex
10899 || (current_templates
->start
->opcode_modifier
.jump
!= JUMP
10900 && current_templates
->start
->opcode_modifier
.jump
!= JUMP_DWORD
))
10902 i386_addressing_mode ();
10903 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
10904 if (flag_code
== CODE_64BIT
)
10908 bigdisp
.bitfield
.disp32s
= 1;
10909 bigdisp
.bitfield
.disp64
= 1;
10912 bigdisp
.bitfield
.disp32
= 1;
10914 else if ((flag_code
== CODE_16BIT
) ^ override
)
10915 bigdisp
.bitfield
.disp16
= 1;
10917 bigdisp
.bitfield
.disp32
= 1;
10921 /* For PC-relative branches, the width of the displacement may be
10922 dependent upon data size, but is never dependent upon address size.
10923 Also make sure to not unintentionally match against a non-PC-relative
10924 branch template. */
10925 static templates aux_templates
;
10926 const insn_template
*t
= current_templates
->start
;
10927 bool has_intel64
= false;
10929 aux_templates
.start
= t
;
10930 while (++t
< current_templates
->end
)
10932 if (t
->opcode_modifier
.jump
10933 != current_templates
->start
->opcode_modifier
.jump
)
10935 if ((t
->opcode_modifier
.isa64
>= INTEL64
))
10936 has_intel64
= true;
10938 if (t
< current_templates
->end
)
10940 aux_templates
.end
= t
;
10941 current_templates
= &aux_templates
;
10944 override
= (i
.prefix
[DATA_PREFIX
] != 0);
10945 if (flag_code
== CODE_64BIT
)
10947 if ((override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
10948 && (!intel64
|| !has_intel64
))
10949 bigdisp
.bitfield
.disp16
= 1;
10951 bigdisp
.bitfield
.disp32s
= 1;
10956 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
10958 : LONG_MNEM_SUFFIX
));
10959 bigdisp
.bitfield
.disp32
= 1;
10960 if ((flag_code
== CODE_16BIT
) ^ override
)
10962 bigdisp
.bitfield
.disp32
= 0;
10963 bigdisp
.bitfield
.disp16
= 1;
10967 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
10970 exp
= &disp_expressions
[i
.disp_operands
];
10971 i
.op
[this_operand
].disps
= exp
;
10973 save_input_line_pointer
= input_line_pointer
;
10974 input_line_pointer
= disp_start
;
10975 END_STRING_AND_SAVE (disp_end
);
10977 #ifndef GCC_ASM_O_HACK
10978 #define GCC_ASM_O_HACK 0
10981 END_STRING_AND_SAVE (disp_end
+ 1);
10982 if (i
.types
[this_operand
].bitfield
.baseIndex
10983 && displacement_string_end
[-1] == '+')
10985 /* This hack is to avoid a warning when using the "o"
10986 constraint within gcc asm statements.
10989 #define _set_tssldt_desc(n,addr,limit,type) \
10990 __asm__ __volatile__ ( \
10991 "movw %w2,%0\n\t" \
10992 "movw %w1,2+%0\n\t" \
10993 "rorl $16,%1\n\t" \
10994 "movb %b1,4+%0\n\t" \
10995 "movb %4,5+%0\n\t" \
10996 "movb $0,6+%0\n\t" \
10997 "movb %h1,7+%0\n\t" \
10999 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
11001 This works great except that the output assembler ends
11002 up looking a bit weird if it turns out that there is
11003 no offset. You end up producing code that looks like:
11016 So here we provide the missing zero. */
11018 *displacement_string_end
= '0';
11021 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
11022 if (gotfree_input_line
)
11023 input_line_pointer
= gotfree_input_line
;
11025 exp_seg
= expression (exp
);
11027 SKIP_WHITESPACE ();
11028 if (*input_line_pointer
)
11029 as_bad (_("junk `%s' after expression"), input_line_pointer
);
11031 RESTORE_END_STRING (disp_end
+ 1);
11033 input_line_pointer
= save_input_line_pointer
;
11034 if (gotfree_input_line
)
11036 free (gotfree_input_line
);
11038 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
11039 exp
->X_op
= O_illegal
;
11042 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
11044 RESTORE_END_STRING (disp_end
);
11050 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
11051 i386_operand_type types
, const char *disp_start
)
11053 i386_operand_type bigdisp
;
11056 /* We do this to make sure that the section symbol is in
11057 the symbol table. We will ultimately change the relocation
11058 to be relative to the beginning of the section. */
11059 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
11060 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
11061 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
11063 if (exp
->X_op
!= O_symbol
)
11066 if (S_IS_LOCAL (exp
->X_add_symbol
)
11067 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
11068 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
11069 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
11070 exp
->X_op
= O_subtract
;
11071 exp
->X_op_symbol
= GOT_symbol
;
11072 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
11073 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
11074 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
11075 i
.reloc
[this_operand
] = BFD_RELOC_64
;
11077 i
.reloc
[this_operand
] = BFD_RELOC_32
;
11080 else if (exp
->X_op
== O_absent
11081 || exp
->X_op
== O_illegal
11082 || exp
->X_op
== O_big
)
11085 as_bad (_("missing or invalid displacement expression `%s'"),
11090 else if (exp
->X_op
== O_constant
)
11092 /* Sizing gets taken care of by optimize_disp().
11094 If not 64bit, sign/zero extend val, to account for wraparound
11096 if (flag_code
!= CODE_64BIT
)
11097 exp
->X_add_number
= extend_to_32bit_address (exp
->X_add_number
);
11100 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
11101 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
11102 && exp_seg
!= absolute_section
11103 && exp_seg
!= text_section
11104 && exp_seg
!= data_section
11105 && exp_seg
!= bss_section
11106 && exp_seg
!= undefined_section
11107 && !bfd_is_com_section (exp_seg
))
11109 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
11114 else if (current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
)
11115 i
.types
[this_operand
].bitfield
.disp8
= 1;
11117 /* Check if this is a displacement only operand. */
11118 bigdisp
= operand_type_and_not (i
.types
[this_operand
], anydisp
);
11119 if (operand_type_all_zero (&bigdisp
))
11120 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
11126 /* Return the active addressing mode, taking address override and
11127 registers forming the address into consideration. Update the
11128 address override prefix if necessary. */
11130 static enum flag_code
11131 i386_addressing_mode (void)
11133 enum flag_code addr_mode
;
11135 if (i
.prefix
[ADDR_PREFIX
])
11136 addr_mode
= flag_code
== CODE_32BIT
? CODE_16BIT
: CODE_32BIT
;
11137 else if (flag_code
== CODE_16BIT
11138 && current_templates
->start
->cpu_flags
.bitfield
.cpumpx
11139 /* Avoid replacing the "16-bit addressing not allowed" diagnostic
11140 from md_assemble() by "is not a valid base/index expression"
11141 when there is a base and/or index. */
11142 && !i
.types
[this_operand
].bitfield
.baseindex
)
11144 /* MPX insn memory operands with neither base nor index must be forced
11145 to use 32-bit addressing in 16-bit mode. */
11146 addr_mode
= CODE_32BIT
;
11147 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
11149 gas_assert (!i
.types
[this_operand
].bitfield
.disp16
);
11150 gas_assert (!i
.types
[this_operand
].bitfield
.disp32
);
11154 addr_mode
= flag_code
;
11156 #if INFER_ADDR_PREFIX
11157 if (i
.mem_operands
== 0)
11159 /* Infer address prefix from the first memory operand. */
11160 const reg_entry
*addr_reg
= i
.base_reg
;
11162 if (addr_reg
== NULL
)
11163 addr_reg
= i
.index_reg
;
11167 if (addr_reg
->reg_type
.bitfield
.dword
)
11168 addr_mode
= CODE_32BIT
;
11169 else if (flag_code
!= CODE_64BIT
11170 && addr_reg
->reg_type
.bitfield
.word
)
11171 addr_mode
= CODE_16BIT
;
11173 if (addr_mode
!= flag_code
)
11175 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
11177 /* Change the size of any displacement too. At most one
11178 of Disp16 or Disp32 is set.
11179 FIXME. There doesn't seem to be any real need for
11180 separate Disp16 and Disp32 flags. The same goes for
11181 Imm16 and Imm32. Removing them would probably clean
11182 up the code quite a lot. */
11183 if (flag_code
!= CODE_64BIT
11184 && (i
.types
[this_operand
].bitfield
.disp16
11185 || i
.types
[this_operand
].bitfield
.disp32
))
11186 i
.types
[this_operand
]
11187 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
11197 /* Make sure the memory operand we've been dealt is valid.
11198 Return 1 on success, 0 on a failure. */
11201 i386_index_check (const char *operand_string
)
11203 const char *kind
= "base/index";
11204 enum flag_code addr_mode
= i386_addressing_mode ();
11205 const insn_template
*t
= current_templates
->start
;
11207 if (t
->opcode_modifier
.isstring
11208 && !t
->cpu_flags
.bitfield
.cpupadlock
11209 && (current_templates
->end
[-1].opcode_modifier
.isstring
11210 || i
.mem_operands
))
11212 /* Memory operands of string insns are special in that they only allow
11213 a single register (rDI, rSI, or rBX) as their memory address. */
11214 const reg_entry
*expected_reg
;
11215 static const char *di_si
[][2] =
11221 static const char *bx
[] = { "ebx", "bx", "rbx" };
11223 kind
= "string address";
11225 if (t
->opcode_modifier
.prefixok
== PrefixRep
)
11227 int es_op
= current_templates
->end
[-1].opcode_modifier
.isstring
11228 - IS_STRING_ES_OP0
;
11231 if (!current_templates
->end
[-1].operand_types
[0].bitfield
.baseindex
11232 || ((!i
.mem_operands
!= !intel_syntax
)
11233 && current_templates
->end
[-1].operand_types
[1]
11234 .bitfield
.baseindex
))
11237 = (const reg_entry
*) str_hash_find (reg_hash
,
11238 di_si
[addr_mode
][op
== es_op
]);
11242 = (const reg_entry
*)str_hash_find (reg_hash
, bx
[addr_mode
]);
11244 if (i
.base_reg
!= expected_reg
11246 || operand_type_check (i
.types
[this_operand
], disp
))
11248 /* The second memory operand must have the same size as
11252 && !((addr_mode
== CODE_64BIT
11253 && i
.base_reg
->reg_type
.bitfield
.qword
)
11254 || (addr_mode
== CODE_32BIT
11255 ? i
.base_reg
->reg_type
.bitfield
.dword
11256 : i
.base_reg
->reg_type
.bitfield
.word
)))
11259 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
11261 intel_syntax
? '[' : '(',
11263 expected_reg
->reg_name
,
11264 intel_syntax
? ']' : ')');
11271 as_bad (_("`%s' is not a valid %s expression"),
11272 operand_string
, kind
);
11277 if (addr_mode
!= CODE_16BIT
)
11279 /* 32-bit/64-bit checks. */
11280 if (i
.disp_encoding
== disp_encoding_16bit
)
11283 as_bad (_("invalid `%s' prefix"),
11284 addr_mode
== CODE_16BIT
? "{disp32}" : "{disp16}");
11289 && ((addr_mode
== CODE_64BIT
11290 ? !i
.base_reg
->reg_type
.bitfield
.qword
11291 : !i
.base_reg
->reg_type
.bitfield
.dword
)
11292 || (i
.index_reg
&& i
.base_reg
->reg_num
== RegIP
)
11293 || i
.base_reg
->reg_num
== RegIZ
))
11295 && !i
.index_reg
->reg_type
.bitfield
.xmmword
11296 && !i
.index_reg
->reg_type
.bitfield
.ymmword
11297 && !i
.index_reg
->reg_type
.bitfield
.zmmword
11298 && ((addr_mode
== CODE_64BIT
11299 ? !i
.index_reg
->reg_type
.bitfield
.qword
11300 : !i
.index_reg
->reg_type
.bitfield
.dword
)
11301 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
11304 /* bndmk, bndldx, bndstx and mandatory non-vector SIB have special restrictions. */
11305 if ((t
->opcode_modifier
.opcodeprefix
== PREFIX_0XF3
11306 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11307 && t
->base_opcode
== 0x1b)
11308 || (t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
11309 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11310 && (t
->base_opcode
& ~1) == 0x1a)
11311 || t
->opcode_modifier
.sib
== SIBMEM
)
11313 /* They cannot use RIP-relative addressing. */
11314 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
11316 as_bad (_("`%s' cannot be used here"), operand_string
);
11320 /* bndldx and bndstx ignore their scale factor. */
11321 if (t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
11322 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11323 && (t
->base_opcode
& ~1) == 0x1a
11324 && i
.log2_scale_factor
)
11325 as_warn (_("register scaling is being ignored here"));
11330 /* 16-bit checks. */
11331 if (i
.disp_encoding
== disp_encoding_32bit
)
11335 && (!i
.base_reg
->reg_type
.bitfield
.word
11336 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
11338 && (!i
.index_reg
->reg_type
.bitfield
.word
11339 || !i
.index_reg
->reg_type
.bitfield
.baseindex
11341 && i
.base_reg
->reg_num
< 6
11342 && i
.index_reg
->reg_num
>= 6
11343 && i
.log2_scale_factor
== 0))))
11350 /* Handle vector immediates. */
11353 RC_SAE_immediate (const char *imm_start
)
11355 unsigned int match_found
, j
;
11356 const char *pstr
= imm_start
;
11364 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); j
++)
11366 if (!strncmp (pstr
, RC_NamesTable
[j
].name
, RC_NamesTable
[j
].len
))
11368 if (i
.rounding
.type
!= rc_none
)
11370 as_bad (_("duplicated `%s'"), imm_start
);
11374 i
.rounding
.type
= RC_NamesTable
[j
].type
;
11375 i
.rounding
.operand
= this_operand
;
11377 pstr
+= RC_NamesTable
[j
].len
;
11385 if (*pstr
++ != '}')
11387 as_bad (_("Missing '}': '%s'"), imm_start
);
11390 /* RC/SAE immediate string should contain nothing more. */;
11393 as_bad (_("Junk after '}': '%s'"), imm_start
);
11397 exp
= &im_expressions
[i
.imm_operands
++];
11398 i
.op
[this_operand
].imms
= exp
;
11400 exp
->X_op
= O_constant
;
11401 exp
->X_add_number
= 0;
11402 exp
->X_add_symbol
= (symbolS
*) 0;
11403 exp
->X_op_symbol
= (symbolS
*) 0;
11405 i
.types
[this_operand
].bitfield
.imm8
= 1;
11409 /* Only string instructions can have a second memory operand, so
11410 reduce current_templates to just those if it contains any. */
11412 maybe_adjust_templates (void)
11414 const insn_template
*t
;
11416 gas_assert (i
.mem_operands
== 1);
11418 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
11419 if (t
->opcode_modifier
.isstring
)
11422 if (t
< current_templates
->end
)
11424 static templates aux_templates
;
11427 aux_templates
.start
= t
;
11428 for (; t
< current_templates
->end
; ++t
)
11429 if (!t
->opcode_modifier
.isstring
)
11431 aux_templates
.end
= t
;
11433 /* Determine whether to re-check the first memory operand. */
11434 recheck
= (aux_templates
.start
!= current_templates
->start
11435 || t
!= current_templates
->end
);
11437 current_templates
= &aux_templates
;
11441 i
.mem_operands
= 0;
11442 if (i
.memop1_string
!= NULL
11443 && i386_index_check (i
.memop1_string
) == 0)
11445 i
.mem_operands
= 1;
11452 static INLINE
bool starts_memory_operand (char c
)
11455 || is_identifier_char (c
)
11456 || strchr ("([\"+-!~", c
);
11459 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
11463 i386_att_operand (char *operand_string
)
11465 const reg_entry
*r
;
11467 char *op_string
= operand_string
;
11469 if (is_space_char (*op_string
))
11472 /* We check for an absolute prefix (differentiating,
11473 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
11474 if (*op_string
== ABSOLUTE_PREFIX
)
11477 if (is_space_char (*op_string
))
11479 i
.jumpabsolute
= true;
11482 /* Check if operand is a register. */
11483 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
11485 i386_operand_type temp
;
11490 /* Check for a segment override by searching for ':' after a
11491 segment register. */
11492 op_string
= end_op
;
11493 if (is_space_char (*op_string
))
11495 if (*op_string
== ':' && r
->reg_type
.bitfield
.class == SReg
)
11497 i
.seg
[i
.mem_operands
] = r
;
11499 /* Skip the ':' and whitespace. */
11501 if (is_space_char (*op_string
))
11504 /* Handle case of %es:*foo. */
11505 if (!i
.jumpabsolute
&& *op_string
== ABSOLUTE_PREFIX
)
11508 if (is_space_char (*op_string
))
11510 i
.jumpabsolute
= true;
11513 if (!starts_memory_operand (*op_string
))
11515 as_bad (_("bad memory operand `%s'"), op_string
);
11518 goto do_memory_reference
;
11521 /* Handle vector operations. */
11522 if (*op_string
== '{')
11524 op_string
= check_VecOperations (op_string
);
11525 if (op_string
== NULL
)
11531 as_bad (_("junk `%s' after register"), op_string
);
11534 temp
= r
->reg_type
;
11535 temp
.bitfield
.baseindex
= 0;
11536 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
11538 i
.types
[this_operand
].bitfield
.unspecified
= 0;
11539 i
.op
[this_operand
].regs
= r
;
11542 else if (*op_string
== REGISTER_PREFIX
)
11544 as_bad (_("bad register name `%s'"), op_string
);
11547 else if (*op_string
== IMMEDIATE_PREFIX
)
11550 if (i
.jumpabsolute
)
11552 as_bad (_("immediate operand illegal with absolute jump"));
11555 if (!i386_immediate (op_string
))
11558 else if (RC_SAE_immediate (operand_string
))
11560 /* If it is a RC or SAE immediate, do nothing. */
11563 else if (starts_memory_operand (*op_string
))
11565 /* This is a memory reference of some sort. */
11568 /* Start and end of displacement string expression (if found). */
11569 char *displacement_string_start
;
11570 char *displacement_string_end
;
11572 do_memory_reference
:
11573 if (i
.mem_operands
== 1 && !maybe_adjust_templates ())
11575 if ((i
.mem_operands
== 1
11576 && !current_templates
->start
->opcode_modifier
.isstring
)
11577 || i
.mem_operands
== 2)
11579 as_bad (_("too many memory references for `%s'"),
11580 current_templates
->start
->name
);
11584 /* Check for base index form. We detect the base index form by
11585 looking for an ')' at the end of the operand, searching
11586 for the '(' matching it, and finding a REGISTER_PREFIX or ','
11588 base_string
= op_string
+ strlen (op_string
);
11590 /* Handle vector operations. */
11592 if (is_space_char (*base_string
))
11595 if (*base_string
== '}')
11597 char *vop_start
= NULL
;
11599 while (base_string
-- > op_string
)
11601 if (*base_string
== '"')
11603 if (*base_string
!= '{')
11606 vop_start
= base_string
;
11609 if (is_space_char (*base_string
))
11612 if (*base_string
!= '}')
11620 as_bad (_("unbalanced figure braces"));
11624 if (check_VecOperations (vop_start
) == NULL
)
11628 /* If we only have a displacement, set-up for it to be parsed later. */
11629 displacement_string_start
= op_string
;
11630 displacement_string_end
= base_string
+ 1;
11632 if (*base_string
== ')')
11635 unsigned int parens_not_balanced
= 1;
11637 /* We've already checked that the number of left & right ()'s are
11638 equal, so this loop will not be infinite. */
11642 if (*base_string
== ')')
11643 parens_not_balanced
++;
11644 if (*base_string
== '(')
11645 parens_not_balanced
--;
11647 while (parens_not_balanced
&& *base_string
!= '"');
11649 temp_string
= base_string
;
11651 /* Skip past '(' and whitespace. */
11652 if (*base_string
== '(')
11654 if (is_space_char (*base_string
))
11657 if (*base_string
== ','
11658 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
11661 displacement_string_end
= temp_string
;
11663 i
.types
[this_operand
].bitfield
.baseindex
= 1;
11667 if (i
.base_reg
== &bad_reg
)
11669 base_string
= end_op
;
11670 if (is_space_char (*base_string
))
11674 /* There may be an index reg or scale factor here. */
11675 if (*base_string
== ',')
11678 if (is_space_char (*base_string
))
11681 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
11684 if (i
.index_reg
== &bad_reg
)
11686 base_string
= end_op
;
11687 if (is_space_char (*base_string
))
11689 if (*base_string
== ',')
11692 if (is_space_char (*base_string
))
11695 else if (*base_string
!= ')')
11697 as_bad (_("expecting `,' or `)' "
11698 "after index register in `%s'"),
11703 else if (*base_string
== REGISTER_PREFIX
)
11705 end_op
= strchr (base_string
, ',');
11708 as_bad (_("bad register name `%s'"), base_string
);
11712 /* Check for scale factor. */
11713 if (*base_string
!= ')')
11715 char *end_scale
= i386_scale (base_string
);
11720 base_string
= end_scale
;
11721 if (is_space_char (*base_string
))
11723 if (*base_string
!= ')')
11725 as_bad (_("expecting `)' "
11726 "after scale factor in `%s'"),
11731 else if (!i
.index_reg
)
11733 as_bad (_("expecting index register or scale factor "
11734 "after `,'; got '%c'"),
11739 else if (*base_string
!= ')')
11741 as_bad (_("expecting `,' or `)' "
11742 "after base register in `%s'"),
11747 else if (*base_string
== REGISTER_PREFIX
)
11749 end_op
= strchr (base_string
, ',');
11752 as_bad (_("bad register name `%s'"), base_string
);
11757 /* If there's an expression beginning the operand, parse it,
11758 assuming displacement_string_start and
11759 displacement_string_end are meaningful. */
11760 if (displacement_string_start
!= displacement_string_end
)
11762 if (!i386_displacement (displacement_string_start
,
11763 displacement_string_end
))
11767 /* Special case for (%dx) while doing input/output op. */
11769 && i
.base_reg
->reg_type
.bitfield
.instance
== RegD
11770 && i
.base_reg
->reg_type
.bitfield
.word
11771 && i
.index_reg
== 0
11772 && i
.log2_scale_factor
== 0
11773 && i
.seg
[i
.mem_operands
] == 0
11774 && !operand_type_check (i
.types
[this_operand
], disp
))
11776 i
.types
[this_operand
] = i
.base_reg
->reg_type
;
11780 if (i386_index_check (operand_string
) == 0)
11782 i
.flags
[this_operand
] |= Operand_Mem
;
11783 if (i
.mem_operands
== 0)
11784 i
.memop1_string
= xstrdup (operand_string
);
11789 /* It's not a memory operand; argh! */
11790 as_bad (_("invalid char %s beginning operand %d `%s'"),
11791 output_invalid (*op_string
),
11796 return 1; /* Normal return. */
11799 /* Calculate the maximum variable size (i.e., excluding fr_fix)
11800 that an rs_machine_dependent frag may reach. */
11803 i386_frag_max_var (fragS
*frag
)
11805 /* The only relaxable frags are for jumps.
11806 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
11807 gas_assert (frag
->fr_type
== rs_machine_dependent
);
11808 return TYPE_FROM_RELAX_STATE (frag
->fr_subtype
) == UNCOND_JUMP
? 4 : 5;
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* Return non-zero if FR_SYMBOL is guaranteed to resolve within this
   segment, i.e. a PC-relative branch to it may be relaxed without
   keeping a relocation.  FR_VAR carries the frag's relocation type.  */

static int
elf_symbol_resolved_in_segment_p (symbolS *fr_symbol, offsetT fr_var)
{
  /* STT_GNU_IFUNC symbol must go through PLT.  */
  if ((symbol_get_bfdsym (fr_symbol)->flags
       & BSF_GNU_INDIRECT_FUNCTION) != 0)
    return 0;

  if (!S_IS_EXTERNAL (fr_symbol))
    /* Symbol may be weak or local.  */
    return !S_IS_WEAK (fr_symbol);

  /* Global symbols with non-default visibility can't be preempted. */
  if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol)) != STV_DEFAULT)
    return 1;

  if (fr_var != NO_RELOC)
    switch ((enum bfd_reloc_code_real) fr_var)
      {
      case BFD_RELOC_386_PLT32:
      case BFD_RELOC_X86_64_PLT32:
	/* Symbol with PLT relocation may be preempted. */
	return 0;
      default:
	abort ();
      }

  /* Global symbols with default visibility in a shared library may be
     preempted by another definition.  */
  return !shared;
}
#endif
11845 /* Table 3-2. Macro-Fusible Instructions in Haswell Microarchitecture
11846 Note also work for Skylake and Cascadelake.
11847 ---------------------------------------------------------------------
11848 | JCC | ADD/SUB/CMP | INC/DEC | TEST/AND |
11849 | ------ | ----------- | ------- | -------- |
11851 | Jno | N | N | Y |
11852 | Jc/Jb | Y | N | Y |
11853 | Jae/Jnb | Y | N | Y |
11854 | Je/Jz | Y | Y | Y |
11855 | Jne/Jnz | Y | Y | Y |
11856 | Jna/Jbe | Y | N | Y |
11857 | Ja/Jnbe | Y | N | Y |
11859 | Jns | N | N | Y |
11860 | Jp/Jpe | N | N | Y |
11861 | Jnp/Jpo | N | N | Y |
11862 | Jl/Jnge | Y | Y | Y |
11863 | Jge/Jnl | Y | Y | Y |
11864 | Jle/Jng | Y | Y | Y |
11865 | Jg/Jnle | Y | Y | Y |
11866 --------------------------------------------------------------------- */
11868 i386_macro_fusible_p (enum mf_cmp_kind mf_cmp
, enum mf_jcc_kind mf_jcc
)
11870 if (mf_cmp
== mf_cmp_alu_cmp
)
11871 return ((mf_jcc
>= mf_jcc_jc
&& mf_jcc
<= mf_jcc_jna
)
11872 || mf_jcc
== mf_jcc_jl
|| mf_jcc
== mf_jcc_jle
);
11873 if (mf_cmp
== mf_cmp_incdec
)
11874 return (mf_jcc
== mf_jcc_je
|| mf_jcc
== mf_jcc_jl
11875 || mf_jcc
== mf_jcc_jle
);
11876 if (mf_cmp
== mf_cmp_test_and
)
11881 /* Return the next non-empty frag. */
11884 i386_next_non_empty_frag (fragS
*fragP
)
11886 /* There may be a frag with a ".fill 0" when there is no room in
11887 the current frag for frag_grow in output_insn. */
11888 for (fragP
= fragP
->fr_next
;
11890 && fragP
->fr_type
== rs_fill
11891 && fragP
->fr_fix
== 0);
11892 fragP
= fragP
->fr_next
)
11897 /* Return the next jcc frag after BRANCH_PADDING. */
11900 i386_next_fusible_jcc_frag (fragS
*maybe_cmp_fragP
, fragS
*pad_fragP
)
11902 fragS
*branch_fragP
;
11906 if (pad_fragP
->fr_type
== rs_machine_dependent
11907 && (TYPE_FROM_RELAX_STATE (pad_fragP
->fr_subtype
)
11908 == BRANCH_PADDING
))
11910 branch_fragP
= i386_next_non_empty_frag (pad_fragP
);
11911 if (branch_fragP
->fr_type
!= rs_machine_dependent
)
11913 if (TYPE_FROM_RELAX_STATE (branch_fragP
->fr_subtype
) == COND_JUMP
11914 && i386_macro_fusible_p (maybe_cmp_fragP
->tc_frag_data
.mf_type
,
11915 pad_fragP
->tc_frag_data
.mf_type
))
11916 return branch_fragP
;
11922 /* Classify BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags. */
11925 i386_classify_machine_dependent_frag (fragS
*fragP
)
11929 fragS
*branch_fragP
;
11931 unsigned int max_prefix_length
;
11933 if (fragP
->tc_frag_data
.classified
)
11936 /* First scan for BRANCH_PADDING and FUSED_JCC_PADDING. Convert
11937 FUSED_JCC_PADDING and merge BRANCH_PADDING. */
11938 for (next_fragP
= fragP
;
11939 next_fragP
!= NULL
;
11940 next_fragP
= next_fragP
->fr_next
)
11942 next_fragP
->tc_frag_data
.classified
= 1;
11943 if (next_fragP
->fr_type
== rs_machine_dependent
)
11944 switch (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
))
11946 case BRANCH_PADDING
:
11947 /* The BRANCH_PADDING frag must be followed by a branch
11949 branch_fragP
= i386_next_non_empty_frag (next_fragP
);
11950 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
11952 case FUSED_JCC_PADDING
:
11953 /* Check if this is a fused jcc:
11955 CMP like instruction
11959 cmp_fragP
= i386_next_non_empty_frag (next_fragP
);
11960 pad_fragP
= i386_next_non_empty_frag (cmp_fragP
);
11961 branch_fragP
= i386_next_fusible_jcc_frag (next_fragP
, pad_fragP
);
11964 /* The BRANCH_PADDING frag is merged with the
11965 FUSED_JCC_PADDING frag. */
11966 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
11967 /* CMP like instruction size. */
11968 next_fragP
->tc_frag_data
.cmp_size
= cmp_fragP
->fr_fix
;
11969 frag_wane (pad_fragP
);
11970 /* Skip to branch_fragP. */
11971 next_fragP
= branch_fragP
;
11973 else if (next_fragP
->tc_frag_data
.max_prefix_length
)
11975 /* Turn FUSED_JCC_PADDING into BRANCH_PREFIX if it isn't
11977 next_fragP
->fr_subtype
11978 = ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0);
11979 next_fragP
->tc_frag_data
.max_bytes
11980 = next_fragP
->tc_frag_data
.max_prefix_length
;
11981 /* This will be updated in the BRANCH_PREFIX scan. */
11982 next_fragP
->tc_frag_data
.max_prefix_length
= 0;
11985 frag_wane (next_fragP
);
11990 /* Stop if there is no BRANCH_PREFIX. */
11991 if (!align_branch_prefix_size
)
11994 /* Scan for BRANCH_PREFIX. */
11995 for (; fragP
!= NULL
; fragP
= fragP
->fr_next
)
11997 if (fragP
->fr_type
!= rs_machine_dependent
11998 || (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
12002 /* Count all BRANCH_PREFIX frags before BRANCH_PADDING and
12003 COND_JUMP_PREFIX. */
12004 max_prefix_length
= 0;
12005 for (next_fragP
= fragP
;
12006 next_fragP
!= NULL
;
12007 next_fragP
= next_fragP
->fr_next
)
12009 if (next_fragP
->fr_type
== rs_fill
)
12010 /* Skip rs_fill frags. */
12012 else if (next_fragP
->fr_type
!= rs_machine_dependent
)
12013 /* Stop for all other frags. */
12016 /* rs_machine_dependent frags. */
12017 if (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12020 /* Count BRANCH_PREFIX frags. */
12021 if (max_prefix_length
>= MAX_FUSED_JCC_PADDING_SIZE
)
12023 max_prefix_length
= MAX_FUSED_JCC_PADDING_SIZE
;
12024 frag_wane (next_fragP
);
12028 += next_fragP
->tc_frag_data
.max_bytes
;
12030 else if ((TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12032 || (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12033 == FUSED_JCC_PADDING
))
12035 /* Stop at BRANCH_PADDING and FUSED_JCC_PADDING. */
12036 fragP
->tc_frag_data
.u
.padding_fragP
= next_fragP
;
12040 /* Stop for other rs_machine_dependent frags. */
12044 fragP
->tc_frag_data
.max_prefix_length
= max_prefix_length
;
12046 /* Skip to the next frag. */
12047 fragP
= next_fragP
;
12051 /* Compute padding size for
12054 CMP like instruction
12056 COND_JUMP/UNCOND_JUMP
12061 COND_JUMP/UNCOND_JUMP
12065 i386_branch_padding_size (fragS
*fragP
, offsetT address
)
12067 unsigned int offset
, size
, padding_size
;
12068 fragS
*branch_fragP
= fragP
->tc_frag_data
.u
.branch_fragP
;
12070 /* The start address of the BRANCH_PADDING or FUSED_JCC_PADDING frag. */
12072 address
= fragP
->fr_address
;
12073 address
+= fragP
->fr_fix
;
12075 /* CMP like instrunction size. */
12076 size
= fragP
->tc_frag_data
.cmp_size
;
12078 /* The base size of the branch frag. */
12079 size
+= branch_fragP
->fr_fix
;
12081 /* Add opcode and displacement bytes for the rs_machine_dependent
12083 if (branch_fragP
->fr_type
== rs_machine_dependent
)
12084 size
+= md_relax_table
[branch_fragP
->fr_subtype
].rlx_length
;
12086 /* Check if branch is within boundary and doesn't end at the last
12088 offset
= address
& ((1U << align_branch_power
) - 1);
12089 if ((offset
+ size
) >= (1U << align_branch_power
))
12090 /* Padding needed to avoid crossing boundary. */
12091 padding_size
= (1U << align_branch_power
) - offset
;
12093 /* No padding needed. */
12096 /* The return value may be saved in tc_frag_data.length which is
12098 if (!fits_in_unsigned_byte (padding_size
))
12101 return padding_size
;
12104 /* i386_generic_table_relax_frag()
12106 Handle BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags to
12107 grow/shrink padding to align branch frags. Hand others to
12111 i386_generic_table_relax_frag (segT segment
, fragS
*fragP
, long stretch
)
12113 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
12114 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
12116 long padding_size
= i386_branch_padding_size (fragP
, 0);
12117 long grow
= padding_size
- fragP
->tc_frag_data
.length
;
12119 /* When the BRANCH_PREFIX frag is used, the computed address
12120 must match the actual address and there should be no padding. */
12121 if (fragP
->tc_frag_data
.padding_address
12122 && (fragP
->tc_frag_data
.padding_address
!= fragP
->fr_address
12126 /* Update the padding size. */
12128 fragP
->tc_frag_data
.length
= padding_size
;
12132 else if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12134 fragS
*padding_fragP
, *next_fragP
;
12135 long padding_size
, left_size
, last_size
;
12137 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
12138 if (!padding_fragP
)
12139 /* Use the padding set by the leading BRANCH_PREFIX frag. */
12140 return (fragP
->tc_frag_data
.length
12141 - fragP
->tc_frag_data
.last_length
);
12143 /* Compute the relative address of the padding frag in the very
12144 first time where the BRANCH_PREFIX frag sizes are zero. */
12145 if (!fragP
->tc_frag_data
.padding_address
)
12146 fragP
->tc_frag_data
.padding_address
12147 = padding_fragP
->fr_address
- (fragP
->fr_address
- stretch
);
12149 /* First update the last length from the previous interation. */
12150 left_size
= fragP
->tc_frag_data
.prefix_length
;
12151 for (next_fragP
= fragP
;
12152 next_fragP
!= padding_fragP
;
12153 next_fragP
= next_fragP
->fr_next
)
12154 if (next_fragP
->fr_type
== rs_machine_dependent
12155 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12160 int max
= next_fragP
->tc_frag_data
.max_bytes
;
12164 if (max
> left_size
)
12169 next_fragP
->tc_frag_data
.last_length
= size
;
12173 next_fragP
->tc_frag_data
.last_length
= 0;
12176 /* Check the padding size for the padding frag. */
12177 padding_size
= i386_branch_padding_size
12178 (padding_fragP
, (fragP
->fr_address
12179 + fragP
->tc_frag_data
.padding_address
));
12181 last_size
= fragP
->tc_frag_data
.prefix_length
;
12182 /* Check if there is change from the last interation. */
12183 if (padding_size
== last_size
)
12185 /* Update the expected address of the padding frag. */
12186 padding_fragP
->tc_frag_data
.padding_address
12187 = (fragP
->fr_address
+ padding_size
12188 + fragP
->tc_frag_data
.padding_address
);
12192 if (padding_size
> fragP
->tc_frag_data
.max_prefix_length
)
12194 /* No padding if there is no sufficient room. Clear the
12195 expected address of the padding frag. */
12196 padding_fragP
->tc_frag_data
.padding_address
= 0;
12200 /* Store the expected address of the padding frag. */
12201 padding_fragP
->tc_frag_data
.padding_address
12202 = (fragP
->fr_address
+ padding_size
12203 + fragP
->tc_frag_data
.padding_address
);
12205 fragP
->tc_frag_data
.prefix_length
= padding_size
;
12207 /* Update the length for the current interation. */
12208 left_size
= padding_size
;
12209 for (next_fragP
= fragP
;
12210 next_fragP
!= padding_fragP
;
12211 next_fragP
= next_fragP
->fr_next
)
12212 if (next_fragP
->fr_type
== rs_machine_dependent
12213 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12218 int max
= next_fragP
->tc_frag_data
.max_bytes
;
12222 if (max
> left_size
)
12227 next_fragP
->tc_frag_data
.length
= size
;
12231 next_fragP
->tc_frag_data
.length
= 0;
12234 return (fragP
->tc_frag_data
.length
12235 - fragP
->tc_frag_data
.last_length
);
12237 return relax_frag (segment
, fragP
, stretch
);
12240 /* md_estimate_size_before_relax()
12242 Called just before relax() for rs_machine_dependent frags. The x86
12243 assembler uses these frags to handle variable size jump
12246 Any symbol that is now undefined will not become defined.
12247 Return the correct fr_subtype in the frag.
12248 Return the initial "guess for variable size of frag" to caller.
12249 The guess is actually the growth beyond the fixed part. Whatever
12250 we do to grow the fixed or variable part contributes to our
12254 md_estimate_size_before_relax (fragS
*fragP
, segT segment
)
12256 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
12257 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
12258 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
12260 i386_classify_machine_dependent_frag (fragP
);
12261 return fragP
->tc_frag_data
.length
;
12264 /* We've already got fragP->fr_subtype right; all we have to do is
12265 check for un-relaxable symbols. On an ELF system, we can't relax
12266 an externally visible symbol, because it may be overridden by a
12268 if (S_GET_SEGMENT (fragP
->fr_symbol
) != segment
12269 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12271 && !elf_symbol_resolved_in_segment_p (fragP
->fr_symbol
,
12274 #if defined (OBJ_COFF) && defined (TE_PE)
12275 || (OUTPUT_FLAVOR
== bfd_target_coff_flavour
12276 && S_IS_WEAK (fragP
->fr_symbol
))
12280 /* Symbol is undefined in this segment, or we need to keep a
12281 reloc so that weak symbols can be overridden. */
12282 int size
= (fragP
->fr_subtype
& CODE16
) ? 2 : 4;
12283 enum bfd_reloc_code_real reloc_type
;
12284 unsigned char *opcode
;
12288 if (fragP
->fr_var
!= NO_RELOC
)
12289 reloc_type
= (enum bfd_reloc_code_real
) fragP
->fr_var
;
12290 else if (size
== 2)
12291 reloc_type
= BFD_RELOC_16_PCREL
;
12292 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12293 else if (need_plt32_p (fragP
->fr_symbol
))
12294 reloc_type
= BFD_RELOC_X86_64_PLT32
;
12297 reloc_type
= BFD_RELOC_32_PCREL
;
12299 old_fr_fix
= fragP
->fr_fix
;
12300 opcode
= (unsigned char *) fragP
->fr_opcode
;
12302 switch (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
))
12305 /* Make jmp (0xeb) a (d)word displacement jump. */
12307 fragP
->fr_fix
+= size
;
12308 fixP
= fix_new (fragP
, old_fr_fix
, size
,
12310 fragP
->fr_offset
, 1,
12316 && (!no_cond_jump_promotion
|| fragP
->fr_var
!= NO_RELOC
))
12318 /* Negate the condition, and branch past an
12319 unconditional jump. */
12322 /* Insert an unconditional jump. */
12324 /* We added two extra opcode bytes, and have a two byte
12326 fragP
->fr_fix
+= 2 + 2;
12327 fix_new (fragP
, old_fr_fix
+ 2, 2,
12329 fragP
->fr_offset
, 1,
12333 /* Fall through. */
12336 if (no_cond_jump_promotion
&& fragP
->fr_var
== NO_RELOC
)
12338 fragP
->fr_fix
+= 1;
12339 fixP
= fix_new (fragP
, old_fr_fix
, 1,
12341 fragP
->fr_offset
, 1,
12342 BFD_RELOC_8_PCREL
);
12343 fixP
->fx_signed
= 1;
12347 /* This changes the byte-displacement jump 0x7N
12348 to the (d)word-displacement jump 0x0f,0x8N. */
12349 opcode
[1] = opcode
[0] + 0x10;
12350 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12351 /* We've added an opcode byte. */
12352 fragP
->fr_fix
+= 1 + size
;
12353 fixP
= fix_new (fragP
, old_fr_fix
+ 1, size
,
12355 fragP
->fr_offset
, 1,
12360 BAD_CASE (fragP
->fr_subtype
);
12364 /* All jumps handled here are signed, but don't unconditionally use a
12365 signed limit check for 32 and 16 bit jumps as we want to allow wrap
12366 around at 4G (outside of 64-bit mode) and 64k. */
12367 if (size
== 4 && flag_code
== CODE_64BIT
)
12368 fixP
->fx_signed
= 1;
12371 return fragP
->fr_fix
- old_fr_fix
;
12374 /* Guess size depending on current relax state. Initially the relax
12375 state will correspond to a short jump and we return 1, because
12376 the variable part of the frag (the branch offset) is one byte
12377 long. However, we can relax a section more than once and in that
12378 case we must either set fr_subtype back to the unrelaxed state,
12379 or return the value for the appropriate branch. */
12380 return md_relax_table
[fragP
->fr_subtype
].rlx_length
;
12383 /* Called after relax() is finished.
12385 In: Address of frag.
12386 fr_type == rs_machine_dependent.
12387 fr_subtype is what the address relaxed to.
12389 Out: Any fixSs and constants are set up.
12390 Caller will turn frag into a ".space 0". */
12393 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
12396 unsigned char *opcode
;
12397 unsigned char *where_to_put_displacement
= NULL
;
12398 offsetT target_address
;
12399 offsetT opcode_address
;
12400 unsigned int extension
= 0;
12401 offsetT displacement_from_opcode_start
;
12403 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
12404 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
12405 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12407 /* Generate nop padding. */
12408 unsigned int size
= fragP
->tc_frag_data
.length
;
12411 if (size
> fragP
->tc_frag_data
.max_bytes
)
12417 const char *branch
= "branch";
12418 const char *prefix
= "";
12419 fragS
*padding_fragP
;
12420 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
12423 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
12424 switch (fragP
->tc_frag_data
.default_prefix
)
12429 case CS_PREFIX_OPCODE
:
12432 case DS_PREFIX_OPCODE
:
12435 case ES_PREFIX_OPCODE
:
12438 case FS_PREFIX_OPCODE
:
12441 case GS_PREFIX_OPCODE
:
12444 case SS_PREFIX_OPCODE
:
12449 msg
= _("%s:%u: add %d%s at 0x%llx to align "
12450 "%s within %d-byte boundary\n");
12452 msg
= _("%s:%u: add additional %d%s at 0x%llx to "
12453 "align %s within %d-byte boundary\n");
12457 padding_fragP
= fragP
;
12458 msg
= _("%s:%u: add %d%s-byte nop at 0x%llx to align "
12459 "%s within %d-byte boundary\n");
12463 switch (padding_fragP
->tc_frag_data
.branch_type
)
12465 case align_branch_jcc
:
12468 case align_branch_fused
:
12469 branch
= "fused jcc";
12471 case align_branch_jmp
:
12474 case align_branch_call
:
12477 case align_branch_indirect
:
12478 branch
= "indiret branch";
12480 case align_branch_ret
:
12487 fprintf (stdout
, msg
,
12488 fragP
->fr_file
, fragP
->fr_line
, size
, prefix
,
12489 (long long) fragP
->fr_address
, branch
,
12490 1 << align_branch_power
);
12492 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12493 memset (fragP
->fr_opcode
,
12494 fragP
->tc_frag_data
.default_prefix
, size
);
12496 i386_generate_nops (fragP
, (char *) fragP
->fr_opcode
,
12498 fragP
->fr_fix
+= size
;
12503 opcode
= (unsigned char *) fragP
->fr_opcode
;
12505 /* Address we want to reach in file space. */
12506 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
12508 /* Address opcode resides at in file space. */
12509 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
12511 /* Displacement from opcode start to fill into instruction. */
12512 displacement_from_opcode_start
= target_address
- opcode_address
;
12514 if ((fragP
->fr_subtype
& BIG
) == 0)
12516 /* Don't have to change opcode. */
12517 extension
= 1; /* 1 opcode + 1 displacement */
12518 where_to_put_displacement
= &opcode
[1];
12522 if (no_cond_jump_promotion
12523 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
12524 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
12525 _("long jump required"));
12527 switch (fragP
->fr_subtype
)
12529 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
12530 extension
= 4; /* 1 opcode + 4 displacement */
12532 where_to_put_displacement
= &opcode
[1];
12535 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
12536 extension
= 2; /* 1 opcode + 2 displacement */
12538 where_to_put_displacement
= &opcode
[1];
12541 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
12542 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
12543 extension
= 5; /* 2 opcode + 4 displacement */
12544 opcode
[1] = opcode
[0] + 0x10;
12545 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12546 where_to_put_displacement
= &opcode
[2];
12549 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
12550 extension
= 3; /* 2 opcode + 2 displacement */
12551 opcode
[1] = opcode
[0] + 0x10;
12552 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12553 where_to_put_displacement
= &opcode
[2];
12556 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
12561 where_to_put_displacement
= &opcode
[3];
12565 BAD_CASE (fragP
->fr_subtype
);
12570 /* If size if less then four we are sure that the operand fits,
12571 but if it's 4, then it could be that the displacement is larger
12573 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
12575 && ((addressT
) (displacement_from_opcode_start
- extension
12576 + ((addressT
) 1 << 31))
12577 > (((addressT
) 2 << 31) - 1)))
12579 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
12580 _("jump target out of range"));
12581 /* Make us emit 0. */
12582 displacement_from_opcode_start
= extension
;
12584 /* Now put displacement after opcode. */
12585 md_number_to_chars ((char *) where_to_put_displacement
,
12586 (valueT
) (displacement_from_opcode_start
- extension
),
12587 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
12588 fragP
->fr_fix
+= extension
;
12591 /* Apply a fixup (fixP) to segment data, once it has been determined
12592 by our caller that we have all the info we need to fix it up.
12594 Parameter valP is the pointer to the value of the bits.
12596 On the 386, immediates, displacements, and data pointers are all in
12597 the same (little-endian) format, so we don't need to care about which
12598 we are handling. */
12601 md_apply_fix (fixS
*fixP
, valueT
*valP
, segT seg ATTRIBUTE_UNUSED
)
12603 char *p
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
12604 valueT value
= *valP
;
12606 #if !defined (TE_Mach)
12607 if (fixP
->fx_pcrel
)
12609 switch (fixP
->fx_r_type
)
12615 fixP
->fx_r_type
= BFD_RELOC_64_PCREL
;
12618 case BFD_RELOC_X86_64_32S
:
12619 fixP
->fx_r_type
= BFD_RELOC_32_PCREL
;
12622 fixP
->fx_r_type
= BFD_RELOC_16_PCREL
;
12625 fixP
->fx_r_type
= BFD_RELOC_8_PCREL
;
12630 if (fixP
->fx_addsy
!= NULL
12631 && (fixP
->fx_r_type
== BFD_RELOC_32_PCREL
12632 || fixP
->fx_r_type
== BFD_RELOC_64_PCREL
12633 || fixP
->fx_r_type
== BFD_RELOC_16_PCREL
12634 || fixP
->fx_r_type
== BFD_RELOC_8_PCREL
)
12635 && !use_rela_relocations
)
12637 /* This is a hack. There should be a better way to handle this.
12638 This covers for the fact that bfd_install_relocation will
12639 subtract the current location (for partial_inplace, PC relative
12640 relocations); see more below. */
12644 || OUTPUT_FLAVOR
== bfd_target_coff_flavour
12647 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
12649 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12652 segT sym_seg
= S_GET_SEGMENT (fixP
->fx_addsy
);
12654 if ((sym_seg
== seg
12655 || (symbol_section_p (fixP
->fx_addsy
)
12656 && sym_seg
!= absolute_section
))
12657 && !generic_force_reloc (fixP
))
12659 /* Yes, we add the values in twice. This is because
12660 bfd_install_relocation subtracts them out again. I think
12661 bfd_install_relocation is broken, but I don't dare change
12663 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
12667 #if defined (OBJ_COFF) && defined (TE_PE)
12668 /* For some reason, the PE format does not store a
12669 section address offset for a PC relative symbol. */
12670 if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
12671 || S_IS_WEAK (fixP
->fx_addsy
))
12672 value
+= md_pcrel_from (fixP
);
12675 #if defined (OBJ_COFF) && defined (TE_PE)
12676 if (fixP
->fx_addsy
!= NULL
12677 && S_IS_WEAK (fixP
->fx_addsy
)
12678 /* PR 16858: Do not modify weak function references. */
12679 && ! fixP
->fx_pcrel
)
12681 #if !defined (TE_PEP)
12682 /* For x86 PE weak function symbols are neither PC-relative
12683 nor do they set S_IS_FUNCTION. So the only reliable way
12684 to detect them is to check the flags of their containing
12686 if (S_GET_SEGMENT (fixP
->fx_addsy
) != NULL
12687 && S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_CODE
)
12691 value
-= S_GET_VALUE (fixP
->fx_addsy
);
12695 /* Fix a few things - the dynamic linker expects certain values here,
12696 and we must not disappoint it. */
12697 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12698 if (IS_ELF
&& fixP
->fx_addsy
)
12699 switch (fixP
->fx_r_type
)
12701 case BFD_RELOC_386_PLT32
:
12702 case BFD_RELOC_X86_64_PLT32
:
12703 /* Make the jump instruction point to the address of the operand.
12704 At runtime we merely add the offset to the actual PLT entry.
12705 NB: Subtract the offset size only for jump instructions. */
12706 if (fixP
->fx_pcrel
)
12710 case BFD_RELOC_386_TLS_GD
:
12711 case BFD_RELOC_386_TLS_LDM
:
12712 case BFD_RELOC_386_TLS_IE_32
:
12713 case BFD_RELOC_386_TLS_IE
:
12714 case BFD_RELOC_386_TLS_GOTIE
:
12715 case BFD_RELOC_386_TLS_GOTDESC
:
12716 case BFD_RELOC_X86_64_TLSGD
:
12717 case BFD_RELOC_X86_64_TLSLD
:
12718 case BFD_RELOC_X86_64_GOTTPOFF
:
12719 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
12720 value
= 0; /* Fully resolved at runtime. No addend. */
12722 case BFD_RELOC_386_TLS_LE
:
12723 case BFD_RELOC_386_TLS_LDO_32
:
12724 case BFD_RELOC_386_TLS_LE_32
:
12725 case BFD_RELOC_X86_64_DTPOFF32
:
12726 case BFD_RELOC_X86_64_DTPOFF64
:
12727 case BFD_RELOC_X86_64_TPOFF32
:
12728 case BFD_RELOC_X86_64_TPOFF64
:
12729 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
12732 case BFD_RELOC_386_TLS_DESC_CALL
:
12733 case BFD_RELOC_X86_64_TLSDESC_CALL
:
12734 value
= 0; /* Fully resolved at runtime. No addend. */
12735 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
12739 case BFD_RELOC_VTABLE_INHERIT
:
12740 case BFD_RELOC_VTABLE_ENTRY
:
12747 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
12749 /* If not 64bit, massage value, to account for wraparound when !BFD64. */
12751 value
= extend_to_32bit_address (value
);
12754 #endif /* !defined (TE_Mach) */
12756 /* Are we finished with this relocation now? */
12757 if (fixP
->fx_addsy
== NULL
)
12760 switch (fixP
->fx_r_type
)
12762 case BFD_RELOC_X86_64_32S
:
12763 fixP
->fx_signed
= 1;
12770 #if defined (OBJ_COFF) && defined (TE_PE)
12771 else if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
12774 /* Remember value for tc_gen_reloc. */
12775 fixP
->fx_addnumber
= value
;
12776 /* Clear out the frag for now. */
12780 else if (use_rela_relocations
)
12782 fixP
->fx_no_overflow
= 1;
12783 /* Remember value for tc_gen_reloc. */
12784 fixP
->fx_addnumber
= value
;
12788 md_number_to_chars (p
, value
, fixP
->fx_size
);
12792 md_atof (int type
, char *litP
, int *sizeP
)
12794 /* This outputs the LITTLENUMs in REVERSE order;
12795 in accord with the bigendian 386. */
12796 return ieee_md_atof (type
, litP
, sizeP
, false);
/* Scratch buffer for output_invalid: large enough for "(0xNN)" plus
   the NUL terminator.  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Render character C for use in diagnostics: quoted if printable,
   otherwise as a hex escape.  Returns a pointer to a static buffer,
   so the result is only valid until the next call.  */

static char *
output_invalid (int c)
{
  if (ISPRINT (c))
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "'%c'", c);
  else
    snprintf (output_invalid_buf, sizeof (output_invalid_buf),
	      "(0x%x)", (unsigned char) c);
  return output_invalid_buf;
}
12813 /* Verify that @r can be used in the current context. */
12815 static bool check_register (const reg_entry
*r
)
12817 if (allow_pseudo_reg
)
12820 if (operand_type_all_zero (&r
->reg_type
))
12823 if ((r
->reg_type
.bitfield
.dword
12824 || (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
> 3)
12825 || r
->reg_type
.bitfield
.class == RegCR
12826 || r
->reg_type
.bitfield
.class == RegDR
)
12827 && !cpu_arch_flags
.bitfield
.cpui386
)
12830 if (r
->reg_type
.bitfield
.class == RegTR
12831 && (flag_code
== CODE_64BIT
12832 || !cpu_arch_flags
.bitfield
.cpui386
12833 || cpu_arch_isa_flags
.bitfield
.cpui586
12834 || cpu_arch_isa_flags
.bitfield
.cpui686
))
12837 if (r
->reg_type
.bitfield
.class == RegMMX
&& !cpu_arch_flags
.bitfield
.cpummx
)
12840 if (!cpu_arch_flags
.bitfield
.cpuavx512f
)
12842 if (r
->reg_type
.bitfield
.zmmword
12843 || r
->reg_type
.bitfield
.class == RegMask
)
12846 if (!cpu_arch_flags
.bitfield
.cpuavx
)
12848 if (r
->reg_type
.bitfield
.ymmword
)
12851 if (!cpu_arch_flags
.bitfield
.cpusse
&& r
->reg_type
.bitfield
.xmmword
)
12856 if (r
->reg_type
.bitfield
.tmmword
12857 && (!cpu_arch_flags
.bitfield
.cpuamx_tile
12858 || flag_code
!= CODE_64BIT
))
12861 if (r
->reg_type
.bitfield
.class == RegBND
&& !cpu_arch_flags
.bitfield
.cpumpx
)
12864 /* Don't allow fake index register unless allow_index_reg isn't 0. */
12865 if (!allow_index_reg
&& r
->reg_num
== RegIZ
)
12868 /* Upper 16 vector registers are only available with VREX in 64bit
12869 mode, and require EVEX encoding. */
12870 if (r
->reg_flags
& RegVRex
)
12872 if (!cpu_arch_flags
.bitfield
.cpuavx512f
12873 || flag_code
!= CODE_64BIT
)
12876 if (i
.vec_encoding
== vex_encoding_default
)
12877 i
.vec_encoding
= vex_encoding_evex
;
12878 else if (i
.vec_encoding
!= vex_encoding_evex
)
12879 i
.vec_encoding
= vex_encoding_error
;
12882 if (((r
->reg_flags
& (RegRex64
| RegRex
)) || r
->reg_type
.bitfield
.qword
)
12883 && (!cpu_arch_flags
.bitfield
.cpulm
|| r
->reg_type
.bitfield
.class != RegCR
)
12884 && flag_code
!= CODE_64BIT
)
12887 if (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
== RegFlat
12894 /* REG_STRING starts *before* REGISTER_PREFIX. */
12896 static const reg_entry
*
12897 parse_real_register (char *reg_string
, char **end_op
)
12899 char *s
= reg_string
;
12901 char reg_name_given
[MAX_REG_NAME_SIZE
+ 1];
12902 const reg_entry
*r
;
12904 /* Skip possible REGISTER_PREFIX and possible whitespace. */
12905 if (*s
== REGISTER_PREFIX
)
12908 if (is_space_char (*s
))
12911 p
= reg_name_given
;
12912 while ((*p
++ = register_chars
[(unsigned char) *s
]) != '\0')
12914 if (p
>= reg_name_given
+ MAX_REG_NAME_SIZE
)
12915 return (const reg_entry
*) NULL
;
12919 /* For naked regs, make sure that we are not dealing with an identifier.
12920 This prevents confusing an identifier like `eax_var' with register
12922 if (allow_naked_reg
&& identifier_chars
[(unsigned char) *s
])
12923 return (const reg_entry
*) NULL
;
12927 r
= (const reg_entry
*) str_hash_find (reg_hash
, reg_name_given
);
12929 /* Handle floating point regs, allowing spaces in the (i) part. */
12932 if (!cpu_arch_flags
.bitfield
.cpu8087
12933 && !cpu_arch_flags
.bitfield
.cpu287
12934 && !cpu_arch_flags
.bitfield
.cpu387
12935 && !allow_pseudo_reg
)
12936 return (const reg_entry
*) NULL
;
12938 if (is_space_char (*s
))
12943 if (is_space_char (*s
))
12945 if (*s
>= '0' && *s
<= '7')
12947 int fpr
= *s
- '0';
12949 if (is_space_char (*s
))
12954 know (r
[fpr
].reg_num
== fpr
);
12958 /* We have "%st(" then garbage. */
12959 return (const reg_entry
*) NULL
;
12963 return r
&& check_register (r
) ? r
: NULL
;
12966 /* REG_STRING starts *before* REGISTER_PREFIX. */
12968 static const reg_entry
*
12969 parse_register (char *reg_string
, char **end_op
)
12971 const reg_entry
*r
;
12973 if (*reg_string
== REGISTER_PREFIX
|| allow_naked_reg
)
12974 r
= parse_real_register (reg_string
, end_op
);
12979 char *save
= input_line_pointer
;
12983 input_line_pointer
= reg_string
;
12984 c
= get_symbol_name (®_string
);
12985 symbolP
= symbol_find (reg_string
);
12986 if (symbolP
&& S_GET_SEGMENT (symbolP
) == reg_section
)
12988 const expressionS
*e
= symbol_get_value_expression (symbolP
);
12990 know (e
->X_op
== O_register
);
12991 know (e
->X_add_number
>= 0
12992 && (valueT
) e
->X_add_number
< i386_regtab_size
);
12993 r
= i386_regtab
+ e
->X_add_number
;
12994 if (!check_register (r
))
12996 as_bad (_("register '%s%s' cannot be used here"),
12997 register_prefix
, r
->reg_name
);
13000 *end_op
= input_line_pointer
;
13002 *input_line_pointer
= c
;
13003 input_line_pointer
= save
;
13009 i386_parse_name (char *name
, expressionS
*e
, char *nextcharP
)
13011 const reg_entry
*r
;
13012 char *end
= input_line_pointer
;
13015 r
= parse_register (name
, &input_line_pointer
);
13016 if (r
&& end
<= input_line_pointer
)
13018 *nextcharP
= *input_line_pointer
;
13019 *input_line_pointer
= 0;
13022 e
->X_op
= O_register
;
13023 e
->X_add_number
= r
- i386_regtab
;
13026 e
->X_op
= O_illegal
;
13029 input_line_pointer
= end
;
13031 return intel_syntax
? i386_intel_parse_name (name
, e
) : 0;
13035 md_operand (expressionS
*e
)
13038 const reg_entry
*r
;
13040 switch (*input_line_pointer
)
13042 case REGISTER_PREFIX
:
13043 r
= parse_real_register (input_line_pointer
, &end
);
13046 e
->X_op
= O_register
;
13047 e
->X_add_number
= r
- i386_regtab
;
13048 input_line_pointer
= end
;
13053 gas_assert (intel_syntax
);
13054 end
= input_line_pointer
++;
13056 if (*input_line_pointer
== ']')
13058 ++input_line_pointer
;
13059 e
->X_op_symbol
= make_expr_symbol (e
);
13060 e
->X_add_symbol
= NULL
;
13061 e
->X_add_number
= 0;
13066 e
->X_op
= O_absent
;
13067 input_line_pointer
= end
;
13074 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13075 const char *md_shortopts
= "kVQ:sqnO::";
13077 const char *md_shortopts
= "qnO::";
13080 #define OPTION_32 (OPTION_MD_BASE + 0)
13081 #define OPTION_64 (OPTION_MD_BASE + 1)
13082 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
13083 #define OPTION_MARCH (OPTION_MD_BASE + 3)
13084 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
13085 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
13086 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
13087 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
13088 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
13089 #define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 9)
13090 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
13091 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
13092 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
13093 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
13094 #define OPTION_X32 (OPTION_MD_BASE + 14)
13095 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
13096 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
13097 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
13098 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
13099 #define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
13100 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
13101 #define OPTION_MSHARED (OPTION_MD_BASE + 21)
13102 #define OPTION_MAMD64 (OPTION_MD_BASE + 22)
13103 #define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
13104 #define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
13105 #define OPTION_X86_USED_NOTE (OPTION_MD_BASE + 25)
13106 #define OPTION_MVEXWIG (OPTION_MD_BASE + 26)
13107 #define OPTION_MALIGN_BRANCH_BOUNDARY (OPTION_MD_BASE + 27)
13108 #define OPTION_MALIGN_BRANCH_PREFIX_SIZE (OPTION_MD_BASE + 28)
13109 #define OPTION_MALIGN_BRANCH (OPTION_MD_BASE + 29)
13110 #define OPTION_MBRANCHES_WITH_32B_BOUNDARIES (OPTION_MD_BASE + 30)
13111 #define OPTION_MLFENCE_AFTER_LOAD (OPTION_MD_BASE + 31)
13112 #define OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH (OPTION_MD_BASE + 32)
13113 #define OPTION_MLFENCE_BEFORE_RET (OPTION_MD_BASE + 33)
13114 #define OPTION_MUSE_UNALIGNED_VECTOR_MOVE (OPTION_MD_BASE + 34)
13116 struct option md_longopts
[] =
13118 {"32", no_argument
, NULL
, OPTION_32
},
13119 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13120 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
13121 {"64", no_argument
, NULL
, OPTION_64
},
13123 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13124 {"x32", no_argument
, NULL
, OPTION_X32
},
13125 {"mshared", no_argument
, NULL
, OPTION_MSHARED
},
13126 {"mx86-used-note", required_argument
, NULL
, OPTION_X86_USED_NOTE
},
13128 {"divide", no_argument
, NULL
, OPTION_DIVIDE
},
13129 {"march", required_argument
, NULL
, OPTION_MARCH
},
13130 {"mtune", required_argument
, NULL
, OPTION_MTUNE
},
13131 {"mmnemonic", required_argument
, NULL
, OPTION_MMNEMONIC
},
13132 {"msyntax", required_argument
, NULL
, OPTION_MSYNTAX
},
13133 {"mindex-reg", no_argument
, NULL
, OPTION_MINDEX_REG
},
13134 {"mnaked-reg", no_argument
, NULL
, OPTION_MNAKED_REG
},
13135 {"msse2avx", no_argument
, NULL
, OPTION_MSSE2AVX
},
13136 {"muse-unaligned-vector-move", no_argument
, NULL
, OPTION_MUSE_UNALIGNED_VECTOR_MOVE
},
13137 {"msse-check", required_argument
, NULL
, OPTION_MSSE_CHECK
},
13138 {"moperand-check", required_argument
, NULL
, OPTION_MOPERAND_CHECK
},
13139 {"mavxscalar", required_argument
, NULL
, OPTION_MAVXSCALAR
},
13140 {"mvexwig", required_argument
, NULL
, OPTION_MVEXWIG
},
13141 {"madd-bnd-prefix", no_argument
, NULL
, OPTION_MADD_BND_PREFIX
},
13142 {"mevexlig", required_argument
, NULL
, OPTION_MEVEXLIG
},
13143 {"mevexwig", required_argument
, NULL
, OPTION_MEVEXWIG
},
13144 # if defined (TE_PE) || defined (TE_PEP)
13145 {"mbig-obj", no_argument
, NULL
, OPTION_MBIG_OBJ
},
13147 {"momit-lock-prefix", required_argument
, NULL
, OPTION_MOMIT_LOCK_PREFIX
},
13148 {"mfence-as-lock-add", required_argument
, NULL
, OPTION_MFENCE_AS_LOCK_ADD
},
13149 {"mrelax-relocations", required_argument
, NULL
, OPTION_MRELAX_RELOCATIONS
},
13150 {"mevexrcig", required_argument
, NULL
, OPTION_MEVEXRCIG
},
13151 {"malign-branch-boundary", required_argument
, NULL
, OPTION_MALIGN_BRANCH_BOUNDARY
},
13152 {"malign-branch-prefix-size", required_argument
, NULL
, OPTION_MALIGN_BRANCH_PREFIX_SIZE
},
13153 {"malign-branch", required_argument
, NULL
, OPTION_MALIGN_BRANCH
},
13154 {"mbranches-within-32B-boundaries", no_argument
, NULL
, OPTION_MBRANCHES_WITH_32B_BOUNDARIES
},
13155 {"mlfence-after-load", required_argument
, NULL
, OPTION_MLFENCE_AFTER_LOAD
},
13156 {"mlfence-before-indirect-branch", required_argument
, NULL
,
13157 OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
},
13158 {"mlfence-before-ret", required_argument
, NULL
, OPTION_MLFENCE_BEFORE_RET
},
13159 {"mamd64", no_argument
, NULL
, OPTION_MAMD64
},
13160 {"mintel64", no_argument
, NULL
, OPTION_MINTEL64
},
13161 {NULL
, no_argument
, NULL
, 0}
13163 size_t md_longopts_size
= sizeof (md_longopts
);
13166 md_parse_option (int c
, const char *arg
)
13169 char *arch
, *next
, *saved
, *type
;
13174 optimize_align_code
= 0;
13178 quiet_warnings
= 1;
13181 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13182 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
13183 should be emitted or not. FIXME: Not implemented. */
13185 if ((arg
[0] != 'y' && arg
[0] != 'n') || arg
[1])
13189 /* -V: SVR4 argument to print version ID. */
13191 print_version_id ();
13194 /* -k: Ignore for FreeBSD compatibility. */
13199 /* -s: On i386 Solaris, this tells the native assembler to use
13200 .stab instead of .stab.excl. We always use .stab anyhow. */
13203 case OPTION_MSHARED
:
13207 case OPTION_X86_USED_NOTE
:
13208 if (strcasecmp (arg
, "yes") == 0)
13210 else if (strcasecmp (arg
, "no") == 0)
13213 as_fatal (_("invalid -mx86-used-note= option: `%s'"), arg
);
13218 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13219 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
13222 const char **list
, **l
;
13224 list
= bfd_target_list ();
13225 for (l
= list
; *l
!= NULL
; l
++)
13226 if (startswith (*l
, "elf64-x86-64")
13227 || strcmp (*l
, "coff-x86-64") == 0
13228 || strcmp (*l
, "pe-x86-64") == 0
13229 || strcmp (*l
, "pei-x86-64") == 0
13230 || strcmp (*l
, "mach-o-x86-64") == 0)
13232 default_arch
= "x86_64";
13236 as_fatal (_("no compiled in support for x86_64"));
13242 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13246 const char **list
, **l
;
13248 list
= bfd_target_list ();
13249 for (l
= list
; *l
!= NULL
; l
++)
13250 if (startswith (*l
, "elf32-x86-64"))
13252 default_arch
= "x86_64:32";
13256 as_fatal (_("no compiled in support for 32bit x86_64"));
13260 as_fatal (_("32bit x86_64 is only supported for ELF"));
13265 default_arch
= "i386";
13268 case OPTION_DIVIDE
:
13269 #ifdef SVR4_COMMENT_CHARS
13274 n
= XNEWVEC (char, strlen (i386_comment_chars
) + 1);
13276 for (s
= i386_comment_chars
; *s
!= '\0'; s
++)
13280 i386_comment_chars
= n
;
13286 saved
= xstrdup (arg
);
13288 /* Allow -march=+nosse. */
13294 as_fatal (_("invalid -march= option: `%s'"), arg
);
13295 next
= strchr (arch
, '+');
13298 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13300 if (strcmp (arch
, cpu_arch
[j
].name
) == 0)
13303 if (! cpu_arch
[j
].flags
.bitfield
.cpui386
)
13306 cpu_arch_name
= cpu_arch
[j
].name
;
13307 cpu_sub_arch_name
= NULL
;
13308 cpu_arch_flags
= cpu_arch
[j
].flags
;
13309 cpu_arch_isa
= cpu_arch
[j
].type
;
13310 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
13311 if (!cpu_arch_tune_set
)
13313 cpu_arch_tune
= cpu_arch_isa
;
13314 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
13318 else if (*cpu_arch
[j
].name
== '.'
13319 && strcmp (arch
, cpu_arch
[j
].name
+ 1) == 0)
13321 /* ISA extension. */
13322 i386_cpu_flags flags
;
13324 flags
= cpu_flags_or (cpu_arch_flags
,
13325 cpu_arch
[j
].flags
);
13327 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
13329 if (cpu_sub_arch_name
)
13331 char *name
= cpu_sub_arch_name
;
13332 cpu_sub_arch_name
= concat (name
,
13334 (const char *) NULL
);
13338 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
13339 cpu_arch_flags
= flags
;
13340 cpu_arch_isa_flags
= flags
;
13344 = cpu_flags_or (cpu_arch_isa_flags
,
13345 cpu_arch
[j
].flags
);
13350 if (j
>= ARRAY_SIZE (cpu_arch
))
13352 /* Disable an ISA extension. */
13353 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
13354 if (strcmp (arch
, cpu_noarch
[j
].name
) == 0)
13356 i386_cpu_flags flags
;
13358 flags
= cpu_flags_and_not (cpu_arch_flags
,
13359 cpu_noarch
[j
].flags
);
13360 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
13362 if (cpu_sub_arch_name
)
13364 char *name
= cpu_sub_arch_name
;
13365 cpu_sub_arch_name
= concat (arch
,
13366 (const char *) NULL
);
13370 cpu_sub_arch_name
= xstrdup (arch
);
13371 cpu_arch_flags
= flags
;
13372 cpu_arch_isa_flags
= flags
;
13377 if (j
>= ARRAY_SIZE (cpu_noarch
))
13378 j
= ARRAY_SIZE (cpu_arch
);
13381 if (j
>= ARRAY_SIZE (cpu_arch
))
13382 as_fatal (_("invalid -march= option: `%s'"), arg
);
13386 while (next
!= NULL
);
13392 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
13393 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13395 if (strcmp (arg
, cpu_arch
[j
].name
) == 0)
13397 cpu_arch_tune_set
= 1;
13398 cpu_arch_tune
= cpu_arch
[j
].type
;
13399 cpu_arch_tune_flags
= cpu_arch
[j
].flags
;
13403 if (j
>= ARRAY_SIZE (cpu_arch
))
13404 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
13407 case OPTION_MMNEMONIC
:
13408 if (strcasecmp (arg
, "att") == 0)
13409 intel_mnemonic
= 0;
13410 else if (strcasecmp (arg
, "intel") == 0)
13411 intel_mnemonic
= 1;
13413 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg
);
13416 case OPTION_MSYNTAX
:
13417 if (strcasecmp (arg
, "att") == 0)
13419 else if (strcasecmp (arg
, "intel") == 0)
13422 as_fatal (_("invalid -msyntax= option: `%s'"), arg
);
13425 case OPTION_MINDEX_REG
:
13426 allow_index_reg
= 1;
13429 case OPTION_MNAKED_REG
:
13430 allow_naked_reg
= 1;
13433 case OPTION_MSSE2AVX
:
13437 case OPTION_MUSE_UNALIGNED_VECTOR_MOVE
:
13438 use_unaligned_vector_move
= 1;
13441 case OPTION_MSSE_CHECK
:
13442 if (strcasecmp (arg
, "error") == 0)
13443 sse_check
= check_error
;
13444 else if (strcasecmp (arg
, "warning") == 0)
13445 sse_check
= check_warning
;
13446 else if (strcasecmp (arg
, "none") == 0)
13447 sse_check
= check_none
;
13449 as_fatal (_("invalid -msse-check= option: `%s'"), arg
);
13452 case OPTION_MOPERAND_CHECK
:
13453 if (strcasecmp (arg
, "error") == 0)
13454 operand_check
= check_error
;
13455 else if (strcasecmp (arg
, "warning") == 0)
13456 operand_check
= check_warning
;
13457 else if (strcasecmp (arg
, "none") == 0)
13458 operand_check
= check_none
;
13460 as_fatal (_("invalid -moperand-check= option: `%s'"), arg
);
13463 case OPTION_MAVXSCALAR
:
13464 if (strcasecmp (arg
, "128") == 0)
13465 avxscalar
= vex128
;
13466 else if (strcasecmp (arg
, "256") == 0)
13467 avxscalar
= vex256
;
13469 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg
);
13472 case OPTION_MVEXWIG
:
13473 if (strcmp (arg
, "0") == 0)
13475 else if (strcmp (arg
, "1") == 0)
13478 as_fatal (_("invalid -mvexwig= option: `%s'"), arg
);
13481 case OPTION_MADD_BND_PREFIX
:
13482 add_bnd_prefix
= 1;
13485 case OPTION_MEVEXLIG
:
13486 if (strcmp (arg
, "128") == 0)
13487 evexlig
= evexl128
;
13488 else if (strcmp (arg
, "256") == 0)
13489 evexlig
= evexl256
;
13490 else if (strcmp (arg
, "512") == 0)
13491 evexlig
= evexl512
;
13493 as_fatal (_("invalid -mevexlig= option: `%s'"), arg
);
13496 case OPTION_MEVEXRCIG
:
13497 if (strcmp (arg
, "rne") == 0)
13499 else if (strcmp (arg
, "rd") == 0)
13501 else if (strcmp (arg
, "ru") == 0)
13503 else if (strcmp (arg
, "rz") == 0)
13506 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg
);
13509 case OPTION_MEVEXWIG
:
13510 if (strcmp (arg
, "0") == 0)
13512 else if (strcmp (arg
, "1") == 0)
13515 as_fatal (_("invalid -mevexwig= option: `%s'"), arg
);
13518 # if defined (TE_PE) || defined (TE_PEP)
13519 case OPTION_MBIG_OBJ
:
13524 case OPTION_MOMIT_LOCK_PREFIX
:
13525 if (strcasecmp (arg
, "yes") == 0)
13526 omit_lock_prefix
= 1;
13527 else if (strcasecmp (arg
, "no") == 0)
13528 omit_lock_prefix
= 0;
13530 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg
);
13533 case OPTION_MFENCE_AS_LOCK_ADD
:
13534 if (strcasecmp (arg
, "yes") == 0)
13536 else if (strcasecmp (arg
, "no") == 0)
13539 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg
);
13542 case OPTION_MLFENCE_AFTER_LOAD
:
13543 if (strcasecmp (arg
, "yes") == 0)
13544 lfence_after_load
= 1;
13545 else if (strcasecmp (arg
, "no") == 0)
13546 lfence_after_load
= 0;
13548 as_fatal (_("invalid -mlfence-after-load= option: `%s'"), arg
);
13551 case OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
:
13552 if (strcasecmp (arg
, "all") == 0)
13554 lfence_before_indirect_branch
= lfence_branch_all
;
13555 if (lfence_before_ret
== lfence_before_ret_none
)
13556 lfence_before_ret
= lfence_before_ret_shl
;
13558 else if (strcasecmp (arg
, "memory") == 0)
13559 lfence_before_indirect_branch
= lfence_branch_memory
;
13560 else if (strcasecmp (arg
, "register") == 0)
13561 lfence_before_indirect_branch
= lfence_branch_register
;
13562 else if (strcasecmp (arg
, "none") == 0)
13563 lfence_before_indirect_branch
= lfence_branch_none
;
13565 as_fatal (_("invalid -mlfence-before-indirect-branch= option: `%s'"),
13569 case OPTION_MLFENCE_BEFORE_RET
:
13570 if (strcasecmp (arg
, "or") == 0)
13571 lfence_before_ret
= lfence_before_ret_or
;
13572 else if (strcasecmp (arg
, "not") == 0)
13573 lfence_before_ret
= lfence_before_ret_not
;
13574 else if (strcasecmp (arg
, "shl") == 0 || strcasecmp (arg
, "yes") == 0)
13575 lfence_before_ret
= lfence_before_ret_shl
;
13576 else if (strcasecmp (arg
, "none") == 0)
13577 lfence_before_ret
= lfence_before_ret_none
;
13579 as_fatal (_("invalid -mlfence-before-ret= option: `%s'"),
13583 case OPTION_MRELAX_RELOCATIONS
:
13584 if (strcasecmp (arg
, "yes") == 0)
13585 generate_relax_relocations
= 1;
13586 else if (strcasecmp (arg
, "no") == 0)
13587 generate_relax_relocations
= 0;
13589 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg
);
13592 case OPTION_MALIGN_BRANCH_BOUNDARY
:
13595 long int align
= strtoul (arg
, &end
, 0);
13600 align_branch_power
= 0;
13603 else if (align
>= 16)
13606 for (align_power
= 0;
13608 align
>>= 1, align_power
++)
13610 /* Limit alignment power to 31. */
13611 if (align
== 1 && align_power
< 32)
13613 align_branch_power
= align_power
;
13618 as_fatal (_("invalid -malign-branch-boundary= value: %s"), arg
);
13622 case OPTION_MALIGN_BRANCH_PREFIX_SIZE
:
13625 int align
= strtoul (arg
, &end
, 0);
13626 /* Some processors only support 5 prefixes. */
13627 if (*end
== '\0' && align
>= 0 && align
< 6)
13629 align_branch_prefix_size
= align
;
13632 as_fatal (_("invalid -malign-branch-prefix-size= value: %s"),
13637 case OPTION_MALIGN_BRANCH
:
13639 saved
= xstrdup (arg
);
13643 next
= strchr (type
, '+');
13646 if (strcasecmp (type
, "jcc") == 0)
13647 align_branch
|= align_branch_jcc_bit
;
13648 else if (strcasecmp (type
, "fused") == 0)
13649 align_branch
|= align_branch_fused_bit
;
13650 else if (strcasecmp (type
, "jmp") == 0)
13651 align_branch
|= align_branch_jmp_bit
;
13652 else if (strcasecmp (type
, "call") == 0)
13653 align_branch
|= align_branch_call_bit
;
13654 else if (strcasecmp (type
, "ret") == 0)
13655 align_branch
|= align_branch_ret_bit
;
13656 else if (strcasecmp (type
, "indirect") == 0)
13657 align_branch
|= align_branch_indirect_bit
;
13659 as_fatal (_("invalid -malign-branch= option: `%s'"), arg
);
13662 while (next
!= NULL
);
13666 case OPTION_MBRANCHES_WITH_32B_BOUNDARIES
:
13667 align_branch_power
= 5;
13668 align_branch_prefix_size
= 5;
13669 align_branch
= (align_branch_jcc_bit
13670 | align_branch_fused_bit
13671 | align_branch_jmp_bit
);
13674 case OPTION_MAMD64
:
13678 case OPTION_MINTEL64
:
13686 /* Turn off -Os. */
13687 optimize_for_space
= 0;
13689 else if (*arg
== 's')
13691 optimize_for_space
= 1;
13692 /* Turn on all encoding optimizations. */
13693 optimize
= INT_MAX
;
13697 optimize
= atoi (arg
);
13698 /* Turn off -Os. */
13699 optimize_for_space
= 0;
/* Blank template sized to one line of --help output; show_arch fills it
   in and flushes it a line at a time.  */
#define MESSAGE_TEMPLATE \
"                                                                                "

/* Append NAME (of length LEN) to the message being built at P,
   flushing the line to STREAM when it would overflow.  *LEFT_P tracks
   the remaining room; returns the new write position.  */

static char *
output_message (FILE *stream, char *p, char *message, char *start,
		int *left_p, const char *name, int len)
{
  int size = sizeof (MESSAGE_TEMPLATE);
  int left = *left_p;

  /* Reserve 2 spaces for ", " or ",\0" */
  left -= len + 2;

  /* Check if there is any room.  */
  if (left >= 0)
    {
      if (p != start)
	{
	  *p++ = ',';
	  *p++ = ' ';
	}
      p = mempcpy (p, name, len);
    }
  else
    {
      /* Output the current message now and start a new one.  */
      *p++ = ',';
      *p = '\0';
      fprintf (stream, "%s\n", message);
      p = start;
      left = size - (start - message) - len - 2;

      gas_assert (left >= 0);

      p = mempcpy (p, name, len);
    }

  *left_p = left;
  return p;
}
13751 show_arch (FILE *stream
, int ext
, int check
)
13753 static char message
[] = MESSAGE_TEMPLATE
;
13754 char *start
= message
+ 27;
13756 int size
= sizeof (MESSAGE_TEMPLATE
);
13763 left
= size
- (start
- message
);
13764 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13766 /* Should it be skipped? */
13767 if (cpu_arch
[j
].skip
)
13770 name
= cpu_arch
[j
].name
;
13771 len
= cpu_arch
[j
].len
;
13774 /* It is an extension. Skip if we aren't asked to show it. */
13785 /* It is an processor. Skip if we show only extension. */
13788 else if (check
&& ! cpu_arch
[j
].flags
.bitfield
.cpui386
)
13790 /* It is an impossible processor - skip. */
13794 p
= output_message (stream
, p
, message
, start
, &left
, name
, len
);
13797 /* Display disabled extensions. */
13799 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
13801 name
= cpu_noarch
[j
].name
;
13802 len
= cpu_noarch
[j
].len
;
13803 p
= output_message (stream
, p
, message
, start
, &left
, name
,
13808 fprintf (stream
, "%s\n", message
);
13812 md_show_usage (FILE *stream
)
13814 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13815 fprintf (stream
, _("\
13816 -Qy, -Qn ignored\n\
13817 -V print assembler version number\n\
13820 fprintf (stream
, _("\
13821 -n Do not optimize code alignment\n\
13822 -q quieten some warnings\n"));
13823 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13824 fprintf (stream
, _("\
13828 # if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13829 fprintf (stream
, _("\
13830 --32/--64/--x32 generate 32bit/64bit/x32 object\n"));
13831 # elif defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O)
13832 fprintf (stream
, _("\
13833 --32/--64 generate 32bit/64bit object\n"));
13836 #ifdef SVR4_COMMENT_CHARS
13837 fprintf (stream
, _("\
13838 --divide do not treat `/' as a comment character\n"));
13840 fprintf (stream
, _("\
13841 --divide ignored\n"));
13843 fprintf (stream
, _("\
13844 -march=CPU[,+EXTENSION...]\n\
13845 generate code for CPU and EXTENSION, CPU is one of:\n"));
13846 show_arch (stream
, 0, 1);
13847 fprintf (stream
, _("\
13848 EXTENSION is combination of:\n"));
13849 show_arch (stream
, 1, 0);
13850 fprintf (stream
, _("\
13851 -mtune=CPU optimize for CPU, CPU is one of:\n"));
13852 show_arch (stream
, 0, 0);
13853 fprintf (stream
, _("\
13854 -msse2avx encode SSE instructions with VEX prefix\n"));
13855 fprintf (stream
, _("\
13856 -muse-unaligned-vector-move\n\
13857 encode aligned vector move as unaligned vector move\n"));
13858 fprintf (stream
, _("\
13859 -msse-check=[none|error|warning] (default: warning)\n\
13860 check SSE instructions\n"));
13861 fprintf (stream
, _("\
13862 -moperand-check=[none|error|warning] (default: warning)\n\
13863 check operand combinations for validity\n"));
13864 fprintf (stream
, _("\
13865 -mavxscalar=[128|256] (default: 128)\n\
13866 encode scalar AVX instructions with specific vector\n\
13868 fprintf (stream
, _("\
13869 -mvexwig=[0|1] (default: 0)\n\
13870 encode VEX instructions with specific VEX.W value\n\
13871 for VEX.W bit ignored instructions\n"));
13872 fprintf (stream
, _("\
13873 -mevexlig=[128|256|512] (default: 128)\n\
13874 encode scalar EVEX instructions with specific vector\n\
13876 fprintf (stream
, _("\
13877 -mevexwig=[0|1] (default: 0)\n\
13878 encode EVEX instructions with specific EVEX.W value\n\
13879 for EVEX.W bit ignored instructions\n"));
13880 fprintf (stream
, _("\
13881 -mevexrcig=[rne|rd|ru|rz] (default: rne)\n\
13882 encode EVEX instructions with specific EVEX.RC value\n\
13883 for SAE-only ignored instructions\n"));
13884 fprintf (stream
, _("\
13885 -mmnemonic=[att|intel] "));
13886 if (SYSV386_COMPAT
)
13887 fprintf (stream
, _("(default: att)\n"));
13889 fprintf (stream
, _("(default: intel)\n"));
13890 fprintf (stream
, _("\
13891 use AT&T/Intel mnemonic\n"));
13892 fprintf (stream
, _("\
13893 -msyntax=[att|intel] (default: att)\n\
13894 use AT&T/Intel syntax\n"));
13895 fprintf (stream
, _("\
13896 -mindex-reg support pseudo index registers\n"));
13897 fprintf (stream
, _("\
13898 -mnaked-reg don't require `%%' prefix for registers\n"));
13899 fprintf (stream
, _("\
13900 -madd-bnd-prefix add BND prefix for all valid branches\n"));
13901 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13902 fprintf (stream
, _("\
13903 -mshared disable branch optimization for shared code\n"));
13904 fprintf (stream
, _("\
13905 -mx86-used-note=[no|yes] "));
13906 if (DEFAULT_X86_USED_NOTE
)
13907 fprintf (stream
, _("(default: yes)\n"));
13909 fprintf (stream
, _("(default: no)\n"));
13910 fprintf (stream
, _("\
13911 generate x86 used ISA and feature properties\n"));
13913 #if defined (TE_PE) || defined (TE_PEP)
13914 fprintf (stream
, _("\
13915 -mbig-obj generate big object files\n"));
13917 fprintf (stream
, _("\
13918 -momit-lock-prefix=[no|yes] (default: no)\n\
13919 strip all lock prefixes\n"));
13920 fprintf (stream
, _("\
13921 -mfence-as-lock-add=[no|yes] (default: no)\n\
13922 encode lfence, mfence and sfence as\n\
13923 lock addl $0x0, (%%{re}sp)\n"));
13924 fprintf (stream
, _("\
13925 -mrelax-relocations=[no|yes] "));
13926 if (DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
)
13927 fprintf (stream
, _("(default: yes)\n"));
13929 fprintf (stream
, _("(default: no)\n"));
13930 fprintf (stream
, _("\
13931 generate relax relocations\n"));
13932 fprintf (stream
, _("\
13933 -malign-branch-boundary=NUM (default: 0)\n\
13934 align branches within NUM byte boundary\n"));
13935 fprintf (stream
, _("\
13936 -malign-branch=TYPE[+TYPE...] (default: jcc+fused+jmp)\n\
13937 TYPE is combination of jcc, fused, jmp, call, ret,\n\
13939 specify types of branches to align\n"));
13940 fprintf (stream
, _("\
13941 -malign-branch-prefix-size=NUM (default: 5)\n\
13942 align branches with NUM prefixes per instruction\n"));
13943 fprintf (stream
, _("\
13944 -mbranches-within-32B-boundaries\n\
13945 align branches within 32 byte boundary\n"));
13946 fprintf (stream
, _("\
13947 -mlfence-after-load=[no|yes] (default: no)\n\
13948 generate lfence after load\n"));
13949 fprintf (stream
, _("\
13950 -mlfence-before-indirect-branch=[none|all|register|memory] (default: none)\n\
13951 generate lfence before indirect near branch\n"));
13952 fprintf (stream
, _("\
13953 -mlfence-before-ret=[none|or|not|shl|yes] (default: none)\n\
13954 generate lfence before ret\n"));
13955 fprintf (stream
, _("\
13956 -mamd64 accept only AMD64 ISA [default]\n"));
13957 fprintf (stream
, _("\
13958 -mintel64 accept only Intel64 ISA\n"));
#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
     || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))

/* Pick the target format to use.  */

const char *
i386_target_format (void)
{
  /* Establish the code size and (for ELF) the ABI from default_arch,
     which --32/--64/--x32 may have overridden.  */
  if (startswith (default_arch, "x86_64"))
    {
      update_code_flag (CODE_64BIT, 1);
      if (default_arch[6] == '\0')
	x86_elf_abi = X86_64_ABI;
      else
	x86_elf_abi = X86_64_X32_ABI;
    }
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else if (!strcmp (default_arch, "iamcu"))
    {
      update_code_flag (CODE_32BIT, 1);
      if (cpu_arch_isa == PROCESSOR_UNKNOWN)
	{
	  static const i386_cpu_flags iamcu_flags = CPU_IAMCU_FLAGS;
	  cpu_arch_name = "iamcu";
	  cpu_sub_arch_name = NULL;
	  cpu_arch_flags = iamcu_flags;
	  cpu_arch_isa = PROCESSOR_IAMCU;
	  cpu_arch_isa_flags = iamcu_flags;
	  if (!cpu_arch_tune_set)
	    {
	      cpu_arch_tune = cpu_arch_isa;
	      cpu_arch_tune_flags = cpu_arch_isa_flags;
	    }
	}
      else if (cpu_arch_isa != PROCESSOR_IAMCU)
	as_fatal (_("Intel MCU doesn't support `%s' architecture"),
		  cpu_arch_name);
    }
  else
    as_fatal (_("unknown architecture"));

  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;

  switch (OUTPUT_FLAVOR)
    {
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#endif
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      if (flag_code == CODE_64BIT)
	{
	  object_64bit = 1;
	  return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
	}
      return use_big_obj ? "pe-bigobj-i386" : "pe-i386";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
# else
    case bfd_target_coff_flavour:
      return "coff-i386";
# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
      {
	const char *format;

	switch (x86_elf_abi)
	  {
	  default:
	    format = ELF_TARGET_FORMAT;
#ifndef TE_SOLARIS
	    tls_get_addr = "___tls_get_addr";
#endif
	    break;
	  case X86_64_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
#ifndef TE_SOLARIS
	    tls_get_addr = "__tls_get_addr";
#endif
	    format = ELF_TARGET_FORMAT64;
	    break;
	  case X86_64_X32_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
#ifndef TE_SOLARIS
	    tls_get_addr = "__tls_get_addr";
#endif
	    disallow_64bit_reloc = 1;
	    format = ELF_TARGET_FORMAT32;
	    break;
	  }
	if (cpu_arch_isa == PROCESSOR_L1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel L1OM is 64bit only"));
	    return ELF_TARGET_L1OM_FORMAT;
	  }
	else if (cpu_arch_isa == PROCESSOR_K1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel K1OM is 64bit only"));
	    return ELF_TARGET_K1OM_FORMAT;
	  }
	else if (cpu_arch_isa == PROCESSOR_IAMCU)
	  {
	    if (x86_elf_abi != I386_ABI)
	      as_fatal (_("Intel MCU is 32bit only"));
	    return ELF_TARGET_IAMCU_FORMAT;
	  }
	else
	  return format;
      }
#endif
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      if (flag_code == CODE_64BIT)
	{
	  use_rela_relocations = 1;
	  object_64bit = 1;
	  return "mach-o-x86-64";
	}
      else
	return "mach-o-i386";
#endif
    default:
      abort ();
      return NULL;
    }
}

#endif /* OBJ_MAYBE_ more than one */
14105 md_undefined_symbol (char *name
)
14107 if (name
[0] == GLOBAL_OFFSET_TABLE_NAME
[0]
14108 && name
[1] == GLOBAL_OFFSET_TABLE_NAME
[1]
14109 && name
[2] == GLOBAL_OFFSET_TABLE_NAME
[2]
14110 && strcmp (name
, GLOBAL_OFFSET_TABLE_NAME
) == 0)
14114 if (symbol_find (name
))
14115 as_bad (_("GOT already in symbol table"));
14116 GOT_symbol
= symbol_new (name
, undefined_section
,
14117 &zero_address_frag
, 0);
14124 /* Round up a section size to the appropriate boundary. */
14127 md_section_align (segT segment ATTRIBUTE_UNUSED
, valueT size
)
14129 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
14130 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
14132 /* For a.out, force the section size to be aligned. If we don't do
14133 this, BFD will align it for us, but it will not write out the
14134 final bytes of the section. This may be a bug in BFD, but it is
14135 easier to fix it here since that is how the other a.out targets
14139 align
= bfd_section_alignment (segment
);
14140 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
14147 /* On the i386, PC-relative offsets are relative to the start of the
14148 next instruction. That is, the address of the offset, plus its
14149 size, since the offset is always the last part of the insn. */
14152 md_pcrel_from (fixS
*fixP
)
14154 return fixP
->fx_size
+ fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
14160 s_bss (int ignore ATTRIBUTE_UNUSED
)
14164 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14166 obj_elf_section_change_hook ();
14168 temp
= get_absolute_expression ();
14169 subseg_set (bss_section
, (subsegT
) temp
);
14170 demand_empty_rest_of_line ();
14175 /* Remember constant directive. */
14178 i386_cons_align (int ignore ATTRIBUTE_UNUSED
)
14180 if (last_insn
.kind
!= last_insn_directive
14181 && (bfd_section_flags (now_seg
) & SEC_CODE
))
14183 last_insn
.seg
= now_seg
;
14184 last_insn
.kind
= last_insn_directive
;
14185 last_insn
.name
= "constant directive";
14186 last_insn
.file
= as_where (&last_insn
.line
);
14187 if (lfence_before_ret
!= lfence_before_ret_none
)
14189 if (lfence_before_indirect_branch
!= lfence_branch_none
)
14190 as_warn (_("constant directive skips -mlfence-before-ret "
14191 "and -mlfence-before-indirect-branch"));
14193 as_warn (_("constant directive skips -mlfence-before-ret"));
14195 else if (lfence_before_indirect_branch
!= lfence_branch_none
)
14196 as_warn (_("constant directive skips -mlfence-before-indirect-branch"));
14201 i386_validate_fix (fixS
*fixp
)
14203 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14204 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
14205 || fixp
->fx_r_type
== BFD_RELOC_SIZE64
)
14206 return IS_ELF
&& fixp
->fx_addsy
14207 && (!S_IS_DEFINED (fixp
->fx_addsy
)
14208 || S_IS_EXTERNAL (fixp
->fx_addsy
));
14211 if (fixp
->fx_subsy
)
14213 if (fixp
->fx_subsy
== GOT_symbol
)
14215 if (fixp
->fx_r_type
== BFD_RELOC_32_PCREL
)
14219 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14220 if (fixp
->fx_tcbit2
)
14221 fixp
->fx_r_type
= (fixp
->fx_tcbit
14222 ? BFD_RELOC_X86_64_REX_GOTPCRELX
14223 : BFD_RELOC_X86_64_GOTPCRELX
);
14226 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTPCREL
;
14231 fixp
->fx_r_type
= BFD_RELOC_386_GOTOFF
;
14233 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTOFF64
;
14235 fixp
->fx_subsy
= 0;
14238 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14241 /* NB: Commit 292676c1 resolved PLT32 reloc aganst local symbol
14242 to section. Since PLT32 relocation must be against symbols,
14243 turn such PLT32 relocation into PC32 relocation. */
14245 && (fixp
->fx_r_type
== BFD_RELOC_386_PLT32
14246 || fixp
->fx_r_type
== BFD_RELOC_X86_64_PLT32
)
14247 && symbol_section_p (fixp
->fx_addsy
))
14248 fixp
->fx_r_type
= BFD_RELOC_32_PCREL
;
14251 if (fixp
->fx_r_type
== BFD_RELOC_386_GOT32
14252 && fixp
->fx_tcbit2
)
14253 fixp
->fx_r_type
= BFD_RELOC_386_GOT32X
;
14262 tc_gen_reloc (asection
*section ATTRIBUTE_UNUSED
, fixS
*fixp
)
14265 bfd_reloc_code_real_type code
;
14267 switch (fixp
->fx_r_type
)
14269 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14272 case BFD_RELOC_SIZE32
:
14273 case BFD_RELOC_SIZE64
:
14275 && !bfd_is_abs_section (S_GET_SEGMENT (fixp
->fx_addsy
))
14276 && (!fixp
->fx_subsy
14277 || bfd_is_abs_section (S_GET_SEGMENT (fixp
->fx_subsy
))))
14278 sym
= fixp
->fx_addsy
;
14279 else if (fixp
->fx_subsy
14280 && !bfd_is_abs_section (S_GET_SEGMENT (fixp
->fx_subsy
))
14281 && (!fixp
->fx_addsy
14282 || bfd_is_abs_section (S_GET_SEGMENT (fixp
->fx_addsy
))))
14283 sym
= fixp
->fx_subsy
;
14286 if (IS_ELF
&& sym
&& S_IS_DEFINED (sym
) && !S_IS_EXTERNAL (sym
))
14288 /* Resolve size relocation against local symbol to size of
14289 the symbol plus addend. */
14290 valueT value
= S_GET_SIZE (sym
);
14292 if (symbol_get_bfdsym (sym
)->flags
& BSF_SECTION_SYM
)
14293 value
= bfd_section_size (S_GET_SEGMENT (sym
));
14294 if (sym
== fixp
->fx_subsy
)
14297 if (fixp
->fx_addsy
)
14298 value
+= S_GET_VALUE (fixp
->fx_addsy
);
14300 else if (fixp
->fx_subsy
)
14301 value
-= S_GET_VALUE (fixp
->fx_subsy
);
14302 value
+= fixp
->fx_offset
;
14303 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
14305 && !fits_in_unsigned_long (value
))
14306 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14307 _("symbol size computation overflow"));
14308 fixp
->fx_addsy
= NULL
;
14309 fixp
->fx_subsy
= NULL
;
14310 md_apply_fix (fixp
, (valueT
*) &value
, NULL
);
14313 if (!fixp
->fx_addsy
|| fixp
->fx_subsy
)
14315 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14316 "unsupported expression involving @size");
14320 /* Fall through. */
14322 case BFD_RELOC_X86_64_PLT32
:
14323 case BFD_RELOC_X86_64_GOT32
:
14324 case BFD_RELOC_X86_64_GOTPCREL
:
14325 case BFD_RELOC_X86_64_GOTPCRELX
:
14326 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
14327 case BFD_RELOC_386_PLT32
:
14328 case BFD_RELOC_386_GOT32
:
14329 case BFD_RELOC_386_GOT32X
:
14330 case BFD_RELOC_386_GOTOFF
:
14331 case BFD_RELOC_386_GOTPC
:
14332 case BFD_RELOC_386_TLS_GD
:
14333 case BFD_RELOC_386_TLS_LDM
:
14334 case BFD_RELOC_386_TLS_LDO_32
:
14335 case BFD_RELOC_386_TLS_IE_32
:
14336 case BFD_RELOC_386_TLS_IE
:
14337 case BFD_RELOC_386_TLS_GOTIE
:
14338 case BFD_RELOC_386_TLS_LE_32
:
14339 case BFD_RELOC_386_TLS_LE
:
14340 case BFD_RELOC_386_TLS_GOTDESC
:
14341 case BFD_RELOC_386_TLS_DESC_CALL
:
14342 case BFD_RELOC_X86_64_TLSGD
:
14343 case BFD_RELOC_X86_64_TLSLD
:
14344 case BFD_RELOC_X86_64_DTPOFF32
:
14345 case BFD_RELOC_X86_64_DTPOFF64
:
14346 case BFD_RELOC_X86_64_GOTTPOFF
:
14347 case BFD_RELOC_X86_64_TPOFF32
:
14348 case BFD_RELOC_X86_64_TPOFF64
:
14349 case BFD_RELOC_X86_64_GOTOFF64
:
14350 case BFD_RELOC_X86_64_GOTPC32
:
14351 case BFD_RELOC_X86_64_GOT64
:
14352 case BFD_RELOC_X86_64_GOTPCREL64
:
14353 case BFD_RELOC_X86_64_GOTPC64
:
14354 case BFD_RELOC_X86_64_GOTPLT64
:
14355 case BFD_RELOC_X86_64_PLTOFF64
:
14356 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
14357 case BFD_RELOC_X86_64_TLSDESC_CALL
:
14358 case BFD_RELOC_RVA
:
14359 case BFD_RELOC_VTABLE_ENTRY
:
14360 case BFD_RELOC_VTABLE_INHERIT
:
14362 case BFD_RELOC_32_SECREL
:
14364 code
= fixp
->fx_r_type
;
14366 case BFD_RELOC_X86_64_32S
:
14367 if (!fixp
->fx_pcrel
)
14369 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
14370 code
= fixp
->fx_r_type
;
14373 /* Fall through. */
14375 if (fixp
->fx_pcrel
)
14377 switch (fixp
->fx_size
)
14380 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14381 _("can not do %d byte pc-relative relocation"),
14383 code
= BFD_RELOC_32_PCREL
;
14385 case 1: code
= BFD_RELOC_8_PCREL
; break;
14386 case 2: code
= BFD_RELOC_16_PCREL
; break;
14387 case 4: code
= BFD_RELOC_32_PCREL
; break;
14389 case 8: code
= BFD_RELOC_64_PCREL
; break;
14395 switch (fixp
->fx_size
)
14398 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14399 _("can not do %d byte relocation"),
14401 code
= BFD_RELOC_32
;
14403 case 1: code
= BFD_RELOC_8
; break;
14404 case 2: code
= BFD_RELOC_16
; break;
14405 case 4: code
= BFD_RELOC_32
; break;
14407 case 8: code
= BFD_RELOC_64
; break;
14414 if ((code
== BFD_RELOC_32
14415 || code
== BFD_RELOC_32_PCREL
14416 || code
== BFD_RELOC_X86_64_32S
)
14418 && fixp
->fx_addsy
== GOT_symbol
)
14421 code
= BFD_RELOC_386_GOTPC
;
14423 code
= BFD_RELOC_X86_64_GOTPC32
;
14425 if ((code
== BFD_RELOC_64
|| code
== BFD_RELOC_64_PCREL
)
14427 && fixp
->fx_addsy
== GOT_symbol
)
14429 code
= BFD_RELOC_X86_64_GOTPC64
;
14432 rel
= XNEW (arelent
);
14433 rel
->sym_ptr_ptr
= XNEW (asymbol
*);
14434 *rel
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
14436 rel
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
14438 if (!use_rela_relocations
)
14440 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
14441 vtable entry to be used in the relocation's section offset. */
14442 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
14443 rel
->address
= fixp
->fx_offset
;
14444 #if defined (OBJ_COFF) && defined (TE_PE)
14445 else if (fixp
->fx_addsy
&& S_IS_WEAK (fixp
->fx_addsy
))
14446 rel
->addend
= fixp
->fx_addnumber
- (S_GET_VALUE (fixp
->fx_addsy
) * 2);
14451 /* Use the rela in 64bit mode. */
14454 if (disallow_64bit_reloc
)
14457 case BFD_RELOC_X86_64_DTPOFF64
:
14458 case BFD_RELOC_X86_64_TPOFF64
:
14459 case BFD_RELOC_64_PCREL
:
14460 case BFD_RELOC_X86_64_GOTOFF64
:
14461 case BFD_RELOC_X86_64_GOT64
:
14462 case BFD_RELOC_X86_64_GOTPCREL64
:
14463 case BFD_RELOC_X86_64_GOTPC64
:
14464 case BFD_RELOC_X86_64_GOTPLT64
:
14465 case BFD_RELOC_X86_64_PLTOFF64
:
14466 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14467 _("cannot represent relocation type %s in x32 mode"),
14468 bfd_get_reloc_code_name (code
));
14474 if (!fixp
->fx_pcrel
)
14475 rel
->addend
= fixp
->fx_offset
;
14479 case BFD_RELOC_X86_64_PLT32
:
14480 case BFD_RELOC_X86_64_GOT32
:
14481 case BFD_RELOC_X86_64_GOTPCREL
:
14482 case BFD_RELOC_X86_64_GOTPCRELX
:
14483 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
14484 case BFD_RELOC_X86_64_TLSGD
:
14485 case BFD_RELOC_X86_64_TLSLD
:
14486 case BFD_RELOC_X86_64_GOTTPOFF
:
14487 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
14488 case BFD_RELOC_X86_64_TLSDESC_CALL
:
14489 rel
->addend
= fixp
->fx_offset
- fixp
->fx_size
;
14492 rel
->addend
= (section
->vma
14494 + fixp
->fx_addnumber
14495 + md_pcrel_from (fixp
));
14500 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
14501 if (rel
->howto
== NULL
)
14503 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14504 _("cannot represent relocation type %s"),
14505 bfd_get_reloc_code_name (code
));
14506 /* Set howto to a garbage value so that we can keep going. */
14507 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, BFD_RELOC_32
);
14508 gas_assert (rel
->howto
!= NULL
);
14514 #include "tc-i386-intel.c"
14517 tc_x86_parse_to_dw2regnum (expressionS
*exp
)
14519 int saved_naked_reg
;
14520 char saved_register_dot
;
14522 saved_naked_reg
= allow_naked_reg
;
14523 allow_naked_reg
= 1;
14524 saved_register_dot
= register_chars
['.'];
14525 register_chars
['.'] = '.';
14526 allow_pseudo_reg
= 1;
14527 expression_and_evaluate (exp
);
14528 allow_pseudo_reg
= 0;
14529 register_chars
['.'] = saved_register_dot
;
14530 allow_naked_reg
= saved_naked_reg
;
14532 if (exp
->X_op
== O_register
&& exp
->X_add_number
>= 0)
14534 if ((addressT
) exp
->X_add_number
< i386_regtab_size
)
14536 exp
->X_op
= O_constant
;
14537 exp
->X_add_number
= i386_regtab
[exp
->X_add_number
]
14538 .dw2_regnum
[flag_code
>> 1];
14541 exp
->X_op
= O_illegal
;
14546 tc_x86_frame_initial_instructions (void)
14548 static unsigned int sp_regno
[2];
14550 if (!sp_regno
[flag_code
>> 1])
14552 char *saved_input
= input_line_pointer
;
14553 char sp
[][4] = {"esp", "rsp"};
14556 input_line_pointer
= sp
[flag_code
>> 1];
14557 tc_x86_parse_to_dw2regnum (&exp
);
14558 gas_assert (exp
.X_op
== O_constant
);
14559 sp_regno
[flag_code
>> 1] = exp
.X_add_number
;
14560 input_line_pointer
= saved_input
;
14563 cfi_add_CFA_def_cfa (sp_regno
[flag_code
>> 1], -x86_cie_data_alignment
);
14564 cfi_add_CFA_offset (x86_dwarf2_return_column
, x86_cie_data_alignment
);
14568 x86_dwarf2_addr_size (void)
14570 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
14571 if (x86_elf_abi
== X86_64_X32_ABI
)
14574 return bfd_arch_bits_per_address (stdoutput
) / 8;
14578 i386_elf_section_type (const char *str
, size_t len
)
14580 if (flag_code
== CODE_64BIT
14581 && len
== sizeof ("unwind") - 1
14582 && startswith (str
, "unwind"))
14583 return SHT_X86_64_UNWIND
;
14590 i386_solaris_fix_up_eh_frame (segT sec
)
14592 if (flag_code
== CODE_64BIT
)
14593 elf_section_type (sec
) = SHT_X86_64_UNWIND
;
14599 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
14603 exp
.X_op
= O_secrel
;
14604 exp
.X_add_symbol
= symbol
;
14605 exp
.X_add_number
= 0;
14606 emit_expr (&exp
, size
);
14610 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14611 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
14614 x86_64_section_letter (int letter
, const char **ptr_msg
)
14616 if (flag_code
== CODE_64BIT
)
14619 return SHF_X86_64_LARGE
;
14621 *ptr_msg
= _("bad .section directive: want a,l,w,x,M,S,G,T in string");
14624 *ptr_msg
= _("bad .section directive: want a,w,x,M,S,G,T in string");
14629 x86_64_section_word (char *str
, size_t len
)
14631 if (len
== 5 && flag_code
== CODE_64BIT
&& startswith (str
, "large"))
14632 return SHF_X86_64_LARGE
;
14638 handle_large_common (int small ATTRIBUTE_UNUSED
)
14640 if (flag_code
!= CODE_64BIT
)
14642 s_comm_internal (0, elf_common_parse
);
14643 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
14647 static segT lbss_section
;
14648 asection
*saved_com_section_ptr
= elf_com_section_ptr
;
14649 asection
*saved_bss_section
= bss_section
;
14651 if (lbss_section
== NULL
)
14653 flagword applicable
;
14654 segT seg
= now_seg
;
14655 subsegT subseg
= now_subseg
;
14657 /* The .lbss section is for local .largecomm symbols. */
14658 lbss_section
= subseg_new (".lbss", 0);
14659 applicable
= bfd_applicable_section_flags (stdoutput
);
14660 bfd_set_section_flags (lbss_section
, applicable
& SEC_ALLOC
);
14661 seg_info (lbss_section
)->bss
= 1;
14663 subseg_set (seg
, subseg
);
14666 elf_com_section_ptr
= &_bfd_elf_large_com_section
;
14667 bss_section
= lbss_section
;
14669 s_comm_internal (0, elf_common_parse
);
14671 elf_com_section_ptr
= saved_com_section_ptr
;
14672 bss_section
= saved_bss_section
;
14675 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */