1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2022 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
37 #ifndef INFER_ADDR_PREFIX
38 #define INFER_ADDR_PREFIX 1
42 #define DEFAULT_ARCH "i386"
47 #define INLINE __inline__
53 /* Prefixes will be emitted in the order defined below.
54 WAIT_PREFIX must be the first prefix since FWAIT really is an
55 instruction, and so must come before any prefixes.
56 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
57 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
63 #define HLE_PREFIX REP_PREFIX
64 #define BND_PREFIX REP_PREFIX
66 #define REX_PREFIX 6 /* must come last. */
67 #define MAX_PREFIXES 7 /* max prefixes per opcode */
69 /* we define the syntax here (modulo base,index,scale syntax) */
70 #define REGISTER_PREFIX '%'
71 #define IMMEDIATE_PREFIX '$'
72 #define ABSOLUTE_PREFIX '*'
74 /* these are the instruction mnemonic suffixes in AT&T syntax or
75 memory operand size in Intel syntax. */
76 #define WORD_MNEM_SUFFIX 'w'
77 #define BYTE_MNEM_SUFFIX 'b'
78 #define SHORT_MNEM_SUFFIX 's'
79 #define LONG_MNEM_SUFFIX 'l'
80 #define QWORD_MNEM_SUFFIX 'q'
81 /* Intel Syntax. Use a non-ascii letter since it never appears
83 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
85 #define END_OF_INSN '\0'
87 /* This matches the C -> StaticRounding alias in the opcode table. */
88 #define commutative staticrounding
91 'templates' is for grouping together 'template' structures for opcodes
92 of the same name. This is only used for storing the insns in the grand
93 ole hash table of insns.
94 The templates themselves start at START and range up to (but not including)
99 const insn_template
*start
;
100 const insn_template
*end
;
104 /* 386 operand encoding bytes: see 386 book for details of this. */
107 unsigned int regmem
; /* codes register or memory operand */
108 unsigned int reg
; /* codes register operand (or extended opcode) */
109 unsigned int mode
; /* how to interpret regmem & reg */
113 /* x86-64 extension prefix. */
/* NOTE(review): held as a plain int rather than an unsigned char;
   presumably 0 denotes "no REX prefix emitted" -- confirm against the
   uses of rex elsewhere in this file.  */
114 typedef int rex_byte
;
116 /* 386 opcode byte to code indirect addressing. */
125 /* x86 arch names, types and features */
128 const char *name
; /* arch name */
129 unsigned int len
; /* arch string length */
130 enum processor_type type
; /* arch type */
131 i386_cpu_flags flags
; /* cpu feature flags */
132 unsigned int skip
; /* show_arch should skip this. */
136 /* Used to turn off indicated flags. */
139 const char *name
; /* arch name */
140 unsigned int len
; /* arch string length */
141 i386_cpu_flags flags
; /* cpu feature flags */
145 static void update_code_flag (int, int);
146 static void set_code_flag (int);
147 static void set_16bit_gcc_code_flag (int);
148 static void set_intel_syntax (int);
149 static void set_intel_mnemonic (int);
150 static void set_allow_index_reg (int);
151 static void set_check (int);
152 static void set_cpu_arch (int);
154 static void pe_directive_secrel (int);
155 static void pe_directive_secidx (int);
157 static void signed_cons (int);
158 static char *output_invalid (int c
);
159 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
161 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
163 static int i386_att_operand (char *);
164 static int i386_intel_operand (char *, int);
165 static int i386_intel_simplify (expressionS
*);
166 static int i386_intel_parse_name (const char *, expressionS
*);
167 static const reg_entry
*parse_register (char *, char **);
168 static char *parse_insn (char *, char *);
169 static char *parse_operands (char *, const char *);
170 static void swap_operands (void);
171 static void swap_2_operands (unsigned int, unsigned int);
172 static enum flag_code
i386_addressing_mode (void);
173 static void optimize_imm (void);
174 static void optimize_disp (void);
175 static const insn_template
*match_template (char);
176 static int check_string (void);
177 static int process_suffix (void);
178 static int check_byte_reg (void);
179 static int check_long_reg (void);
180 static int check_qword_reg (void);
181 static int check_word_reg (void);
182 static int finalize_imm (void);
183 static int process_operands (void);
184 static const reg_entry
*build_modrm_byte (void);
185 static void output_insn (void);
186 static void output_imm (fragS
*, offsetT
);
187 static void output_disp (fragS
*, offsetT
);
189 static void s_bss (int);
191 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
192 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
194 /* GNU_PROPERTY_X86_ISA_1_USED. */
195 static unsigned int x86_isa_1_used
;
196 /* GNU_PROPERTY_X86_FEATURE_2_USED. */
197 static unsigned int x86_feature_2_used
;
198 /* Generate x86 used ISA and feature properties. */
199 static unsigned int x86_used_note
= DEFAULT_X86_USED_NOTE
;
202 static const char *default_arch
= DEFAULT_ARCH
;
204 /* parse_register() returns this when a register alias cannot be used. */
205 static const reg_entry bad_reg
= { "<bad>", OPERAND_TYPE_NONE
, 0, 0,
206 { Dw2Inval
, Dw2Inval
} };
208 static const reg_entry
*reg_eax
;
209 static const reg_entry
*reg_ds
;
210 static const reg_entry
*reg_es
;
211 static const reg_entry
*reg_ss
;
212 static const reg_entry
*reg_st0
;
213 static const reg_entry
*reg_k0
;
218 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
219 unsigned char bytes
[4];
221 /* Destination or source register specifier. */
222 const reg_entry
*register_specifier
;
225 /* 'md_assemble ()' gathers together information and puts it into a
232 const reg_entry
*regs
;
237 operand_size_mismatch
,
238 operand_type_mismatch
,
239 register_type_mismatch
,
240 number_of_operands_mismatch
,
241 invalid_instruction_suffix
,
243 unsupported_with_intel_mnemonic
,
247 invalid_vsib_address
,
248 invalid_vector_register_set
,
249 invalid_tmm_register_set
,
250 invalid_dest_and_src_register_set
,
251 unsupported_vector_index_register
,
252 unsupported_broadcast
,
255 mask_not_on_destination
,
258 rc_sae_operand_not_last_imm
,
259 invalid_register_operand
,
264 /* TM holds the template for the insn were currently assembling. */
267 /* SUFFIX holds the instruction size suffix for byte, word, dword
268 or qword, if given. */
271 /* OPCODE_LENGTH holds the number of base opcode bytes. */
272 unsigned char opcode_length
;
274 /* OPERANDS gives the number of given operands. */
275 unsigned int operands
;
277 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
278 of given register, displacement, memory operands and immediate
280 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
282 /* TYPES [i] is the type (see above #defines) which tells us how to
283 use OP[i] for the corresponding operand. */
284 i386_operand_type types
[MAX_OPERANDS
];
286 /* Displacement expression, immediate expression, or register for each
288 union i386_op op
[MAX_OPERANDS
];
290 /* Flags for operands. */
291 unsigned int flags
[MAX_OPERANDS
];
292 #define Operand_PCrel 1
293 #define Operand_Mem 2
295 /* Relocation type for operand */
296 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
298 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
299 the base index byte below. */
300 const reg_entry
*base_reg
;
301 const reg_entry
*index_reg
;
302 unsigned int log2_scale_factor
;
304 /* SEG gives the seg_entries of this insn. They are zero unless
305 explicit segment overrides are given. */
306 const reg_entry
*seg
[2];
308 /* Copied first memory operand string, for re-checking. */
311 /* PREFIX holds all the given prefix opcodes (usually null).
312 PREFIXES is the number of prefix opcodes. */
313 unsigned int prefixes
;
314 unsigned char prefix
[MAX_PREFIXES
];
316 /* Register is in low 3 bits of opcode. */
319 /* The operand to a branch insn indicates an absolute branch. */
322 /* Extended states. */
330 xstate_ymm
= 1 << 2 | xstate_xmm
,
332 xstate_zmm
= 1 << 3 | xstate_ymm
,
335 /* Use MASK state. */
339 /* Has GOTPC or TLS relocation. */
340 bool has_gotpc_tls_reloc
;
342 /* RM and SIB are the modrm byte and the sib byte where the
343 addressing modes of this insn are encoded. */
350 /* Masking attributes.
352 The struct describes masking, applied to OPERAND in the instruction.
353 REG is a pointer to the corresponding mask register. ZEROING tells
354 whether merging or zeroing mask is used. */
355 struct Mask_Operation
357 const reg_entry
*reg
;
358 unsigned int zeroing
;
359 /* The operand where this operation is associated. */
360 unsigned int operand
;
363 /* Rounding control and SAE attributes. */
376 unsigned int operand
;
379 /* Broadcasting attributes.
381 The struct describes broadcasting, applied to OPERAND. TYPE
382 expresses the broadcast factor. */
383 struct Broadcast_Operation
385 /* Type of broadcast: {1to2}, {1to4}, {1to8}, {1to16} or {1to32}. */
388 /* Index of broadcasted operand. */
389 unsigned int operand
;
391 /* Number of bytes to broadcast. */
395 /* Compressed disp8*N attribute. */
396 unsigned int memshift
;
398 /* Prefer load or store in encoding. */
401 dir_encoding_default
= 0,
407 /* Prefer 8bit, 16bit, 32bit displacement in encoding. */
410 disp_encoding_default
= 0,
416 /* Prefer the REX byte in encoding. */
419 /* Disable instruction size optimization. */
422 /* How to encode vector instructions. */
425 vex_encoding_default
= 0,
433 const char *rep_prefix
;
436 const char *hle_prefix
;
438 /* Have BND prefix. */
439 const char *bnd_prefix
;
441 /* Have NOTRACK prefix. */
442 const char *notrack_prefix
;
445 enum i386_error error
;
448 typedef struct _i386_insn i386_insn
;
450 /* Link RC type with corresponding string, that'll be looked for in
459 static const struct RC_name RC_NamesTable
[] =
461 { rne
, STRING_COMMA_LEN ("rn-sae") },
462 { rd
, STRING_COMMA_LEN ("rd-sae") },
463 { ru
, STRING_COMMA_LEN ("ru-sae") },
464 { rz
, STRING_COMMA_LEN ("rz-sae") },
465 { saeonly
, STRING_COMMA_LEN ("sae") },
468 /* List of chars besides those in app.c:symbol_chars that can start an
469 operand. Used to prevent the scrubber eating vital white-space. */
470 const char extra_symbol_chars
[] = "*%-([{}"
479 #if ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
480 && !defined (TE_GNU) \
481 && !defined (TE_LINUX) \
482 && !defined (TE_Haiku) \
483 && !defined (TE_FreeBSD) \
484 && !defined (TE_DragonFly) \
485 && !defined (TE_NetBSD))
486 /* This array holds the chars that always start a comment. If the
487 pre-processor is disabled, these aren't very useful. The option
488 --divide will remove '/' from this list. */
489 const char *i386_comment_chars
= "#/";
490 #define SVR4_COMMENT_CHARS 1
491 #define PREFIX_SEPARATOR '\\'
494 const char *i386_comment_chars
= "#";
495 #define PREFIX_SEPARATOR '/'
498 /* This array holds the chars that only start a comment at the beginning of
499 a line. If the line seems to have the form '# 123 filename'
500 .line and .file directives will appear in the pre-processed output.
501 Note that input_file.c hand checks for '#' at the beginning of the
502 first line of the input file. This is because the compiler outputs
503 #NO_APP at the beginning of its output.
504 Also note that comments started like this one will always work if
505 '/' isn't otherwise defined. */
506 const char line_comment_chars
[] = "#/";
508 const char line_separator_chars
[] = ";";
510 /* Chars that can be used to separate mant from exp in floating point
512 const char EXP_CHARS
[] = "eE";
514 /* Chars that mean this number is a floating point constant
517 const char FLT_CHARS
[] = "fFdDxXhHbB";
519 /* Tables for lexical analysis. */
520 static char mnemonic_chars
[256];
521 static char register_chars
[256];
522 static char operand_chars
[256];
523 static char identifier_chars
[256];
525 /* Lexical macros. */
526 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
527 #define is_operand_char(x) (operand_chars[(unsigned char) x])
528 #define is_register_char(x) (register_chars[(unsigned char) x])
529 #define is_space_char(x) ((x) == ' ')
530 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
532 /* All non-digit non-letter characters that may occur in an operand. */
533 static char operand_special_chars
[] = "%$-+(,)*._~/<>|&^!:[@]";
535 /* md_assemble() always leaves the strings it's passed unaltered. To
536 effect this we maintain a stack of saved characters that we've smashed
537 with '\0's (indicating end of strings for various sub-fields of the
538 assembler instruction). */
539 static char save_stack
[32];
540 static char *save_stack_p
;
541 #define END_STRING_AND_SAVE(s) \
542 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
543 #define RESTORE_END_STRING(s) \
544 do { *(s) = *--save_stack_p; } while (0)
546 /* The instruction we're assembling. */
549 /* Possible templates for current insn. */
550 static const templates
*current_templates
;
552 /* Per instruction expressionS buffers: max displacements & immediates. */
553 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
554 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
556 /* Current operand we are working on. */
557 static int this_operand
= -1;
559 /* We support four different modes. FLAG_CODE variable is used to distinguish
567 static enum flag_code flag_code
;
568 static unsigned int object_64bit
;
569 static unsigned int disallow_64bit_reloc
;
570 static int use_rela_relocations
= 0;
571 /* __tls_get_addr/___tls_get_addr symbol for TLS. */
572 static const char *tls_get_addr
;
574 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
575 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
576 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
578 /* The ELF ABI to use. */
586 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
589 #if defined (TE_PE) || defined (TE_PEP)
590 /* Use big object file format. */
591 static int use_big_obj
= 0;
594 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
595 /* 1 if generating code for a shared library. */
596 static int shared
= 0;
599 /* 1 for intel syntax,
601 static int intel_syntax
= 0;
603 static enum x86_64_isa
605 amd64
= 1, /* AMD64 ISA. */
606 intel64
/* Intel64 ISA. */
609 /* 1 for intel mnemonic,
610 0 if att mnemonic. */
611 static int intel_mnemonic
= !SYSV386_COMPAT
;
613 /* 1 if pseudo registers are permitted. */
614 static int allow_pseudo_reg
= 0;
616 /* 1 if register prefix % not required. */
617 static int allow_naked_reg
= 0;
619 /* 1 if the assembler should add BND prefix for all control-transferring
620 instructions supporting it, even if this prefix wasn't specified
622 static int add_bnd_prefix
= 0;
624 /* 1 if pseudo index register, eiz/riz, is allowed. */
625 static int allow_index_reg
= 0;
627 /* 1 if the assembler should ignore LOCK prefix, even if it was
628 specified explicitly. */
629 static int omit_lock_prefix
= 0;
631 /* 1 if the assembler should encode lfence, mfence, and sfence as
632 "lock addl $0, (%{re}sp)". */
633 static int avoid_fence
= 0;
635 /* 1 if lfence should be inserted after every load. */
636 static int lfence_after_load
= 0;
638 /* Non-zero if lfence should be inserted before indirect branch. */
639 static enum lfence_before_indirect_branch_kind
641 lfence_branch_none
= 0,
642 lfence_branch_register
,
643 lfence_branch_memory
,
646 lfence_before_indirect_branch
;
648 /* Non-zero if lfence should be inserted before ret. */
649 static enum lfence_before_ret_kind
651 lfence_before_ret_none
= 0,
652 lfence_before_ret_not
,
653 lfence_before_ret_or
,
654 lfence_before_ret_shl
658 /* Types of previous instruction is .byte or prefix. */
673 /* 1 if the assembler should generate relax relocations. */
675 static int generate_relax_relocations
676 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
;
678 static enum check_kind
684 sse_check
, operand_check
= check_warning
;
686 /* Non-zero if branches should be aligned within power of 2 boundary. */
687 static int align_branch_power
= 0;
689 /* Types of branches to align. */
690 enum align_branch_kind
692 align_branch_none
= 0,
693 align_branch_jcc
= 1,
694 align_branch_fused
= 2,
695 align_branch_jmp
= 3,
696 align_branch_call
= 4,
697 align_branch_indirect
= 5,
701 /* Type bits of branches to align. */
702 enum align_branch_bit
704 align_branch_jcc_bit
= 1 << align_branch_jcc
,
705 align_branch_fused_bit
= 1 << align_branch_fused
,
706 align_branch_jmp_bit
= 1 << align_branch_jmp
,
707 align_branch_call_bit
= 1 << align_branch_call
,
708 align_branch_indirect_bit
= 1 << align_branch_indirect
,
709 align_branch_ret_bit
= 1 << align_branch_ret
712 static unsigned int align_branch
= (align_branch_jcc_bit
713 | align_branch_fused_bit
714 | align_branch_jmp_bit
);
716 /* Types of condition jump used by macro-fusion. */
719 mf_jcc_jo
= 0, /* base opcode 0x70 */
720 mf_jcc_jc
, /* base opcode 0x72 */
721 mf_jcc_je
, /* base opcode 0x74 */
722 mf_jcc_jna
, /* base opcode 0x76 */
723 mf_jcc_js
, /* base opcode 0x78 */
724 mf_jcc_jp
, /* base opcode 0x7a */
725 mf_jcc_jl
, /* base opcode 0x7c */
726 mf_jcc_jle
, /* base opcode 0x7e */
729 /* Types of compare flag-modifying instructions used by macro-fusion. */
732 mf_cmp_test_and
, /* test/cmp */
733 mf_cmp_alu_cmp
, /* add/sub/cmp */
734 mf_cmp_incdec
/* inc/dec */
737 /* The maximum padding size for fused jcc. CMP like instruction can
738 be 9 bytes and jcc can be 6 bytes. Leave room just in case for
740 #define MAX_FUSED_JCC_PADDING_SIZE 20
742 /* The maximum number of prefixes added for an instruction. */
743 static unsigned int align_branch_prefix_size
= 5;
746 1. Clear the REX_W bit with register operand if possible.
747 2. Above plus use 128bit vector instruction to clear the full vector
750 static int optimize
= 0;
753 1. Clear the REX_W bit with register operand if possible.
754 2. Above plus use 128bit vector instruction to clear the full vector
756 3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to
759 static int optimize_for_space
= 0;
761 /* Register prefix used for error message. */
762 static const char *register_prefix
= "%";
764 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
765 leave, push, and pop instructions so that gcc has the same stack
766 frame as in 32 bit mode. */
767 static char stackop_size
= '\0';
769 /* Non-zero to optimize code alignment. */
770 int optimize_align_code
= 1;
772 /* Non-zero to quieten some warnings. */
773 static int quiet_warnings
= 0;
776 static const char *cpu_arch_name
= NULL
;
777 static char *cpu_sub_arch_name
= NULL
;
779 /* CPU feature flags. */
780 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
782 /* If we have selected a cpu we are generating instructions for. */
783 static int cpu_arch_tune_set
= 0;
785 /* Cpu we are generating instructions for. */
786 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
788 /* CPU feature flags of cpu we are generating instructions for. */
789 static i386_cpu_flags cpu_arch_tune_flags
;
791 /* CPU instruction set architecture used. */
792 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
794 /* CPU feature flags of instruction set architecture used. */
795 i386_cpu_flags cpu_arch_isa_flags
;
797 /* If set, conditional jumps are not automatically promoted to handle
798 larger than a byte offset. */
799 static unsigned int no_cond_jump_promotion
= 0;
801 /* Encode SSE instructions with VEX prefix. */
802 static unsigned int sse2avx
;
804 /* Encode aligned vector move as unaligned vector move. */
805 static unsigned int use_unaligned_vector_move
;
807 /* Encode scalar AVX instructions with specific vector length. */
814 /* Encode VEX WIG instructions with specific vex.w. */
821 /* Encode scalar EVEX LIG instructions with specific vector length. */
829 /* Encode EVEX WIG instructions with specific evex.w. */
836 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
837 static enum rc_type evexrcig
= rne
;
839 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
840 static symbolS
*GOT_symbol
;
842 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
843 unsigned int x86_dwarf2_return_column
;
845 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
846 int x86_cie_data_alignment
;
848 /* Interface to relax_segment.
849 There are 3 major relax states for 386 jump insns because the
850 different types of jumps add different sizes to frags when we're
851 figuring out what sort of jump to choose to reach a given label.
853 BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING are used to align
854 branches which are handled by md_estimate_size_before_relax() and
855 i386_generic_table_relax_frag(). */
858 #define UNCOND_JUMP 0
860 #define COND_JUMP86 2
861 #define BRANCH_PADDING 3
862 #define BRANCH_PREFIX 4
863 #define FUSED_JCC_PADDING 5
868 #define SMALL16 (SMALL | CODE16)
870 #define BIG16 (BIG | CODE16)
874 #define INLINE __inline__
880 #define ENCODE_RELAX_STATE(type, size) \
881 ((relax_substateT) (((type) << 2) | (size)))
882 #define TYPE_FROM_RELAX_STATE(s) \
884 #define DISP_SIZE_FROM_RELAX_STATE(s) \
885 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
887 /* This table is used by relax_frag to promote short jumps to long
888 ones where necessary. SMALL (short) jumps may be promoted to BIG
889 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
890 don't allow a short jump in a 32 bit code segment to be promoted to
891 a 16 bit offset jump because it's slower (requires data size
892 prefix), and doesn't work, unless the destination is in the bottom
893 64k of the code segment (The top 16 bits of eip are zeroed). */
895 const relax_typeS md_relax_table
[] =
898 1) most positive reach of this state,
899 2) most negative reach of this state,
900 3) how many bytes this mode will have in the variable part of the frag
901 4) which index into the table to try if we can't fit into this one. */
903 /* UNCOND_JUMP states. */
904 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
905 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
906 /* dword jmp adds 4 bytes to frag:
907 0 extra opcode bytes, 4 displacement bytes. */
909 /* word jmp adds 2 bytes to frag:
910 0 extra opcode bytes, 2 displacement bytes. */
913 /* COND_JUMP states. */
914 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
915 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
916 /* dword conditionals adds 5 bytes to frag:
917 1 extra opcode byte, 4 displacement bytes. */
919 /* word conditionals add 3 bytes to frag:
920 1 extra opcode byte, 2 displacement bytes. */
923 /* COND_JUMP86 states. */
924 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
925 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
926 /* dword conditionals adds 5 bytes to frag:
927 1 extra opcode byte, 4 displacement bytes. */
929 /* word conditionals add 4 bytes to frag:
930 1 displacement byte and a 3 byte long branch insn. */
934 static const arch_entry cpu_arch
[] =
936 /* Do not replace the first two entries - i386_target_format()
937 relies on them being there in this order. */
938 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32
,
939 CPU_GENERIC32_FLAGS
, 0 },
940 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64
,
941 CPU_GENERIC64_FLAGS
, 0 },
942 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN
,
944 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN
,
946 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN
,
948 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386
,
950 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486
,
952 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM
,
954 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO
,
956 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM
,
958 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO
,
959 CPU_PENTIUMPRO_FLAGS
, 0 },
960 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO
,
962 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO
,
964 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4
,
966 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA
,
968 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA
,
969 CPU_NOCONA_FLAGS
, 0 },
970 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE
,
972 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE
,
974 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2
,
975 CPU_CORE2_FLAGS
, 1 },
976 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2
,
977 CPU_CORE2_FLAGS
, 0 },
978 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7
,
979 CPU_COREI7_FLAGS
, 0 },
980 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU
,
981 CPU_IAMCU_FLAGS
, 0 },
982 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6
,
984 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6
,
986 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON
,
987 CPU_ATHLON_FLAGS
, 0 },
988 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8
,
990 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8
,
992 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8
,
994 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10
,
995 CPU_AMDFAM10_FLAGS
, 0 },
996 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD
,
997 CPU_BDVER1_FLAGS
, 0 },
998 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD
,
999 CPU_BDVER2_FLAGS
, 0 },
1000 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD
,
1001 CPU_BDVER3_FLAGS
, 0 },
1002 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD
,
1003 CPU_BDVER4_FLAGS
, 0 },
1004 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER
,
1005 CPU_ZNVER1_FLAGS
, 0 },
1006 { STRING_COMMA_LEN ("znver2"), PROCESSOR_ZNVER
,
1007 CPU_ZNVER2_FLAGS
, 0 },
1008 { STRING_COMMA_LEN ("znver3"), PROCESSOR_ZNVER
,
1009 CPU_ZNVER3_FLAGS
, 0 },
1010 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT
,
1011 CPU_BTVER1_FLAGS
, 0 },
1012 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT
,
1013 CPU_BTVER2_FLAGS
, 0 },
1014 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN
,
1015 CPU_8087_FLAGS
, 0 },
1016 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN
,
1018 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN
,
1020 { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN
,
1022 { STRING_COMMA_LEN (".cmov"), PROCESSOR_UNKNOWN
,
1023 CPU_CMOV_FLAGS
, 0 },
1024 { STRING_COMMA_LEN (".fxsr"), PROCESSOR_UNKNOWN
,
1025 CPU_FXSR_FLAGS
, 0 },
1026 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN
,
1028 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN
,
1030 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN
,
1031 CPU_SSE2_FLAGS
, 0 },
1032 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN
,
1033 CPU_SSE3_FLAGS
, 0 },
1034 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1035 CPU_SSE4A_FLAGS
, 0 },
1036 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN
,
1037 CPU_SSSE3_FLAGS
, 0 },
1038 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN
,
1039 CPU_SSE4_1_FLAGS
, 0 },
1040 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN
,
1041 CPU_SSE4_2_FLAGS
, 0 },
1042 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN
,
1043 CPU_SSE4_2_FLAGS
, 0 },
1044 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN
,
1046 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN
,
1047 CPU_AVX2_FLAGS
, 0 },
1048 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN
,
1049 CPU_AVX512F_FLAGS
, 0 },
1050 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN
,
1051 CPU_AVX512CD_FLAGS
, 0 },
1052 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN
,
1053 CPU_AVX512ER_FLAGS
, 0 },
1054 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN
,
1055 CPU_AVX512PF_FLAGS
, 0 },
1056 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN
,
1057 CPU_AVX512DQ_FLAGS
, 0 },
1058 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN
,
1059 CPU_AVX512BW_FLAGS
, 0 },
1060 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN
,
1061 CPU_AVX512VL_FLAGS
, 0 },
1062 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN
,
1064 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN
,
1065 CPU_VMFUNC_FLAGS
, 0 },
1066 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN
,
1068 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN
,
1069 CPU_XSAVE_FLAGS
, 0 },
1070 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN
,
1071 CPU_XSAVEOPT_FLAGS
, 0 },
1072 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN
,
1073 CPU_XSAVEC_FLAGS
, 0 },
1074 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN
,
1075 CPU_XSAVES_FLAGS
, 0 },
1076 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN
,
1078 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN
,
1079 CPU_PCLMUL_FLAGS
, 0 },
1080 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN
,
1081 CPU_PCLMUL_FLAGS
, 1 },
1082 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN
,
1083 CPU_FSGSBASE_FLAGS
, 0 },
1084 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN
,
1085 CPU_RDRND_FLAGS
, 0 },
1086 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN
,
1087 CPU_F16C_FLAGS
, 0 },
1088 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN
,
1089 CPU_BMI2_FLAGS
, 0 },
1090 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN
,
1092 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN
,
1093 CPU_FMA4_FLAGS
, 0 },
1094 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN
,
1096 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN
,
1098 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN
,
1099 CPU_MOVBE_FLAGS
, 0 },
1100 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN
,
1101 CPU_CX16_FLAGS
, 0 },
1102 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN
,
1104 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN
,
1105 CPU_LZCNT_FLAGS
, 0 },
1106 { STRING_COMMA_LEN (".popcnt"), PROCESSOR_UNKNOWN
,
1107 CPU_POPCNT_FLAGS
, 0 },
1108 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN
,
1110 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN
,
1112 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN
,
1113 CPU_INVPCID_FLAGS
, 0 },
1114 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN
,
1115 CPU_CLFLUSH_FLAGS
, 0 },
1116 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN
,
1118 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN
,
1119 CPU_SYSCALL_FLAGS
, 0 },
1120 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN
,
1121 CPU_RDTSCP_FLAGS
, 0 },
1122 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN
,
1123 CPU_3DNOW_FLAGS
, 0 },
1124 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN
,
1125 CPU_3DNOWA_FLAGS
, 0 },
1126 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN
,
1127 CPU_PADLOCK_FLAGS
, 0 },
1128 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN
,
1129 CPU_SVME_FLAGS
, 1 },
1130 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN
,
1131 CPU_SVME_FLAGS
, 0 },
1132 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1133 CPU_SSE4A_FLAGS
, 0 },
1134 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN
,
1136 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN
,
1138 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN
,
1140 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN
,
1142 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN
,
1143 CPU_RDSEED_FLAGS
, 0 },
1144 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN
,
1145 CPU_PRFCHW_FLAGS
, 0 },
1146 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN
,
1147 CPU_SMAP_FLAGS
, 0 },
1148 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN
,
1150 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN
,
1152 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN
,
1153 CPU_CLFLUSHOPT_FLAGS
, 0 },
1154 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN
,
1155 CPU_PREFETCHWT1_FLAGS
, 0 },
1156 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN
,
1158 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN
,
1159 CPU_CLWB_FLAGS
, 0 },
1160 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN
,
1161 CPU_AVX512IFMA_FLAGS
, 0 },
1162 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN
,
1163 CPU_AVX512VBMI_FLAGS
, 0 },
1164 { STRING_COMMA_LEN (".avx512_4fmaps"), PROCESSOR_UNKNOWN
,
1165 CPU_AVX512_4FMAPS_FLAGS
, 0 },
1166 { STRING_COMMA_LEN (".avx512_4vnniw"), PROCESSOR_UNKNOWN
,
1167 CPU_AVX512_4VNNIW_FLAGS
, 0 },
1168 { STRING_COMMA_LEN (".avx512_vpopcntdq"), PROCESSOR_UNKNOWN
,
1169 CPU_AVX512_VPOPCNTDQ_FLAGS
, 0 },
1170 { STRING_COMMA_LEN (".avx512_vbmi2"), PROCESSOR_UNKNOWN
,
1171 CPU_AVX512_VBMI2_FLAGS
, 0 },
1172 { STRING_COMMA_LEN (".avx512_vnni"), PROCESSOR_UNKNOWN
,
1173 CPU_AVX512_VNNI_FLAGS
, 0 },
1174 { STRING_COMMA_LEN (".avx512_bitalg"), PROCESSOR_UNKNOWN
,
1175 CPU_AVX512_BITALG_FLAGS
, 0 },
1176 { STRING_COMMA_LEN (".avx_vnni"), PROCESSOR_UNKNOWN
,
1177 CPU_AVX_VNNI_FLAGS
, 0 },
1178 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN
,
1179 CPU_CLZERO_FLAGS
, 0 },
1180 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN
,
1181 CPU_MWAITX_FLAGS
, 0 },
1182 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN
,
1183 CPU_OSPKE_FLAGS
, 0 },
1184 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN
,
1185 CPU_RDPID_FLAGS
, 0 },
1186 { STRING_COMMA_LEN (".ptwrite"), PROCESSOR_UNKNOWN
,
1187 CPU_PTWRITE_FLAGS
, 0 },
1188 { STRING_COMMA_LEN (".ibt"), PROCESSOR_UNKNOWN
,
1190 { STRING_COMMA_LEN (".shstk"), PROCESSOR_UNKNOWN
,
1191 CPU_SHSTK_FLAGS
, 0 },
1192 { STRING_COMMA_LEN (".gfni"), PROCESSOR_UNKNOWN
,
1193 CPU_GFNI_FLAGS
, 0 },
1194 { STRING_COMMA_LEN (".vaes"), PROCESSOR_UNKNOWN
,
1195 CPU_VAES_FLAGS
, 0 },
1196 { STRING_COMMA_LEN (".vpclmulqdq"), PROCESSOR_UNKNOWN
,
1197 CPU_VPCLMULQDQ_FLAGS
, 0 },
1198 { STRING_COMMA_LEN (".wbnoinvd"), PROCESSOR_UNKNOWN
,
1199 CPU_WBNOINVD_FLAGS
, 0 },
1200 { STRING_COMMA_LEN (".pconfig"), PROCESSOR_UNKNOWN
,
1201 CPU_PCONFIG_FLAGS
, 0 },
1202 { STRING_COMMA_LEN (".waitpkg"), PROCESSOR_UNKNOWN
,
1203 CPU_WAITPKG_FLAGS
, 0 },
1204 { STRING_COMMA_LEN (".cldemote"), PROCESSOR_UNKNOWN
,
1205 CPU_CLDEMOTE_FLAGS
, 0 },
1206 { STRING_COMMA_LEN (".amx_int8"), PROCESSOR_UNKNOWN
,
1207 CPU_AMX_INT8_FLAGS
, 0 },
1208 { STRING_COMMA_LEN (".amx_bf16"), PROCESSOR_UNKNOWN
,
1209 CPU_AMX_BF16_FLAGS
, 0 },
1210 { STRING_COMMA_LEN (".amx_tile"), PROCESSOR_UNKNOWN
,
1211 CPU_AMX_TILE_FLAGS
, 0 },
1212 { STRING_COMMA_LEN (".movdiri"), PROCESSOR_UNKNOWN
,
1213 CPU_MOVDIRI_FLAGS
, 0 },
1214 { STRING_COMMA_LEN (".movdir64b"), PROCESSOR_UNKNOWN
,
1215 CPU_MOVDIR64B_FLAGS
, 0 },
1216 { STRING_COMMA_LEN (".avx512_bf16"), PROCESSOR_UNKNOWN
,
1217 CPU_AVX512_BF16_FLAGS
, 0 },
1218 { STRING_COMMA_LEN (".avx512_vp2intersect"), PROCESSOR_UNKNOWN
,
1219 CPU_AVX512_VP2INTERSECT_FLAGS
, 0 },
1220 { STRING_COMMA_LEN (".tdx"), PROCESSOR_UNKNOWN
,
1222 { STRING_COMMA_LEN (".enqcmd"), PROCESSOR_UNKNOWN
,
1223 CPU_ENQCMD_FLAGS
, 0 },
1224 { STRING_COMMA_LEN (".serialize"), PROCESSOR_UNKNOWN
,
1225 CPU_SERIALIZE_FLAGS
, 0 },
1226 { STRING_COMMA_LEN (".rdpru"), PROCESSOR_UNKNOWN
,
1227 CPU_RDPRU_FLAGS
, 0 },
1228 { STRING_COMMA_LEN (".mcommit"), PROCESSOR_UNKNOWN
,
1229 CPU_MCOMMIT_FLAGS
, 0 },
1230 { STRING_COMMA_LEN (".sev_es"), PROCESSOR_UNKNOWN
,
1231 CPU_SEV_ES_FLAGS
, 0 },
1232 { STRING_COMMA_LEN (".tsxldtrk"), PROCESSOR_UNKNOWN
,
1233 CPU_TSXLDTRK_FLAGS
, 0 },
1234 { STRING_COMMA_LEN (".kl"), PROCESSOR_UNKNOWN
,
1236 { STRING_COMMA_LEN (".widekl"), PROCESSOR_UNKNOWN
,
1237 CPU_WIDEKL_FLAGS
, 0 },
1238 { STRING_COMMA_LEN (".uintr"), PROCESSOR_UNKNOWN
,
1239 CPU_UINTR_FLAGS
, 0 },
1240 { STRING_COMMA_LEN (".hreset"), PROCESSOR_UNKNOWN
,
1241 CPU_HRESET_FLAGS
, 0 },
1242 { STRING_COMMA_LEN (".avx512_fp16"), PROCESSOR_UNKNOWN
,
1243 CPU_AVX512_FP16_FLAGS
, 0 },
1246 static const noarch_entry cpu_noarch
[] =
1248 { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS
},
1249 { STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS
},
1250 { STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS
},
1251 { STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS
},
1252 { STRING_COMMA_LEN ("nocmov"), CPU_ANY_CMOV_FLAGS
},
1253 { STRING_COMMA_LEN ("nofxsr"), CPU_ANY_FXSR_FLAGS
},
1254 { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS
},
1255 { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS
},
1256 { STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS
},
1257 { STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS
},
1258 { STRING_COMMA_LEN ("nosse4a"), CPU_ANY_SSE4A_FLAGS
},
1259 { STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS
},
1260 { STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS
},
1261 { STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS
},
1262 { STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS
},
1263 { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS
},
1264 { STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS
},
1265 { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS
},
1266 { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS
},
1267 { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS
},
1268 { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS
},
1269 { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS
},
1270 { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS
},
1271 { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS
},
1272 { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS
},
1273 { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS
},
1274 { STRING_COMMA_LEN ("noavx512_4fmaps"), CPU_ANY_AVX512_4FMAPS_FLAGS
},
1275 { STRING_COMMA_LEN ("noavx512_4vnniw"), CPU_ANY_AVX512_4VNNIW_FLAGS
},
1276 { STRING_COMMA_LEN ("noavx512_vpopcntdq"), CPU_ANY_AVX512_VPOPCNTDQ_FLAGS
},
1277 { STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS
},
1278 { STRING_COMMA_LEN ("noavx512_vnni"), CPU_ANY_AVX512_VNNI_FLAGS
},
1279 { STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS
},
1280 { STRING_COMMA_LEN ("noavx_vnni"), CPU_ANY_AVX_VNNI_FLAGS
},
1281 { STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS
},
1282 { STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS
},
1283 { STRING_COMMA_LEN ("noamx_int8"), CPU_ANY_AMX_INT8_FLAGS
},
1284 { STRING_COMMA_LEN ("noamx_bf16"), CPU_ANY_AMX_BF16_FLAGS
},
1285 { STRING_COMMA_LEN ("noamx_tile"), CPU_ANY_AMX_TILE_FLAGS
},
1286 { STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS
},
1287 { STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS
},
1288 { STRING_COMMA_LEN ("noavx512_bf16"), CPU_ANY_AVX512_BF16_FLAGS
},
1289 { STRING_COMMA_LEN ("noavx512_vp2intersect"),
1290 CPU_ANY_AVX512_VP2INTERSECT_FLAGS
},
1291 { STRING_COMMA_LEN ("notdx"), CPU_ANY_TDX_FLAGS
},
1292 { STRING_COMMA_LEN ("noenqcmd"), CPU_ANY_ENQCMD_FLAGS
},
1293 { STRING_COMMA_LEN ("noserialize"), CPU_ANY_SERIALIZE_FLAGS
},
1294 { STRING_COMMA_LEN ("notsxldtrk"), CPU_ANY_TSXLDTRK_FLAGS
},
1295 { STRING_COMMA_LEN ("nokl"), CPU_ANY_KL_FLAGS
},
1296 { STRING_COMMA_LEN ("nowidekl"), CPU_ANY_WIDEKL_FLAGS
},
1297 { STRING_COMMA_LEN ("nouintr"), CPU_ANY_UINTR_FLAGS
},
1298 { STRING_COMMA_LEN ("nohreset"), CPU_ANY_HRESET_FLAGS
},
1299 { STRING_COMMA_LEN ("noavx512_fp16"), CPU_ANY_AVX512_FP16_FLAGS
},
1303 /* Like s_lcomm_internal in gas/read.c but the alignment string
1304 is allowed to be optional. */
1307 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
1314 && *input_line_pointer
== ',')
1316 align
= parse_align (needs_align
- 1);
1318 if (align
== (addressT
) -1)
1333 bss_alloc (symbolP
, size
, align
);
1338 pe_lcomm (int needs_align
)
1340 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
1344 const pseudo_typeS md_pseudo_table
[] =
1346 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1347 {"align", s_align_bytes
, 0},
1349 {"align", s_align_ptwo
, 0},
1351 {"arch", set_cpu_arch
, 0},
1355 {"lcomm", pe_lcomm
, 1},
1357 {"ffloat", float_cons
, 'f'},
1358 {"dfloat", float_cons
, 'd'},
1359 {"tfloat", float_cons
, 'x'},
1360 {"hfloat", float_cons
, 'h'},
1361 {"bfloat16", float_cons
, 'b'},
1363 {"slong", signed_cons
, 4},
1364 {"noopt", s_ignore
, 0},
1365 {"optim", s_ignore
, 0},
1366 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
1367 {"code16", set_code_flag
, CODE_16BIT
},
1368 {"code32", set_code_flag
, CODE_32BIT
},
1370 {"code64", set_code_flag
, CODE_64BIT
},
1372 {"intel_syntax", set_intel_syntax
, 1},
1373 {"att_syntax", set_intel_syntax
, 0},
1374 {"intel_mnemonic", set_intel_mnemonic
, 1},
1375 {"att_mnemonic", set_intel_mnemonic
, 0},
1376 {"allow_index_reg", set_allow_index_reg
, 1},
1377 {"disallow_index_reg", set_allow_index_reg
, 0},
1378 {"sse_check", set_check
, 0},
1379 {"operand_check", set_check
, 1},
1380 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1381 {"largecomm", handle_large_common
, 0},
1383 {"file", dwarf2_directive_file
, 0},
1384 {"loc", dwarf2_directive_loc
, 0},
1385 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
1388 {"secrel32", pe_directive_secrel
, 0},
1389 {"secidx", pe_directive_secidx
, 0},
1394 /* For interface with expression (). */
1395 extern char *input_line_pointer
;
1397 /* Hash table for instruction mnemonic lookup. */
1398 static htab_t op_hash
;
1400 /* Hash table for register lookup. */
1401 static htab_t reg_hash
;
1403 /* Various efficient no-op patterns for aligning code labels.
1404 Note: Don't try to assemble the instructions in the comments.
1405 0L and 0w are not legal. */
1406 static const unsigned char f32_1
[] =
1408 static const unsigned char f32_2
[] =
1409 {0x66,0x90}; /* xchg %ax,%ax */
1410 static const unsigned char f32_3
[] =
1411 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
1412 static const unsigned char f32_4
[] =
1413 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1414 static const unsigned char f32_6
[] =
1415 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
1416 static const unsigned char f32_7
[] =
1417 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1418 static const unsigned char f16_3
[] =
1419 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
1420 static const unsigned char f16_4
[] =
1421 {0x8d,0xb4,0x00,0x00}; /* lea 0W(%si),%si */
1422 static const unsigned char jump_disp8
[] =
1423 {0xeb}; /* jmp disp8 */
1424 static const unsigned char jump32_disp32
[] =
1425 {0xe9}; /* jmp disp32 */
1426 static const unsigned char jump16_disp32
[] =
1427 {0x66,0xe9}; /* jmp disp32 */
1428 /* 32-bit NOPs patterns. */
1429 static const unsigned char *const f32_patt
[] = {
1430 f32_1
, f32_2
, f32_3
, f32_4
, NULL
, f32_6
, f32_7
1432 /* 16-bit NOPs patterns. */
1433 static const unsigned char *const f16_patt
[] = {
1434 f32_1
, f32_2
, f16_3
, f16_4
1436 /* nopl (%[re]ax) */
1437 static const unsigned char alt_3
[] =
1439 /* nopl 0(%[re]ax) */
1440 static const unsigned char alt_4
[] =
1441 {0x0f,0x1f,0x40,0x00};
1442 /* nopl 0(%[re]ax,%[re]ax,1) */
1443 static const unsigned char alt_5
[] =
1444 {0x0f,0x1f,0x44,0x00,0x00};
1445 /* nopw 0(%[re]ax,%[re]ax,1) */
1446 static const unsigned char alt_6
[] =
1447 {0x66,0x0f,0x1f,0x44,0x00,0x00};
1448 /* nopl 0L(%[re]ax) */
1449 static const unsigned char alt_7
[] =
1450 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1451 /* nopl 0L(%[re]ax,%[re]ax,1) */
1452 static const unsigned char alt_8
[] =
1453 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1454 /* nopw 0L(%[re]ax,%[re]ax,1) */
1455 static const unsigned char alt_9
[] =
1456 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1457 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1458 static const unsigned char alt_10
[] =
1459 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1460 /* data16 nopw %cs:0L(%eax,%eax,1) */
1461 static const unsigned char alt_11
[] =
1462 {0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1463 /* 32-bit and 64-bit NOPs patterns. */
1464 static const unsigned char *const alt_patt
[] = {
1465 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1466 alt_9
, alt_10
, alt_11
/* Generate COUNT bytes of NOPs to WHERE from PATT with the maximum
   size of a single NOP instruction MAX_SINGLE_NOP_SIZE.  */

static void
i386_output_nops (char *where, const unsigned char *const *patt,
		  int count, int max_single_nop_size)

{
  /* Place the longer NOP first.  */
  int last;
  int offset;
  const unsigned char *nops;

  if (max_single_nop_size < 1)
    {
      as_fatal (_("i386_output_nops called to generate nops of at most %d bytes!"),
		max_single_nop_size);
      return;
    }

  nops = patt[max_single_nop_size - 1];

  /* Use the smaller one if the requested one isn't available
     (the pattern table has NULL holes for missing sizes).  */
  if (nops == NULL)
    {
      max_single_nop_size--;
      nops = patt[max_single_nop_size - 1];
    }

  last = count % max_single_nop_size;

  count -= last;
  for (offset = 0; offset < count; offset += max_single_nop_size)
    memcpy (where + offset, nops, max_single_nop_size);

  if (last)
    {
      nops = patt[last - 1];
      if (nops == NULL)
	{
	  /* Use the smaller one plus one-byte NOP if the needed one
	     isn't available.  */
	  last--;
	  nops = patt[last - 1];
	  memcpy (where + offset, nops, last);
	  where[offset + last] = *patt[0];
	}
      else
	memcpy (where + offset, nops, last);
    }
}
1522 fits_in_imm7 (offsetT num
)
1524 return (num
& 0x7f) == num
;
1528 fits_in_imm31 (offsetT num
)
1530 return (num
& 0x7fffffff) == num
;
1533 /* Genenerate COUNT bytes of NOPs to WHERE with the maximum size of a
1534 single NOP instruction LIMIT. */
1537 i386_generate_nops (fragS
*fragP
, char *where
, offsetT count
, int limit
)
1539 const unsigned char *const *patt
= NULL
;
1540 int max_single_nop_size
;
1541 /* Maximum number of NOPs before switching to jump over NOPs. */
1542 int max_number_of_nops
;
1544 switch (fragP
->fr_type
)
1549 case rs_machine_dependent
:
1550 /* Allow NOP padding for jumps and calls. */
1551 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
1552 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
1559 /* We need to decide which NOP sequence to use for 32bit and
1560 64bit. When -mtune= is used:
1562 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1563 PROCESSOR_GENERIC32, f32_patt will be used.
1564 2. For the rest, alt_patt will be used.
1566 When -mtune= isn't used, alt_patt will be used if
1567 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1570 When -march= or .arch is used, we can't use anything beyond
1571 cpu_arch_isa_flags. */
1573 if (flag_code
== CODE_16BIT
)
1576 max_single_nop_size
= sizeof (f16_patt
) / sizeof (f16_patt
[0]);
1577 /* Limit number of NOPs to 2 in 16-bit mode. */
1578 max_number_of_nops
= 2;
1582 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1584 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1585 switch (cpu_arch_tune
)
1587 case PROCESSOR_UNKNOWN
:
1588 /* We use cpu_arch_isa_flags to check if we SHOULD
1589 optimize with nops. */
1590 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1595 case PROCESSOR_PENTIUM4
:
1596 case PROCESSOR_NOCONA
:
1597 case PROCESSOR_CORE
:
1598 case PROCESSOR_CORE2
:
1599 case PROCESSOR_COREI7
:
1600 case PROCESSOR_GENERIC64
:
1602 case PROCESSOR_ATHLON
:
1604 case PROCESSOR_AMDFAM10
:
1606 case PROCESSOR_ZNVER
:
1610 case PROCESSOR_I386
:
1611 case PROCESSOR_I486
:
1612 case PROCESSOR_PENTIUM
:
1613 case PROCESSOR_PENTIUMPRO
:
1614 case PROCESSOR_IAMCU
:
1615 case PROCESSOR_GENERIC32
:
1622 switch (fragP
->tc_frag_data
.tune
)
1624 case PROCESSOR_UNKNOWN
:
1625 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1626 PROCESSOR_UNKNOWN. */
1630 case PROCESSOR_I386
:
1631 case PROCESSOR_I486
:
1632 case PROCESSOR_PENTIUM
:
1633 case PROCESSOR_IAMCU
:
1635 case PROCESSOR_ATHLON
:
1637 case PROCESSOR_AMDFAM10
:
1639 case PROCESSOR_ZNVER
:
1641 case PROCESSOR_GENERIC32
:
1642 /* We use cpu_arch_isa_flags to check if we CAN optimize
1644 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1649 case PROCESSOR_PENTIUMPRO
:
1650 case PROCESSOR_PENTIUM4
:
1651 case PROCESSOR_NOCONA
:
1652 case PROCESSOR_CORE
:
1653 case PROCESSOR_CORE2
:
1654 case PROCESSOR_COREI7
:
1655 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1660 case PROCESSOR_GENERIC64
:
1666 if (patt
== f32_patt
)
1668 max_single_nop_size
= sizeof (f32_patt
) / sizeof (f32_patt
[0]);
1669 /* Limit number of NOPs to 2 for older processors. */
1670 max_number_of_nops
= 2;
1674 max_single_nop_size
= sizeof (alt_patt
) / sizeof (alt_patt
[0]);
1675 /* Limit number of NOPs to 7 for newer processors. */
1676 max_number_of_nops
= 7;
1681 limit
= max_single_nop_size
;
1683 if (fragP
->fr_type
== rs_fill_nop
)
1685 /* Output NOPs for .nop directive. */
1686 if (limit
> max_single_nop_size
)
1688 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1689 _("invalid single nop size: %d "
1690 "(expect within [0, %d])"),
1691 limit
, max_single_nop_size
);
1695 else if (fragP
->fr_type
!= rs_machine_dependent
)
1696 fragP
->fr_var
= count
;
1698 if ((count
/ max_single_nop_size
) > max_number_of_nops
)
1700 /* Generate jump over NOPs. */
1701 offsetT disp
= count
- 2;
1702 if (fits_in_imm7 (disp
))
1704 /* Use "jmp disp8" if possible. */
1706 where
[0] = jump_disp8
[0];
1712 unsigned int size_of_jump
;
1714 if (flag_code
== CODE_16BIT
)
1716 where
[0] = jump16_disp32
[0];
1717 where
[1] = jump16_disp32
[1];
1722 where
[0] = jump32_disp32
[0];
1726 count
-= size_of_jump
+ 4;
1727 if (!fits_in_imm31 (count
))
1729 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1730 _("jump over nop padding out of range"));
1734 md_number_to_chars (where
+ size_of_jump
, count
, 4);
1735 where
+= size_of_jump
+ 4;
1739 /* Generate multiple NOPs. */
1740 i386_output_nops (where
, patt
, count
, limit
);
1744 operand_type_all_zero (const union i386_operand_type
*x
)
1746 switch (ARRAY_SIZE(x
->array
))
1757 return !x
->array
[0];
1764 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1766 switch (ARRAY_SIZE(x
->array
))
1782 x
->bitfield
.class = ClassNone
;
1783 x
->bitfield
.instance
= InstanceNone
;
1787 operand_type_equal (const union i386_operand_type
*x
,
1788 const union i386_operand_type
*y
)
1790 switch (ARRAY_SIZE(x
->array
))
1793 if (x
->array
[2] != y
->array
[2])
1797 if (x
->array
[1] != y
->array
[1])
1801 return x
->array
[0] == y
->array
[0];
1809 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1811 switch (ARRAY_SIZE(x
->array
))
1826 return !x
->array
[0];
1833 cpu_flags_equal (const union i386_cpu_flags
*x
,
1834 const union i386_cpu_flags
*y
)
1836 switch (ARRAY_SIZE(x
->array
))
1839 if (x
->array
[3] != y
->array
[3])
1843 if (x
->array
[2] != y
->array
[2])
1847 if (x
->array
[1] != y
->array
[1])
1851 return x
->array
[0] == y
->array
[0];
1859 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1861 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1862 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1865 static INLINE i386_cpu_flags
1866 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1868 switch (ARRAY_SIZE (x
.array
))
1871 x
.array
[3] &= y
.array
[3];
1874 x
.array
[2] &= y
.array
[2];
1877 x
.array
[1] &= y
.array
[1];
1880 x
.array
[0] &= y
.array
[0];
1888 static INLINE i386_cpu_flags
1889 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1891 switch (ARRAY_SIZE (x
.array
))
1894 x
.array
[3] |= y
.array
[3];
1897 x
.array
[2] |= y
.array
[2];
1900 x
.array
[1] |= y
.array
[1];
1903 x
.array
[0] |= y
.array
[0];
1911 static INLINE i386_cpu_flags
1912 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1914 switch (ARRAY_SIZE (x
.array
))
1917 x
.array
[3] &= ~y
.array
[3];
1920 x
.array
[2] &= ~y
.array
[2];
1923 x
.array
[1] &= ~y
.array
[1];
1926 x
.array
[0] &= ~y
.array
[0];
1934 static const i386_cpu_flags avx512
= CPU_ANY_AVX512F_FLAGS
;
1936 #define CPU_FLAGS_ARCH_MATCH 0x1
1937 #define CPU_FLAGS_64BIT_MATCH 0x2
1939 #define CPU_FLAGS_PERFECT_MATCH \
1940 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_64BIT_MATCH)
1942 /* Return CPU flags match bits. */
1945 cpu_flags_match (const insn_template
*t
)
1947 i386_cpu_flags x
= t
->cpu_flags
;
1948 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1950 x
.bitfield
.cpu64
= 0;
1951 x
.bitfield
.cpuno64
= 0;
1953 if (cpu_flags_all_zero (&x
))
1955 /* This instruction is available on all archs. */
1956 match
|= CPU_FLAGS_ARCH_MATCH
;
1960 /* This instruction is available only on some archs. */
1961 i386_cpu_flags cpu
= cpu_arch_flags
;
1963 /* AVX512VL is no standalone feature - match it and then strip it. */
1964 if (x
.bitfield
.cpuavx512vl
&& !cpu
.bitfield
.cpuavx512vl
)
1966 x
.bitfield
.cpuavx512vl
= 0;
1968 /* AVX and AVX2 present at the same time express an operand size
1969 dependency - strip AVX2 for the purposes here. The operand size
1970 dependent check occurs in check_vecOperands(). */
1971 if (x
.bitfield
.cpuavx
&& x
.bitfield
.cpuavx2
)
1972 x
.bitfield
.cpuavx2
= 0;
1974 cpu
= cpu_flags_and (x
, cpu
);
1975 if (!cpu_flags_all_zero (&cpu
))
1977 if (x
.bitfield
.cpuavx
)
1979 /* We need to check a few extra flags with AVX. */
1980 if (cpu
.bitfield
.cpuavx
1981 && (!t
->opcode_modifier
.sse2avx
1982 || (sse2avx
&& !i
.prefix
[DATA_PREFIX
]))
1983 && (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1984 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1985 && (!x
.bitfield
.cpupclmul
|| cpu
.bitfield
.cpupclmul
))
1986 match
|= CPU_FLAGS_ARCH_MATCH
;
1988 else if (x
.bitfield
.cpuavx512f
)
1990 /* We need to check a few extra flags with AVX512F. */
1991 if (cpu
.bitfield
.cpuavx512f
1992 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1993 && (!x
.bitfield
.cpuvaes
|| cpu
.bitfield
.cpuvaes
)
1994 && (!x
.bitfield
.cpuvpclmulqdq
|| cpu
.bitfield
.cpuvpclmulqdq
))
1995 match
|= CPU_FLAGS_ARCH_MATCH
;
1998 match
|= CPU_FLAGS_ARCH_MATCH
;
2004 static INLINE i386_operand_type
2005 operand_type_and (i386_operand_type x
, i386_operand_type y
)
2007 if (x
.bitfield
.class != y
.bitfield
.class)
2008 x
.bitfield
.class = ClassNone
;
2009 if (x
.bitfield
.instance
!= y
.bitfield
.instance
)
2010 x
.bitfield
.instance
= InstanceNone
;
2012 switch (ARRAY_SIZE (x
.array
))
2015 x
.array
[2] &= y
.array
[2];
2018 x
.array
[1] &= y
.array
[1];
2021 x
.array
[0] &= y
.array
[0];
2029 static INLINE i386_operand_type
2030 operand_type_and_not (i386_operand_type x
, i386_operand_type y
)
2032 gas_assert (y
.bitfield
.class == ClassNone
);
2033 gas_assert (y
.bitfield
.instance
== InstanceNone
);
2035 switch (ARRAY_SIZE (x
.array
))
2038 x
.array
[2] &= ~y
.array
[2];
2041 x
.array
[1] &= ~y
.array
[1];
2044 x
.array
[0] &= ~y
.array
[0];
2052 static INLINE i386_operand_type
2053 operand_type_or (i386_operand_type x
, i386_operand_type y
)
2055 gas_assert (x
.bitfield
.class == ClassNone
||
2056 y
.bitfield
.class == ClassNone
||
2057 x
.bitfield
.class == y
.bitfield
.class);
2058 gas_assert (x
.bitfield
.instance
== InstanceNone
||
2059 y
.bitfield
.instance
== InstanceNone
||
2060 x
.bitfield
.instance
== y
.bitfield
.instance
);
2062 switch (ARRAY_SIZE (x
.array
))
2065 x
.array
[2] |= y
.array
[2];
2068 x
.array
[1] |= y
.array
[1];
2071 x
.array
[0] |= y
.array
[0];
2079 static INLINE i386_operand_type
2080 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
2082 gas_assert (y
.bitfield
.class == ClassNone
);
2083 gas_assert (y
.bitfield
.instance
== InstanceNone
);
2085 switch (ARRAY_SIZE (x
.array
))
2088 x
.array
[2] ^= y
.array
[2];
2091 x
.array
[1] ^= y
.array
[1];
2094 x
.array
[0] ^= y
.array
[0];
2102 static const i386_operand_type disp16_32
= OPERAND_TYPE_DISP16_32
;
2103 static const i386_operand_type anydisp
= OPERAND_TYPE_ANYDISP
;
2104 static const i386_operand_type anyimm
= OPERAND_TYPE_ANYIMM
;
2105 static const i386_operand_type regxmm
= OPERAND_TYPE_REGXMM
;
2106 static const i386_operand_type regmask
= OPERAND_TYPE_REGMASK
;
2107 static const i386_operand_type imm8
= OPERAND_TYPE_IMM8
;
2108 static const i386_operand_type imm8s
= OPERAND_TYPE_IMM8S
;
2109 static const i386_operand_type imm16
= OPERAND_TYPE_IMM16
;
2110 static const i386_operand_type imm32
= OPERAND_TYPE_IMM32
;
2111 static const i386_operand_type imm32s
= OPERAND_TYPE_IMM32S
;
2112 static const i386_operand_type imm64
= OPERAND_TYPE_IMM64
;
2113 static const i386_operand_type imm16_32
= OPERAND_TYPE_IMM16_32
;
2114 static const i386_operand_type imm16_32s
= OPERAND_TYPE_IMM16_32S
;
2115 static const i386_operand_type imm16_32_32s
= OPERAND_TYPE_IMM16_32_32S
;
2126 operand_type_check (i386_operand_type t
, enum operand_type c
)
2131 return t
.bitfield
.class == Reg
;
2134 return (t
.bitfield
.imm8
2138 || t
.bitfield
.imm32s
2139 || t
.bitfield
.imm64
);
2142 return (t
.bitfield
.disp8
2143 || t
.bitfield
.disp16
2144 || t
.bitfield
.disp32
2145 || t
.bitfield
.disp32s
2146 || t
.bitfield
.disp64
);
2149 return (t
.bitfield
.disp8
2150 || t
.bitfield
.disp16
2151 || t
.bitfield
.disp32
2152 || t
.bitfield
.disp32s
2153 || t
.bitfield
.disp64
2154 || t
.bitfield
.baseindex
);
2163 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
2164 between operand GIVEN and opeand WANTED for instruction template T. */
2167 match_operand_size (const insn_template
*t
, unsigned int wanted
,
2170 return !((i
.types
[given
].bitfield
.byte
2171 && !t
->operand_types
[wanted
].bitfield
.byte
)
2172 || (i
.types
[given
].bitfield
.word
2173 && !t
->operand_types
[wanted
].bitfield
.word
)
2174 || (i
.types
[given
].bitfield
.dword
2175 && !t
->operand_types
[wanted
].bitfield
.dword
)
2176 || (i
.types
[given
].bitfield
.qword
2177 && !t
->operand_types
[wanted
].bitfield
.qword
)
2178 || (i
.types
[given
].bitfield
.tbyte
2179 && !t
->operand_types
[wanted
].bitfield
.tbyte
));
2182 /* Return 1 if there is no conflict in SIMD register between operand
2183 GIVEN and opeand WANTED for instruction template T. */
2186 match_simd_size (const insn_template
*t
, unsigned int wanted
,
2189 return !((i
.types
[given
].bitfield
.xmmword
2190 && !t
->operand_types
[wanted
].bitfield
.xmmword
)
2191 || (i
.types
[given
].bitfield
.ymmword
2192 && !t
->operand_types
[wanted
].bitfield
.ymmword
)
2193 || (i
.types
[given
].bitfield
.zmmword
2194 && !t
->operand_types
[wanted
].bitfield
.zmmword
)
2195 || (i
.types
[given
].bitfield
.tmmword
2196 && !t
->operand_types
[wanted
].bitfield
.tmmword
));
2199 /* Return 1 if there is no conflict in any size between operand GIVEN
2200 and opeand WANTED for instruction template T. */
2203 match_mem_size (const insn_template
*t
, unsigned int wanted
,
2206 return (match_operand_size (t
, wanted
, given
)
2207 && !((i
.types
[given
].bitfield
.unspecified
2208 && !i
.broadcast
.type
2209 && !t
->operand_types
[wanted
].bitfield
.unspecified
)
2210 || (i
.types
[given
].bitfield
.fword
2211 && !t
->operand_types
[wanted
].bitfield
.fword
)
2212 /* For scalar opcode templates to allow register and memory
2213 operands at the same time, some special casing is needed
2214 here. Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
2215 down-conversion vpmov*. */
2216 || ((t
->operand_types
[wanted
].bitfield
.class == RegSIMD
2217 && t
->operand_types
[wanted
].bitfield
.byte
2218 + t
->operand_types
[wanted
].bitfield
.word
2219 + t
->operand_types
[wanted
].bitfield
.dword
2220 + t
->operand_types
[wanted
].bitfield
.qword
2221 > !!t
->opcode_modifier
.broadcast
)
2222 ? (i
.types
[given
].bitfield
.xmmword
2223 || i
.types
[given
].bitfield
.ymmword
2224 || i
.types
[given
].bitfield
.zmmword
)
2225 : !match_simd_size(t
, wanted
, given
))));
2228 /* Return value has MATCH_STRAIGHT set if there is no size conflict on any
2229 operands for instruction template T, and it has MATCH_REVERSE set if there
2230 is no size conflict on any operands for the template with operands reversed
2231 (and the template allows for reversing in the first place). */
2233 #define MATCH_STRAIGHT 1
2234 #define MATCH_REVERSE 2
2236 static INLINE
unsigned int
2237 operand_size_match (const insn_template
*t
)
2239 unsigned int j
, match
= MATCH_STRAIGHT
;
2241 /* Don't check non-absolute jump instructions. */
2242 if (t
->opcode_modifier
.jump
2243 && t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
2246 /* Check memory and accumulator operand size. */
2247 for (j
= 0; j
< i
.operands
; j
++)
2249 if (i
.types
[j
].bitfield
.class != Reg
2250 && i
.types
[j
].bitfield
.class != RegSIMD
2251 && t
->opcode_modifier
.anysize
)
2254 if (t
->operand_types
[j
].bitfield
.class == Reg
2255 && !match_operand_size (t
, j
, j
))
2261 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2262 && !match_simd_size (t
, j
, j
))
2268 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2269 && (!match_operand_size (t
, j
, j
) || !match_simd_size (t
, j
, j
)))
2275 if ((i
.flags
[j
] & Operand_Mem
) && !match_mem_size (t
, j
, j
))
2282 if (!t
->opcode_modifier
.d
)
2286 i
.error
= operand_size_mismatch
;
2290 /* Check reverse. */
2291 gas_assert (i
.operands
>= 2 && i
.operands
<= 3);
2293 for (j
= 0; j
< i
.operands
; j
++)
2295 unsigned int given
= i
.operands
- j
- 1;
2297 if (t
->operand_types
[j
].bitfield
.class == Reg
2298 && !match_operand_size (t
, j
, given
))
2301 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2302 && !match_simd_size (t
, j
, given
))
2305 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2306 && (!match_operand_size (t
, j
, given
)
2307 || !match_simd_size (t
, j
, given
)))
2310 if ((i
.flags
[given
] & Operand_Mem
) && !match_mem_size (t
, j
, given
))
2314 return match
| MATCH_REVERSE
;
2318 operand_type_match (i386_operand_type overlap
,
2319 i386_operand_type given
)
2321 i386_operand_type temp
= overlap
;
2323 temp
.bitfield
.unspecified
= 0;
2324 temp
.bitfield
.byte
= 0;
2325 temp
.bitfield
.word
= 0;
2326 temp
.bitfield
.dword
= 0;
2327 temp
.bitfield
.fword
= 0;
2328 temp
.bitfield
.qword
= 0;
2329 temp
.bitfield
.tbyte
= 0;
2330 temp
.bitfield
.xmmword
= 0;
2331 temp
.bitfield
.ymmword
= 0;
2332 temp
.bitfield
.zmmword
= 0;
2333 temp
.bitfield
.tmmword
= 0;
2334 if (operand_type_all_zero (&temp
))
2337 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
)
2341 i
.error
= operand_type_mismatch
;
2345 /* If given types g0 and g1 are registers they must be of the same type
2346 unless the expected operand type register overlap is null.
2347 Some Intel syntax memory operand size checking also happens here. */
2350 operand_type_register_match (i386_operand_type g0
,
2351 i386_operand_type t0
,
2352 i386_operand_type g1
,
2353 i386_operand_type t1
)
2355 if (g0
.bitfield
.class != Reg
2356 && g0
.bitfield
.class != RegSIMD
2357 && (!operand_type_check (g0
, anymem
)
2358 || g0
.bitfield
.unspecified
2359 || (t0
.bitfield
.class != Reg
2360 && t0
.bitfield
.class != RegSIMD
)))
2363 if (g1
.bitfield
.class != Reg
2364 && g1
.bitfield
.class != RegSIMD
2365 && (!operand_type_check (g1
, anymem
)
2366 || g1
.bitfield
.unspecified
2367 || (t1
.bitfield
.class != Reg
2368 && t1
.bitfield
.class != RegSIMD
)))
2371 if (g0
.bitfield
.byte
== g1
.bitfield
.byte
2372 && g0
.bitfield
.word
== g1
.bitfield
.word
2373 && g0
.bitfield
.dword
== g1
.bitfield
.dword
2374 && g0
.bitfield
.qword
== g1
.bitfield
.qword
2375 && g0
.bitfield
.xmmword
== g1
.bitfield
.xmmword
2376 && g0
.bitfield
.ymmword
== g1
.bitfield
.ymmword
2377 && g0
.bitfield
.zmmword
== g1
.bitfield
.zmmword
)
2380 /* If expectations overlap in no more than a single size, all is fine. */
2381 g0
= operand_type_and (t0
, t1
);
2382 if (g0
.bitfield
.byte
2386 + g0
.bitfield
.xmmword
2387 + g0
.bitfield
.ymmword
2388 + g0
.bitfield
.zmmword
<= 1)
2391 i
.error
= register_type_mismatch
;
2396 static INLINE
unsigned int
2397 register_number (const reg_entry
*r
)
2399 unsigned int nr
= r
->reg_num
;
2401 if (r
->reg_flags
& RegRex
)
2404 if (r
->reg_flags
& RegVRex
)
2410 static INLINE
unsigned int
2411 mode_from_disp_size (i386_operand_type t
)
2413 if (t
.bitfield
.disp8
)
2415 else if (t
.bitfield
.disp16
2416 || t
.bitfield
.disp32
2417 || t
.bitfield
.disp32s
)
2424 fits_in_signed_byte (addressT num
)
2426 return num
+ 0x80 <= 0xff;
2430 fits_in_unsigned_byte (addressT num
)
2436 fits_in_unsigned_word (addressT num
)
2438 return num
<= 0xffff;
2442 fits_in_signed_word (addressT num
)
2444 return num
+ 0x8000 <= 0xffff;
2448 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED
)
2453 return num
+ 0x80000000 <= 0xffffffff;
2455 } /* fits_in_signed_long() */
2458 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED
)
2463 return num
<= 0xffffffff;
2465 } /* fits_in_unsigned_long() */
2467 static INLINE valueT
extend_to_32bit_address (addressT num
)
2470 if (fits_in_unsigned_long(num
))
2471 return (num
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
2473 if (!fits_in_signed_long (num
))
2474 return num
& 0xffffffff;
2481 fits_in_disp8 (offsetT num
)
2483 int shift
= i
.memshift
;
2489 mask
= (1 << shift
) - 1;
2491 /* Return 0 if NUM isn't properly aligned. */
2495 /* Check if NUM will fit in 8bit after shift. */
2496 return fits_in_signed_byte (num
>> shift
);
2500 fits_in_imm4 (offsetT num
)
2502 return (num
& 0xf) == num
;
2505 static i386_operand_type
2506 smallest_imm_type (offsetT num
)
2508 i386_operand_type t
;
2510 operand_type_set (&t
, 0);
2511 t
.bitfield
.imm64
= 1;
2513 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
2515 /* This code is disabled on the 486 because all the Imm1 forms
2516 in the opcode table are slower on the i486. They're the
2517 versions with the implicitly specified single-position
2518 displacement, which has another syntax if you really want to
2520 t
.bitfield
.imm1
= 1;
2521 t
.bitfield
.imm8
= 1;
2522 t
.bitfield
.imm8s
= 1;
2523 t
.bitfield
.imm16
= 1;
2524 t
.bitfield
.imm32
= 1;
2525 t
.bitfield
.imm32s
= 1;
2527 else if (fits_in_signed_byte (num
))
2529 t
.bitfield
.imm8
= 1;
2530 t
.bitfield
.imm8s
= 1;
2531 t
.bitfield
.imm16
= 1;
2532 t
.bitfield
.imm32
= 1;
2533 t
.bitfield
.imm32s
= 1;
2535 else if (fits_in_unsigned_byte (num
))
2537 t
.bitfield
.imm8
= 1;
2538 t
.bitfield
.imm16
= 1;
2539 t
.bitfield
.imm32
= 1;
2540 t
.bitfield
.imm32s
= 1;
2542 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
2544 t
.bitfield
.imm16
= 1;
2545 t
.bitfield
.imm32
= 1;
2546 t
.bitfield
.imm32s
= 1;
2548 else if (fits_in_signed_long (num
))
2550 t
.bitfield
.imm32
= 1;
2551 t
.bitfield
.imm32s
= 1;
2553 else if (fits_in_unsigned_long (num
))
2554 t
.bitfield
.imm32
= 1;
2560 offset_in_range (offsetT val
, int size
)
2566 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
2567 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
2569 case 4: mask
= ((addressT
) 1 << 32) - 1; break;
2571 case sizeof (val
): return val
;
2575 if ((val
& ~mask
) != 0 && (-val
& ~mask
) != 0)
2578 char masked_buf
[128];
2580 /* Coded this way in order to ease translation. */
2581 sprintf_vma (val_buf
, val
);
2582 sprintf_vma (masked_buf
, val
& mask
);
2583 as_warn (_("0x%s shortened to 0x%s"), val_buf
, masked_buf
);
2599 a. PREFIX_EXIST if attempting to add a prefix where one from the
2600 same class already exists.
2601 b. PREFIX_LOCK if lock prefix is added.
2602 c. PREFIX_REP if rep/repne prefix is added.
2603 d. PREFIX_DS if ds prefix is added.
2604 e. PREFIX_OTHER if other prefix is added.
2607 static enum PREFIX_GROUP
2608 add_prefix (unsigned int prefix
)
2610 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2613 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2614 && flag_code
== CODE_64BIT
)
2616 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2617 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_R
)
2618 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_X
)
2619 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_B
))
2630 case DS_PREFIX_OPCODE
:
2633 case CS_PREFIX_OPCODE
:
2634 case ES_PREFIX_OPCODE
:
2635 case FS_PREFIX_OPCODE
:
2636 case GS_PREFIX_OPCODE
:
2637 case SS_PREFIX_OPCODE
:
2641 case REPNE_PREFIX_OPCODE
:
2642 case REPE_PREFIX_OPCODE
:
2647 case LOCK_PREFIX_OPCODE
:
2656 case ADDR_PREFIX_OPCODE
:
2660 case DATA_PREFIX_OPCODE
:
2664 if (i
.prefix
[q
] != 0)
2672 i
.prefix
[q
] |= prefix
;
2675 as_bad (_("same type of prefix used twice"));
2681 update_code_flag (int value
, int check
)
2683 PRINTF_LIKE ((*as_error
));
2685 flag_code
= (enum flag_code
) value
;
2686 if (flag_code
== CODE_64BIT
)
2688 cpu_arch_flags
.bitfield
.cpu64
= 1;
2689 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2693 cpu_arch_flags
.bitfield
.cpu64
= 0;
2694 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2696 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2699 as_error
= as_fatal
;
2702 (*as_error
) (_("64bit mode not supported on `%s'."),
2703 cpu_arch_name
? cpu_arch_name
: default_arch
);
2705 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2708 as_error
= as_fatal
;
2711 (*as_error
) (_("32bit mode not supported on `%s'."),
2712 cpu_arch_name
? cpu_arch_name
: default_arch
);
2714 stackop_size
= '\0';
/* Non-checking wrapper around update_code_flag.  */

static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2724 set_16bit_gcc_code_flag (int new_code_flag
)
2726 flag_code
= (enum flag_code
) new_code_flag
;
2727 if (flag_code
!= CODE_16BIT
)
2729 cpu_arch_flags
.bitfield
.cpu64
= 0;
2730 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2731 stackop_size
= LONG_MNEM_SUFFIX
;
2735 set_intel_syntax (int syntax_flag
)
2737 /* Find out if register prefixing is specified. */
2738 int ask_naked_reg
= 0;
2741 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2744 int e
= get_symbol_name (&string
);
2746 if (strcmp (string
, "prefix") == 0)
2748 else if (strcmp (string
, "noprefix") == 0)
2751 as_bad (_("bad argument to syntax directive."));
2752 (void) restore_line_pointer (e
);
2754 demand_empty_rest_of_line ();
2756 intel_syntax
= syntax_flag
;
2758 if (ask_naked_reg
== 0)
2759 allow_naked_reg
= (intel_syntax
2760 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2762 allow_naked_reg
= (ask_naked_reg
< 0);
2764 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2766 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2767 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2768 register_prefix
= allow_naked_reg
? "" : "%";
2772 set_intel_mnemonic (int mnemonic_flag
)
2774 intel_mnemonic
= mnemonic_flag
;
2778 set_allow_index_reg (int flag
)
2780 allow_index_reg
= flag
;
2784 set_check (int what
)
2786 enum check_kind
*kind
;
2791 kind
= &operand_check
;
2802 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2805 int e
= get_symbol_name (&string
);
2807 if (strcmp (string
, "none") == 0)
2809 else if (strcmp (string
, "warning") == 0)
2810 *kind
= check_warning
;
2811 else if (strcmp (string
, "error") == 0)
2812 *kind
= check_error
;
2814 as_bad (_("bad argument to %s_check directive."), str
);
2815 (void) restore_line_pointer (e
);
2818 as_bad (_("missing argument for %s_check directive"), str
);
2820 demand_empty_rest_of_line ();
2824 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2825 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2827 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2828 static const char *arch
;
2830 /* Intel MCU is only supported on ELF. */
2836 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2837 use default_arch. */
2838 arch
= cpu_arch_name
;
2840 arch
= default_arch
;
2843 /* If we are targeting Intel MCU, we must enable it. */
2844 if ((get_elf_backend_data (stdoutput
)->elf_machine_code
== EM_IAMCU
)
2845 == new_flag
.bitfield
.cpuiamcu
)
2848 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2853 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2857 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2860 int e
= get_symbol_name (&string
);
2862 i386_cpu_flags flags
;
2864 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2866 if (strcmp (string
, cpu_arch
[j
].name
) == 0)
2870 check_cpu_arch_compatible (string
, cpu_arch
[j
].flags
);
2872 cpu_arch_name
= cpu_arch
[j
].name
;
2873 cpu_sub_arch_name
= NULL
;
2874 cpu_arch_flags
= cpu_arch
[j
].flags
;
2875 if (flag_code
== CODE_64BIT
)
2877 cpu_arch_flags
.bitfield
.cpu64
= 1;
2878 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2882 cpu_arch_flags
.bitfield
.cpu64
= 0;
2883 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2885 cpu_arch_isa
= cpu_arch
[j
].type
;
2886 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
2887 if (!cpu_arch_tune_set
)
2889 cpu_arch_tune
= cpu_arch_isa
;
2890 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2895 flags
= cpu_flags_or (cpu_arch_flags
,
2898 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2900 if (cpu_sub_arch_name
)
2902 char *name
= cpu_sub_arch_name
;
2903 cpu_sub_arch_name
= concat (name
,
2905 (const char *) NULL
);
2909 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
2910 cpu_arch_flags
= flags
;
2911 cpu_arch_isa_flags
= flags
;
2915 = cpu_flags_or (cpu_arch_isa_flags
,
2917 (void) restore_line_pointer (e
);
2918 demand_empty_rest_of_line ();
2923 if (*string
== '.' && j
>= ARRAY_SIZE (cpu_arch
))
2925 /* Disable an ISA extension. */
2926 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
2927 if (strcmp (string
+ 1, cpu_noarch
[j
].name
) == 0)
2929 flags
= cpu_flags_and_not (cpu_arch_flags
,
2930 cpu_noarch
[j
].flags
);
2931 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2933 if (cpu_sub_arch_name
)
2935 char *name
= cpu_sub_arch_name
;
2936 cpu_sub_arch_name
= concat (name
, string
,
2937 (const char *) NULL
);
2941 cpu_sub_arch_name
= xstrdup (string
);
2942 cpu_arch_flags
= flags
;
2943 cpu_arch_isa_flags
= flags
;
2945 (void) restore_line_pointer (e
);
2946 demand_empty_rest_of_line ();
2950 j
= ARRAY_SIZE (cpu_arch
);
2953 if (j
>= ARRAY_SIZE (cpu_arch
))
2954 as_bad (_("no such architecture: `%s'"), string
);
2956 *input_line_pointer
= e
;
2959 as_bad (_("missing cpu architecture"));
2961 no_cond_jump_promotion
= 0;
2962 if (*input_line_pointer
== ','
2963 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2968 ++input_line_pointer
;
2969 e
= get_symbol_name (&string
);
2971 if (strcmp (string
, "nojumps") == 0)
2972 no_cond_jump_promotion
= 1;
2973 else if (strcmp (string
, "jumps") == 0)
2976 as_bad (_("no such architecture modifier: `%s'"), string
);
2978 (void) restore_line_pointer (e
);
2981 demand_empty_rest_of_line ();
2984 enum bfd_architecture
2987 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2989 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2990 || flag_code
== CODE_64BIT
)
2991 as_fatal (_("Intel MCU is 32bit ELF only"));
2992 return bfd_arch_iamcu
;
2995 return bfd_arch_i386
;
3001 if (startswith (default_arch
, "x86_64"))
3003 if (default_arch
[6] == '\0')
3004 return bfd_mach_x86_64
;
3006 return bfd_mach_x64_32
;
3008 else if (!strcmp (default_arch
, "i386")
3009 || !strcmp (default_arch
, "iamcu"))
3011 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
3013 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
)
3014 as_fatal (_("Intel MCU is 32bit ELF only"));
3015 return bfd_mach_i386_iamcu
;
3018 return bfd_mach_i386_i386
;
3021 as_fatal (_("unknown architecture"));
3027 /* Support pseudo prefixes like {disp32}. */
3028 lex_type
['{'] = LEX_BEGIN_NAME
;
3030 /* Initialize op_hash hash table. */
3031 op_hash
= str_htab_create ();
3034 const insn_template
*optab
;
3035 templates
*core_optab
;
3037 /* Setup for loop. */
3039 core_optab
= XNEW (templates
);
3040 core_optab
->start
= optab
;
3045 if (optab
->name
== NULL
3046 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
3048 /* different name --> ship out current template list;
3049 add to hash table; & begin anew. */
3050 core_optab
->end
= optab
;
3051 if (str_hash_insert (op_hash
, (optab
- 1)->name
, core_optab
, 0))
3052 as_fatal (_("duplicate %s"), (optab
- 1)->name
);
3054 if (optab
->name
== NULL
)
3056 core_optab
= XNEW (templates
);
3057 core_optab
->start
= optab
;
3062 /* Initialize reg_hash hash table. */
3063 reg_hash
= str_htab_create ();
3065 const reg_entry
*regtab
;
3066 unsigned int regtab_size
= i386_regtab_size
;
3068 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
3070 switch (regtab
->reg_type
.bitfield
.class)
3073 if (regtab
->reg_type
.bitfield
.dword
)
3075 if (regtab
->reg_type
.bitfield
.instance
== Accum
)
3078 else if (regtab
->reg_type
.bitfield
.tbyte
)
3080 /* There's no point inserting st(<N>) in the hash table, as
3081 parentheses aren't included in register_chars[] anyway. */
3082 if (regtab
->reg_type
.bitfield
.instance
!= Accum
)
3089 switch (regtab
->reg_num
)
3091 case 0: reg_es
= regtab
; break;
3092 case 2: reg_ss
= regtab
; break;
3093 case 3: reg_ds
= regtab
; break;
3098 if (!regtab
->reg_num
)
3103 if (str_hash_insert (reg_hash
, regtab
->reg_name
, regtab
, 0) != NULL
)
3104 as_fatal (_("duplicate %s"), regtab
->reg_name
);
3108 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
3113 for (c
= 0; c
< 256; c
++)
3115 if (ISDIGIT (c
) || ISLOWER (c
))
3117 mnemonic_chars
[c
] = c
;
3118 register_chars
[c
] = c
;
3119 operand_chars
[c
] = c
;
3121 else if (ISUPPER (c
))
3123 mnemonic_chars
[c
] = TOLOWER (c
);
3124 register_chars
[c
] = mnemonic_chars
[c
];
3125 operand_chars
[c
] = c
;
3127 else if (c
== '{' || c
== '}')
3129 mnemonic_chars
[c
] = c
;
3130 operand_chars
[c
] = c
;
3132 #ifdef SVR4_COMMENT_CHARS
3133 else if (c
== '\\' && strchr (i386_comment_chars
, '/'))
3134 operand_chars
[c
] = c
;
3137 if (ISALPHA (c
) || ISDIGIT (c
))
3138 identifier_chars
[c
] = c
;
3141 identifier_chars
[c
] = c
;
3142 operand_chars
[c
] = c
;
3147 identifier_chars
['@'] = '@';
3150 identifier_chars
['?'] = '?';
3151 operand_chars
['?'] = '?';
3153 mnemonic_chars
['_'] = '_';
3154 mnemonic_chars
['-'] = '-';
3155 mnemonic_chars
['.'] = '.';
3156 identifier_chars
['_'] = '_';
3157 identifier_chars
['.'] = '.';
3159 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
3160 operand_chars
[(unsigned char) *p
] = *p
;
3163 if (flag_code
== CODE_64BIT
)
3165 #if defined (OBJ_COFF) && defined (TE_PE)
3166 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
3169 x86_dwarf2_return_column
= 16;
3171 x86_cie_data_alignment
= -8;
3175 x86_dwarf2_return_column
= 8;
3176 x86_cie_data_alignment
= -4;
3179 /* NB: FUSED_JCC_PADDING frag must have sufficient room so that it
3180 can be turned into BRANCH_PREFIX frag. */
3181 if (align_branch_prefix_size
> MAX_FUSED_JCC_PADDING_SIZE
)
3186 i386_print_statistics (FILE *file
)
3188 htab_print_statistics (file
, "i386 opcode", op_hash
);
3189 htab_print_statistics (file
, "i386 register", reg_hash
);
3194 /* Debugging routines for md_assemble. */
3195 static void pte (insn_template
*);
3196 static void pt (i386_operand_type
);
3197 static void pe (expressionS
*);
3198 static void ps (symbolS
*);
3201 pi (const char *line
, i386_insn
*x
)
3205 fprintf (stdout
, "%s: template ", line
);
3207 fprintf (stdout
, " address: base %s index %s scale %x\n",
3208 x
->base_reg
? x
->base_reg
->reg_name
: "none",
3209 x
->index_reg
? x
->index_reg
->reg_name
: "none",
3210 x
->log2_scale_factor
);
3211 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
3212 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
3213 fprintf (stdout
, " sib: base %x index %x scale %x\n",
3214 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
3215 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
3216 (x
->rex
& REX_W
) != 0,
3217 (x
->rex
& REX_R
) != 0,
3218 (x
->rex
& REX_X
) != 0,
3219 (x
->rex
& REX_B
) != 0);
3220 for (j
= 0; j
< x
->operands
; j
++)
3222 fprintf (stdout
, " #%d: ", j
+ 1);
3224 fprintf (stdout
, "\n");
3225 if (x
->types
[j
].bitfield
.class == Reg
3226 || x
->types
[j
].bitfield
.class == RegMMX
3227 || x
->types
[j
].bitfield
.class == RegSIMD
3228 || x
->types
[j
].bitfield
.class == RegMask
3229 || x
->types
[j
].bitfield
.class == SReg
3230 || x
->types
[j
].bitfield
.class == RegCR
3231 || x
->types
[j
].bitfield
.class == RegDR
3232 || x
->types
[j
].bitfield
.class == RegTR
3233 || x
->types
[j
].bitfield
.class == RegBND
)
3234 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
3235 if (operand_type_check (x
->types
[j
], imm
))
3237 if (operand_type_check (x
->types
[j
], disp
))
3238 pe (x
->op
[j
].disps
);
3243 pte (insn_template
*t
)
3245 static const unsigned char opc_pfx
[] = { 0, 0x66, 0xf3, 0xf2 };
3246 static const char *const opc_spc
[] = {
3247 NULL
, "0f", "0f38", "0f3a", NULL
, "evexmap5", "evexmap6", NULL
,
3248 "XOP08", "XOP09", "XOP0A",
3252 fprintf (stdout
, " %d operands ", t
->operands
);
3253 if (opc_pfx
[t
->opcode_modifier
.opcodeprefix
])
3254 fprintf (stdout
, "pfx %x ", opc_pfx
[t
->opcode_modifier
.opcodeprefix
]);
3255 if (opc_spc
[t
->opcode_modifier
.opcodespace
])
3256 fprintf (stdout
, "space %s ", opc_spc
[t
->opcode_modifier
.opcodespace
]);
3257 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
3258 if (t
->extension_opcode
!= None
)
3259 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
3260 if (t
->opcode_modifier
.d
)
3261 fprintf (stdout
, "D");
3262 if (t
->opcode_modifier
.w
)
3263 fprintf (stdout
, "W");
3264 fprintf (stdout
, "\n");
3265 for (j
= 0; j
< t
->operands
; j
++)
3267 fprintf (stdout
, " #%d type ", j
+ 1);
3268 pt (t
->operand_types
[j
]);
3269 fprintf (stdout
, "\n");
3276 fprintf (stdout
, " operation %d\n", e
->X_op
);
3277 fprintf (stdout
, " add_number %" BFD_VMA_FMT
"d (%" BFD_VMA_FMT
"x)\n",
3278 e
->X_add_number
, e
->X_add_number
);
3279 if (e
->X_add_symbol
)
3281 fprintf (stdout
, " add_symbol ");
3282 ps (e
->X_add_symbol
);
3283 fprintf (stdout
, "\n");
3287 fprintf (stdout
, " op_symbol ");
3288 ps (e
->X_op_symbol
);
3289 fprintf (stdout
, "\n");
3296 fprintf (stdout
, "%s type %s%s",
3298 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
3299 segment_name (S_GET_SEGMENT (s
)));
3302 static struct type_name
3304 i386_operand_type mask
;
3307 const type_names
[] =
3309 { OPERAND_TYPE_REG8
, "r8" },
3310 { OPERAND_TYPE_REG16
, "r16" },
3311 { OPERAND_TYPE_REG32
, "r32" },
3312 { OPERAND_TYPE_REG64
, "r64" },
3313 { OPERAND_TYPE_ACC8
, "acc8" },
3314 { OPERAND_TYPE_ACC16
, "acc16" },
3315 { OPERAND_TYPE_ACC32
, "acc32" },
3316 { OPERAND_TYPE_ACC64
, "acc64" },
3317 { OPERAND_TYPE_IMM8
, "i8" },
3318 { OPERAND_TYPE_IMM8
, "i8s" },
3319 { OPERAND_TYPE_IMM16
, "i16" },
3320 { OPERAND_TYPE_IMM32
, "i32" },
3321 { OPERAND_TYPE_IMM32S
, "i32s" },
3322 { OPERAND_TYPE_IMM64
, "i64" },
3323 { OPERAND_TYPE_IMM1
, "i1" },
3324 { OPERAND_TYPE_BASEINDEX
, "BaseIndex" },
3325 { OPERAND_TYPE_DISP8
, "d8" },
3326 { OPERAND_TYPE_DISP16
, "d16" },
3327 { OPERAND_TYPE_DISP32
, "d32" },
3328 { OPERAND_TYPE_DISP32S
, "d32s" },
3329 { OPERAND_TYPE_DISP64
, "d64" },
3330 { OPERAND_TYPE_INOUTPORTREG
, "InOutPortReg" },
3331 { OPERAND_TYPE_SHIFTCOUNT
, "ShiftCount" },
3332 { OPERAND_TYPE_CONTROL
, "control reg" },
3333 { OPERAND_TYPE_TEST
, "test reg" },
3334 { OPERAND_TYPE_DEBUG
, "debug reg" },
3335 { OPERAND_TYPE_FLOATREG
, "FReg" },
3336 { OPERAND_TYPE_FLOATACC
, "FAcc" },
3337 { OPERAND_TYPE_SREG
, "SReg" },
3338 { OPERAND_TYPE_REGMMX
, "rMMX" },
3339 { OPERAND_TYPE_REGXMM
, "rXMM" },
3340 { OPERAND_TYPE_REGYMM
, "rYMM" },
3341 { OPERAND_TYPE_REGZMM
, "rZMM" },
3342 { OPERAND_TYPE_REGTMM
, "rTMM" },
3343 { OPERAND_TYPE_REGMASK
, "Mask reg" },
3347 pt (i386_operand_type t
)
3350 i386_operand_type a
;
3352 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
3354 a
= operand_type_and (t
, type_names
[j
].mask
);
3355 if (operand_type_equal (&a
, &type_names
[j
].mask
))
3356 fprintf (stdout
, "%s, ", type_names
[j
].name
);
3361 #endif /* DEBUG386 */
3363 static bfd_reloc_code_real_type
3364 reloc (unsigned int size
,
3367 bfd_reloc_code_real_type other
)
3369 if (other
!= NO_RELOC
)
3371 reloc_howto_type
*rel
;
3376 case BFD_RELOC_X86_64_GOT32
:
3377 return BFD_RELOC_X86_64_GOT64
;
3379 case BFD_RELOC_X86_64_GOTPLT64
:
3380 return BFD_RELOC_X86_64_GOTPLT64
;
3382 case BFD_RELOC_X86_64_PLTOFF64
:
3383 return BFD_RELOC_X86_64_PLTOFF64
;
3385 case BFD_RELOC_X86_64_GOTPC32
:
3386 other
= BFD_RELOC_X86_64_GOTPC64
;
3388 case BFD_RELOC_X86_64_GOTPCREL
:
3389 other
= BFD_RELOC_X86_64_GOTPCREL64
;
3391 case BFD_RELOC_X86_64_TPOFF32
:
3392 other
= BFD_RELOC_X86_64_TPOFF64
;
3394 case BFD_RELOC_X86_64_DTPOFF32
:
3395 other
= BFD_RELOC_X86_64_DTPOFF64
;
3401 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3402 if (other
== BFD_RELOC_SIZE32
)
3405 other
= BFD_RELOC_SIZE64
;
3408 as_bad (_("there are no pc-relative size relocations"));
3414 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
3415 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
3418 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
3420 as_bad (_("unknown relocation (%u)"), other
);
3421 else if (size
!= bfd_get_reloc_size (rel
))
3422 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3423 bfd_get_reloc_size (rel
),
3425 else if (pcrel
&& !rel
->pc_relative
)
3426 as_bad (_("non-pc-relative relocation for pc-relative field"));
3427 else if ((rel
->complain_on_overflow
== complain_overflow_signed
3429 || (rel
->complain_on_overflow
== complain_overflow_unsigned
3431 as_bad (_("relocated field and relocation type differ in signedness"));
3440 as_bad (_("there are no unsigned pc-relative relocations"));
3443 case 1: return BFD_RELOC_8_PCREL
;
3444 case 2: return BFD_RELOC_16_PCREL
;
3445 case 4: return BFD_RELOC_32_PCREL
;
3446 case 8: return BFD_RELOC_64_PCREL
;
3448 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
3455 case 4: return BFD_RELOC_X86_64_32S
;
3460 case 1: return BFD_RELOC_8
;
3461 case 2: return BFD_RELOC_16
;
3462 case 4: return BFD_RELOC_32
;
3463 case 8: return BFD_RELOC_64
;
3465 as_bad (_("cannot do %s %u byte relocation"),
3466 sign
> 0 ? "signed" : "unsigned", size
);
3472 /* Here we decide which fixups can be adjusted to make them relative to
3473 the beginning of the section instead of the symbol. Basically we need
3474 to make sure that the dynamic relocations are done correctly, so in
3475 some cases we force the original symbol to be used. */
3478 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
3480 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3484 /* Don't adjust pc-relative references to merge sections in 64-bit
3486 if (use_rela_relocations
3487 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
3491 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3492 and changed later by validate_fix. */
3493 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
3494 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
3497 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3498 for size relocations. */
3499 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
3500 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
3501 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
3502 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
3503 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32X
3504 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
3505 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
3506 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
3507 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
3508 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
3509 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
3510 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
3511 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
3512 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
3513 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
3514 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
3515 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
3516 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCRELX
3517 || fixP
->fx_r_type
== BFD_RELOC_X86_64_REX_GOTPCRELX
3518 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
3519 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
3520 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
3521 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
3522 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
3523 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
3524 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
3525 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
3526 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
3527 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
3528 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
3529 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
3536 want_disp32 (const insn_template
*t
)
3538 return flag_code
!= CODE_64BIT
3539 || i
.prefix
[ADDR_PREFIX
]
3540 || (t
->base_opcode
== 0x8d
3541 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
3542 && (!i
.types
[1].bitfield
.qword
3543 || t
->opcode_modifier
.size
== SIZE32
));
/* Classify an Intel-syntax mnemonic for FPU memory operand sizing:
   0 = not an FPU math op, 1 = real op, 2 = integer op, 3 = control op.  */

static int
intel_float_operand (const char *mnemonic)
{
  /* Note that the value returned is meaningful only for opcodes with (memory)
     operands, hence the code here is free to improperly handle opcodes that
     have no operands (for better performance and smaller code). */

  if (mnemonic[0] != 'f')
    return 0; /* non-math */

  switch (mnemonic[1])
    {
    /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
       the fs segment override prefix not currently handled because no
       call path can make opcodes without operands get here */
    case 'i':
      return 2 /* integer op */;
    case 'l':
      if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
	return 3; /* fldcw/fldenv */
      break;
    case 'n':
      if (mnemonic[2] != 'o' /* fnop */)
	return 3; /* non-waiting control op */
      break;
    case 'r':
      if (mnemonic[2] == 's')
	return 3; /* frstor/frstpm */
      break;
    case 's':
      if (mnemonic[2] == 'a')
	return 3; /* fsave */
      if (mnemonic[2] == 't')
	{
	  switch (mnemonic[3])
	    {
	    case 'c': /* fstcw */
	    case 'd': /* fstdw */
	    case 'e': /* fstenv */
	    case 's': /* fsts[gw] */
	      return 3;
	    }
	}
      break;
    case 'x':
      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
	return 0; /* fxsave/fxrstor are not really math ops */
      break;
    }

  return 1;
}
3600 install_template (const insn_template
*t
)
3606 /* Note that for pseudo prefixes this produces a length of 1. But for them
3607 the length isn't interesting at all. */
3608 for (l
= 1; l
< 4; ++l
)
3609 if (!(t
->base_opcode
>> (8 * l
)))
3612 i
.opcode_length
= l
;
3615 /* Build the VEX prefix. */
3618 build_vex_prefix (const insn_template
*t
)
3620 unsigned int register_specifier
;
3621 unsigned int vector_length
;
3624 /* Check register specifier. */
3625 if (i
.vex
.register_specifier
)
3627 register_specifier
=
3628 ~register_number (i
.vex
.register_specifier
) & 0xf;
3629 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
3632 register_specifier
= 0xf;
3634 /* Use 2-byte VEX prefix by swapping destination and source operand
3635 if there are more than 1 register operand. */
3636 if (i
.reg_operands
> 1
3637 && i
.vec_encoding
!= vex_encoding_vex3
3638 && i
.dir_encoding
== dir_encoding_default
3639 && i
.operands
== i
.reg_operands
3640 && operand_type_equal (&i
.types
[0], &i
.types
[i
.operands
- 1])
3641 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
3642 && (i
.tm
.opcode_modifier
.load
|| i
.tm
.opcode_modifier
.d
)
3645 unsigned int xchg
= i
.operands
- 1;
3646 union i386_op temp_op
;
3647 i386_operand_type temp_type
;
3649 temp_type
= i
.types
[xchg
];
3650 i
.types
[xchg
] = i
.types
[0];
3651 i
.types
[0] = temp_type
;
3652 temp_op
= i
.op
[xchg
];
3653 i
.op
[xchg
] = i
.op
[0];
3656 gas_assert (i
.rm
.mode
== 3);
3660 i
.rm
.regmem
= i
.rm
.reg
;
3663 if (i
.tm
.opcode_modifier
.d
)
3664 i
.tm
.base_opcode
^= (i
.tm
.base_opcode
& 0xee) != 0x6e
3665 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
3666 else /* Use the next insn. */
3667 install_template (&t
[1]);
3670 /* Use 2-byte VEX prefix by swapping commutative source operands if there
3671 are no memory operands and at least 3 register ones. */
3672 if (i
.reg_operands
>= 3
3673 && i
.vec_encoding
!= vex_encoding_vex3
3674 && i
.reg_operands
== i
.operands
- i
.imm_operands
3675 && i
.tm
.opcode_modifier
.vex
3676 && i
.tm
.opcode_modifier
.commutative
3677 && (i
.tm
.opcode_modifier
.sse2avx
|| optimize
> 1)
3679 && i
.vex
.register_specifier
3680 && !(i
.vex
.register_specifier
->reg_flags
& RegRex
))
3682 unsigned int xchg
= i
.operands
- i
.reg_operands
;
3683 union i386_op temp_op
;
3684 i386_operand_type temp_type
;
3686 gas_assert (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
);
3687 gas_assert (!i
.tm
.opcode_modifier
.sae
);
3688 gas_assert (operand_type_equal (&i
.types
[i
.operands
- 2],
3689 &i
.types
[i
.operands
- 3]));
3690 gas_assert (i
.rm
.mode
== 3);
3692 temp_type
= i
.types
[xchg
];
3693 i
.types
[xchg
] = i
.types
[xchg
+ 1];
3694 i
.types
[xchg
+ 1] = temp_type
;
3695 temp_op
= i
.op
[xchg
];
3696 i
.op
[xchg
] = i
.op
[xchg
+ 1];
3697 i
.op
[xchg
+ 1] = temp_op
;
3700 xchg
= i
.rm
.regmem
| 8;
3701 i
.rm
.regmem
= ~register_specifier
& 0xf;
3702 gas_assert (!(i
.rm
.regmem
& 8));
3703 i
.vex
.register_specifier
+= xchg
- i
.rm
.regmem
;
3704 register_specifier
= ~xchg
& 0xf;
3707 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3708 vector_length
= avxscalar
;
3709 else if (i
.tm
.opcode_modifier
.vex
== VEX256
)
3715 /* Determine vector length from the last multi-length vector
3718 for (op
= t
->operands
; op
--;)
3719 if (t
->operand_types
[op
].bitfield
.xmmword
3720 && t
->operand_types
[op
].bitfield
.ymmword
3721 && i
.types
[op
].bitfield
.ymmword
)
3728 /* Check the REX.W bit and VEXW. */
3729 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3730 w
= (vexwig
== vexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3731 else if (i
.tm
.opcode_modifier
.vexw
)
3732 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3734 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: vexwig
== vexw1
) ? 1 : 0;
3736 /* Use 2-byte VEX prefix if possible. */
3738 && i
.vec_encoding
!= vex_encoding_vex3
3739 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
3740 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
3742 /* 2-byte VEX prefix. */
3746 i
.vex
.bytes
[0] = 0xc5;
3748 /* Check the REX.R bit. */
3749 r
= (i
.rex
& REX_R
) ? 0 : 1;
3750 i
.vex
.bytes
[1] = (r
<< 7
3751 | register_specifier
<< 3
3752 | vector_length
<< 2
3753 | i
.tm
.opcode_modifier
.opcodeprefix
);
3757 /* 3-byte VEX prefix. */
3760 switch (i
.tm
.opcode_modifier
.opcodespace
)
3765 i
.vex
.bytes
[0] = 0xc4;
3770 i
.vex
.bytes
[0] = 0x8f;
3776 /* The high 3 bits of the second VEX byte are 1's compliment
3777 of RXB bits from REX. */
3778 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | i
.tm
.opcode_modifier
.opcodespace
;
3780 i
.vex
.bytes
[2] = (w
<< 7
3781 | register_specifier
<< 3
3782 | vector_length
<< 2
3783 | i
.tm
.opcode_modifier
.opcodeprefix
);
3788 is_evex_encoding (const insn_template
*t
)
3790 return t
->opcode_modifier
.evex
|| t
->opcode_modifier
.disp8memshift
3791 || t
->opcode_modifier
.broadcast
|| t
->opcode_modifier
.masking
3792 || t
->opcode_modifier
.sae
;
3796 is_any_vex_encoding (const insn_template
*t
)
3798 return t
->opcode_modifier
.vex
|| is_evex_encoding (t
);
3801 /* Build the EVEX prefix. */
3804 build_evex_prefix (void)
3806 unsigned int register_specifier
, w
;
3807 rex_byte vrex_used
= 0;
3809 /* Check register specifier. */
3810 if (i
.vex
.register_specifier
)
3812 gas_assert ((i
.vrex
& REX_X
) == 0);
3814 register_specifier
= i
.vex
.register_specifier
->reg_num
;
3815 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
3816 register_specifier
+= 8;
3817 /* The upper 16 registers are encoded in the fourth byte of the
3819 if (!(i
.vex
.register_specifier
->reg_flags
& RegVRex
))
3820 i
.vex
.bytes
[3] = 0x8;
3821 register_specifier
= ~register_specifier
& 0xf;
3825 register_specifier
= 0xf;
3827 /* Encode upper 16 vector index register in the fourth byte of
3829 if (!(i
.vrex
& REX_X
))
3830 i
.vex
.bytes
[3] = 0x8;
3835 /* 4 byte EVEX prefix. */
3837 i
.vex
.bytes
[0] = 0x62;
3839 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3841 gas_assert (i
.tm
.opcode_modifier
.opcodespace
>= SPACE_0F
);
3842 gas_assert (i
.tm
.opcode_modifier
.opcodespace
<= SPACE_EVEXMAP6
);
3843 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | i
.tm
.opcode_modifier
.opcodespace
;
3845 /* The fifth bit of the second EVEX byte is 1's compliment of the
3846 REX_R bit in VREX. */
3847 if (!(i
.vrex
& REX_R
))
3848 i
.vex
.bytes
[1] |= 0x10;
3852 if ((i
.reg_operands
+ i
.imm_operands
) == i
.operands
)
3854 /* When all operands are registers, the REX_X bit in REX is not
3855 used. We reuse it to encode the upper 16 registers, which is
3856 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3857 as 1's compliment. */
3858 if ((i
.vrex
& REX_B
))
3861 i
.vex
.bytes
[1] &= ~0x40;
3865 /* EVEX instructions shouldn't need the REX prefix. */
3866 i
.vrex
&= ~vrex_used
;
3867 gas_assert (i
.vrex
== 0);
3869 /* Check the REX.W bit and VEXW. */
3870 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3871 w
= (evexwig
== evexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3872 else if (i
.tm
.opcode_modifier
.vexw
)
3873 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3875 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: evexwig
== evexw1
) ? 1 : 0;
3877 /* The third byte of the EVEX prefix. */
3878 i
.vex
.bytes
[2] = ((w
<< 7)
3879 | (register_specifier
<< 3)
3880 | 4 /* Encode the U bit. */
3881 | i
.tm
.opcode_modifier
.opcodeprefix
);
3883 /* The fourth byte of the EVEX prefix. */
3884 /* The zeroing-masking bit. */
3885 if (i
.mask
.reg
&& i
.mask
.zeroing
)
3886 i
.vex
.bytes
[3] |= 0x80;
3888 /* Don't always set the broadcast bit if there is no RC. */
3889 if (i
.rounding
.type
== rc_none
)
3891 /* Encode the vector length. */
3892 unsigned int vec_length
;
3894 if (!i
.tm
.opcode_modifier
.evex
3895 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
3899 /* Determine vector length from the last multi-length vector
3901 for (op
= i
.operands
; op
--;)
3902 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
3903 + i
.tm
.operand_types
[op
].bitfield
.ymmword
3904 + i
.tm
.operand_types
[op
].bitfield
.zmmword
> 1)
3906 if (i
.types
[op
].bitfield
.zmmword
)
3908 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3911 else if (i
.types
[op
].bitfield
.ymmword
)
3913 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3916 else if (i
.types
[op
].bitfield
.xmmword
)
3918 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3921 else if (i
.broadcast
.type
&& op
== i
.broadcast
.operand
)
3923 switch (i
.broadcast
.bytes
)
3926 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3929 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3932 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3941 if (op
>= MAX_OPERANDS
)
3945 switch (i
.tm
.opcode_modifier
.evex
)
3947 case EVEXLIG
: /* LL' is ignored */
3948 vec_length
= evexlig
<< 5;
3951 vec_length
= 0 << 5;
3954 vec_length
= 1 << 5;
3957 vec_length
= 2 << 5;
3963 i
.vex
.bytes
[3] |= vec_length
;
3964 /* Encode the broadcast bit. */
3965 if (i
.broadcast
.type
)
3966 i
.vex
.bytes
[3] |= 0x10;
3968 else if (i
.rounding
.type
!= saeonly
)
3969 i
.vex
.bytes
[3] |= 0x10 | (i
.rounding
.type
<< 5);
3971 i
.vex
.bytes
[3] |= 0x10 | (evexrcig
<< 5);
3974 i
.vex
.bytes
[3] |= i
.mask
.reg
->reg_num
;
3978 process_immext (void)
3982 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3983 which is coded in the same place as an 8-bit immediate field
3984 would be. Here we fake an 8-bit immediate operand from the
3985 opcode suffix stored in tm.extension_opcode.
3987 AVX instructions also use this encoding, for some of
3988 3 argument instructions. */
3990 gas_assert (i
.imm_operands
<= 1
3992 || (is_any_vex_encoding (&i
.tm
)
3993 && i
.operands
<= 4)));
3995 exp
= &im_expressions
[i
.imm_operands
++];
3996 i
.op
[i
.operands
].imms
= exp
;
3997 i
.types
[i
.operands
] = imm8
;
3999 exp
->X_op
= O_constant
;
4000 exp
->X_add_number
= i
.tm
.extension_opcode
;
4001 i
.tm
.extension_opcode
= None
;
4008 switch (i
.tm
.opcode_modifier
.prefixok
)
4016 as_bad (_("invalid instruction `%s' after `%s'"),
4017 i
.tm
.name
, i
.hle_prefix
);
4020 if (i
.prefix
[LOCK_PREFIX
])
4022 as_bad (_("missing `lock' with `%s'"), i
.hle_prefix
);
4026 case PrefixHLERelease
:
4027 if (i
.prefix
[HLE_PREFIX
] != XRELEASE_PREFIX_OPCODE
)
4029 as_bad (_("instruction `%s' after `xacquire' not allowed"),
4033 if (i
.mem_operands
== 0 || !(i
.flags
[i
.operands
- 1] & Operand_Mem
))
4035 as_bad (_("memory destination needed for instruction `%s'"
4036 " after `xrelease'"), i
.tm
.name
);
4043 /* Encode aligned vector move as unaligned vector move. */
4046 encode_with_unaligned_vector_move (void)
4048 switch (i
.tm
.base_opcode
)
4050 case 0x28: /* Load instructions. */
4051 case 0x29: /* Store instructions. */
4052 /* movaps/movapd/vmovaps/vmovapd. */
4053 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4054 && i
.tm
.opcode_modifier
.opcodeprefix
<= PREFIX_0X66
)
4055 i
.tm
.base_opcode
= 0x10 | (i
.tm
.base_opcode
& 1);
4057 case 0x6f: /* Load instructions. */
4058 case 0x7f: /* Store instructions. */
4059 /* movdqa/vmovdqa/vmovdqa64/vmovdqa32. */
4060 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4061 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0X66
)
4062 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_0XF3
;
4069 /* Try the shortest encoding by shortening operand size. */
4072 optimize_encoding (void)
4076 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4077 && i
.tm
.base_opcode
== 0x8d)
4080 lea symbol, %rN -> mov $symbol, %rN
4081 lea (%rM), %rN -> mov %rM, %rN
4082 lea (,%rM,1), %rN -> mov %rM, %rN
4084 and in 32-bit mode for 16-bit addressing
4086 lea (%rM), %rN -> movzx %rM, %rN
4088 and in 64-bit mode zap 32-bit addressing in favor of using a
4089 32-bit (or less) destination.
4091 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
4093 if (!i
.op
[1].regs
->reg_type
.bitfield
.word
)
4094 i
.tm
.opcode_modifier
.size
= SIZE32
;
4095 i
.prefix
[ADDR_PREFIX
] = 0;
4098 if (!i
.index_reg
&& !i
.base_reg
)
4101 lea symbol, %rN -> mov $symbol, %rN
4103 if (flag_code
== CODE_64BIT
)
4105 /* Don't transform a relocation to a 16-bit one. */
4107 && i
.op
[0].disps
->X_op
!= O_constant
4108 && i
.op
[1].regs
->reg_type
.bitfield
.word
)
4111 if (!i
.op
[1].regs
->reg_type
.bitfield
.qword
4112 || i
.tm
.opcode_modifier
.size
== SIZE32
)
4114 i
.tm
.base_opcode
= 0xb8;
4115 i
.tm
.opcode_modifier
.modrm
= 0;
4116 if (!i
.op
[1].regs
->reg_type
.bitfield
.word
)
4117 i
.types
[0].bitfield
.imm32
= 1;
4120 i
.tm
.opcode_modifier
.size
= SIZE16
;
4121 i
.types
[0].bitfield
.imm16
= 1;
4126 /* Subject to further optimization below. */
4127 i
.tm
.base_opcode
= 0xc7;
4128 i
.tm
.extension_opcode
= 0;
4129 i
.types
[0].bitfield
.imm32s
= 1;
4130 i
.types
[0].bitfield
.baseindex
= 0;
4133 /* Outside of 64-bit mode address and operand sizes have to match if
4134 a relocation is involved, as otherwise we wouldn't (currently) or
4135 even couldn't express the relocation correctly. */
4136 else if (i
.op
[0].disps
4137 && i
.op
[0].disps
->X_op
!= O_constant
4138 && ((!i
.prefix
[ADDR_PREFIX
])
4139 != (flag_code
== CODE_32BIT
4140 ? i
.op
[1].regs
->reg_type
.bitfield
.dword
4141 : i
.op
[1].regs
->reg_type
.bitfield
.word
)))
4143 /* In 16-bit mode converting LEA with 16-bit addressing and a 32-bit
4144 destination is going to grow encoding size. */
4145 else if (flag_code
== CODE_16BIT
4146 && (optimize
<= 1 || optimize_for_space
)
4147 && !i
.prefix
[ADDR_PREFIX
]
4148 && i
.op
[1].regs
->reg_type
.bitfield
.dword
)
4152 i
.tm
.base_opcode
= 0xb8;
4153 i
.tm
.opcode_modifier
.modrm
= 0;
4154 if (i
.op
[1].regs
->reg_type
.bitfield
.dword
)
4155 i
.types
[0].bitfield
.imm32
= 1;
4157 i
.types
[0].bitfield
.imm16
= 1;
4160 && i
.op
[0].disps
->X_op
== O_constant
4161 && i
.op
[1].regs
->reg_type
.bitfield
.dword
4162 /* NB: Add () to !i.prefix[ADDR_PREFIX] to silence
4164 && (!i
.prefix
[ADDR_PREFIX
]) != (flag_code
== CODE_32BIT
))
4165 i
.op
[0].disps
->X_add_number
&= 0xffff;
4168 i
.tm
.operand_types
[0] = i
.types
[0];
4172 i
.op
[0].imms
= &im_expressions
[0];
4173 i
.op
[0].imms
->X_op
= O_absent
;
4176 else if (i
.op
[0].disps
4177 && (i
.op
[0].disps
->X_op
!= O_constant
4178 || i
.op
[0].disps
->X_add_number
))
4183 lea (%rM), %rN -> mov %rM, %rN
4184 lea (,%rM,1), %rN -> mov %rM, %rN
4185 lea (%rM), %rN -> movzx %rM, %rN
4187 const reg_entry
*addr_reg
;
4189 if (!i
.index_reg
&& i
.base_reg
->reg_num
!= RegIP
)
4190 addr_reg
= i
.base_reg
;
4191 else if (!i
.base_reg
4192 && i
.index_reg
->reg_num
!= RegIZ
4193 && !i
.log2_scale_factor
)
4194 addr_reg
= i
.index_reg
;
4198 if (addr_reg
->reg_type
.bitfield
.word
4199 && i
.op
[1].regs
->reg_type
.bitfield
.dword
)
4201 if (flag_code
!= CODE_32BIT
)
4203 i
.tm
.opcode_modifier
.opcodespace
= SPACE_0F
;
4204 i
.tm
.base_opcode
= 0xb7;
4207 i
.tm
.base_opcode
= 0x8b;
4209 if (addr_reg
->reg_type
.bitfield
.dword
4210 && i
.op
[1].regs
->reg_type
.bitfield
.qword
)
4211 i
.tm
.opcode_modifier
.size
= SIZE32
;
4213 i
.op
[0].regs
= addr_reg
;
4218 i
.disp_operands
= 0;
4219 i
.prefix
[ADDR_PREFIX
] = 0;
4220 i
.prefix
[SEG_PREFIX
] = 0;
4224 if (optimize_for_space
4225 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4226 && i
.reg_operands
== 1
4227 && i
.imm_operands
== 1
4228 && !i
.types
[1].bitfield
.byte
4229 && i
.op
[0].imms
->X_op
== O_constant
4230 && fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4231 && (i
.tm
.base_opcode
== 0xa8
4232 || (i
.tm
.base_opcode
== 0xf6
4233 && i
.tm
.extension_opcode
== 0x0)))
4236 test $imm7, %r64/%r32/%r16 -> test $imm7, %r8
4238 unsigned int base_regnum
= i
.op
[1].regs
->reg_num
;
4239 if (flag_code
== CODE_64BIT
|| base_regnum
< 4)
4241 i
.types
[1].bitfield
.byte
= 1;
4242 /* Ignore the suffix. */
4244 /* Convert to byte registers. */
4245 if (i
.types
[1].bitfield
.word
)
4247 else if (i
.types
[1].bitfield
.dword
)
4251 if (!(i
.op
[1].regs
->reg_flags
& RegRex
) && base_regnum
< 4)
4256 else if (flag_code
== CODE_64BIT
4257 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4258 && ((i
.types
[1].bitfield
.qword
4259 && i
.reg_operands
== 1
4260 && i
.imm_operands
== 1
4261 && i
.op
[0].imms
->X_op
== O_constant
4262 && ((i
.tm
.base_opcode
== 0xb8
4263 && i
.tm
.extension_opcode
== None
4264 && fits_in_unsigned_long (i
.op
[0].imms
->X_add_number
))
4265 || (fits_in_imm31 (i
.op
[0].imms
->X_add_number
)
4266 && ((i
.tm
.base_opcode
== 0x24
4267 || i
.tm
.base_opcode
== 0xa8)
4268 || (i
.tm
.base_opcode
== 0x80
4269 && i
.tm
.extension_opcode
== 0x4)
4270 || ((i
.tm
.base_opcode
== 0xf6
4271 || (i
.tm
.base_opcode
| 1) == 0xc7)
4272 && i
.tm
.extension_opcode
== 0x0)))
4273 || (fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4274 && i
.tm
.base_opcode
== 0x83
4275 && i
.tm
.extension_opcode
== 0x4)))
4276 || (i
.types
[0].bitfield
.qword
4277 && ((i
.reg_operands
== 2
4278 && i
.op
[0].regs
== i
.op
[1].regs
4279 && (i
.tm
.base_opcode
== 0x30
4280 || i
.tm
.base_opcode
== 0x28))
4281 || (i
.reg_operands
== 1
4283 && i
.tm
.base_opcode
== 0x30)))))
4286 andq $imm31, %r64 -> andl $imm31, %r32
4287 andq $imm7, %r64 -> andl $imm7, %r32
4288 testq $imm31, %r64 -> testl $imm31, %r32
4289 xorq %r64, %r64 -> xorl %r32, %r32
4290 subq %r64, %r64 -> subl %r32, %r32
4291 movq $imm31, %r64 -> movl $imm31, %r32
4292 movq $imm32, %r64 -> movl $imm32, %r32
4294 i
.tm
.opcode_modifier
.norex64
= 1;
4295 if (i
.tm
.base_opcode
== 0xb8 || (i
.tm
.base_opcode
| 1) == 0xc7)
4298 movq $imm31, %r64 -> movl $imm31, %r32
4299 movq $imm32, %r64 -> movl $imm32, %r32
4301 i
.tm
.operand_types
[0].bitfield
.imm32
= 1;
4302 i
.tm
.operand_types
[0].bitfield
.imm32s
= 0;
4303 i
.tm
.operand_types
[0].bitfield
.imm64
= 0;
4304 i
.types
[0].bitfield
.imm32
= 1;
4305 i
.types
[0].bitfield
.imm32s
= 0;
4306 i
.types
[0].bitfield
.imm64
= 0;
4307 i
.types
[1].bitfield
.dword
= 1;
4308 i
.types
[1].bitfield
.qword
= 0;
4309 if ((i
.tm
.base_opcode
| 1) == 0xc7)
4312 movq $imm31, %r64 -> movl $imm31, %r32
4314 i
.tm
.base_opcode
= 0xb8;
4315 i
.tm
.extension_opcode
= None
;
4316 i
.tm
.opcode_modifier
.w
= 0;
4317 i
.tm
.opcode_modifier
.modrm
= 0;
4321 else if (optimize
> 1
4322 && !optimize_for_space
4323 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4324 && i
.reg_operands
== 2
4325 && i
.op
[0].regs
== i
.op
[1].regs
4326 && ((i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x8
4327 || (i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x20)
4328 && (flag_code
!= CODE_64BIT
|| !i
.types
[0].bitfield
.dword
))
4331 andb %rN, %rN -> testb %rN, %rN
4332 andw %rN, %rN -> testw %rN, %rN
4333 andq %rN, %rN -> testq %rN, %rN
4334 orb %rN, %rN -> testb %rN, %rN
4335 orw %rN, %rN -> testw %rN, %rN
4336 orq %rN, %rN -> testq %rN, %rN
4338 and outside of 64-bit mode
4340 andl %rN, %rN -> testl %rN, %rN
4341 orl %rN, %rN -> testl %rN, %rN
4343 i
.tm
.base_opcode
= 0x84 | (i
.tm
.base_opcode
& 1);
4345 else if (i
.reg_operands
== 3
4346 && i
.op
[0].regs
== i
.op
[1].regs
4347 && !i
.types
[2].bitfield
.xmmword
4348 && (i
.tm
.opcode_modifier
.vex
4349 || ((!i
.mask
.reg
|| i
.mask
.zeroing
)
4350 && i
.rounding
.type
== rc_none
4351 && is_evex_encoding (&i
.tm
)
4352 && (i
.vec_encoding
!= vex_encoding_evex
4353 || cpu_arch_isa_flags
.bitfield
.cpuavx512vl
4354 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
4355 || (i
.tm
.operand_types
[2].bitfield
.zmmword
4356 && i
.types
[2].bitfield
.ymmword
))))
4357 && ((i
.tm
.base_opcode
== 0x55
4358 || i
.tm
.base_opcode
== 0x57
4359 || i
.tm
.base_opcode
== 0xdf
4360 || i
.tm
.base_opcode
== 0xef
4361 || i
.tm
.base_opcode
== 0xf8
4362 || i
.tm
.base_opcode
== 0xf9
4363 || i
.tm
.base_opcode
== 0xfa
4364 || i
.tm
.base_opcode
== 0xfb
4365 || i
.tm
.base_opcode
== 0x42
4366 || i
.tm
.base_opcode
== 0x47)
4367 && i
.tm
.extension_opcode
== None
))
4370 VOP, one of vandnps, vandnpd, vxorps, vxorpd, vpsubb, vpsubd,
4372 EVEX VOP %zmmM, %zmmM, %zmmN
4373 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4374 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4375 EVEX VOP %ymmM, %ymmM, %ymmN
4376 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4377 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4378 VEX VOP %ymmM, %ymmM, %ymmN
4379 -> VEX VOP %xmmM, %xmmM, %xmmN
4380 VOP, one of vpandn and vpxor:
4381 VEX VOP %ymmM, %ymmM, %ymmN
4382 -> VEX VOP %xmmM, %xmmM, %xmmN
4383 VOP, one of vpandnd and vpandnq:
4384 EVEX VOP %zmmM, %zmmM, %zmmN
4385 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4386 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4387 EVEX VOP %ymmM, %ymmM, %ymmN
4388 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4389 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4390 VOP, one of vpxord and vpxorq:
4391 EVEX VOP %zmmM, %zmmM, %zmmN
4392 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4393 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4394 EVEX VOP %ymmM, %ymmM, %ymmN
4395 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4396 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4397 VOP, one of kxord and kxorq:
4398 VEX VOP %kM, %kM, %kN
4399 -> VEX kxorw %kM, %kM, %kN
4400 VOP, one of kandnd and kandnq:
4401 VEX VOP %kM, %kM, %kN
4402 -> VEX kandnw %kM, %kM, %kN
4404 if (is_evex_encoding (&i
.tm
))
4406 if (i
.vec_encoding
!= vex_encoding_evex
)
4408 i
.tm
.opcode_modifier
.vex
= VEX128
;
4409 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4410 i
.tm
.opcode_modifier
.evex
= 0;
4412 else if (optimize
> 1)
4413 i
.tm
.opcode_modifier
.evex
= EVEX128
;
4417 else if (i
.tm
.operand_types
[0].bitfield
.class == RegMask
)
4419 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_NONE
;
4420 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4423 i
.tm
.opcode_modifier
.vex
= VEX128
;
4425 if (i
.tm
.opcode_modifier
.vex
)
4426 for (j
= 0; j
< 3; j
++)
4428 i
.types
[j
].bitfield
.xmmword
= 1;
4429 i
.types
[j
].bitfield
.ymmword
= 0;
4432 else if (i
.vec_encoding
!= vex_encoding_evex
4433 && !i
.types
[0].bitfield
.zmmword
4434 && !i
.types
[1].bitfield
.zmmword
4436 && !i
.broadcast
.type
4437 && is_evex_encoding (&i
.tm
)
4438 && ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x6f
4439 || (i
.tm
.base_opcode
& ~4) == 0xdb
4440 || (i
.tm
.base_opcode
& ~4) == 0xeb)
4441 && i
.tm
.extension_opcode
== None
)
4444 VOP, one of vmovdqa32, vmovdqa64, vmovdqu8, vmovdqu16,
4445 vmovdqu32 and vmovdqu64:
4446 EVEX VOP %xmmM, %xmmN
4447 -> VEX vmovdqa|vmovdqu %xmmM, %xmmN (M and N < 16)
4448 EVEX VOP %ymmM, %ymmN
4449 -> VEX vmovdqa|vmovdqu %ymmM, %ymmN (M and N < 16)
4451 -> VEX vmovdqa|vmovdqu %xmmM, mem (M < 16)
4453 -> VEX vmovdqa|vmovdqu %ymmM, mem (M < 16)
4455 -> VEX mvmovdqa|vmovdquem, %xmmN (N < 16)
4457 -> VEX vmovdqa|vmovdqu mem, %ymmN (N < 16)
4458 VOP, one of vpand, vpandn, vpor, vpxor:
4459 EVEX VOP{d,q} %xmmL, %xmmM, %xmmN
4460 -> VEX VOP %xmmL, %xmmM, %xmmN (L, M, and N < 16)
4461 EVEX VOP{d,q} %ymmL, %ymmM, %ymmN
4462 -> VEX VOP %ymmL, %ymmM, %ymmN (L, M, and N < 16)
4463 EVEX VOP{d,q} mem, %xmmM, %xmmN
4464 -> VEX VOP mem, %xmmM, %xmmN (M and N < 16)
4465 EVEX VOP{d,q} mem, %ymmM, %ymmN
4466 -> VEX VOP mem, %ymmM, %ymmN (M and N < 16)
4468 for (j
= 0; j
< i
.operands
; j
++)
4469 if (operand_type_check (i
.types
[j
], disp
)
4470 && i
.op
[j
].disps
->X_op
== O_constant
)
4472 /* Since the VEX prefix has 2 or 3 bytes, the EVEX prefix
4473 has 4 bytes, EVEX Disp8 has 1 byte and VEX Disp32 has 4
4474 bytes, we choose EVEX Disp8 over VEX Disp32. */
4475 int evex_disp8
, vex_disp8
;
4476 unsigned int memshift
= i
.memshift
;
4477 offsetT n
= i
.op
[j
].disps
->X_add_number
;
4479 evex_disp8
= fits_in_disp8 (n
);
4481 vex_disp8
= fits_in_disp8 (n
);
4482 if (evex_disp8
!= vex_disp8
)
4484 i
.memshift
= memshift
;
4488 i
.types
[j
].bitfield
.disp8
= vex_disp8
;
4491 if ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x6f
4492 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
)
4493 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_0XF3
;
4494 i
.tm
.opcode_modifier
.vex
4495 = i
.types
[0].bitfield
.ymmword
? VEX256
: VEX128
;
4496 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4497 /* VPAND, VPOR, and VPXOR are commutative. */
4498 if (i
.reg_operands
== 3 && i
.tm
.base_opcode
!= 0xdf)
4499 i
.tm
.opcode_modifier
.commutative
= 1;
4500 i
.tm
.opcode_modifier
.evex
= 0;
4501 i
.tm
.opcode_modifier
.masking
= 0;
4502 i
.tm
.opcode_modifier
.broadcast
= 0;
4503 i
.tm
.opcode_modifier
.disp8memshift
= 0;
4506 i
.types
[j
].bitfield
.disp8
4507 = fits_in_disp8 (i
.op
[j
].disps
->X_add_number
);
4511 /* Return non-zero for load instruction. */
4517 int any_vex_p
= is_any_vex_encoding (&i
.tm
);
4518 unsigned int base_opcode
= i
.tm
.base_opcode
| 1;
4522 /* Anysize insns: lea, invlpg, clflush, prefetchnta, prefetcht0,
4523 prefetcht1, prefetcht2, prefetchtw, bndmk, bndcl, bndcu, bndcn,
4524 bndstx, bndldx, prefetchwt1, clflushopt, clwb, cldemote. */
4525 if (i
.tm
.opcode_modifier
.anysize
)
4529 if (strcmp (i
.tm
.name
, "pop") == 0)
4533 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
4536 if (i
.tm
.base_opcode
== 0x9d
4537 || i
.tm
.base_opcode
== 0x61)
4540 /* movs, cmps, lods, scas. */
4541 if ((i
.tm
.base_opcode
| 0xb) == 0xaf)
4545 if (base_opcode
== 0x6f
4546 || i
.tm
.base_opcode
== 0xd7)
4548 /* NB: For AMD-specific insns with implicit memory operands,
4549 they're intentionally not covered. */
4552 /* No memory operand. */
4553 if (!i
.mem_operands
)
4559 if (i
.tm
.base_opcode
== 0xae
4560 && i
.tm
.opcode_modifier
.vex
4561 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4562 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
4563 && i
.tm
.extension_opcode
== 2)
4566 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
4568 /* test, not, neg, mul, imul, div, idiv. */
4569 if ((i
.tm
.base_opcode
== 0xf6 || i
.tm
.base_opcode
== 0xf7)
4570 && i
.tm
.extension_opcode
!= 1)
4574 if (base_opcode
== 0xff && i
.tm
.extension_opcode
<= 1)
4577 /* add, or, adc, sbb, and, sub, xor, cmp. */
4578 if (i
.tm
.base_opcode
>= 0x80 && i
.tm
.base_opcode
<= 0x83)
4581 /* rol, ror, rcl, rcr, shl/sal, shr, sar. */
4582 if ((base_opcode
== 0xc1
4583 || (i
.tm
.base_opcode
>= 0xd0 && i
.tm
.base_opcode
<= 0xd3))
4584 && i
.tm
.extension_opcode
!= 6)
4587 /* Check for x87 instructions. */
4588 if (base_opcode
>= 0xd8 && base_opcode
<= 0xdf)
4590 /* Skip fst, fstp, fstenv, fstcw. */
4591 if (i
.tm
.base_opcode
== 0xd9
4592 && (i
.tm
.extension_opcode
== 2
4593 || i
.tm
.extension_opcode
== 3
4594 || i
.tm
.extension_opcode
== 6
4595 || i
.tm
.extension_opcode
== 7))
4598 /* Skip fisttp, fist, fistp, fstp. */
4599 if (i
.tm
.base_opcode
== 0xdb
4600 && (i
.tm
.extension_opcode
== 1
4601 || i
.tm
.extension_opcode
== 2
4602 || i
.tm
.extension_opcode
== 3
4603 || i
.tm
.extension_opcode
== 7))
4606 /* Skip fisttp, fst, fstp, fsave, fstsw. */
4607 if (i
.tm
.base_opcode
== 0xdd
4608 && (i
.tm
.extension_opcode
== 1
4609 || i
.tm
.extension_opcode
== 2
4610 || i
.tm
.extension_opcode
== 3
4611 || i
.tm
.extension_opcode
== 6
4612 || i
.tm
.extension_opcode
== 7))
4615 /* Skip fisttp, fist, fistp, fbstp, fistp. */
4616 if (i
.tm
.base_opcode
== 0xdf
4617 && (i
.tm
.extension_opcode
== 1
4618 || i
.tm
.extension_opcode
== 2
4619 || i
.tm
.extension_opcode
== 3
4620 || i
.tm
.extension_opcode
== 6
4621 || i
.tm
.extension_opcode
== 7))
4627 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
)
4629 /* bt, bts, btr, btc. */
4630 if (i
.tm
.base_opcode
== 0xba
4631 && (i
.tm
.extension_opcode
>= 4 && i
.tm
.extension_opcode
<= 7))
4634 /* cmpxchg8b, cmpxchg16b, xrstors, vmptrld. */
4635 if (i
.tm
.base_opcode
== 0xc7
4636 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
4637 && (i
.tm
.extension_opcode
== 1 || i
.tm
.extension_opcode
== 3
4638 || i
.tm
.extension_opcode
== 6))
4641 /* fxrstor, ldmxcsr, xrstor. */
4642 if (i
.tm
.base_opcode
== 0xae
4643 && (i
.tm
.extension_opcode
== 1
4644 || i
.tm
.extension_opcode
== 2
4645 || i
.tm
.extension_opcode
== 5))
4648 /* lgdt, lidt, lmsw. */
4649 if (i
.tm
.base_opcode
== 0x01
4650 && (i
.tm
.extension_opcode
== 2
4651 || i
.tm
.extension_opcode
== 3
4652 || i
.tm
.extension_opcode
== 6))
4656 dest
= i
.operands
- 1;
4658 /* Check fake imm8 operand and 3 source operands. */
4659 if ((i
.tm
.opcode_modifier
.immext
4660 || i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
4661 && i
.types
[dest
].bitfield
.imm8
)
4664 /* add, or, adc, sbb, and, sub, xor, cmp, test, xchg. */
4665 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4666 && (base_opcode
== 0x1
4667 || base_opcode
== 0x9
4668 || base_opcode
== 0x11
4669 || base_opcode
== 0x19
4670 || base_opcode
== 0x21
4671 || base_opcode
== 0x29
4672 || base_opcode
== 0x31
4673 || base_opcode
== 0x39
4674 || (base_opcode
| 2) == 0x87))
4678 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4679 && base_opcode
== 0xc1)
4682 /* Check for load instruction. */
4683 return (i
.types
[dest
].bitfield
.class != ClassNone
4684 || i
.types
[dest
].bitfield
.instance
== Accum
);
4687 /* Output lfence, 0xfaee8, after instruction. */
4690 insert_lfence_after (void)
4692 if (lfence_after_load
&& load_insn_p ())
4694 /* There are also two REP string instructions that require
4695 special treatment. Specifically, the compare string (CMPS)
4696 and scan string (SCAS) instructions set EFLAGS in a manner
4697 that depends on the data being compared/scanned. When used
4698 with a REP prefix, the number of iterations may therefore
4699 vary depending on this data. If the data is a program secret
4700 chosen by the adversary using an LVI method,
4701 then this data-dependent behavior may leak some aspect
4703 if (((i
.tm
.base_opcode
| 0x1) == 0xa7
4704 || (i
.tm
.base_opcode
| 0x1) == 0xaf)
4705 && i
.prefix
[REP_PREFIX
])
4707 as_warn (_("`%s` changes flags which would affect control flow behavior"),
4710 char *p
= frag_more (3);
4717 /* Output lfence, 0xfaee8, before instruction. */
4720 insert_lfence_before (void)
4724 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
4727 if (i
.tm
.base_opcode
== 0xff
4728 && (i
.tm
.extension_opcode
== 2 || i
.tm
.extension_opcode
== 4))
4730 /* Insert lfence before indirect branch if needed. */
4732 if (lfence_before_indirect_branch
== lfence_branch_none
)
4735 if (i
.operands
!= 1)
4738 if (i
.reg_operands
== 1)
4740 /* Indirect branch via register. Don't insert lfence with
4741 -mlfence-after-load=yes. */
4742 if (lfence_after_load
4743 || lfence_before_indirect_branch
== lfence_branch_memory
)
4746 else if (i
.mem_operands
== 1
4747 && lfence_before_indirect_branch
!= lfence_branch_register
)
4749 as_warn (_("indirect `%s` with memory operand should be avoided"),
4756 if (last_insn
.kind
!= last_insn_other
4757 && last_insn
.seg
== now_seg
)
4759 as_warn_where (last_insn
.file
, last_insn
.line
,
4760 _("`%s` skips -mlfence-before-indirect-branch on `%s`"),
4761 last_insn
.name
, i
.tm
.name
);
4772 /* Output or/not/shl and lfence before near ret. */
4773 if (lfence_before_ret
!= lfence_before_ret_none
4774 && (i
.tm
.base_opcode
== 0xc2
4775 || i
.tm
.base_opcode
== 0xc3))
4777 if (last_insn
.kind
!= last_insn_other
4778 && last_insn
.seg
== now_seg
)
4780 as_warn_where (last_insn
.file
, last_insn
.line
,
4781 _("`%s` skips -mlfence-before-ret on `%s`"),
4782 last_insn
.name
, i
.tm
.name
);
4786 /* Near ret ingore operand size override under CPU64. */
4787 char prefix
= flag_code
== CODE_64BIT
4789 : i
.prefix
[DATA_PREFIX
] ? 0x66 : 0x0;
4791 if (lfence_before_ret
== lfence_before_ret_not
)
4793 /* not: 0xf71424, may add prefix
4794 for operand size override or 64-bit code. */
4795 p
= frag_more ((prefix
? 2 : 0) + 6 + 3);
4809 p
= frag_more ((prefix
? 1 : 0) + 4 + 3);
4812 if (lfence_before_ret
== lfence_before_ret_or
)
4814 /* or: 0x830c2400, may add prefix
4815 for operand size override or 64-bit code. */
4821 /* shl: 0xc1242400, may add prefix
4822 for operand size override or 64-bit code. */
4837 /* This is the guts of the machine-dependent assembler. LINE points to a
4838 machine dependent instruction. This function is supposed to emit
4839 the frags/bytes it assembles to. */
4842 md_assemble (char *line
)
4845 char mnemonic
[MAX_MNEM_SIZE
], mnem_suffix
;
4846 const insn_template
*t
;
4848 /* Initialize globals. */
4849 memset (&i
, '\0', sizeof (i
));
4850 i
.rounding
.type
= rc_none
;
4851 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4852 i
.reloc
[j
] = NO_RELOC
;
4853 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
4854 memset (im_expressions
, '\0', sizeof (im_expressions
));
4855 save_stack_p
= save_stack
;
4857 /* First parse an instruction mnemonic & call i386_operand for the operands.
4858 We assume that the scrubber has arranged it so that line[0] is the valid
4859 start of a (possibly prefixed) mnemonic. */
4861 line
= parse_insn (line
, mnemonic
);
4864 mnem_suffix
= i
.suffix
;
4866 line
= parse_operands (line
, mnemonic
);
4868 xfree (i
.memop1_string
);
4869 i
.memop1_string
= NULL
;
4873 /* Now we've parsed the mnemonic into a set of templates, and have the
4874 operands at hand. */
4876 /* All Intel opcodes have reversed operands except for "bound", "enter",
4877 "invlpg*", "monitor*", "mwait*", "tpause", "umwait", "pvalidate",
4878 "rmpadjust", and "rmpupdate". We also don't reverse intersegment "jmp"
4879 and "call" instructions with 2 immediate operands so that the immediate
4880 segment precedes the offset consistently in Intel and AT&T modes. */
4883 && (strcmp (mnemonic
, "bound") != 0)
4884 && (strncmp (mnemonic
, "invlpg", 6) != 0)
4885 && !startswith (mnemonic
, "monitor")
4886 && !startswith (mnemonic
, "mwait")
4887 && (strcmp (mnemonic
, "pvalidate") != 0)
4888 && !startswith (mnemonic
, "rmp")
4889 && (strcmp (mnemonic
, "tpause") != 0)
4890 && (strcmp (mnemonic
, "umwait") != 0)
4891 && !(operand_type_check (i
.types
[0], imm
)
4892 && operand_type_check (i
.types
[1], imm
)))
4895 /* The order of the immediates should be reversed
4896 for 2 immediates extrq and insertq instructions */
4897 if (i
.imm_operands
== 2
4898 && (strcmp (mnemonic
, "extrq") == 0
4899 || strcmp (mnemonic
, "insertq") == 0))
4900 swap_2_operands (0, 1);
4905 if (i
.disp_operands
&& !want_disp32 (current_templates
->start
))
4907 for (j
= 0; j
< i
.operands
; ++j
)
4909 const expressionS
*exp
= i
.op
[j
].disps
;
4911 if (!operand_type_check (i
.types
[j
], disp
))
4914 if (exp
->X_op
!= O_constant
)
4917 /* Since displacement is signed extended to 64bit, don't allow
4918 disp32 and turn off disp32s if they are out of range. */
4919 i
.types
[j
].bitfield
.disp32
= 0;
4920 if (fits_in_signed_long (exp
->X_add_number
))
4923 i
.types
[j
].bitfield
.disp32s
= 0;
4924 if (i
.types
[j
].bitfield
.baseindex
)
4926 char number_buf
[128];
4928 /* Coded this way in order to allow for ease of translation. */
4929 sprintf_vma (number_buf
, exp
->X_add_number
);
4930 as_bad (_("0x%s out of range of signed 32bit displacement"),
4937 /* Don't optimize displacement for movabs since it only takes 64bit
4940 && i
.disp_encoding
<= disp_encoding_8bit
4941 && (flag_code
!= CODE_64BIT
4942 || strcmp (mnemonic
, "movabs") != 0))
4945 /* Next, we find a template that matches the given insn,
4946 making sure the overlap of the given operands types is consistent
4947 with the template operand types. */
4949 if (!(t
= match_template (mnem_suffix
)))
4952 if (sse_check
!= check_none
4953 /* The opcode space check isn't strictly needed; it's there only to
4954 bypass the logic below when easily possible. */
4955 && t
->opcode_modifier
.opcodespace
>= SPACE_0F
4956 && t
->opcode_modifier
.opcodespace
<= SPACE_0F3A
4957 && !i
.tm
.cpu_flags
.bitfield
.cpusse4a
4958 && !is_any_vex_encoding (t
))
4962 for (j
= 0; j
< t
->operands
; ++j
)
4964 if (t
->operand_types
[j
].bitfield
.class == RegMMX
)
4966 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
)
4970 if (j
>= t
->operands
&& simd
)
4971 (sse_check
== check_warning
4973 : as_bad
) (_("SSE instruction `%s' is used"), i
.tm
.name
);
4976 if (i
.tm
.opcode_modifier
.fwait
)
4977 if (!add_prefix (FWAIT_OPCODE
))
4980 /* Check if REP prefix is OK. */
4981 if (i
.rep_prefix
&& i
.tm
.opcode_modifier
.prefixok
!= PrefixRep
)
4983 as_bad (_("invalid instruction `%s' after `%s'"),
4984 i
.tm
.name
, i
.rep_prefix
);
4988 /* Check for lock without a lockable instruction. Destination operand
4989 must be memory unless it is xchg (0x86). */
4990 if (i
.prefix
[LOCK_PREFIX
]
4991 && (i
.tm
.opcode_modifier
.prefixok
< PrefixLock
4992 || i
.mem_operands
== 0
4993 || (i
.tm
.base_opcode
!= 0x86
4994 && !(i
.flags
[i
.operands
- 1] & Operand_Mem
))))
4996 as_bad (_("expecting lockable instruction after `lock'"));
5000 /* Check for data size prefix on VEX/XOP/EVEX encoded and SIMD insns. */
5001 if (i
.prefix
[DATA_PREFIX
]
5002 && (is_any_vex_encoding (&i
.tm
)
5003 || i
.tm
.operand_types
[i
.imm_operands
].bitfield
.class >= RegMMX
5004 || i
.tm
.operand_types
[i
.imm_operands
+ 1].bitfield
.class >= RegMMX
))
5006 as_bad (_("data size prefix invalid with `%s'"), i
.tm
.name
);
5010 /* Check if HLE prefix is OK. */
5011 if (i
.hle_prefix
&& !check_hle ())
5014 /* Check BND prefix. */
5015 if (i
.bnd_prefix
&& !i
.tm
.opcode_modifier
.bndprefixok
)
5016 as_bad (_("expecting valid branch instruction after `bnd'"));
5018 /* Check NOTRACK prefix. */
5019 if (i
.notrack_prefix
&& i
.tm
.opcode_modifier
.prefixok
!= PrefixNoTrack
)
5020 as_bad (_("expecting indirect branch instruction after `notrack'"));
5022 if (i
.tm
.cpu_flags
.bitfield
.cpumpx
)
5024 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
5025 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
5026 else if (flag_code
!= CODE_16BIT
5027 ? i
.prefix
[ADDR_PREFIX
]
5028 : i
.mem_operands
&& !i
.prefix
[ADDR_PREFIX
])
5029 as_bad (_("16-bit address isn't allowed in MPX instructions"));
5032 /* Insert BND prefix. */
5033 if (add_bnd_prefix
&& i
.tm
.opcode_modifier
.bndprefixok
)
5035 if (!i
.prefix
[BND_PREFIX
])
5036 add_prefix (BND_PREFIX_OPCODE
);
5037 else if (i
.prefix
[BND_PREFIX
] != BND_PREFIX_OPCODE
)
5039 as_warn (_("replacing `rep'/`repe' prefix by `bnd'"));
5040 i
.prefix
[BND_PREFIX
] = BND_PREFIX_OPCODE
;
5044 /* Check string instruction segment overrides. */
5045 if (i
.tm
.opcode_modifier
.isstring
>= IS_STRING_ES_OP0
)
5047 gas_assert (i
.mem_operands
);
5048 if (!check_string ())
5050 i
.disp_operands
= 0;
5053 if (optimize
&& !i
.no_optimize
&& i
.tm
.opcode_modifier
.optimize
)
5054 optimize_encoding ();
5056 if (use_unaligned_vector_move
)
5057 encode_with_unaligned_vector_move ();
5059 if (!process_suffix ())
5062 /* Update operand types and check extended states. */
5063 for (j
= 0; j
< i
.operands
; j
++)
5065 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
5066 switch (i
.tm
.operand_types
[j
].bitfield
.class)
5071 i
.xstate
|= xstate_mmx
;
5074 i
.xstate
|= xstate_mask
;
5077 if (i
.tm
.operand_types
[j
].bitfield
.tmmword
)
5078 i
.xstate
|= xstate_tmm
;
5079 else if (i
.tm
.operand_types
[j
].bitfield
.zmmword
)
5080 i
.xstate
|= xstate_zmm
;
5081 else if (i
.tm
.operand_types
[j
].bitfield
.ymmword
)
5082 i
.xstate
|= xstate_ymm
;
5083 else if (i
.tm
.operand_types
[j
].bitfield
.xmmword
)
5084 i
.xstate
|= xstate_xmm
;
5089 /* Make still unresolved immediate matches conform to size of immediate
5090 given in i.suffix. */
5091 if (!finalize_imm ())
5094 if (i
.types
[0].bitfield
.imm1
)
5095 i
.imm_operands
= 0; /* kludge for shift insns. */
5097 /* We only need to check those implicit registers for instructions
5098 with 3 operands or less. */
5099 if (i
.operands
<= 3)
5100 for (j
= 0; j
< i
.operands
; j
++)
5101 if (i
.types
[j
].bitfield
.instance
!= InstanceNone
5102 && !i
.types
[j
].bitfield
.xmmword
)
5105 /* For insns with operands there are more diddles to do to the opcode. */
5108 if (!process_operands ())
5111 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
5113 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
5114 as_warn (_("translating to `%sp'"), i
.tm
.name
);
5117 if (is_any_vex_encoding (&i
.tm
))
5119 if (!cpu_arch_flags
.bitfield
.cpui286
)
5121 as_bad (_("instruction `%s' isn't supported outside of protected mode."),
5126 /* Check for explicit REX prefix. */
5127 if (i
.prefix
[REX_PREFIX
] || i
.rex_encoding
)
5129 as_bad (_("REX prefix invalid with `%s'"), i
.tm
.name
);
5133 if (i
.tm
.opcode_modifier
.vex
)
5134 build_vex_prefix (t
);
5136 build_evex_prefix ();
5138 /* The individual REX.RXBW bits got consumed. */
5139 i
.rex
&= REX_OPCODE
;
5142 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
5143 instructions may define INT_OPCODE as well, so avoid this corner
5144 case for those instructions that use MODRM. */
5145 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
5146 && i
.tm
.base_opcode
== INT_OPCODE
5147 && !i
.tm
.opcode_modifier
.modrm
5148 && i
.op
[0].imms
->X_add_number
== 3)
5150 i
.tm
.base_opcode
= INT3_OPCODE
;
5154 if ((i
.tm
.opcode_modifier
.jump
== JUMP
5155 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
5156 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
5157 && i
.op
[0].disps
->X_op
== O_constant
)
5159 /* Convert "jmp constant" (and "call constant") to a jump (call) to
5160 the absolute address given by the constant. Since ix86 jumps and
5161 calls are pc relative, we need to generate a reloc. */
5162 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
5163 i
.op
[0].disps
->X_op
= O_symbol
;
5166 /* For 8 bit registers we need an empty rex prefix. Also if the
5167 instruction already has a prefix, we need to convert old
5168 registers to new ones. */
5170 if ((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
5171 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
5172 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
5173 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
5174 || (((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
)
5175 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
))
5180 i
.rex
|= REX_OPCODE
;
5181 for (x
= 0; x
< 2; x
++)
5183 /* Look for 8 bit operand that uses old registers. */
5184 if (i
.types
[x
].bitfield
.class == Reg
&& i
.types
[x
].bitfield
.byte
5185 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
5187 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
5188 /* In case it is "hi" register, give up. */
5189 if (i
.op
[x
].regs
->reg_num
> 3)
5190 as_bad (_("can't encode register '%s%s' in an "
5191 "instruction requiring REX prefix."),
5192 register_prefix
, i
.op
[x
].regs
->reg_name
);
5194 /* Otherwise it is equivalent to the extended register.
5195 Since the encoding doesn't change this is merely
5196 cosmetic cleanup for debug output. */
5198 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
5203 if (i
.rex
== 0 && i
.rex_encoding
)
5205 /* Check if we can add a REX_OPCODE byte. Look for 8 bit operand
5206 that uses legacy register. If it is "hi" register, don't add
5207 the REX_OPCODE byte. */
5209 for (x
= 0; x
< 2; x
++)
5210 if (i
.types
[x
].bitfield
.class == Reg
5211 && i
.types
[x
].bitfield
.byte
5212 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0
5213 && i
.op
[x
].regs
->reg_num
> 3)
5215 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
5216 i
.rex_encoding
= false;
5225 add_prefix (REX_OPCODE
| i
.rex
);
5227 insert_lfence_before ();
5229 /* We are ready to output the insn. */
5232 insert_lfence_after ();
5234 last_insn
.seg
= now_seg
;
5236 if (i
.tm
.opcode_modifier
.isprefix
)
5238 last_insn
.kind
= last_insn_prefix
;
5239 last_insn
.name
= i
.tm
.name
;
5240 last_insn
.file
= as_where (&last_insn
.line
);
5243 last_insn
.kind
= last_insn_other
;
5247 parse_insn (char *line
, char *mnemonic
)
5250 char *token_start
= l
;
5253 const insn_template
*t
;
5259 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
5264 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
5266 as_bad (_("no such instruction: `%s'"), token_start
);
5271 if (!is_space_char (*l
)
5272 && *l
!= END_OF_INSN
5274 || (*l
!= PREFIX_SEPARATOR
5277 as_bad (_("invalid character %s in mnemonic"),
5278 output_invalid (*l
));
5281 if (token_start
== l
)
5283 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
5284 as_bad (_("expecting prefix; got nothing"));
5286 as_bad (_("expecting mnemonic; got nothing"));
5290 /* Look up instruction (or prefix) via hash table. */
5291 current_templates
= (const templates
*) str_hash_find (op_hash
, mnemonic
);
5293 if (*l
!= END_OF_INSN
5294 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
5295 && current_templates
5296 && current_templates
->start
->opcode_modifier
.isprefix
)
5298 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
5300 as_bad ((flag_code
!= CODE_64BIT
5301 ? _("`%s' is only supported in 64-bit mode")
5302 : _("`%s' is not supported in 64-bit mode")),
5303 current_templates
->start
->name
);
5306 /* If we are in 16-bit mode, do not allow addr16 or data16.
5307 Similarly, in 32-bit mode, do not allow addr32 or data32. */
5308 if ((current_templates
->start
->opcode_modifier
.size
== SIZE16
5309 || current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5310 && flag_code
!= CODE_64BIT
5311 && ((current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5312 ^ (flag_code
== CODE_16BIT
)))
5314 as_bad (_("redundant %s prefix"),
5315 current_templates
->start
->name
);
5319 if (current_templates
->start
->base_opcode
== PSEUDO_PREFIX
)
5321 /* Handle pseudo prefixes. */
5322 switch (current_templates
->start
->extension_opcode
)
5326 i
.disp_encoding
= disp_encoding_8bit
;
5330 i
.disp_encoding
= disp_encoding_16bit
;
5334 i
.disp_encoding
= disp_encoding_32bit
;
5338 i
.dir_encoding
= dir_encoding_load
;
5342 i
.dir_encoding
= dir_encoding_store
;
5346 i
.vec_encoding
= vex_encoding_vex
;
5350 i
.vec_encoding
= vex_encoding_vex3
;
5354 i
.vec_encoding
= vex_encoding_evex
;
5358 i
.rex_encoding
= true;
5360 case Prefix_NoOptimize
:
5362 i
.no_optimize
= true;
5370 /* Add prefix, checking for repeated prefixes. */
5371 switch (add_prefix (current_templates
->start
->base_opcode
))
5376 if (current_templates
->start
->cpu_flags
.bitfield
.cpuibt
)
5377 i
.notrack_prefix
= current_templates
->start
->name
;
5380 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
5381 i
.hle_prefix
= current_templates
->start
->name
;
5382 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
5383 i
.bnd_prefix
= current_templates
->start
->name
;
5385 i
.rep_prefix
= current_templates
->start
->name
;
5391 /* Skip past PREFIX_SEPARATOR and reset token_start. */
5398 if (!current_templates
)
5400 /* Deprecated functionality (new code should use pseudo-prefixes instead):
5401 Check if we should swap operand or force 32bit displacement in
5403 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
5404 i
.dir_encoding
= dir_encoding_swap
;
5405 else if (mnem_p
- 3 == dot_p
5408 i
.disp_encoding
= disp_encoding_8bit
;
5409 else if (mnem_p
- 4 == dot_p
5413 i
.disp_encoding
= disp_encoding_32bit
;
5418 current_templates
= (const templates
*) str_hash_find (op_hash
, mnemonic
);
5421 if (!current_templates
)
5424 if (mnem_p
> mnemonic
)
5426 /* See if we can get a match by trimming off a suffix. */
5429 case WORD_MNEM_SUFFIX
:
5430 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
5431 i
.suffix
= SHORT_MNEM_SUFFIX
;
5434 case BYTE_MNEM_SUFFIX
:
5435 case QWORD_MNEM_SUFFIX
:
5436 i
.suffix
= mnem_p
[-1];
5439 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5441 case SHORT_MNEM_SUFFIX
:
5442 case LONG_MNEM_SUFFIX
:
5445 i
.suffix
= mnem_p
[-1];
5448 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5456 if (intel_float_operand (mnemonic
) == 1)
5457 i
.suffix
= SHORT_MNEM_SUFFIX
;
5459 i
.suffix
= LONG_MNEM_SUFFIX
;
5462 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5468 if (!current_templates
)
5470 as_bad (_("no such instruction: `%s'"), token_start
);
5475 if (current_templates
->start
->opcode_modifier
.jump
== JUMP
5476 || current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
)
5478 /* Check for a branch hint. We allow ",pt" and ",pn" for
5479 predict taken and predict not taken respectively.
5480 I'm not sure that branch hints actually do anything on loop
5481 and jcxz insns (JumpByte) for current Pentium4 chips. They
5482 may work in the future and it doesn't hurt to accept them
5484 if (l
[0] == ',' && l
[1] == 'p')
5488 if (!add_prefix (DS_PREFIX_OPCODE
))
5492 else if (l
[2] == 'n')
5494 if (!add_prefix (CS_PREFIX_OPCODE
))
5500 /* Any other comma loses. */
5503 as_bad (_("invalid character %s in mnemonic"),
5504 output_invalid (*l
));
5508 /* Check if instruction is supported on specified architecture. */
5510 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
5512 supported
|= cpu_flags_match (t
);
5513 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
5515 if (!cpu_arch_flags
.bitfield
.cpui386
&& (flag_code
!= CODE_16BIT
))
5516 as_warn (_("use .code16 to ensure correct addressing mode"));
5522 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
5523 as_bad (flag_code
== CODE_64BIT
5524 ? _("`%s' is not supported in 64-bit mode")
5525 : _("`%s' is only supported in 64-bit mode"),
5526 current_templates
->start
->name
);
5528 as_bad (_("`%s' is not supported on `%s%s'"),
5529 current_templates
->start
->name
,
5530 cpu_arch_name
? cpu_arch_name
: default_arch
,
5531 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
5537 parse_operands (char *l
, const char *mnemonic
)
5541 /* 1 if operand is pending after ','. */
5542 unsigned int expecting_operand
= 0;
5544 while (*l
!= END_OF_INSN
)
5546 /* Non-zero if operand parens not balanced. */
5547 unsigned int paren_not_balanced
= 0;
5548 /* True if inside double quotes. */
5549 bool in_quotes
= false;
5551 /* Skip optional white space before operand. */
5552 if (is_space_char (*l
))
5554 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
&& *l
!= '"')
5556 as_bad (_("invalid character %s before operand %d"),
5557 output_invalid (*l
),
5561 token_start
= l
; /* After white space. */
5562 while (in_quotes
|| paren_not_balanced
|| *l
!= ',')
5564 if (*l
== END_OF_INSN
)
5568 as_bad (_("unbalanced double quotes in operand %d."),
5572 if (paren_not_balanced
)
5574 know (!intel_syntax
);
5575 as_bad (_("unbalanced parenthesis in operand %d."),
5580 break; /* we are done */
5582 else if (*l
== '\\' && l
[1] == '"')
5585 in_quotes
= !in_quotes
;
5586 else if (!in_quotes
&& !is_operand_char (*l
) && !is_space_char (*l
))
5588 as_bad (_("invalid character %s in operand %d"),
5589 output_invalid (*l
),
5593 if (!intel_syntax
&& !in_quotes
)
5596 ++paren_not_balanced
;
5598 --paren_not_balanced
;
5602 if (l
!= token_start
)
5603 { /* Yes, we've read in another operand. */
5604 unsigned int operand_ok
;
5605 this_operand
= i
.operands
++;
5606 if (i
.operands
> MAX_OPERANDS
)
5608 as_bad (_("spurious operands; (%d operands/instruction max)"),
5612 i
.types
[this_operand
].bitfield
.unspecified
= 1;
5613 /* Now parse operand adding info to 'i' as we go along. */
5614 END_STRING_AND_SAVE (l
);
5616 if (i
.mem_operands
> 1)
5618 as_bad (_("too many memory references for `%s'"),
5625 i386_intel_operand (token_start
,
5626 intel_float_operand (mnemonic
));
5628 operand_ok
= i386_att_operand (token_start
);
5630 RESTORE_END_STRING (l
);
5636 if (expecting_operand
)
5638 expecting_operand_after_comma
:
5639 as_bad (_("expecting operand after ','; got nothing"));
5644 as_bad (_("expecting operand before ','; got nothing"));
5649 /* Now *l must be either ',' or END_OF_INSN. */
5652 if (*++l
== END_OF_INSN
)
5654 /* Just skip it, if it's \n complain. */
5655 goto expecting_operand_after_comma
;
5657 expecting_operand
= 1;
5664 swap_2_operands (unsigned int xchg1
, unsigned int xchg2
)
5666 union i386_op temp_op
;
5667 i386_operand_type temp_type
;
5668 unsigned int temp_flags
;
5669 enum bfd_reloc_code_real temp_reloc
;
5671 temp_type
= i
.types
[xchg2
];
5672 i
.types
[xchg2
] = i
.types
[xchg1
];
5673 i
.types
[xchg1
] = temp_type
;
5675 temp_flags
= i
.flags
[xchg2
];
5676 i
.flags
[xchg2
] = i
.flags
[xchg1
];
5677 i
.flags
[xchg1
] = temp_flags
;
5679 temp_op
= i
.op
[xchg2
];
5680 i
.op
[xchg2
] = i
.op
[xchg1
];
5681 i
.op
[xchg1
] = temp_op
;
5683 temp_reloc
= i
.reloc
[xchg2
];
5684 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
5685 i
.reloc
[xchg1
] = temp_reloc
;
5689 if (i
.mask
.operand
== xchg1
)
5690 i
.mask
.operand
= xchg2
;
5691 else if (i
.mask
.operand
== xchg2
)
5692 i
.mask
.operand
= xchg1
;
5694 if (i
.broadcast
.type
)
5696 if (i
.broadcast
.operand
== xchg1
)
5697 i
.broadcast
.operand
= xchg2
;
5698 else if (i
.broadcast
.operand
== xchg2
)
5699 i
.broadcast
.operand
= xchg1
;
5701 if (i
.rounding
.type
!= rc_none
)
5703 if (i
.rounding
.operand
== xchg1
)
5704 i
.rounding
.operand
= xchg2
;
5705 else if (i
.rounding
.operand
== xchg2
)
5706 i
.rounding
.operand
= xchg1
;
5711 swap_operands (void)
5717 swap_2_operands (1, i
.operands
- 2);
5721 swap_2_operands (0, i
.operands
- 1);
5727 if (i
.mem_operands
== 2)
5729 const reg_entry
*temp_seg
;
5730 temp_seg
= i
.seg
[0];
5731 i
.seg
[0] = i
.seg
[1];
5732 i
.seg
[1] = temp_seg
;
5736 /* Try to ensure constant immediates are represented in the smallest
5741 char guess_suffix
= 0;
5745 guess_suffix
= i
.suffix
;
5746 else if (i
.reg_operands
)
5748 /* Figure out a suffix from the last register operand specified.
5749 We can't do this properly yet, i.e. excluding special register
5750 instances, but the following works for instructions with
5751 immediates. In any case, we can't set i.suffix yet. */
5752 for (op
= i
.operands
; --op
>= 0;)
5753 if (i
.types
[op
].bitfield
.class != Reg
)
5755 else if (i
.types
[op
].bitfield
.byte
)
5757 guess_suffix
= BYTE_MNEM_SUFFIX
;
5760 else if (i
.types
[op
].bitfield
.word
)
5762 guess_suffix
= WORD_MNEM_SUFFIX
;
5765 else if (i
.types
[op
].bitfield
.dword
)
5767 guess_suffix
= LONG_MNEM_SUFFIX
;
5770 else if (i
.types
[op
].bitfield
.qword
)
5772 guess_suffix
= QWORD_MNEM_SUFFIX
;
5776 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
5777 guess_suffix
= WORD_MNEM_SUFFIX
;
5779 for (op
= i
.operands
; --op
>= 0;)
5780 if (operand_type_check (i
.types
[op
], imm
))
5782 switch (i
.op
[op
].imms
->X_op
)
5785 /* If a suffix is given, this operand may be shortened. */
5786 switch (guess_suffix
)
5788 case LONG_MNEM_SUFFIX
:
5789 i
.types
[op
].bitfield
.imm32
= 1;
5790 i
.types
[op
].bitfield
.imm64
= 1;
5792 case WORD_MNEM_SUFFIX
:
5793 i
.types
[op
].bitfield
.imm16
= 1;
5794 i
.types
[op
].bitfield
.imm32
= 1;
5795 i
.types
[op
].bitfield
.imm32s
= 1;
5796 i
.types
[op
].bitfield
.imm64
= 1;
5798 case BYTE_MNEM_SUFFIX
:
5799 i
.types
[op
].bitfield
.imm8
= 1;
5800 i
.types
[op
].bitfield
.imm8s
= 1;
5801 i
.types
[op
].bitfield
.imm16
= 1;
5802 i
.types
[op
].bitfield
.imm32
= 1;
5803 i
.types
[op
].bitfield
.imm32s
= 1;
5804 i
.types
[op
].bitfield
.imm64
= 1;
5808 /* If this operand is at most 16 bits, convert it
5809 to a signed 16 bit number before trying to see
5810 whether it will fit in an even smaller size.
5811 This allows a 16-bit operand such as $0xffe0 to
5812 be recognised as within Imm8S range. */
5813 if ((i
.types
[op
].bitfield
.imm16
)
5814 && fits_in_unsigned_word (i
.op
[op
].imms
->X_add_number
))
5816 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
5817 ^ 0x8000) - 0x8000);
5820 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
5821 if ((i
.types
[op
].bitfield
.imm32
)
5822 && fits_in_unsigned_long (i
.op
[op
].imms
->X_add_number
))
5824 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
5825 ^ ((offsetT
) 1 << 31))
5826 - ((offsetT
) 1 << 31));
5830 = operand_type_or (i
.types
[op
],
5831 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
5833 /* We must avoid matching of Imm32 templates when 64bit
5834 only immediate is available. */
5835 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
5836 i
.types
[op
].bitfield
.imm32
= 0;
5843 /* Symbols and expressions. */
5845 /* Convert symbolic operand to proper sizes for matching, but don't
5846 prevent matching a set of insns that only supports sizes other
5847 than those matching the insn suffix. */
5849 i386_operand_type mask
, allowed
;
5850 const insn_template
*t
= current_templates
->start
;
5852 operand_type_set (&mask
, 0);
5853 allowed
= t
->operand_types
[op
];
5855 while (++t
< current_templates
->end
)
5857 allowed
= operand_type_and (allowed
, anyimm
);
5858 allowed
= operand_type_or (allowed
, t
->operand_types
[op
]);
5860 switch (guess_suffix
)
5862 case QWORD_MNEM_SUFFIX
:
5863 mask
.bitfield
.imm64
= 1;
5864 mask
.bitfield
.imm32s
= 1;
5866 case LONG_MNEM_SUFFIX
:
5867 mask
.bitfield
.imm32
= 1;
5869 case WORD_MNEM_SUFFIX
:
5870 mask
.bitfield
.imm16
= 1;
5872 case BYTE_MNEM_SUFFIX
:
5873 mask
.bitfield
.imm8
= 1;
5878 allowed
= operand_type_and (mask
, allowed
);
5879 if (!operand_type_all_zero (&allowed
))
5880 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
5887 /* Try to use the smallest displacement type too. */
5889 optimize_disp (void)
5893 for (op
= i
.operands
; --op
>= 0;)
5894 if (operand_type_check (i
.types
[op
], disp
))
5896 if (i
.op
[op
].disps
->X_op
== O_constant
)
5898 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
5900 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
5902 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
5903 i
.op
[op
].disps
= NULL
;
5908 if (i
.types
[op
].bitfield
.disp16
5909 && fits_in_unsigned_word (op_disp
))
5911 /* If this operand is at most 16 bits, convert
5912 to a signed 16 bit number and don't use 64bit
5914 op_disp
= ((op_disp
^ 0x8000) - 0x8000);
5915 i
.types
[op
].bitfield
.disp64
= 0;
5919 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
5920 if ((i
.types
[op
].bitfield
.disp32
5921 || (flag_code
== CODE_64BIT
5922 && want_disp32 (current_templates
->start
)))
5923 && fits_in_unsigned_long (op_disp
))
5925 /* If this operand is at most 32 bits, convert
5926 to a signed 32 bit number and don't use 64bit
5928 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
5929 i
.types
[op
].bitfield
.disp64
= 0;
5930 i
.types
[op
].bitfield
.disp32
= 1;
5933 if (flag_code
== CODE_64BIT
&& fits_in_signed_long (op_disp
))
5935 i
.types
[op
].bitfield
.disp64
= 0;
5936 i
.types
[op
].bitfield
.disp32s
= 1;
5939 if ((i
.types
[op
].bitfield
.disp32
5940 || i
.types
[op
].bitfield
.disp32s
5941 || i
.types
[op
].bitfield
.disp16
)
5942 && fits_in_disp8 (op_disp
))
5943 i
.types
[op
].bitfield
.disp8
= 1;
5945 i
.op
[op
].disps
->X_add_number
= op_disp
;
5947 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
5948 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
5950 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
5951 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
5952 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
5955 /* We only support 64bit displacement on constants. */
5956 i
.types
[op
].bitfield
.disp64
= 0;
5960 /* Return 1 if there is a match in broadcast bytes between operand
5961 GIVEN and instruction template T. */
5964 match_broadcast_size (const insn_template
*t
, unsigned int given
)
5966 return ((t
->opcode_modifier
.broadcast
== BYTE_BROADCAST
5967 && i
.types
[given
].bitfield
.byte
)
5968 || (t
->opcode_modifier
.broadcast
== WORD_BROADCAST
5969 && i
.types
[given
].bitfield
.word
)
5970 || (t
->opcode_modifier
.broadcast
== DWORD_BROADCAST
5971 && i
.types
[given
].bitfield
.dword
)
5972 || (t
->opcode_modifier
.broadcast
== QWORD_BROADCAST
5973 && i
.types
[given
].bitfield
.qword
));
5976 /* Check if operands are valid for the instruction. */
5979 check_VecOperands (const insn_template
*t
)
5984 /* Templates allowing for ZMMword as well as YMMword and/or XMMword for
5985 any one operand are implicity requiring AVX512VL support if the actual
5986 operand size is YMMword or XMMword. Since this function runs after
5987 template matching, there's no need to check for YMMword/XMMword in
5989 cpu
= cpu_flags_and (t
->cpu_flags
, avx512
);
5990 if (!cpu_flags_all_zero (&cpu
)
5991 && !t
->cpu_flags
.bitfield
.cpuavx512vl
5992 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
5994 for (op
= 0; op
< t
->operands
; ++op
)
5996 if (t
->operand_types
[op
].bitfield
.zmmword
5997 && (i
.types
[op
].bitfield
.ymmword
5998 || i
.types
[op
].bitfield
.xmmword
))
6000 i
.error
= unsupported
;
6006 /* Somewhat similarly, templates specifying both AVX and AVX2 are
6007 requiring AVX2 support if the actual operand size is YMMword. */
6008 if (t
->cpu_flags
.bitfield
.cpuavx
6009 && t
->cpu_flags
.bitfield
.cpuavx2
6010 && !cpu_arch_flags
.bitfield
.cpuavx2
)
6012 for (op
= 0; op
< t
->operands
; ++op
)
6014 if (t
->operand_types
[op
].bitfield
.xmmword
6015 && i
.types
[op
].bitfield
.ymmword
)
6017 i
.error
= unsupported
;
6023 /* Without VSIB byte, we can't have a vector register for index. */
6024 if (!t
->opcode_modifier
.sib
6026 && (i
.index_reg
->reg_type
.bitfield
.xmmword
6027 || i
.index_reg
->reg_type
.bitfield
.ymmword
6028 || i
.index_reg
->reg_type
.bitfield
.zmmword
))
6030 i
.error
= unsupported_vector_index_register
;
6034 /* Check if default mask is allowed. */
6035 if (t
->opcode_modifier
.nodefmask
6036 && (!i
.mask
.reg
|| i
.mask
.reg
->reg_num
== 0))
6038 i
.error
= no_default_mask
;
6042 /* For VSIB byte, we need a vector register for index, and all vector
6043 registers must be distinct. */
6044 if (t
->opcode_modifier
.sib
&& t
->opcode_modifier
.sib
!= SIBMEM
)
6047 || !((t
->opcode_modifier
.sib
== VECSIB128
6048 && i
.index_reg
->reg_type
.bitfield
.xmmword
)
6049 || (t
->opcode_modifier
.sib
== VECSIB256
6050 && i
.index_reg
->reg_type
.bitfield
.ymmword
)
6051 || (t
->opcode_modifier
.sib
== VECSIB512
6052 && i
.index_reg
->reg_type
.bitfield
.zmmword
)))
6054 i
.error
= invalid_vsib_address
;
6058 gas_assert (i
.reg_operands
== 2 || i
.mask
.reg
);
6059 if (i
.reg_operands
== 2 && !i
.mask
.reg
)
6061 gas_assert (i
.types
[0].bitfield
.class == RegSIMD
);
6062 gas_assert (i
.types
[0].bitfield
.xmmword
6063 || i
.types
[0].bitfield
.ymmword
);
6064 gas_assert (i
.types
[2].bitfield
.class == RegSIMD
);
6065 gas_assert (i
.types
[2].bitfield
.xmmword
6066 || i
.types
[2].bitfield
.ymmword
);
6067 if (operand_check
== check_none
)
6069 if (register_number (i
.op
[0].regs
)
6070 != register_number (i
.index_reg
)
6071 && register_number (i
.op
[2].regs
)
6072 != register_number (i
.index_reg
)
6073 && register_number (i
.op
[0].regs
)
6074 != register_number (i
.op
[2].regs
))
6076 if (operand_check
== check_error
)
6078 i
.error
= invalid_vector_register_set
;
6081 as_warn (_("mask, index, and destination registers should be distinct"));
6083 else if (i
.reg_operands
== 1 && i
.mask
.reg
)
6085 if (i
.types
[1].bitfield
.class == RegSIMD
6086 && (i
.types
[1].bitfield
.xmmword
6087 || i
.types
[1].bitfield
.ymmword
6088 || i
.types
[1].bitfield
.zmmword
)
6089 && (register_number (i
.op
[1].regs
)
6090 == register_number (i
.index_reg
)))
6092 if (operand_check
== check_error
)
6094 i
.error
= invalid_vector_register_set
;
6097 if (operand_check
!= check_none
)
6098 as_warn (_("index and destination registers should be distinct"));
6103 /* For AMX instructions with 3 TMM register operands, all operands
6104 must be distinct. */
6105 if (i
.reg_operands
== 3
6106 && t
->operand_types
[0].bitfield
.tmmword
6107 && (i
.op
[0].regs
== i
.op
[1].regs
6108 || i
.op
[0].regs
== i
.op
[2].regs
6109 || i
.op
[1].regs
== i
.op
[2].regs
))
6111 i
.error
= invalid_tmm_register_set
;
6115 /* For some special instructions require that destination must be distinct
6116 from source registers. */
6117 if (t
->opcode_modifier
.distinctdest
)
6119 unsigned int dest_reg
= i
.operands
- 1;
6121 know (i
.operands
>= 3);
6123 /* #UD if dest_reg == src1_reg or dest_reg == src2_reg. */
6124 if (i
.op
[dest_reg
- 1].regs
== i
.op
[dest_reg
].regs
6125 || (i
.reg_operands
> 2
6126 && i
.op
[dest_reg
- 2].regs
== i
.op
[dest_reg
].regs
))
6128 i
.error
= invalid_dest_and_src_register_set
;
6133 /* Check if broadcast is supported by the instruction and is applied
6134 to the memory operand. */
6135 if (i
.broadcast
.type
)
6137 i386_operand_type type
, overlap
;
6139 /* Check if specified broadcast is supported in this instruction,
6140 and its broadcast bytes match the memory operand. */
6141 op
= i
.broadcast
.operand
;
6142 if (!t
->opcode_modifier
.broadcast
6143 || !(i
.flags
[op
] & Operand_Mem
)
6144 || (!i
.types
[op
].bitfield
.unspecified
6145 && !match_broadcast_size (t
, op
)))
6148 i
.error
= unsupported_broadcast
;
6152 i
.broadcast
.bytes
= ((1 << (t
->opcode_modifier
.broadcast
- 1))
6153 * i
.broadcast
.type
);
6154 operand_type_set (&type
, 0);
6155 switch (i
.broadcast
.bytes
)
6158 type
.bitfield
.word
= 1;
6161 type
.bitfield
.dword
= 1;
6164 type
.bitfield
.qword
= 1;
6167 type
.bitfield
.xmmword
= 1;
6170 type
.bitfield
.ymmword
= 1;
6173 type
.bitfield
.zmmword
= 1;
6179 overlap
= operand_type_and (type
, t
->operand_types
[op
]);
6180 if (t
->operand_types
[op
].bitfield
.class == RegSIMD
6181 && t
->operand_types
[op
].bitfield
.byte
6182 + t
->operand_types
[op
].bitfield
.word
6183 + t
->operand_types
[op
].bitfield
.dword
6184 + t
->operand_types
[op
].bitfield
.qword
> 1)
6186 overlap
.bitfield
.xmmword
= 0;
6187 overlap
.bitfield
.ymmword
= 0;
6188 overlap
.bitfield
.zmmword
= 0;
6190 if (operand_type_all_zero (&overlap
))
6193 if (t
->opcode_modifier
.checkregsize
)
6197 type
.bitfield
.baseindex
= 1;
6198 for (j
= 0; j
< i
.operands
; ++j
)
6201 && !operand_type_register_match(i
.types
[j
],
6202 t
->operand_types
[j
],
6204 t
->operand_types
[op
]))
6209 /* If broadcast is supported in this instruction, we need to check if
6210 operand of one-element size isn't specified without broadcast. */
6211 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
6213 /* Find memory operand. */
6214 for (op
= 0; op
< i
.operands
; op
++)
6215 if (i
.flags
[op
] & Operand_Mem
)
6217 gas_assert (op
< i
.operands
);
6218 /* Check size of the memory operand. */
6219 if (match_broadcast_size (t
, op
))
6221 i
.error
= broadcast_needed
;
6226 op
= MAX_OPERANDS
- 1; /* Avoid uninitialized variable warning. */
6228 /* Check if requested masking is supported. */
6231 switch (t
->opcode_modifier
.masking
)
6235 case MERGING_MASKING
:
6239 i
.error
= unsupported_masking
;
6243 case DYNAMIC_MASKING
:
6244 /* Memory destinations allow only merging masking. */
6245 if (i
.mask
.zeroing
&& i
.mem_operands
)
6247 /* Find memory operand. */
6248 for (op
= 0; op
< i
.operands
; op
++)
6249 if (i
.flags
[op
] & Operand_Mem
)
6251 gas_assert (op
< i
.operands
);
6252 if (op
== i
.operands
- 1)
6254 i
.error
= unsupported_masking
;
6264 /* Check if masking is applied to dest operand. */
6265 if (i
.mask
.reg
&& (i
.mask
.operand
!= i
.operands
- 1))
6267 i
.error
= mask_not_on_destination
;
6272 if (i
.rounding
.type
!= rc_none
)
6274 if (!t
->opcode_modifier
.sae
6275 || ((i
.rounding
.type
!= saeonly
) != t
->opcode_modifier
.staticrounding
))
6277 i
.error
= unsupported_rc_sae
;
6280 /* If the instruction has several immediate operands and one of
6281 them is rounding, the rounding operand should be the last
6282 immediate operand. */
6283 if (i
.imm_operands
> 1
6284 && i
.rounding
.operand
!= i
.imm_operands
- 1)
6286 i
.error
= rc_sae_operand_not_last_imm
;
6290 else if (t
->opcode_modifier
.sae
)
6292 i
.error
= unsupported_syntax
;
6296 /* Check the special Imm4 cases; must be the first operand. */
6297 if (t
->cpu_flags
.bitfield
.cpuxop
&& t
->operands
== 5)
6299 if (i
.op
[0].imms
->X_op
!= O_constant
6300 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
6306 /* Turn off Imm<N> so that update_imm won't complain. */
6307 operand_type_set (&i
.types
[0], 0);
6310 /* Check vector Disp8 operand. */
6311 if (t
->opcode_modifier
.disp8memshift
6312 && i
.disp_encoding
<= disp_encoding_8bit
)
6314 if (i
.broadcast
.type
)
6315 i
.memshift
= t
->opcode_modifier
.broadcast
- 1;
6316 else if (t
->opcode_modifier
.disp8memshift
!= DISP8_SHIFT_VL
)
6317 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
6320 const i386_operand_type
*type
= NULL
, *fallback
= NULL
;
6323 for (op
= 0; op
< i
.operands
; op
++)
6324 if (i
.flags
[op
] & Operand_Mem
)
6326 if (t
->opcode_modifier
.evex
== EVEXLIG
)
6327 i
.memshift
= 2 + (i
.suffix
== QWORD_MNEM_SUFFIX
);
6328 else if (t
->operand_types
[op
].bitfield
.xmmword
6329 + t
->operand_types
[op
].bitfield
.ymmword
6330 + t
->operand_types
[op
].bitfield
.zmmword
<= 1)
6331 type
= &t
->operand_types
[op
];
6332 else if (!i
.types
[op
].bitfield
.unspecified
)
6333 type
= &i
.types
[op
];
6334 else /* Ambiguities get resolved elsewhere. */
6335 fallback
= &t
->operand_types
[op
];
6337 else if (i
.types
[op
].bitfield
.class == RegSIMD
6338 && t
->opcode_modifier
.evex
!= EVEXLIG
)
6340 if (i
.types
[op
].bitfield
.zmmword
)
6342 else if (i
.types
[op
].bitfield
.ymmword
&& i
.memshift
< 5)
6344 else if (i
.types
[op
].bitfield
.xmmword
&& i
.memshift
< 4)
6348 if (!type
&& !i
.memshift
)
6352 if (type
->bitfield
.zmmword
)
6354 else if (type
->bitfield
.ymmword
)
6356 else if (type
->bitfield
.xmmword
)
6360 /* For the check in fits_in_disp8(). */
6361 if (i
.memshift
== 0)
6365 for (op
= 0; op
< i
.operands
; op
++)
6366 if (operand_type_check (i
.types
[op
], disp
)
6367 && i
.op
[op
].disps
->X_op
== O_constant
)
6369 if (fits_in_disp8 (i
.op
[op
].disps
->X_add_number
))
6371 i
.types
[op
].bitfield
.disp8
= 1;
6374 i
.types
[op
].bitfield
.disp8
= 0;
6383 /* Check if encoding requirements are met by the instruction. */
6386 VEX_check_encoding (const insn_template
*t
)
6388 if (i
.vec_encoding
== vex_encoding_error
)
6390 i
.error
= unsupported
;
6394 if (i
.vec_encoding
== vex_encoding_evex
)
6396 /* This instruction must be encoded with EVEX prefix. */
6397 if (!is_evex_encoding (t
))
6399 i
.error
= unsupported
;
6405 if (!t
->opcode_modifier
.vex
)
6407 /* This instruction template doesn't have VEX prefix. */
6408 if (i
.vec_encoding
!= vex_encoding_default
)
6410 i
.error
= unsupported
;
6419 static const insn_template
*
6420 match_template (char mnem_suffix
)
6422 /* Points to template once we've found it. */
6423 const insn_template
*t
;
6424 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
6425 i386_operand_type overlap4
;
6426 unsigned int found_reverse_match
;
6427 i386_opcode_modifier suffix_check
;
6428 i386_operand_type operand_types
[MAX_OPERANDS
];
6429 int addr_prefix_disp
;
6430 unsigned int j
, size_match
, check_register
;
6431 enum i386_error specific_error
= 0;
6433 #if MAX_OPERANDS != 5
6434 # error "MAX_OPERANDS must be 5."
6437 found_reverse_match
= 0;
6438 addr_prefix_disp
= -1;
6440 /* Prepare for mnemonic suffix check. */
6441 memset (&suffix_check
, 0, sizeof (suffix_check
));
6442 switch (mnem_suffix
)
6444 case BYTE_MNEM_SUFFIX
:
6445 suffix_check
.no_bsuf
= 1;
6447 case WORD_MNEM_SUFFIX
:
6448 suffix_check
.no_wsuf
= 1;
6450 case SHORT_MNEM_SUFFIX
:
6451 suffix_check
.no_ssuf
= 1;
6453 case LONG_MNEM_SUFFIX
:
6454 suffix_check
.no_lsuf
= 1;
6456 case QWORD_MNEM_SUFFIX
:
6457 suffix_check
.no_qsuf
= 1;
6460 /* NB: In Intel syntax, normally we can check for memory operand
6461 size when there is no mnemonic suffix. But jmp and call have
6462 2 different encodings with Dword memory operand size, one with
6463 No_ldSuf and the other without. i.suffix is set to
6464 LONG_DOUBLE_MNEM_SUFFIX to skip the one with No_ldSuf. */
6465 if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
6466 suffix_check
.no_ldsuf
= 1;
6469 /* Must have right number of operands. */
6470 i
.error
= number_of_operands_mismatch
;
6472 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
6474 addr_prefix_disp
= -1;
6475 found_reverse_match
= 0;
6477 if (i
.operands
!= t
->operands
)
6480 /* Check processor support. */
6481 i
.error
= unsupported
;
6482 if (cpu_flags_match (t
) != CPU_FLAGS_PERFECT_MATCH
)
6485 /* Check Pseudo Prefix. */
6486 i
.error
= unsupported
;
6487 if (t
->opcode_modifier
.pseudovexprefix
6488 && !(i
.vec_encoding
== vex_encoding_vex
6489 || i
.vec_encoding
== vex_encoding_vex3
))
6492 /* Check AT&T mnemonic. */
6493 i
.error
= unsupported_with_intel_mnemonic
;
6494 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
6497 /* Check AT&T/Intel syntax. */
6498 i
.error
= unsupported_syntax
;
6499 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
6500 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
))
6503 /* Check Intel64/AMD64 ISA. */
6507 /* Default: Don't accept Intel64. */
6508 if (t
->opcode_modifier
.isa64
== INTEL64
)
6512 /* -mamd64: Don't accept Intel64 and Intel64 only. */
6513 if (t
->opcode_modifier
.isa64
>= INTEL64
)
6517 /* -mintel64: Don't accept AMD64. */
6518 if (t
->opcode_modifier
.isa64
== AMD64
&& flag_code
== CODE_64BIT
)
6523 /* Check the suffix. */
6524 i
.error
= invalid_instruction_suffix
;
6525 if ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
6526 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
6527 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
6528 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
6529 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
6530 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
))
6533 size_match
= operand_size_match (t
);
6537 /* This is intentionally not
6539 if (i.jumpabsolute != (t->opcode_modifier.jump == JUMP_ABSOLUTE))
6541 as the case of a missing * on the operand is accepted (perhaps with
6542 a warning, issued further down). */
6543 if (i
.jumpabsolute
&& t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
6545 i
.error
= operand_type_mismatch
;
6549 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6550 operand_types
[j
] = t
->operand_types
[j
];
6552 /* In general, don't allow
6553 - 64-bit operands outside of 64-bit mode,
6554 - 32-bit operands on pre-386. */
6555 j
= i
.imm_operands
+ (t
->operands
> i
.imm_operands
+ 1);
6556 if (((i
.suffix
== QWORD_MNEM_SUFFIX
6557 && flag_code
!= CODE_64BIT
6558 && !(t
->opcode_modifier
.opcodespace
== SPACE_0F
6559 && t
->base_opcode
== 0xc7
6560 && t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
6561 && t
->extension_opcode
== 1) /* cmpxchg8b */)
6562 || (i
.suffix
== LONG_MNEM_SUFFIX
6563 && !cpu_arch_flags
.bitfield
.cpui386
))
6565 ? (t
->opcode_modifier
.mnemonicsize
!= IGNORESIZE
6566 && !intel_float_operand (t
->name
))
6567 : intel_float_operand (t
->name
) != 2)
6568 && (t
->operands
== i
.imm_operands
6569 || (operand_types
[i
.imm_operands
].bitfield
.class != RegMMX
6570 && operand_types
[i
.imm_operands
].bitfield
.class != RegSIMD
6571 && operand_types
[i
.imm_operands
].bitfield
.class != RegMask
)
6572 || (operand_types
[j
].bitfield
.class != RegMMX
6573 && operand_types
[j
].bitfield
.class != RegSIMD
6574 && operand_types
[j
].bitfield
.class != RegMask
))
6575 && !t
->opcode_modifier
.sib
)
6578 /* Do not verify operands when there are none. */
6581 if (VEX_check_encoding (t
))
6583 specific_error
= i
.error
;
6587 /* We've found a match; break out of loop. */
6591 if (!t
->opcode_modifier
.jump
6592 || t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)
6594 /* There should be only one Disp operand. */
6595 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6596 if (operand_type_check (operand_types
[j
], disp
))
6598 if (j
< MAX_OPERANDS
)
6600 bool override
= (i
.prefix
[ADDR_PREFIX
] != 0);
6602 addr_prefix_disp
= j
;
6604 /* Address size prefix will turn Disp64/Disp32S/Disp32/Disp16
6605 operand into Disp32/Disp32/Disp16/Disp32 operand. */
6609 override
= !override
;
6612 if (operand_types
[j
].bitfield
.disp32
6613 && operand_types
[j
].bitfield
.disp16
)
6615 operand_types
[j
].bitfield
.disp16
= override
;
6616 operand_types
[j
].bitfield
.disp32
= !override
;
6618 operand_types
[j
].bitfield
.disp32s
= 0;
6619 operand_types
[j
].bitfield
.disp64
= 0;
6623 if (operand_types
[j
].bitfield
.disp32s
6624 || operand_types
[j
].bitfield
.disp64
)
6626 operand_types
[j
].bitfield
.disp64
&= !override
;
6627 operand_types
[j
].bitfield
.disp32s
&= !override
;
6628 operand_types
[j
].bitfield
.disp32
= override
;
6630 operand_types
[j
].bitfield
.disp16
= 0;
6638 case BFD_RELOC_386_GOT32
:
6639 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
6640 if (t
->base_opcode
== 0xa0
6641 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
)
6644 case BFD_RELOC_386_TLS_GOTIE
:
6645 case BFD_RELOC_386_TLS_LE_32
:
6646 case BFD_RELOC_X86_64_GOTTPOFF
:
6647 case BFD_RELOC_X86_64_TLSLD
:
6648 /* Don't allow KMOV in TLS code sequences. */
6649 if (t
->opcode_modifier
.vex
)
6656 /* We check register size if needed. */
6657 if (t
->opcode_modifier
.checkregsize
)
6659 check_register
= (1 << t
->operands
) - 1;
6660 if (i
.broadcast
.type
)
6661 check_register
&= ~(1 << i
.broadcast
.operand
);
6666 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
6667 switch (t
->operands
)
6670 if (!operand_type_match (overlap0
, i
.types
[0]))
6674 /* xchg %eax, %eax is a special case. It is an alias for nop
6675 only in 32bit mode and we can use opcode 0x90. In 64bit
6676 mode, we can't use 0x90 for xchg %eax, %eax since it should
6677 zero-extend %eax to %rax. */
6678 if (flag_code
== CODE_64BIT
6679 && t
->base_opcode
== 0x90
6680 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
6681 && i
.types
[0].bitfield
.instance
== Accum
6682 && i
.types
[0].bitfield
.dword
6683 && i
.types
[1].bitfield
.instance
== Accum
6684 && i
.types
[1].bitfield
.dword
)
6686 /* xrelease mov %eax, <disp> is another special case. It must not
6687 match the accumulator-only encoding of mov. */
6688 if (flag_code
!= CODE_64BIT
6690 && t
->base_opcode
== 0xa0
6691 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
6692 && i
.types
[0].bitfield
.instance
== Accum
6693 && (i
.flags
[1] & Operand_Mem
))
6698 if (!(size_match
& MATCH_STRAIGHT
))
6700 /* Reverse direction of operands if swapping is possible in the first
6701 place (operands need to be symmetric) and
6702 - the load form is requested, and the template is a store form,
6703 - the store form is requested, and the template is a load form,
6704 - the non-default (swapped) form is requested. */
6705 overlap1
= operand_type_and (operand_types
[0], operand_types
[1]);
6706 if (t
->opcode_modifier
.d
&& i
.reg_operands
== i
.operands
6707 && !operand_type_all_zero (&overlap1
))
6708 switch (i
.dir_encoding
)
6710 case dir_encoding_load
:
6711 if (operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6712 || t
->opcode_modifier
.regmem
)
6716 case dir_encoding_store
:
6717 if (!operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6718 && !t
->opcode_modifier
.regmem
)
6722 case dir_encoding_swap
:
6725 case dir_encoding_default
:
6728 /* If we want store form, we skip the current load. */
6729 if ((i
.dir_encoding
== dir_encoding_store
6730 || i
.dir_encoding
== dir_encoding_swap
)
6731 && i
.mem_operands
== 0
6732 && t
->opcode_modifier
.load
)
6737 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
6738 if (!operand_type_match (overlap0
, i
.types
[0])
6739 || !operand_type_match (overlap1
, i
.types
[1])
6740 || ((check_register
& 3) == 3
6741 && !operand_type_register_match (i
.types
[0],
6746 /* Check if other direction is valid ... */
6747 if (!t
->opcode_modifier
.d
)
6751 if (!(size_match
& MATCH_REVERSE
))
6753 /* Try reversing direction of operands. */
6754 overlap0
= operand_type_and (i
.types
[0], operand_types
[i
.operands
- 1]);
6755 overlap1
= operand_type_and (i
.types
[i
.operands
- 1], operand_types
[0]);
6756 if (!operand_type_match (overlap0
, i
.types
[0])
6757 || !operand_type_match (overlap1
, i
.types
[i
.operands
- 1])
6759 && !operand_type_register_match (i
.types
[0],
6760 operand_types
[i
.operands
- 1],
6761 i
.types
[i
.operands
- 1],
6764 /* Does not match either direction. */
6767 /* found_reverse_match holds which of D or FloatR
6769 if (!t
->opcode_modifier
.d
)
6770 found_reverse_match
= 0;
6771 else if (operand_types
[0].bitfield
.tbyte
)
6772 found_reverse_match
= Opcode_FloatD
;
6773 else if (operand_types
[0].bitfield
.xmmword
6774 || operand_types
[i
.operands
- 1].bitfield
.xmmword
6775 || operand_types
[0].bitfield
.class == RegMMX
6776 || operand_types
[i
.operands
- 1].bitfield
.class == RegMMX
6777 || is_any_vex_encoding(t
))
6778 found_reverse_match
= (t
->base_opcode
& 0xee) != 0x6e
6779 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
6781 found_reverse_match
= Opcode_D
;
6782 if (t
->opcode_modifier
.floatr
)
6783 found_reverse_match
|= Opcode_FloatR
;
6787 /* Found a forward 2 operand match here. */
6788 switch (t
->operands
)
6791 overlap4
= operand_type_and (i
.types
[4],
6795 overlap3
= operand_type_and (i
.types
[3],
6799 overlap2
= operand_type_and (i
.types
[2],
6804 switch (t
->operands
)
6807 if (!operand_type_match (overlap4
, i
.types
[4])
6808 || !operand_type_register_match (i
.types
[3],
6815 if (!operand_type_match (overlap3
, i
.types
[3])
6816 || ((check_register
& 0xa) == 0xa
6817 && !operand_type_register_match (i
.types
[1],
6821 || ((check_register
& 0xc) == 0xc
6822 && !operand_type_register_match (i
.types
[2],
6829 /* Here we make use of the fact that there are no
6830 reverse match 3 operand instructions. */
6831 if (!operand_type_match (overlap2
, i
.types
[2])
6832 || ((check_register
& 5) == 5
6833 && !operand_type_register_match (i
.types
[0],
6837 || ((check_register
& 6) == 6
6838 && !operand_type_register_match (i
.types
[1],
6846 /* Found either forward/reverse 2, 3 or 4 operand match here:
6847 slip through to break. */
6850 /* Check if vector operands are valid. */
6851 if (check_VecOperands (t
))
6853 specific_error
= i
.error
;
6857 /* Check if VEX/EVEX encoding requirements can be satisfied. */
6858 if (VEX_check_encoding (t
))
6860 specific_error
= i
.error
;
6864 /* We've found a match; break out of loop. */
6868 if (t
== current_templates
->end
)
6870 /* We found no match. */
6871 const char *err_msg
;
6872 switch (specific_error
? specific_error
: i
.error
)
6876 case operand_size_mismatch
:
6877 err_msg
= _("operand size mismatch");
6879 case operand_type_mismatch
:
6880 err_msg
= _("operand type mismatch");
6882 case register_type_mismatch
:
6883 err_msg
= _("register type mismatch");
6885 case number_of_operands_mismatch
:
6886 err_msg
= _("number of operands mismatch");
6888 case invalid_instruction_suffix
:
6889 err_msg
= _("invalid instruction suffix");
6892 err_msg
= _("constant doesn't fit in 4 bits");
6894 case unsupported_with_intel_mnemonic
:
6895 err_msg
= _("unsupported with Intel mnemonic");
6897 case unsupported_syntax
:
6898 err_msg
= _("unsupported syntax");
6901 as_bad (_("unsupported instruction `%s'"),
6902 current_templates
->start
->name
);
6904 case invalid_sib_address
:
6905 err_msg
= _("invalid SIB address");
6907 case invalid_vsib_address
:
6908 err_msg
= _("invalid VSIB address");
6910 case invalid_vector_register_set
:
6911 err_msg
= _("mask, index, and destination registers must be distinct");
6913 case invalid_tmm_register_set
:
6914 err_msg
= _("all tmm registers must be distinct");
6916 case invalid_dest_and_src_register_set
:
6917 err_msg
= _("destination and source registers must be distinct");
6919 case unsupported_vector_index_register
:
6920 err_msg
= _("unsupported vector index register");
6922 case unsupported_broadcast
:
6923 err_msg
= _("unsupported broadcast");
6925 case broadcast_needed
:
6926 err_msg
= _("broadcast is needed for operand of such type");
6928 case unsupported_masking
:
6929 err_msg
= _("unsupported masking");
6931 case mask_not_on_destination
:
6932 err_msg
= _("mask not on destination operand");
6934 case no_default_mask
:
6935 err_msg
= _("default mask isn't allowed");
6937 case unsupported_rc_sae
:
6938 err_msg
= _("unsupported static rounding/sae");
6940 case rc_sae_operand_not_last_imm
:
6942 err_msg
= _("RC/SAE operand must precede immediate operands");
6944 err_msg
= _("RC/SAE operand must follow immediate operands");
6946 case invalid_register_operand
:
6947 err_msg
= _("invalid register operand");
6950 as_bad (_("%s for `%s'"), err_msg
,
6951 current_templates
->start
->name
);
6955 if (!quiet_warnings
)
6958 && (i
.jumpabsolute
!= (t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)))
6959 as_warn (_("indirect %s without `*'"), t
->name
);
6961 if (t
->opcode_modifier
.isprefix
6962 && t
->opcode_modifier
.mnemonicsize
== IGNORESIZE
)
6964 /* Warn them that a data or address size prefix doesn't
6965 affect assembly of the next line of code. */
6966 as_warn (_("stand-alone `%s' prefix"), t
->name
);
6970 /* Copy the template we found. */
6971 install_template (t
);
6973 if (addr_prefix_disp
!= -1)
6974 i
.tm
.operand_types
[addr_prefix_disp
]
6975 = operand_types
[addr_prefix_disp
];
6977 if (found_reverse_match
)
6979 /* If we found a reverse match we must alter the opcode direction
6980 bit and clear/flip the regmem modifier one. found_reverse_match
6981 holds bits to change (different for int & float insns). */
6983 i
.tm
.base_opcode
^= found_reverse_match
;
6985 i
.tm
.operand_types
[0] = operand_types
[i
.operands
- 1];
6986 i
.tm
.operand_types
[i
.operands
- 1] = operand_types
[0];
6988 /* Certain SIMD insns have their load forms specified in the opcode
6989 table, and hence we need to _set_ RegMem instead of clearing it.
6990 We need to avoid setting the bit though on insns like KMOVW. */
6991 i
.tm
.opcode_modifier
.regmem
6992 = i
.tm
.opcode_modifier
.modrm
&& i
.tm
.opcode_modifier
.d
6993 && i
.tm
.operands
> 2U - i
.tm
.opcode_modifier
.sse2avx
6994 && !i
.tm
.opcode_modifier
.regmem
;
7003 unsigned int es_op
= i
.tm
.opcode_modifier
.isstring
- IS_STRING_ES_OP0
;
7004 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.baseindex
? es_op
: 0;
7006 if (i
.seg
[op
] != NULL
&& i
.seg
[op
] != reg_es
)
7008 as_bad (_("`%s' operand %u must use `%ses' segment"),
7010 intel_syntax
? i
.tm
.operands
- es_op
: es_op
+ 1,
7015 /* There's only ever one segment override allowed per instruction.
7016 This instruction possibly has a legal segment override on the
7017 second operand, so copy the segment to where non-string
7018 instructions store it, allowing common code. */
7019 i
.seg
[op
] = i
.seg
[1];
7025 process_suffix (void)
7027 bool is_crc32
= false, is_movx
= false;
7029 /* If matched instruction specifies an explicit instruction mnemonic
7031 if (i
.tm
.opcode_modifier
.size
== SIZE16
)
7032 i
.suffix
= WORD_MNEM_SUFFIX
;
7033 else if (i
.tm
.opcode_modifier
.size
== SIZE32
)
7034 i
.suffix
= LONG_MNEM_SUFFIX
;
7035 else if (i
.tm
.opcode_modifier
.size
== SIZE64
)
7036 i
.suffix
= QWORD_MNEM_SUFFIX
;
7037 else if (i
.reg_operands
7038 && (i
.operands
> 1 || i
.types
[0].bitfield
.class == Reg
)
7039 && !i
.tm
.opcode_modifier
.addrprefixopreg
)
7041 unsigned int numop
= i
.operands
;
7044 is_movx
= (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
7045 && (i
.tm
.base_opcode
| 8) == 0xbe)
7046 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
7047 && i
.tm
.base_opcode
== 0x63
7048 && i
.tm
.cpu_flags
.bitfield
.cpu64
);
7051 is_crc32
= (i
.tm
.base_opcode
== 0xf0
7052 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
7053 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
);
7055 /* movsx/movzx want only their source operand considered here, for the
7056 ambiguity checking below. The suffix will be replaced afterwards
7057 to represent the destination (register). */
7058 if (is_movx
&& (i
.tm
.opcode_modifier
.w
|| i
.tm
.base_opcode
== 0x63))
7061 /* crc32 needs REX.W set regardless of suffix / source operand size. */
7062 if (is_crc32
&& i
.tm
.operand_types
[1].bitfield
.qword
)
7065 /* If there's no instruction mnemonic suffix we try to invent one
7066 based on GPR operands. */
7069 /* We take i.suffix from the last register operand specified,
7070 Destination register type is more significant than source
7071 register type. crc32 in SSE4.2 prefers source register
7073 unsigned int op
= is_crc32
? 1 : i
.operands
;
7076 if (i
.tm
.operand_types
[op
].bitfield
.instance
== InstanceNone
7077 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7079 if (i
.types
[op
].bitfield
.class != Reg
)
7081 if (i
.types
[op
].bitfield
.byte
)
7082 i
.suffix
= BYTE_MNEM_SUFFIX
;
7083 else if (i
.types
[op
].bitfield
.word
)
7084 i
.suffix
= WORD_MNEM_SUFFIX
;
7085 else if (i
.types
[op
].bitfield
.dword
)
7086 i
.suffix
= LONG_MNEM_SUFFIX
;
7087 else if (i
.types
[op
].bitfield
.qword
)
7088 i
.suffix
= QWORD_MNEM_SUFFIX
;
7094 /* As an exception, movsx/movzx silently default to a byte source
7096 if (is_movx
&& i
.tm
.opcode_modifier
.w
&& !i
.suffix
&& !intel_syntax
)
7097 i
.suffix
= BYTE_MNEM_SUFFIX
;
7099 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
7102 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
7103 && i
.tm
.opcode_modifier
.no_bsuf
)
7105 else if (!check_byte_reg ())
7108 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
7111 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
7112 && i
.tm
.opcode_modifier
.no_lsuf
7113 && !i
.tm
.opcode_modifier
.todword
7114 && !i
.tm
.opcode_modifier
.toqword
)
7116 else if (!check_long_reg ())
7119 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
7122 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
7123 && i
.tm
.opcode_modifier
.no_qsuf
7124 && !i
.tm
.opcode_modifier
.todword
7125 && !i
.tm
.opcode_modifier
.toqword
)
7127 else if (!check_qword_reg ())
7130 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
7133 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
7134 && i
.tm
.opcode_modifier
.no_wsuf
)
7136 else if (!check_word_reg ())
7139 else if (intel_syntax
7140 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
)
7141 /* Do nothing if the instruction is going to ignore the prefix. */
7146 /* Undo the movsx/movzx change done above. */
7149 else if (i
.tm
.opcode_modifier
.mnemonicsize
== DEFAULTSIZE
7152 i
.suffix
= stackop_size
;
7153 if (stackop_size
== LONG_MNEM_SUFFIX
)
7155 /* stackop_size is set to LONG_MNEM_SUFFIX for the
7156 .code16gcc directive to support 16-bit mode with
7157 32-bit address. For IRET without a suffix, generate
7158 16-bit IRET (opcode 0xcf) to return from an interrupt
7160 if (i
.tm
.base_opcode
== 0xcf)
7162 i
.suffix
= WORD_MNEM_SUFFIX
;
7163 as_warn (_("generating 16-bit `iret' for .code16gcc directive"));
7165 /* Warn about changed behavior for segment register push/pop. */
7166 else if ((i
.tm
.base_opcode
| 1) == 0x07)
7167 as_warn (_("generating 32-bit `%s', unlike earlier gas versions"),
7172 && (i
.tm
.opcode_modifier
.jump
== JUMP_ABSOLUTE
7173 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
7174 || i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
7175 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
7176 && i
.tm
.base_opcode
== 0x01 /* [ls][gi]dt */
7177 && i
.tm
.extension_opcode
<= 3)))
7182 if (!i
.tm
.opcode_modifier
.no_qsuf
)
7184 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
7185 || i
.tm
.opcode_modifier
.no_lsuf
)
7186 i
.suffix
= QWORD_MNEM_SUFFIX
;
7191 if (!i
.tm
.opcode_modifier
.no_lsuf
)
7192 i
.suffix
= LONG_MNEM_SUFFIX
;
7195 if (!i
.tm
.opcode_modifier
.no_wsuf
)
7196 i
.suffix
= WORD_MNEM_SUFFIX
;
7202 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
7203 /* Also cover lret/retf/iret in 64-bit mode. */
7204 || (flag_code
== CODE_64BIT
7205 && !i
.tm
.opcode_modifier
.no_lsuf
7206 && !i
.tm
.opcode_modifier
.no_qsuf
))
7207 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
7208 /* Explicit sizing prefixes are assumed to disambiguate insns. */
7209 && !i
.prefix
[DATA_PREFIX
] && !(i
.prefix
[REX_PREFIX
] & REX_W
)
7210 /* Accept FLDENV et al without suffix. */
7211 && (i
.tm
.opcode_modifier
.no_ssuf
|| i
.tm
.opcode_modifier
.floatmf
))
7213 unsigned int suffixes
, evex
= 0;
7215 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
7216 if (!i
.tm
.opcode_modifier
.no_wsuf
)
7218 if (!i
.tm
.opcode_modifier
.no_lsuf
)
7220 if (!i
.tm
.opcode_modifier
.no_ldsuf
)
7222 if (!i
.tm
.opcode_modifier
.no_ssuf
)
7224 if (flag_code
== CODE_64BIT
&& !i
.tm
.opcode_modifier
.no_qsuf
)
7227 /* For [XYZ]MMWORD operands inspect operand sizes. While generally
7228 also suitable for AT&T syntax mode, it was requested that this be
7229 restricted to just Intel syntax. */
7230 if (intel_syntax
&& is_any_vex_encoding (&i
.tm
) && !i
.broadcast
.type
)
7234 for (op
= 0; op
< i
.tm
.operands
; ++op
)
7236 if (is_evex_encoding (&i
.tm
)
7237 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
7239 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
7240 i
.tm
.operand_types
[op
].bitfield
.xmmword
= 0;
7241 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
7242 i
.tm
.operand_types
[op
].bitfield
.ymmword
= 0;
7243 if (!i
.tm
.opcode_modifier
.evex
7244 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
7245 i
.tm
.opcode_modifier
.evex
= EVEX512
;
7248 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
7249 + i
.tm
.operand_types
[op
].bitfield
.ymmword
7250 + i
.tm
.operand_types
[op
].bitfield
.zmmword
< 2)
7253 /* Any properly sized operand disambiguates the insn. */
7254 if (i
.types
[op
].bitfield
.xmmword
7255 || i
.types
[op
].bitfield
.ymmword
7256 || i
.types
[op
].bitfield
.zmmword
)
7258 suffixes
&= ~(7 << 6);
7263 if ((i
.flags
[op
] & Operand_Mem
)
7264 && i
.tm
.operand_types
[op
].bitfield
.unspecified
)
7266 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
)
7268 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
7270 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
7272 if (is_evex_encoding (&i
.tm
))
7278 /* Are multiple suffixes / operand sizes allowed? */
7279 if (suffixes
& (suffixes
- 1))
7282 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
7283 || operand_check
== check_error
))
7285 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
7288 if (operand_check
== check_error
)
7290 as_bad (_("no instruction mnemonic suffix given and "
7291 "no register operands; can't size `%s'"), i
.tm
.name
);
7294 if (operand_check
== check_warning
)
7295 as_warn (_("%s; using default for `%s'"),
7297 ? _("ambiguous operand size")
7298 : _("no instruction mnemonic suffix given and "
7299 "no register operands"),
7302 if (i
.tm
.opcode_modifier
.floatmf
)
7303 i
.suffix
= SHORT_MNEM_SUFFIX
;
7305 /* handled below */;
7307 i
.tm
.opcode_modifier
.evex
= evex
;
7308 else if (flag_code
== CODE_16BIT
)
7309 i
.suffix
= WORD_MNEM_SUFFIX
;
7310 else if (!i
.tm
.opcode_modifier
.no_lsuf
)
7311 i
.suffix
= LONG_MNEM_SUFFIX
;
7313 i
.suffix
= QWORD_MNEM_SUFFIX
;
7319 /* In Intel syntax, movsx/movzx must have a "suffix" (checked above).
7320 In AT&T syntax, if there is no suffix (warned about above), the default
7321 will be byte extension. */
7322 if (i
.tm
.opcode_modifier
.w
&& i
.suffix
&& i
.suffix
!= BYTE_MNEM_SUFFIX
)
7323 i
.tm
.base_opcode
|= 1;
7325 /* For further processing, the suffix should represent the destination
7326 (register). This is already the case when one was used with
7327 mov[sz][bw]*, but we need to replace it for mov[sz]x, or if there was
7328 no suffix to begin with. */
7329 if (i
.tm
.opcode_modifier
.w
|| i
.tm
.base_opcode
== 0x63 || !i
.suffix
)
7331 if (i
.types
[1].bitfield
.word
)
7332 i
.suffix
= WORD_MNEM_SUFFIX
;
7333 else if (i
.types
[1].bitfield
.qword
)
7334 i
.suffix
= QWORD_MNEM_SUFFIX
;
7336 i
.suffix
= LONG_MNEM_SUFFIX
;
7338 i
.tm
.opcode_modifier
.w
= 0;
7342 if (!i
.tm
.opcode_modifier
.modrm
&& i
.reg_operands
&& i
.tm
.operands
< 3)
7343 i
.short_form
= (i
.tm
.operand_types
[0].bitfield
.class == Reg
)
7344 != (i
.tm
.operand_types
[1].bitfield
.class == Reg
);
7346 /* Change the opcode based on the operand size given by i.suffix. */
7349 /* Size floating point instruction. */
7350 case LONG_MNEM_SUFFIX
:
7351 if (i
.tm
.opcode_modifier
.floatmf
)
7353 i
.tm
.base_opcode
^= 4;
7357 case WORD_MNEM_SUFFIX
:
7358 case QWORD_MNEM_SUFFIX
:
7359 /* It's not a byte, select word/dword operation. */
7360 if (i
.tm
.opcode_modifier
.w
)
7363 i
.tm
.base_opcode
|= 8;
7365 i
.tm
.base_opcode
|= 1;
7368 case SHORT_MNEM_SUFFIX
:
7369 /* Now select between word & dword operations via the operand
7370 size prefix, except for instructions that will ignore this
7372 if (i
.suffix
!= QWORD_MNEM_SUFFIX
7373 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
7374 && !i
.tm
.opcode_modifier
.floatmf
7375 && !is_any_vex_encoding (&i
.tm
)
7376 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
7377 || (flag_code
== CODE_64BIT
7378 && i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)))
7380 unsigned int prefix
= DATA_PREFIX_OPCODE
;
7382 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
) /* jcxz, loop */
7383 prefix
= ADDR_PREFIX_OPCODE
;
7385 if (!add_prefix (prefix
))
7389 /* Set mode64 for an operand. */
7390 if (i
.suffix
== QWORD_MNEM_SUFFIX
7391 && flag_code
== CODE_64BIT
7392 && !i
.tm
.opcode_modifier
.norex64
7393 && !i
.tm
.opcode_modifier
.vexw
7394 /* Special case for xchg %rax,%rax. It is NOP and doesn't
7396 && ! (i
.operands
== 2
7397 && i
.tm
.base_opcode
== 0x90
7398 && i
.tm
.extension_opcode
== None
7399 && i
.types
[0].bitfield
.instance
== Accum
7400 && i
.types
[0].bitfield
.qword
7401 && i
.types
[1].bitfield
.instance
== Accum
7402 && i
.types
[1].bitfield
.qword
))
7408 /* Select word/dword/qword operation with explicit data sizing prefix
7409 when there are no suitable register operands. */
7410 if (i
.tm
.opcode_modifier
.w
7411 && (i
.prefix
[DATA_PREFIX
] || (i
.prefix
[REX_PREFIX
] & REX_W
))
7413 || (i
.reg_operands
== 1
7415 && (i
.tm
.operand_types
[0].bitfield
.instance
== RegC
7417 || i
.tm
.operand_types
[0].bitfield
.instance
== RegD
7418 || i
.tm
.operand_types
[1].bitfield
.instance
== RegD
7421 i
.tm
.base_opcode
|= 1;
7425 if (i
.tm
.opcode_modifier
.addrprefixopreg
)
7427 gas_assert (!i
.suffix
);
7428 gas_assert (i
.reg_operands
);
7430 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7433 /* The address size override prefix changes the size of the
7435 if (flag_code
== CODE_64BIT
7436 && i
.op
[0].regs
->reg_type
.bitfield
.word
)
7438 as_bad (_("16-bit addressing unavailable for `%s'"),
7443 if ((flag_code
== CODE_32BIT
7444 ? i
.op
[0].regs
->reg_type
.bitfield
.word
7445 : i
.op
[0].regs
->reg_type
.bitfield
.dword
)
7446 && !add_prefix (ADDR_PREFIX_OPCODE
))
7451 /* Check invalid register operand when the address size override
7452 prefix changes the size of register operands. */
7454 enum { need_word
, need_dword
, need_qword
} need
;
7456 /* Check the register operand for the address size prefix if
7457 the memory operand has no real registers, like symbol, DISP
7458 or bogus (x32-only) symbol(%rip) when symbol(%eip) is meant. */
7459 if (i
.mem_operands
== 1
7460 && i
.reg_operands
== 1
7462 && i
.types
[1].bitfield
.class == Reg
7463 && (flag_code
== CODE_32BIT
7464 ? i
.op
[1].regs
->reg_type
.bitfield
.word
7465 : i
.op
[1].regs
->reg_type
.bitfield
.dword
)
7466 && ((i
.base_reg
== NULL
&& i
.index_reg
== NULL
)
7467 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7468 || (x86_elf_abi
== X86_64_X32_ABI
7470 && i
.base_reg
->reg_num
== RegIP
7471 && i
.base_reg
->reg_type
.bitfield
.qword
))
7475 && !add_prefix (ADDR_PREFIX_OPCODE
))
7478 if (flag_code
== CODE_32BIT
)
7479 need
= i
.prefix
[ADDR_PREFIX
] ? need_word
: need_dword
;
7480 else if (i
.prefix
[ADDR_PREFIX
])
7483 need
= flag_code
== CODE_64BIT
? need_qword
: need_word
;
7485 for (op
= 0; op
< i
.operands
; op
++)
7487 if (i
.types
[op
].bitfield
.class != Reg
)
7493 if (i
.op
[op
].regs
->reg_type
.bitfield
.word
)
7497 if (i
.op
[op
].regs
->reg_type
.bitfield
.dword
)
7501 if (i
.op
[op
].regs
->reg_type
.bitfield
.qword
)
7506 as_bad (_("invalid register operand size for `%s'"),
7517 check_byte_reg (void)
7521 for (op
= i
.operands
; --op
>= 0;)
7523 /* Skip non-register operands. */
7524 if (i
.types
[op
].bitfield
.class != Reg
)
7527 /* If this is an eight bit register, it's OK. If it's the 16 or
7528 32 bit version of an eight bit register, we will just use the
7529 low portion, and that's OK too. */
7530 if (i
.types
[op
].bitfield
.byte
)
7533 /* I/O port address operands are OK too. */
7534 if (i
.tm
.operand_types
[op
].bitfield
.instance
== RegD
7535 && i
.tm
.operand_types
[op
].bitfield
.word
)
7538 /* crc32 only wants its source operand checked here. */
7539 if (i
.tm
.base_opcode
== 0xf0
7540 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
7541 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
7545 /* Any other register is bad. */
7546 as_bad (_("`%s%s' not allowed with `%s%c'"),
7547 register_prefix
, i
.op
[op
].regs
->reg_name
,
7548 i
.tm
.name
, i
.suffix
);
7555 check_long_reg (void)
7559 for (op
= i
.operands
; --op
>= 0;)
7560 /* Skip non-register operands. */
7561 if (i
.types
[op
].bitfield
.class != Reg
)
7563 /* Reject eight bit registers, except where the template requires
7564 them. (eg. movzb) */
7565 else if (i
.types
[op
].bitfield
.byte
7566 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7567 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7568 && (i
.tm
.operand_types
[op
].bitfield
.word
7569 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7571 as_bad (_("`%s%s' not allowed with `%s%c'"),
7573 i
.op
[op
].regs
->reg_name
,
7578 /* Error if the e prefix on a general reg is missing. */
7579 else if (i
.types
[op
].bitfield
.word
7580 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7581 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7582 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7584 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7585 register_prefix
, i
.op
[op
].regs
->reg_name
,
7589 /* Warn if the r prefix on a general reg is present. */
7590 else if (i
.types
[op
].bitfield
.qword
7591 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7592 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7593 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7596 && i
.tm
.opcode_modifier
.toqword
7597 && i
.types
[0].bitfield
.class != RegSIMD
)
7599 /* Convert to QWORD. We want REX byte. */
7600 i
.suffix
= QWORD_MNEM_SUFFIX
;
7604 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7605 register_prefix
, i
.op
[op
].regs
->reg_name
,
7614 check_qword_reg (void)
7618 for (op
= i
.operands
; --op
>= 0; )
7619 /* Skip non-register operands. */
7620 if (i
.types
[op
].bitfield
.class != Reg
)
7622 /* Reject eight bit registers, except where the template requires
7623 them. (eg. movzb) */
7624 else if (i
.types
[op
].bitfield
.byte
7625 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7626 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7627 && (i
.tm
.operand_types
[op
].bitfield
.word
7628 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7630 as_bad (_("`%s%s' not allowed with `%s%c'"),
7632 i
.op
[op
].regs
->reg_name
,
7637 /* Warn if the r prefix on a general reg is missing. */
7638 else if ((i
.types
[op
].bitfield
.word
7639 || i
.types
[op
].bitfield
.dword
)
7640 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7641 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7642 && i
.tm
.operand_types
[op
].bitfield
.qword
)
7644 /* Prohibit these changes in the 64bit mode, since the
7645 lowering is more complicated. */
7647 && i
.tm
.opcode_modifier
.todword
7648 && i
.types
[0].bitfield
.class != RegSIMD
)
7650 /* Convert to DWORD. We don't want REX byte. */
7651 i
.suffix
= LONG_MNEM_SUFFIX
;
7655 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7656 register_prefix
, i
.op
[op
].regs
->reg_name
,
7665 check_word_reg (void)
7668 for (op
= i
.operands
; --op
>= 0;)
7669 /* Skip non-register operands. */
7670 if (i
.types
[op
].bitfield
.class != Reg
)
7672 /* Reject eight bit registers, except where the template requires
7673 them. (eg. movzb) */
7674 else if (i
.types
[op
].bitfield
.byte
7675 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7676 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7677 && (i
.tm
.operand_types
[op
].bitfield
.word
7678 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7680 as_bad (_("`%s%s' not allowed with `%s%c'"),
7682 i
.op
[op
].regs
->reg_name
,
7687 /* Error if the e or r prefix on a general reg is present. */
7688 else if ((i
.types
[op
].bitfield
.dword
7689 || i
.types
[op
].bitfield
.qword
)
7690 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7691 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7692 && i
.tm
.operand_types
[op
].bitfield
.word
)
7694 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7695 register_prefix
, i
.op
[op
].regs
->reg_name
,
7699 /* For some instructions need encode as EVEX.W=1 without explicit VexW1. */
7700 else if (i
.types
[op
].bitfield
.qword
7702 && i
.tm
.opcode_modifier
.toqword
)
7704 /* Convert to QWORD. We want EVEX.W byte. */
7705 i
.suffix
= QWORD_MNEM_SUFFIX
;
7711 update_imm (unsigned int j
)
7713 i386_operand_type overlap
= i
.types
[j
];
7714 if ((overlap
.bitfield
.imm8
7715 || overlap
.bitfield
.imm8s
7716 || overlap
.bitfield
.imm16
7717 || overlap
.bitfield
.imm32
7718 || overlap
.bitfield
.imm32s
7719 || overlap
.bitfield
.imm64
)
7720 && !operand_type_equal (&overlap
, &imm8
)
7721 && !operand_type_equal (&overlap
, &imm8s
)
7722 && !operand_type_equal (&overlap
, &imm16
)
7723 && !operand_type_equal (&overlap
, &imm32
)
7724 && !operand_type_equal (&overlap
, &imm32s
)
7725 && !operand_type_equal (&overlap
, &imm64
))
7729 i386_operand_type temp
;
7731 operand_type_set (&temp
, 0);
7732 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
7734 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
7735 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
7737 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
7738 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
7739 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
7741 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
7742 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
7745 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
7748 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
7749 || operand_type_equal (&overlap
, &imm16_32
)
7750 || operand_type_equal (&overlap
, &imm16_32s
))
7752 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
7757 else if (i
.prefix
[REX_PREFIX
] & REX_W
)
7758 overlap
= operand_type_and (overlap
, imm32s
);
7759 else if (i
.prefix
[DATA_PREFIX
])
7760 overlap
= operand_type_and (overlap
,
7761 flag_code
!= CODE_16BIT
? imm16
: imm32
);
7762 if (!operand_type_equal (&overlap
, &imm8
)
7763 && !operand_type_equal (&overlap
, &imm8s
)
7764 && !operand_type_equal (&overlap
, &imm16
)
7765 && !operand_type_equal (&overlap
, &imm32
)
7766 && !operand_type_equal (&overlap
, &imm32s
)
7767 && !operand_type_equal (&overlap
, &imm64
))
7769 as_bad (_("no instruction mnemonic suffix given; "
7770 "can't determine immediate size"));
7774 i
.types
[j
] = overlap
;
7784 /* Update the first 2 immediate operands. */
7785 n
= i
.operands
> 2 ? 2 : i
.operands
;
7788 for (j
= 0; j
< n
; j
++)
7789 if (update_imm (j
) == 0)
7792 /* The 3rd operand can't be immediate operand. */
7793 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
7800 process_operands (void)
7802 /* Default segment register this instruction will use for memory
7803 accesses. 0 means unknown. This is only for optimizing out
7804 unnecessary segment overrides. */
7805 const reg_entry
*default_seg
= NULL
;
7807 if (i
.tm
.opcode_modifier
.sse2avx
)
7809 /* Legacy encoded insns allow explicit REX prefixes, so these prefixes
7811 i
.rex
|= i
.prefix
[REX_PREFIX
] & (REX_W
| REX_R
| REX_X
| REX_B
);
7812 i
.prefix
[REX_PREFIX
] = 0;
7815 /* ImmExt should be processed after SSE2AVX. */
7816 else if (i
.tm
.opcode_modifier
.immext
)
7819 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
7821 unsigned int dupl
= i
.operands
;
7822 unsigned int dest
= dupl
- 1;
7825 /* The destination must be an xmm register. */
7826 gas_assert (i
.reg_operands
7827 && MAX_OPERANDS
> dupl
7828 && operand_type_equal (&i
.types
[dest
], ®xmm
));
7830 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7831 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7833 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
7835 /* Keep xmm0 for instructions with VEX prefix and 3
7837 i
.tm
.operand_types
[0].bitfield
.instance
= InstanceNone
;
7838 i
.tm
.operand_types
[0].bitfield
.class = RegSIMD
;
7843 /* We remove the first xmm0 and keep the number of
7844 operands unchanged, which in fact duplicates the
7846 for (j
= 1; j
< i
.operands
; j
++)
7848 i
.op
[j
- 1] = i
.op
[j
];
7849 i
.types
[j
- 1] = i
.types
[j
];
7850 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7851 i
.flags
[j
- 1] = i
.flags
[j
];
7855 else if (i
.tm
.opcode_modifier
.implicit1stxmm0
)
7857 gas_assert ((MAX_OPERANDS
- 1) > dupl
7858 && (i
.tm
.opcode_modifier
.vexsources
7861 /* Add the implicit xmm0 for instructions with VEX prefix
7863 for (j
= i
.operands
; j
> 0; j
--)
7865 i
.op
[j
] = i
.op
[j
- 1];
7866 i
.types
[j
] = i
.types
[j
- 1];
7867 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
7868 i
.flags
[j
] = i
.flags
[j
- 1];
7871 = (const reg_entry
*) str_hash_find (reg_hash
, "xmm0");
7872 i
.types
[0] = regxmm
;
7873 i
.tm
.operand_types
[0] = regxmm
;
7876 i
.reg_operands
+= 2;
7881 i
.op
[dupl
] = i
.op
[dest
];
7882 i
.types
[dupl
] = i
.types
[dest
];
7883 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7884 i
.flags
[dupl
] = i
.flags
[dest
];
7893 i
.op
[dupl
] = i
.op
[dest
];
7894 i
.types
[dupl
] = i
.types
[dest
];
7895 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7896 i
.flags
[dupl
] = i
.flags
[dest
];
7899 if (i
.tm
.opcode_modifier
.immext
)
7902 else if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7903 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7907 for (j
= 1; j
< i
.operands
; j
++)
7909 i
.op
[j
- 1] = i
.op
[j
];
7910 i
.types
[j
- 1] = i
.types
[j
];
7912 /* We need to adjust fields in i.tm since they are used by
7913 build_modrm_byte. */
7914 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7916 i
.flags
[j
- 1] = i
.flags
[j
];
7923 else if (i
.tm
.opcode_modifier
.implicitquadgroup
)
7925 unsigned int regnum
, first_reg_in_group
, last_reg_in_group
;
7927 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
7928 gas_assert (i
.operands
>= 2 && i
.types
[1].bitfield
.class == RegSIMD
);
7929 regnum
= register_number (i
.op
[1].regs
);
7930 first_reg_in_group
= regnum
& ~3;
7931 last_reg_in_group
= first_reg_in_group
+ 3;
7932 if (regnum
!= first_reg_in_group
)
7933 as_warn (_("source register `%s%s' implicitly denotes"
7934 " `%s%.3s%u' to `%s%.3s%u' source group in `%s'"),
7935 register_prefix
, i
.op
[1].regs
->reg_name
,
7936 register_prefix
, i
.op
[1].regs
->reg_name
, first_reg_in_group
,
7937 register_prefix
, i
.op
[1].regs
->reg_name
, last_reg_in_group
,
7940 else if (i
.tm
.opcode_modifier
.regkludge
)
7942 /* The imul $imm, %reg instruction is converted into
7943 imul $imm, %reg, %reg, and the clr %reg instruction
7944 is converted into xor %reg, %reg. */
7946 unsigned int first_reg_op
;
7948 if (operand_type_check (i
.types
[0], reg
))
7952 /* Pretend we saw the extra register operand. */
7953 gas_assert (i
.reg_operands
== 1
7954 && i
.op
[first_reg_op
+ 1].regs
== 0);
7955 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
7956 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
7961 if (i
.tm
.opcode_modifier
.modrm
)
7963 /* The opcode is completed (modulo i.tm.extension_opcode which
7964 must be put into the modrm byte). Now, we make the modrm and
7965 index base bytes based on all the info we've collected. */
7967 default_seg
= build_modrm_byte ();
7969 else if (i
.types
[0].bitfield
.class == SReg
)
7971 if (flag_code
!= CODE_64BIT
7972 ? i
.tm
.base_opcode
== POP_SEG_SHORT
7973 && i
.op
[0].regs
->reg_num
== 1
7974 : (i
.tm
.base_opcode
| 1) == (POP_SEG386_SHORT
& 0xff)
7975 && i
.op
[0].regs
->reg_num
< 4)
7977 as_bad (_("you can't `%s %s%s'"),
7978 i
.tm
.name
, register_prefix
, i
.op
[0].regs
->reg_name
);
7981 if (i
.op
[0].regs
->reg_num
> 3
7982 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
7984 i
.tm
.base_opcode
^= (POP_SEG_SHORT
^ POP_SEG386_SHORT
) & 0xff;
7985 i
.tm
.opcode_modifier
.opcodespace
= SPACE_0F
;
7987 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
7989 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
7990 && (i
.tm
.base_opcode
& ~3) == MOV_AX_DISP32
)
7992 default_seg
= reg_ds
;
7994 else if (i
.tm
.opcode_modifier
.isstring
)
7996 /* For the string instructions that allow a segment override
7997 on one of their operands, the default segment is ds. */
7998 default_seg
= reg_ds
;
8000 else if (i
.short_form
)
8002 /* The register or float register operand is in operand
8004 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.class != Reg
;
8006 /* Register goes in low 3 bits of opcode. */
8007 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
8008 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
8010 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
8012 /* Warn about some common errors, but press on regardless.
8013 The first case can be generated by gcc (<= 2.8.1). */
8014 if (i
.operands
== 2)
8016 /* Reversed arguments on faddp, fsubp, etc. */
8017 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
8018 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
8019 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
8023 /* Extraneous `l' suffix on fp insn. */
8024 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
8025 register_prefix
, i
.op
[0].regs
->reg_name
);
8030 if ((i
.seg
[0] || i
.prefix
[SEG_PREFIX
])
8031 && i
.tm
.base_opcode
== 0x8d /* lea */
8032 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
8033 && !is_any_vex_encoding(&i
.tm
))
8035 if (!quiet_warnings
)
8036 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
8040 i
.prefix
[SEG_PREFIX
] = 0;
8044 /* If a segment was explicitly specified, and the specified segment
8045 is neither the default nor the one already recorded from a prefix,
8046 use an opcode prefix to select it. If we never figured out what
8047 the default segment is, then default_seg will be zero at this
8048 point, and the specified segment prefix will always be used. */
8050 && i
.seg
[0] != default_seg
8051 && i386_seg_prefixes
[i
.seg
[0]->reg_num
] != i
.prefix
[SEG_PREFIX
])
8053 if (!add_prefix (i386_seg_prefixes
[i
.seg
[0]->reg_num
]))
8059 static INLINE
void set_rex_vrex (const reg_entry
*r
, unsigned int rex_bit
,
8062 if (r
->reg_flags
& RegRex
)
8064 if (i
.rex
& rex_bit
)
8065 as_bad (_("same type of prefix used twice"));
8068 else if (do_sse2avx
&& (i
.rex
& rex_bit
) && i
.vex
.register_specifier
)
8070 gas_assert (i
.vex
.register_specifier
== r
);
8071 i
.vex
.register_specifier
+= 8;
8074 if (r
->reg_flags
& RegVRex
)
8078 static const reg_entry
*
8079 build_modrm_byte (void)
8081 const reg_entry
*default_seg
= NULL
;
8082 unsigned int source
, dest
;
8085 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
8088 unsigned int nds
, reg_slot
;
8091 dest
= i
.operands
- 1;
8094 /* There are 2 kinds of instructions:
8095 1. 5 operands: 4 register operands or 3 register operands
8096 plus 1 memory operand plus one Imm4 operand, VexXDS, and
8097 VexW0 or VexW1. The destination must be either XMM, YMM or
8099 2. 4 operands: 4 register operands or 3 register operands
8100 plus 1 memory operand, with VexXDS. */
8101 gas_assert ((i
.reg_operands
== 4
8102 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
8103 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
8104 && i
.tm
.opcode_modifier
.vexw
8105 && i
.tm
.operand_types
[dest
].bitfield
.class == RegSIMD
);
8107 /* If VexW1 is set, the first non-immediate operand is the source and
8108 the second non-immediate one is encoded in the immediate operand. */
8109 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
8111 source
= i
.imm_operands
;
8112 reg_slot
= i
.imm_operands
+ 1;
8116 source
= i
.imm_operands
+ 1;
8117 reg_slot
= i
.imm_operands
;
8120 if (i
.imm_operands
== 0)
8122 /* When there is no immediate operand, generate an 8bit
8123 immediate operand to encode the first operand. */
8124 exp
= &im_expressions
[i
.imm_operands
++];
8125 i
.op
[i
.operands
].imms
= exp
;
8126 i
.types
[i
.operands
] = imm8
;
8129 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
8130 exp
->X_op
= O_constant
;
8131 exp
->X_add_number
= register_number (i
.op
[reg_slot
].regs
) << 4;
8132 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
8136 gas_assert (i
.imm_operands
== 1);
8137 gas_assert (fits_in_imm4 (i
.op
[0].imms
->X_add_number
));
8138 gas_assert (!i
.tm
.opcode_modifier
.immext
);
8140 /* Turn on Imm8 again so that output_imm will generate it. */
8141 i
.types
[0].bitfield
.imm8
= 1;
8143 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
8144 i
.op
[0].imms
->X_add_number
8145 |= register_number (i
.op
[reg_slot
].regs
) << 4;
8146 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
8149 gas_assert (i
.tm
.operand_types
[nds
].bitfield
.class == RegSIMD
);
8150 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
8155 /* i.reg_operands MUST be the number of real register operands;
8156 implicit registers do not count. If there are 3 register
8157 operands, it must be a instruction with VexNDS. For a
8158 instruction with VexNDD, the destination register is encoded
8159 in VEX prefix. If there are 4 register operands, it must be
8160 a instruction with VEX prefix and 3 sources. */
8161 if (i
.mem_operands
== 0
8162 && ((i
.reg_operands
== 2
8163 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
8164 || (i
.reg_operands
== 3
8165 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8166 || (i
.reg_operands
== 4 && vex_3_sources
)))
8174 /* When there are 3 operands, one of them may be immediate,
8175 which may be the first or the last operand. Otherwise,
8176 the first operand must be shift count register (cl) or it
8177 is an instruction with VexNDS. */
8178 gas_assert (i
.imm_operands
== 1
8179 || (i
.imm_operands
== 0
8180 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
8181 || (i
.types
[0].bitfield
.instance
== RegC
8182 && i
.types
[0].bitfield
.byte
))));
8183 if (operand_type_check (i
.types
[0], imm
)
8184 || (i
.types
[0].bitfield
.instance
== RegC
8185 && i
.types
[0].bitfield
.byte
))
8191 /* When there are 4 operands, the first two must be 8bit
8192 immediate operands. The source operand will be the 3rd
8195 For instructions with VexNDS, if the first operand
8196 an imm8, the source operand is the 2nd one. If the last
8197 operand is imm8, the source operand is the first one. */
8198 gas_assert ((i
.imm_operands
== 2
8199 && i
.types
[0].bitfield
.imm8
8200 && i
.types
[1].bitfield
.imm8
)
8201 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
8202 && i
.imm_operands
== 1
8203 && (i
.types
[0].bitfield
.imm8
8204 || i
.types
[i
.operands
- 1].bitfield
.imm8
8205 || i
.rounding
.type
!= rc_none
)));
8206 if (i
.imm_operands
== 2)
8210 if (i
.types
[0].bitfield
.imm8
)
8217 if (is_evex_encoding (&i
.tm
))
8219 /* For EVEX instructions, when there are 5 operands, the
8220 first one must be immediate operand. If the second one
8221 is immediate operand, the source operand is the 3th
8222 one. If the last one is immediate operand, the source
8223 operand is the 2nd one. */
8224 gas_assert (i
.imm_operands
== 2
8225 && i
.tm
.opcode_modifier
.sae
8226 && operand_type_check (i
.types
[0], imm
));
8227 if (operand_type_check (i
.types
[1], imm
))
8229 else if (operand_type_check (i
.types
[4], imm
))
8243 /* RC/SAE operand could be between DEST and SRC. That happens
8244 when one operand is GPR and the other one is XMM/YMM/ZMM
8246 if (i
.rounding
.type
!= rc_none
&& i
.rounding
.operand
== dest
)
8249 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8251 /* For instructions with VexNDS, the register-only source
8252 operand must be a 32/64bit integer, XMM, YMM, ZMM, or mask
8253 register. It is encoded in VEX prefix. */
8255 i386_operand_type op
;
8258 /* Swap two source operands if needed. */
8259 if (i
.tm
.opcode_modifier
.swapsources
)
8267 op
= i
.tm
.operand_types
[vvvv
];
8268 if ((dest
+ 1) >= i
.operands
8269 || ((op
.bitfield
.class != Reg
8270 || (!op
.bitfield
.dword
&& !op
.bitfield
.qword
))
8271 && op
.bitfield
.class != RegSIMD
8272 && !operand_type_equal (&op
, ®mask
)))
8274 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
8280 /* One of the register operands will be encoded in the i.rm.reg
8281 field, the other in the combined i.rm.mode and i.rm.regmem
8282 fields. If no form of this instruction supports a memory
8283 destination operand, then we assume the source operand may
8284 sometimes be a memory operand and so we need to store the
8285 destination in the i.rm.reg field. */
8286 if (!i
.tm
.opcode_modifier
.regmem
8287 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
8289 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
8290 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
8291 set_rex_vrex (i
.op
[dest
].regs
, REX_R
, i
.tm
.opcode_modifier
.sse2avx
);
8292 set_rex_vrex (i
.op
[source
].regs
, REX_B
, false);
8296 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
8297 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
8298 set_rex_vrex (i
.op
[dest
].regs
, REX_B
, i
.tm
.opcode_modifier
.sse2avx
);
8299 set_rex_vrex (i
.op
[source
].regs
, REX_R
, false);
8301 if (flag_code
!= CODE_64BIT
&& (i
.rex
& REX_R
))
8303 if (i
.types
[!i
.tm
.opcode_modifier
.regmem
].bitfield
.class != RegCR
)
8306 add_prefix (LOCK_PREFIX_OPCODE
);
8310 { /* If it's not 2 reg operands... */
8315 unsigned int fake_zero_displacement
= 0;
8318 for (op
= 0; op
< i
.operands
; op
++)
8319 if (i
.flags
[op
] & Operand_Mem
)
8321 gas_assert (op
< i
.operands
);
8323 if (i
.tm
.opcode_modifier
.sib
)
8325 /* The index register of VSIB shouldn't be RegIZ. */
8326 if (i
.tm
.opcode_modifier
.sib
!= SIBMEM
8327 && i
.index_reg
->reg_num
== RegIZ
)
8330 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8333 i
.sib
.base
= NO_BASE_REGISTER
;
8334 i
.sib
.scale
= i
.log2_scale_factor
;
8335 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
8336 if (want_disp32 (&i
.tm
))
8337 i
.types
[op
].bitfield
.disp32
= 1;
8339 i
.types
[op
].bitfield
.disp32s
= 1;
8342 /* Since the mandatory SIB always has index register, so
8343 the code logic remains unchanged. The non-mandatory SIB
8344 without index register is allowed and will be handled
8348 if (i
.index_reg
->reg_num
== RegIZ
)
8349 i
.sib
.index
= NO_INDEX_REGISTER
;
8351 i
.sib
.index
= i
.index_reg
->reg_num
;
8352 set_rex_vrex (i
.index_reg
, REX_X
, false);
8356 default_seg
= reg_ds
;
8358 if (i
.base_reg
== 0)
8361 if (!i
.disp_operands
)
8362 fake_zero_displacement
= 1;
8363 if (i
.index_reg
== 0)
8365 /* Both check for VSIB and mandatory non-vector SIB. */
8366 gas_assert (!i
.tm
.opcode_modifier
.sib
8367 || i
.tm
.opcode_modifier
.sib
== SIBMEM
);
8368 /* Operand is just <disp> */
8369 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
8370 if (flag_code
== CODE_64BIT
)
8372 /* 64bit mode overwrites the 32bit absolute
8373 addressing by RIP relative addressing and
8374 absolute addressing is encoded by one of the
8375 redundant SIB forms. */
8376 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8377 i
.sib
.base
= NO_BASE_REGISTER
;
8378 i
.sib
.index
= NO_INDEX_REGISTER
;
8379 if (want_disp32 (&i
.tm
))
8380 i
.types
[op
].bitfield
.disp32
= 1;
8382 i
.types
[op
].bitfield
.disp32s
= 1;
8384 else if ((flag_code
== CODE_16BIT
)
8385 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
8387 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
8388 i
.types
[op
].bitfield
.disp16
= 1;
8392 i
.rm
.regmem
= NO_BASE_REGISTER
;
8393 i
.types
[op
].bitfield
.disp32
= 1;
8396 else if (!i
.tm
.opcode_modifier
.sib
)
8398 /* !i.base_reg && i.index_reg */
8399 if (i
.index_reg
->reg_num
== RegIZ
)
8400 i
.sib
.index
= NO_INDEX_REGISTER
;
8402 i
.sib
.index
= i
.index_reg
->reg_num
;
8403 i
.sib
.base
= NO_BASE_REGISTER
;
8404 i
.sib
.scale
= i
.log2_scale_factor
;
8405 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8406 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
8407 if (want_disp32 (&i
.tm
))
8408 i
.types
[op
].bitfield
.disp32
= 1;
8410 i
.types
[op
].bitfield
.disp32s
= 1;
8411 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
8415 /* RIP addressing for 64bit mode. */
8416 else if (i
.base_reg
->reg_num
== RegIP
)
8418 gas_assert (!i
.tm
.opcode_modifier
.sib
);
8419 i
.rm
.regmem
= NO_BASE_REGISTER
;
8420 i
.types
[op
].bitfield
.disp8
= 0;
8421 i
.types
[op
].bitfield
.disp16
= 0;
8422 i
.types
[op
].bitfield
.disp32
= 0;
8423 i
.types
[op
].bitfield
.disp32s
= 1;
8424 i
.types
[op
].bitfield
.disp64
= 0;
8425 i
.flags
[op
] |= Operand_PCrel
;
8426 if (! i
.disp_operands
)
8427 fake_zero_displacement
= 1;
8429 else if (i
.base_reg
->reg_type
.bitfield
.word
)
8431 gas_assert (!i
.tm
.opcode_modifier
.sib
);
8432 switch (i
.base_reg
->reg_num
)
8435 if (i
.index_reg
== 0)
8437 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
8438 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
8441 default_seg
= reg_ss
;
8442 if (i
.index_reg
== 0)
8445 if (operand_type_check (i
.types
[op
], disp
) == 0)
8447 /* fake (%bp) into 0(%bp) */
8448 if (i
.disp_encoding
== disp_encoding_16bit
)
8449 i
.types
[op
].bitfield
.disp16
= 1;
8451 i
.types
[op
].bitfield
.disp8
= 1;
8452 fake_zero_displacement
= 1;
8455 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
8456 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
8458 default: /* (%si) -> 4 or (%di) -> 5 */
8459 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
8461 if (!fake_zero_displacement
8465 fake_zero_displacement
= 1;
8466 if (i
.disp_encoding
== disp_encoding_8bit
)
8467 i
.types
[op
].bitfield
.disp8
= 1;
8469 i
.types
[op
].bitfield
.disp16
= 1;
8471 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8473 else /* i.base_reg and 32/64 bit mode */
8475 if (operand_type_check (i
.types
[op
], disp
))
8477 i
.types
[op
].bitfield
.disp16
= 0;
8478 i
.types
[op
].bitfield
.disp64
= 0;
8479 if (!want_disp32 (&i
.tm
))
8481 i
.types
[op
].bitfield
.disp32
= 0;
8482 i
.types
[op
].bitfield
.disp32s
= 1;
8486 i
.types
[op
].bitfield
.disp32
= 1;
8487 i
.types
[op
].bitfield
.disp32s
= 0;
8491 if (!i
.tm
.opcode_modifier
.sib
)
8492 i
.rm
.regmem
= i
.base_reg
->reg_num
;
8493 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
8495 i
.sib
.base
= i
.base_reg
->reg_num
;
8496 /* x86-64 ignores REX prefix bit here to avoid decoder
8498 if (!(i
.base_reg
->reg_flags
& RegRex
)
8499 && (i
.base_reg
->reg_num
== EBP_REG_NUM
8500 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
8501 default_seg
= reg_ss
;
8502 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
8504 fake_zero_displacement
= 1;
8505 if (i
.disp_encoding
== disp_encoding_32bit
)
8506 i
.types
[op
].bitfield
.disp32
= 1;
8508 i
.types
[op
].bitfield
.disp8
= 1;
8510 i
.sib
.scale
= i
.log2_scale_factor
;
8511 if (i
.index_reg
== 0)
8513 /* Only check for VSIB. */
8514 gas_assert (i
.tm
.opcode_modifier
.sib
!= VECSIB128
8515 && i
.tm
.opcode_modifier
.sib
!= VECSIB256
8516 && i
.tm
.opcode_modifier
.sib
!= VECSIB512
);
8518 /* <disp>(%esp) becomes two byte modrm with no index
8519 register. We've already stored the code for esp
8520 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
8521 Any base register besides %esp will not use the
8522 extra modrm byte. */
8523 i
.sib
.index
= NO_INDEX_REGISTER
;
8525 else if (!i
.tm
.opcode_modifier
.sib
)
8527 if (i
.index_reg
->reg_num
== RegIZ
)
8528 i
.sib
.index
= NO_INDEX_REGISTER
;
8530 i
.sib
.index
= i
.index_reg
->reg_num
;
8531 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8532 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
8537 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
8538 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
8542 if (!fake_zero_displacement
8546 fake_zero_displacement
= 1;
8547 if (i
.disp_encoding
== disp_encoding_8bit
)
8548 i
.types
[op
].bitfield
.disp8
= 1;
8550 i
.types
[op
].bitfield
.disp32
= 1;
8552 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8556 if (fake_zero_displacement
)
8558 /* Fakes a zero displacement assuming that i.types[op]
8559 holds the correct displacement size. */
8562 gas_assert (i
.op
[op
].disps
== 0);
8563 exp
= &disp_expressions
[i
.disp_operands
++];
8564 i
.op
[op
].disps
= exp
;
8565 exp
->X_op
= O_constant
;
8566 exp
->X_add_number
= 0;
8567 exp
->X_add_symbol
= (symbolS
*) 0;
8568 exp
->X_op_symbol
= (symbolS
*) 0;
8576 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
8578 if (operand_type_check (i
.types
[0], imm
))
8579 i
.vex
.register_specifier
= NULL
;
8582 /* VEX.vvvv encodes one of the sources when the first
8583 operand is not an immediate. */
8584 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
8585 i
.vex
.register_specifier
= i
.op
[0].regs
;
8587 i
.vex
.register_specifier
= i
.op
[1].regs
;
8590 /* Destination is a XMM register encoded in the ModRM.reg
8592 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
8593 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
8596 /* ModRM.rm and VEX.B encodes the other source. */
8597 if (!i
.mem_operands
)
8601 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
8602 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
8604 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
8606 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
8610 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
8612 i
.vex
.register_specifier
= i
.op
[2].regs
;
8613 if (!i
.mem_operands
)
8616 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
8617 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
8621 /* Fill in i.rm.reg or i.rm.regmem field with register operand
8622 (if any) based on i.tm.extension_opcode. Again, we must be
8623 careful to make sure that segment/control/debug/test/MMX
8624 registers are coded into the i.rm.reg field. */
8625 else if (i
.reg_operands
)
8628 unsigned int vex_reg
= ~0;
8630 for (op
= 0; op
< i
.operands
; op
++)
8631 if (i
.types
[op
].bitfield
.class == Reg
8632 || i
.types
[op
].bitfield
.class == RegBND
8633 || i
.types
[op
].bitfield
.class == RegMask
8634 || i
.types
[op
].bitfield
.class == SReg
8635 || i
.types
[op
].bitfield
.class == RegCR
8636 || i
.types
[op
].bitfield
.class == RegDR
8637 || i
.types
[op
].bitfield
.class == RegTR
8638 || i
.types
[op
].bitfield
.class == RegSIMD
8639 || i
.types
[op
].bitfield
.class == RegMMX
)
8644 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8646 /* For instructions with VexNDS, the register-only
8647 source operand is encoded in VEX prefix. */
8648 gas_assert (mem
!= (unsigned int) ~0);
8653 gas_assert (op
< i
.operands
);
8657 /* Check register-only source operand when two source
8658 operands are swapped. */
8659 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
8660 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
8664 gas_assert (mem
== (vex_reg
+ 1)
8665 && op
< i
.operands
);
8670 gas_assert (vex_reg
< i
.operands
);
8674 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
8676 /* For instructions with VexNDD, the register destination
8677 is encoded in VEX prefix. */
8678 if (i
.mem_operands
== 0)
8680 /* There is no memory operand. */
8681 gas_assert ((op
+ 2) == i
.operands
);
8686 /* There are only 2 non-immediate operands. */
8687 gas_assert (op
< i
.imm_operands
+ 2
8688 && i
.operands
== i
.imm_operands
+ 2);
8689 vex_reg
= i
.imm_operands
+ 1;
8693 gas_assert (op
< i
.operands
);
8695 if (vex_reg
!= (unsigned int) ~0)
8697 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
8699 if ((type
->bitfield
.class != Reg
8700 || (!type
->bitfield
.dword
&& !type
->bitfield
.qword
))
8701 && type
->bitfield
.class != RegSIMD
8702 && !operand_type_equal (type
, ®mask
))
8705 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
8708 /* Don't set OP operand twice. */
8711 /* If there is an extension opcode to put here, the
8712 register number must be put into the regmem field. */
8713 if (i
.tm
.extension_opcode
!= None
)
8715 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
8716 set_rex_vrex (i
.op
[op
].regs
, REX_B
,
8717 i
.tm
.opcode_modifier
.sse2avx
);
8721 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
8722 set_rex_vrex (i
.op
[op
].regs
, REX_R
,
8723 i
.tm
.opcode_modifier
.sse2avx
);
8727 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
8728 must set it to 3 to indicate this is a register operand
8729 in the regmem field. */
8730 if (!i
.mem_operands
)
8734 /* Fill in i.rm.reg field with extension opcode (if any). */
8735 if (i
.tm
.extension_opcode
!= None
)
8736 i
.rm
.reg
= i
.tm
.extension_opcode
;
8742 frag_opcode_byte (unsigned char byte
)
8744 if (now_seg
!= absolute_section
)
8745 FRAG_APPEND_1_CHAR (byte
);
8747 ++abs_section_offset
;
8751 flip_code16 (unsigned int code16
)
8753 gas_assert (i
.tm
.operands
== 1);
8755 return !(i
.prefix
[REX_PREFIX
] & REX_W
)
8756 && (code16
? i
.tm
.operand_types
[0].bitfield
.disp32
8757 || i
.tm
.operand_types
[0].bitfield
.disp32s
8758 : i
.tm
.operand_types
[0].bitfield
.disp16
)
8763 output_branch (void)
8769 relax_substateT subtype
;
8773 if (now_seg
== absolute_section
)
8775 as_bad (_("relaxable branches not supported in absolute section"));
8779 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
8780 size
= i
.disp_encoding
> disp_encoding_8bit
? BIG
: SMALL
;
8783 if (i
.prefix
[DATA_PREFIX
] != 0)
8787 code16
^= flip_code16(code16
);
8789 /* Pentium4 branch hints. */
8790 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8791 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8796 if (i
.prefix
[REX_PREFIX
] != 0)
8802 /* BND prefixed jump. */
8803 if (i
.prefix
[BND_PREFIX
] != 0)
8809 if (i
.prefixes
!= 0)
8810 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8812 /* It's always a symbol; End frag & setup for relax.
8813 Make sure there is enough room in this frag for the largest
8814 instruction we may generate in md_convert_frag. This is 2
8815 bytes for the opcode and room for the prefix and largest
8817 frag_grow (prefix
+ 2 + 4);
8818 /* Prefix and 1 opcode byte go in fr_fix. */
8819 p
= frag_more (prefix
+ 1);
8820 if (i
.prefix
[DATA_PREFIX
] != 0)
8821 *p
++ = DATA_PREFIX_OPCODE
;
8822 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
8823 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
8824 *p
++ = i
.prefix
[SEG_PREFIX
];
8825 if (i
.prefix
[BND_PREFIX
] != 0)
8826 *p
++ = BND_PREFIX_OPCODE
;
8827 if (i
.prefix
[REX_PREFIX
] != 0)
8828 *p
++ = i
.prefix
[REX_PREFIX
];
8829 *p
= i
.tm
.base_opcode
;
8831 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
8832 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
8833 else if (cpu_arch_flags
.bitfield
.cpui386
)
8834 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
8836 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
8839 sym
= i
.op
[0].disps
->X_add_symbol
;
8840 off
= i
.op
[0].disps
->X_add_number
;
8842 if (i
.op
[0].disps
->X_op
!= O_constant
8843 && i
.op
[0].disps
->X_op
!= O_symbol
)
8845 /* Handle complex expressions. */
8846 sym
= make_expr_symbol (i
.op
[0].disps
);
8850 frag_now
->tc_frag_data
.code64
= flag_code
== CODE_64BIT
;
8852 /* 1 possible extra opcode + 4 byte displacement go in var part.
8853 Pass reloc in fr_var. */
8854 frag_var (rs_machine_dependent
, 5, i
.reloc
[0], subtype
, sym
, off
, p
);
8857 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8858 /* Return TRUE iff PLT32 relocation should be used for branching to
8862 need_plt32_p (symbolS
*s
)
8864 /* PLT32 relocation is ELF only. */
8869 /* Don't emit PLT32 relocation on Solaris: neither native linker nor
8870 krtld support it. */
8874 /* Since there is no need to prepare for PLT branch on x86-64, we
8875 can generate R_X86_64_PLT32, instead of R_X86_64_PC32, which can
8876 be used as a marker for 32-bit PC-relative branches. */
8883 /* Weak or undefined symbol need PLT32 relocation. */
8884 if (S_IS_WEAK (s
) || !S_IS_DEFINED (s
))
8887 /* Non-global symbol doesn't need PLT32 relocation. */
8888 if (! S_IS_EXTERNAL (s
))
8891 /* Other global symbols need PLT32 relocation. NB: Symbol with
8892 non-default visibilities are treated as normal global symbol
8893 so that PLT32 relocation can be used as a marker for 32-bit
8894 PC-relative branches. It is useful for linker relaxation. */
8905 bfd_reloc_code_real_type jump_reloc
= i
.reloc
[0];
8907 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)
8909 /* This is a loop or jecxz type instruction. */
8911 if (i
.prefix
[ADDR_PREFIX
] != 0)
8913 frag_opcode_byte (ADDR_PREFIX_OPCODE
);
8916 /* Pentium4 branch hints. */
8917 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8918 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8920 frag_opcode_byte (i
.prefix
[SEG_PREFIX
]);
8929 if (flag_code
== CODE_16BIT
)
8932 if (i
.prefix
[DATA_PREFIX
] != 0)
8934 frag_opcode_byte (DATA_PREFIX_OPCODE
);
8936 code16
^= flip_code16(code16
);
8944 /* BND prefixed jump. */
8945 if (i
.prefix
[BND_PREFIX
] != 0)
8947 frag_opcode_byte (i
.prefix
[BND_PREFIX
]);
8951 if (i
.prefix
[REX_PREFIX
] != 0)
8953 frag_opcode_byte (i
.prefix
[REX_PREFIX
]);
8957 if (i
.prefixes
!= 0)
8958 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8960 if (now_seg
== absolute_section
)
8962 abs_section_offset
+= i
.opcode_length
+ size
;
8966 p
= frag_more (i
.opcode_length
+ size
);
8967 switch (i
.opcode_length
)
8970 *p
++ = i
.tm
.base_opcode
>> 8;
8973 *p
++ = i
.tm
.base_opcode
;
8979 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8980 if (flag_code
== CODE_64BIT
&& size
== 4
8981 && jump_reloc
== NO_RELOC
&& i
.op
[0].disps
->X_add_number
== 0
8982 && need_plt32_p (i
.op
[0].disps
->X_add_symbol
))
8983 jump_reloc
= BFD_RELOC_X86_64_PLT32
;
8986 jump_reloc
= reloc (size
, 1, 1, jump_reloc
);
8988 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8989 i
.op
[0].disps
, 1, jump_reloc
);
8991 /* All jumps handled here are signed, but don't unconditionally use a
8992 signed limit check for 32 and 16 bit jumps as we want to allow wrap
8993 around at 4G (outside of 64-bit mode) and 64k (except for XBEGIN)
8998 fixP
->fx_signed
= 1;
9002 if (i
.tm
.base_opcode
== 0xc7f8)
9003 fixP
->fx_signed
= 1;
9007 if (flag_code
== CODE_64BIT
)
9008 fixP
->fx_signed
= 1;
9014 output_interseg_jump (void)
9022 if (flag_code
== CODE_16BIT
)
9026 if (i
.prefix
[DATA_PREFIX
] != 0)
9033 gas_assert (!i
.prefix
[REX_PREFIX
]);
9039 if (i
.prefixes
!= 0)
9040 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
9042 if (now_seg
== absolute_section
)
9044 abs_section_offset
+= prefix
+ 1 + 2 + size
;
9048 /* 1 opcode; 2 segment; offset */
9049 p
= frag_more (prefix
+ 1 + 2 + size
);
9051 if (i
.prefix
[DATA_PREFIX
] != 0)
9052 *p
++ = DATA_PREFIX_OPCODE
;
9054 if (i
.prefix
[REX_PREFIX
] != 0)
9055 *p
++ = i
.prefix
[REX_PREFIX
];
9057 *p
++ = i
.tm
.base_opcode
;
9058 if (i
.op
[1].imms
->X_op
== O_constant
)
9060 offsetT n
= i
.op
[1].imms
->X_add_number
;
9063 && !fits_in_unsigned_word (n
)
9064 && !fits_in_signed_word (n
))
9066 as_bad (_("16-bit jump out of range"));
9069 md_number_to_chars (p
, n
, size
);
9072 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
9073 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
9076 if (i
.op
[0].imms
->X_op
== O_constant
)
9077 md_number_to_chars (p
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
9079 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, 2,
9080 i
.op
[0].imms
, 0, reloc (2, 0, 0, i
.reloc
[0]));
9083 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9088 asection
*seg
= now_seg
;
9089 subsegT subseg
= now_subseg
;
9091 unsigned int alignment
, align_size_1
;
9092 unsigned int isa_1_descsz
, feature_2_descsz
, descsz
;
9093 unsigned int isa_1_descsz_raw
, feature_2_descsz_raw
;
9094 unsigned int padding
;
9096 if (!IS_ELF
|| !x86_used_note
)
9099 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X86
;
9101 /* The .note.gnu.property section layout:
9103 Field Length Contents
9106 n_descsz 4 The note descriptor size
9107 n_type 4 NT_GNU_PROPERTY_TYPE_0
9109 n_desc n_descsz The program property array
9113 /* Create the .note.gnu.property section. */
9114 sec
= subseg_new (NOTE_GNU_PROPERTY_SECTION_NAME
, 0);
9115 bfd_set_section_flags (sec
,
9122 if (get_elf_backend_data (stdoutput
)->s
->elfclass
== ELFCLASS64
)
9133 bfd_set_section_alignment (sec
, alignment
);
9134 elf_section_type (sec
) = SHT_NOTE
;
9136 /* GNU_PROPERTY_X86_ISA_1_USED: 4-byte type + 4-byte data size
9138 isa_1_descsz_raw
= 4 + 4 + 4;
9139 /* Align GNU_PROPERTY_X86_ISA_1_USED. */
9140 isa_1_descsz
= (isa_1_descsz_raw
+ align_size_1
) & ~align_size_1
;
9142 feature_2_descsz_raw
= isa_1_descsz
;
9143 /* GNU_PROPERTY_X86_FEATURE_2_USED: 4-byte type + 4-byte data size
9145 feature_2_descsz_raw
+= 4 + 4 + 4;
9146 /* Align GNU_PROPERTY_X86_FEATURE_2_USED. */
9147 feature_2_descsz
= ((feature_2_descsz_raw
+ align_size_1
)
9150 descsz
= feature_2_descsz
;
9151 /* Section size: n_namsz + n_descsz + n_type + n_name + n_descsz. */
9152 p
= frag_more (4 + 4 + 4 + 4 + descsz
);
9154 /* Write n_namsz. */
9155 md_number_to_chars (p
, (valueT
) 4, 4);
9157 /* Write n_descsz. */
9158 md_number_to_chars (p
+ 4, (valueT
) descsz
, 4);
9161 md_number_to_chars (p
+ 4 * 2, (valueT
) NT_GNU_PROPERTY_TYPE_0
, 4);
9164 memcpy (p
+ 4 * 3, "GNU", 4);
9166 /* Write 4-byte type. */
9167 md_number_to_chars (p
+ 4 * 4,
9168 (valueT
) GNU_PROPERTY_X86_ISA_1_USED
, 4);
9170 /* Write 4-byte data size. */
9171 md_number_to_chars (p
+ 4 * 5, (valueT
) 4, 4);
9173 /* Write 4-byte data. */
9174 md_number_to_chars (p
+ 4 * 6, (valueT
) x86_isa_1_used
, 4);
9176 /* Zero out paddings. */
9177 padding
= isa_1_descsz
- isa_1_descsz_raw
;
9179 memset (p
+ 4 * 7, 0, padding
);
9181 /* Write 4-byte type. */
9182 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 4,
9183 (valueT
) GNU_PROPERTY_X86_FEATURE_2_USED
, 4);
9185 /* Write 4-byte data size. */
9186 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 5, (valueT
) 4, 4);
9188 /* Write 4-byte data. */
9189 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 6,
9190 (valueT
) x86_feature_2_used
, 4);
9192 /* Zero out paddings. */
9193 padding
= feature_2_descsz
- feature_2_descsz_raw
;
9195 memset (p
+ isa_1_descsz
+ 4 * 7, 0, padding
);
9197 /* We probably can't restore the current segment, for there likely
9200 subseg_set (seg
, subseg
);
9205 encoding_length (const fragS
*start_frag
, offsetT start_off
,
9206 const char *frag_now_ptr
)
9208 unsigned int len
= 0;
9210 if (start_frag
!= frag_now
)
9212 const fragS
*fr
= start_frag
;
9217 } while (fr
&& fr
!= frag_now
);
9220 return len
- start_off
+ (frag_now_ptr
- frag_now
->fr_literal
);
9223 /* Return 1 for test, and, cmp, add, sub, inc and dec which may
9224 be macro-fused with conditional jumps.
9225 NB: If TEST/AND/CMP/ADD/SUB/INC/DEC is of RIP relative address,
9226 or is one of the following format:
9239 maybe_fused_with_jcc_p (enum mf_cmp_kind
* mf_cmp_p
)
9241 /* No RIP address. */
9242 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
9245 /* No opcodes outside of base encoding space. */
9246 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
9249 /* add, sub without add/sub m, imm. */
9250 if (i
.tm
.base_opcode
<= 5
9251 || (i
.tm
.base_opcode
>= 0x28 && i
.tm
.base_opcode
<= 0x2d)
9252 || ((i
.tm
.base_opcode
| 3) == 0x83
9253 && (i
.tm
.extension_opcode
== 0x5
9254 || i
.tm
.extension_opcode
== 0x0)))
9256 *mf_cmp_p
= mf_cmp_alu_cmp
;
9257 return !(i
.mem_operands
&& i
.imm_operands
);
9260 /* and without and m, imm. */
9261 if ((i
.tm
.base_opcode
>= 0x20 && i
.tm
.base_opcode
<= 0x25)
9262 || ((i
.tm
.base_opcode
| 3) == 0x83
9263 && i
.tm
.extension_opcode
== 0x4))
9265 *mf_cmp_p
= mf_cmp_test_and
;
9266 return !(i
.mem_operands
&& i
.imm_operands
);
9269 /* test without test m imm. */
9270 if ((i
.tm
.base_opcode
| 1) == 0x85
9271 || (i
.tm
.base_opcode
| 1) == 0xa9
9272 || ((i
.tm
.base_opcode
| 1) == 0xf7
9273 && i
.tm
.extension_opcode
== 0))
9275 *mf_cmp_p
= mf_cmp_test_and
;
9276 return !(i
.mem_operands
&& i
.imm_operands
);
9279 /* cmp without cmp m, imm. */
9280 if ((i
.tm
.base_opcode
>= 0x38 && i
.tm
.base_opcode
<= 0x3d)
9281 || ((i
.tm
.base_opcode
| 3) == 0x83
9282 && (i
.tm
.extension_opcode
== 0x7)))
9284 *mf_cmp_p
= mf_cmp_alu_cmp
;
9285 return !(i
.mem_operands
&& i
.imm_operands
);
9288 /* inc, dec without inc/dec m. */
9289 if ((i
.tm
.cpu_flags
.bitfield
.cpuno64
9290 && (i
.tm
.base_opcode
| 0xf) == 0x4f)
9291 || ((i
.tm
.base_opcode
| 1) == 0xff
9292 && i
.tm
.extension_opcode
<= 0x1))
9294 *mf_cmp_p
= mf_cmp_incdec
;
9295 return !i
.mem_operands
;
9301 /* Return 1 if a FUSED_JCC_PADDING frag should be generated. */
9304 add_fused_jcc_padding_frag_p (enum mf_cmp_kind
* mf_cmp_p
)
9306 /* NB: Don't work with COND_JUMP86 without i386. */
9307 if (!align_branch_power
9308 || now_seg
== absolute_section
9309 || !cpu_arch_flags
.bitfield
.cpui386
9310 || !(align_branch
& align_branch_fused_bit
))
9313 if (maybe_fused_with_jcc_p (mf_cmp_p
))
9315 if (last_insn
.kind
== last_insn_other
9316 || last_insn
.seg
!= now_seg
)
9319 as_warn_where (last_insn
.file
, last_insn
.line
,
9320 _("`%s` skips -malign-branch-boundary on `%s`"),
9321 last_insn
.name
, i
.tm
.name
);
9327 /* Return 1 if a BRANCH_PREFIX frag should be generated. */
9330 add_branch_prefix_frag_p (void)
9332 /* NB: Don't work with COND_JUMP86 without i386. Don't add prefix
9333 to PadLock instructions since they include prefixes in opcode. */
9334 if (!align_branch_power
9335 || !align_branch_prefix_size
9336 || now_seg
== absolute_section
9337 || i
.tm
.cpu_flags
.bitfield
.cpupadlock
9338 || !cpu_arch_flags
.bitfield
.cpui386
)
9341 /* Don't add prefix if it is a prefix or there is no operand in case
9342 that segment prefix is special. */
9343 if (!i
.operands
|| i
.tm
.opcode_modifier
.isprefix
)
9346 if (last_insn
.kind
== last_insn_other
9347 || last_insn
.seg
!= now_seg
)
9351 as_warn_where (last_insn
.file
, last_insn
.line
,
9352 _("`%s` skips -malign-branch-boundary on `%s`"),
9353 last_insn
.name
, i
.tm
.name
);
9358 /* Return 1 if a BRANCH_PADDING frag should be generated. */
9361 add_branch_padding_frag_p (enum align_branch_kind
*branch_p
,
9362 enum mf_jcc_kind
*mf_jcc_p
)
9366 /* NB: Don't work with COND_JUMP86 without i386. */
9367 if (!align_branch_power
9368 || now_seg
== absolute_section
9369 || !cpu_arch_flags
.bitfield
.cpui386
9370 || i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
9375 /* Check for jcc and direct jmp. */
9376 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
9378 if (i
.tm
.base_opcode
== JUMP_PC_RELATIVE
)
9380 *branch_p
= align_branch_jmp
;
9381 add_padding
= align_branch
& align_branch_jmp_bit
;
9385 /* Because J<cc> and JN<cc> share same group in macro-fusible table,
9386 igore the lowest bit. */
9387 *mf_jcc_p
= (i
.tm
.base_opcode
& 0x0e) >> 1;
9388 *branch_p
= align_branch_jcc
;
9389 if ((align_branch
& align_branch_jcc_bit
))
9393 else if ((i
.tm
.base_opcode
| 1) == 0xc3)
9396 *branch_p
= align_branch_ret
;
9397 if ((align_branch
& align_branch_ret_bit
))
9402 /* Check for indirect jmp, direct and indirect calls. */
9403 if (i
.tm
.base_opcode
== 0xe8)
9406 *branch_p
= align_branch_call
;
9407 if ((align_branch
& align_branch_call_bit
))
9410 else if (i
.tm
.base_opcode
== 0xff
9411 && (i
.tm
.extension_opcode
== 2
9412 || i
.tm
.extension_opcode
== 4))
9414 /* Indirect call and jmp. */
9415 *branch_p
= align_branch_indirect
;
9416 if ((align_branch
& align_branch_indirect_bit
))
9423 && (i
.op
[0].disps
->X_op
== O_symbol
9424 || (i
.op
[0].disps
->X_op
== O_subtract
9425 && i
.op
[0].disps
->X_op_symbol
== GOT_symbol
)))
9427 symbolS
*s
= i
.op
[0].disps
->X_add_symbol
;
9428 /* No padding to call to global or undefined tls_get_addr. */
9429 if ((S_IS_EXTERNAL (s
) || !S_IS_DEFINED (s
))
9430 && strcmp (S_GET_NAME (s
), tls_get_addr
) == 0)
9436 && last_insn
.kind
!= last_insn_other
9437 && last_insn
.seg
== now_seg
)
9440 as_warn_where (last_insn
.file
, last_insn
.line
,
9441 _("`%s` skips -malign-branch-boundary on `%s`"),
9442 last_insn
.name
, i
.tm
.name
);
9452 fragS
*insn_start_frag
;
9453 offsetT insn_start_off
;
9454 fragS
*fragP
= NULL
;
9455 enum align_branch_kind branch
= align_branch_none
;
9456 /* The initializer is arbitrary just to avoid uninitialized error.
9457 it's actually either assigned in add_branch_padding_frag_p
9458 or never be used. */
9459 enum mf_jcc_kind mf_jcc
= mf_jcc_jo
;
9461 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9462 if (IS_ELF
&& x86_used_note
&& now_seg
!= absolute_section
)
9464 if ((i
.xstate
& xstate_tmm
) == xstate_tmm
9465 || i
.tm
.cpu_flags
.bitfield
.cpuamx_tile
)
9466 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_TMM
;
9468 if (i
.tm
.cpu_flags
.bitfield
.cpu8087
9469 || i
.tm
.cpu_flags
.bitfield
.cpu287
9470 || i
.tm
.cpu_flags
.bitfield
.cpu387
9471 || i
.tm
.cpu_flags
.bitfield
.cpu687
9472 || i
.tm
.cpu_flags
.bitfield
.cpufisttp
)
9473 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X87
;
9475 if ((i
.xstate
& xstate_mmx
)
9476 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
9477 && !is_any_vex_encoding (&i
.tm
)
9478 && (i
.tm
.base_opcode
== 0x77 /* emms */
9479 || i
.tm
.base_opcode
== 0x0e /* femms */)))
9480 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MMX
;
9484 if (i
.index_reg
->reg_type
.bitfield
.zmmword
)
9485 i
.xstate
|= xstate_zmm
;
9486 else if (i
.index_reg
->reg_type
.bitfield
.ymmword
)
9487 i
.xstate
|= xstate_ymm
;
9488 else if (i
.index_reg
->reg_type
.bitfield
.xmmword
)
9489 i
.xstate
|= xstate_xmm
;
9492 /* vzeroall / vzeroupper */
9493 if (i
.tm
.base_opcode
== 0x77 && i
.tm
.cpu_flags
.bitfield
.cpuavx
)
9494 i
.xstate
|= xstate_ymm
;
9496 if ((i
.xstate
& xstate_xmm
)
9497 /* ldmxcsr / stmxcsr / vldmxcsr / vstmxcsr */
9498 || (i
.tm
.base_opcode
== 0xae
9499 && (i
.tm
.cpu_flags
.bitfield
.cpusse
9500 || i
.tm
.cpu_flags
.bitfield
.cpuavx
))
9501 || i
.tm
.cpu_flags
.bitfield
.cpuwidekl
9502 || i
.tm
.cpu_flags
.bitfield
.cpukl
)
9503 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XMM
;
9505 if ((i
.xstate
& xstate_ymm
) == xstate_ymm
)
9506 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_YMM
;
9507 if ((i
.xstate
& xstate_zmm
) == xstate_zmm
)
9508 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_ZMM
;
9509 if (i
.mask
.reg
|| (i
.xstate
& xstate_mask
) == xstate_mask
)
9510 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MASK
;
9511 if (i
.tm
.cpu_flags
.bitfield
.cpufxsr
)
9512 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_FXSR
;
9513 if (i
.tm
.cpu_flags
.bitfield
.cpuxsave
)
9514 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVE
;
9515 if (i
.tm
.cpu_flags
.bitfield
.cpuxsaveopt
)
9516 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
;
9517 if (i
.tm
.cpu_flags
.bitfield
.cpuxsavec
)
9518 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEC
;
9520 if (x86_feature_2_used
9521 || i
.tm
.cpu_flags
.bitfield
.cpucmov
9522 || i
.tm
.cpu_flags
.bitfield
.cpusyscall
9523 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
9524 && i
.tm
.base_opcode
== 0xc7
9525 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
9526 && i
.tm
.extension_opcode
== 1) /* cmpxchg8b */)
9527 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_BASELINE
;
9528 if (i
.tm
.cpu_flags
.bitfield
.cpusse3
9529 || i
.tm
.cpu_flags
.bitfield
.cpussse3
9530 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
9531 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
9532 || i
.tm
.cpu_flags
.bitfield
.cpucx16
9533 || i
.tm
.cpu_flags
.bitfield
.cpupopcnt
9534 /* LAHF-SAHF insns in 64-bit mode. */
9535 || (flag_code
== CODE_64BIT
9536 && (i
.tm
.base_opcode
| 1) == 0x9f
9537 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
))
9538 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V2
;
9539 if (i
.tm
.cpu_flags
.bitfield
.cpuavx
9540 || i
.tm
.cpu_flags
.bitfield
.cpuavx2
9541 /* Any VEX encoded insns execpt for CpuAVX512F, CpuAVX512BW,
9542 CpuAVX512DQ, LPW, TBM and AMX. */
9543 || (i
.tm
.opcode_modifier
.vex
9544 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512f
9545 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
9546 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
9547 && !i
.tm
.cpu_flags
.bitfield
.cpulwp
9548 && !i
.tm
.cpu_flags
.bitfield
.cputbm
9549 && !(x86_feature_2_used
& GNU_PROPERTY_X86_FEATURE_2_TMM
))
9550 || i
.tm
.cpu_flags
.bitfield
.cpuf16c
9551 || i
.tm
.cpu_flags
.bitfield
.cpufma
9552 || i
.tm
.cpu_flags
.bitfield
.cpulzcnt
9553 || i
.tm
.cpu_flags
.bitfield
.cpumovbe
9554 || i
.tm
.cpu_flags
.bitfield
.cpuxsaves
9555 || (x86_feature_2_used
9556 & (GNU_PROPERTY_X86_FEATURE_2_XSAVE
9557 | GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
9558 | GNU_PROPERTY_X86_FEATURE_2_XSAVEC
)) != 0)
9559 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V3
;
9560 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512f
9561 || i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
9562 || i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
9563 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
9564 /* Any EVEX encoded insns except for AVX512ER, AVX512PF and
9566 || (i
.tm
.opcode_modifier
.evex
9567 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512er
9568 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512pf
9569 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512_4vnniw
))
9570 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V4
;
9574 /* Tie dwarf2 debug info to the address at the start of the insn.
9575 We can't do this after the insn has been output as the current
9576 frag may have been closed off. eg. by frag_var. */
9577 dwarf2_emit_insn (0);
9579 insn_start_frag
= frag_now
;
9580 insn_start_off
= frag_now_fix ();
9582 if (add_branch_padding_frag_p (&branch
, &mf_jcc
))
9585 /* Branch can be 8 bytes. Leave some room for prefixes. */
9586 unsigned int max_branch_padding_size
= 14;
9588 /* Align section to boundary. */
9589 record_alignment (now_seg
, align_branch_power
);
9591 /* Make room for padding. */
9592 frag_grow (max_branch_padding_size
);
9594 /* Start of the padding. */
9599 frag_var (rs_machine_dependent
, max_branch_padding_size
, 0,
9600 ENCODE_RELAX_STATE (BRANCH_PADDING
, 0),
9603 fragP
->tc_frag_data
.mf_type
= mf_jcc
;
9604 fragP
->tc_frag_data
.branch_type
= branch
;
9605 fragP
->tc_frag_data
.max_bytes
= max_branch_padding_size
;
9609 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
9611 else if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
9612 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
9614 else if (i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
)
9615 output_interseg_jump ();
9618 /* Output normal instructions here. */
9622 enum mf_cmp_kind mf_cmp
;
9625 && (i
.tm
.base_opcode
== 0xaee8
9626 || i
.tm
.base_opcode
== 0xaef0
9627 || i
.tm
.base_opcode
== 0xaef8))
9629 /* Encode lfence, mfence, and sfence as
9630 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
9631 if (flag_code
== CODE_16BIT
)
9632 as_bad (_("Cannot convert `%s' in 16-bit mode"), i
.tm
.name
);
9633 else if (omit_lock_prefix
)
9634 as_bad (_("Cannot convert `%s' with `-momit-lock-prefix=yes' in effect"),
9636 else if (now_seg
!= absolute_section
)
9638 offsetT val
= 0x240483f0ULL
;
9641 md_number_to_chars (p
, val
, 5);
9644 abs_section_offset
+= 5;
9648 /* Some processors fail on LOCK prefix. This options makes
9649 assembler ignore LOCK prefix and serves as a workaround. */
9650 if (omit_lock_prefix
)
9652 if (i
.tm
.base_opcode
== LOCK_PREFIX_OPCODE
9653 && i
.tm
.opcode_modifier
.isprefix
)
9655 i
.prefix
[LOCK_PREFIX
] = 0;
9659 /* Skip if this is a branch. */
9661 else if (add_fused_jcc_padding_frag_p (&mf_cmp
))
9663 /* Make room for padding. */
9664 frag_grow (MAX_FUSED_JCC_PADDING_SIZE
);
9669 frag_var (rs_machine_dependent
, MAX_FUSED_JCC_PADDING_SIZE
, 0,
9670 ENCODE_RELAX_STATE (FUSED_JCC_PADDING
, 0),
9673 fragP
->tc_frag_data
.mf_type
= mf_cmp
;
9674 fragP
->tc_frag_data
.branch_type
= align_branch_fused
;
9675 fragP
->tc_frag_data
.max_bytes
= MAX_FUSED_JCC_PADDING_SIZE
;
9677 else if (add_branch_prefix_frag_p ())
9679 unsigned int max_prefix_size
= align_branch_prefix_size
;
9681 /* Make room for padding. */
9682 frag_grow (max_prefix_size
);
9687 frag_var (rs_machine_dependent
, max_prefix_size
, 0,
9688 ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0),
9691 fragP
->tc_frag_data
.max_bytes
= max_prefix_size
;
9694 /* Since the VEX/EVEX prefix contains the implicit prefix, we
9695 don't need the explicit prefix. */
9696 if (!i
.tm
.opcode_modifier
.vex
&& !i
.tm
.opcode_modifier
.evex
)
9698 switch (i
.tm
.opcode_modifier
.opcodeprefix
)
9707 if (!i
.tm
.cpu_flags
.bitfield
.cpupadlock
9708 || (i
.prefix
[REP_PREFIX
] != 0xf3))
9712 switch (i
.opcode_length
)
9717 /* Check for pseudo prefixes. */
9718 if (!i
.tm
.opcode_modifier
.isprefix
|| i
.tm
.base_opcode
)
9720 as_bad_where (insn_start_frag
->fr_file
,
9721 insn_start_frag
->fr_line
,
9722 _("pseudo prefix without instruction"));
9732 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9733 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
9734 R_X86_64_GOTTPOFF relocation so that linker can safely
9735 perform IE->LE optimization. A dummy REX_OPCODE prefix
9736 is also needed for lea with R_X86_64_GOTPC32_TLSDESC
9737 relocation for GDesc -> IE/LE optimization. */
9738 if (x86_elf_abi
== X86_64_X32_ABI
9740 && (i
.reloc
[0] == BFD_RELOC_X86_64_GOTTPOFF
9741 || i
.reloc
[0] == BFD_RELOC_X86_64_GOTPC32_TLSDESC
)
9742 && i
.prefix
[REX_PREFIX
] == 0)
9743 add_prefix (REX_OPCODE
);
9746 /* The prefix bytes. */
9747 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
9749 frag_opcode_byte (*q
);
9753 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
9759 frag_opcode_byte (*q
);
9762 /* There should be no other prefixes for instructions
9767 /* For EVEX instructions i.vrex should become 0 after
9768 build_evex_prefix. For VEX instructions upper 16 registers
9769 aren't available, so VREX should be 0. */
9772 /* Now the VEX prefix. */
9773 if (now_seg
!= absolute_section
)
9775 p
= frag_more (i
.vex
.length
);
9776 for (j
= 0; j
< i
.vex
.length
; j
++)
9777 p
[j
] = i
.vex
.bytes
[j
];
9780 abs_section_offset
+= i
.vex
.length
;
9783 /* Now the opcode; be careful about word order here! */
9784 j
= i
.opcode_length
;
9786 switch (i
.tm
.opcode_modifier
.opcodespace
)
9801 if (now_seg
== absolute_section
)
9802 abs_section_offset
+= j
;
9805 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
9811 && i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
9814 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_0F
)
9815 *p
++ = i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
9819 switch (i
.opcode_length
)
9822 /* Put out high byte first: can't use md_number_to_chars! */
9823 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
9826 *p
= i
.tm
.base_opcode
& 0xff;
9835 /* Now the modrm byte and sib byte (if present). */
9836 if (i
.tm
.opcode_modifier
.modrm
)
9838 frag_opcode_byte ((i
.rm
.regmem
<< 0)
9840 | (i
.rm
.mode
<< 6));
9841 /* If i.rm.regmem == ESP (4)
9842 && i.rm.mode != (Register mode)
9844 ==> need second modrm byte. */
9845 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
9847 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.word
))
9848 frag_opcode_byte ((i
.sib
.base
<< 0)
9849 | (i
.sib
.index
<< 3)
9850 | (i
.sib
.scale
<< 6));
9853 if (i
.disp_operands
)
9854 output_disp (insn_start_frag
, insn_start_off
);
9857 output_imm (insn_start_frag
, insn_start_off
);
9860 * frag_now_fix () returning plain abs_section_offset when we're in the
9861 * absolute section, and abs_section_offset not getting updated as data
9862 * gets added to the frag breaks the logic below.
9864 if (now_seg
!= absolute_section
)
9866 j
= encoding_length (insn_start_frag
, insn_start_off
, frag_more (0));
9868 as_warn (_("instruction length of %u bytes exceeds the limit of 15"),
9872 /* NB: Don't add prefix with GOTPC relocation since
9873 output_disp() above depends on the fixed encoding
9874 length. Can't add prefix with TLS relocation since
9875 it breaks TLS linker optimization. */
9876 unsigned int max
= i
.has_gotpc_tls_reloc
? 0 : 15 - j
;
9877 /* Prefix count on the current instruction. */
9878 unsigned int count
= i
.vex
.length
;
9880 for (k
= 0; k
< ARRAY_SIZE (i
.prefix
); k
++)
9881 /* REX byte is encoded in VEX/EVEX prefix. */
9882 if (i
.prefix
[k
] && (k
!= REX_PREFIX
|| !i
.vex
.length
))
9885 /* Count prefixes for extended opcode maps. */
9887 switch (i
.tm
.opcode_modifier
.opcodespace
)
9902 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
9905 /* Set the maximum prefix size in BRANCH_PREFIX
9907 if (fragP
->tc_frag_data
.max_bytes
> max
)
9908 fragP
->tc_frag_data
.max_bytes
= max
;
9909 if (fragP
->tc_frag_data
.max_bytes
> count
)
9910 fragP
->tc_frag_data
.max_bytes
-= count
;
9912 fragP
->tc_frag_data
.max_bytes
= 0;
9916 /* Remember the maximum prefix size in FUSED_JCC_PADDING
9918 unsigned int max_prefix_size
;
9919 if (align_branch_prefix_size
> max
)
9920 max_prefix_size
= max
;
9922 max_prefix_size
= align_branch_prefix_size
;
9923 if (max_prefix_size
> count
)
9924 fragP
->tc_frag_data
.max_prefix_length
9925 = max_prefix_size
- count
;
9928 /* Use existing segment prefix if possible. Use CS
9929 segment prefix in 64-bit mode. In 32-bit mode, use SS
9930 segment prefix with ESP/EBP base register and use DS
9931 segment prefix without ESP/EBP base register. */
9932 if (i
.prefix
[SEG_PREFIX
])
9933 fragP
->tc_frag_data
.default_prefix
= i
.prefix
[SEG_PREFIX
];
9934 else if (flag_code
== CODE_64BIT
)
9935 fragP
->tc_frag_data
.default_prefix
= CS_PREFIX_OPCODE
;
9937 && (i
.base_reg
->reg_num
== 4
9938 || i
.base_reg
->reg_num
== 5))
9939 fragP
->tc_frag_data
.default_prefix
= SS_PREFIX_OPCODE
;
9941 fragP
->tc_frag_data
.default_prefix
= DS_PREFIX_OPCODE
;
9946 /* NB: Don't work with COND_JUMP86 without i386. */
9947 if (align_branch_power
9948 && now_seg
!= absolute_section
9949 && cpu_arch_flags
.bitfield
.cpui386
)
9951 /* Terminate each frag so that we can add prefix and check for
9953 frag_wane (frag_now
);
9960 pi ("" /*line*/, &i
);
9962 #endif /* DEBUG386 */
9965 /* Return the size of the displacement operand N. */
9968 disp_size (unsigned int n
)
9972 if (i
.types
[n
].bitfield
.disp64
)
9974 else if (i
.types
[n
].bitfield
.disp8
)
9976 else if (i
.types
[n
].bitfield
.disp16
)
9981 /* Return the size of the immediate operand N. */
9984 imm_size (unsigned int n
)
9987 if (i
.types
[n
].bitfield
.imm64
)
9989 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
9991 else if (i
.types
[n
].bitfield
.imm16
)
9997 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
10002 for (n
= 0; n
< i
.operands
; n
++)
10004 if (operand_type_check (i
.types
[n
], disp
))
10006 int size
= disp_size (n
);
10008 if (now_seg
== absolute_section
)
10009 abs_section_offset
+= size
;
10010 else if (i
.op
[n
].disps
->X_op
== O_constant
)
10012 offsetT val
= i
.op
[n
].disps
->X_add_number
;
10014 val
= offset_in_range (val
>> (size
== 1 ? i
.memshift
: 0),
10016 p
= frag_more (size
);
10017 md_number_to_chars (p
, val
, size
);
10021 enum bfd_reloc_code_real reloc_type
;
10022 int sign
= i
.types
[n
].bitfield
.disp32s
;
10023 int pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
10026 /* We can't have 8 bit displacement here. */
10027 gas_assert (!i
.types
[n
].bitfield
.disp8
);
10029 /* The PC relative address is computed relative
10030 to the instruction boundary, so in case immediate
10031 fields follows, we need to adjust the value. */
10032 if (pcrel
&& i
.imm_operands
)
10037 for (n1
= 0; n1
< i
.operands
; n1
++)
10038 if (operand_type_check (i
.types
[n1
], imm
))
10040 /* Only one immediate is allowed for PC
10041 relative address. */
10042 gas_assert (sz
== 0);
10043 sz
= imm_size (n1
);
10044 i
.op
[n
].disps
->X_add_number
-= sz
;
10046 /* We should find the immediate. */
10047 gas_assert (sz
!= 0);
10050 p
= frag_more (size
);
10051 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
10053 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
10054 && (((reloc_type
== BFD_RELOC_32
10055 || reloc_type
== BFD_RELOC_X86_64_32S
10056 || (reloc_type
== BFD_RELOC_64
10058 && (i
.op
[n
].disps
->X_op
== O_symbol
10059 || (i
.op
[n
].disps
->X_op
== O_add
10060 && ((symbol_get_value_expression
10061 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
10063 || reloc_type
== BFD_RELOC_32_PCREL
))
10067 reloc_type
= BFD_RELOC_386_GOTPC
;
10068 i
.has_gotpc_tls_reloc
= true;
10069 i
.op
[n
].disps
->X_add_number
+=
10070 encoding_length (insn_start_frag
, insn_start_off
, p
);
10072 else if (reloc_type
== BFD_RELOC_64
)
10073 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
10075 /* Don't do the adjustment for x86-64, as there
10076 the pcrel addressing is relative to the _next_
10077 insn, and that is taken care of in other code. */
10078 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
10080 else if (align_branch_power
)
10082 switch (reloc_type
)
10084 case BFD_RELOC_386_TLS_GD
:
10085 case BFD_RELOC_386_TLS_LDM
:
10086 case BFD_RELOC_386_TLS_IE
:
10087 case BFD_RELOC_386_TLS_IE_32
:
10088 case BFD_RELOC_386_TLS_GOTIE
:
10089 case BFD_RELOC_386_TLS_GOTDESC
:
10090 case BFD_RELOC_386_TLS_DESC_CALL
:
10091 case BFD_RELOC_X86_64_TLSGD
:
10092 case BFD_RELOC_X86_64_TLSLD
:
10093 case BFD_RELOC_X86_64_GOTTPOFF
:
10094 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
10095 case BFD_RELOC_X86_64_TLSDESC_CALL
:
10096 i
.has_gotpc_tls_reloc
= true;
10101 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
,
10102 size
, i
.op
[n
].disps
, pcrel
,
10105 if (flag_code
== CODE_64BIT
&& size
== 4 && pcrel
10106 && !i
.prefix
[ADDR_PREFIX
])
10107 fixP
->fx_signed
= 1;
10109 /* Check for "call/jmp *mem", "mov mem, %reg",
10110 "test %reg, mem" and "binop mem, %reg" where binop
10111 is one of adc, add, and, cmp, or, sbb, sub, xor
10112 instructions without data prefix. Always generate
10113 R_386_GOT32X for "sym*GOT" operand in 32-bit mode. */
10114 if (i
.prefix
[DATA_PREFIX
] == 0
10115 && (generate_relax_relocations
10118 && i
.rm
.regmem
== 5))
10120 || (i
.rm
.mode
== 0 && i
.rm
.regmem
== 5))
10121 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
10122 && ((i
.operands
== 1
10123 && i
.tm
.base_opcode
== 0xff
10124 && (i
.rm
.reg
== 2 || i
.rm
.reg
== 4))
10125 || (i
.operands
== 2
10126 && (i
.tm
.base_opcode
== 0x8b
10127 || i
.tm
.base_opcode
== 0x85
10128 || (i
.tm
.base_opcode
& ~0x38) == 0x03))))
10132 fixP
->fx_tcbit
= i
.rex
!= 0;
10134 && (i
.base_reg
->reg_num
== RegIP
))
10135 fixP
->fx_tcbit2
= 1;
10138 fixP
->fx_tcbit2
= 1;
10146 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
10151 for (n
= 0; n
< i
.operands
; n
++)
10153 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
10154 if (i
.rounding
.type
!= rc_none
&& n
== i
.rounding
.operand
)
10157 if (operand_type_check (i
.types
[n
], imm
))
10159 int size
= imm_size (n
);
10161 if (now_seg
== absolute_section
)
10162 abs_section_offset
+= size
;
10163 else if (i
.op
[n
].imms
->X_op
== O_constant
)
10167 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
10169 p
= frag_more (size
);
10170 md_number_to_chars (p
, val
, size
);
10174 /* Not absolute_section.
10175 Need a 32-bit fixup (don't support 8bit
10176 non-absolute imms). Try to support other
10178 enum bfd_reloc_code_real reloc_type
;
10181 if (i
.types
[n
].bitfield
.imm32s
10182 && (i
.suffix
== QWORD_MNEM_SUFFIX
10183 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)))
10188 p
= frag_more (size
);
10189 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
10191 /* This is tough to explain. We end up with this one if we
10192 * have operands that look like
10193 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
10194 * obtain the absolute address of the GOT, and it is strongly
10195 * preferable from a performance point of view to avoid using
10196 * a runtime relocation for this. The actual sequence of
10197 * instructions often look something like:
10202 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
10204 * The call and pop essentially return the absolute address
10205 * of the label .L66 and store it in %ebx. The linker itself
10206 * will ultimately change the first operand of the addl so
10207 * that %ebx points to the GOT, but to keep things simple, the
10208 * .o file must have this operand set so that it generates not
10209 * the absolute address of .L66, but the absolute address of
10210 * itself. This allows the linker itself simply treat a GOTPC
10211 * relocation as asking for a pcrel offset to the GOT to be
10212 * added in, and the addend of the relocation is stored in the
10213 * operand field for the instruction itself.
10215 * Our job here is to fix the operand so that it would add
10216 * the correct offset so that %ebx would point to itself. The
10217 * thing that is tricky is that .-.L66 will point to the
10218 * beginning of the instruction, so we need to further modify
10219 * the operand so that it will point to itself. There are
10220 * other cases where you have something like:
10222 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
10224 * and here no correction would be required. Internally in
10225 * the assembler we treat operands of this form as not being
10226 * pcrel since the '.' is explicitly mentioned, and I wonder
10227 * whether it would simplify matters to do it this way. Who
10228 * knows. In earlier versions of the PIC patches, the
10229 * pcrel_adjust field was used to store the correction, but
10230 * since the expression is not pcrel, I felt it would be
10231 * confusing to do it this way. */
10233 if ((reloc_type
== BFD_RELOC_32
10234 || reloc_type
== BFD_RELOC_X86_64_32S
10235 || reloc_type
== BFD_RELOC_64
)
10237 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
10238 && (i
.op
[n
].imms
->X_op
== O_symbol
10239 || (i
.op
[n
].imms
->X_op
== O_add
10240 && ((symbol_get_value_expression
10241 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
10245 reloc_type
= BFD_RELOC_386_GOTPC
;
10246 else if (size
== 4)
10247 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
10248 else if (size
== 8)
10249 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
10250 i
.has_gotpc_tls_reloc
= true;
10251 i
.op
[n
].imms
->X_add_number
+=
10252 encoding_length (insn_start_frag
, insn_start_off
, p
);
10254 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
10255 i
.op
[n
].imms
, 0, reloc_type
);
10261 /* x86_cons_fix_new is called via the expression parsing code when a
10262 reloc is needed. We use this hook to get the correct .got reloc. */
10263 static int cons_sign
= -1;
10266 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
10267 expressionS
*exp
, bfd_reloc_code_real_type r
)
10269 r
= reloc (len
, 0, cons_sign
, r
);
10272 if (exp
->X_op
== O_secrel
)
10274 exp
->X_op
= O_symbol
;
10275 r
= BFD_RELOC_32_SECREL
;
10277 else if (exp
->X_op
== O_secidx
)
10278 r
= BFD_RELOC_16_SECIDX
;
10281 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
10284 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
10285 purpose of the `.dc.a' internal pseudo-op. */
10288 x86_address_bytes (void)
10290 if ((stdoutput
->arch_info
->mach
& bfd_mach_x64_32
))
10292 return stdoutput
->arch_info
->bits_per_address
/ 8;
10295 #if (!(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
10296 || defined (LEX_AT)) && !defined (TE_PE)
10297 # define lex_got(reloc, adjust, types) NULL
10299 /* Parse operands of the form
10300 <symbol>@GOTOFF+<nnn>
10301 and similar .plt or .got references.
10303 If we find one, set up the correct relocation in RELOC and copy the
10304 input string, minus the `@GOTOFF' into a malloc'd buffer for
10305 parsing by the calling routine. Return this buffer, and if ADJUST
10306 is non-null set it to the length of the string we removed from the
10307 input line. Otherwise return NULL. */
10309 lex_got (enum bfd_reloc_code_real
*rel
,
10311 i386_operand_type
*types
)
10313 /* Some of the relocations depend on the size of what field is to
10314 be relocated. But in our callers i386_immediate and i386_displacement
10315 we don't yet know the operand size (this will be set by insn
10316 matching). Hence we record the word32 relocation here,
10317 and adjust the reloc according to the real size in reloc(). */
10318 static const struct
10322 const enum bfd_reloc_code_real rel
[2];
10323 const i386_operand_type types64
;
10324 bool need_GOT_symbol
;
10329 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10330 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32
,
10331 BFD_RELOC_SIZE32
},
10332 OPERAND_TYPE_IMM32_64
, false },
10334 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real
,
10335 BFD_RELOC_X86_64_PLTOFF64
},
10336 OPERAND_TYPE_IMM64
, true },
10337 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32
,
10338 BFD_RELOC_X86_64_PLT32
},
10339 OPERAND_TYPE_IMM32_32S_DISP32
, false },
10340 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real
,
10341 BFD_RELOC_X86_64_GOTPLT64
},
10342 OPERAND_TYPE_IMM64_DISP64
, true },
10343 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF
,
10344 BFD_RELOC_X86_64_GOTOFF64
},
10345 OPERAND_TYPE_IMM64_DISP64
, true },
10346 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real
,
10347 BFD_RELOC_X86_64_GOTPCREL
},
10348 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10349 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD
,
10350 BFD_RELOC_X86_64_TLSGD
},
10351 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10352 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM
,
10353 _dummy_first_bfd_reloc_code_real
},
10354 OPERAND_TYPE_NONE
, true },
10355 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real
,
10356 BFD_RELOC_X86_64_TLSLD
},
10357 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10358 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32
,
10359 BFD_RELOC_X86_64_GOTTPOFF
},
10360 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10361 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32
,
10362 BFD_RELOC_X86_64_TPOFF32
},
10363 OPERAND_TYPE_IMM32_32S_64_DISP32_64
, true },
10364 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE
,
10365 _dummy_first_bfd_reloc_code_real
},
10366 OPERAND_TYPE_NONE
, true },
10367 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32
,
10368 BFD_RELOC_X86_64_DTPOFF32
},
10369 OPERAND_TYPE_IMM32_32S_64_DISP32_64
, true },
10370 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE
,
10371 _dummy_first_bfd_reloc_code_real
},
10372 OPERAND_TYPE_NONE
, true },
10373 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE
,
10374 _dummy_first_bfd_reloc_code_real
},
10375 OPERAND_TYPE_NONE
, true },
10376 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32
,
10377 BFD_RELOC_X86_64_GOT32
},
10378 OPERAND_TYPE_IMM32_32S_64_DISP32
, true },
10379 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC
,
10380 BFD_RELOC_X86_64_GOTPC32_TLSDESC
},
10381 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10382 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL
,
10383 BFD_RELOC_X86_64_TLSDESC_CALL
},
10384 OPERAND_TYPE_IMM32_32S_DISP32
, true },
10386 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL
,
10387 BFD_RELOC_32_SECREL
},
10388 OPERAND_TYPE_IMM32_32S_64_DISP32_64
, false },
10394 #if defined (OBJ_MAYBE_ELF) && !defined (TE_PE)
10399 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
10400 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
10403 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
10405 int len
= gotrel
[j
].len
;
10406 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
10408 if (gotrel
[j
].rel
[object_64bit
] != 0)
10411 char *tmpbuf
, *past_reloc
;
10413 *rel
= gotrel
[j
].rel
[object_64bit
];
10417 if (flag_code
!= CODE_64BIT
)
10419 types
->bitfield
.imm32
= 1;
10420 types
->bitfield
.disp32
= 1;
10423 *types
= gotrel
[j
].types64
;
10426 if (gotrel
[j
].need_GOT_symbol
&& GOT_symbol
== NULL
)
10427 GOT_symbol
= symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME
);
10429 /* The length of the first part of our input line. */
10430 first
= cp
- input_line_pointer
;
10432 /* The second part goes from after the reloc token until
10433 (and including) an end_of_line char or comma. */
10434 past_reloc
= cp
+ 1 + len
;
10436 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
10438 second
= cp
+ 1 - past_reloc
;
10440 /* Allocate and copy string. The trailing NUL shouldn't
10441 be necessary, but be safe. */
10442 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
10443 memcpy (tmpbuf
, input_line_pointer
, first
);
10444 if (second
!= 0 && *past_reloc
!= ' ')
10445 /* Replace the relocation token with ' ', so that
10446 errors like foo@GOTOFF1 will be detected. */
10447 tmpbuf
[first
++] = ' ';
10449 /* Increment length by 1 if the relocation token is
10454 memcpy (tmpbuf
+ first
, past_reloc
, second
);
10455 tmpbuf
[first
+ second
] = '\0';
10459 as_bad (_("@%s reloc is not supported with %d-bit output format"),
10460 gotrel
[j
].str
, 1 << (5 + object_64bit
));
10465 /* Might be a symbol version string. Don't as_bad here. */
10470 bfd_reloc_code_real_type
10471 x86_cons (expressionS
*exp
, int size
)
10473 bfd_reloc_code_real_type got_reloc
= NO_RELOC
;
10475 #if ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
10476 && !defined (LEX_AT)) \
10478 intel_syntax
= -intel_syntax
;
10481 if (size
== 4 || (object_64bit
&& size
== 8))
10483 /* Handle @GOTOFF and the like in an expression. */
10485 char *gotfree_input_line
;
10488 save
= input_line_pointer
;
10489 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
);
10490 if (gotfree_input_line
)
10491 input_line_pointer
= gotfree_input_line
;
10495 if (gotfree_input_line
)
10497 /* expression () has merrily parsed up to the end of line,
10498 or a comma - in the wrong buffer. Transfer how far
10499 input_line_pointer has moved to the right buffer. */
10500 input_line_pointer
= (save
10501 + (input_line_pointer
- gotfree_input_line
)
10503 free (gotfree_input_line
);
10504 if (exp
->X_op
== O_constant
10505 || exp
->X_op
== O_absent
10506 || exp
->X_op
== O_illegal
10507 || exp
->X_op
== O_register
10508 || exp
->X_op
== O_big
)
10510 char c
= *input_line_pointer
;
10511 *input_line_pointer
= 0;
10512 as_bad (_("missing or invalid expression `%s'"), save
);
10513 *input_line_pointer
= c
;
10515 else if ((got_reloc
== BFD_RELOC_386_PLT32
10516 || got_reloc
== BFD_RELOC_X86_64_PLT32
)
10517 && exp
->X_op
!= O_symbol
)
10519 char c
= *input_line_pointer
;
10520 *input_line_pointer
= 0;
10521 as_bad (_("invalid PLT expression `%s'"), save
);
10522 *input_line_pointer
= c
;
10529 intel_syntax
= -intel_syntax
;
10532 i386_intel_simplify (exp
);
10537 /* If not 64bit, massage value, to account for wraparound when !BFD64. */
10538 if (size
== 4 && exp
->X_op
== O_constant
&& !object_64bit
)
10539 exp
->X_add_number
= extend_to_32bit_address (exp
->X_add_number
);
10545 signed_cons (int size
)
10555 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
10562 if (exp
.X_op
== O_symbol
)
10563 exp
.X_op
= O_secrel
;
10565 emit_expr (&exp
, 4);
10567 while (*input_line_pointer
++ == ',');
10569 input_line_pointer
--;
10570 demand_empty_rest_of_line ();
10574 pe_directive_secidx (int dummy ATTRIBUTE_UNUSED
)
10581 if (exp
.X_op
== O_symbol
)
10582 exp
.X_op
= O_secidx
;
10584 emit_expr (&exp
, 2);
10586 while (*input_line_pointer
++ == ',');
10588 input_line_pointer
--;
10589 demand_empty_rest_of_line ();
10593 /* Handle Vector operations. */
10596 check_VecOperations (char *op_string
)
10598 const reg_entry
*mask
;
10605 if (*op_string
== '{')
10609 /* Check broadcasts. */
10610 if (startswith (op_string
, "1to"))
10612 unsigned int bcst_type
;
10614 if (i
.broadcast
.type
)
10615 goto duplicated_vec_op
;
10618 if (*op_string
== '8')
10620 else if (*op_string
== '4')
10622 else if (*op_string
== '2')
10624 else if (*op_string
== '1'
10625 && *(op_string
+1) == '6')
10630 else if (*op_string
== '3'
10631 && *(op_string
+1) == '2')
10638 as_bad (_("Unsupported broadcast: `%s'"), saved
);
10643 i
.broadcast
.type
= bcst_type
;
10644 i
.broadcast
.operand
= this_operand
;
10646 /* Check masking operation. */
10647 else if ((mask
= parse_register (op_string
, &end_op
)) != NULL
)
10649 if (mask
== &bad_reg
)
10652 /* k0 can't be used for write mask. */
10653 if (mask
->reg_type
.bitfield
.class != RegMask
|| !mask
->reg_num
)
10655 as_bad (_("`%s%s' can't be used for write mask"),
10656 register_prefix
, mask
->reg_name
);
10663 i
.mask
.operand
= this_operand
;
10665 else if (i
.mask
.reg
->reg_num
)
10666 goto duplicated_vec_op
;
10671 /* Only "{z}" is allowed here. No need to check
10672 zeroing mask explicitly. */
10673 if (i
.mask
.operand
!= (unsigned int) this_operand
)
10675 as_bad (_("invalid write mask `%s'"), saved
);
10680 op_string
= end_op
;
10682 /* Check zeroing-flag for masking operation. */
10683 else if (*op_string
== 'z')
10687 i
.mask
.reg
= reg_k0
;
10688 i
.mask
.zeroing
= 1;
10689 i
.mask
.operand
= this_operand
;
10693 if (i
.mask
.zeroing
)
10696 as_bad (_("duplicated `%s'"), saved
);
10700 i
.mask
.zeroing
= 1;
10702 /* Only "{%k}" is allowed here. No need to check mask
10703 register explicitly. */
10704 if (i
.mask
.operand
!= (unsigned int) this_operand
)
10706 as_bad (_("invalid zeroing-masking `%s'"),
10715 goto unknown_vec_op
;
10717 if (*op_string
!= '}')
10719 as_bad (_("missing `}' in `%s'"), saved
);
10724 /* Strip whitespace since the addition of pseudo prefixes
10725 changed how the scrubber treats '{'. */
10726 if (is_space_char (*op_string
))
10732 /* We don't know this one. */
10733 as_bad (_("unknown vector operation: `%s'"), saved
);
10737 if (i
.mask
.reg
&& i
.mask
.zeroing
&& !i
.mask
.reg
->reg_num
)
10739 as_bad (_("zeroing-masking only allowed with write mask"));
10747 i386_immediate (char *imm_start
)
10749 char *save_input_line_pointer
;
10750 char *gotfree_input_line
;
10753 i386_operand_type types
;
10755 operand_type_set (&types
, ~0);
10757 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
10759 as_bad (_("at most %d immediate operands are allowed"),
10760 MAX_IMMEDIATE_OPERANDS
);
10764 exp
= &im_expressions
[i
.imm_operands
++];
10765 i
.op
[this_operand
].imms
= exp
;
10767 if (is_space_char (*imm_start
))
10770 save_input_line_pointer
= input_line_pointer
;
10771 input_line_pointer
= imm_start
;
10773 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
10774 if (gotfree_input_line
)
10775 input_line_pointer
= gotfree_input_line
;
10777 exp_seg
= expression (exp
);
10779 SKIP_WHITESPACE ();
10780 if (*input_line_pointer
)
10781 as_bad (_("junk `%s' after expression"), input_line_pointer
);
10783 input_line_pointer
= save_input_line_pointer
;
10784 if (gotfree_input_line
)
10786 free (gotfree_input_line
);
10788 if (exp
->X_op
== O_constant
)
10789 exp
->X_op
= O_illegal
;
10792 if (exp_seg
== reg_section
)
10794 as_bad (_("illegal immediate register operand %s"), imm_start
);
10798 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
10802 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
10803 i386_operand_type types
, const char *imm_start
)
10805 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
10808 as_bad (_("missing or invalid immediate expression `%s'"),
10812 else if (exp
->X_op
== O_constant
)
10814 /* Size it properly later. */
10815 i
.types
[this_operand
].bitfield
.imm64
= 1;
10817 /* If not 64bit, sign/zero extend val, to account for wraparound
10819 if (flag_code
!= CODE_64BIT
)
10820 exp
->X_add_number
= extend_to_32bit_address (exp
->X_add_number
);
10822 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10823 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
10824 && exp_seg
!= absolute_section
10825 && exp_seg
!= text_section
10826 && exp_seg
!= data_section
10827 && exp_seg
!= bss_section
10828 && exp_seg
!= undefined_section
10829 && !bfd_is_com_section (exp_seg
))
10831 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
10837 /* This is an address. The size of the address will be
10838 determined later, depending on destination register,
10839 suffix, or the default for the section. */
10840 i
.types
[this_operand
].bitfield
.imm8
= 1;
10841 i
.types
[this_operand
].bitfield
.imm16
= 1;
10842 i
.types
[this_operand
].bitfield
.imm32
= 1;
10843 i
.types
[this_operand
].bitfield
.imm32s
= 1;
10844 i
.types
[this_operand
].bitfield
.imm64
= 1;
10845 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
10853 i386_scale (char *scale
)
10856 char *save
= input_line_pointer
;
10858 input_line_pointer
= scale
;
10859 val
= get_absolute_expression ();
10864 i
.log2_scale_factor
= 0;
10867 i
.log2_scale_factor
= 1;
10870 i
.log2_scale_factor
= 2;
10873 i
.log2_scale_factor
= 3;
10877 char sep
= *input_line_pointer
;
10879 *input_line_pointer
= '\0';
10880 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
10882 *input_line_pointer
= sep
;
10883 input_line_pointer
= save
;
10887 if (i
.log2_scale_factor
!= 0 && i
.index_reg
== 0)
10889 as_warn (_("scale factor of %d without an index register"),
10890 1 << i
.log2_scale_factor
);
10891 i
.log2_scale_factor
= 0;
10893 scale
= input_line_pointer
;
10894 input_line_pointer
= save
;
10899 i386_displacement (char *disp_start
, char *disp_end
)
10903 char *save_input_line_pointer
;
10904 char *gotfree_input_line
;
10906 i386_operand_type bigdisp
, types
= anydisp
;
10909 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
10911 as_bad (_("at most %d displacement operands are allowed"),
10912 MAX_MEMORY_OPERANDS
);
10916 operand_type_set (&bigdisp
, 0);
10918 || i
.types
[this_operand
].bitfield
.baseindex
10919 || (current_templates
->start
->opcode_modifier
.jump
!= JUMP
10920 && current_templates
->start
->opcode_modifier
.jump
!= JUMP_DWORD
))
10922 i386_addressing_mode ();
10923 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
10924 if (flag_code
== CODE_64BIT
)
10928 bigdisp
.bitfield
.disp32s
= 1;
10929 bigdisp
.bitfield
.disp64
= 1;
10932 bigdisp
.bitfield
.disp32
= 1;
10934 else if ((flag_code
== CODE_16BIT
) ^ override
)
10935 bigdisp
.bitfield
.disp16
= 1;
10937 bigdisp
.bitfield
.disp32
= 1;
10941 /* For PC-relative branches, the width of the displacement may be
10942 dependent upon data size, but is never dependent upon address size.
10943 Also make sure to not unintentionally match against a non-PC-relative
10944 branch template. */
10945 static templates aux_templates
;
10946 const insn_template
*t
= current_templates
->start
;
10947 bool has_intel64
= false;
10949 aux_templates
.start
= t
;
10950 while (++t
< current_templates
->end
)
10952 if (t
->opcode_modifier
.jump
10953 != current_templates
->start
->opcode_modifier
.jump
)
10955 if ((t
->opcode_modifier
.isa64
>= INTEL64
))
10956 has_intel64
= true;
10958 if (t
< current_templates
->end
)
10960 aux_templates
.end
= t
;
10961 current_templates
= &aux_templates
;
10964 override
= (i
.prefix
[DATA_PREFIX
] != 0);
10965 if (flag_code
== CODE_64BIT
)
10967 if ((override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
10968 && (!intel64
|| !has_intel64
))
10969 bigdisp
.bitfield
.disp16
= 1;
10971 bigdisp
.bitfield
.disp32s
= 1;
10976 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
10978 : LONG_MNEM_SUFFIX
));
10979 bigdisp
.bitfield
.disp32
= 1;
10980 if ((flag_code
== CODE_16BIT
) ^ override
)
10982 bigdisp
.bitfield
.disp32
= 0;
10983 bigdisp
.bitfield
.disp16
= 1;
10987 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
10990 exp
= &disp_expressions
[i
.disp_operands
];
10991 i
.op
[this_operand
].disps
= exp
;
10993 save_input_line_pointer
= input_line_pointer
;
10994 input_line_pointer
= disp_start
;
10995 END_STRING_AND_SAVE (disp_end
);
10997 #ifndef GCC_ASM_O_HACK
10998 #define GCC_ASM_O_HACK 0
11001 END_STRING_AND_SAVE (disp_end
+ 1);
11002 if (i
.types
[this_operand
].bitfield
.baseIndex
11003 && displacement_string_end
[-1] == '+')
11005 /* This hack is to avoid a warning when using the "o"
11006 constraint within gcc asm statements.
11009 #define _set_tssldt_desc(n,addr,limit,type) \
11010 __asm__ __volatile__ ( \
11011 "movw %w2,%0\n\t" \
11012 "movw %w1,2+%0\n\t" \
11013 "rorl $16,%1\n\t" \
11014 "movb %b1,4+%0\n\t" \
11015 "movb %4,5+%0\n\t" \
11016 "movb $0,6+%0\n\t" \
11017 "movb %h1,7+%0\n\t" \
11019 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
11021 This works great except that the output assembler ends
11022 up looking a bit weird if it turns out that there is
11023 no offset. You end up producing code that looks like:
11036 So here we provide the missing zero. */
11038 *displacement_string_end
= '0';
11041 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
11042 if (gotfree_input_line
)
11043 input_line_pointer
= gotfree_input_line
;
11045 exp_seg
= expression (exp
);
11047 SKIP_WHITESPACE ();
11048 if (*input_line_pointer
)
11049 as_bad (_("junk `%s' after expression"), input_line_pointer
);
11051 RESTORE_END_STRING (disp_end
+ 1);
11053 input_line_pointer
= save_input_line_pointer
;
11054 if (gotfree_input_line
)
11056 free (gotfree_input_line
);
11058 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
11059 exp
->X_op
= O_illegal
;
11062 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
11064 RESTORE_END_STRING (disp_end
);
11070 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
11071 i386_operand_type types
, const char *disp_start
)
11073 i386_operand_type bigdisp
;
11076 /* We do this to make sure that the section symbol is in
11077 the symbol table. We will ultimately change the relocation
11078 to be relative to the beginning of the section. */
11079 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
11080 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
11081 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
11083 if (exp
->X_op
!= O_symbol
)
11086 if (S_IS_LOCAL (exp
->X_add_symbol
)
11087 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
11088 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
11089 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
11090 exp
->X_op
= O_subtract
;
11091 exp
->X_op_symbol
= GOT_symbol
;
11092 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
11093 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
11094 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
11095 i
.reloc
[this_operand
] = BFD_RELOC_64
;
11097 i
.reloc
[this_operand
] = BFD_RELOC_32
;
11100 else if (exp
->X_op
== O_absent
11101 || exp
->X_op
== O_illegal
11102 || exp
->X_op
== O_big
)
11105 as_bad (_("missing or invalid displacement expression `%s'"),
11110 else if (exp
->X_op
== O_constant
)
11112 /* Sizing gets taken care of by optimize_disp().
11114 If not 64bit, sign/zero extend val, to account for wraparound
11116 if (flag_code
!= CODE_64BIT
)
11117 exp
->X_add_number
= extend_to_32bit_address (exp
->X_add_number
);
11120 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
11121 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
11122 && exp_seg
!= absolute_section
11123 && exp_seg
!= text_section
11124 && exp_seg
!= data_section
11125 && exp_seg
!= bss_section
11126 && exp_seg
!= undefined_section
11127 && !bfd_is_com_section (exp_seg
))
11129 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
11134 else if (current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
)
11135 i
.types
[this_operand
].bitfield
.disp8
= 1;
11137 /* Check if this is a displacement only operand. */
11138 bigdisp
= operand_type_and_not (i
.types
[this_operand
], anydisp
);
11139 if (operand_type_all_zero (&bigdisp
))
11140 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
11146 /* Return the active addressing mode, taking address override and
11147 registers forming the address into consideration. Update the
11148 address override prefix if necessary. */
11150 static enum flag_code
11151 i386_addressing_mode (void)
11153 enum flag_code addr_mode
;
11155 if (i
.prefix
[ADDR_PREFIX
])
11156 addr_mode
= flag_code
== CODE_32BIT
? CODE_16BIT
: CODE_32BIT
;
11157 else if (flag_code
== CODE_16BIT
11158 && current_templates
->start
->cpu_flags
.bitfield
.cpumpx
11159 /* Avoid replacing the "16-bit addressing not allowed" diagnostic
11160 from md_assemble() by "is not a valid base/index expression"
11161 when there is a base and/or index. */
11162 && !i
.types
[this_operand
].bitfield
.baseindex
)
11164 /* MPX insn memory operands with neither base nor index must be forced
11165 to use 32-bit addressing in 16-bit mode. */
11166 addr_mode
= CODE_32BIT
;
11167 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
11169 gas_assert (!i
.types
[this_operand
].bitfield
.disp16
);
11170 gas_assert (!i
.types
[this_operand
].bitfield
.disp32
);
11174 addr_mode
= flag_code
;
11176 #if INFER_ADDR_PREFIX
11177 if (i
.mem_operands
== 0)
11179 /* Infer address prefix from the first memory operand. */
11180 const reg_entry
*addr_reg
= i
.base_reg
;
11182 if (addr_reg
== NULL
)
11183 addr_reg
= i
.index_reg
;
11187 if (addr_reg
->reg_type
.bitfield
.dword
)
11188 addr_mode
= CODE_32BIT
;
11189 else if (flag_code
!= CODE_64BIT
11190 && addr_reg
->reg_type
.bitfield
.word
)
11191 addr_mode
= CODE_16BIT
;
11193 if (addr_mode
!= flag_code
)
11195 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
11197 /* Change the size of any displacement too. At most one
11198 of Disp16 or Disp32 is set.
11199 FIXME. There doesn't seem to be any real need for
11200 separate Disp16 and Disp32 flags. The same goes for
11201 Imm16 and Imm32. Removing them would probably clean
11202 up the code quite a lot. */
11203 if (flag_code
!= CODE_64BIT
11204 && (i
.types
[this_operand
].bitfield
.disp16
11205 || i
.types
[this_operand
].bitfield
.disp32
))
11206 i
.types
[this_operand
]
11207 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
11217 /* Make sure the memory operand we've been dealt is valid.
11218 Return 1 on success, 0 on a failure. */
11221 i386_index_check (const char *operand_string
)
11223 const char *kind
= "base/index";
11224 enum flag_code addr_mode
= i386_addressing_mode ();
11225 const insn_template
*t
= current_templates
->start
;
11227 if (t
->opcode_modifier
.isstring
11228 && !t
->cpu_flags
.bitfield
.cpupadlock
11229 && (current_templates
->end
[-1].opcode_modifier
.isstring
11230 || i
.mem_operands
))
11232 /* Memory operands of string insns are special in that they only allow
11233 a single register (rDI, rSI, or rBX) as their memory address. */
11234 const reg_entry
*expected_reg
;
11235 static const char *di_si
[][2] =
11241 static const char *bx
[] = { "ebx", "bx", "rbx" };
11243 kind
= "string address";
11245 if (t
->opcode_modifier
.prefixok
== PrefixRep
)
11247 int es_op
= current_templates
->end
[-1].opcode_modifier
.isstring
11248 - IS_STRING_ES_OP0
;
11251 if (!current_templates
->end
[-1].operand_types
[0].bitfield
.baseindex
11252 || ((!i
.mem_operands
!= !intel_syntax
)
11253 && current_templates
->end
[-1].operand_types
[1]
11254 .bitfield
.baseindex
))
11257 = (const reg_entry
*) str_hash_find (reg_hash
,
11258 di_si
[addr_mode
][op
== es_op
]);
11262 = (const reg_entry
*)str_hash_find (reg_hash
, bx
[addr_mode
]);
11264 if (i
.base_reg
!= expected_reg
11266 || operand_type_check (i
.types
[this_operand
], disp
))
11268 /* The second memory operand must have the same size as
11272 && !((addr_mode
== CODE_64BIT
11273 && i
.base_reg
->reg_type
.bitfield
.qword
)
11274 || (addr_mode
== CODE_32BIT
11275 ? i
.base_reg
->reg_type
.bitfield
.dword
11276 : i
.base_reg
->reg_type
.bitfield
.word
)))
11279 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
11281 intel_syntax
? '[' : '(',
11283 expected_reg
->reg_name
,
11284 intel_syntax
? ']' : ')');
11291 as_bad (_("`%s' is not a valid %s expression"),
11292 operand_string
, kind
);
11297 if (addr_mode
!= CODE_16BIT
)
11299 /* 32-bit/64-bit checks. */
11300 if (i
.disp_encoding
== disp_encoding_16bit
)
11303 as_bad (_("invalid `%s' prefix"),
11304 addr_mode
== CODE_16BIT
? "{disp32}" : "{disp16}");
11309 && ((addr_mode
== CODE_64BIT
11310 ? !i
.base_reg
->reg_type
.bitfield
.qword
11311 : !i
.base_reg
->reg_type
.bitfield
.dword
)
11312 || (i
.index_reg
&& i
.base_reg
->reg_num
== RegIP
)
11313 || i
.base_reg
->reg_num
== RegIZ
))
11315 && !i
.index_reg
->reg_type
.bitfield
.xmmword
11316 && !i
.index_reg
->reg_type
.bitfield
.ymmword
11317 && !i
.index_reg
->reg_type
.bitfield
.zmmword
11318 && ((addr_mode
== CODE_64BIT
11319 ? !i
.index_reg
->reg_type
.bitfield
.qword
11320 : !i
.index_reg
->reg_type
.bitfield
.dword
)
11321 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
11324 /* bndmk, bndldx, bndstx and mandatory non-vector SIB have special restrictions. */
11325 if ((t
->opcode_modifier
.opcodeprefix
== PREFIX_0XF3
11326 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11327 && t
->base_opcode
== 0x1b)
11328 || (t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
11329 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11330 && (t
->base_opcode
& ~1) == 0x1a)
11331 || t
->opcode_modifier
.sib
== SIBMEM
)
11333 /* They cannot use RIP-relative addressing. */
11334 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
11336 as_bad (_("`%s' cannot be used here"), operand_string
);
11340 /* bndldx and bndstx ignore their scale factor. */
11341 if (t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
11342 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11343 && (t
->base_opcode
& ~1) == 0x1a
11344 && i
.log2_scale_factor
)
11345 as_warn (_("register scaling is being ignored here"));
11350 /* 16-bit checks. */
11351 if (i
.disp_encoding
== disp_encoding_32bit
)
11355 && (!i
.base_reg
->reg_type
.bitfield
.word
11356 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
11358 && (!i
.index_reg
->reg_type
.bitfield
.word
11359 || !i
.index_reg
->reg_type
.bitfield
.baseindex
11361 && i
.base_reg
->reg_num
< 6
11362 && i
.index_reg
->reg_num
>= 6
11363 && i
.log2_scale_factor
== 0))))
11370 /* Handle vector immediates. */
11373 RC_SAE_immediate (const char *imm_start
)
11375 unsigned int match_found
, j
;
11376 const char *pstr
= imm_start
;
11384 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); j
++)
11386 if (!strncmp (pstr
, RC_NamesTable
[j
].name
, RC_NamesTable
[j
].len
))
11388 if (i
.rounding
.type
!= rc_none
)
11390 as_bad (_("duplicated `%s'"), imm_start
);
11394 i
.rounding
.type
= RC_NamesTable
[j
].type
;
11395 i
.rounding
.operand
= this_operand
;
11397 pstr
+= RC_NamesTable
[j
].len
;
11405 if (*pstr
++ != '}')
11407 as_bad (_("Missing '}': '%s'"), imm_start
);
11410 /* RC/SAE immediate string should contain nothing more. */;
11413 as_bad (_("Junk after '}': '%s'"), imm_start
);
11417 exp
= &im_expressions
[i
.imm_operands
++];
11418 i
.op
[this_operand
].imms
= exp
;
11420 exp
->X_op
= O_constant
;
11421 exp
->X_add_number
= 0;
11422 exp
->X_add_symbol
= (symbolS
*) 0;
11423 exp
->X_op_symbol
= (symbolS
*) 0;
11425 i
.types
[this_operand
].bitfield
.imm8
= 1;
11429 /* Only string instructions can have a second memory operand, so
11430 reduce current_templates to just those if it contains any. */
11432 maybe_adjust_templates (void)
11434 const insn_template
*t
;
11436 gas_assert (i
.mem_operands
== 1);
11438 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
11439 if (t
->opcode_modifier
.isstring
)
11442 if (t
< current_templates
->end
)
11444 static templates aux_templates
;
11447 aux_templates
.start
= t
;
11448 for (; t
< current_templates
->end
; ++t
)
11449 if (!t
->opcode_modifier
.isstring
)
11451 aux_templates
.end
= t
;
11453 /* Determine whether to re-check the first memory operand. */
11454 recheck
= (aux_templates
.start
!= current_templates
->start
11455 || t
!= current_templates
->end
);
11457 current_templates
= &aux_templates
;
11461 i
.mem_operands
= 0;
11462 if (i
.memop1_string
!= NULL
11463 && i386_index_check (i
.memop1_string
) == 0)
11465 i
.mem_operands
= 1;
11472 static INLINE
bool starts_memory_operand (char c
)
11475 || is_identifier_char (c
)
11476 || strchr ("([\"+-!~", c
);
11479 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
11483 i386_att_operand (char *operand_string
)
11485 const reg_entry
*r
;
11487 char *op_string
= operand_string
;
11489 if (is_space_char (*op_string
))
11492 /* We check for an absolute prefix (differentiating,
11493 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
11494 if (*op_string
== ABSOLUTE_PREFIX
)
11497 if (is_space_char (*op_string
))
11499 i
.jumpabsolute
= true;
11502 /* Check if operand is a register. */
11503 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
11505 i386_operand_type temp
;
11510 /* Check for a segment override by searching for ':' after a
11511 segment register. */
11512 op_string
= end_op
;
11513 if (is_space_char (*op_string
))
11515 if (*op_string
== ':' && r
->reg_type
.bitfield
.class == SReg
)
11517 i
.seg
[i
.mem_operands
] = r
;
11519 /* Skip the ':' and whitespace. */
11521 if (is_space_char (*op_string
))
11524 /* Handle case of %es:*foo. */
11525 if (!i
.jumpabsolute
&& *op_string
== ABSOLUTE_PREFIX
)
11528 if (is_space_char (*op_string
))
11530 i
.jumpabsolute
= true;
11533 if (!starts_memory_operand (*op_string
))
11535 as_bad (_("bad memory operand `%s'"), op_string
);
11538 goto do_memory_reference
;
11541 /* Handle vector operations. */
11542 if (*op_string
== '{')
11544 op_string
= check_VecOperations (op_string
);
11545 if (op_string
== NULL
)
11551 as_bad (_("junk `%s' after register"), op_string
);
11554 temp
= r
->reg_type
;
11555 temp
.bitfield
.baseindex
= 0;
11556 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
11558 i
.types
[this_operand
].bitfield
.unspecified
= 0;
11559 i
.op
[this_operand
].regs
= r
;
11562 else if (*op_string
== REGISTER_PREFIX
)
11564 as_bad (_("bad register name `%s'"), op_string
);
11567 else if (*op_string
== IMMEDIATE_PREFIX
)
11570 if (i
.jumpabsolute
)
11572 as_bad (_("immediate operand illegal with absolute jump"));
11575 if (!i386_immediate (op_string
))
11578 else if (RC_SAE_immediate (operand_string
))
11580 /* If it is a RC or SAE immediate, do nothing. */
11583 else if (starts_memory_operand (*op_string
))
11585 /* This is a memory reference of some sort. */
11588 /* Start and end of displacement string expression (if found). */
11589 char *displacement_string_start
;
11590 char *displacement_string_end
;
11592 do_memory_reference
:
11593 if (i
.mem_operands
== 1 && !maybe_adjust_templates ())
11595 if ((i
.mem_operands
== 1
11596 && !current_templates
->start
->opcode_modifier
.isstring
)
11597 || i
.mem_operands
== 2)
11599 as_bad (_("too many memory references for `%s'"),
11600 current_templates
->start
->name
);
11604 /* Check for base index form. We detect the base index form by
11605 looking for an ')' at the end of the operand, searching
11606 for the '(' matching it, and finding a REGISTER_PREFIX or ','
11608 base_string
= op_string
+ strlen (op_string
);
11610 /* Handle vector operations. */
11612 if (is_space_char (*base_string
))
11615 if (*base_string
== '}')
11617 char *vop_start
= NULL
;
11619 while (base_string
-- > op_string
)
11621 if (*base_string
== '"')
11623 if (*base_string
!= '{')
11626 vop_start
= base_string
;
11629 if (is_space_char (*base_string
))
11632 if (*base_string
!= '}')
11640 as_bad (_("unbalanced figure braces"));
11644 if (check_VecOperations (vop_start
) == NULL
)
11648 /* If we only have a displacement, set-up for it to be parsed later. */
11649 displacement_string_start
= op_string
;
11650 displacement_string_end
= base_string
+ 1;
11652 if (*base_string
== ')')
11655 unsigned int parens_not_balanced
= 1;
11657 /* We've already checked that the number of left & right ()'s are
11658 equal, so this loop will not be infinite. */
11662 if (*base_string
== ')')
11663 parens_not_balanced
++;
11664 if (*base_string
== '(')
11665 parens_not_balanced
--;
11667 while (parens_not_balanced
&& *base_string
!= '"');
11669 temp_string
= base_string
;
11671 /* Skip past '(' and whitespace. */
11672 if (*base_string
== '(')
11674 if (is_space_char (*base_string
))
11677 if (*base_string
== ','
11678 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
11681 displacement_string_end
= temp_string
;
11683 i
.types
[this_operand
].bitfield
.baseindex
= 1;
11687 if (i
.base_reg
== &bad_reg
)
11689 base_string
= end_op
;
11690 if (is_space_char (*base_string
))
11694 /* There may be an index reg or scale factor here. */
11695 if (*base_string
== ',')
11698 if (is_space_char (*base_string
))
11701 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
11704 if (i
.index_reg
== &bad_reg
)
11706 base_string
= end_op
;
11707 if (is_space_char (*base_string
))
11709 if (*base_string
== ',')
11712 if (is_space_char (*base_string
))
11715 else if (*base_string
!= ')')
11717 as_bad (_("expecting `,' or `)' "
11718 "after index register in `%s'"),
11723 else if (*base_string
== REGISTER_PREFIX
)
11725 end_op
= strchr (base_string
, ',');
11728 as_bad (_("bad register name `%s'"), base_string
);
11732 /* Check for scale factor. */
11733 if (*base_string
!= ')')
11735 char *end_scale
= i386_scale (base_string
);
11740 base_string
= end_scale
;
11741 if (is_space_char (*base_string
))
11743 if (*base_string
!= ')')
11745 as_bad (_("expecting `)' "
11746 "after scale factor in `%s'"),
11751 else if (!i
.index_reg
)
11753 as_bad (_("expecting index register or scale factor "
11754 "after `,'; got '%c'"),
11759 else if (*base_string
!= ')')
11761 as_bad (_("expecting `,' or `)' "
11762 "after base register in `%s'"),
11767 else if (*base_string
== REGISTER_PREFIX
)
11769 end_op
= strchr (base_string
, ',');
11772 as_bad (_("bad register name `%s'"), base_string
);
11777 /* If there's an expression beginning the operand, parse it,
11778 assuming displacement_string_start and
11779 displacement_string_end are meaningful. */
11780 if (displacement_string_start
!= displacement_string_end
)
11782 if (!i386_displacement (displacement_string_start
,
11783 displacement_string_end
))
11787 /* Special case for (%dx) while doing input/output op. */
11789 && i
.base_reg
->reg_type
.bitfield
.instance
== RegD
11790 && i
.base_reg
->reg_type
.bitfield
.word
11791 && i
.index_reg
== 0
11792 && i
.log2_scale_factor
== 0
11793 && i
.seg
[i
.mem_operands
] == 0
11794 && !operand_type_check (i
.types
[this_operand
], disp
))
11796 i
.types
[this_operand
] = i
.base_reg
->reg_type
;
11800 if (i386_index_check (operand_string
) == 0)
11802 i
.flags
[this_operand
] |= Operand_Mem
;
11803 if (i
.mem_operands
== 0)
11804 i
.memop1_string
= xstrdup (operand_string
);
11809 /* It's not a memory operand; argh! */
11810 as_bad (_("invalid char %s beginning operand %d `%s'"),
11811 output_invalid (*op_string
),
11816 return 1; /* Normal return. */
11819 /* Calculate the maximum variable size (i.e., excluding fr_fix)
11820 that an rs_machine_dependent frag may reach. */
11823 i386_frag_max_var (fragS
*frag
)
11825 /* The only relaxable frags are for jumps.
11826 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
11827 gas_assert (frag
->fr_type
== rs_machine_dependent
);
11828 return TYPE_FROM_RELAX_STATE (frag
->fr_subtype
) == UNCOND_JUMP
? 4 : 5;
11831 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11833 elf_symbol_resolved_in_segment_p (symbolS
*fr_symbol
, offsetT fr_var
)
11835 /* STT_GNU_IFUNC symbol must go through PLT. */
11836 if ((symbol_get_bfdsym (fr_symbol
)->flags
11837 & BSF_GNU_INDIRECT_FUNCTION
) != 0)
11840 if (!S_IS_EXTERNAL (fr_symbol
))
11841 /* Symbol may be weak or local. */
11842 return !S_IS_WEAK (fr_symbol
);
11844 /* Global symbols with non-default visibility can't be preempted. */
11845 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol
)) != STV_DEFAULT
)
11848 if (fr_var
!= NO_RELOC
)
11849 switch ((enum bfd_reloc_code_real
) fr_var
)
11851 case BFD_RELOC_386_PLT32
:
11852 case BFD_RELOC_X86_64_PLT32
:
11853 /* Symbol with PLT relocation may be preempted. */
11859 /* Global symbols with default visibility in a shared library may be
11860 preempted by another definition. */
11865 /* Table 3-2. Macro-Fusible Instructions in Haswell Microarchitecture
11866 Note also work for Skylake and Cascadelake.
11867 ---------------------------------------------------------------------
11868 | JCC | ADD/SUB/CMP | INC/DEC | TEST/AND |
11869 | ------ | ----------- | ------- | -------- |
11871 | Jno | N | N | Y |
11872 | Jc/Jb | Y | N | Y |
11873 | Jae/Jnb | Y | N | Y |
11874 | Je/Jz | Y | Y | Y |
11875 | Jne/Jnz | Y | Y | Y |
11876 | Jna/Jbe | Y | N | Y |
11877 | Ja/Jnbe | Y | N | Y |
11879 | Jns | N | N | Y |
11880 | Jp/Jpe | N | N | Y |
11881 | Jnp/Jpo | N | N | Y |
11882 | Jl/Jnge | Y | Y | Y |
11883 | Jge/Jnl | Y | Y | Y |
11884 | Jle/Jng | Y | Y | Y |
11885 | Jg/Jnle | Y | Y | Y |
11886 --------------------------------------------------------------------- */
11888 i386_macro_fusible_p (enum mf_cmp_kind mf_cmp
, enum mf_jcc_kind mf_jcc
)
11890 if (mf_cmp
== mf_cmp_alu_cmp
)
11891 return ((mf_jcc
>= mf_jcc_jc
&& mf_jcc
<= mf_jcc_jna
)
11892 || mf_jcc
== mf_jcc_jl
|| mf_jcc
== mf_jcc_jle
);
11893 if (mf_cmp
== mf_cmp_incdec
)
11894 return (mf_jcc
== mf_jcc_je
|| mf_jcc
== mf_jcc_jl
11895 || mf_jcc
== mf_jcc_jle
);
11896 if (mf_cmp
== mf_cmp_test_and
)
11901 /* Return the next non-empty frag. */
11904 i386_next_non_empty_frag (fragS
*fragP
)
11906 /* There may be a frag with a ".fill 0" when there is no room in
11907 the current frag for frag_grow in output_insn. */
11908 for (fragP
= fragP
->fr_next
;
11910 && fragP
->fr_type
== rs_fill
11911 && fragP
->fr_fix
== 0);
11912 fragP
= fragP
->fr_next
)
11917 /* Return the next jcc frag after BRANCH_PADDING. */
11920 i386_next_fusible_jcc_frag (fragS
*maybe_cmp_fragP
, fragS
*pad_fragP
)
11922 fragS
*branch_fragP
;
11926 if (pad_fragP
->fr_type
== rs_machine_dependent
11927 && (TYPE_FROM_RELAX_STATE (pad_fragP
->fr_subtype
)
11928 == BRANCH_PADDING
))
11930 branch_fragP
= i386_next_non_empty_frag (pad_fragP
);
11931 if (branch_fragP
->fr_type
!= rs_machine_dependent
)
11933 if (TYPE_FROM_RELAX_STATE (branch_fragP
->fr_subtype
) == COND_JUMP
11934 && i386_macro_fusible_p (maybe_cmp_fragP
->tc_frag_data
.mf_type
,
11935 pad_fragP
->tc_frag_data
.mf_type
))
11936 return branch_fragP
;
11942 /* Classify BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags. */
11945 i386_classify_machine_dependent_frag (fragS
*fragP
)
11949 fragS
*branch_fragP
;
11951 unsigned int max_prefix_length
;
11953 if (fragP
->tc_frag_data
.classified
)
11956 /* First scan for BRANCH_PADDING and FUSED_JCC_PADDING. Convert
11957 FUSED_JCC_PADDING and merge BRANCH_PADDING. */
11958 for (next_fragP
= fragP
;
11959 next_fragP
!= NULL
;
11960 next_fragP
= next_fragP
->fr_next
)
11962 next_fragP
->tc_frag_data
.classified
= 1;
11963 if (next_fragP
->fr_type
== rs_machine_dependent
)
11964 switch (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
))
11966 case BRANCH_PADDING
:
11967 /* The BRANCH_PADDING frag must be followed by a branch
11969 branch_fragP
= i386_next_non_empty_frag (next_fragP
);
11970 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
11972 case FUSED_JCC_PADDING
:
11973 /* Check if this is a fused jcc:
11975 CMP like instruction
11979 cmp_fragP
= i386_next_non_empty_frag (next_fragP
);
11980 pad_fragP
= i386_next_non_empty_frag (cmp_fragP
);
11981 branch_fragP
= i386_next_fusible_jcc_frag (next_fragP
, pad_fragP
);
11984 /* The BRANCH_PADDING frag is merged with the
11985 FUSED_JCC_PADDING frag. */
11986 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
11987 /* CMP like instruction size. */
11988 next_fragP
->tc_frag_data
.cmp_size
= cmp_fragP
->fr_fix
;
11989 frag_wane (pad_fragP
);
11990 /* Skip to branch_fragP. */
11991 next_fragP
= branch_fragP
;
11993 else if (next_fragP
->tc_frag_data
.max_prefix_length
)
11995 /* Turn FUSED_JCC_PADDING into BRANCH_PREFIX if it isn't
11997 next_fragP
->fr_subtype
11998 = ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0);
11999 next_fragP
->tc_frag_data
.max_bytes
12000 = next_fragP
->tc_frag_data
.max_prefix_length
;
12001 /* This will be updated in the BRANCH_PREFIX scan. */
12002 next_fragP
->tc_frag_data
.max_prefix_length
= 0;
12005 frag_wane (next_fragP
);
12010 /* Stop if there is no BRANCH_PREFIX. */
12011 if (!align_branch_prefix_size
)
12014 /* Scan for BRANCH_PREFIX. */
12015 for (; fragP
!= NULL
; fragP
= fragP
->fr_next
)
12017 if (fragP
->fr_type
!= rs_machine_dependent
12018 || (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
12022 /* Count all BRANCH_PREFIX frags before BRANCH_PADDING and
12023 COND_JUMP_PREFIX. */
12024 max_prefix_length
= 0;
12025 for (next_fragP
= fragP
;
12026 next_fragP
!= NULL
;
12027 next_fragP
= next_fragP
->fr_next
)
12029 if (next_fragP
->fr_type
== rs_fill
)
12030 /* Skip rs_fill frags. */
12032 else if (next_fragP
->fr_type
!= rs_machine_dependent
)
12033 /* Stop for all other frags. */
12036 /* rs_machine_dependent frags. */
12037 if (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12040 /* Count BRANCH_PREFIX frags. */
12041 if (max_prefix_length
>= MAX_FUSED_JCC_PADDING_SIZE
)
12043 max_prefix_length
= MAX_FUSED_JCC_PADDING_SIZE
;
12044 frag_wane (next_fragP
);
12048 += next_fragP
->tc_frag_data
.max_bytes
;
12050 else if ((TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12052 || (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12053 == FUSED_JCC_PADDING
))
12055 /* Stop at BRANCH_PADDING and FUSED_JCC_PADDING. */
12056 fragP
->tc_frag_data
.u
.padding_fragP
= next_fragP
;
12060 /* Stop for other rs_machine_dependent frags. */
12064 fragP
->tc_frag_data
.max_prefix_length
= max_prefix_length
;
12066 /* Skip to the next frag. */
12067 fragP
= next_fragP
;
12071 /* Compute padding size for
12074 CMP like instruction
12076 COND_JUMP/UNCOND_JUMP
12081 COND_JUMP/UNCOND_JUMP
12085 i386_branch_padding_size (fragS
*fragP
, offsetT address
)
12087 unsigned int offset
, size
, padding_size
;
12088 fragS
*branch_fragP
= fragP
->tc_frag_data
.u
.branch_fragP
;
12090 /* The start address of the BRANCH_PADDING or FUSED_JCC_PADDING frag. */
12092 address
= fragP
->fr_address
;
12093 address
+= fragP
->fr_fix
;
12095 /* CMP like instrunction size. */
12096 size
= fragP
->tc_frag_data
.cmp_size
;
12098 /* The base size of the branch frag. */
12099 size
+= branch_fragP
->fr_fix
;
12101 /* Add opcode and displacement bytes for the rs_machine_dependent
12103 if (branch_fragP
->fr_type
== rs_machine_dependent
)
12104 size
+= md_relax_table
[branch_fragP
->fr_subtype
].rlx_length
;
12106 /* Check if branch is within boundary and doesn't end at the last
12108 offset
= address
& ((1U << align_branch_power
) - 1);
12109 if ((offset
+ size
) >= (1U << align_branch_power
))
12110 /* Padding needed to avoid crossing boundary. */
12111 padding_size
= (1U << align_branch_power
) - offset
;
12113 /* No padding needed. */
12116 /* The return value may be saved in tc_frag_data.length which is
12118 if (!fits_in_unsigned_byte (padding_size
))
12121 return padding_size
;
12124 /* i386_generic_table_relax_frag()
12126 Handle BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags to
12127 grow/shrink padding to align branch frags. Hand others to
12131 i386_generic_table_relax_frag (segT segment
, fragS
*fragP
, long stretch
)
12133 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
12134 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
12136 long padding_size
= i386_branch_padding_size (fragP
, 0);
12137 long grow
= padding_size
- fragP
->tc_frag_data
.length
;
12139 /* When the BRANCH_PREFIX frag is used, the computed address
12140 must match the actual address and there should be no padding. */
12141 if (fragP
->tc_frag_data
.padding_address
12142 && (fragP
->tc_frag_data
.padding_address
!= fragP
->fr_address
12146 /* Update the padding size. */
12148 fragP
->tc_frag_data
.length
= padding_size
;
12152 else if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12154 fragS
*padding_fragP
, *next_fragP
;
12155 long padding_size
, left_size
, last_size
;
12157 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
12158 if (!padding_fragP
)
12159 /* Use the padding set by the leading BRANCH_PREFIX frag. */
12160 return (fragP
->tc_frag_data
.length
12161 - fragP
->tc_frag_data
.last_length
);
12163 /* Compute the relative address of the padding frag in the very
12164 first time where the BRANCH_PREFIX frag sizes are zero. */
12165 if (!fragP
->tc_frag_data
.padding_address
)
12166 fragP
->tc_frag_data
.padding_address
12167 = padding_fragP
->fr_address
- (fragP
->fr_address
- stretch
);
12169 /* First update the last length from the previous interation. */
12170 left_size
= fragP
->tc_frag_data
.prefix_length
;
12171 for (next_fragP
= fragP
;
12172 next_fragP
!= padding_fragP
;
12173 next_fragP
= next_fragP
->fr_next
)
12174 if (next_fragP
->fr_type
== rs_machine_dependent
12175 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12180 int max
= next_fragP
->tc_frag_data
.max_bytes
;
12184 if (max
> left_size
)
12189 next_fragP
->tc_frag_data
.last_length
= size
;
12193 next_fragP
->tc_frag_data
.last_length
= 0;
12196 /* Check the padding size for the padding frag. */
12197 padding_size
= i386_branch_padding_size
12198 (padding_fragP
, (fragP
->fr_address
12199 + fragP
->tc_frag_data
.padding_address
));
12201 last_size
= fragP
->tc_frag_data
.prefix_length
;
12202 /* Check if there is change from the last interation. */
12203 if (padding_size
== last_size
)
12205 /* Update the expected address of the padding frag. */
12206 padding_fragP
->tc_frag_data
.padding_address
12207 = (fragP
->fr_address
+ padding_size
12208 + fragP
->tc_frag_data
.padding_address
);
12212 if (padding_size
> fragP
->tc_frag_data
.max_prefix_length
)
12214 /* No padding if there is no sufficient room. Clear the
12215 expected address of the padding frag. */
12216 padding_fragP
->tc_frag_data
.padding_address
= 0;
12220 /* Store the expected address of the padding frag. */
12221 padding_fragP
->tc_frag_data
.padding_address
12222 = (fragP
->fr_address
+ padding_size
12223 + fragP
->tc_frag_data
.padding_address
);
12225 fragP
->tc_frag_data
.prefix_length
= padding_size
;
12227 /* Update the length for the current interation. */
12228 left_size
= padding_size
;
12229 for (next_fragP
= fragP
;
12230 next_fragP
!= padding_fragP
;
12231 next_fragP
= next_fragP
->fr_next
)
12232 if (next_fragP
->fr_type
== rs_machine_dependent
12233 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12238 int max
= next_fragP
->tc_frag_data
.max_bytes
;
12242 if (max
> left_size
)
12247 next_fragP
->tc_frag_data
.length
= size
;
12251 next_fragP
->tc_frag_data
.length
= 0;
12254 return (fragP
->tc_frag_data
.length
12255 - fragP
->tc_frag_data
.last_length
);
12257 return relax_frag (segment
, fragP
, stretch
);
12260 /* md_estimate_size_before_relax()
12262 Called just before relax() for rs_machine_dependent frags. The x86
12263 assembler uses these frags to handle variable size jump
12266 Any symbol that is now undefined will not become defined.
12267 Return the correct fr_subtype in the frag.
12268 Return the initial "guess for variable size of frag" to caller.
12269 The guess is actually the growth beyond the fixed part. Whatever
12270 we do to grow the fixed or variable part contributes to our
12274 md_estimate_size_before_relax (fragS
*fragP
, segT segment
)
12276 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
12277 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
12278 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
12280 i386_classify_machine_dependent_frag (fragP
);
12281 return fragP
->tc_frag_data
.length
;
12284 /* We've already got fragP->fr_subtype right; all we have to do is
12285 check for un-relaxable symbols. On an ELF system, we can't relax
12286 an externally visible symbol, because it may be overridden by a
12288 if (S_GET_SEGMENT (fragP
->fr_symbol
) != segment
12289 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12291 && !elf_symbol_resolved_in_segment_p (fragP
->fr_symbol
,
12294 #if defined (OBJ_COFF) && defined (TE_PE)
12295 || (OUTPUT_FLAVOR
== bfd_target_coff_flavour
12296 && S_IS_WEAK (fragP
->fr_symbol
))
12300 /* Symbol is undefined in this segment, or we need to keep a
12301 reloc so that weak symbols can be overridden. */
12302 int size
= (fragP
->fr_subtype
& CODE16
) ? 2 : 4;
12303 enum bfd_reloc_code_real reloc_type
;
12304 unsigned char *opcode
;
12308 if (fragP
->fr_var
!= NO_RELOC
)
12309 reloc_type
= (enum bfd_reloc_code_real
) fragP
->fr_var
;
12310 else if (size
== 2)
12311 reloc_type
= BFD_RELOC_16_PCREL
;
12312 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12313 else if (fragP
->tc_frag_data
.code64
&& fragP
->fr_offset
== 0
12314 && need_plt32_p (fragP
->fr_symbol
))
12315 reloc_type
= BFD_RELOC_X86_64_PLT32
;
12318 reloc_type
= BFD_RELOC_32_PCREL
;
12320 old_fr_fix
= fragP
->fr_fix
;
12321 opcode
= (unsigned char *) fragP
->fr_opcode
;
12323 switch (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
))
12326 /* Make jmp (0xeb) a (d)word displacement jump. */
12328 fragP
->fr_fix
+= size
;
12329 fixP
= fix_new (fragP
, old_fr_fix
, size
,
12331 fragP
->fr_offset
, 1,
12337 && (!no_cond_jump_promotion
|| fragP
->fr_var
!= NO_RELOC
))
12339 /* Negate the condition, and branch past an
12340 unconditional jump. */
12343 /* Insert an unconditional jump. */
12345 /* We added two extra opcode bytes, and have a two byte
12347 fragP
->fr_fix
+= 2 + 2;
12348 fix_new (fragP
, old_fr_fix
+ 2, 2,
12350 fragP
->fr_offset
, 1,
12354 /* Fall through. */
12357 if (no_cond_jump_promotion
&& fragP
->fr_var
== NO_RELOC
)
12359 fragP
->fr_fix
+= 1;
12360 fixP
= fix_new (fragP
, old_fr_fix
, 1,
12362 fragP
->fr_offset
, 1,
12363 BFD_RELOC_8_PCREL
);
12364 fixP
->fx_signed
= 1;
12368 /* This changes the byte-displacement jump 0x7N
12369 to the (d)word-displacement jump 0x0f,0x8N. */
12370 opcode
[1] = opcode
[0] + 0x10;
12371 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12372 /* We've added an opcode byte. */
12373 fragP
->fr_fix
+= 1 + size
;
12374 fixP
= fix_new (fragP
, old_fr_fix
+ 1, size
,
12376 fragP
->fr_offset
, 1,
12381 BAD_CASE (fragP
->fr_subtype
);
12385 /* All jumps handled here are signed, but don't unconditionally use a
12386 signed limit check for 32 and 16 bit jumps as we want to allow wrap
12387 around at 4G (outside of 64-bit mode) and 64k. */
12388 if (size
== 4 && flag_code
== CODE_64BIT
)
12389 fixP
->fx_signed
= 1;
12392 return fragP
->fr_fix
- old_fr_fix
;
12395 /* Guess size depending on current relax state. Initially the relax
12396 state will correspond to a short jump and we return 1, because
12397 the variable part of the frag (the branch offset) is one byte
12398 long. However, we can relax a section more than once and in that
12399 case we must either set fr_subtype back to the unrelaxed state,
12400 or return the value for the appropriate branch. */
12401 return md_relax_table
[fragP
->fr_subtype
].rlx_length
;
12404 /* Called after relax() is finished.
12406 In: Address of frag.
12407 fr_type == rs_machine_dependent.
12408 fr_subtype is what the address relaxed to.
12410 Out: Any fixSs and constants are set up.
12411 Caller will turn frag into a ".space 0". */
12414 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
12417 unsigned char *opcode
;
12418 unsigned char *where_to_put_displacement
= NULL
;
12419 offsetT target_address
;
12420 offsetT opcode_address
;
12421 unsigned int extension
= 0;
12422 offsetT displacement_from_opcode_start
;
12424 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
12425 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
12426 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12428 /* Generate nop padding. */
12429 unsigned int size
= fragP
->tc_frag_data
.length
;
12432 if (size
> fragP
->tc_frag_data
.max_bytes
)
12438 const char *branch
= "branch";
12439 const char *prefix
= "";
12440 fragS
*padding_fragP
;
12441 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
12444 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
12445 switch (fragP
->tc_frag_data
.default_prefix
)
12450 case CS_PREFIX_OPCODE
:
12453 case DS_PREFIX_OPCODE
:
12456 case ES_PREFIX_OPCODE
:
12459 case FS_PREFIX_OPCODE
:
12462 case GS_PREFIX_OPCODE
:
12465 case SS_PREFIX_OPCODE
:
12470 msg
= _("%s:%u: add %d%s at 0x%llx to align "
12471 "%s within %d-byte boundary\n");
12473 msg
= _("%s:%u: add additional %d%s at 0x%llx to "
12474 "align %s within %d-byte boundary\n");
12478 padding_fragP
= fragP
;
12479 msg
= _("%s:%u: add %d%s-byte nop at 0x%llx to align "
12480 "%s within %d-byte boundary\n");
12484 switch (padding_fragP
->tc_frag_data
.branch_type
)
12486 case align_branch_jcc
:
12489 case align_branch_fused
:
12490 branch
= "fused jcc";
12492 case align_branch_jmp
:
12495 case align_branch_call
:
12498 case align_branch_indirect
:
12499 branch
= "indiret branch";
12501 case align_branch_ret
:
12508 fprintf (stdout
, msg
,
12509 fragP
->fr_file
, fragP
->fr_line
, size
, prefix
,
12510 (long long) fragP
->fr_address
, branch
,
12511 1 << align_branch_power
);
12513 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12514 memset (fragP
->fr_opcode
,
12515 fragP
->tc_frag_data
.default_prefix
, size
);
12517 i386_generate_nops (fragP
, (char *) fragP
->fr_opcode
,
12519 fragP
->fr_fix
+= size
;
12524 opcode
= (unsigned char *) fragP
->fr_opcode
;
12526 /* Address we want to reach in file space. */
12527 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
12529 /* Address opcode resides at in file space. */
12530 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
12532 /* Displacement from opcode start to fill into instruction. */
12533 displacement_from_opcode_start
= target_address
- opcode_address
;
12535 if ((fragP
->fr_subtype
& BIG
) == 0)
12537 /* Don't have to change opcode. */
12538 extension
= 1; /* 1 opcode + 1 displacement */
12539 where_to_put_displacement
= &opcode
[1];
12543 if (no_cond_jump_promotion
12544 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
12545 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
12546 _("long jump required"));
12548 switch (fragP
->fr_subtype
)
12550 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
12551 extension
= 4; /* 1 opcode + 4 displacement */
12553 where_to_put_displacement
= &opcode
[1];
12556 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
12557 extension
= 2; /* 1 opcode + 2 displacement */
12559 where_to_put_displacement
= &opcode
[1];
12562 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
12563 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
12564 extension
= 5; /* 2 opcode + 4 displacement */
12565 opcode
[1] = opcode
[0] + 0x10;
12566 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12567 where_to_put_displacement
= &opcode
[2];
12570 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
12571 extension
= 3; /* 2 opcode + 2 displacement */
12572 opcode
[1] = opcode
[0] + 0x10;
12573 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12574 where_to_put_displacement
= &opcode
[2];
12577 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
12582 where_to_put_displacement
= &opcode
[3];
12586 BAD_CASE (fragP
->fr_subtype
);
12591 /* If size if less then four we are sure that the operand fits,
12592 but if it's 4, then it could be that the displacement is larger
12594 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
12596 && ((addressT
) (displacement_from_opcode_start
- extension
12597 + ((addressT
) 1 << 31))
12598 > (((addressT
) 2 << 31) - 1)))
12600 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
12601 _("jump target out of range"));
12602 /* Make us emit 0. */
12603 displacement_from_opcode_start
= extension
;
12605 /* Now put displacement after opcode. */
12606 md_number_to_chars ((char *) where_to_put_displacement
,
12607 (valueT
) (displacement_from_opcode_start
- extension
),
12608 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
12609 fragP
->fr_fix
+= extension
;
12612 /* Apply a fixup (fixP) to segment data, once it has been determined
12613 by our caller that we have all the info we need to fix it up.
12615 Parameter valP is the pointer to the value of the bits.
12617 On the 386, immediates, displacements, and data pointers are all in
12618 the same (little-endian) format, so we don't need to care about which
12619 we are handling. */
12622 md_apply_fix (fixS
*fixP
, valueT
*valP
, segT seg ATTRIBUTE_UNUSED
)
12624 char *p
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
12625 valueT value
= *valP
;
12627 #if !defined (TE_Mach)
12628 if (fixP
->fx_pcrel
)
12630 switch (fixP
->fx_r_type
)
12636 fixP
->fx_r_type
= BFD_RELOC_64_PCREL
;
12639 case BFD_RELOC_X86_64_32S
:
12640 fixP
->fx_r_type
= BFD_RELOC_32_PCREL
;
12643 fixP
->fx_r_type
= BFD_RELOC_16_PCREL
;
12646 fixP
->fx_r_type
= BFD_RELOC_8_PCREL
;
12651 if (fixP
->fx_addsy
!= NULL
12652 && (fixP
->fx_r_type
== BFD_RELOC_32_PCREL
12653 || fixP
->fx_r_type
== BFD_RELOC_64_PCREL
12654 || fixP
->fx_r_type
== BFD_RELOC_16_PCREL
12655 || fixP
->fx_r_type
== BFD_RELOC_8_PCREL
)
12656 && !use_rela_relocations
)
12658 /* This is a hack. There should be a better way to handle this.
12659 This covers for the fact that bfd_install_relocation will
12660 subtract the current location (for partial_inplace, PC relative
12661 relocations); see more below. */
12665 || OUTPUT_FLAVOR
== bfd_target_coff_flavour
12668 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
12670 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12673 segT sym_seg
= S_GET_SEGMENT (fixP
->fx_addsy
);
12675 if ((sym_seg
== seg
12676 || (symbol_section_p (fixP
->fx_addsy
)
12677 && sym_seg
!= absolute_section
))
12678 && !generic_force_reloc (fixP
))
12680 /* Yes, we add the values in twice. This is because
12681 bfd_install_relocation subtracts them out again. I think
12682 bfd_install_relocation is broken, but I don't dare change
12684 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
12688 #if defined (OBJ_COFF) && defined (TE_PE)
12689 /* For some reason, the PE format does not store a
12690 section address offset for a PC relative symbol. */
12691 if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
12692 || S_IS_WEAK (fixP
->fx_addsy
))
12693 value
+= md_pcrel_from (fixP
);
12696 #if defined (OBJ_COFF) && defined (TE_PE)
12697 if (fixP
->fx_addsy
!= NULL
12698 && S_IS_WEAK (fixP
->fx_addsy
)
12699 /* PR 16858: Do not modify weak function references. */
12700 && ! fixP
->fx_pcrel
)
12702 #if !defined (TE_PEP)
12703 /* For x86 PE weak function symbols are neither PC-relative
12704 nor do they set S_IS_FUNCTION. So the only reliable way
12705 to detect them is to check the flags of their containing
12707 if (S_GET_SEGMENT (fixP
->fx_addsy
) != NULL
12708 && S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_CODE
)
12712 value
-= S_GET_VALUE (fixP
->fx_addsy
);
12716 /* Fix a few things - the dynamic linker expects certain values here,
12717 and we must not disappoint it. */
12718 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12719 if (IS_ELF
&& fixP
->fx_addsy
)
12720 switch (fixP
->fx_r_type
)
12722 case BFD_RELOC_386_PLT32
:
12723 case BFD_RELOC_X86_64_PLT32
:
12724 /* Make the jump instruction point to the address of the operand.
12725 At runtime we merely add the offset to the actual PLT entry.
12726 NB: Subtract the offset size only for jump instructions. */
12727 if (fixP
->fx_pcrel
)
12731 case BFD_RELOC_386_TLS_GD
:
12732 case BFD_RELOC_386_TLS_LDM
:
12733 case BFD_RELOC_386_TLS_IE_32
:
12734 case BFD_RELOC_386_TLS_IE
:
12735 case BFD_RELOC_386_TLS_GOTIE
:
12736 case BFD_RELOC_386_TLS_GOTDESC
:
12737 case BFD_RELOC_X86_64_TLSGD
:
12738 case BFD_RELOC_X86_64_TLSLD
:
12739 case BFD_RELOC_X86_64_GOTTPOFF
:
12740 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
12741 value
= 0; /* Fully resolved at runtime. No addend. */
12743 case BFD_RELOC_386_TLS_LE
:
12744 case BFD_RELOC_386_TLS_LDO_32
:
12745 case BFD_RELOC_386_TLS_LE_32
:
12746 case BFD_RELOC_X86_64_DTPOFF32
:
12747 case BFD_RELOC_X86_64_DTPOFF64
:
12748 case BFD_RELOC_X86_64_TPOFF32
:
12749 case BFD_RELOC_X86_64_TPOFF64
:
12750 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
12753 case BFD_RELOC_386_TLS_DESC_CALL
:
12754 case BFD_RELOC_X86_64_TLSDESC_CALL
:
12755 value
= 0; /* Fully resolved at runtime. No addend. */
12756 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
12760 case BFD_RELOC_VTABLE_INHERIT
:
12761 case BFD_RELOC_VTABLE_ENTRY
:
12768 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
12770 /* If not 64bit, massage value, to account for wraparound when !BFD64. */
12772 value
= extend_to_32bit_address (value
);
12775 #endif /* !defined (TE_Mach) */
12777 /* Are we finished with this relocation now? */
12778 if (fixP
->fx_addsy
== NULL
)
12781 switch (fixP
->fx_r_type
)
12783 case BFD_RELOC_X86_64_32S
:
12784 fixP
->fx_signed
= 1;
12791 #if defined (OBJ_COFF) && defined (TE_PE)
12792 else if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
12795 /* Remember value for tc_gen_reloc. */
12796 fixP
->fx_addnumber
= value
;
12797 /* Clear out the frag for now. */
12801 else if (use_rela_relocations
)
12803 if (!disallow_64bit_reloc
|| fixP
->fx_r_type
== NO_RELOC
)
12804 fixP
->fx_no_overflow
= 1;
12805 /* Remember value for tc_gen_reloc. */
12806 fixP
->fx_addnumber
= value
;
12810 md_number_to_chars (p
, value
, fixP
->fx_size
);
12814 md_atof (int type
, char *litP
, int *sizeP
)
12816 /* This outputs the LITTLENUMs in REVERSE order;
12817 in accord with the bigendian 386. */
12818 return ieee_md_atof (type
, litP
, sizeP
, false);
12821 static char output_invalid_buf
[sizeof (unsigned char) * 2 + 6];
12824 output_invalid (int c
)
12827 snprintf (output_invalid_buf
, sizeof (output_invalid_buf
),
12830 snprintf (output_invalid_buf
, sizeof (output_invalid_buf
),
12831 "(0x%x)", (unsigned char) c
);
12832 return output_invalid_buf
;
12835 /* Verify that @r can be used in the current context. */
12837 static bool check_register (const reg_entry
*r
)
12839 if (allow_pseudo_reg
)
12842 if (operand_type_all_zero (&r
->reg_type
))
12845 if ((r
->reg_type
.bitfield
.dword
12846 || (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
> 3)
12847 || r
->reg_type
.bitfield
.class == RegCR
12848 || r
->reg_type
.bitfield
.class == RegDR
)
12849 && !cpu_arch_flags
.bitfield
.cpui386
)
12852 if (r
->reg_type
.bitfield
.class == RegTR
12853 && (flag_code
== CODE_64BIT
12854 || !cpu_arch_flags
.bitfield
.cpui386
12855 || cpu_arch_isa_flags
.bitfield
.cpui586
12856 || cpu_arch_isa_flags
.bitfield
.cpui686
))
12859 if (r
->reg_type
.bitfield
.class == RegMMX
&& !cpu_arch_flags
.bitfield
.cpummx
)
12862 if (!cpu_arch_flags
.bitfield
.cpuavx512f
)
12864 if (r
->reg_type
.bitfield
.zmmword
12865 || r
->reg_type
.bitfield
.class == RegMask
)
12868 if (!cpu_arch_flags
.bitfield
.cpuavx
)
12870 if (r
->reg_type
.bitfield
.ymmword
)
12873 if (!cpu_arch_flags
.bitfield
.cpusse
&& r
->reg_type
.bitfield
.xmmword
)
12878 if (r
->reg_type
.bitfield
.tmmword
12879 && (!cpu_arch_flags
.bitfield
.cpuamx_tile
12880 || flag_code
!= CODE_64BIT
))
12883 if (r
->reg_type
.bitfield
.class == RegBND
&& !cpu_arch_flags
.bitfield
.cpumpx
)
12886 /* Don't allow fake index register unless allow_index_reg isn't 0. */
12887 if (!allow_index_reg
&& r
->reg_num
== RegIZ
)
12890 /* Upper 16 vector registers are only available with VREX in 64bit
12891 mode, and require EVEX encoding. */
12892 if (r
->reg_flags
& RegVRex
)
12894 if (!cpu_arch_flags
.bitfield
.cpuavx512f
12895 || flag_code
!= CODE_64BIT
)
12898 if (i
.vec_encoding
== vex_encoding_default
)
12899 i
.vec_encoding
= vex_encoding_evex
;
12900 else if (i
.vec_encoding
!= vex_encoding_evex
)
12901 i
.vec_encoding
= vex_encoding_error
;
12904 if (((r
->reg_flags
& (RegRex64
| RegRex
)) || r
->reg_type
.bitfield
.qword
)
12905 && (!cpu_arch_flags
.bitfield
.cpulm
|| r
->reg_type
.bitfield
.class != RegCR
)
12906 && flag_code
!= CODE_64BIT
)
12909 if (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
== RegFlat
12916 /* REG_STRING starts *before* REGISTER_PREFIX. */
12918 static const reg_entry
*
12919 parse_real_register (char *reg_string
, char **end_op
)
12921 char *s
= reg_string
;
12923 char reg_name_given
[MAX_REG_NAME_SIZE
+ 1];
12924 const reg_entry
*r
;
12926 /* Skip possible REGISTER_PREFIX and possible whitespace. */
12927 if (*s
== REGISTER_PREFIX
)
12930 if (is_space_char (*s
))
12933 p
= reg_name_given
;
12934 while ((*p
++ = register_chars
[(unsigned char) *s
]) != '\0')
12936 if (p
>= reg_name_given
+ MAX_REG_NAME_SIZE
)
12937 return (const reg_entry
*) NULL
;
12941 /* For naked regs, make sure that we are not dealing with an identifier.
12942 This prevents confusing an identifier like `eax_var' with register
12944 if (allow_naked_reg
&& identifier_chars
[(unsigned char) *s
])
12945 return (const reg_entry
*) NULL
;
12949 r
= (const reg_entry
*) str_hash_find (reg_hash
, reg_name_given
);
12951 /* Handle floating point regs, allowing spaces in the (i) part. */
12954 if (!cpu_arch_flags
.bitfield
.cpu8087
12955 && !cpu_arch_flags
.bitfield
.cpu287
12956 && !cpu_arch_flags
.bitfield
.cpu387
12957 && !allow_pseudo_reg
)
12958 return (const reg_entry
*) NULL
;
12960 if (is_space_char (*s
))
12965 if (is_space_char (*s
))
12967 if (*s
>= '0' && *s
<= '7')
12969 int fpr
= *s
- '0';
12971 if (is_space_char (*s
))
12976 know (r
[fpr
].reg_num
== fpr
);
12980 /* We have "%st(" then garbage. */
12981 return (const reg_entry
*) NULL
;
12985 return r
&& check_register (r
) ? r
: NULL
;
12988 /* REG_STRING starts *before* REGISTER_PREFIX. */
12990 static const reg_entry
*
12991 parse_register (char *reg_string
, char **end_op
)
12993 const reg_entry
*r
;
12995 if (*reg_string
== REGISTER_PREFIX
|| allow_naked_reg
)
12996 r
= parse_real_register (reg_string
, end_op
);
13001 char *save
= input_line_pointer
;
13005 input_line_pointer
= reg_string
;
13006 c
= get_symbol_name (®_string
);
13007 symbolP
= symbol_find (reg_string
);
13008 while (symbolP
&& S_GET_SEGMENT (symbolP
) != reg_section
)
13010 const expressionS
*e
= symbol_get_value_expression(symbolP
);
13012 if (e
->X_op
!= O_symbol
|| e
->X_add_number
)
13014 symbolP
= e
->X_add_symbol
;
13016 if (symbolP
&& S_GET_SEGMENT (symbolP
) == reg_section
)
13018 const expressionS
*e
= symbol_get_value_expression (symbolP
);
13020 know (e
->X_op
== O_register
);
13021 know (e
->X_add_number
>= 0
13022 && (valueT
) e
->X_add_number
< i386_regtab_size
);
13023 r
= i386_regtab
+ e
->X_add_number
;
13024 if (!check_register (r
))
13026 as_bad (_("register '%s%s' cannot be used here"),
13027 register_prefix
, r
->reg_name
);
13030 *end_op
= input_line_pointer
;
13032 *input_line_pointer
= c
;
13033 input_line_pointer
= save
;
13039 i386_parse_name (char *name
, expressionS
*e
, char *nextcharP
)
13041 const reg_entry
*r
= NULL
;
13042 char *end
= input_line_pointer
;
13045 if (*name
== REGISTER_PREFIX
|| allow_naked_reg
)
13046 r
= parse_real_register (name
, &input_line_pointer
);
13047 if (r
&& end
<= input_line_pointer
)
13049 *nextcharP
= *input_line_pointer
;
13050 *input_line_pointer
= 0;
13053 e
->X_op
= O_register
;
13054 e
->X_add_number
= r
- i386_regtab
;
13057 e
->X_op
= O_illegal
;
13060 input_line_pointer
= end
;
13062 return intel_syntax
? i386_intel_parse_name (name
, e
) : 0;
13066 md_operand (expressionS
*e
)
13069 const reg_entry
*r
;
13071 switch (*input_line_pointer
)
13073 case REGISTER_PREFIX
:
13074 r
= parse_real_register (input_line_pointer
, &end
);
13077 e
->X_op
= O_register
;
13078 e
->X_add_number
= r
- i386_regtab
;
13079 input_line_pointer
= end
;
13084 gas_assert (intel_syntax
);
13085 end
= input_line_pointer
++;
13087 if (*input_line_pointer
== ']')
13089 ++input_line_pointer
;
13090 e
->X_op_symbol
= make_expr_symbol (e
);
13091 e
->X_add_symbol
= NULL
;
13092 e
->X_add_number
= 0;
13097 e
->X_op
= O_absent
;
13098 input_line_pointer
= end
;
13105 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13106 const char *md_shortopts
= "kVQ:sqnO::";
13108 const char *md_shortopts
= "qnO::";
13111 #define OPTION_32 (OPTION_MD_BASE + 0)
13112 #define OPTION_64 (OPTION_MD_BASE + 1)
13113 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
13114 #define OPTION_MARCH (OPTION_MD_BASE + 3)
13115 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
13116 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
13117 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
13118 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
13119 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
13120 #define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 9)
13121 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
13122 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
13123 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
13124 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
13125 #define OPTION_X32 (OPTION_MD_BASE + 14)
13126 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
13127 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
13128 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
13129 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
13130 #define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
13131 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
13132 #define OPTION_MSHARED (OPTION_MD_BASE + 21)
13133 #define OPTION_MAMD64 (OPTION_MD_BASE + 22)
13134 #define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
13135 #define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
13136 #define OPTION_X86_USED_NOTE (OPTION_MD_BASE + 25)
13137 #define OPTION_MVEXWIG (OPTION_MD_BASE + 26)
13138 #define OPTION_MALIGN_BRANCH_BOUNDARY (OPTION_MD_BASE + 27)
13139 #define OPTION_MALIGN_BRANCH_PREFIX_SIZE (OPTION_MD_BASE + 28)
13140 #define OPTION_MALIGN_BRANCH (OPTION_MD_BASE + 29)
13141 #define OPTION_MBRANCHES_WITH_32B_BOUNDARIES (OPTION_MD_BASE + 30)
13142 #define OPTION_MLFENCE_AFTER_LOAD (OPTION_MD_BASE + 31)
13143 #define OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH (OPTION_MD_BASE + 32)
13144 #define OPTION_MLFENCE_BEFORE_RET (OPTION_MD_BASE + 33)
13145 #define OPTION_MUSE_UNALIGNED_VECTOR_MOVE (OPTION_MD_BASE + 34)
13147 struct option md_longopts
[] =
13149 {"32", no_argument
, NULL
, OPTION_32
},
13150 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13151 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
13152 {"64", no_argument
, NULL
, OPTION_64
},
13154 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13155 {"x32", no_argument
, NULL
, OPTION_X32
},
13156 {"mshared", no_argument
, NULL
, OPTION_MSHARED
},
13157 {"mx86-used-note", required_argument
, NULL
, OPTION_X86_USED_NOTE
},
13159 {"divide", no_argument
, NULL
, OPTION_DIVIDE
},
13160 {"march", required_argument
, NULL
, OPTION_MARCH
},
13161 {"mtune", required_argument
, NULL
, OPTION_MTUNE
},
13162 {"mmnemonic", required_argument
, NULL
, OPTION_MMNEMONIC
},
13163 {"msyntax", required_argument
, NULL
, OPTION_MSYNTAX
},
13164 {"mindex-reg", no_argument
, NULL
, OPTION_MINDEX_REG
},
13165 {"mnaked-reg", no_argument
, NULL
, OPTION_MNAKED_REG
},
13166 {"msse2avx", no_argument
, NULL
, OPTION_MSSE2AVX
},
13167 {"muse-unaligned-vector-move", no_argument
, NULL
, OPTION_MUSE_UNALIGNED_VECTOR_MOVE
},
13168 {"msse-check", required_argument
, NULL
, OPTION_MSSE_CHECK
},
13169 {"moperand-check", required_argument
, NULL
, OPTION_MOPERAND_CHECK
},
13170 {"mavxscalar", required_argument
, NULL
, OPTION_MAVXSCALAR
},
13171 {"mvexwig", required_argument
, NULL
, OPTION_MVEXWIG
},
13172 {"madd-bnd-prefix", no_argument
, NULL
, OPTION_MADD_BND_PREFIX
},
13173 {"mevexlig", required_argument
, NULL
, OPTION_MEVEXLIG
},
13174 {"mevexwig", required_argument
, NULL
, OPTION_MEVEXWIG
},
13175 # if defined (TE_PE) || defined (TE_PEP)
13176 {"mbig-obj", no_argument
, NULL
, OPTION_MBIG_OBJ
},
13178 {"momit-lock-prefix", required_argument
, NULL
, OPTION_MOMIT_LOCK_PREFIX
},
13179 {"mfence-as-lock-add", required_argument
, NULL
, OPTION_MFENCE_AS_LOCK_ADD
},
13180 {"mrelax-relocations", required_argument
, NULL
, OPTION_MRELAX_RELOCATIONS
},
13181 {"mevexrcig", required_argument
, NULL
, OPTION_MEVEXRCIG
},
13182 {"malign-branch-boundary", required_argument
, NULL
, OPTION_MALIGN_BRANCH_BOUNDARY
},
13183 {"malign-branch-prefix-size", required_argument
, NULL
, OPTION_MALIGN_BRANCH_PREFIX_SIZE
},
13184 {"malign-branch", required_argument
, NULL
, OPTION_MALIGN_BRANCH
},
13185 {"mbranches-within-32B-boundaries", no_argument
, NULL
, OPTION_MBRANCHES_WITH_32B_BOUNDARIES
},
13186 {"mlfence-after-load", required_argument
, NULL
, OPTION_MLFENCE_AFTER_LOAD
},
13187 {"mlfence-before-indirect-branch", required_argument
, NULL
,
13188 OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
},
13189 {"mlfence-before-ret", required_argument
, NULL
, OPTION_MLFENCE_BEFORE_RET
},
13190 {"mamd64", no_argument
, NULL
, OPTION_MAMD64
},
13191 {"mintel64", no_argument
, NULL
, OPTION_MINTEL64
},
13192 {NULL
, no_argument
, NULL
, 0}
13194 size_t md_longopts_size
= sizeof (md_longopts
);
13197 md_parse_option (int c
, const char *arg
)
13200 char *arch
, *next
, *saved
, *type
;
13205 optimize_align_code
= 0;
13209 quiet_warnings
= 1;
13212 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13213 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
13214 should be emitted or not. FIXME: Not implemented. */
13216 if ((arg
[0] != 'y' && arg
[0] != 'n') || arg
[1])
13220 /* -V: SVR4 argument to print version ID. */
13222 print_version_id ();
13225 /* -k: Ignore for FreeBSD compatibility. */
13230 /* -s: On i386 Solaris, this tells the native assembler to use
13231 .stab instead of .stab.excl. We always use .stab anyhow. */
13234 case OPTION_MSHARED
:
13238 case OPTION_X86_USED_NOTE
:
13239 if (strcasecmp (arg
, "yes") == 0)
13241 else if (strcasecmp (arg
, "no") == 0)
13244 as_fatal (_("invalid -mx86-used-note= option: `%s'"), arg
);
13249 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13250 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
13253 const char **list
, **l
;
13255 list
= bfd_target_list ();
13256 for (l
= list
; *l
!= NULL
; l
++)
13257 if (startswith (*l
, "elf64-x86-64")
13258 || strcmp (*l
, "coff-x86-64") == 0
13259 || strcmp (*l
, "pe-x86-64") == 0
13260 || strcmp (*l
, "pei-x86-64") == 0
13261 || strcmp (*l
, "mach-o-x86-64") == 0)
13263 default_arch
= "x86_64";
13267 as_fatal (_("no compiled in support for x86_64"));
13273 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13277 const char **list
, **l
;
13279 list
= bfd_target_list ();
13280 for (l
= list
; *l
!= NULL
; l
++)
13281 if (startswith (*l
, "elf32-x86-64"))
13283 default_arch
= "x86_64:32";
13287 as_fatal (_("no compiled in support for 32bit x86_64"));
13291 as_fatal (_("32bit x86_64 is only supported for ELF"));
13296 default_arch
= "i386";
13299 case OPTION_DIVIDE
:
13300 #ifdef SVR4_COMMENT_CHARS
13305 n
= XNEWVEC (char, strlen (i386_comment_chars
) + 1);
13307 for (s
= i386_comment_chars
; *s
!= '\0'; s
++)
13311 i386_comment_chars
= n
;
13317 saved
= xstrdup (arg
);
13319 /* Allow -march=+nosse. */
13325 as_fatal (_("invalid -march= option: `%s'"), arg
);
13326 next
= strchr (arch
, '+');
13329 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13331 if (arch
== saved
&& strcmp (arch
, cpu_arch
[j
].name
) == 0)
13334 if (! cpu_arch
[j
].flags
.bitfield
.cpui386
)
13337 cpu_arch_name
= cpu_arch
[j
].name
;
13338 cpu_sub_arch_name
= NULL
;
13339 cpu_arch_flags
= cpu_arch
[j
].flags
;
13340 cpu_arch_isa
= cpu_arch
[j
].type
;
13341 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
13342 if (!cpu_arch_tune_set
)
13344 cpu_arch_tune
= cpu_arch_isa
;
13345 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
13349 else if (*cpu_arch
[j
].name
== '.'
13350 && strcmp (arch
, cpu_arch
[j
].name
+ 1) == 0)
13352 /* ISA extension. */
13353 i386_cpu_flags flags
;
13355 flags
= cpu_flags_or (cpu_arch_flags
,
13356 cpu_arch
[j
].flags
);
13358 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
13360 if (cpu_sub_arch_name
)
13362 char *name
= cpu_sub_arch_name
;
13363 cpu_sub_arch_name
= concat (name
,
13365 (const char *) NULL
);
13369 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
13370 cpu_arch_flags
= flags
;
13371 cpu_arch_isa_flags
= flags
;
13375 = cpu_flags_or (cpu_arch_isa_flags
,
13376 cpu_arch
[j
].flags
);
13381 if (j
>= ARRAY_SIZE (cpu_arch
))
13383 /* Disable an ISA extension. */
13384 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
13385 if (strcmp (arch
, cpu_noarch
[j
].name
) == 0)
13387 i386_cpu_flags flags
;
13389 flags
= cpu_flags_and_not (cpu_arch_flags
,
13390 cpu_noarch
[j
].flags
);
13391 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
13393 if (cpu_sub_arch_name
)
13395 char *name
= cpu_sub_arch_name
;
13396 cpu_sub_arch_name
= concat (arch
,
13397 (const char *) NULL
);
13401 cpu_sub_arch_name
= xstrdup (arch
);
13402 cpu_arch_flags
= flags
;
13403 cpu_arch_isa_flags
= flags
;
13408 if (j
>= ARRAY_SIZE (cpu_noarch
))
13409 j
= ARRAY_SIZE (cpu_arch
);
13412 if (j
>= ARRAY_SIZE (cpu_arch
))
13413 as_fatal (_("invalid -march= option: `%s'"), arg
);
13417 while (next
!= NULL
);
13423 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
13424 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13426 if (strcmp (arg
, cpu_arch
[j
].name
) == 0)
13428 cpu_arch_tune_set
= 1;
13429 cpu_arch_tune
= cpu_arch
[j
].type
;
13430 cpu_arch_tune_flags
= cpu_arch
[j
].flags
;
13434 if (j
>= ARRAY_SIZE (cpu_arch
))
13435 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
13438 case OPTION_MMNEMONIC
:
13439 if (strcasecmp (arg
, "att") == 0)
13440 intel_mnemonic
= 0;
13441 else if (strcasecmp (arg
, "intel") == 0)
13442 intel_mnemonic
= 1;
13444 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg
);
13447 case OPTION_MSYNTAX
:
13448 if (strcasecmp (arg
, "att") == 0)
13450 else if (strcasecmp (arg
, "intel") == 0)
13453 as_fatal (_("invalid -msyntax= option: `%s'"), arg
);
13456 case OPTION_MINDEX_REG
:
13457 allow_index_reg
= 1;
13460 case OPTION_MNAKED_REG
:
13461 allow_naked_reg
= 1;
13464 case OPTION_MSSE2AVX
:
13468 case OPTION_MUSE_UNALIGNED_VECTOR_MOVE
:
13469 use_unaligned_vector_move
= 1;
13472 case OPTION_MSSE_CHECK
:
13473 if (strcasecmp (arg
, "error") == 0)
13474 sse_check
= check_error
;
13475 else if (strcasecmp (arg
, "warning") == 0)
13476 sse_check
= check_warning
;
13477 else if (strcasecmp (arg
, "none") == 0)
13478 sse_check
= check_none
;
13480 as_fatal (_("invalid -msse-check= option: `%s'"), arg
);
13483 case OPTION_MOPERAND_CHECK
:
13484 if (strcasecmp (arg
, "error") == 0)
13485 operand_check
= check_error
;
13486 else if (strcasecmp (arg
, "warning") == 0)
13487 operand_check
= check_warning
;
13488 else if (strcasecmp (arg
, "none") == 0)
13489 operand_check
= check_none
;
13491 as_fatal (_("invalid -moperand-check= option: `%s'"), arg
);
13494 case OPTION_MAVXSCALAR
:
13495 if (strcasecmp (arg
, "128") == 0)
13496 avxscalar
= vex128
;
13497 else if (strcasecmp (arg
, "256") == 0)
13498 avxscalar
= vex256
;
13500 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg
);
13503 case OPTION_MVEXWIG
:
13504 if (strcmp (arg
, "0") == 0)
13506 else if (strcmp (arg
, "1") == 0)
13509 as_fatal (_("invalid -mvexwig= option: `%s'"), arg
);
13512 case OPTION_MADD_BND_PREFIX
:
13513 add_bnd_prefix
= 1;
13516 case OPTION_MEVEXLIG
:
13517 if (strcmp (arg
, "128") == 0)
13518 evexlig
= evexl128
;
13519 else if (strcmp (arg
, "256") == 0)
13520 evexlig
= evexl256
;
13521 else if (strcmp (arg
, "512") == 0)
13522 evexlig
= evexl512
;
13524 as_fatal (_("invalid -mevexlig= option: `%s'"), arg
);
13527 case OPTION_MEVEXRCIG
:
13528 if (strcmp (arg
, "rne") == 0)
13530 else if (strcmp (arg
, "rd") == 0)
13532 else if (strcmp (arg
, "ru") == 0)
13534 else if (strcmp (arg
, "rz") == 0)
13537 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg
);
13540 case OPTION_MEVEXWIG
:
13541 if (strcmp (arg
, "0") == 0)
13543 else if (strcmp (arg
, "1") == 0)
13546 as_fatal (_("invalid -mevexwig= option: `%s'"), arg
);
13549 # if defined (TE_PE) || defined (TE_PEP)
13550 case OPTION_MBIG_OBJ
:
13555 case OPTION_MOMIT_LOCK_PREFIX
:
13556 if (strcasecmp (arg
, "yes") == 0)
13557 omit_lock_prefix
= 1;
13558 else if (strcasecmp (arg
, "no") == 0)
13559 omit_lock_prefix
= 0;
13561 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg
);
13564 case OPTION_MFENCE_AS_LOCK_ADD
:
13565 if (strcasecmp (arg
, "yes") == 0)
13567 else if (strcasecmp (arg
, "no") == 0)
13570 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg
);
13573 case OPTION_MLFENCE_AFTER_LOAD
:
13574 if (strcasecmp (arg
, "yes") == 0)
13575 lfence_after_load
= 1;
13576 else if (strcasecmp (arg
, "no") == 0)
13577 lfence_after_load
= 0;
13579 as_fatal (_("invalid -mlfence-after-load= option: `%s'"), arg
);
13582 case OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
:
13583 if (strcasecmp (arg
, "all") == 0)
13585 lfence_before_indirect_branch
= lfence_branch_all
;
13586 if (lfence_before_ret
== lfence_before_ret_none
)
13587 lfence_before_ret
= lfence_before_ret_shl
;
13589 else if (strcasecmp (arg
, "memory") == 0)
13590 lfence_before_indirect_branch
= lfence_branch_memory
;
13591 else if (strcasecmp (arg
, "register") == 0)
13592 lfence_before_indirect_branch
= lfence_branch_register
;
13593 else if (strcasecmp (arg
, "none") == 0)
13594 lfence_before_indirect_branch
= lfence_branch_none
;
13596 as_fatal (_("invalid -mlfence-before-indirect-branch= option: `%s'"),
13600 case OPTION_MLFENCE_BEFORE_RET
:
13601 if (strcasecmp (arg
, "or") == 0)
13602 lfence_before_ret
= lfence_before_ret_or
;
13603 else if (strcasecmp (arg
, "not") == 0)
13604 lfence_before_ret
= lfence_before_ret_not
;
13605 else if (strcasecmp (arg
, "shl") == 0 || strcasecmp (arg
, "yes") == 0)
13606 lfence_before_ret
= lfence_before_ret_shl
;
13607 else if (strcasecmp (arg
, "none") == 0)
13608 lfence_before_ret
= lfence_before_ret_none
;
13610 as_fatal (_("invalid -mlfence-before-ret= option: `%s'"),
13614 case OPTION_MRELAX_RELOCATIONS
:
13615 if (strcasecmp (arg
, "yes") == 0)
13616 generate_relax_relocations
= 1;
13617 else if (strcasecmp (arg
, "no") == 0)
13618 generate_relax_relocations
= 0;
13620 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg
);
13623 case OPTION_MALIGN_BRANCH_BOUNDARY
:
13626 long int align
= strtoul (arg
, &end
, 0);
13631 align_branch_power
= 0;
13634 else if (align
>= 16)
13637 for (align_power
= 0;
13639 align
>>= 1, align_power
++)
13641 /* Limit alignment power to 31. */
13642 if (align
== 1 && align_power
< 32)
13644 align_branch_power
= align_power
;
13649 as_fatal (_("invalid -malign-branch-boundary= value: %s"), arg
);
13653 case OPTION_MALIGN_BRANCH_PREFIX_SIZE
:
13656 int align
= strtoul (arg
, &end
, 0);
13657 /* Some processors only support 5 prefixes. */
13658 if (*end
== '\0' && align
>= 0 && align
< 6)
13660 align_branch_prefix_size
= align
;
13663 as_fatal (_("invalid -malign-branch-prefix-size= value: %s"),
13668 case OPTION_MALIGN_BRANCH
:
13670 saved
= xstrdup (arg
);
13674 next
= strchr (type
, '+');
13677 if (strcasecmp (type
, "jcc") == 0)
13678 align_branch
|= align_branch_jcc_bit
;
13679 else if (strcasecmp (type
, "fused") == 0)
13680 align_branch
|= align_branch_fused_bit
;
13681 else if (strcasecmp (type
, "jmp") == 0)
13682 align_branch
|= align_branch_jmp_bit
;
13683 else if (strcasecmp (type
, "call") == 0)
13684 align_branch
|= align_branch_call_bit
;
13685 else if (strcasecmp (type
, "ret") == 0)
13686 align_branch
|= align_branch_ret_bit
;
13687 else if (strcasecmp (type
, "indirect") == 0)
13688 align_branch
|= align_branch_indirect_bit
;
13690 as_fatal (_("invalid -malign-branch= option: `%s'"), arg
);
13693 while (next
!= NULL
);
13697 case OPTION_MBRANCHES_WITH_32B_BOUNDARIES
:
13698 align_branch_power
= 5;
13699 align_branch_prefix_size
= 5;
13700 align_branch
= (align_branch_jcc_bit
13701 | align_branch_fused_bit
13702 | align_branch_jmp_bit
);
13705 case OPTION_MAMD64
:
13709 case OPTION_MINTEL64
:
13717 /* Turn off -Os. */
13718 optimize_for_space
= 0;
13720 else if (*arg
== 's')
13722 optimize_for_space
= 1;
13723 /* Turn on all encoding optimizations. */
13724 optimize
= INT_MAX
;
13728 optimize
= atoi (arg
);
13729 /* Turn off -Os. */
13730 optimize_for_space
= 0;
13740 #define MESSAGE_TEMPLATE \
13744 output_message (FILE *stream
, char *p
, char *message
, char *start
,
13745 int *left_p
, const char *name
, int len
)
13747 int size
= sizeof (MESSAGE_TEMPLATE
);
13748 int left
= *left_p
;
13750 /* Reserve 2 spaces for ", " or ",\0" */
13753 /* Check if there is any room. */
13761 p
= mempcpy (p
, name
, len
);
13765 /* Output the current message now and start a new one. */
13768 fprintf (stream
, "%s\n", message
);
13770 left
= size
- (start
- message
) - len
- 2;
13772 gas_assert (left
>= 0);
13774 p
= mempcpy (p
, name
, len
);
13782 show_arch (FILE *stream
, int ext
, int check
)
13784 static char message
[] = MESSAGE_TEMPLATE
;
13785 char *start
= message
+ 27;
13787 int size
= sizeof (MESSAGE_TEMPLATE
);
13794 left
= size
- (start
- message
);
13795 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13797 /* Should it be skipped? */
13798 if (cpu_arch
[j
].skip
)
13801 name
= cpu_arch
[j
].name
;
13802 len
= cpu_arch
[j
].len
;
13805 /* It is an extension. Skip if we aren't asked to show it. */
13816 /* It is an processor. Skip if we show only extension. */
13819 else if (check
&& ! cpu_arch
[j
].flags
.bitfield
.cpui386
)
13821 /* It is an impossible processor - skip. */
13825 p
= output_message (stream
, p
, message
, start
, &left
, name
, len
);
13828 /* Display disabled extensions. */
13830 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
13832 name
= cpu_noarch
[j
].name
;
13833 len
= cpu_noarch
[j
].len
;
13834 p
= output_message (stream
, p
, message
, start
, &left
, name
,
13839 fprintf (stream
, "%s\n", message
);
13843 md_show_usage (FILE *stream
)
13845 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13846 fprintf (stream
, _("\
13847 -Qy, -Qn ignored\n\
13848 -V print assembler version number\n\
13851 fprintf (stream
, _("\
13852 -n Do not optimize code alignment\n\
13853 -q quieten some warnings\n"));
13854 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13855 fprintf (stream
, _("\
13859 # if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13860 fprintf (stream
, _("\
13861 --32/--64/--x32 generate 32bit/64bit/x32 object\n"));
13862 # elif defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O)
13863 fprintf (stream
, _("\
13864 --32/--64 generate 32bit/64bit object\n"));
13867 #ifdef SVR4_COMMENT_CHARS
13868 fprintf (stream
, _("\
13869 --divide do not treat `/' as a comment character\n"));
13871 fprintf (stream
, _("\
13872 --divide ignored\n"));
13874 fprintf (stream
, _("\
13875 -march=CPU[,+EXTENSION...]\n\
13876 generate code for CPU and EXTENSION, CPU is one of:\n"));
13877 show_arch (stream
, 0, 1);
13878 fprintf (stream
, _("\
13879 EXTENSION is combination of:\n"));
13880 show_arch (stream
, 1, 0);
13881 fprintf (stream
, _("\
13882 -mtune=CPU optimize for CPU, CPU is one of:\n"));
13883 show_arch (stream
, 0, 0);
13884 fprintf (stream
, _("\
13885 -msse2avx encode SSE instructions with VEX prefix\n"));
13886 fprintf (stream
, _("\
13887 -muse-unaligned-vector-move\n\
13888 encode aligned vector move as unaligned vector move\n"));
13889 fprintf (stream
, _("\
13890 -msse-check=[none|error|warning] (default: warning)\n\
13891 check SSE instructions\n"));
13892 fprintf (stream
, _("\
13893 -moperand-check=[none|error|warning] (default: warning)\n\
13894 check operand combinations for validity\n"));
13895 fprintf (stream
, _("\
13896 -mavxscalar=[128|256] (default: 128)\n\
13897 encode scalar AVX instructions with specific vector\n\
13899 fprintf (stream
, _("\
13900 -mvexwig=[0|1] (default: 0)\n\
13901 encode VEX instructions with specific VEX.W value\n\
13902 for VEX.W bit ignored instructions\n"));
13903 fprintf (stream
, _("\
13904 -mevexlig=[128|256|512] (default: 128)\n\
13905 encode scalar EVEX instructions with specific vector\n\
13907 fprintf (stream
, _("\
13908 -mevexwig=[0|1] (default: 0)\n\
13909 encode EVEX instructions with specific EVEX.W value\n\
13910 for EVEX.W bit ignored instructions\n"));
13911 fprintf (stream
, _("\
13912 -mevexrcig=[rne|rd|ru|rz] (default: rne)\n\
13913 encode EVEX instructions with specific EVEX.RC value\n\
13914 for SAE-only ignored instructions\n"));
13915 fprintf (stream
, _("\
13916 -mmnemonic=[att|intel] "));
13917 if (SYSV386_COMPAT
)
13918 fprintf (stream
, _("(default: att)\n"));
13920 fprintf (stream
, _("(default: intel)\n"));
13921 fprintf (stream
, _("\
13922 use AT&T/Intel mnemonic\n"));
13923 fprintf (stream
, _("\
13924 -msyntax=[att|intel] (default: att)\n\
13925 use AT&T/Intel syntax\n"));
13926 fprintf (stream
, _("\
13927 -mindex-reg support pseudo index registers\n"));
13928 fprintf (stream
, _("\
13929 -mnaked-reg don't require `%%' prefix for registers\n"));
13930 fprintf (stream
, _("\
13931 -madd-bnd-prefix add BND prefix for all valid branches\n"));
13932 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13933 fprintf (stream
, _("\
13934 -mshared disable branch optimization for shared code\n"));
13935 fprintf (stream
, _("\
13936 -mx86-used-note=[no|yes] "));
13937 if (DEFAULT_X86_USED_NOTE
)
13938 fprintf (stream
, _("(default: yes)\n"));
13940 fprintf (stream
, _("(default: no)\n"));
13941 fprintf (stream
, _("\
13942 generate x86 used ISA and feature properties\n"));
13944 #if defined (TE_PE) || defined (TE_PEP)
13945 fprintf (stream
, _("\
13946 -mbig-obj generate big object files\n"));
13948 fprintf (stream
, _("\
13949 -momit-lock-prefix=[no|yes] (default: no)\n\
13950 strip all lock prefixes\n"));
13951 fprintf (stream
, _("\
13952 -mfence-as-lock-add=[no|yes] (default: no)\n\
13953 encode lfence, mfence and sfence as\n\
13954 lock addl $0x0, (%%{re}sp)\n"));
13955 fprintf (stream
, _("\
13956 -mrelax-relocations=[no|yes] "));
13957 if (DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
)
13958 fprintf (stream
, _("(default: yes)\n"));
13960 fprintf (stream
, _("(default: no)\n"));
13961 fprintf (stream
, _("\
13962 generate relax relocations\n"));
13963 fprintf (stream
, _("\
13964 -malign-branch-boundary=NUM (default: 0)\n\
13965 align branches within NUM byte boundary\n"));
13966 fprintf (stream
, _("\
13967 -malign-branch=TYPE[+TYPE...] (default: jcc+fused+jmp)\n\
13968 TYPE is combination of jcc, fused, jmp, call, ret,\n\
13970 specify types of branches to align\n"));
13971 fprintf (stream
, _("\
13972 -malign-branch-prefix-size=NUM (default: 5)\n\
13973 align branches with NUM prefixes per instruction\n"));
13974 fprintf (stream
, _("\
13975 -mbranches-within-32B-boundaries\n\
13976 align branches within 32 byte boundary\n"));
13977 fprintf (stream
, _("\
13978 -mlfence-after-load=[no|yes] (default: no)\n\
13979 generate lfence after load\n"));
13980 fprintf (stream
, _("\
13981 -mlfence-before-indirect-branch=[none|all|register|memory] (default: none)\n\
13982 generate lfence before indirect near branch\n"));
13983 fprintf (stream
, _("\
13984 -mlfence-before-ret=[none|or|not|shl|yes] (default: none)\n\
13985 generate lfence before ret\n"));
13986 fprintf (stream
, _("\
13987 -mamd64 accept only AMD64 ISA [default]\n"));
13988 fprintf (stream
, _("\
13989 -mintel64 accept only Intel64 ISA\n"));
13992 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
13993 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13994 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
13996 /* Pick the target format to use. */
13999 i386_target_format (void)
14001 if (startswith (default_arch
, "x86_64"))
14003 update_code_flag (CODE_64BIT
, 1);
14004 if (default_arch
[6] == '\0')
14005 x86_elf_abi
= X86_64_ABI
;
14007 x86_elf_abi
= X86_64_X32_ABI
;
14009 else if (!strcmp (default_arch
, "i386"))
14010 update_code_flag (CODE_32BIT
, 1);
14011 else if (!strcmp (default_arch
, "iamcu"))
14013 update_code_flag (CODE_32BIT
, 1);
14014 if (cpu_arch_isa
== PROCESSOR_UNKNOWN
)
14016 static const i386_cpu_flags iamcu_flags
= CPU_IAMCU_FLAGS
;
14017 cpu_arch_name
= "iamcu";
14018 cpu_sub_arch_name
= NULL
;
14019 cpu_arch_flags
= iamcu_flags
;
14020 cpu_arch_isa
= PROCESSOR_IAMCU
;
14021 cpu_arch_isa_flags
= iamcu_flags
;
14022 if (!cpu_arch_tune_set
)
14024 cpu_arch_tune
= cpu_arch_isa
;
14025 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
14028 else if (cpu_arch_isa
!= PROCESSOR_IAMCU
)
14029 as_fatal (_("Intel MCU doesn't support `%s' architecture"),
14033 as_fatal (_("unknown architecture"));
14035 if (cpu_flags_all_zero (&cpu_arch_isa_flags
))
14036 cpu_arch_isa_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
14037 if (cpu_flags_all_zero (&cpu_arch_tune_flags
))
14038 cpu_arch_tune_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
14040 switch (OUTPUT_FLAVOR
)
14042 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
14043 case bfd_target_aout_flavour
:
14044 return AOUT_TARGET_FORMAT
;
14046 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
14047 # if defined (TE_PE) || defined (TE_PEP)
14048 case bfd_target_coff_flavour
:
14049 if (flag_code
== CODE_64BIT
)
14052 return use_big_obj
? "pe-bigobj-x86-64" : "pe-x86-64";
14054 return use_big_obj
? "pe-bigobj-i386" : "pe-i386";
14055 # elif defined (TE_GO32)
14056 case bfd_target_coff_flavour
:
14057 return "coff-go32";
14059 case bfd_target_coff_flavour
:
14060 return "coff-i386";
14063 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
14064 case bfd_target_elf_flavour
:
14066 const char *format
;
14068 switch (x86_elf_abi
)
14071 format
= ELF_TARGET_FORMAT
;
14073 tls_get_addr
= "___tls_get_addr";
14077 use_rela_relocations
= 1;
14080 tls_get_addr
= "__tls_get_addr";
14082 format
= ELF_TARGET_FORMAT64
;
14084 case X86_64_X32_ABI
:
14085 use_rela_relocations
= 1;
14088 tls_get_addr
= "__tls_get_addr";
14090 disallow_64bit_reloc
= 1;
14091 format
= ELF_TARGET_FORMAT32
;
14094 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
14096 if (x86_elf_abi
!= I386_ABI
)
14097 as_fatal (_("Intel MCU is 32bit only"));
14098 return ELF_TARGET_IAMCU_FORMAT
;
14104 #if defined (OBJ_MACH_O)
14105 case bfd_target_mach_o_flavour
:
14106 if (flag_code
== CODE_64BIT
)
14108 use_rela_relocations
= 1;
14110 return "mach-o-x86-64";
14113 return "mach-o-i386";
14121 #endif /* OBJ_MAYBE_ more than one */
14124 md_undefined_symbol (char *name
)
14126 if (name
[0] == GLOBAL_OFFSET_TABLE_NAME
[0]
14127 && name
[1] == GLOBAL_OFFSET_TABLE_NAME
[1]
14128 && name
[2] == GLOBAL_OFFSET_TABLE_NAME
[2]
14129 && strcmp (name
, GLOBAL_OFFSET_TABLE_NAME
) == 0)
14133 if (symbol_find (name
))
14134 as_bad (_("GOT already in symbol table"));
14135 GOT_symbol
= symbol_new (name
, undefined_section
,
14136 &zero_address_frag
, 0);
14143 /* Round up a section size to the appropriate boundary. */
14146 md_section_align (segT segment ATTRIBUTE_UNUSED
, valueT size
)
14148 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
14149 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
14151 /* For a.out, force the section size to be aligned. If we don't do
14152 this, BFD will align it for us, but it will not write out the
14153 final bytes of the section. This may be a bug in BFD, but it is
14154 easier to fix it here since that is how the other a.out targets
14158 align
= bfd_section_alignment (segment
);
14159 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
14166 /* On the i386, PC-relative offsets are relative to the start of the
14167 next instruction. That is, the address of the offset, plus its
14168 size, since the offset is always the last part of the insn. */
14171 md_pcrel_from (fixS
*fixP
)
14173 return fixP
->fx_size
+ fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
14179 s_bss (int ignore ATTRIBUTE_UNUSED
)
14183 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14185 obj_elf_section_change_hook ();
14187 temp
= get_absolute_expression ();
14188 subseg_set (bss_section
, (subsegT
) temp
);
14189 demand_empty_rest_of_line ();
14194 /* Remember constant directive. */
14197 i386_cons_align (int ignore ATTRIBUTE_UNUSED
)
14199 if (last_insn
.kind
!= last_insn_directive
14200 && (bfd_section_flags (now_seg
) & SEC_CODE
))
14202 last_insn
.seg
= now_seg
;
14203 last_insn
.kind
= last_insn_directive
;
14204 last_insn
.name
= "constant directive";
14205 last_insn
.file
= as_where (&last_insn
.line
);
14206 if (lfence_before_ret
!= lfence_before_ret_none
)
14208 if (lfence_before_indirect_branch
!= lfence_branch_none
)
14209 as_warn (_("constant directive skips -mlfence-before-ret "
14210 "and -mlfence-before-indirect-branch"));
14212 as_warn (_("constant directive skips -mlfence-before-ret"));
14214 else if (lfence_before_indirect_branch
!= lfence_branch_none
)
14215 as_warn (_("constant directive skips -mlfence-before-indirect-branch"));
14220 i386_validate_fix (fixS
*fixp
)
14222 if (fixp
->fx_addsy
&& S_GET_SEGMENT(fixp
->fx_addsy
) == reg_section
)
14224 reloc_howto_type
*howto
;
14226 howto
= bfd_reloc_type_lookup (stdoutput
, fixp
->fx_r_type
);
14227 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14228 _("invalid %s relocation against register"),
14229 howto
? howto
->name
: "<unknown>");
14233 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14234 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
14235 || fixp
->fx_r_type
== BFD_RELOC_SIZE64
)
14236 return IS_ELF
&& fixp
->fx_addsy
14237 && (!S_IS_DEFINED (fixp
->fx_addsy
)
14238 || S_IS_EXTERNAL (fixp
->fx_addsy
));
14241 if (fixp
->fx_subsy
)
14243 if (fixp
->fx_subsy
== GOT_symbol
)
14245 if (fixp
->fx_r_type
== BFD_RELOC_32_PCREL
)
14249 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14250 if (fixp
->fx_tcbit2
)
14251 fixp
->fx_r_type
= (fixp
->fx_tcbit
14252 ? BFD_RELOC_X86_64_REX_GOTPCRELX
14253 : BFD_RELOC_X86_64_GOTPCRELX
);
14256 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTPCREL
;
14261 fixp
->fx_r_type
= BFD_RELOC_386_GOTOFF
;
14263 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTOFF64
;
14265 fixp
->fx_subsy
= 0;
14268 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14271 /* NB: Commit 292676c1 resolved PLT32 reloc aganst local symbol
14272 to section. Since PLT32 relocation must be against symbols,
14273 turn such PLT32 relocation into PC32 relocation. */
14275 && (fixp
->fx_r_type
== BFD_RELOC_386_PLT32
14276 || fixp
->fx_r_type
== BFD_RELOC_X86_64_PLT32
)
14277 && symbol_section_p (fixp
->fx_addsy
))
14278 fixp
->fx_r_type
= BFD_RELOC_32_PCREL
;
14281 if (fixp
->fx_r_type
== BFD_RELOC_386_GOT32
14282 && fixp
->fx_tcbit2
)
14283 fixp
->fx_r_type
= BFD_RELOC_386_GOT32X
;
14292 tc_gen_reloc (asection
*section ATTRIBUTE_UNUSED
, fixS
*fixp
)
14295 bfd_reloc_code_real_type code
;
14297 switch (fixp
->fx_r_type
)
14299 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14302 case BFD_RELOC_SIZE32
:
14303 case BFD_RELOC_SIZE64
:
14305 && !bfd_is_abs_section (S_GET_SEGMENT (fixp
->fx_addsy
))
14306 && (!fixp
->fx_subsy
14307 || bfd_is_abs_section (S_GET_SEGMENT (fixp
->fx_subsy
))))
14308 sym
= fixp
->fx_addsy
;
14309 else if (fixp
->fx_subsy
14310 && !bfd_is_abs_section (S_GET_SEGMENT (fixp
->fx_subsy
))
14311 && (!fixp
->fx_addsy
14312 || bfd_is_abs_section (S_GET_SEGMENT (fixp
->fx_addsy
))))
14313 sym
= fixp
->fx_subsy
;
14316 if (IS_ELF
&& sym
&& S_IS_DEFINED (sym
) && !S_IS_EXTERNAL (sym
))
14318 /* Resolve size relocation against local symbol to size of
14319 the symbol plus addend. */
14320 valueT value
= S_GET_SIZE (sym
);
14322 if (symbol_get_bfdsym (sym
)->flags
& BSF_SECTION_SYM
)
14323 value
= bfd_section_size (S_GET_SEGMENT (sym
));
14324 if (sym
== fixp
->fx_subsy
)
14327 if (fixp
->fx_addsy
)
14328 value
+= S_GET_VALUE (fixp
->fx_addsy
);
14330 else if (fixp
->fx_subsy
)
14331 value
-= S_GET_VALUE (fixp
->fx_subsy
);
14332 value
+= fixp
->fx_offset
;
14333 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
14335 && !fits_in_unsigned_long (value
))
14336 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14337 _("symbol size computation overflow"));
14338 fixp
->fx_addsy
= NULL
;
14339 fixp
->fx_subsy
= NULL
;
14340 md_apply_fix (fixp
, (valueT
*) &value
, NULL
);
14343 if (!fixp
->fx_addsy
|| fixp
->fx_subsy
)
14345 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14346 "unsupported expression involving @size");
14350 /* Fall through. */
14352 case BFD_RELOC_X86_64_PLT32
:
14353 case BFD_RELOC_X86_64_GOT32
:
14354 case BFD_RELOC_X86_64_GOTPCREL
:
14355 case BFD_RELOC_X86_64_GOTPCRELX
:
14356 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
14357 case BFD_RELOC_386_PLT32
:
14358 case BFD_RELOC_386_GOT32
:
14359 case BFD_RELOC_386_GOT32X
:
14360 case BFD_RELOC_386_GOTOFF
:
14361 case BFD_RELOC_386_GOTPC
:
14362 case BFD_RELOC_386_TLS_GD
:
14363 case BFD_RELOC_386_TLS_LDM
:
14364 case BFD_RELOC_386_TLS_LDO_32
:
14365 case BFD_RELOC_386_TLS_IE_32
:
14366 case BFD_RELOC_386_TLS_IE
:
14367 case BFD_RELOC_386_TLS_GOTIE
:
14368 case BFD_RELOC_386_TLS_LE_32
:
14369 case BFD_RELOC_386_TLS_LE
:
14370 case BFD_RELOC_386_TLS_GOTDESC
:
14371 case BFD_RELOC_386_TLS_DESC_CALL
:
14372 case BFD_RELOC_X86_64_TLSGD
:
14373 case BFD_RELOC_X86_64_TLSLD
:
14374 case BFD_RELOC_X86_64_DTPOFF32
:
14375 case BFD_RELOC_X86_64_DTPOFF64
:
14376 case BFD_RELOC_X86_64_GOTTPOFF
:
14377 case BFD_RELOC_X86_64_TPOFF32
:
14378 case BFD_RELOC_X86_64_TPOFF64
:
14379 case BFD_RELOC_X86_64_GOTOFF64
:
14380 case BFD_RELOC_X86_64_GOTPC32
:
14381 case BFD_RELOC_X86_64_GOT64
:
14382 case BFD_RELOC_X86_64_GOTPCREL64
:
14383 case BFD_RELOC_X86_64_GOTPC64
:
14384 case BFD_RELOC_X86_64_GOTPLT64
:
14385 case BFD_RELOC_X86_64_PLTOFF64
:
14386 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
14387 case BFD_RELOC_X86_64_TLSDESC_CALL
:
14388 case BFD_RELOC_RVA
:
14389 case BFD_RELOC_VTABLE_ENTRY
:
14390 case BFD_RELOC_VTABLE_INHERIT
:
14392 case BFD_RELOC_32_SECREL
:
14393 case BFD_RELOC_16_SECIDX
:
14395 code
= fixp
->fx_r_type
;
14397 case BFD_RELOC_X86_64_32S
:
14398 if (!fixp
->fx_pcrel
)
14400 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
14401 code
= fixp
->fx_r_type
;
14404 /* Fall through. */
14406 if (fixp
->fx_pcrel
)
14408 switch (fixp
->fx_size
)
14411 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14412 _("can not do %d byte pc-relative relocation"),
14414 code
= BFD_RELOC_32_PCREL
;
14416 case 1: code
= BFD_RELOC_8_PCREL
; break;
14417 case 2: code
= BFD_RELOC_16_PCREL
; break;
14418 case 4: code
= BFD_RELOC_32_PCREL
; break;
14420 case 8: code
= BFD_RELOC_64_PCREL
; break;
14426 switch (fixp
->fx_size
)
14429 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14430 _("can not do %d byte relocation"),
14432 code
= BFD_RELOC_32
;
14434 case 1: code
= BFD_RELOC_8
; break;
14435 case 2: code
= BFD_RELOC_16
; break;
14436 case 4: code
= BFD_RELOC_32
; break;
14438 case 8: code
= BFD_RELOC_64
; break;
14445 if ((code
== BFD_RELOC_32
14446 || code
== BFD_RELOC_32_PCREL
14447 || code
== BFD_RELOC_X86_64_32S
)
14449 && fixp
->fx_addsy
== GOT_symbol
)
14452 code
= BFD_RELOC_386_GOTPC
;
14454 code
= BFD_RELOC_X86_64_GOTPC32
;
14456 if ((code
== BFD_RELOC_64
|| code
== BFD_RELOC_64_PCREL
)
14458 && fixp
->fx_addsy
== GOT_symbol
)
14460 code
= BFD_RELOC_X86_64_GOTPC64
;
14463 rel
= XNEW (arelent
);
14464 rel
->sym_ptr_ptr
= XNEW (asymbol
*);
14465 *rel
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
14467 rel
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
14469 if (!use_rela_relocations
)
14471 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
14472 vtable entry to be used in the relocation's section offset. */
14473 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
14474 rel
->address
= fixp
->fx_offset
;
14475 #if defined (OBJ_COFF) && defined (TE_PE)
14476 else if (fixp
->fx_addsy
&& S_IS_WEAK (fixp
->fx_addsy
))
14477 rel
->addend
= fixp
->fx_addnumber
- (S_GET_VALUE (fixp
->fx_addsy
) * 2);
14482 /* Use the rela in 64bit mode. */
14485 if (disallow_64bit_reloc
)
14488 case BFD_RELOC_X86_64_DTPOFF64
:
14489 case BFD_RELOC_X86_64_TPOFF64
:
14490 case BFD_RELOC_64_PCREL
:
14491 case BFD_RELOC_X86_64_GOTOFF64
:
14492 case BFD_RELOC_X86_64_GOT64
:
14493 case BFD_RELOC_X86_64_GOTPCREL64
:
14494 case BFD_RELOC_X86_64_GOTPC64
:
14495 case BFD_RELOC_X86_64_GOTPLT64
:
14496 case BFD_RELOC_X86_64_PLTOFF64
:
14497 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14498 _("cannot represent relocation type %s in x32 mode"),
14499 bfd_get_reloc_code_name (code
));
14505 if (!fixp
->fx_pcrel
)
14506 rel
->addend
= fixp
->fx_offset
;
14510 case BFD_RELOC_X86_64_PLT32
:
14511 case BFD_RELOC_X86_64_GOT32
:
14512 case BFD_RELOC_X86_64_GOTPCREL
:
14513 case BFD_RELOC_X86_64_GOTPCRELX
:
14514 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
14515 case BFD_RELOC_X86_64_TLSGD
:
14516 case BFD_RELOC_X86_64_TLSLD
:
14517 case BFD_RELOC_X86_64_GOTTPOFF
:
14518 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
14519 case BFD_RELOC_X86_64_TLSDESC_CALL
:
14520 rel
->addend
= fixp
->fx_offset
- fixp
->fx_size
;
14523 rel
->addend
= (section
->vma
14525 + fixp
->fx_addnumber
14526 + md_pcrel_from (fixp
));
14531 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
14532 if (rel
->howto
== NULL
)
14534 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
14535 _("cannot represent relocation type %s"),
14536 bfd_get_reloc_code_name (code
));
14537 /* Set howto to a garbage value so that we can keep going. */
14538 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, BFD_RELOC_32
);
14539 gas_assert (rel
->howto
!= NULL
);
14545 #include "tc-i386-intel.c"
14548 tc_x86_parse_to_dw2regnum (expressionS
*exp
)
14550 int saved_naked_reg
;
14551 char saved_register_dot
;
14553 saved_naked_reg
= allow_naked_reg
;
14554 allow_naked_reg
= 1;
14555 saved_register_dot
= register_chars
['.'];
14556 register_chars
['.'] = '.';
14557 allow_pseudo_reg
= 1;
14558 expression_and_evaluate (exp
);
14559 allow_pseudo_reg
= 0;
14560 register_chars
['.'] = saved_register_dot
;
14561 allow_naked_reg
= saved_naked_reg
;
14563 if (exp
->X_op
== O_register
&& exp
->X_add_number
>= 0)
14565 if ((addressT
) exp
->X_add_number
< i386_regtab_size
)
14567 exp
->X_op
= O_constant
;
14568 exp
->X_add_number
= i386_regtab
[exp
->X_add_number
]
14569 .dw2_regnum
[flag_code
>> 1];
14572 exp
->X_op
= O_illegal
;
14577 tc_x86_frame_initial_instructions (void)
14579 static unsigned int sp_regno
[2];
14581 if (!sp_regno
[flag_code
>> 1])
14583 char *saved_input
= input_line_pointer
;
14584 char sp
[][4] = {"esp", "rsp"};
14587 input_line_pointer
= sp
[flag_code
>> 1];
14588 tc_x86_parse_to_dw2regnum (&exp
);
14589 gas_assert (exp
.X_op
== O_constant
);
14590 sp_regno
[flag_code
>> 1] = exp
.X_add_number
;
14591 input_line_pointer
= saved_input
;
14594 cfi_add_CFA_def_cfa (sp_regno
[flag_code
>> 1], -x86_cie_data_alignment
);
14595 cfi_add_CFA_offset (x86_dwarf2_return_column
, x86_cie_data_alignment
);
14599 x86_dwarf2_addr_size (void)
14601 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
14602 if (x86_elf_abi
== X86_64_X32_ABI
)
14605 return bfd_arch_bits_per_address (stdoutput
) / 8;
14609 i386_elf_section_type (const char *str
, size_t len
)
14611 if (flag_code
== CODE_64BIT
14612 && len
== sizeof ("unwind") - 1
14613 && startswith (str
, "unwind"))
14614 return SHT_X86_64_UNWIND
;
14621 i386_solaris_fix_up_eh_frame (segT sec
)
14623 if (flag_code
== CODE_64BIT
)
14624 elf_section_type (sec
) = SHT_X86_64_UNWIND
;
14630 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
14634 exp
.X_op
= O_secrel
;
14635 exp
.X_add_symbol
= symbol
;
14636 exp
.X_add_number
= 0;
14637 emit_expr (&exp
, size
);
14641 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14642 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
14645 x86_64_section_letter (int letter
, const char **ptr_msg
)
14647 if (flag_code
== CODE_64BIT
)
14650 return SHF_X86_64_LARGE
;
14652 *ptr_msg
= _("bad .section directive: want a,l,w,x,M,S,G,T in string");
14655 *ptr_msg
= _("bad .section directive: want a,w,x,M,S,G,T in string");
14660 x86_64_section_word (char *str
, size_t len
)
14662 if (len
== 5 && flag_code
== CODE_64BIT
&& startswith (str
, "large"))
14663 return SHF_X86_64_LARGE
;
14669 handle_large_common (int small ATTRIBUTE_UNUSED
)
14671 if (flag_code
!= CODE_64BIT
)
14673 s_comm_internal (0, elf_common_parse
);
14674 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
14678 static segT lbss_section
;
14679 asection
*saved_com_section_ptr
= elf_com_section_ptr
;
14680 asection
*saved_bss_section
= bss_section
;
14682 if (lbss_section
== NULL
)
14684 flagword applicable
;
14685 segT seg
= now_seg
;
14686 subsegT subseg
= now_subseg
;
14688 /* The .lbss section is for local .largecomm symbols. */
14689 lbss_section
= subseg_new (".lbss", 0);
14690 applicable
= bfd_applicable_section_flags (stdoutput
);
14691 bfd_set_section_flags (lbss_section
, applicable
& SEC_ALLOC
);
14692 seg_info (lbss_section
)->bss
= 1;
14694 subseg_set (seg
, subseg
);
14697 elf_com_section_ptr
= &_bfd_elf_large_com_section
;
14698 bss_section
= lbss_section
;
14700 s_comm_internal (0, elf_common_parse
);
14702 elf_com_section_ptr
= saved_com_section_ptr
;
14703 bss_section
= saved_bss_section
;
14706 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */