1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2021 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
39 #ifdef HAVE_SYS_PARAM_H
40 #include <sys/param.h>
43 #define INT_MAX (int) (((unsigned) (-1)) >> 1)
47 #ifndef INFER_ADDR_PREFIX
48 #define INFER_ADDR_PREFIX 1
52 #define DEFAULT_ARCH "i386"
57 #define INLINE __inline__
63 /* Prefixes will be emitted in the order defined below.
64 WAIT_PREFIX must be the first prefix since FWAIT really is an
65 instruction, and so must come before any prefixes.
66 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
67 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
73 #define HLE_PREFIX REP_PREFIX
74 #define BND_PREFIX REP_PREFIX
76 #define REX_PREFIX 6 /* must come last. */
77 #define MAX_PREFIXES 7 /* max prefixes per opcode */
79 /* we define the syntax here (modulo base,index,scale syntax) */
80 #define REGISTER_PREFIX '%'
81 #define IMMEDIATE_PREFIX '$'
82 #define ABSOLUTE_PREFIX '*'
84 /* these are the instruction mnemonic suffixes in AT&T syntax or
85 memory operand size in Intel syntax. */
86 #define WORD_MNEM_SUFFIX 'w'
87 #define BYTE_MNEM_SUFFIX 'b'
88 #define SHORT_MNEM_SUFFIX 's'
89 #define LONG_MNEM_SUFFIX 'l'
90 #define QWORD_MNEM_SUFFIX 'q'
91 /* Intel Syntax. Use a non-ascii letter since it never appears
93 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
95 #define END_OF_INSN '\0'
97 /* This matches the C -> StaticRounding alias in the opcode table. */
98 #define commutative staticrounding
101 'templates' is for grouping together 'template' structures for opcodes
102 of the same name. This is only used for storing the insns in the grand
103 ole hash table of insns.
104 The templates themselves start at START and range up to (but not including)
109 const insn_template
*start
;
110 const insn_template
*end
;
114 /* 386 operand encoding bytes: see 386 book for details of this. */
117 unsigned int regmem
; /* codes register or memory operand */
118 unsigned int reg
; /* codes register operand (or extended opcode) */
119 unsigned int mode
; /* how to interpret regmem & reg */
123 /* x86-64 extension prefix. */
124 typedef int rex_byte
;
126 /* 386 opcode byte to code indirect addressing. */
135 /* x86 arch names, types and features */
138 const char *name
; /* arch name */
139 unsigned int len
; /* arch string length */
140 enum processor_type type
; /* arch type */
141 i386_cpu_flags flags
; /* cpu feature flags */
142 unsigned int skip
; /* show_arch should skip this. */
146 /* Used to turn off indicated flags. */
149 const char *name
; /* arch name */
150 unsigned int len
; /* arch string length */
151 i386_cpu_flags flags
; /* cpu feature flags */
155 static void update_code_flag (int, int);
156 static void set_code_flag (int);
157 static void set_16bit_gcc_code_flag (int);
158 static void set_intel_syntax (int);
159 static void set_intel_mnemonic (int);
160 static void set_allow_index_reg (int);
161 static void set_check (int);
162 static void set_cpu_arch (int);
164 static void pe_directive_secrel (int);
166 static void signed_cons (int);
167 static char *output_invalid (int c
);
168 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
170 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
172 static int i386_att_operand (char *);
173 static int i386_intel_operand (char *, int);
174 static int i386_intel_simplify (expressionS
*);
175 static int i386_intel_parse_name (const char *, expressionS
*);
176 static const reg_entry
*parse_register (char *, char **);
177 static char *parse_insn (char *, char *);
178 static char *parse_operands (char *, const char *);
179 static void swap_operands (void);
180 static void swap_2_operands (unsigned int, unsigned int);
181 static enum flag_code
i386_addressing_mode (void);
182 static void optimize_imm (void);
183 static void optimize_disp (void);
184 static const insn_template
*match_template (char);
185 static int check_string (void);
186 static int process_suffix (void);
187 static int check_byte_reg (void);
188 static int check_long_reg (void);
189 static int check_qword_reg (void);
190 static int check_word_reg (void);
191 static int finalize_imm (void);
192 static int process_operands (void);
193 static const seg_entry
*build_modrm_byte (void);
194 static void output_insn (void);
195 static void output_imm (fragS
*, offsetT
);
196 static void output_disp (fragS
*, offsetT
);
198 static void s_bss (int);
200 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
201 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
203 /* GNU_PROPERTY_X86_ISA_1_USED. */
204 static unsigned int x86_isa_1_used
;
205 /* GNU_PROPERTY_X86_FEATURE_2_USED. */
206 static unsigned int x86_feature_2_used
;
207 /* Generate x86 used ISA and feature properties. */
208 static unsigned int x86_used_note
= DEFAULT_X86_USED_NOTE
;
211 static const char *default_arch
= DEFAULT_ARCH
;
213 /* parse_register() returns this when a register alias cannot be used. */
214 static const reg_entry bad_reg
= { "<bad>", OPERAND_TYPE_NONE
, 0, 0,
215 { Dw2Inval
, Dw2Inval
} };
217 static const reg_entry
*reg_k0
;
219 /* This struct describes rounding control and SAE in the instruction. */
230 unsigned int operand
;
233 static struct RC_Operation rc_op
;
235 /* The struct describes broadcasting, applied to OPERAND. FACTOR is
237 struct Broadcast_Operation
239 /* Type of broadcast: {1to2}, {1to4}, {1to8}, or {1to16}. */
242 /* Index of broadcasted operand. */
243 unsigned int operand
;
245 /* Number of bytes to broadcast. */
249 static struct Broadcast_Operation broadcast_op
;
254 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
255 unsigned char bytes
[4];
257 /* Destination or source register specifier. */
258 const reg_entry
*register_specifier
;
261 /* 'md_assemble ()' gathers together information and puts it into a
268 const reg_entry
*regs
;
273 operand_size_mismatch
,
274 operand_type_mismatch
,
275 register_type_mismatch
,
276 number_of_operands_mismatch
,
277 invalid_instruction_suffix
,
279 unsupported_with_intel_mnemonic
,
283 invalid_vsib_address
,
284 invalid_vector_register_set
,
285 invalid_tmm_register_set
,
286 unsupported_vector_index_register
,
287 unsupported_broadcast
,
290 mask_not_on_destination
,
293 rc_sae_operand_not_last_imm
,
294 invalid_register_operand
,
299 /* TM holds the template for the insn were currently assembling. */
302 /* SUFFIX holds the instruction size suffix for byte, word, dword
303 or qword, if given. */
306 /* OPCODE_LENGTH holds the number of base opcode bytes. */
307 unsigned char opcode_length
;
309 /* OPERANDS gives the number of given operands. */
310 unsigned int operands
;
312 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
313 of given register, displacement, memory operands and immediate
315 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
317 /* TYPES [i] is the type (see above #defines) which tells us how to
318 use OP[i] for the corresponding operand. */
319 i386_operand_type types
[MAX_OPERANDS
];
321 /* Displacement expression, immediate expression, or register for each
323 union i386_op op
[MAX_OPERANDS
];
325 /* Flags for operands. */
326 unsigned int flags
[MAX_OPERANDS
];
327 #define Operand_PCrel 1
328 #define Operand_Mem 2
330 /* Relocation type for operand */
331 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
333 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
334 the base index byte below. */
335 const reg_entry
*base_reg
;
336 const reg_entry
*index_reg
;
337 unsigned int log2_scale_factor
;
339 /* SEG gives the seg_entries of this insn. They are zero unless
340 explicit segment overrides are given. */
341 const seg_entry
*seg
[2];
343 /* Copied first memory operand string, for re-checking. */
346 /* PREFIX holds all the given prefix opcodes (usually null).
347 PREFIXES is the number of prefix opcodes. */
348 unsigned int prefixes
;
349 unsigned char prefix
[MAX_PREFIXES
];
351 /* Register is in low 3 bits of opcode. */
352 bfd_boolean short_form
;
354 /* The operand to a branch insn indicates an absolute branch. */
355 bfd_boolean jumpabsolute
;
357 /* Extended states. */
365 xstate_ymm
= 1 << 2 | xstate_xmm
,
367 xstate_zmm
= 1 << 3 | xstate_ymm
,
370 /* Use MASK state. */
374 /* Has GOTPC or TLS relocation. */
375 bfd_boolean has_gotpc_tls_reloc
;
377 /* RM and SIB are the modrm byte and the sib byte where the
378 addressing modes of this insn are encoded. */
385 /* Masking attributes.
387 The struct describes masking, applied to OPERAND in the instruction.
388 REG is a pointer to the corresponding mask register. ZEROING tells
389 whether merging or zeroing mask is used. */
390 struct Mask_Operation
392 const reg_entry
*reg
;
393 unsigned int zeroing
;
394 /* The operand where this operation is associated. */
395 unsigned int operand
;
398 /* Rounding control and SAE attributes. */
399 struct RC_Operation
*rounding
;
401 /* Broadcasting attributes. */
402 struct Broadcast_Operation
*broadcast
;
404 /* Compressed disp8*N attribute. */
405 unsigned int memshift
;
407 /* Prefer load or store in encoding. */
410 dir_encoding_default
= 0,
416 /* Prefer 8bit, 16bit, 32bit displacement in encoding. */
419 disp_encoding_default
= 0,
425 /* Prefer the REX byte in encoding. */
426 bfd_boolean rex_encoding
;
428 /* Disable instruction size optimization. */
429 bfd_boolean no_optimize
;
431 /* How to encode vector instructions. */
434 vex_encoding_default
= 0,
442 const char *rep_prefix
;
445 const char *hle_prefix
;
447 /* Have BND prefix. */
448 const char *bnd_prefix
;
450 /* Have NOTRACK prefix. */
451 const char *notrack_prefix
;
454 enum i386_error error
;
457 typedef struct _i386_insn i386_insn
;
459 /* Link RC type with corresponding string, that'll be looked for in
468 static const struct RC_name RC_NamesTable
[] =
470 { rne
, STRING_COMMA_LEN ("rn-sae") },
471 { rd
, STRING_COMMA_LEN ("rd-sae") },
472 { ru
, STRING_COMMA_LEN ("ru-sae") },
473 { rz
, STRING_COMMA_LEN ("rz-sae") },
474 { saeonly
, STRING_COMMA_LEN ("sae") },
477 /* List of chars besides those in app.c:symbol_chars that can start an
478 operand. Used to prevent the scrubber eating vital white-space. */
479 const char extra_symbol_chars
[] = "*%-([{}"
488 #if ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
489 && !defined (TE_GNU) \
490 && !defined (TE_LINUX) \
491 && !defined (TE_FreeBSD) \
492 && !defined (TE_DragonFly) \
493 && !defined (TE_NetBSD))
494 /* This array holds the chars that always start a comment. If the
495 pre-processor is disabled, these aren't very useful. The option
496 --divide will remove '/' from this list. */
497 const char *i386_comment_chars
= "#/";
498 #define SVR4_COMMENT_CHARS 1
499 #define PREFIX_SEPARATOR '\\'
502 const char *i386_comment_chars
= "#";
503 #define PREFIX_SEPARATOR '/'
506 /* This array holds the chars that only start a comment at the beginning of
507 a line. If the line seems to have the form '# 123 filename'
508 .line and .file directives will appear in the pre-processed output.
509 Note that input_file.c hand checks for '#' at the beginning of the
510 first line of the input file. This is because the compiler outputs
511 #NO_APP at the beginning of its output.
512 Also note that comments started like this one will always work if
513 '/' isn't otherwise defined. */
514 const char line_comment_chars
[] = "#/";
516 const char line_separator_chars
[] = ";";
518 /* Chars that can be used to separate mant from exp in floating point
520 const char EXP_CHARS
[] = "eE";
522 /* Chars that mean this number is a floating point constant
525 const char FLT_CHARS
[] = "fFdDxX";
527 /* Tables for lexical analysis. */
528 static char mnemonic_chars
[256];
529 static char register_chars
[256];
530 static char operand_chars
[256];
531 static char identifier_chars
[256];
532 static char digit_chars
[256];
534 /* Lexical macros. */
535 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
536 #define is_operand_char(x) (operand_chars[(unsigned char) x])
537 #define is_register_char(x) (register_chars[(unsigned char) x])
538 #define is_space_char(x) ((x) == ' ')
539 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
540 #define is_digit_char(x) (digit_chars[(unsigned char) x])
542 /* All non-digit non-letter characters that may occur in an operand. */
543 static char operand_special_chars
[] = "%$-+(,)*._~/<>|&^!:[@]";
545 /* md_assemble() always leaves the strings it's passed unaltered. To
546 effect this we maintain a stack of saved characters that we've smashed
547 with '\0's (indicating end of strings for various sub-fields of the
548 assembler instruction). */
549 static char save_stack
[32];
550 static char *save_stack_p
;
551 #define END_STRING_AND_SAVE(s) \
552 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
553 #define RESTORE_END_STRING(s) \
554 do { *(s) = *--save_stack_p; } while (0)
556 /* The instruction we're assembling. */
559 /* Possible templates for current insn. */
560 static const templates
*current_templates
;
562 /* Per instruction expressionS buffers: max displacements & immediates. */
563 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
564 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
566 /* Current operand we are working on. */
567 static int this_operand
= -1;
569 /* We support four different modes. FLAG_CODE variable is used to distinguish
577 static enum flag_code flag_code
;
578 static unsigned int object_64bit
;
579 static unsigned int disallow_64bit_reloc
;
580 static int use_rela_relocations
= 0;
581 /* __tls_get_addr/___tls_get_addr symbol for TLS. */
582 static const char *tls_get_addr
;
584 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
585 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
586 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
588 /* The ELF ABI to use. */
596 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
599 #if defined (TE_PE) || defined (TE_PEP)
600 /* Use big object file format. */
601 static int use_big_obj
= 0;
604 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
605 /* 1 if generating code for a shared library. */
606 static int shared
= 0;
609 /* 1 for intel syntax,
611 static int intel_syntax
= 0;
613 static enum x86_64_isa
615 amd64
= 1, /* AMD64 ISA. */
616 intel64
/* Intel64 ISA. */
619 /* 1 for intel mnemonic,
620 0 if att mnemonic. */
621 static int intel_mnemonic
= !SYSV386_COMPAT
;
623 /* 1 if pseudo registers are permitted. */
624 static int allow_pseudo_reg
= 0;
626 /* 1 if register prefix % not required. */
627 static int allow_naked_reg
= 0;
629 /* 1 if the assembler should add BND prefix for all control-transferring
630 instructions supporting it, even if this prefix wasn't specified
632 static int add_bnd_prefix
= 0;
634 /* 1 if pseudo index register, eiz/riz, is allowed. */
635 static int allow_index_reg
= 0;
637 /* 1 if the assembler should ignore LOCK prefix, even if it was
638 specified explicitly. */
639 static int omit_lock_prefix
= 0;
641 /* 1 if the assembler should encode lfence, mfence, and sfence as
642 "lock addl $0, (%{re}sp)". */
643 static int avoid_fence
= 0;
645 /* 1 if lfence should be inserted after every load. */
646 static int lfence_after_load
= 0;
648 /* Non-zero if lfence should be inserted before indirect branch. */
649 static enum lfence_before_indirect_branch_kind
651 lfence_branch_none
= 0,
652 lfence_branch_register
,
653 lfence_branch_memory
,
656 lfence_before_indirect_branch
;
658 /* Non-zero if lfence should be inserted before ret. */
659 static enum lfence_before_ret_kind
661 lfence_before_ret_none
= 0,
662 lfence_before_ret_not
,
663 lfence_before_ret_or
,
664 lfence_before_ret_shl
668 /* Types of previous instruction is .byte or prefix. */
683 /* 1 if the assembler should generate relax relocations. */
685 static int generate_relax_relocations
686 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
;
688 static enum check_kind
694 sse_check
, operand_check
= check_warning
;
696 /* Non-zero if branches should be aligned within power of 2 boundary. */
697 static int align_branch_power
= 0;
699 /* Types of branches to align. */
700 enum align_branch_kind
702 align_branch_none
= 0,
703 align_branch_jcc
= 1,
704 align_branch_fused
= 2,
705 align_branch_jmp
= 3,
706 align_branch_call
= 4,
707 align_branch_indirect
= 5,
711 /* Type bits of branches to align. */
712 enum align_branch_bit
714 align_branch_jcc_bit
= 1 << align_branch_jcc
,
715 align_branch_fused_bit
= 1 << align_branch_fused
,
716 align_branch_jmp_bit
= 1 << align_branch_jmp
,
717 align_branch_call_bit
= 1 << align_branch_call
,
718 align_branch_indirect_bit
= 1 << align_branch_indirect
,
719 align_branch_ret_bit
= 1 << align_branch_ret
722 static unsigned int align_branch
= (align_branch_jcc_bit
723 | align_branch_fused_bit
724 | align_branch_jmp_bit
);
726 /* Types of condition jump used by macro-fusion. */
729 mf_jcc_jo
= 0, /* base opcode 0x70 */
730 mf_jcc_jc
, /* base opcode 0x72 */
731 mf_jcc_je
, /* base opcode 0x74 */
732 mf_jcc_jna
, /* base opcode 0x76 */
733 mf_jcc_js
, /* base opcode 0x78 */
734 mf_jcc_jp
, /* base opcode 0x7a */
735 mf_jcc_jl
, /* base opcode 0x7c */
736 mf_jcc_jle
, /* base opcode 0x7e */
739 /* Types of compare flag-modifying instructions used by macro-fusion. */
742 mf_cmp_test_and
, /* test/cmp */
743 mf_cmp_alu_cmp
, /* add/sub/cmp */
744 mf_cmp_incdec
/* inc/dec */
747 /* The maximum padding size for fused jcc. CMP like instruction can
748 be 9 bytes and jcc can be 6 bytes. Leave room just in case for
750 #define MAX_FUSED_JCC_PADDING_SIZE 20
752 /* The maximum number of prefixes added for an instruction. */
753 static unsigned int align_branch_prefix_size
= 5;
756 1. Clear the REX_W bit with register operand if possible.
757 2. Above plus use 128bit vector instruction to clear the full vector
760 static int optimize
= 0;
763 1. Clear the REX_W bit with register operand if possible.
764 2. Above plus use 128bit vector instruction to clear the full vector
766 3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to
769 static int optimize_for_space
= 0;
771 /* Register prefix used for error message. */
772 static const char *register_prefix
= "%";
774 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
775 leave, push, and pop instructions so that gcc has the same stack
776 frame as in 32 bit mode. */
777 static char stackop_size
= '\0';
779 /* Non-zero to optimize code alignment. */
780 int optimize_align_code
= 1;
782 /* Non-zero to quieten some warnings. */
783 static int quiet_warnings
= 0;
786 static const char *cpu_arch_name
= NULL
;
787 static char *cpu_sub_arch_name
= NULL
;
789 /* CPU feature flags. */
790 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
792 /* If we have selected a cpu we are generating instructions for. */
793 static int cpu_arch_tune_set
= 0;
795 /* Cpu we are generating instructions for. */
796 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
798 /* CPU feature flags of cpu we are generating instructions for. */
799 static i386_cpu_flags cpu_arch_tune_flags
;
801 /* CPU instruction set architecture used. */
802 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
804 /* CPU feature flags of instruction set architecture used. */
805 i386_cpu_flags cpu_arch_isa_flags
;
807 /* If set, conditional jumps are not automatically promoted to handle
808 larger than a byte offset. */
809 static unsigned int no_cond_jump_promotion
= 0;
811 /* Encode SSE instructions with VEX prefix. */
812 static unsigned int sse2avx
;
814 /* Encode scalar AVX instructions with specific vector length. */
821 /* Encode VEX WIG instructions with specific vex.w. */
828 /* Encode scalar EVEX LIG instructions with specific vector length. */
836 /* Encode EVEX WIG instructions with specific evex.w. */
843 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
844 static enum rc_type evexrcig
= rne
;
846 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
847 static symbolS
*GOT_symbol
;
849 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
850 unsigned int x86_dwarf2_return_column
;
852 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
853 int x86_cie_data_alignment
;
855 /* Interface to relax_segment.
856 There are 3 major relax states for 386 jump insns because the
857 different types of jumps add different sizes to frags when we're
858 figuring out what sort of jump to choose to reach a given label.
860 BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING are used to align
861 branches which are handled by md_estimate_size_before_relax() and
862 i386_generic_table_relax_frag(). */
865 #define UNCOND_JUMP 0
867 #define COND_JUMP86 2
868 #define BRANCH_PADDING 3
869 #define BRANCH_PREFIX 4
870 #define FUSED_JCC_PADDING 5
875 #define SMALL16 (SMALL | CODE16)
877 #define BIG16 (BIG | CODE16)
881 #define INLINE __inline__
887 #define ENCODE_RELAX_STATE(type, size) \
888 ((relax_substateT) (((type) << 2) | (size)))
889 #define TYPE_FROM_RELAX_STATE(s) \
891 #define DISP_SIZE_FROM_RELAX_STATE(s) \
892 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
894 /* This table is used by relax_frag to promote short jumps to long
895 ones where necessary. SMALL (short) jumps may be promoted to BIG
896 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
897 don't allow a short jump in a 32 bit code segment to be promoted to
898 a 16 bit offset jump because it's slower (requires data size
899 prefix), and doesn't work, unless the destination is in the bottom
900 64k of the code segment (The top 16 bits of eip are zeroed). */
902 const relax_typeS md_relax_table
[] =
905 1) most positive reach of this state,
906 2) most negative reach of this state,
907 3) how many bytes this mode will have in the variable part of the frag
908 4) which index into the table to try if we can't fit into this one. */
910 /* UNCOND_JUMP states. */
911 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
912 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
913 /* dword jmp adds 4 bytes to frag:
914 0 extra opcode bytes, 4 displacement bytes. */
916 /* word jmp adds 2 bytes to frag:
917 0 extra opcode bytes, 2 displacement bytes. */
920 /* COND_JUMP states. */
921 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
922 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
923 /* dword conditionals adds 5 bytes to frag:
924 1 extra opcode byte, 4 displacement bytes. */
926 /* word conditionals add 3 bytes to frag:
927 1 extra opcode byte, 2 displacement bytes. */
930 /* COND_JUMP86 states. */
931 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
932 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
933 /* dword conditionals adds 5 bytes to frag:
934 1 extra opcode byte, 4 displacement bytes. */
936 /* word conditionals add 4 bytes to frag:
937 1 displacement byte and a 3 byte long branch insn. */
941 static const arch_entry cpu_arch
[] =
943 /* Do not replace the first two entries - i386_target_format()
944 relies on them being there in this order. */
945 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32
,
946 CPU_GENERIC32_FLAGS
, 0 },
947 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64
,
948 CPU_GENERIC64_FLAGS
, 0 },
949 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN
,
951 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN
,
953 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN
,
955 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386
,
957 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486
,
959 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM
,
961 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO
,
963 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM
,
965 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO
,
966 CPU_PENTIUMPRO_FLAGS
, 0 },
967 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO
,
969 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO
,
971 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4
,
973 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA
,
975 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA
,
976 CPU_NOCONA_FLAGS
, 0 },
977 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE
,
979 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE
,
981 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2
,
982 CPU_CORE2_FLAGS
, 1 },
983 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2
,
984 CPU_CORE2_FLAGS
, 0 },
985 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7
,
986 CPU_COREI7_FLAGS
, 0 },
987 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM
,
989 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM
,
991 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU
,
992 CPU_IAMCU_FLAGS
, 0 },
993 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6
,
995 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6
,
997 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON
,
998 CPU_ATHLON_FLAGS
, 0 },
999 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8
,
1001 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8
,
1003 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8
,
1005 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10
,
1006 CPU_AMDFAM10_FLAGS
, 0 },
1007 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD
,
1008 CPU_BDVER1_FLAGS
, 0 },
1009 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD
,
1010 CPU_BDVER2_FLAGS
, 0 },
1011 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD
,
1012 CPU_BDVER3_FLAGS
, 0 },
1013 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD
,
1014 CPU_BDVER4_FLAGS
, 0 },
1015 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER
,
1016 CPU_ZNVER1_FLAGS
, 0 },
1017 { STRING_COMMA_LEN ("znver2"), PROCESSOR_ZNVER
,
1018 CPU_ZNVER2_FLAGS
, 0 },
1019 { STRING_COMMA_LEN ("znver3"), PROCESSOR_ZNVER
,
1020 CPU_ZNVER3_FLAGS
, 0 },
1021 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT
,
1022 CPU_BTVER1_FLAGS
, 0 },
1023 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT
,
1024 CPU_BTVER2_FLAGS
, 0 },
1025 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN
,
1026 CPU_8087_FLAGS
, 0 },
1027 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN
,
1029 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN
,
1031 { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN
,
1033 { STRING_COMMA_LEN (".cmov"), PROCESSOR_UNKNOWN
,
1034 CPU_CMOV_FLAGS
, 0 },
1035 { STRING_COMMA_LEN (".fxsr"), PROCESSOR_UNKNOWN
,
1036 CPU_FXSR_FLAGS
, 0 },
1037 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN
,
1039 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN
,
1041 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN
,
1042 CPU_SSE2_FLAGS
, 0 },
1043 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN
,
1044 CPU_SSE3_FLAGS
, 0 },
1045 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1046 CPU_SSE4A_FLAGS
, 0 },
1047 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN
,
1048 CPU_SSSE3_FLAGS
, 0 },
1049 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN
,
1050 CPU_SSE4_1_FLAGS
, 0 },
1051 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN
,
1052 CPU_SSE4_2_FLAGS
, 0 },
1053 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN
,
1054 CPU_SSE4_2_FLAGS
, 0 },
1055 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN
,
1057 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN
,
1058 CPU_AVX2_FLAGS
, 0 },
1059 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN
,
1060 CPU_AVX512F_FLAGS
, 0 },
1061 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN
,
1062 CPU_AVX512CD_FLAGS
, 0 },
1063 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN
,
1064 CPU_AVX512ER_FLAGS
, 0 },
1065 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN
,
1066 CPU_AVX512PF_FLAGS
, 0 },
1067 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN
,
1068 CPU_AVX512DQ_FLAGS
, 0 },
1069 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN
,
1070 CPU_AVX512BW_FLAGS
, 0 },
1071 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN
,
1072 CPU_AVX512VL_FLAGS
, 0 },
1073 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN
,
1075 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN
,
1076 CPU_VMFUNC_FLAGS
, 0 },
1077 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN
,
1079 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN
,
1080 CPU_XSAVE_FLAGS
, 0 },
1081 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN
,
1082 CPU_XSAVEOPT_FLAGS
, 0 },
1083 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN
,
1084 CPU_XSAVEC_FLAGS
, 0 },
1085 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN
,
1086 CPU_XSAVES_FLAGS
, 0 },
1087 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN
,
1089 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN
,
1090 CPU_PCLMUL_FLAGS
, 0 },
1091 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN
,
1092 CPU_PCLMUL_FLAGS
, 1 },
1093 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN
,
1094 CPU_FSGSBASE_FLAGS
, 0 },
1095 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN
,
1096 CPU_RDRND_FLAGS
, 0 },
1097 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN
,
1098 CPU_F16C_FLAGS
, 0 },
1099 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN
,
1100 CPU_BMI2_FLAGS
, 0 },
1101 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN
,
1103 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN
,
1104 CPU_FMA4_FLAGS
, 0 },
1105 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN
,
1107 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN
,
1109 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN
,
1110 CPU_MOVBE_FLAGS
, 0 },
1111 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN
,
1112 CPU_CX16_FLAGS
, 0 },
1113 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN
,
1115 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN
,
1116 CPU_LZCNT_FLAGS
, 0 },
1117 { STRING_COMMA_LEN (".popcnt"), PROCESSOR_UNKNOWN
,
1118 CPU_POPCNT_FLAGS
, 0 },
1119 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN
,
1121 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN
,
1123 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN
,
1124 CPU_INVPCID_FLAGS
, 0 },
1125 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN
,
1126 CPU_CLFLUSH_FLAGS
, 0 },
1127 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN
,
1129 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN
,
1130 CPU_SYSCALL_FLAGS
, 0 },
1131 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN
,
1132 CPU_RDTSCP_FLAGS
, 0 },
1133 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN
,
1134 CPU_3DNOW_FLAGS
, 0 },
1135 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN
,
1136 CPU_3DNOWA_FLAGS
, 0 },
1137 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN
,
1138 CPU_PADLOCK_FLAGS
, 0 },
1139 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN
,
1140 CPU_SVME_FLAGS
, 1 },
1141 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN
,
1142 CPU_SVME_FLAGS
, 0 },
1143 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1144 CPU_SSE4A_FLAGS
, 0 },
1145 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN
,
1147 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN
,
1149 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN
,
1151 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN
,
1153 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN
,
1154 CPU_RDSEED_FLAGS
, 0 },
1155 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN
,
1156 CPU_PRFCHW_FLAGS
, 0 },
1157 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN
,
1158 CPU_SMAP_FLAGS
, 0 },
1159 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN
,
1161 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN
,
1163 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN
,
1164 CPU_CLFLUSHOPT_FLAGS
, 0 },
1165 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN
,
1166 CPU_PREFETCHWT1_FLAGS
, 0 },
1167 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN
,
1169 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN
,
1170 CPU_CLWB_FLAGS
, 0 },
1171 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN
,
1172 CPU_AVX512IFMA_FLAGS
, 0 },
1173 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN
,
1174 CPU_AVX512VBMI_FLAGS
, 0 },
1175 { STRING_COMMA_LEN (".avx512_4fmaps"), PROCESSOR_UNKNOWN
,
1176 CPU_AVX512_4FMAPS_FLAGS
, 0 },
1177 { STRING_COMMA_LEN (".avx512_4vnniw"), PROCESSOR_UNKNOWN
,
1178 CPU_AVX512_4VNNIW_FLAGS
, 0 },
1179 { STRING_COMMA_LEN (".avx512_vpopcntdq"), PROCESSOR_UNKNOWN
,
1180 CPU_AVX512_VPOPCNTDQ_FLAGS
, 0 },
1181 { STRING_COMMA_LEN (".avx512_vbmi2"), PROCESSOR_UNKNOWN
,
1182 CPU_AVX512_VBMI2_FLAGS
, 0 },
1183 { STRING_COMMA_LEN (".avx512_vnni"), PROCESSOR_UNKNOWN
,
1184 CPU_AVX512_VNNI_FLAGS
, 0 },
1185 { STRING_COMMA_LEN (".avx512_bitalg"), PROCESSOR_UNKNOWN
,
1186 CPU_AVX512_BITALG_FLAGS
, 0 },
1187 { STRING_COMMA_LEN (".avx_vnni"), PROCESSOR_UNKNOWN
,
1188 CPU_AVX_VNNI_FLAGS
, 0 },
1189 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN
,
1190 CPU_CLZERO_FLAGS
, 0 },
1191 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN
,
1192 CPU_MWAITX_FLAGS
, 0 },
1193 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN
,
1194 CPU_OSPKE_FLAGS
, 0 },
1195 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN
,
1196 CPU_RDPID_FLAGS
, 0 },
1197 { STRING_COMMA_LEN (".ptwrite"), PROCESSOR_UNKNOWN
,
1198 CPU_PTWRITE_FLAGS
, 0 },
1199 { STRING_COMMA_LEN (".ibt"), PROCESSOR_UNKNOWN
,
1201 { STRING_COMMA_LEN (".shstk"), PROCESSOR_UNKNOWN
,
1202 CPU_SHSTK_FLAGS
, 0 },
1203 { STRING_COMMA_LEN (".gfni"), PROCESSOR_UNKNOWN
,
1204 CPU_GFNI_FLAGS
, 0 },
1205 { STRING_COMMA_LEN (".vaes"), PROCESSOR_UNKNOWN
,
1206 CPU_VAES_FLAGS
, 0 },
1207 { STRING_COMMA_LEN (".vpclmulqdq"), PROCESSOR_UNKNOWN
,
1208 CPU_VPCLMULQDQ_FLAGS
, 0 },
1209 { STRING_COMMA_LEN (".wbnoinvd"), PROCESSOR_UNKNOWN
,
1210 CPU_WBNOINVD_FLAGS
, 0 },
1211 { STRING_COMMA_LEN (".pconfig"), PROCESSOR_UNKNOWN
,
1212 CPU_PCONFIG_FLAGS
, 0 },
1213 { STRING_COMMA_LEN (".waitpkg"), PROCESSOR_UNKNOWN
,
1214 CPU_WAITPKG_FLAGS
, 0 },
1215 { STRING_COMMA_LEN (".cldemote"), PROCESSOR_UNKNOWN
,
1216 CPU_CLDEMOTE_FLAGS
, 0 },
1217 { STRING_COMMA_LEN (".amx_int8"), PROCESSOR_UNKNOWN
,
1218 CPU_AMX_INT8_FLAGS
, 0 },
1219 { STRING_COMMA_LEN (".amx_bf16"), PROCESSOR_UNKNOWN
,
1220 CPU_AMX_BF16_FLAGS
, 0 },
1221 { STRING_COMMA_LEN (".amx_tile"), PROCESSOR_UNKNOWN
,
1222 CPU_AMX_TILE_FLAGS
, 0 },
1223 { STRING_COMMA_LEN (".movdiri"), PROCESSOR_UNKNOWN
,
1224 CPU_MOVDIRI_FLAGS
, 0 },
1225 { STRING_COMMA_LEN (".movdir64b"), PROCESSOR_UNKNOWN
,
1226 CPU_MOVDIR64B_FLAGS
, 0 },
1227 { STRING_COMMA_LEN (".avx512_bf16"), PROCESSOR_UNKNOWN
,
1228 CPU_AVX512_BF16_FLAGS
, 0 },
1229 { STRING_COMMA_LEN (".avx512_vp2intersect"), PROCESSOR_UNKNOWN
,
1230 CPU_AVX512_VP2INTERSECT_FLAGS
, 0 },
1231 { STRING_COMMA_LEN (".tdx"), PROCESSOR_UNKNOWN
,
1233 { STRING_COMMA_LEN (".enqcmd"), PROCESSOR_UNKNOWN
,
1234 CPU_ENQCMD_FLAGS
, 0 },
1235 { STRING_COMMA_LEN (".serialize"), PROCESSOR_UNKNOWN
,
1236 CPU_SERIALIZE_FLAGS
, 0 },
1237 { STRING_COMMA_LEN (".rdpru"), PROCESSOR_UNKNOWN
,
1238 CPU_RDPRU_FLAGS
, 0 },
1239 { STRING_COMMA_LEN (".mcommit"), PROCESSOR_UNKNOWN
,
1240 CPU_MCOMMIT_FLAGS
, 0 },
1241 { STRING_COMMA_LEN (".sev_es"), PROCESSOR_UNKNOWN
,
1242 CPU_SEV_ES_FLAGS
, 0 },
1243 { STRING_COMMA_LEN (".tsxldtrk"), PROCESSOR_UNKNOWN
,
1244 CPU_TSXLDTRK_FLAGS
, 0 },
1245 { STRING_COMMA_LEN (".kl"), PROCESSOR_UNKNOWN
,
1247 { STRING_COMMA_LEN (".widekl"), PROCESSOR_UNKNOWN
,
1248 CPU_WIDEKL_FLAGS
, 0 },
1249 { STRING_COMMA_LEN (".uintr"), PROCESSOR_UNKNOWN
,
1250 CPU_UINTR_FLAGS
, 0 },
1251 { STRING_COMMA_LEN (".hreset"), PROCESSOR_UNKNOWN
,
1252 CPU_HRESET_FLAGS
, 0 },
1255 static const noarch_entry cpu_noarch
[] =
1257 { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS
},
1258 { STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS
},
1259 { STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS
},
1260 { STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS
},
1261 { STRING_COMMA_LEN ("nocmov"), CPU_ANY_CMOV_FLAGS
},
1262 { STRING_COMMA_LEN ("nofxsr"), CPU_ANY_FXSR_FLAGS
},
1263 { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS
},
1264 { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS
},
1265 { STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS
},
1266 { STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS
},
1267 { STRING_COMMA_LEN ("nosse4a"), CPU_ANY_SSE4A_FLAGS
},
1268 { STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS
},
1269 { STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS
},
1270 { STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS
},
1271 { STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS
},
1272 { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS
},
1273 { STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS
},
1274 { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS
},
1275 { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS
},
1276 { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS
},
1277 { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS
},
1278 { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS
},
1279 { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS
},
1280 { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS
},
1281 { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS
},
1282 { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS
},
1283 { STRING_COMMA_LEN ("noavx512_4fmaps"), CPU_ANY_AVX512_4FMAPS_FLAGS
},
1284 { STRING_COMMA_LEN ("noavx512_4vnniw"), CPU_ANY_AVX512_4VNNIW_FLAGS
},
1285 { STRING_COMMA_LEN ("noavx512_vpopcntdq"), CPU_ANY_AVX512_VPOPCNTDQ_FLAGS
},
1286 { STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS
},
1287 { STRING_COMMA_LEN ("noavx512_vnni"), CPU_ANY_AVX512_VNNI_FLAGS
},
1288 { STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS
},
1289 { STRING_COMMA_LEN ("noavx_vnni"), CPU_ANY_AVX_VNNI_FLAGS
},
1290 { STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS
},
1291 { STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS
},
1292 { STRING_COMMA_LEN ("noamx_int8"), CPU_ANY_AMX_INT8_FLAGS
},
1293 { STRING_COMMA_LEN ("noamx_bf16"), CPU_ANY_AMX_BF16_FLAGS
},
1294 { STRING_COMMA_LEN ("noamx_tile"), CPU_ANY_AMX_TILE_FLAGS
},
1295 { STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS
},
1296 { STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS
},
1297 { STRING_COMMA_LEN ("noavx512_bf16"), CPU_ANY_AVX512_BF16_FLAGS
},
1298 { STRING_COMMA_LEN ("noavx512_vp2intersect"),
1299 CPU_ANY_AVX512_VP2INTERSECT_FLAGS
},
1300 { STRING_COMMA_LEN ("notdx"), CPU_ANY_TDX_FLAGS
},
1301 { STRING_COMMA_LEN ("noenqcmd"), CPU_ANY_ENQCMD_FLAGS
},
1302 { STRING_COMMA_LEN ("noserialize"), CPU_ANY_SERIALIZE_FLAGS
},
1303 { STRING_COMMA_LEN ("notsxldtrk"), CPU_ANY_TSXLDTRK_FLAGS
},
1304 { STRING_COMMA_LEN ("nokl"), CPU_ANY_KL_FLAGS
},
1305 { STRING_COMMA_LEN ("nowidekl"), CPU_ANY_WIDEKL_FLAGS
},
1306 { STRING_COMMA_LEN ("nouintr"), CPU_ANY_UINTR_FLAGS
},
1307 { STRING_COMMA_LEN ("nohreset"), CPU_ANY_HRESET_FLAGS
},
1311 /* Like s_lcomm_internal in gas/read.c but the alignment string
1312 is allowed to be optional. */
1315 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
1322 && *input_line_pointer
== ',')
1324 align
= parse_align (needs_align
- 1);
1326 if (align
== (addressT
) -1)
1341 bss_alloc (symbolP
, size
, align
);
1346 pe_lcomm (int needs_align
)
1348 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
1352 const pseudo_typeS md_pseudo_table
[] =
1354 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1355 {"align", s_align_bytes
, 0},
1357 {"align", s_align_ptwo
, 0},
1359 {"arch", set_cpu_arch
, 0},
1363 {"lcomm", pe_lcomm
, 1},
1365 {"ffloat", float_cons
, 'f'},
1366 {"dfloat", float_cons
, 'd'},
1367 {"tfloat", float_cons
, 'x'},
1369 {"slong", signed_cons
, 4},
1370 {"noopt", s_ignore
, 0},
1371 {"optim", s_ignore
, 0},
1372 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
1373 {"code16", set_code_flag
, CODE_16BIT
},
1374 {"code32", set_code_flag
, CODE_32BIT
},
1376 {"code64", set_code_flag
, CODE_64BIT
},
1378 {"intel_syntax", set_intel_syntax
, 1},
1379 {"att_syntax", set_intel_syntax
, 0},
1380 {"intel_mnemonic", set_intel_mnemonic
, 1},
1381 {"att_mnemonic", set_intel_mnemonic
, 0},
1382 {"allow_index_reg", set_allow_index_reg
, 1},
1383 {"disallow_index_reg", set_allow_index_reg
, 0},
1384 {"sse_check", set_check
, 0},
1385 {"operand_check", set_check
, 1},
1386 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1387 {"largecomm", handle_large_common
, 0},
1389 {"file", dwarf2_directive_file
, 0},
1390 {"loc", dwarf2_directive_loc
, 0},
1391 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
1394 {"secrel32", pe_directive_secrel
, 0},
1399 /* For interface with expression (). */
1400 extern char *input_line_pointer
;
1402 /* Hash table for instruction mnemonic lookup. */
1403 static htab_t op_hash
;
1405 /* Hash table for register lookup. */
1406 static htab_t reg_hash
;
1408 /* Various efficient no-op patterns for aligning code labels.
1409 Note: Don't try to assemble the instructions in the comments.
1410 0L and 0w are not legal. */
1411 static const unsigned char f32_1
[] =
/* Filler patterns used when padding with NOPs.  The trailing digit in
   each name is the pattern's length in bytes: f32_N is an N-byte filler
   for 32-bit code, f16_N for 16-bit code.  Each is a single instruction
   with no architectural effect.  The jump*_disp* arrays hold the opcode
   bytes used to emit a jump over a long run of padding (see
   i386_generate_nops).  */
static const unsigned char f32_2[] =
  {0x66,0x90};				/* xchg %ax,%ax */
static const unsigned char f32_3[] =
  {0x8d,0x76,0x00};			/* leal 0(%esi),%esi */
static const unsigned char f32_4[] =
  {0x8d,0x74,0x26,0x00};		/* leal 0(%esi,1),%esi */
static const unsigned char f32_6[] =
  {0x8d,0xb6,0x00,0x00,0x00,0x00};	/* leal 0L(%esi),%esi */
static const unsigned char f32_7[] =
  {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};	/* leal 0L(%esi,1),%esi */
static const unsigned char f16_3[] =
  {0x8d,0x74,0x00};			/* lea 0(%si),%si */
static const unsigned char f16_4[] =
  {0x8d,0xb4,0x00,0x00};		/* lea 0W(%si),%si */
/* Opcode for a short jump with an 8-bit displacement.  */
static const unsigned char jump_disp8[] =
  {0xeb};				/* jmp disp8 */
/* Opcode for a near jump with a 32-bit displacement (32/64-bit code).  */
static const unsigned char jump32_disp32[] =
  {0xe9};				/* jmp disp32 */
/* Operand-size-prefixed near jump for 16-bit code.  */
static const unsigned char jump16_disp32[] =
  {0x66,0xe9};				/* jmp disp32 */
1433 /* 32-bit NOPs patterns. */
1434 static const unsigned char *const f32_patt
[] = {
1435 f32_1
, f32_2
, f32_3
, f32_4
, NULL
, f32_6
, f32_7
1437 /* 16-bit NOPs patterns. */
1438 static const unsigned char *const f16_patt
[] = {
1439 f32_1
, f32_2
, f16_3
, f16_4
1441 /* nopl (%[re]ax) */
1442 static const unsigned char alt_3
[] =
/* Multi-byte "0F 1F /0" NOP fillers (the "alt" patterns), selected for
   newer processors by i386_generate_nops.  As with f32_N, the trailing
   digit is the pattern length in bytes; longer forms are built by
   widening the ModRM/SIB addressing of the same nopl/nopw instruction
   and by prepending prefixes.  */
/* nopl 0(%[re]ax) */
static const unsigned char alt_4[] =
  {0x0f,0x1f,0x40,0x00};
/* nopl 0(%[re]ax,%[re]ax,1) */
static const unsigned char alt_5[] =
  {0x0f,0x1f,0x44,0x00,0x00};
/* nopw 0(%[re]ax,%[re]ax,1) */
static const unsigned char alt_6[] =
  {0x66,0x0f,0x1f,0x44,0x00,0x00};
/* nopl 0L(%[re]ax) */
static const unsigned char alt_7[] =
  {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
/* nopl 0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_8[] =
  {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* nopw 0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_9[] =
  {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* nopw %cs:0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_10[] =
  {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* data16 nopw %cs:0L(%eax,%eax,1) */
static const unsigned char alt_11[] =
  {0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1468 /* 32-bit and 64-bit NOPs patterns. */
1469 static const unsigned char *const alt_patt
[] = {
1470 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1471 alt_9
, alt_10
, alt_11
1474 /* Genenerate COUNT bytes of NOPs to WHERE from PATT with the maximum
1475 size of a single NOP instruction MAX_SINGLE_NOP_SIZE. */
1478 i386_output_nops (char *where
, const unsigned char *const *patt
,
1479 int count
, int max_single_nop_size
)
1482 /* Place the longer NOP first. */
1485 const unsigned char *nops
;
1487 if (max_single_nop_size
< 1)
1489 as_fatal (_("i386_output_nops called to generate nops of at most %d bytes!"),
1490 max_single_nop_size
);
1494 nops
= patt
[max_single_nop_size
- 1];
1496 /* Use the smaller one if the requsted one isn't available. */
1499 max_single_nop_size
--;
1500 nops
= patt
[max_single_nop_size
- 1];
1503 last
= count
% max_single_nop_size
;
1506 for (offset
= 0; offset
< count
; offset
+= max_single_nop_size
)
1507 memcpy (where
+ offset
, nops
, max_single_nop_size
);
1511 nops
= patt
[last
- 1];
1514 /* Use the smaller one plus one-byte NOP if the needed one
1517 nops
= patt
[last
- 1];
1518 memcpy (where
+ offset
, nops
, last
);
1519 where
[offset
+ last
] = *patt
[0];
1522 memcpy (where
+ offset
, nops
, last
);
1527 fits_in_imm7 (offsetT num
)
1529 return (num
& 0x7f) == num
;
1533 fits_in_imm31 (offsetT num
)
1535 return (num
& 0x7fffffff) == num
;
1538 /* Genenerate COUNT bytes of NOPs to WHERE with the maximum size of a
1539 single NOP instruction LIMIT. */
1542 i386_generate_nops (fragS
*fragP
, char *where
, offsetT count
, int limit
)
1544 const unsigned char *const *patt
= NULL
;
1545 int max_single_nop_size
;
1546 /* Maximum number of NOPs before switching to jump over NOPs. */
1547 int max_number_of_nops
;
1549 switch (fragP
->fr_type
)
1554 case rs_machine_dependent
:
1555 /* Allow NOP padding for jumps and calls. */
1556 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
1557 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
1564 /* We need to decide which NOP sequence to use for 32bit and
1565 64bit. When -mtune= is used:
1567 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1568 PROCESSOR_GENERIC32, f32_patt will be used.
1569 2. For the rest, alt_patt will be used.
1571 When -mtune= isn't used, alt_patt will be used if
1572 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1575 When -march= or .arch is used, we can't use anything beyond
1576 cpu_arch_isa_flags. */
1578 if (flag_code
== CODE_16BIT
)
1581 max_single_nop_size
= sizeof (f16_patt
) / sizeof (f16_patt
[0]);
1582 /* Limit number of NOPs to 2 in 16-bit mode. */
1583 max_number_of_nops
= 2;
1587 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1589 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1590 switch (cpu_arch_tune
)
1592 case PROCESSOR_UNKNOWN
:
1593 /* We use cpu_arch_isa_flags to check if we SHOULD
1594 optimize with nops. */
1595 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1600 case PROCESSOR_PENTIUM4
:
1601 case PROCESSOR_NOCONA
:
1602 case PROCESSOR_CORE
:
1603 case PROCESSOR_CORE2
:
1604 case PROCESSOR_COREI7
:
1605 case PROCESSOR_L1OM
:
1606 case PROCESSOR_K1OM
:
1607 case PROCESSOR_GENERIC64
:
1609 case PROCESSOR_ATHLON
:
1611 case PROCESSOR_AMDFAM10
:
1613 case PROCESSOR_ZNVER
:
1617 case PROCESSOR_I386
:
1618 case PROCESSOR_I486
:
1619 case PROCESSOR_PENTIUM
:
1620 case PROCESSOR_PENTIUMPRO
:
1621 case PROCESSOR_IAMCU
:
1622 case PROCESSOR_GENERIC32
:
1629 switch (fragP
->tc_frag_data
.tune
)
1631 case PROCESSOR_UNKNOWN
:
1632 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1633 PROCESSOR_UNKNOWN. */
1637 case PROCESSOR_I386
:
1638 case PROCESSOR_I486
:
1639 case PROCESSOR_PENTIUM
:
1640 case PROCESSOR_IAMCU
:
1642 case PROCESSOR_ATHLON
:
1644 case PROCESSOR_AMDFAM10
:
1646 case PROCESSOR_ZNVER
:
1648 case PROCESSOR_GENERIC32
:
1649 /* We use cpu_arch_isa_flags to check if we CAN optimize
1651 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1656 case PROCESSOR_PENTIUMPRO
:
1657 case PROCESSOR_PENTIUM4
:
1658 case PROCESSOR_NOCONA
:
1659 case PROCESSOR_CORE
:
1660 case PROCESSOR_CORE2
:
1661 case PROCESSOR_COREI7
:
1662 case PROCESSOR_L1OM
:
1663 case PROCESSOR_K1OM
:
1664 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1669 case PROCESSOR_GENERIC64
:
1675 if (patt
== f32_patt
)
1677 max_single_nop_size
= sizeof (f32_patt
) / sizeof (f32_patt
[0]);
1678 /* Limit number of NOPs to 2 for older processors. */
1679 max_number_of_nops
= 2;
1683 max_single_nop_size
= sizeof (alt_patt
) / sizeof (alt_patt
[0]);
1684 /* Limit number of NOPs to 7 for newer processors. */
1685 max_number_of_nops
= 7;
1690 limit
= max_single_nop_size
;
1692 if (fragP
->fr_type
== rs_fill_nop
)
1694 /* Output NOPs for .nop directive. */
1695 if (limit
> max_single_nop_size
)
1697 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1698 _("invalid single nop size: %d "
1699 "(expect within [0, %d])"),
1700 limit
, max_single_nop_size
);
1704 else if (fragP
->fr_type
!= rs_machine_dependent
)
1705 fragP
->fr_var
= count
;
1707 if ((count
/ max_single_nop_size
) > max_number_of_nops
)
1709 /* Generate jump over NOPs. */
1710 offsetT disp
= count
- 2;
1711 if (fits_in_imm7 (disp
))
1713 /* Use "jmp disp8" if possible. */
1715 where
[0] = jump_disp8
[0];
1721 unsigned int size_of_jump
;
1723 if (flag_code
== CODE_16BIT
)
1725 where
[0] = jump16_disp32
[0];
1726 where
[1] = jump16_disp32
[1];
1731 where
[0] = jump32_disp32
[0];
1735 count
-= size_of_jump
+ 4;
1736 if (!fits_in_imm31 (count
))
1738 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1739 _("jump over nop padding out of range"));
1743 md_number_to_chars (where
+ size_of_jump
, count
, 4);
1744 where
+= size_of_jump
+ 4;
1748 /* Generate multiple NOPs. */
1749 i386_output_nops (where
, patt
, count
, limit
);
1753 operand_type_all_zero (const union i386_operand_type
*x
)
1755 switch (ARRAY_SIZE(x
->array
))
1766 return !x
->array
[0];
1773 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1775 switch (ARRAY_SIZE(x
->array
))
1791 x
->bitfield
.class = ClassNone
;
1792 x
->bitfield
.instance
= InstanceNone
;
1796 operand_type_equal (const union i386_operand_type
*x
,
1797 const union i386_operand_type
*y
)
1799 switch (ARRAY_SIZE(x
->array
))
1802 if (x
->array
[2] != y
->array
[2])
1806 if (x
->array
[1] != y
->array
[1])
1810 return x
->array
[0] == y
->array
[0];
1818 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1820 switch (ARRAY_SIZE(x
->array
))
1835 return !x
->array
[0];
1842 cpu_flags_equal (const union i386_cpu_flags
*x
,
1843 const union i386_cpu_flags
*y
)
1845 switch (ARRAY_SIZE(x
->array
))
1848 if (x
->array
[3] != y
->array
[3])
1852 if (x
->array
[2] != y
->array
[2])
1856 if (x
->array
[1] != y
->array
[1])
1860 return x
->array
[0] == y
->array
[0];
1868 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1870 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1871 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1874 static INLINE i386_cpu_flags
1875 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1877 switch (ARRAY_SIZE (x
.array
))
1880 x
.array
[3] &= y
.array
[3];
1883 x
.array
[2] &= y
.array
[2];
1886 x
.array
[1] &= y
.array
[1];
1889 x
.array
[0] &= y
.array
[0];
1897 static INLINE i386_cpu_flags
1898 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1900 switch (ARRAY_SIZE (x
.array
))
1903 x
.array
[3] |= y
.array
[3];
1906 x
.array
[2] |= y
.array
[2];
1909 x
.array
[1] |= y
.array
[1];
1912 x
.array
[0] |= y
.array
[0];
1920 static INLINE i386_cpu_flags
1921 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1923 switch (ARRAY_SIZE (x
.array
))
1926 x
.array
[3] &= ~y
.array
[3];
1929 x
.array
[2] &= ~y
.array
[2];
1932 x
.array
[1] &= ~y
.array
[1];
1935 x
.array
[0] &= ~y
.array
[0];
/* Feature mask covering everything implied by AVX512F, used to strip
   AVX512-related bits in one operation.  */
static const i386_cpu_flags avx512 = CPU_ANY_AVX512F_FLAGS;

/* Result bits returned by cpu_flags_match: ARCH_MATCH is set when the
   enabled CPU features cover the template's requirements, 64BIT_MATCH
   when the template is usable in the current code mode (see
   cpu_flags_check_cpu64).  */
#define CPU_FLAGS_ARCH_MATCH		0x1
#define CPU_FLAGS_64BIT_MATCH		0x2

/* Both of the above: the template is fully usable as-is.  */
#define CPU_FLAGS_PERFECT_MATCH \
  (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_64BIT_MATCH)
1951 /* Return CPU flags match bits. */
1954 cpu_flags_match (const insn_template
*t
)
1956 i386_cpu_flags x
= t
->cpu_flags
;
1957 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1959 x
.bitfield
.cpu64
= 0;
1960 x
.bitfield
.cpuno64
= 0;
1962 if (cpu_flags_all_zero (&x
))
1964 /* This instruction is available on all archs. */
1965 match
|= CPU_FLAGS_ARCH_MATCH
;
1969 /* This instruction is available only on some archs. */
1970 i386_cpu_flags cpu
= cpu_arch_flags
;
1972 /* AVX512VL is no standalone feature - match it and then strip it. */
1973 if (x
.bitfield
.cpuavx512vl
&& !cpu
.bitfield
.cpuavx512vl
)
1975 x
.bitfield
.cpuavx512vl
= 0;
1977 cpu
= cpu_flags_and (x
, cpu
);
1978 if (!cpu_flags_all_zero (&cpu
))
1980 if (x
.bitfield
.cpuavx
)
1982 /* We need to check a few extra flags with AVX. */
1983 if (cpu
.bitfield
.cpuavx
1984 && (!t
->opcode_modifier
.sse2avx
1985 || (sse2avx
&& !i
.prefix
[DATA_PREFIX
]))
1986 && (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1987 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1988 && (!x
.bitfield
.cpupclmul
|| cpu
.bitfield
.cpupclmul
))
1989 match
|= CPU_FLAGS_ARCH_MATCH
;
1991 else if (x
.bitfield
.cpuavx512f
)
1993 /* We need to check a few extra flags with AVX512F. */
1994 if (cpu
.bitfield
.cpuavx512f
1995 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1996 && (!x
.bitfield
.cpuvaes
|| cpu
.bitfield
.cpuvaes
)
1997 && (!x
.bitfield
.cpuvpclmulqdq
|| cpu
.bitfield
.cpuvpclmulqdq
))
1998 match
|= CPU_FLAGS_ARCH_MATCH
;
2001 match
|= CPU_FLAGS_ARCH_MATCH
;
2007 static INLINE i386_operand_type
2008 operand_type_and (i386_operand_type x
, i386_operand_type y
)
2010 if (x
.bitfield
.class != y
.bitfield
.class)
2011 x
.bitfield
.class = ClassNone
;
2012 if (x
.bitfield
.instance
!= y
.bitfield
.instance
)
2013 x
.bitfield
.instance
= InstanceNone
;
2015 switch (ARRAY_SIZE (x
.array
))
2018 x
.array
[2] &= y
.array
[2];
2021 x
.array
[1] &= y
.array
[1];
2024 x
.array
[0] &= y
.array
[0];
2032 static INLINE i386_operand_type
2033 operand_type_and_not (i386_operand_type x
, i386_operand_type y
)
2035 gas_assert (y
.bitfield
.class == ClassNone
);
2036 gas_assert (y
.bitfield
.instance
== InstanceNone
);
2038 switch (ARRAY_SIZE (x
.array
))
2041 x
.array
[2] &= ~y
.array
[2];
2044 x
.array
[1] &= ~y
.array
[1];
2047 x
.array
[0] &= ~y
.array
[0];
2055 static INLINE i386_operand_type
2056 operand_type_or (i386_operand_type x
, i386_operand_type y
)
2058 gas_assert (x
.bitfield
.class == ClassNone
||
2059 y
.bitfield
.class == ClassNone
||
2060 x
.bitfield
.class == y
.bitfield
.class);
2061 gas_assert (x
.bitfield
.instance
== InstanceNone
||
2062 y
.bitfield
.instance
== InstanceNone
||
2063 x
.bitfield
.instance
== y
.bitfield
.instance
);
2065 switch (ARRAY_SIZE (x
.array
))
2068 x
.array
[2] |= y
.array
[2];
2071 x
.array
[1] |= y
.array
[1];
2074 x
.array
[0] |= y
.array
[0];
2082 static INLINE i386_operand_type
2083 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
2085 gas_assert (y
.bitfield
.class == ClassNone
);
2086 gas_assert (y
.bitfield
.instance
== InstanceNone
);
2088 switch (ARRAY_SIZE (x
.array
))
2091 x
.array
[2] ^= y
.array
[2];
2094 x
.array
[1] ^= y
.array
[1];
2097 x
.array
[0] ^= y
.array
[0];
/* Frequently used operand-type masks, pre-built from the generated
   OPERAND_TYPE_* initializers so they can be combined with the
   operand_type_* helpers above without re-spelling the aggregate
   initializers at each use site.  disp* cover displacement widths
   (disp32s being the sign-extended 32-bit form), imm* cover immediate
   widths, and the *_32s / any* variants are unions of several widths.  */
static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
static const i386_operand_type anydisp = OPERAND_TYPE_ANYDISP;
static const i386_operand_type anyimm = OPERAND_TYPE_ANYIMM;
static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
static const i386_operand_type regmask = OPERAND_TYPE_REGMASK;
static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
2132 operand_type_check (i386_operand_type t
, enum operand_type c
)
2137 return t
.bitfield
.class == Reg
;
2140 return (t
.bitfield
.imm8
2144 || t
.bitfield
.imm32s
2145 || t
.bitfield
.imm64
);
2148 return (t
.bitfield
.disp8
2149 || t
.bitfield
.disp16
2150 || t
.bitfield
.disp32
2151 || t
.bitfield
.disp32s
2152 || t
.bitfield
.disp64
);
2155 return (t
.bitfield
.disp8
2156 || t
.bitfield
.disp16
2157 || t
.bitfield
.disp32
2158 || t
.bitfield
.disp32s
2159 || t
.bitfield
.disp64
2160 || t
.bitfield
.baseindex
);
2169 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
2170 between operand GIVEN and opeand WANTED for instruction template T. */
2173 match_operand_size (const insn_template
*t
, unsigned int wanted
,
2176 return !((i
.types
[given
].bitfield
.byte
2177 && !t
->operand_types
[wanted
].bitfield
.byte
)
2178 || (i
.types
[given
].bitfield
.word
2179 && !t
->operand_types
[wanted
].bitfield
.word
)
2180 || (i
.types
[given
].bitfield
.dword
2181 && !t
->operand_types
[wanted
].bitfield
.dword
)
2182 || (i
.types
[given
].bitfield
.qword
2183 && !t
->operand_types
[wanted
].bitfield
.qword
)
2184 || (i
.types
[given
].bitfield
.tbyte
2185 && !t
->operand_types
[wanted
].bitfield
.tbyte
));
2188 /* Return 1 if there is no conflict in SIMD register between operand
2189 GIVEN and opeand WANTED for instruction template T. */
2192 match_simd_size (const insn_template
*t
, unsigned int wanted
,
2195 return !((i
.types
[given
].bitfield
.xmmword
2196 && !t
->operand_types
[wanted
].bitfield
.xmmword
)
2197 || (i
.types
[given
].bitfield
.ymmword
2198 && !t
->operand_types
[wanted
].bitfield
.ymmword
)
2199 || (i
.types
[given
].bitfield
.zmmword
2200 && !t
->operand_types
[wanted
].bitfield
.zmmword
)
2201 || (i
.types
[given
].bitfield
.tmmword
2202 && !t
->operand_types
[wanted
].bitfield
.tmmword
));
2205 /* Return 1 if there is no conflict in any size between operand GIVEN
2206 and opeand WANTED for instruction template T. */
2209 match_mem_size (const insn_template
*t
, unsigned int wanted
,
2212 return (match_operand_size (t
, wanted
, given
)
2213 && !((i
.types
[given
].bitfield
.unspecified
2215 && !t
->operand_types
[wanted
].bitfield
.unspecified
)
2216 || (i
.types
[given
].bitfield
.fword
2217 && !t
->operand_types
[wanted
].bitfield
.fword
)
2218 /* For scalar opcode templates to allow register and memory
2219 operands at the same time, some special casing is needed
2220 here. Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
2221 down-conversion vpmov*. */
2222 || ((t
->operand_types
[wanted
].bitfield
.class == RegSIMD
2223 && t
->operand_types
[wanted
].bitfield
.byte
2224 + t
->operand_types
[wanted
].bitfield
.word
2225 + t
->operand_types
[wanted
].bitfield
.dword
2226 + t
->operand_types
[wanted
].bitfield
.qword
2227 > !!t
->opcode_modifier
.broadcast
)
2228 ? (i
.types
[given
].bitfield
.xmmword
2229 || i
.types
[given
].bitfield
.ymmword
2230 || i
.types
[given
].bitfield
.zmmword
)
2231 : !match_simd_size(t
, wanted
, given
))));
2234 /* Return value has MATCH_STRAIGHT set if there is no size conflict on any
2235 operands for instruction template T, and it has MATCH_REVERSE set if there
2236 is no size conflict on any operands for the template with operands reversed
2237 (and the template allows for reversing in the first place). */
2239 #define MATCH_STRAIGHT 1
2240 #define MATCH_REVERSE 2
2242 static INLINE
unsigned int
2243 operand_size_match (const insn_template
*t
)
2245 unsigned int j
, match
= MATCH_STRAIGHT
;
2247 /* Don't check non-absolute jump instructions. */
2248 if (t
->opcode_modifier
.jump
2249 && t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
2252 /* Check memory and accumulator operand size. */
2253 for (j
= 0; j
< i
.operands
; j
++)
2255 if (i
.types
[j
].bitfield
.class != Reg
2256 && i
.types
[j
].bitfield
.class != RegSIMD
2257 && t
->opcode_modifier
.anysize
)
2260 if (t
->operand_types
[j
].bitfield
.class == Reg
2261 && !match_operand_size (t
, j
, j
))
2267 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2268 && !match_simd_size (t
, j
, j
))
2274 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2275 && (!match_operand_size (t
, j
, j
) || !match_simd_size (t
, j
, j
)))
2281 if ((i
.flags
[j
] & Operand_Mem
) && !match_mem_size (t
, j
, j
))
2288 if (!t
->opcode_modifier
.d
)
2292 i
.error
= operand_size_mismatch
;
2296 /* Check reverse. */
2297 gas_assert (i
.operands
>= 2 && i
.operands
<= 3);
2299 for (j
= 0; j
< i
.operands
; j
++)
2301 unsigned int given
= i
.operands
- j
- 1;
2303 if (t
->operand_types
[j
].bitfield
.class == Reg
2304 && !match_operand_size (t
, j
, given
))
2307 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2308 && !match_simd_size (t
, j
, given
))
2311 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2312 && (!match_operand_size (t
, j
, given
)
2313 || !match_simd_size (t
, j
, given
)))
2316 if ((i
.flags
[given
] & Operand_Mem
) && !match_mem_size (t
, j
, given
))
2320 return match
| MATCH_REVERSE
;
2324 operand_type_match (i386_operand_type overlap
,
2325 i386_operand_type given
)
2327 i386_operand_type temp
= overlap
;
2329 temp
.bitfield
.unspecified
= 0;
2330 temp
.bitfield
.byte
= 0;
2331 temp
.bitfield
.word
= 0;
2332 temp
.bitfield
.dword
= 0;
2333 temp
.bitfield
.fword
= 0;
2334 temp
.bitfield
.qword
= 0;
2335 temp
.bitfield
.tbyte
= 0;
2336 temp
.bitfield
.xmmword
= 0;
2337 temp
.bitfield
.ymmword
= 0;
2338 temp
.bitfield
.zmmword
= 0;
2339 temp
.bitfield
.tmmword
= 0;
2340 if (operand_type_all_zero (&temp
))
2343 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
)
2347 i
.error
= operand_type_mismatch
;
2351 /* If given types g0 and g1 are registers they must be of the same type
2352 unless the expected operand type register overlap is null.
2353 Some Intel syntax memory operand size checking also happens here. */
2356 operand_type_register_match (i386_operand_type g0
,
2357 i386_operand_type t0
,
2358 i386_operand_type g1
,
2359 i386_operand_type t1
)
2361 if (g0
.bitfield
.class != Reg
2362 && g0
.bitfield
.class != RegSIMD
2363 && (!operand_type_check (g0
, anymem
)
2364 || g0
.bitfield
.unspecified
2365 || (t0
.bitfield
.class != Reg
2366 && t0
.bitfield
.class != RegSIMD
)))
2369 if (g1
.bitfield
.class != Reg
2370 && g1
.bitfield
.class != RegSIMD
2371 && (!operand_type_check (g1
, anymem
)
2372 || g1
.bitfield
.unspecified
2373 || (t1
.bitfield
.class != Reg
2374 && t1
.bitfield
.class != RegSIMD
)))
2377 if (g0
.bitfield
.byte
== g1
.bitfield
.byte
2378 && g0
.bitfield
.word
== g1
.bitfield
.word
2379 && g0
.bitfield
.dword
== g1
.bitfield
.dword
2380 && g0
.bitfield
.qword
== g1
.bitfield
.qword
2381 && g0
.bitfield
.xmmword
== g1
.bitfield
.xmmword
2382 && g0
.bitfield
.ymmword
== g1
.bitfield
.ymmword
2383 && g0
.bitfield
.zmmword
== g1
.bitfield
.zmmword
)
2386 if (!(t0
.bitfield
.byte
& t1
.bitfield
.byte
)
2387 && !(t0
.bitfield
.word
& t1
.bitfield
.word
)
2388 && !(t0
.bitfield
.dword
& t1
.bitfield
.dword
)
2389 && !(t0
.bitfield
.qword
& t1
.bitfield
.qword
)
2390 && !(t0
.bitfield
.xmmword
& t1
.bitfield
.xmmword
)
2391 && !(t0
.bitfield
.ymmword
& t1
.bitfield
.ymmword
)
2392 && !(t0
.bitfield
.zmmword
& t1
.bitfield
.zmmword
))
2395 i
.error
= register_type_mismatch
;
2400 static INLINE
unsigned int
2401 register_number (const reg_entry
*r
)
2403 unsigned int nr
= r
->reg_num
;
2405 if (r
->reg_flags
& RegRex
)
2408 if (r
->reg_flags
& RegVRex
)
2414 static INLINE
unsigned int
2415 mode_from_disp_size (i386_operand_type t
)
2417 if (t
.bitfield
.disp8
)
2419 else if (t
.bitfield
.disp16
2420 || t
.bitfield
.disp32
2421 || t
.bitfield
.disp32s
)
2428 fits_in_signed_byte (addressT num
)
2430 return num
+ 0x80 <= 0xff;
2434 fits_in_unsigned_byte (addressT num
)
2440 fits_in_unsigned_word (addressT num
)
2442 return num
<= 0xffff;
2446 fits_in_signed_word (addressT num
)
2448 return num
+ 0x8000 <= 0xffff;
2452 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED
)
2457 return num
+ 0x80000000 <= 0xffffffff;
2459 } /* fits_in_signed_long() */
2462 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED
)
2467 return num
<= 0xffffffff;
2469 } /* fits_in_unsigned_long() */
2472 fits_in_disp8 (offsetT num
)
2474 int shift
= i
.memshift
;
2480 mask
= (1 << shift
) - 1;
2482 /* Return 0 if NUM isn't properly aligned. */
2486 /* Check if NUM will fit in 8bit after shift. */
2487 return fits_in_signed_byte (num
>> shift
);
2491 fits_in_imm4 (offsetT num
)
2493 return (num
& 0xf) == num
;
2496 static i386_operand_type
2497 smallest_imm_type (offsetT num
)
2499 i386_operand_type t
;
2501 operand_type_set (&t
, 0);
2502 t
.bitfield
.imm64
= 1;
2504 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
2506 /* This code is disabled on the 486 because all the Imm1 forms
2507 in the opcode table are slower on the i486. They're the
2508 versions with the implicitly specified single-position
2509 displacement, which has another syntax if you really want to
2511 t
.bitfield
.imm1
= 1;
2512 t
.bitfield
.imm8
= 1;
2513 t
.bitfield
.imm8s
= 1;
2514 t
.bitfield
.imm16
= 1;
2515 t
.bitfield
.imm32
= 1;
2516 t
.bitfield
.imm32s
= 1;
2518 else if (fits_in_signed_byte (num
))
2520 t
.bitfield
.imm8
= 1;
2521 t
.bitfield
.imm8s
= 1;
2522 t
.bitfield
.imm16
= 1;
2523 t
.bitfield
.imm32
= 1;
2524 t
.bitfield
.imm32s
= 1;
2526 else if (fits_in_unsigned_byte (num
))
2528 t
.bitfield
.imm8
= 1;
2529 t
.bitfield
.imm16
= 1;
2530 t
.bitfield
.imm32
= 1;
2531 t
.bitfield
.imm32s
= 1;
2533 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
2535 t
.bitfield
.imm16
= 1;
2536 t
.bitfield
.imm32
= 1;
2537 t
.bitfield
.imm32s
= 1;
2539 else if (fits_in_signed_long (num
))
2541 t
.bitfield
.imm32
= 1;
2542 t
.bitfield
.imm32s
= 1;
2544 else if (fits_in_unsigned_long (num
))
2545 t
.bitfield
.imm32
= 1;
2551 offset_in_range (offsetT val
, int size
)
2557 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
2558 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
2559 case 4: mask
= ((addressT
) 2 << 31) - 1; break;
2561 case 8: mask
= ((addressT
) 2 << 63) - 1; break;
2566 if ((val
& ~mask
) != 0 && (val
& ~mask
) != ~mask
)
2568 char buf1
[40], buf2
[40];
2570 sprint_value (buf1
, val
);
2571 sprint_value (buf2
, val
& mask
);
2572 as_warn (_("%s shortened to %s"), buf1
, buf2
);
2587 a. PREFIX_EXIST if attempting to add a prefix where one from the
2588 same class already exists.
2589 b. PREFIX_LOCK if lock prefix is added.
2590 c. PREFIX_REP if rep/repne prefix is added.
2591 d. PREFIX_DS if ds prefix is added.
2592 e. PREFIX_OTHER if other prefix is added.
2595 static enum PREFIX_GROUP
2596 add_prefix (unsigned int prefix
)
2598 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2601 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2602 && flag_code
== CODE_64BIT
)
2604 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2605 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_R
)
2606 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_X
)
2607 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_B
))
2618 case DS_PREFIX_OPCODE
:
2621 case CS_PREFIX_OPCODE
:
2622 case ES_PREFIX_OPCODE
:
2623 case FS_PREFIX_OPCODE
:
2624 case GS_PREFIX_OPCODE
:
2625 case SS_PREFIX_OPCODE
:
2629 case REPNE_PREFIX_OPCODE
:
2630 case REPE_PREFIX_OPCODE
:
2635 case LOCK_PREFIX_OPCODE
:
2644 case ADDR_PREFIX_OPCODE
:
2648 case DATA_PREFIX_OPCODE
:
2652 if (i
.prefix
[q
] != 0)
2660 i
.prefix
[q
] |= prefix
;
2663 as_bad (_("same type of prefix used twice"));
2669 update_code_flag (int value
, int check
)
2671 PRINTF_LIKE ((*as_error
));
2673 flag_code
= (enum flag_code
) value
;
2674 if (flag_code
== CODE_64BIT
)
2676 cpu_arch_flags
.bitfield
.cpu64
= 1;
2677 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2681 cpu_arch_flags
.bitfield
.cpu64
= 0;
2682 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2684 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2687 as_error
= as_fatal
;
2690 (*as_error
) (_("64bit mode not supported on `%s'."),
2691 cpu_arch_name
? cpu_arch_name
: default_arch
);
2693 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2696 as_error
= as_fatal
;
2699 (*as_error
) (_("32bit mode not supported on `%s'."),
2700 cpu_arch_name
? cpu_arch_name
: default_arch
);
2702 stackop_size
= '\0';
/* Non-checking wrapper around update_code_flag (for .code16/32/64).  */

static void
set_code_flag (int value)
{
  update_code_flag (value, 0);
}
2712 set_16bit_gcc_code_flag (int new_code_flag
)
2714 flag_code
= (enum flag_code
) new_code_flag
;
2715 if (flag_code
!= CODE_16BIT
)
2717 cpu_arch_flags
.bitfield
.cpu64
= 0;
2718 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2719 stackop_size
= LONG_MNEM_SUFFIX
;
2723 set_intel_syntax (int syntax_flag
)
2725 /* Find out if register prefixing is specified. */
2726 int ask_naked_reg
= 0;
2729 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2732 int e
= get_symbol_name (&string
);
2734 if (strcmp (string
, "prefix") == 0)
2736 else if (strcmp (string
, "noprefix") == 0)
2739 as_bad (_("bad argument to syntax directive."));
2740 (void) restore_line_pointer (e
);
2742 demand_empty_rest_of_line ();
2744 intel_syntax
= syntax_flag
;
2746 if (ask_naked_reg
== 0)
2747 allow_naked_reg
= (intel_syntax
2748 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2750 allow_naked_reg
= (ask_naked_reg
< 0);
2752 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2754 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2755 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2756 register_prefix
= allow_naked_reg
? "" : "%";
2760 set_intel_mnemonic (int mnemonic_flag
)
2762 intel_mnemonic
= mnemonic_flag
;
2766 set_allow_index_reg (int flag
)
2768 allow_index_reg
= flag
;
2772 set_check (int what
)
2774 enum check_kind
*kind
;
2779 kind
= &operand_check
;
2790 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2793 int e
= get_symbol_name (&string
);
2795 if (strcmp (string
, "none") == 0)
2797 else if (strcmp (string
, "warning") == 0)
2798 *kind
= check_warning
;
2799 else if (strcmp (string
, "error") == 0)
2800 *kind
= check_error
;
2802 as_bad (_("bad argument to %s_check directive."), str
);
2803 (void) restore_line_pointer (e
);
2806 as_bad (_("missing argument for %s_check directive"), str
);
2808 demand_empty_rest_of_line ();
2812 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2813 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2815 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2816 static const char *arch
;
2818 /* Intel LIOM is only supported on ELF. */
2824 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2825 use default_arch. */
2826 arch
= cpu_arch_name
;
2828 arch
= default_arch
;
2831 /* If we are targeting Intel MCU, we must enable it. */
2832 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_IAMCU
2833 || new_flag
.bitfield
.cpuiamcu
)
2836 /* If we are targeting Intel L1OM, we must enable it. */
2837 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_L1OM
2838 || new_flag
.bitfield
.cpul1om
)
2841 /* If we are targeting Intel K1OM, we must enable it. */
2842 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_K1OM
2843 || new_flag
.bitfield
.cpuk1om
)
2846 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2851 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2855 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2858 int e
= get_symbol_name (&string
);
2860 i386_cpu_flags flags
;
2862 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2864 if (strcmp (string
, cpu_arch
[j
].name
) == 0)
2866 check_cpu_arch_compatible (string
, cpu_arch
[j
].flags
);
2870 cpu_arch_name
= cpu_arch
[j
].name
;
2871 cpu_sub_arch_name
= NULL
;
2872 cpu_arch_flags
= cpu_arch
[j
].flags
;
2873 if (flag_code
== CODE_64BIT
)
2875 cpu_arch_flags
.bitfield
.cpu64
= 1;
2876 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2880 cpu_arch_flags
.bitfield
.cpu64
= 0;
2881 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2883 cpu_arch_isa
= cpu_arch
[j
].type
;
2884 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
2885 if (!cpu_arch_tune_set
)
2887 cpu_arch_tune
= cpu_arch_isa
;
2888 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2893 flags
= cpu_flags_or (cpu_arch_flags
,
2896 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2898 if (cpu_sub_arch_name
)
2900 char *name
= cpu_sub_arch_name
;
2901 cpu_sub_arch_name
= concat (name
,
2903 (const char *) NULL
);
2907 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
2908 cpu_arch_flags
= flags
;
2909 cpu_arch_isa_flags
= flags
;
2913 = cpu_flags_or (cpu_arch_isa_flags
,
2915 (void) restore_line_pointer (e
);
2916 demand_empty_rest_of_line ();
2921 if (*string
== '.' && j
>= ARRAY_SIZE (cpu_arch
))
2923 /* Disable an ISA extension. */
2924 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
2925 if (strcmp (string
+ 1, cpu_noarch
[j
].name
) == 0)
2927 flags
= cpu_flags_and_not (cpu_arch_flags
,
2928 cpu_noarch
[j
].flags
);
2929 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2931 if (cpu_sub_arch_name
)
2933 char *name
= cpu_sub_arch_name
;
2934 cpu_sub_arch_name
= concat (name
, string
,
2935 (const char *) NULL
);
2939 cpu_sub_arch_name
= xstrdup (string
);
2940 cpu_arch_flags
= flags
;
2941 cpu_arch_isa_flags
= flags
;
2943 (void) restore_line_pointer (e
);
2944 demand_empty_rest_of_line ();
2948 j
= ARRAY_SIZE (cpu_arch
);
2951 if (j
>= ARRAY_SIZE (cpu_arch
))
2952 as_bad (_("no such architecture: `%s'"), string
);
2954 *input_line_pointer
= e
;
2957 as_bad (_("missing cpu architecture"));
2959 no_cond_jump_promotion
= 0;
2960 if (*input_line_pointer
== ','
2961 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2966 ++input_line_pointer
;
2967 e
= get_symbol_name (&string
);
2969 if (strcmp (string
, "nojumps") == 0)
2970 no_cond_jump_promotion
= 1;
2971 else if (strcmp (string
, "jumps") == 0)
2974 as_bad (_("no such architecture modifier: `%s'"), string
);
2976 (void) restore_line_pointer (e
);
2979 demand_empty_rest_of_line ();
2982 enum bfd_architecture
2985 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2987 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2988 || flag_code
!= CODE_64BIT
)
2989 as_fatal (_("Intel L1OM is 64bit ELF only"));
2990 return bfd_arch_l1om
;
2992 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2994 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2995 || flag_code
!= CODE_64BIT
)
2996 as_fatal (_("Intel K1OM is 64bit ELF only"));
2997 return bfd_arch_k1om
;
2999 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
3001 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
3002 || flag_code
== CODE_64BIT
)
3003 as_fatal (_("Intel MCU is 32bit ELF only"));
3004 return bfd_arch_iamcu
;
3007 return bfd_arch_i386
;
3013 if (!strncmp (default_arch
, "x86_64", 6))
3015 if (cpu_arch_isa
== PROCESSOR_L1OM
)
3017 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
3018 || default_arch
[6] != '\0')
3019 as_fatal (_("Intel L1OM is 64bit ELF only"));
3020 return bfd_mach_l1om
;
3022 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
3024 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
3025 || default_arch
[6] != '\0')
3026 as_fatal (_("Intel K1OM is 64bit ELF only"));
3027 return bfd_mach_k1om
;
3029 else if (default_arch
[6] == '\0')
3030 return bfd_mach_x86_64
;
3032 return bfd_mach_x64_32
;
3034 else if (!strcmp (default_arch
, "i386")
3035 || !strcmp (default_arch
, "iamcu"))
3037 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
3039 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
)
3040 as_fatal (_("Intel MCU is 32bit ELF only"));
3041 return bfd_mach_i386_iamcu
;
3044 return bfd_mach_i386_i386
;
3047 as_fatal (_("unknown architecture"));
3053 /* Support pseudo prefixes like {disp32}. */
3054 lex_type
['{'] = LEX_BEGIN_NAME
;
3056 /* Initialize op_hash hash table. */
3057 op_hash
= str_htab_create ();
3060 const insn_template
*optab
;
3061 templates
*core_optab
;
3063 /* Setup for loop. */
3065 core_optab
= XNEW (templates
);
3066 core_optab
->start
= optab
;
3071 if (optab
->name
== NULL
3072 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
3074 /* different name --> ship out current template list;
3075 add to hash table; & begin anew. */
3076 core_optab
->end
= optab
;
3077 if (str_hash_insert (op_hash
, (optab
- 1)->name
, core_optab
, 0))
3078 as_fatal (_("duplicate %s"), (optab
- 1)->name
);
3080 if (optab
->name
== NULL
)
3082 core_optab
= XNEW (templates
);
3083 core_optab
->start
= optab
;
3088 /* Initialize reg_hash hash table. */
3089 reg_hash
= str_htab_create ();
3091 const reg_entry
*regtab
;
3092 unsigned int regtab_size
= i386_regtab_size
;
3094 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
3096 if (str_hash_insert (reg_hash
, regtab
->reg_name
, regtab
, 0) != NULL
)
3097 as_fatal (_("duplicate %s"), regtab
->reg_name
);
3099 if (regtab
->reg_type
.bitfield
.class == RegMask
&& !regtab
->reg_num
)
3104 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
3109 for (c
= 0; c
< 256; c
++)
3114 mnemonic_chars
[c
] = c
;
3115 register_chars
[c
] = c
;
3116 operand_chars
[c
] = c
;
3118 else if (ISLOWER (c
))
3120 mnemonic_chars
[c
] = c
;
3121 register_chars
[c
] = c
;
3122 operand_chars
[c
] = c
;
3124 else if (ISUPPER (c
))
3126 mnemonic_chars
[c
] = TOLOWER (c
);
3127 register_chars
[c
] = mnemonic_chars
[c
];
3128 operand_chars
[c
] = c
;
3130 else if (c
== '{' || c
== '}')
3132 mnemonic_chars
[c
] = c
;
3133 operand_chars
[c
] = c
;
3135 #ifdef SVR4_COMMENT_CHARS
3136 else if (c
== '\\' && strchr (i386_comment_chars
, '/'))
3137 operand_chars
[c
] = c
;
3140 if (ISALPHA (c
) || ISDIGIT (c
))
3141 identifier_chars
[c
] = c
;
3144 identifier_chars
[c
] = c
;
3145 operand_chars
[c
] = c
;
3150 identifier_chars
['@'] = '@';
3153 identifier_chars
['?'] = '?';
3154 operand_chars
['?'] = '?';
3156 digit_chars
['-'] = '-';
3157 mnemonic_chars
['_'] = '_';
3158 mnemonic_chars
['-'] = '-';
3159 mnemonic_chars
['.'] = '.';
3160 identifier_chars
['_'] = '_';
3161 identifier_chars
['.'] = '.';
3163 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
3164 operand_chars
[(unsigned char) *p
] = *p
;
3167 if (flag_code
== CODE_64BIT
)
3169 #if defined (OBJ_COFF) && defined (TE_PE)
3170 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
3173 x86_dwarf2_return_column
= 16;
3175 x86_cie_data_alignment
= -8;
3179 x86_dwarf2_return_column
= 8;
3180 x86_cie_data_alignment
= -4;
3183 /* NB: FUSED_JCC_PADDING frag must have sufficient room so that it
3184 can be turned into BRANCH_PREFIX frag. */
3185 if (align_branch_prefix_size
> MAX_FUSED_JCC_PADDING_SIZE
)
3190 i386_print_statistics (FILE *file
)
3192 htab_print_statistics (file
, "i386 opcode", op_hash
);
3193 htab_print_statistics (file
, "i386 register", reg_hash
);
3198 /* Debugging routines for md_assemble. */
3199 static void pte (insn_template
*);
3200 static void pt (i386_operand_type
);
3201 static void pe (expressionS
*);
3202 static void ps (symbolS
*);
3205 pi (const char *line
, i386_insn
*x
)
3209 fprintf (stdout
, "%s: template ", line
);
3211 fprintf (stdout
, " address: base %s index %s scale %x\n",
3212 x
->base_reg
? x
->base_reg
->reg_name
: "none",
3213 x
->index_reg
? x
->index_reg
->reg_name
: "none",
3214 x
->log2_scale_factor
);
3215 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
3216 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
3217 fprintf (stdout
, " sib: base %x index %x scale %x\n",
3218 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
3219 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
3220 (x
->rex
& REX_W
) != 0,
3221 (x
->rex
& REX_R
) != 0,
3222 (x
->rex
& REX_X
) != 0,
3223 (x
->rex
& REX_B
) != 0);
3224 for (j
= 0; j
< x
->operands
; j
++)
3226 fprintf (stdout
, " #%d: ", j
+ 1);
3228 fprintf (stdout
, "\n");
3229 if (x
->types
[j
].bitfield
.class == Reg
3230 || x
->types
[j
].bitfield
.class == RegMMX
3231 || x
->types
[j
].bitfield
.class == RegSIMD
3232 || x
->types
[j
].bitfield
.class == RegMask
3233 || x
->types
[j
].bitfield
.class == SReg
3234 || x
->types
[j
].bitfield
.class == RegCR
3235 || x
->types
[j
].bitfield
.class == RegDR
3236 || x
->types
[j
].bitfield
.class == RegTR
3237 || x
->types
[j
].bitfield
.class == RegBND
)
3238 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
3239 if (operand_type_check (x
->types
[j
], imm
))
3241 if (operand_type_check (x
->types
[j
], disp
))
3242 pe (x
->op
[j
].disps
);
3247 pte (insn_template
*t
)
3249 static const unsigned char opc_pfx
[] = { 0, 0x66, 0xf3, 0xf2 };
3250 static const char *const opc_spc
[] = {
3251 NULL
, "0f", "0f38", "0f3a", NULL
, NULL
, NULL
, NULL
,
3252 "XOP08", "XOP09", "XOP0A",
3256 fprintf (stdout
, " %d operands ", t
->operands
);
3257 if (opc_pfx
[t
->opcode_modifier
.opcodeprefix
])
3258 fprintf (stdout
, "pfx %x ", opc_pfx
[t
->opcode_modifier
.opcodeprefix
]);
3259 if (opc_spc
[t
->opcode_modifier
.opcodespace
])
3260 fprintf (stdout
, "space %s ", opc_spc
[t
->opcode_modifier
.opcodespace
]);
3261 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
3262 if (t
->extension_opcode
!= None
)
3263 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
3264 if (t
->opcode_modifier
.d
)
3265 fprintf (stdout
, "D");
3266 if (t
->opcode_modifier
.w
)
3267 fprintf (stdout
, "W");
3268 fprintf (stdout
, "\n");
3269 for (j
= 0; j
< t
->operands
; j
++)
3271 fprintf (stdout
, " #%d type ", j
+ 1);
3272 pt (t
->operand_types
[j
]);
3273 fprintf (stdout
, "\n");
3280 fprintf (stdout
, " operation %d\n", e
->X_op
);
3281 fprintf (stdout
, " add_number %ld (%lx)\n",
3282 (long) e
->X_add_number
, (long) e
->X_add_number
);
3283 if (e
->X_add_symbol
)
3285 fprintf (stdout
, " add_symbol ");
3286 ps (e
->X_add_symbol
);
3287 fprintf (stdout
, "\n");
3291 fprintf (stdout
, " op_symbol ");
3292 ps (e
->X_op_symbol
);
3293 fprintf (stdout
, "\n");
3300 fprintf (stdout
, "%s type %s%s",
3302 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
3303 segment_name (S_GET_SEGMENT (s
)));
3306 static struct type_name
3308 i386_operand_type mask
;
3311 const type_names
[] =
3313 { OPERAND_TYPE_REG8
, "r8" },
3314 { OPERAND_TYPE_REG16
, "r16" },
3315 { OPERAND_TYPE_REG32
, "r32" },
3316 { OPERAND_TYPE_REG64
, "r64" },
3317 { OPERAND_TYPE_ACC8
, "acc8" },
3318 { OPERAND_TYPE_ACC16
, "acc16" },
3319 { OPERAND_TYPE_ACC32
, "acc32" },
3320 { OPERAND_TYPE_ACC64
, "acc64" },
3321 { OPERAND_TYPE_IMM8
, "i8" },
3322 { OPERAND_TYPE_IMM8
, "i8s" },
3323 { OPERAND_TYPE_IMM16
, "i16" },
3324 { OPERAND_TYPE_IMM32
, "i32" },
3325 { OPERAND_TYPE_IMM32S
, "i32s" },
3326 { OPERAND_TYPE_IMM64
, "i64" },
3327 { OPERAND_TYPE_IMM1
, "i1" },
3328 { OPERAND_TYPE_BASEINDEX
, "BaseIndex" },
3329 { OPERAND_TYPE_DISP8
, "d8" },
3330 { OPERAND_TYPE_DISP16
, "d16" },
3331 { OPERAND_TYPE_DISP32
, "d32" },
3332 { OPERAND_TYPE_DISP32S
, "d32s" },
3333 { OPERAND_TYPE_DISP64
, "d64" },
3334 { OPERAND_TYPE_INOUTPORTREG
, "InOutPortReg" },
3335 { OPERAND_TYPE_SHIFTCOUNT
, "ShiftCount" },
3336 { OPERAND_TYPE_CONTROL
, "control reg" },
3337 { OPERAND_TYPE_TEST
, "test reg" },
3338 { OPERAND_TYPE_DEBUG
, "debug reg" },
3339 { OPERAND_TYPE_FLOATREG
, "FReg" },
3340 { OPERAND_TYPE_FLOATACC
, "FAcc" },
3341 { OPERAND_TYPE_SREG
, "SReg" },
3342 { OPERAND_TYPE_REGMMX
, "rMMX" },
3343 { OPERAND_TYPE_REGXMM
, "rXMM" },
3344 { OPERAND_TYPE_REGYMM
, "rYMM" },
3345 { OPERAND_TYPE_REGZMM
, "rZMM" },
3346 { OPERAND_TYPE_REGTMM
, "rTMM" },
3347 { OPERAND_TYPE_REGMASK
, "Mask reg" },
3351 pt (i386_operand_type t
)
3354 i386_operand_type a
;
3356 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
3358 a
= operand_type_and (t
, type_names
[j
].mask
);
3359 if (operand_type_equal (&a
, &type_names
[j
].mask
))
3360 fprintf (stdout
, "%s, ", type_names
[j
].name
);
3365 #endif /* DEBUG386 */
3367 static bfd_reloc_code_real_type
3368 reloc (unsigned int size
,
3371 bfd_reloc_code_real_type other
)
3373 if (other
!= NO_RELOC
)
3375 reloc_howto_type
*rel
;
3380 case BFD_RELOC_X86_64_GOT32
:
3381 return BFD_RELOC_X86_64_GOT64
;
3383 case BFD_RELOC_X86_64_GOTPLT64
:
3384 return BFD_RELOC_X86_64_GOTPLT64
;
3386 case BFD_RELOC_X86_64_PLTOFF64
:
3387 return BFD_RELOC_X86_64_PLTOFF64
;
3389 case BFD_RELOC_X86_64_GOTPC32
:
3390 other
= BFD_RELOC_X86_64_GOTPC64
;
3392 case BFD_RELOC_X86_64_GOTPCREL
:
3393 other
= BFD_RELOC_X86_64_GOTPCREL64
;
3395 case BFD_RELOC_X86_64_TPOFF32
:
3396 other
= BFD_RELOC_X86_64_TPOFF64
;
3398 case BFD_RELOC_X86_64_DTPOFF32
:
3399 other
= BFD_RELOC_X86_64_DTPOFF64
;
3405 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3406 if (other
== BFD_RELOC_SIZE32
)
3409 other
= BFD_RELOC_SIZE64
;
3412 as_bad (_("there are no pc-relative size relocations"));
3418 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
3419 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
3422 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
3424 as_bad (_("unknown relocation (%u)"), other
);
3425 else if (size
!= bfd_get_reloc_size (rel
))
3426 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3427 bfd_get_reloc_size (rel
),
3429 else if (pcrel
&& !rel
->pc_relative
)
3430 as_bad (_("non-pc-relative relocation for pc-relative field"));
3431 else if ((rel
->complain_on_overflow
== complain_overflow_signed
3433 || (rel
->complain_on_overflow
== complain_overflow_unsigned
3435 as_bad (_("relocated field and relocation type differ in signedness"));
3444 as_bad (_("there are no unsigned pc-relative relocations"));
3447 case 1: return BFD_RELOC_8_PCREL
;
3448 case 2: return BFD_RELOC_16_PCREL
;
3449 case 4: return BFD_RELOC_32_PCREL
;
3450 case 8: return BFD_RELOC_64_PCREL
;
3452 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
3459 case 4: return BFD_RELOC_X86_64_32S
;
3464 case 1: return BFD_RELOC_8
;
3465 case 2: return BFD_RELOC_16
;
3466 case 4: return BFD_RELOC_32
;
3467 case 8: return BFD_RELOC_64
;
3469 as_bad (_("cannot do %s %u byte relocation"),
3470 sign
> 0 ? "signed" : "unsigned", size
);
3476 /* Here we decide which fixups can be adjusted to make them relative to
3477 the beginning of the section instead of the symbol. Basically we need
3478 to make sure that the dynamic relocations are done correctly, so in
3479 some cases we force the original symbol to be used. */
3482 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
3484 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3488 /* Don't adjust pc-relative references to merge sections in 64-bit
3490 if (use_rela_relocations
3491 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
3495 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3496 and changed later by validate_fix. */
3497 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
3498 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
3501 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3502 for size relocations. */
3503 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
3504 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
3505 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
3506 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
3507 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32X
3508 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
3509 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
3510 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
3511 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
3512 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
3513 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
3514 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
3515 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
3516 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
3517 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
3518 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
3519 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
3520 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCRELX
3521 || fixP
->fx_r_type
== BFD_RELOC_X86_64_REX_GOTPCRELX
3522 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
3523 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
3524 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
3525 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
3526 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
3527 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
3528 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
3529 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
3530 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
3531 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
3532 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
3533 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
/* Classify an x87 mnemonic for Intel syntax operand-size purposes:
   0 = not a floating point math op, 1 = FP math op, 2 = integer op,
   3 = control/state op.  */

static int
intel_float_operand (const char *mnemonic)
{
  /* Note that the value returned is meaningful only for opcodes with (memory)
     operands, hence the code here is free to improperly handle opcodes that
     have no operands (for better performance and smaller code). */

  if (mnemonic[0] != 'f')
    return 0; /* non-math */

  switch (mnemonic[1])
    {
    /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
       the fs segment override prefix not currently handled because no
       call path can make opcodes without operands get here */
    case 'i':
      return 2 /* integer op */;
    case 'l':
      if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
        return 3; /* fldcw/fldenv */
      break;
    case 'n':
      if (mnemonic[2] != 'o' /* fnop */)
        return 3; /* non-waiting control op */
      break;
    case 'r':
      if (mnemonic[2] == 's')
        return 3; /* frstor/frstpm */
      break;
    case 's':
      if (mnemonic[2] == 'a')
        return 3; /* fsave */
      if (mnemonic[2] == 't')
        {
          switch (mnemonic[3])
            {
            case 'c': /* fstcw */
            case 'd': /* fstdw */
            case 'e': /* fstenv */
            case 's': /* fsts[gw] */
              return 3;
            }
        }
      break;
    case 'x':
      if (mnemonic[2] == 'r' || mnemonic[2] == 's')
        return 0; /* fxsave/fxrstor are not really math ops */
      break;
    }

  return 1;
}
3593 install_template (const insn_template
*t
)
3599 /* Note that for pseudo prefixes this produces a length of 1. But for them
3600 the length isn't interesting at all. */
3601 for (l
= 1; l
< 4; ++l
)
3602 if (!(t
->base_opcode
>> (8 * l
)))
3605 i
.opcode_length
= l
;
3608 /* Build the VEX prefix. */
3611 build_vex_prefix (const insn_template
*t
)
3613 unsigned int register_specifier
;
3614 unsigned int vector_length
;
3617 /* Check register specifier. */
3618 if (i
.vex
.register_specifier
)
3620 register_specifier
=
3621 ~register_number (i
.vex
.register_specifier
) & 0xf;
3622 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
3625 register_specifier
= 0xf;
3627 /* Use 2-byte VEX prefix by swapping destination and source operand
3628 if there are more than 1 register operand. */
3629 if (i
.reg_operands
> 1
3630 && i
.vec_encoding
!= vex_encoding_vex3
3631 && i
.dir_encoding
== dir_encoding_default
3632 && i
.operands
== i
.reg_operands
3633 && operand_type_equal (&i
.types
[0], &i
.types
[i
.operands
- 1])
3634 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
3635 && (i
.tm
.opcode_modifier
.load
|| i
.tm
.opcode_modifier
.d
)
3638 unsigned int xchg
= i
.operands
- 1;
3639 union i386_op temp_op
;
3640 i386_operand_type temp_type
;
3642 temp_type
= i
.types
[xchg
];
3643 i
.types
[xchg
] = i
.types
[0];
3644 i
.types
[0] = temp_type
;
3645 temp_op
= i
.op
[xchg
];
3646 i
.op
[xchg
] = i
.op
[0];
3649 gas_assert (i
.rm
.mode
== 3);
3653 i
.rm
.regmem
= i
.rm
.reg
;
3656 if (i
.tm
.opcode_modifier
.d
)
3657 i
.tm
.base_opcode
^= (i
.tm
.base_opcode
& 0xee) != 0x6e
3658 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
3659 else /* Use the next insn. */
3660 install_template (&t
[1]);
3663 /* Use 2-byte VEX prefix by swapping commutative source operands if there
3664 are no memory operands and at least 3 register ones. */
3665 if (i
.reg_operands
>= 3
3666 && i
.vec_encoding
!= vex_encoding_vex3
3667 && i
.reg_operands
== i
.operands
- i
.imm_operands
3668 && i
.tm
.opcode_modifier
.vex
3669 && i
.tm
.opcode_modifier
.commutative
3670 && (i
.tm
.opcode_modifier
.sse2avx
|| optimize
> 1)
3672 && i
.vex
.register_specifier
3673 && !(i
.vex
.register_specifier
->reg_flags
& RegRex
))
3675 unsigned int xchg
= i
.operands
- i
.reg_operands
;
3676 union i386_op temp_op
;
3677 i386_operand_type temp_type
;
3679 gas_assert (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
);
3680 gas_assert (!i
.tm
.opcode_modifier
.sae
);
3681 gas_assert (operand_type_equal (&i
.types
[i
.operands
- 2],
3682 &i
.types
[i
.operands
- 3]));
3683 gas_assert (i
.rm
.mode
== 3);
3685 temp_type
= i
.types
[xchg
];
3686 i
.types
[xchg
] = i
.types
[xchg
+ 1];
3687 i
.types
[xchg
+ 1] = temp_type
;
3688 temp_op
= i
.op
[xchg
];
3689 i
.op
[xchg
] = i
.op
[xchg
+ 1];
3690 i
.op
[xchg
+ 1] = temp_op
;
3693 xchg
= i
.rm
.regmem
| 8;
3694 i
.rm
.regmem
= ~register_specifier
& 0xf;
3695 gas_assert (!(i
.rm
.regmem
& 8));
3696 i
.vex
.register_specifier
+= xchg
- i
.rm
.regmem
;
3697 register_specifier
= ~xchg
& 0xf;
3700 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3701 vector_length
= avxscalar
;
3702 else if (i
.tm
.opcode_modifier
.vex
== VEX256
)
3708 /* Determine vector length from the last multi-length vector
3711 for (op
= t
->operands
; op
--;)
3712 if (t
->operand_types
[op
].bitfield
.xmmword
3713 && t
->operand_types
[op
].bitfield
.ymmword
3714 && i
.types
[op
].bitfield
.ymmword
)
3721 /* Check the REX.W bit and VEXW. */
3722 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3723 w
= (vexwig
== vexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3724 else if (i
.tm
.opcode_modifier
.vexw
)
3725 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3727 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: vexwig
== vexw1
) ? 1 : 0;
3729 /* Use 2-byte VEX prefix if possible. */
3731 && i
.vec_encoding
!= vex_encoding_vex3
3732 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
3733 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
3735 /* 2-byte VEX prefix. */
3739 i
.vex
.bytes
[0] = 0xc5;
3741 /* Check the REX.R bit. */
3742 r
= (i
.rex
& REX_R
) ? 0 : 1;
3743 i
.vex
.bytes
[1] = (r
<< 7
3744 | register_specifier
<< 3
3745 | vector_length
<< 2
3746 | i
.tm
.opcode_modifier
.opcodeprefix
);
3750 /* 3-byte VEX prefix. */
3753 switch (i
.tm
.opcode_modifier
.opcodespace
)
3758 i
.vex
.bytes
[0] = 0xc4;
3763 i
.vex
.bytes
[0] = 0x8f;
3769 /* The high 3 bits of the second VEX byte are 1's compliment
3770 of RXB bits from REX. */
3771 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | i
.tm
.opcode_modifier
.opcodespace
;
3773 i
.vex
.bytes
[2] = (w
<< 7
3774 | register_specifier
<< 3
3775 | vector_length
<< 2
3776 | i
.tm
.opcode_modifier
.opcodeprefix
);
3780 static INLINE bfd_boolean
3781 is_evex_encoding (const insn_template
*t
)
3783 return t
->opcode_modifier
.evex
|| t
->opcode_modifier
.disp8memshift
3784 || t
->opcode_modifier
.broadcast
|| t
->opcode_modifier
.masking
3785 || t
->opcode_modifier
.sae
;
3788 static INLINE bfd_boolean
3789 is_any_vex_encoding (const insn_template
*t
)
3791 return t
->opcode_modifier
.vex
|| is_evex_encoding (t
);
3794 /* Build the EVEX prefix. */
3797 build_evex_prefix (void)
3799 unsigned int register_specifier
, w
;
3800 rex_byte vrex_used
= 0;
3802 /* Check register specifier. */
3803 if (i
.vex
.register_specifier
)
3805 gas_assert ((i
.vrex
& REX_X
) == 0);
3807 register_specifier
= i
.vex
.register_specifier
->reg_num
;
3808 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
3809 register_specifier
+= 8;
3810 /* The upper 16 registers are encoded in the fourth byte of the
3812 if (!(i
.vex
.register_specifier
->reg_flags
& RegVRex
))
3813 i
.vex
.bytes
[3] = 0x8;
3814 register_specifier
= ~register_specifier
& 0xf;
3818 register_specifier
= 0xf;
3820 /* Encode upper 16 vector index register in the fourth byte of
3822 if (!(i
.vrex
& REX_X
))
3823 i
.vex
.bytes
[3] = 0x8;
3828 /* 4 byte EVEX prefix. */
3830 i
.vex
.bytes
[0] = 0x62;
3832 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3834 gas_assert (i
.tm
.opcode_modifier
.opcodespace
>= SPACE_0F
);
3835 gas_assert (i
.tm
.opcode_modifier
.opcodespace
<= SPACE_0F3A
);
3836 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | i
.tm
.opcode_modifier
.opcodespace
;
3838 /* The fifth bit of the second EVEX byte is 1's complement of the
3839 REX_R bit in VREX. */
3840 if (!(i
.vrex
& REX_R
))
3841 i
.vex
.bytes
[1] |= 0x10;
3845 if ((i
.reg_operands
+ i
.imm_operands
) == i
.operands
)
3847 /* When all operands are registers, the REX_X bit in REX is not
3848 used. We reuse it to encode the upper 16 registers, which is
3849 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3850 as 1's complement. */
3851 if ((i
.vrex
& REX_B
))
3854 i
.vex
.bytes
[1] &= ~0x40;
3858 /* EVEX instructions shouldn't need the REX prefix. */
3859 i
.vrex
&= ~vrex_used
;
3860 gas_assert (i
.vrex
== 0);
3862 /* Check the REX.W bit and VEXW. */
3863 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3864 w
= (evexwig
== evexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3865 else if (i
.tm
.opcode_modifier
.vexw
)
3866 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3868 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: evexwig
== evexw1
) ? 1 : 0;
3870 /* The third byte of the EVEX prefix. */
3871 i
.vex
.bytes
[2] = ((w
<< 7)
3872 | (register_specifier
<< 3)
3873 | 4 /* Encode the U bit. */
3874 | i
.tm
.opcode_modifier
.opcodeprefix
);
3876 /* The fourth byte of the EVEX prefix. */
3877 /* The zeroing-masking bit. */
3878 if (i
.mask
.reg
&& i
.mask
.zeroing
)
3879 i
.vex
.bytes
[3] |= 0x80;
3881 /* Don't always set the broadcast bit if there is no RC. */
3884 /* Encode the vector length. */
3885 unsigned int vec_length
;
3887 if (!i
.tm
.opcode_modifier
.evex
3888 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
3892 /* Determine vector length from the last multi-length vector
3894 for (op
= i
.operands
; op
--;)
3895 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
3896 + i
.tm
.operand_types
[op
].bitfield
.ymmword
3897 + i
.tm
.operand_types
[op
].bitfield
.zmmword
> 1)
3899 if (i
.types
[op
].bitfield
.zmmword
)
3901 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3904 else if (i
.types
[op
].bitfield
.ymmword
)
3906 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3909 else if (i
.types
[op
].bitfield
.xmmword
)
3911 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3914 else if (i
.broadcast
&& op
== i
.broadcast
->operand
)
3916 switch (i
.broadcast
->bytes
)
3919 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3922 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3925 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3934 if (op
>= MAX_OPERANDS
)
3938 switch (i
.tm
.opcode_modifier
.evex
)
3940 case EVEXLIG
: /* LL' is ignored */
3941 vec_length
= evexlig
<< 5;
3944 vec_length
= 0 << 5;
3947 vec_length
= 1 << 5;
3950 vec_length
= 2 << 5;
3956 i
.vex
.bytes
[3] |= vec_length
;
3957 /* Encode the broadcast bit. */
3959 i
.vex
.bytes
[3] |= 0x10;
3963 if (i
.rounding
->type
!= saeonly
)
3964 i
.vex
.bytes
[3] |= 0x10 | (i
.rounding
->type
<< 5);
3966 i
.vex
.bytes
[3] |= 0x10 | (evexrcig
<< 5);
3970 i
.vex
.bytes
[3] |= i
.mask
.reg
->reg_num
;
3974 process_immext (void)
3978 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3979 which is coded in the same place as an 8-bit immediate field
3980 would be. Here we fake an 8-bit immediate operand from the
3981 opcode suffix stored in tm.extension_opcode.
3983 AVX instructions also use this encoding, for some of
3984 3 argument instructions. */
3986 gas_assert (i
.imm_operands
<= 1
3988 || (is_any_vex_encoding (&i
.tm
)
3989 && i
.operands
<= 4)));
3991 exp
= &im_expressions
[i
.imm_operands
++];
3992 i
.op
[i
.operands
].imms
= exp
;
3993 i
.types
[i
.operands
] = imm8
;
3995 exp
->X_op
= O_constant
;
3996 exp
->X_add_number
= i
.tm
.extension_opcode
;
3997 i
.tm
.extension_opcode
= None
;
4004 switch (i
.tm
.opcode_modifier
.prefixok
)
4012 as_bad (_("invalid instruction `%s' after `%s'"),
4013 i
.tm
.name
, i
.hle_prefix
);
4016 if (i
.prefix
[LOCK_PREFIX
])
4018 as_bad (_("missing `lock' with `%s'"), i
.hle_prefix
);
4022 case PrefixHLERelease
:
4023 if (i
.prefix
[HLE_PREFIX
] != XRELEASE_PREFIX_OPCODE
)
4025 as_bad (_("instruction `%s' after `xacquire' not allowed"),
4029 if (i
.mem_operands
== 0 || !(i
.flags
[i
.operands
- 1] & Operand_Mem
))
4031 as_bad (_("memory destination needed for instruction `%s'"
4032 " after `xrelease'"), i
.tm
.name
);
4039 /* Try the shortest encoding by shortening operand size. */
4042 optimize_encoding (void)
4046 if (optimize_for_space
4047 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4048 && i
.reg_operands
== 1
4049 && i
.imm_operands
== 1
4050 && !i
.types
[1].bitfield
.byte
4051 && i
.op
[0].imms
->X_op
== O_constant
4052 && fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4053 && (i
.tm
.base_opcode
== 0xa8
4054 || (i
.tm
.base_opcode
== 0xf6
4055 && i
.tm
.extension_opcode
== 0x0)))
4058 test $imm7, %r64/%r32/%r16 -> test $imm7, %r8
4060 unsigned int base_regnum
= i
.op
[1].regs
->reg_num
;
4061 if (flag_code
== CODE_64BIT
|| base_regnum
< 4)
4063 i
.types
[1].bitfield
.byte
= 1;
4064 /* Ignore the suffix. */
4066 /* Convert to byte registers. */
4067 if (i
.types
[1].bitfield
.word
)
4069 else if (i
.types
[1].bitfield
.dword
)
4073 if (!(i
.op
[1].regs
->reg_flags
& RegRex
) && base_regnum
< 4)
4078 else if (flag_code
== CODE_64BIT
4079 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4080 && ((i
.types
[1].bitfield
.qword
4081 && i
.reg_operands
== 1
4082 && i
.imm_operands
== 1
4083 && i
.op
[0].imms
->X_op
== O_constant
4084 && ((i
.tm
.base_opcode
== 0xb8
4085 && i
.tm
.extension_opcode
== None
4086 && fits_in_unsigned_long (i
.op
[0].imms
->X_add_number
))
4087 || (fits_in_imm31 (i
.op
[0].imms
->X_add_number
)
4088 && ((i
.tm
.base_opcode
== 0x24
4089 || i
.tm
.base_opcode
== 0xa8)
4090 || (i
.tm
.base_opcode
== 0x80
4091 && i
.tm
.extension_opcode
== 0x4)
4092 || ((i
.tm
.base_opcode
== 0xf6
4093 || (i
.tm
.base_opcode
| 1) == 0xc7)
4094 && i
.tm
.extension_opcode
== 0x0)))
4095 || (fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4096 && i
.tm
.base_opcode
== 0x83
4097 && i
.tm
.extension_opcode
== 0x4)))
4098 || (i
.types
[0].bitfield
.qword
4099 && ((i
.reg_operands
== 2
4100 && i
.op
[0].regs
== i
.op
[1].regs
4101 && (i
.tm
.base_opcode
== 0x30
4102 || i
.tm
.base_opcode
== 0x28))
4103 || (i
.reg_operands
== 1
4105 && i
.tm
.base_opcode
== 0x30)))))
4108 andq $imm31, %r64 -> andl $imm31, %r32
4109 andq $imm7, %r64 -> andl $imm7, %r32
4110 testq $imm31, %r64 -> testl $imm31, %r32
4111 xorq %r64, %r64 -> xorl %r32, %r32
4112 subq %r64, %r64 -> subl %r32, %r32
4113 movq $imm31, %r64 -> movl $imm31, %r32
4114 movq $imm32, %r64 -> movl $imm32, %r32
4116 i
.tm
.opcode_modifier
.norex64
= 1;
4117 if (i
.tm
.base_opcode
== 0xb8 || (i
.tm
.base_opcode
| 1) == 0xc7)
4120 movq $imm31, %r64 -> movl $imm31, %r32
4121 movq $imm32, %r64 -> movl $imm32, %r32
4123 i
.tm
.operand_types
[0].bitfield
.imm32
= 1;
4124 i
.tm
.operand_types
[0].bitfield
.imm32s
= 0;
4125 i
.tm
.operand_types
[0].bitfield
.imm64
= 0;
4126 i
.types
[0].bitfield
.imm32
= 1;
4127 i
.types
[0].bitfield
.imm32s
= 0;
4128 i
.types
[0].bitfield
.imm64
= 0;
4129 i
.types
[1].bitfield
.dword
= 1;
4130 i
.types
[1].bitfield
.qword
= 0;
4131 if ((i
.tm
.base_opcode
| 1) == 0xc7)
4134 movq $imm31, %r64 -> movl $imm31, %r32
4136 i
.tm
.base_opcode
= 0xb8;
4137 i
.tm
.extension_opcode
= None
;
4138 i
.tm
.opcode_modifier
.w
= 0;
4139 i
.tm
.opcode_modifier
.modrm
= 0;
4143 else if (optimize
> 1
4144 && !optimize_for_space
4145 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4146 && i
.reg_operands
== 2
4147 && i
.op
[0].regs
== i
.op
[1].regs
4148 && ((i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x8
4149 || (i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x20)
4150 && (flag_code
!= CODE_64BIT
|| !i
.types
[0].bitfield
.dword
))
4153 andb %rN, %rN -> testb %rN, %rN
4154 andw %rN, %rN -> testw %rN, %rN
4155 andq %rN, %rN -> testq %rN, %rN
4156 orb %rN, %rN -> testb %rN, %rN
4157 orw %rN, %rN -> testw %rN, %rN
4158 orq %rN, %rN -> testq %rN, %rN
4160 and outside of 64-bit mode
4162 andl %rN, %rN -> testl %rN, %rN
4163 orl %rN, %rN -> testl %rN, %rN
4165 i
.tm
.base_opcode
= 0x84 | (i
.tm
.base_opcode
& 1);
4167 else if (i
.reg_operands
== 3
4168 && i
.op
[0].regs
== i
.op
[1].regs
4169 && !i
.types
[2].bitfield
.xmmword
4170 && (i
.tm
.opcode_modifier
.vex
4171 || ((!i
.mask
.reg
|| i
.mask
.zeroing
)
4173 && is_evex_encoding (&i
.tm
)
4174 && (i
.vec_encoding
!= vex_encoding_evex
4175 || cpu_arch_isa_flags
.bitfield
.cpuavx512vl
4176 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
4177 || (i
.tm
.operand_types
[2].bitfield
.zmmword
4178 && i
.types
[2].bitfield
.ymmword
))))
4179 && ((i
.tm
.base_opcode
== 0x55
4180 || i
.tm
.base_opcode
== 0x57
4181 || i
.tm
.base_opcode
== 0xdf
4182 || i
.tm
.base_opcode
== 0xef
4183 || i
.tm
.base_opcode
== 0xf8
4184 || i
.tm
.base_opcode
== 0xf9
4185 || i
.tm
.base_opcode
== 0xfa
4186 || i
.tm
.base_opcode
== 0xfb
4187 || i
.tm
.base_opcode
== 0x42
4188 || i
.tm
.base_opcode
== 0x47)
4189 && i
.tm
.extension_opcode
== None
))
4192 VOP, one of vandnps, vandnpd, vxorps, vxorpd, vpsubb, vpsubd,
4194 EVEX VOP %zmmM, %zmmM, %zmmN
4195 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4196 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4197 EVEX VOP %ymmM, %ymmM, %ymmN
4198 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4199 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4200 VEX VOP %ymmM, %ymmM, %ymmN
4201 -> VEX VOP %xmmM, %xmmM, %xmmN
4202 VOP, one of vpandn and vpxor:
4203 VEX VOP %ymmM, %ymmM, %ymmN
4204 -> VEX VOP %xmmM, %xmmM, %xmmN
4205 VOP, one of vpandnd and vpandnq:
4206 EVEX VOP %zmmM, %zmmM, %zmmN
4207 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4208 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4209 EVEX VOP %ymmM, %ymmM, %ymmN
4210 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4211 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4212 VOP, one of vpxord and vpxorq:
4213 EVEX VOP %zmmM, %zmmM, %zmmN
4214 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4215 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4216 EVEX VOP %ymmM, %ymmM, %ymmN
4217 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4218 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4219 VOP, one of kxord and kxorq:
4220 VEX VOP %kM, %kM, %kN
4221 -> VEX kxorw %kM, %kM, %kN
4222 VOP, one of kandnd and kandnq:
4223 VEX VOP %kM, %kM, %kN
4224 -> VEX kandnw %kM, %kM, %kN
4226 if (is_evex_encoding (&i
.tm
))
4228 if (i
.vec_encoding
!= vex_encoding_evex
)
4230 i
.tm
.opcode_modifier
.vex
= VEX128
;
4231 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4232 i
.tm
.opcode_modifier
.evex
= 0;
4234 else if (optimize
> 1)
4235 i
.tm
.opcode_modifier
.evex
= EVEX128
;
4239 else if (i
.tm
.operand_types
[0].bitfield
.class == RegMask
)
4241 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_NONE
;
4242 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4245 i
.tm
.opcode_modifier
.vex
= VEX128
;
4247 if (i
.tm
.opcode_modifier
.vex
)
4248 for (j
= 0; j
< 3; j
++)
4250 i
.types
[j
].bitfield
.xmmword
= 1;
4251 i
.types
[j
].bitfield
.ymmword
= 0;
4254 else if (i
.vec_encoding
!= vex_encoding_evex
4255 && !i
.types
[0].bitfield
.zmmword
4256 && !i
.types
[1].bitfield
.zmmword
4259 && is_evex_encoding (&i
.tm
)
4260 && ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x6f
4261 || (i
.tm
.base_opcode
& ~4) == 0xdb
4262 || (i
.tm
.base_opcode
& ~4) == 0xeb)
4263 && i
.tm
.extension_opcode
== None
)
4266 VOP, one of vmovdqa32, vmovdqa64, vmovdqu8, vmovdqu16,
4267 vmovdqu32 and vmovdqu64:
4268 EVEX VOP %xmmM, %xmmN
4269 -> VEX vmovdqa|vmovdqu %xmmM, %xmmN (M and N < 16)
4270 EVEX VOP %ymmM, %ymmN
4271 -> VEX vmovdqa|vmovdqu %ymmM, %ymmN (M and N < 16)
4273 -> VEX vmovdqa|vmovdqu %xmmM, mem (M < 16)
4275 -> VEX vmovdqa|vmovdqu %ymmM, mem (M < 16)
4277 -> VEX mvmovdqa|vmovdquem, %xmmN (N < 16)
4279 -> VEX vmovdqa|vmovdqu mem, %ymmN (N < 16)
4280 VOP, one of vpand, vpandn, vpor, vpxor:
4281 EVEX VOP{d,q} %xmmL, %xmmM, %xmmN
4282 -> VEX VOP %xmmL, %xmmM, %xmmN (L, M, and N < 16)
4283 EVEX VOP{d,q} %ymmL, %ymmM, %ymmN
4284 -> VEX VOP %ymmL, %ymmM, %ymmN (L, M, and N < 16)
4285 EVEX VOP{d,q} mem, %xmmM, %xmmN
4286 -> VEX VOP mem, %xmmM, %xmmN (M and N < 16)
4287 EVEX VOP{d,q} mem, %ymmM, %ymmN
4288 -> VEX VOP mem, %ymmM, %ymmN (M and N < 16)
4290 for (j
= 0; j
< i
.operands
; j
++)
4291 if (operand_type_check (i
.types
[j
], disp
)
4292 && i
.op
[j
].disps
->X_op
== O_constant
)
4294 /* Since the VEX prefix has 2 or 3 bytes, the EVEX prefix
4295 has 4 bytes, EVEX Disp8 has 1 byte and VEX Disp32 has 4
4296 bytes, we choose EVEX Disp8 over VEX Disp32. */
4297 int evex_disp8
, vex_disp8
;
4298 unsigned int memshift
= i
.memshift
;
4299 offsetT n
= i
.op
[j
].disps
->X_add_number
;
4301 evex_disp8
= fits_in_disp8 (n
);
4303 vex_disp8
= fits_in_disp8 (n
);
4304 if (evex_disp8
!= vex_disp8
)
4306 i
.memshift
= memshift
;
4310 i
.types
[j
].bitfield
.disp8
= vex_disp8
;
4313 if ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x6f
4314 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
)
4315 i
.tm
.opcode_modifier
.opcodeprefix
= PREFIX_0XF3
;
4316 i
.tm
.opcode_modifier
.vex
4317 = i
.types
[0].bitfield
.ymmword
? VEX256
: VEX128
;
4318 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4319 /* VPAND, VPOR, and VPXOR are commutative. */
4320 if (i
.reg_operands
== 3 && i
.tm
.base_opcode
!= 0xdf)
4321 i
.tm
.opcode_modifier
.commutative
= 1;
4322 i
.tm
.opcode_modifier
.evex
= 0;
4323 i
.tm
.opcode_modifier
.masking
= 0;
4324 i
.tm
.opcode_modifier
.broadcast
= 0;
4325 i
.tm
.opcode_modifier
.disp8memshift
= 0;
4328 i
.types
[j
].bitfield
.disp8
4329 = fits_in_disp8 (i
.op
[j
].disps
->X_add_number
);
4333 /* Return non-zero for load instruction. */
4339 int any_vex_p
= is_any_vex_encoding (&i
.tm
);
4340 unsigned int base_opcode
= i
.tm
.base_opcode
| 1;
4344 /* Anysize insns: lea, invlpg, clflush, prefetchnta, prefetcht0,
4345 prefetcht1, prefetcht2, prefetchtw, bndmk, bndcl, bndcu, bndcn,
4346 bndstx, bndldx, prefetchwt1, clflushopt, clwb, cldemote. */
4347 if (i
.tm
.opcode_modifier
.anysize
)
4351 if (strcmp (i
.tm
.name
, "pop") == 0)
4355 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
4358 if (i
.tm
.base_opcode
== 0x9d
4359 || i
.tm
.base_opcode
== 0x61)
4362 /* movs, cmps, lods, scas. */
4363 if ((i
.tm
.base_opcode
| 0xb) == 0xaf)
4367 if (base_opcode
== 0x6f
4368 || i
.tm
.base_opcode
== 0xd7)
4370 /* NB: For AMD-specific insns with implicit memory operands,
4371 they're intentionally not covered. */
4374 /* No memory operand. */
4375 if (!i
.mem_operands
)
4381 if (i
.tm
.base_opcode
== 0xae
4382 && i
.tm
.opcode_modifier
.vex
4383 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4384 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
4385 && i
.tm
.extension_opcode
== 2)
4388 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
4390 /* test, not, neg, mul, imul, div, idiv. */
4391 if ((i
.tm
.base_opcode
== 0xf6 || i
.tm
.base_opcode
== 0xf7)
4392 && i
.tm
.extension_opcode
!= 1)
4396 if (base_opcode
== 0xff && i
.tm
.extension_opcode
<= 1)
4399 /* add, or, adc, sbb, and, sub, xor, cmp. */
4400 if (i
.tm
.base_opcode
>= 0x80 && i
.tm
.base_opcode
<= 0x83)
4403 /* rol, ror, rcl, rcr, shl/sal, shr, sar. */
4404 if ((base_opcode
== 0xc1
4405 || (i
.tm
.base_opcode
>= 0xd0 && i
.tm
.base_opcode
<= 0xd3))
4406 && i
.tm
.extension_opcode
!= 6)
4409 /* Check for x87 instructions. */
4410 if (base_opcode
>= 0xd8 && base_opcode
<= 0xdf)
4412 /* Skip fst, fstp, fstenv, fstcw. */
4413 if (i
.tm
.base_opcode
== 0xd9
4414 && (i
.tm
.extension_opcode
== 2
4415 || i
.tm
.extension_opcode
== 3
4416 || i
.tm
.extension_opcode
== 6
4417 || i
.tm
.extension_opcode
== 7))
4420 /* Skip fisttp, fist, fistp, fstp. */
4421 if (i
.tm
.base_opcode
== 0xdb
4422 && (i
.tm
.extension_opcode
== 1
4423 || i
.tm
.extension_opcode
== 2
4424 || i
.tm
.extension_opcode
== 3
4425 || i
.tm
.extension_opcode
== 7))
4428 /* Skip fisttp, fst, fstp, fsave, fstsw. */
4429 if (i
.tm
.base_opcode
== 0xdd
4430 && (i
.tm
.extension_opcode
== 1
4431 || i
.tm
.extension_opcode
== 2
4432 || i
.tm
.extension_opcode
== 3
4433 || i
.tm
.extension_opcode
== 6
4434 || i
.tm
.extension_opcode
== 7))
4437 /* Skip fisttp, fist, fistp, fbstp, fistp. */
4438 if (i
.tm
.base_opcode
== 0xdf
4439 && (i
.tm
.extension_opcode
== 1
4440 || i
.tm
.extension_opcode
== 2
4441 || i
.tm
.extension_opcode
== 3
4442 || i
.tm
.extension_opcode
== 6
4443 || i
.tm
.extension_opcode
== 7))
4449 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
)
4451 /* bt, bts, btr, btc. */
4452 if (i
.tm
.base_opcode
== 0xba
4453 && (i
.tm
.extension_opcode
>= 4 && i
.tm
.extension_opcode
<= 7))
4456 /* cmpxchg8b, cmpxchg16b, xrstors, vmptrld. */
4457 if (i
.tm
.base_opcode
== 0xc7
4458 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
4459 && (i
.tm
.extension_opcode
== 1 || i
.tm
.extension_opcode
== 3
4460 || i
.tm
.extension_opcode
== 6))
4463 /* fxrstor, ldmxcsr, xrstor. */
4464 if (i
.tm
.base_opcode
== 0xae
4465 && (i
.tm
.extension_opcode
== 1
4466 || i
.tm
.extension_opcode
== 2
4467 || i
.tm
.extension_opcode
== 5))
4470 /* lgdt, lidt, lmsw. */
4471 if (i
.tm
.base_opcode
== 0x01
4472 && (i
.tm
.extension_opcode
== 2
4473 || i
.tm
.extension_opcode
== 3
4474 || i
.tm
.extension_opcode
== 6))
4478 dest
= i
.operands
- 1;
4480 /* Check fake imm8 operand and 3 source operands. */
4481 if ((i
.tm
.opcode_modifier
.immext
4482 || i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
4483 && i
.types
[dest
].bitfield
.imm8
)
4486 /* add, or, adc, sbb, and, sub, xor, cmp, test, xchg. */
4487 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4488 && (base_opcode
== 0x1
4489 || base_opcode
== 0x9
4490 || base_opcode
== 0x11
4491 || base_opcode
== 0x19
4492 || base_opcode
== 0x21
4493 || base_opcode
== 0x29
4494 || base_opcode
== 0x31
4495 || base_opcode
== 0x39
4496 || (base_opcode
| 2) == 0x87))
4500 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
4501 && base_opcode
== 0xc1)
4504 /* Check for load instruction. */
4505 return (i
.types
[dest
].bitfield
.class != ClassNone
4506 || i
.types
[dest
].bitfield
.instance
== Accum
);
4509 /* Output lfence, 0xfaee8, after instruction. */
4512 insert_lfence_after (void)
4514 if (lfence_after_load
&& load_insn_p ())
4516 /* There are also two REP string instructions that require
4517 special treatment. Specifically, the compare string (CMPS)
4518 and scan string (SCAS) instructions set EFLAGS in a manner
4519 that depends on the data being compared/scanned. When used
4520 with a REP prefix, the number of iterations may therefore
4521 vary depending on this data. If the data is a program secret
4522 chosen by the adversary using an LVI method,
4523 then this data-dependent behavior may leak some aspect
4525 if (((i
.tm
.base_opcode
| 0x1) == 0xa7
4526 || (i
.tm
.base_opcode
| 0x1) == 0xaf)
4527 && i
.prefix
[REP_PREFIX
])
4529 as_warn (_("`%s` changes flags which would affect control flow behavior"),
4532 char *p
= frag_more (3);
4539 /* Output lfence, 0xfaee8, before instruction. */
4542 insert_lfence_before (void)
4546 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
4549 if (i
.tm
.base_opcode
== 0xff
4550 && (i
.tm
.extension_opcode
== 2 || i
.tm
.extension_opcode
== 4))
4552 /* Insert lfence before indirect branch if needed. */
4554 if (lfence_before_indirect_branch
== lfence_branch_none
)
4557 if (i
.operands
!= 1)
4560 if (i
.reg_operands
== 1)
4562 /* Indirect branch via register. Don't insert lfence with
4563 -mlfence-after-load=yes. */
4564 if (lfence_after_load
4565 || lfence_before_indirect_branch
== lfence_branch_memory
)
4568 else if (i
.mem_operands
== 1
4569 && lfence_before_indirect_branch
!= lfence_branch_register
)
4571 as_warn (_("indirect `%s` with memory operand should be avoided"),
4578 if (last_insn
.kind
!= last_insn_other
4579 && last_insn
.seg
== now_seg
)
4581 as_warn_where (last_insn
.file
, last_insn
.line
,
4582 _("`%s` skips -mlfence-before-indirect-branch on `%s`"),
4583 last_insn
.name
, i
.tm
.name
);
4594 /* Output or/not/shl and lfence before near ret. */
4595 if (lfence_before_ret
!= lfence_before_ret_none
4596 && (i
.tm
.base_opcode
== 0xc2
4597 || i
.tm
.base_opcode
== 0xc3))
4599 if (last_insn
.kind
!= last_insn_other
4600 && last_insn
.seg
== now_seg
)
4602 as_warn_where (last_insn
.file
, last_insn
.line
,
4603 _("`%s` skips -mlfence-before-ret on `%s`"),
4604 last_insn
.name
, i
.tm
.name
);
4608 /* Near ret ignores operand size override under CPU64. */
4609 char prefix
= flag_code
== CODE_64BIT
4611 : i
.prefix
[DATA_PREFIX
] ? 0x66 : 0x0;
4613 if (lfence_before_ret
== lfence_before_ret_not
)
4615 /* not: 0xf71424, may add prefix
4616 for operand size override or 64-bit code. */
4617 p
= frag_more ((prefix
? 2 : 0) + 6 + 3);
4631 p
= frag_more ((prefix
? 1 : 0) + 4 + 3);
4634 if (lfence_before_ret
== lfence_before_ret_or
)
4636 /* or: 0x830c2400, may add prefix
4637 for operand size override or 64-bit code. */
4643 /* shl: 0xc1242400, may add prefix
4644 for operand size override or 64-bit code. */
4659 /* This is the guts of the machine-dependent assembler. LINE points to a
4660 machine dependent instruction. This function is supposed to emit
4661 the frags/bytes it assembles to. */
4664 md_assemble (char *line
)
4667 char mnemonic
[MAX_MNEM_SIZE
], mnem_suffix
;
4668 const insn_template
*t
;
4670 /* Initialize globals. */
4671 memset (&i
, '\0', sizeof (i
));
4672 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4673 i
.reloc
[j
] = NO_RELOC
;
4674 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
4675 memset (im_expressions
, '\0', sizeof (im_expressions
));
4676 save_stack_p
= save_stack
;
4678 /* First parse an instruction mnemonic & call i386_operand for the operands.
4679 We assume that the scrubber has arranged it so that line[0] is the valid
4680 start of a (possibly prefixed) mnemonic. */
4682 line
= parse_insn (line
, mnemonic
);
4685 mnem_suffix
= i
.suffix
;
4687 line
= parse_operands (line
, mnemonic
);
4689 xfree (i
.memop1_string
);
4690 i
.memop1_string
= NULL
;
4694 /* Now we've parsed the mnemonic into a set of templates, and have the
4695 operands at hand. */
4697 /* All Intel opcodes have reversed operands except for "bound", "enter",
4698 "invlpg*", "monitor*", "mwait*", "tpause", "umwait", "pvalidate",
4699 "rmpadjust", and "rmpupdate". We also don't reverse intersegment "jmp"
4700 and "call" instructions with 2 immediate operands so that the immediate
4701 segment precedes the offset consistently in Intel and AT&T modes. */
4704 && (strcmp (mnemonic
, "bound") != 0)
4705 && (strncmp (mnemonic
, "invlpg", 6) != 0)
4706 && (strncmp (mnemonic
, "monitor", 7) != 0)
4707 && (strncmp (mnemonic
, "mwait", 5) != 0)
4708 && (strcmp (mnemonic
, "pvalidate") != 0)
4709 && (strncmp (mnemonic
, "rmp", 3) != 0)
4710 && (strcmp (mnemonic
, "tpause") != 0)
4711 && (strcmp (mnemonic
, "umwait") != 0)
4712 && !(operand_type_check (i
.types
[0], imm
)
4713 && operand_type_check (i
.types
[1], imm
)))
4716 /* The order of the immediates should be reversed
4717 for 2 immediates extrq and insertq instructions */
4718 if (i
.imm_operands
== 2
4719 && (strcmp (mnemonic
, "extrq") == 0
4720 || strcmp (mnemonic
, "insertq") == 0))
4721 swap_2_operands (0, 1);
4726 /* Don't optimize displacement for movabs since it only takes 64bit
4729 && i
.disp_encoding
!= disp_encoding_32bit
4730 && (flag_code
!= CODE_64BIT
4731 || strcmp (mnemonic
, "movabs") != 0))
4734 /* Next, we find a template that matches the given insn,
4735 making sure the overlap of the given operands types is consistent
4736 with the template operand types. */
4738 if (!(t
= match_template (mnem_suffix
)))
4741 if (sse_check
!= check_none
4742 && !i
.tm
.opcode_modifier
.noavx
4743 && !i
.tm
.cpu_flags
.bitfield
.cpuavx
4744 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512f
4745 && (i
.tm
.cpu_flags
.bitfield
.cpusse
4746 || i
.tm
.cpu_flags
.bitfield
.cpusse2
4747 || i
.tm
.cpu_flags
.bitfield
.cpusse3
4748 || i
.tm
.cpu_flags
.bitfield
.cpussse3
4749 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
4750 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
4751 || i
.tm
.cpu_flags
.bitfield
.cpupclmul
4752 || i
.tm
.cpu_flags
.bitfield
.cpuaes
4753 || i
.tm
.cpu_flags
.bitfield
.cpusha
4754 || i
.tm
.cpu_flags
.bitfield
.cpugfni
))
4756 (sse_check
== check_warning
4758 : as_bad
) (_("SSE instruction `%s' is used"), i
.tm
.name
);
4761 if (i
.tm
.opcode_modifier
.fwait
)
4762 if (!add_prefix (FWAIT_OPCODE
))
4765 /* Check if REP prefix is OK. */
4766 if (i
.rep_prefix
&& i
.tm
.opcode_modifier
.prefixok
!= PrefixRep
)
4768 as_bad (_("invalid instruction `%s' after `%s'"),
4769 i
.tm
.name
, i
.rep_prefix
);
4773 /* Check for lock without a lockable instruction. Destination operand
4774 must be memory unless it is xchg (0x86). */
4775 if (i
.prefix
[LOCK_PREFIX
]
4776 && (i
.tm
.opcode_modifier
.prefixok
< PrefixLock
4777 || i
.mem_operands
== 0
4778 || (i
.tm
.base_opcode
!= 0x86
4779 && !(i
.flags
[i
.operands
- 1] & Operand_Mem
))))
4781 as_bad (_("expecting lockable instruction after `lock'"));
4785 /* Check for data size prefix on VEX/XOP/EVEX encoded and SIMD insns. */
4786 if (i
.prefix
[DATA_PREFIX
]
4787 && (is_any_vex_encoding (&i
.tm
)
4788 || i
.tm
.operand_types
[i
.imm_operands
].bitfield
.class >= RegMMX
4789 || i
.tm
.operand_types
[i
.imm_operands
+ 1].bitfield
.class >= RegMMX
))
4791 as_bad (_("data size prefix invalid with `%s'"), i
.tm
.name
);
4795 /* Check if HLE prefix is OK. */
4796 if (i
.hle_prefix
&& !check_hle ())
4799 /* Check BND prefix. */
4800 if (i
.bnd_prefix
&& !i
.tm
.opcode_modifier
.bndprefixok
)
4801 as_bad (_("expecting valid branch instruction after `bnd'"));
4803 /* Check NOTRACK prefix. */
4804 if (i
.notrack_prefix
&& i
.tm
.opcode_modifier
.prefixok
!= PrefixNoTrack
)
4805 as_bad (_("expecting indirect branch instruction after `notrack'"));
4807 if (i
.tm
.cpu_flags
.bitfield
.cpumpx
)
4809 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
4810 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
4811 else if (flag_code
!= CODE_16BIT
4812 ? i
.prefix
[ADDR_PREFIX
]
4813 : i
.mem_operands
&& !i
.prefix
[ADDR_PREFIX
])
4814 as_bad (_("16-bit address isn't allowed in MPX instructions"));
4817 /* Insert BND prefix. */
4818 if (add_bnd_prefix
&& i
.tm
.opcode_modifier
.bndprefixok
)
4820 if (!i
.prefix
[BND_PREFIX
])
4821 add_prefix (BND_PREFIX_OPCODE
);
4822 else if (i
.prefix
[BND_PREFIX
] != BND_PREFIX_OPCODE
)
4824 as_warn (_("replacing `rep'/`repe' prefix by `bnd'"));
4825 i
.prefix
[BND_PREFIX
] = BND_PREFIX_OPCODE
;
4829 /* Check string instruction segment overrides. */
4830 if (i
.tm
.opcode_modifier
.isstring
>= IS_STRING_ES_OP0
)
4832 gas_assert (i
.mem_operands
);
4833 if (!check_string ())
4835 i
.disp_operands
= 0;
4838 if (optimize
&& !i
.no_optimize
&& i
.tm
.opcode_modifier
.optimize
)
4839 optimize_encoding ();
4841 if (!process_suffix ())
4844 /* Update operand types and check extended states. */
4845 for (j
= 0; j
< i
.operands
; j
++)
4847 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
4848 switch (i
.tm
.operand_types
[j
].bitfield
.class)
4853 i
.xstate
|= xstate_mmx
;
4856 i
.xstate
|= xstate_mask
;
4859 if (i
.tm
.operand_types
[j
].bitfield
.tmmword
)
4860 i
.xstate
|= xstate_tmm
;
4861 else if (i
.tm
.operand_types
[j
].bitfield
.zmmword
)
4862 i
.xstate
|= xstate_zmm
;
4863 else if (i
.tm
.operand_types
[j
].bitfield
.ymmword
)
4864 i
.xstate
|= xstate_ymm
;
4865 else if (i
.tm
.operand_types
[j
].bitfield
.xmmword
)
4866 i
.xstate
|= xstate_xmm
;
4871 /* Make still unresolved immediate matches conform to size of immediate
4872 given in i.suffix. */
4873 if (!finalize_imm ())
4876 if (i
.types
[0].bitfield
.imm1
)
4877 i
.imm_operands
= 0; /* kludge for shift insns. */
4879 /* We only need to check those implicit registers for instructions
4880 with 3 operands or less. */
4881 if (i
.operands
<= 3)
4882 for (j
= 0; j
< i
.operands
; j
++)
4883 if (i
.types
[j
].bitfield
.instance
!= InstanceNone
4884 && !i
.types
[j
].bitfield
.xmmword
)
4887 /* For insns with operands there are more diddles to do to the opcode. */
4890 if (!process_operands ())
4893 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
4895 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
4896 as_warn (_("translating to `%sp'"), i
.tm
.name
);
4899 if (is_any_vex_encoding (&i
.tm
))
4901 if (!cpu_arch_flags
.bitfield
.cpui286
)
4903 as_bad (_("instruction `%s' isn't supported outside of protected mode."),
4908 /* Check for explicit REX prefix. */
4909 if (i
.prefix
[REX_PREFIX
] || i
.rex_encoding
)
4911 as_bad (_("REX prefix invalid with `%s'"), i
.tm
.name
);
4915 if (i
.tm
.opcode_modifier
.vex
)
4916 build_vex_prefix (t
);
4918 build_evex_prefix ();
4920 /* The individual REX.RXBW bits got consumed. */
4921 i
.rex
&= REX_OPCODE
;
4924 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
4925 instructions may define INT_OPCODE as well, so avoid this corner
4926 case for those instructions that use MODRM. */
4927 if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
4928 && i
.tm
.base_opcode
== INT_OPCODE
4929 && !i
.tm
.opcode_modifier
.modrm
4930 && i
.op
[0].imms
->X_add_number
== 3)
4932 i
.tm
.base_opcode
= INT3_OPCODE
;
4936 if ((i
.tm
.opcode_modifier
.jump
== JUMP
4937 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
4938 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
4939 && i
.op
[0].disps
->X_op
== O_constant
)
4941 /* Convert "jmp constant" (and "call constant") to a jump (call) to
4942 the absolute address given by the constant. Since ix86 jumps and
4943 calls are pc relative, we need to generate a reloc. */
4944 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
4945 i
.op
[0].disps
->X_op
= O_symbol
;
4948 /* For 8 bit registers we need an empty rex prefix. Also if the
4949 instruction already has a prefix, we need to convert old
4950 registers to new ones. */
4952 if ((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
4953 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
4954 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
4955 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
4956 || (((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
)
4957 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
))
4962 i
.rex
|= REX_OPCODE
;
4963 for (x
= 0; x
< 2; x
++)
4965 /* Look for 8 bit operand that uses old registers. */
4966 if (i
.types
[x
].bitfield
.class == Reg
&& i
.types
[x
].bitfield
.byte
4967 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
4969 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
4970 /* In case it is "hi" register, give up. */
4971 if (i
.op
[x
].regs
->reg_num
> 3)
4972 as_bad (_("can't encode register '%s%s' in an "
4973 "instruction requiring REX prefix."),
4974 register_prefix
, i
.op
[x
].regs
->reg_name
);
4976 /* Otherwise it is equivalent to the extended register.
4977 Since the encoding doesn't change this is merely
4978 cosmetic cleanup for debug output. */
4980 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
4985 if (i
.rex
== 0 && i
.rex_encoding
)
4987 /* Check if we can add a REX_OPCODE byte. Look for 8 bit operand
4988 that uses legacy register. If it is "hi" register, don't add
4989 the REX_OPCODE byte. */
4991 for (x
= 0; x
< 2; x
++)
4992 if (i
.types
[x
].bitfield
.class == Reg
4993 && i
.types
[x
].bitfield
.byte
4994 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0
4995 && i
.op
[x
].regs
->reg_num
> 3)
4997 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
4998 i
.rex_encoding
= FALSE
;
5007 add_prefix (REX_OPCODE
| i
.rex
);
5009 insert_lfence_before ();
5011 /* We are ready to output the insn. */
5014 insert_lfence_after ();
5016 last_insn
.seg
= now_seg
;
5018 if (i
.tm
.opcode_modifier
.isprefix
)
5020 last_insn
.kind
= last_insn_prefix
;
5021 last_insn
.name
= i
.tm
.name
;
5022 last_insn
.file
= as_where (&last_insn
.line
);
5025 last_insn
.kind
= last_insn_other
;
5029 parse_insn (char *line
, char *mnemonic
)
5032 char *token_start
= l
;
5035 const insn_template
*t
;
5041 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
5046 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
5048 as_bad (_("no such instruction: `%s'"), token_start
);
5053 if (!is_space_char (*l
)
5054 && *l
!= END_OF_INSN
5056 || (*l
!= PREFIX_SEPARATOR
5059 as_bad (_("invalid character %s in mnemonic"),
5060 output_invalid (*l
));
5063 if (token_start
== l
)
5065 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
5066 as_bad (_("expecting prefix; got nothing"));
5068 as_bad (_("expecting mnemonic; got nothing"));
5072 /* Look up instruction (or prefix) via hash table. */
5073 current_templates
= (const templates
*) str_hash_find (op_hash
, mnemonic
);
5075 if (*l
!= END_OF_INSN
5076 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
5077 && current_templates
5078 && current_templates
->start
->opcode_modifier
.isprefix
)
5080 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
5082 as_bad ((flag_code
!= CODE_64BIT
5083 ? _("`%s' is only supported in 64-bit mode")
5084 : _("`%s' is not supported in 64-bit mode")),
5085 current_templates
->start
->name
);
5088 /* If we are in 16-bit mode, do not allow addr16 or data16.
5089 Similarly, in 32-bit mode, do not allow addr32 or data32. */
5090 if ((current_templates
->start
->opcode_modifier
.size
== SIZE16
5091 || current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5092 && flag_code
!= CODE_64BIT
5093 && ((current_templates
->start
->opcode_modifier
.size
== SIZE32
)
5094 ^ (flag_code
== CODE_16BIT
)))
5096 as_bad (_("redundant %s prefix"),
5097 current_templates
->start
->name
);
5101 if (current_templates
->start
->base_opcode
== PSEUDO_PREFIX
)
5103 /* Handle pseudo prefixes. */
5104 switch (current_templates
->start
->extension_opcode
)
5108 i
.disp_encoding
= disp_encoding_8bit
;
5112 i
.disp_encoding
= disp_encoding_16bit
;
5116 i
.disp_encoding
= disp_encoding_32bit
;
5120 i
.dir_encoding
= dir_encoding_load
;
5124 i
.dir_encoding
= dir_encoding_store
;
5128 i
.vec_encoding
= vex_encoding_vex
;
5132 i
.vec_encoding
= vex_encoding_vex3
;
5136 i
.vec_encoding
= vex_encoding_evex
;
5140 i
.rex_encoding
= TRUE
;
5142 case Prefix_NoOptimize
:
5144 i
.no_optimize
= TRUE
;
5152 /* Add prefix, checking for repeated prefixes. */
5153 switch (add_prefix (current_templates
->start
->base_opcode
))
5158 if (current_templates
->start
->cpu_flags
.bitfield
.cpuibt
)
5159 i
.notrack_prefix
= current_templates
->start
->name
;
5162 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
5163 i
.hle_prefix
= current_templates
->start
->name
;
5164 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
5165 i
.bnd_prefix
= current_templates
->start
->name
;
5167 i
.rep_prefix
= current_templates
->start
->name
;
5173 /* Skip past PREFIX_SEPARATOR and reset token_start. */
5180 if (!current_templates
)
5182 /* Deprecated functionality (new code should use pseudo-prefixes instead):
5183 Check if we should swap operand or force 32bit displacement in
5185 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
5186 i
.dir_encoding
= dir_encoding_swap
;
5187 else if (mnem_p
- 3 == dot_p
5190 i
.disp_encoding
= disp_encoding_8bit
;
5191 else if (mnem_p
- 4 == dot_p
5195 i
.disp_encoding
= disp_encoding_32bit
;
5200 current_templates
= (const templates
*) str_hash_find (op_hash
, mnemonic
);
5203 if (!current_templates
)
5206 if (mnem_p
> mnemonic
)
5208 /* See if we can get a match by trimming off a suffix. */
5211 case WORD_MNEM_SUFFIX
:
5212 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
5213 i
.suffix
= SHORT_MNEM_SUFFIX
;
5216 case BYTE_MNEM_SUFFIX
:
5217 case QWORD_MNEM_SUFFIX
:
5218 i
.suffix
= mnem_p
[-1];
5221 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5223 case SHORT_MNEM_SUFFIX
:
5224 case LONG_MNEM_SUFFIX
:
5227 i
.suffix
= mnem_p
[-1];
5230 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5238 if (intel_float_operand (mnemonic
) == 1)
5239 i
.suffix
= SHORT_MNEM_SUFFIX
;
5241 i
.suffix
= LONG_MNEM_SUFFIX
;
5244 = (const templates
*) str_hash_find (op_hash
, mnemonic
);
5250 if (!current_templates
)
5252 as_bad (_("no such instruction: `%s'"), token_start
);
5257 if (current_templates
->start
->opcode_modifier
.jump
== JUMP
5258 || current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
)
5260 /* Check for a branch hint. We allow ",pt" and ",pn" for
5261 predict taken and predict not taken respectively.
5262 I'm not sure that branch hints actually do anything on loop
5263 and jcxz insns (JumpByte) for current Pentium4 chips. They
5264 may work in the future and it doesn't hurt to accept them
5266 if (l
[0] == ',' && l
[1] == 'p')
5270 if (!add_prefix (DS_PREFIX_OPCODE
))
5274 else if (l
[2] == 'n')
5276 if (!add_prefix (CS_PREFIX_OPCODE
))
5282 /* Any other comma loses. */
5285 as_bad (_("invalid character %s in mnemonic"),
5286 output_invalid (*l
));
5290 /* Check if instruction is supported on specified architecture. */
5292 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
5294 supported
|= cpu_flags_match (t
);
5295 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
5297 if (!cpu_arch_flags
.bitfield
.cpui386
&& (flag_code
!= CODE_16BIT
))
5298 as_warn (_("use .code16 to ensure correct addressing mode"));
5304 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
5305 as_bad (flag_code
== CODE_64BIT
5306 ? _("`%s' is not supported in 64-bit mode")
5307 : _("`%s' is only supported in 64-bit mode"),
5308 current_templates
->start
->name
);
5310 as_bad (_("`%s' is not supported on `%s%s'"),
5311 current_templates
->start
->name
,
5312 cpu_arch_name
? cpu_arch_name
: default_arch
,
5313 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
5319 parse_operands (char *l
, const char *mnemonic
)
5323 /* 1 if operand is pending after ','. */
5324 unsigned int expecting_operand
= 0;
5326 /* Non-zero if operand parens not balanced. */
5327 unsigned int paren_not_balanced
;
5329 while (*l
!= END_OF_INSN
)
5331 /* Skip optional white space before operand. */
5332 if (is_space_char (*l
))
5334 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
&& *l
!= '"')
5336 as_bad (_("invalid character %s before operand %d"),
5337 output_invalid (*l
),
5341 token_start
= l
; /* After white space. */
5342 paren_not_balanced
= 0;
5343 while (paren_not_balanced
|| *l
!= ',')
5345 if (*l
== END_OF_INSN
)
5347 if (paren_not_balanced
)
5350 as_bad (_("unbalanced parenthesis in operand %d."),
5353 as_bad (_("unbalanced brackets in operand %d."),
5358 break; /* we are done */
5360 else if (!is_operand_char (*l
) && !is_space_char (*l
) && *l
!= '"')
5362 as_bad (_("invalid character %s in operand %d"),
5363 output_invalid (*l
),
5370 ++paren_not_balanced
;
5372 --paren_not_balanced
;
5377 ++paren_not_balanced
;
5379 --paren_not_balanced
;
5383 if (l
!= token_start
)
5384 { /* Yes, we've read in another operand. */
5385 unsigned int operand_ok
;
5386 this_operand
= i
.operands
++;
5387 if (i
.operands
> MAX_OPERANDS
)
5389 as_bad (_("spurious operands; (%d operands/instruction max)"),
5393 i
.types
[this_operand
].bitfield
.unspecified
= 1;
5394 /* Now parse operand adding info to 'i' as we go along. */
5395 END_STRING_AND_SAVE (l
);
5397 if (i
.mem_operands
> 1)
5399 as_bad (_("too many memory references for `%s'"),
5406 i386_intel_operand (token_start
,
5407 intel_float_operand (mnemonic
));
5409 operand_ok
= i386_att_operand (token_start
);
5411 RESTORE_END_STRING (l
);
5417 if (expecting_operand
)
5419 expecting_operand_after_comma
:
5420 as_bad (_("expecting operand after ','; got nothing"));
5425 as_bad (_("expecting operand before ','; got nothing"));
5430 /* Now *l must be either ',' or END_OF_INSN. */
5433 if (*++l
== END_OF_INSN
)
5435 /* Just skip it, if it's \n complain. */
5436 goto expecting_operand_after_comma
;
5438 expecting_operand
= 1;
5445 swap_2_operands (unsigned int xchg1
, unsigned int xchg2
)
5447 union i386_op temp_op
;
5448 i386_operand_type temp_type
;
5449 unsigned int temp_flags
;
5450 enum bfd_reloc_code_real temp_reloc
;
5452 temp_type
= i
.types
[xchg2
];
5453 i
.types
[xchg2
] = i
.types
[xchg1
];
5454 i
.types
[xchg1
] = temp_type
;
5456 temp_flags
= i
.flags
[xchg2
];
5457 i
.flags
[xchg2
] = i
.flags
[xchg1
];
5458 i
.flags
[xchg1
] = temp_flags
;
5460 temp_op
= i
.op
[xchg2
];
5461 i
.op
[xchg2
] = i
.op
[xchg1
];
5462 i
.op
[xchg1
] = temp_op
;
5464 temp_reloc
= i
.reloc
[xchg2
];
5465 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
5466 i
.reloc
[xchg1
] = temp_reloc
;
5470 if (i
.mask
.operand
== xchg1
)
5471 i
.mask
.operand
= xchg2
;
5472 else if (i
.mask
.operand
== xchg2
)
5473 i
.mask
.operand
= xchg1
;
5477 if (i
.broadcast
->operand
== xchg1
)
5478 i
.broadcast
->operand
= xchg2
;
5479 else if (i
.broadcast
->operand
== xchg2
)
5480 i
.broadcast
->operand
= xchg1
;
5484 if (i
.rounding
->operand
== xchg1
)
5485 i
.rounding
->operand
= xchg2
;
5486 else if (i
.rounding
->operand
== xchg2
)
5487 i
.rounding
->operand
= xchg1
;
5492 swap_operands (void)
5498 swap_2_operands (1, i
.operands
- 2);
5502 swap_2_operands (0, i
.operands
- 1);
5508 if (i
.mem_operands
== 2)
5510 const seg_entry
*temp_seg
;
5511 temp_seg
= i
.seg
[0];
5512 i
.seg
[0] = i
.seg
[1];
5513 i
.seg
[1] = temp_seg
;
5517 /* Try to ensure constant immediates are represented in the smallest
5522 char guess_suffix
= 0;
5526 guess_suffix
= i
.suffix
;
5527 else if (i
.reg_operands
)
5529 /* Figure out a suffix from the last register operand specified.
5530 We can't do this properly yet, i.e. excluding special register
5531 instances, but the following works for instructions with
5532 immediates. In any case, we can't set i.suffix yet. */
5533 for (op
= i
.operands
; --op
>= 0;)
5534 if (i
.types
[op
].bitfield
.class != Reg
)
5536 else if (i
.types
[op
].bitfield
.byte
)
5538 guess_suffix
= BYTE_MNEM_SUFFIX
;
5541 else if (i
.types
[op
].bitfield
.word
)
5543 guess_suffix
= WORD_MNEM_SUFFIX
;
5546 else if (i
.types
[op
].bitfield
.dword
)
5548 guess_suffix
= LONG_MNEM_SUFFIX
;
5551 else if (i
.types
[op
].bitfield
.qword
)
5553 guess_suffix
= QWORD_MNEM_SUFFIX
;
5557 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
5558 guess_suffix
= WORD_MNEM_SUFFIX
;
5560 for (op
= i
.operands
; --op
>= 0;)
5561 if (operand_type_check (i
.types
[op
], imm
))
5563 switch (i
.op
[op
].imms
->X_op
)
5566 /* If a suffix is given, this operand may be shortened. */
5567 switch (guess_suffix
)
5569 case LONG_MNEM_SUFFIX
:
5570 i
.types
[op
].bitfield
.imm32
= 1;
5571 i
.types
[op
].bitfield
.imm64
= 1;
5573 case WORD_MNEM_SUFFIX
:
5574 i
.types
[op
].bitfield
.imm16
= 1;
5575 i
.types
[op
].bitfield
.imm32
= 1;
5576 i
.types
[op
].bitfield
.imm32s
= 1;
5577 i
.types
[op
].bitfield
.imm64
= 1;
5579 case BYTE_MNEM_SUFFIX
:
5580 i
.types
[op
].bitfield
.imm8
= 1;
5581 i
.types
[op
].bitfield
.imm8s
= 1;
5582 i
.types
[op
].bitfield
.imm16
= 1;
5583 i
.types
[op
].bitfield
.imm32
= 1;
5584 i
.types
[op
].bitfield
.imm32s
= 1;
5585 i
.types
[op
].bitfield
.imm64
= 1;
5589 /* If this operand is at most 16 bits, convert it
5590 to a signed 16 bit number before trying to see
5591 whether it will fit in an even smaller size.
5592 This allows a 16-bit operand such as $0xffe0 to
5593 be recognised as within Imm8S range. */
5594 if ((i
.types
[op
].bitfield
.imm16
)
5595 && (i
.op
[op
].imms
->X_add_number
& ~(offsetT
) 0xffff) == 0)
5597 i
.op
[op
].imms
->X_add_number
=
5598 (((i
.op
[op
].imms
->X_add_number
& 0xffff) ^ 0x8000) - 0x8000);
5601 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
5602 if ((i
.types
[op
].bitfield
.imm32
)
5603 && ((i
.op
[op
].imms
->X_add_number
& ~(((offsetT
) 2 << 31) - 1))
5606 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
5607 ^ ((offsetT
) 1 << 31))
5608 - ((offsetT
) 1 << 31));
5612 = operand_type_or (i
.types
[op
],
5613 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
5615 /* We must avoid matching of Imm32 templates when 64bit
5616 only immediate is available. */
5617 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
5618 i
.types
[op
].bitfield
.imm32
= 0;
5625 /* Symbols and expressions. */
5627 /* Convert symbolic operand to proper sizes for matching, but don't
5628 prevent matching a set of insns that only supports sizes other
5629 than those matching the insn suffix. */
5631 i386_operand_type mask
, allowed
;
5632 const insn_template
*t
;
5634 operand_type_set (&mask
, 0);
5635 operand_type_set (&allowed
, 0);
5637 for (t
= current_templates
->start
;
5638 t
< current_templates
->end
;
5641 allowed
= operand_type_or (allowed
, t
->operand_types
[op
]);
5642 allowed
= operand_type_and (allowed
, anyimm
);
5644 switch (guess_suffix
)
5646 case QWORD_MNEM_SUFFIX
:
5647 mask
.bitfield
.imm64
= 1;
5648 mask
.bitfield
.imm32s
= 1;
5650 case LONG_MNEM_SUFFIX
:
5651 mask
.bitfield
.imm32
= 1;
5653 case WORD_MNEM_SUFFIX
:
5654 mask
.bitfield
.imm16
= 1;
5656 case BYTE_MNEM_SUFFIX
:
5657 mask
.bitfield
.imm8
= 1;
5662 allowed
= operand_type_and (mask
, allowed
);
5663 if (!operand_type_all_zero (&allowed
))
5664 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
5671 /* Try to use the smallest displacement type too. */
5673 optimize_disp (void)
5677 for (op
= i
.operands
; --op
>= 0;)
5678 if (operand_type_check (i
.types
[op
], disp
))
5680 if (i
.op
[op
].disps
->X_op
== O_constant
)
5682 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
5684 if (i
.types
[op
].bitfield
.disp16
5685 && (op_disp
& ~(offsetT
) 0xffff) == 0)
5687 /* If this operand is at most 16 bits, convert
5688 to a signed 16 bit number and don't use 64bit
5690 op_disp
= (((op_disp
& 0xffff) ^ 0x8000) - 0x8000);
5691 i
.types
[op
].bitfield
.disp64
= 0;
5694 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
5695 if (i
.types
[op
].bitfield
.disp32
5696 && (op_disp
& ~(((offsetT
) 2 << 31) - 1)) == 0)
5698 /* If this operand is at most 32 bits, convert
5699 to a signed 32 bit number and don't use 64bit
5701 op_disp
&= (((offsetT
) 2 << 31) - 1);
5702 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
5703 i
.types
[op
].bitfield
.disp64
= 0;
5706 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
5708 i
.types
[op
].bitfield
.disp8
= 0;
5709 i
.types
[op
].bitfield
.disp16
= 0;
5710 i
.types
[op
].bitfield
.disp32
= 0;
5711 i
.types
[op
].bitfield
.disp32s
= 0;
5712 i
.types
[op
].bitfield
.disp64
= 0;
5716 else if (flag_code
== CODE_64BIT
)
5718 if (fits_in_signed_long (op_disp
))
5720 i
.types
[op
].bitfield
.disp64
= 0;
5721 i
.types
[op
].bitfield
.disp32s
= 1;
5723 if (i
.prefix
[ADDR_PREFIX
]
5724 && fits_in_unsigned_long (op_disp
))
5725 i
.types
[op
].bitfield
.disp32
= 1;
5727 if ((i
.types
[op
].bitfield
.disp32
5728 || i
.types
[op
].bitfield
.disp32s
5729 || i
.types
[op
].bitfield
.disp16
)
5730 && fits_in_disp8 (op_disp
))
5731 i
.types
[op
].bitfield
.disp8
= 1;
5733 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
5734 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
5736 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
5737 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
5738 i
.types
[op
].bitfield
.disp8
= 0;
5739 i
.types
[op
].bitfield
.disp16
= 0;
5740 i
.types
[op
].bitfield
.disp32
= 0;
5741 i
.types
[op
].bitfield
.disp32s
= 0;
5742 i
.types
[op
].bitfield
.disp64
= 0;
5745 /* We only support 64bit displacement on constants. */
5746 i
.types
[op
].bitfield
.disp64
= 0;
5750 /* Return 1 if there is a match in broadcast bytes between operand
5751 GIVEN and instruction template T. */
5754 match_broadcast_size (const insn_template
*t
, unsigned int given
)
5756 return ((t
->opcode_modifier
.broadcast
== BYTE_BROADCAST
5757 && i
.types
[given
].bitfield
.byte
)
5758 || (t
->opcode_modifier
.broadcast
== WORD_BROADCAST
5759 && i
.types
[given
].bitfield
.word
)
5760 || (t
->opcode_modifier
.broadcast
== DWORD_BROADCAST
5761 && i
.types
[given
].bitfield
.dword
)
5762 || (t
->opcode_modifier
.broadcast
== QWORD_BROADCAST
5763 && i
.types
[given
].bitfield
.qword
));
5766 /* Check if operands are valid for the instruction. */
5769 check_VecOperands (const insn_template
*t
)
5774 /* Templates allowing for ZMMword as well as YMMword and/or XMMword for
5775 any one operand are implicity requiring AVX512VL support if the actual
5776 operand size is YMMword or XMMword. Since this function runs after
5777 template matching, there's no need to check for YMMword/XMMword in
5779 cpu
= cpu_flags_and (t
->cpu_flags
, avx512
);
5780 if (!cpu_flags_all_zero (&cpu
)
5781 && !t
->cpu_flags
.bitfield
.cpuavx512vl
5782 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
5784 for (op
= 0; op
< t
->operands
; ++op
)
5786 if (t
->operand_types
[op
].bitfield
.zmmword
5787 && (i
.types
[op
].bitfield
.ymmword
5788 || i
.types
[op
].bitfield
.xmmword
))
5790 i
.error
= unsupported
;
5796 /* Without VSIB byte, we can't have a vector register for index. */
5797 if (!t
->opcode_modifier
.sib
5799 && (i
.index_reg
->reg_type
.bitfield
.xmmword
5800 || i
.index_reg
->reg_type
.bitfield
.ymmword
5801 || i
.index_reg
->reg_type
.bitfield
.zmmword
))
5803 i
.error
= unsupported_vector_index_register
;
5807 /* Check if default mask is allowed. */
5808 if (t
->opcode_modifier
.nodefmask
5809 && (!i
.mask
.reg
|| i
.mask
.reg
->reg_num
== 0))
5811 i
.error
= no_default_mask
;
5815 /* For VSIB byte, we need a vector register for index, and all vector
5816 registers must be distinct. */
5817 if (t
->opcode_modifier
.sib
&& t
->opcode_modifier
.sib
!= SIBMEM
)
5820 || !((t
->opcode_modifier
.sib
== VECSIB128
5821 && i
.index_reg
->reg_type
.bitfield
.xmmword
)
5822 || (t
->opcode_modifier
.sib
== VECSIB256
5823 && i
.index_reg
->reg_type
.bitfield
.ymmword
)
5824 || (t
->opcode_modifier
.sib
== VECSIB512
5825 && i
.index_reg
->reg_type
.bitfield
.zmmword
)))
5827 i
.error
= invalid_vsib_address
;
5831 gas_assert (i
.reg_operands
== 2 || i
.mask
.reg
);
5832 if (i
.reg_operands
== 2 && !i
.mask
.reg
)
5834 gas_assert (i
.types
[0].bitfield
.class == RegSIMD
);
5835 gas_assert (i
.types
[0].bitfield
.xmmword
5836 || i
.types
[0].bitfield
.ymmword
);
5837 gas_assert (i
.types
[2].bitfield
.class == RegSIMD
);
5838 gas_assert (i
.types
[2].bitfield
.xmmword
5839 || i
.types
[2].bitfield
.ymmword
);
5840 if (operand_check
== check_none
)
5842 if (register_number (i
.op
[0].regs
)
5843 != register_number (i
.index_reg
)
5844 && register_number (i
.op
[2].regs
)
5845 != register_number (i
.index_reg
)
5846 && register_number (i
.op
[0].regs
)
5847 != register_number (i
.op
[2].regs
))
5849 if (operand_check
== check_error
)
5851 i
.error
= invalid_vector_register_set
;
5854 as_warn (_("mask, index, and destination registers should be distinct"));
5856 else if (i
.reg_operands
== 1 && i
.mask
.reg
)
5858 if (i
.types
[1].bitfield
.class == RegSIMD
5859 && (i
.types
[1].bitfield
.xmmword
5860 || i
.types
[1].bitfield
.ymmword
5861 || i
.types
[1].bitfield
.zmmword
)
5862 && (register_number (i
.op
[1].regs
)
5863 == register_number (i
.index_reg
)))
5865 if (operand_check
== check_error
)
5867 i
.error
= invalid_vector_register_set
;
5870 if (operand_check
!= check_none
)
5871 as_warn (_("index and destination registers should be distinct"));
5876 /* For AMX instructions with three tmmword operands, all tmmword operand must be
5878 if (t
->operand_types
[0].bitfield
.tmmword
5879 && i
.reg_operands
== 3)
5881 if (register_number (i
.op
[0].regs
)
5882 == register_number (i
.op
[1].regs
)
5883 || register_number (i
.op
[0].regs
)
5884 == register_number (i
.op
[2].regs
)
5885 || register_number (i
.op
[1].regs
)
5886 == register_number (i
.op
[2].regs
))
5888 i
.error
= invalid_tmm_register_set
;
5893 /* Check if broadcast is supported by the instruction and is applied
5894 to the memory operand. */
5897 i386_operand_type type
, overlap
;
5899 /* Check if specified broadcast is supported in this instruction,
5900 and its broadcast bytes match the memory operand. */
5901 op
= i
.broadcast
->operand
;
5902 if (!t
->opcode_modifier
.broadcast
5903 || !(i
.flags
[op
] & Operand_Mem
)
5904 || (!i
.types
[op
].bitfield
.unspecified
5905 && !match_broadcast_size (t
, op
)))
5908 i
.error
= unsupported_broadcast
;
5912 i
.broadcast
->bytes
= ((1 << (t
->opcode_modifier
.broadcast
- 1))
5913 * i
.broadcast
->type
);
5914 operand_type_set (&type
, 0);
5915 switch (i
.broadcast
->bytes
)
5918 type
.bitfield
.word
= 1;
5921 type
.bitfield
.dword
= 1;
5924 type
.bitfield
.qword
= 1;
5927 type
.bitfield
.xmmword
= 1;
5930 type
.bitfield
.ymmword
= 1;
5933 type
.bitfield
.zmmword
= 1;
5939 overlap
= operand_type_and (type
, t
->operand_types
[op
]);
5940 if (t
->operand_types
[op
].bitfield
.class == RegSIMD
5941 && t
->operand_types
[op
].bitfield
.byte
5942 + t
->operand_types
[op
].bitfield
.word
5943 + t
->operand_types
[op
].bitfield
.dword
5944 + t
->operand_types
[op
].bitfield
.qword
> 1)
5946 overlap
.bitfield
.xmmword
= 0;
5947 overlap
.bitfield
.ymmword
= 0;
5948 overlap
.bitfield
.zmmword
= 0;
5950 if (operand_type_all_zero (&overlap
))
5953 if (t
->opcode_modifier
.checkregsize
)
5957 type
.bitfield
.baseindex
= 1;
5958 for (j
= 0; j
< i
.operands
; ++j
)
5961 && !operand_type_register_match(i
.types
[j
],
5962 t
->operand_types
[j
],
5964 t
->operand_types
[op
]))
5969 /* If broadcast is supported in this instruction, we need to check if
5970 operand of one-element size isn't specified without broadcast. */
5971 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
5973 /* Find memory operand. */
5974 for (op
= 0; op
< i
.operands
; op
++)
5975 if (i
.flags
[op
] & Operand_Mem
)
5977 gas_assert (op
< i
.operands
);
5978 /* Check size of the memory operand. */
5979 if (match_broadcast_size (t
, op
))
5981 i
.error
= broadcast_needed
;
5986 op
= MAX_OPERANDS
- 1; /* Avoid uninitialized variable warning. */
5988 /* Check if requested masking is supported. */
5991 switch (t
->opcode_modifier
.masking
)
5995 case MERGING_MASKING
:
5999 i
.error
= unsupported_masking
;
6003 case DYNAMIC_MASKING
:
6004 /* Memory destinations allow only merging masking. */
6005 if (i
.mask
.zeroing
&& i
.mem_operands
)
6007 /* Find memory operand. */
6008 for (op
= 0; op
< i
.operands
; op
++)
6009 if (i
.flags
[op
] & Operand_Mem
)
6011 gas_assert (op
< i
.operands
);
6012 if (op
== i
.operands
- 1)
6014 i
.error
= unsupported_masking
;
6024 /* Check if masking is applied to dest operand. */
6025 if (i
.mask
.reg
&& (i
.mask
.operand
!= i
.operands
- 1))
6027 i
.error
= mask_not_on_destination
;
6034 if (!t
->opcode_modifier
.sae
6035 || (i
.rounding
->type
!= saeonly
&& !t
->opcode_modifier
.staticrounding
))
6037 i
.error
= unsupported_rc_sae
;
6040 /* If the instruction has several immediate operands and one of
6041 them is rounding, the rounding operand should be the last
6042 immediate operand. */
6043 if (i
.imm_operands
> 1
6044 && i
.rounding
->operand
!= i
.imm_operands
- 1)
6046 i
.error
= rc_sae_operand_not_last_imm
;
6051 /* Check the special Imm4 cases; must be the first operand. */
6052 if (t
->cpu_flags
.bitfield
.cpuxop
&& t
->operands
== 5)
6054 if (i
.op
[0].imms
->X_op
!= O_constant
6055 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
6061 /* Turn off Imm<N> so that update_imm won't complain. */
6062 operand_type_set (&i
.types
[0], 0);
6065 /* Check vector Disp8 operand. */
6066 if (t
->opcode_modifier
.disp8memshift
6067 && i
.disp_encoding
!= disp_encoding_32bit
)
6070 i
.memshift
= t
->opcode_modifier
.broadcast
- 1;
6071 else if (t
->opcode_modifier
.disp8memshift
!= DISP8_SHIFT_VL
)
6072 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
6075 const i386_operand_type
*type
= NULL
;
6078 for (op
= 0; op
< i
.operands
; op
++)
6079 if (i
.flags
[op
] & Operand_Mem
)
6081 if (t
->opcode_modifier
.evex
== EVEXLIG
)
6082 i
.memshift
= 2 + (i
.suffix
== QWORD_MNEM_SUFFIX
);
6083 else if (t
->operand_types
[op
].bitfield
.xmmword
6084 + t
->operand_types
[op
].bitfield
.ymmword
6085 + t
->operand_types
[op
].bitfield
.zmmword
<= 1)
6086 type
= &t
->operand_types
[op
];
6087 else if (!i
.types
[op
].bitfield
.unspecified
)
6088 type
= &i
.types
[op
];
6090 else if (i
.types
[op
].bitfield
.class == RegSIMD
6091 && t
->opcode_modifier
.evex
!= EVEXLIG
)
6093 if (i
.types
[op
].bitfield
.zmmword
)
6095 else if (i
.types
[op
].bitfield
.ymmword
&& i
.memshift
< 5)
6097 else if (i
.types
[op
].bitfield
.xmmword
&& i
.memshift
< 4)
6103 if (type
->bitfield
.zmmword
)
6105 else if (type
->bitfield
.ymmword
)
6107 else if (type
->bitfield
.xmmword
)
6111 /* For the check in fits_in_disp8(). */
6112 if (i
.memshift
== 0)
6116 for (op
= 0; op
< i
.operands
; op
++)
6117 if (operand_type_check (i
.types
[op
], disp
)
6118 && i
.op
[op
].disps
->X_op
== O_constant
)
6120 if (fits_in_disp8 (i
.op
[op
].disps
->X_add_number
))
6122 i
.types
[op
].bitfield
.disp8
= 1;
6125 i
.types
[op
].bitfield
.disp8
= 0;
6134 /* Check if encoding requirements are met by the instruction. */
6137 VEX_check_encoding (const insn_template
*t
)
6139 if (i
.vec_encoding
== vex_encoding_error
)
6141 i
.error
= unsupported
;
6145 if (i
.vec_encoding
== vex_encoding_evex
)
6147 /* This instruction must be encoded with EVEX prefix. */
6148 if (!is_evex_encoding (t
))
6150 i
.error
= unsupported
;
6156 if (!t
->opcode_modifier
.vex
)
6158 /* This instruction template doesn't have VEX prefix. */
6159 if (i
.vec_encoding
!= vex_encoding_default
)
6161 i
.error
= unsupported
;
6170 static const insn_template
*
6171 match_template (char mnem_suffix
)
6173 /* Points to template once we've found it. */
6174 const insn_template
*t
;
6175 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
6176 i386_operand_type overlap4
;
6177 unsigned int found_reverse_match
;
6178 i386_opcode_modifier suffix_check
;
6179 i386_operand_type operand_types
[MAX_OPERANDS
];
6180 int addr_prefix_disp
;
6181 unsigned int j
, size_match
, check_register
;
6182 enum i386_error specific_error
= 0;
6184 #if MAX_OPERANDS != 5
6185 # error "MAX_OPERANDS must be 5."
6188 found_reverse_match
= 0;
6189 addr_prefix_disp
= -1;
6191 /* Prepare for mnemonic suffix check. */
6192 memset (&suffix_check
, 0, sizeof (suffix_check
));
6193 switch (mnem_suffix
)
6195 case BYTE_MNEM_SUFFIX
:
6196 suffix_check
.no_bsuf
= 1;
6198 case WORD_MNEM_SUFFIX
:
6199 suffix_check
.no_wsuf
= 1;
6201 case SHORT_MNEM_SUFFIX
:
6202 suffix_check
.no_ssuf
= 1;
6204 case LONG_MNEM_SUFFIX
:
6205 suffix_check
.no_lsuf
= 1;
6207 case QWORD_MNEM_SUFFIX
:
6208 suffix_check
.no_qsuf
= 1;
6211 /* NB: In Intel syntax, normally we can check for memory operand
6212 size when there is no mnemonic suffix. But jmp and call have
6213 2 different encodings with Dword memory operand size, one with
6214 No_ldSuf and the other without. i.suffix is set to
6215 LONG_DOUBLE_MNEM_SUFFIX to skip the one with No_ldSuf. */
6216 if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
6217 suffix_check
.no_ldsuf
= 1;
6220 /* Must have right number of operands. */
6221 i
.error
= number_of_operands_mismatch
;
6223 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
6225 addr_prefix_disp
= -1;
6226 found_reverse_match
= 0;
6228 if (i
.operands
!= t
->operands
)
6231 /* Check processor support. */
6232 i
.error
= unsupported
;
6233 if (cpu_flags_match (t
) != CPU_FLAGS_PERFECT_MATCH
)
6236 /* Check Pseudo Prefix. */
6237 i
.error
= unsupported
;
6238 if (t
->opcode_modifier
.pseudovexprefix
6239 && !(i
.vec_encoding
== vex_encoding_vex
6240 || i
.vec_encoding
== vex_encoding_vex3
))
6243 /* Check AT&T mnemonic. */
6244 i
.error
= unsupported_with_intel_mnemonic
;
6245 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
6248 /* Check AT&T/Intel syntax. */
6249 i
.error
= unsupported_syntax
;
6250 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
6251 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
))
6254 /* Check Intel64/AMD64 ISA. */
6258 /* Default: Don't accept Intel64. */
6259 if (t
->opcode_modifier
.isa64
== INTEL64
)
6263 /* -mamd64: Don't accept Intel64 and Intel64 only. */
6264 if (t
->opcode_modifier
.isa64
>= INTEL64
)
6268 /* -mintel64: Don't accept AMD64. */
6269 if (t
->opcode_modifier
.isa64
== AMD64
&& flag_code
== CODE_64BIT
)
6274 /* Check the suffix. */
6275 i
.error
= invalid_instruction_suffix
;
6276 if ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
6277 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
6278 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
6279 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
6280 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
6281 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
))
6284 size_match
= operand_size_match (t
);
6288 /* This is intentionally not
6290 if (i.jumpabsolute != (t->opcode_modifier.jump == JUMP_ABSOLUTE))
6292 as the case of a missing * on the operand is accepted (perhaps with
6293 a warning, issued further down). */
6294 if (i
.jumpabsolute
&& t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
6296 i
.error
= operand_type_mismatch
;
6300 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6301 operand_types
[j
] = t
->operand_types
[j
];
6303 /* In general, don't allow
6304 - 64-bit operands outside of 64-bit mode,
6305 - 32-bit operands on pre-386. */
6306 j
= i
.imm_operands
+ (t
->operands
> i
.imm_operands
+ 1);
6307 if (((i
.suffix
== QWORD_MNEM_SUFFIX
6308 && flag_code
!= CODE_64BIT
6309 && !(t
->opcode_modifier
.opcodespace
== SPACE_0F
6310 && t
->base_opcode
== 0xc7
6311 && t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
6312 && t
->extension_opcode
== 1) /* cmpxchg8b */)
6313 || (i
.suffix
== LONG_MNEM_SUFFIX
6314 && !cpu_arch_flags
.bitfield
.cpui386
))
6316 ? (t
->opcode_modifier
.mnemonicsize
!= IGNORESIZE
6317 && !intel_float_operand (t
->name
))
6318 : intel_float_operand (t
->name
) != 2)
6319 && (t
->operands
== i
.imm_operands
6320 || (operand_types
[i
.imm_operands
].bitfield
.class != RegMMX
6321 && operand_types
[i
.imm_operands
].bitfield
.class != RegSIMD
6322 && operand_types
[i
.imm_operands
].bitfield
.class != RegMask
)
6323 || (operand_types
[j
].bitfield
.class != RegMMX
6324 && operand_types
[j
].bitfield
.class != RegSIMD
6325 && operand_types
[j
].bitfield
.class != RegMask
))
6326 && !t
->opcode_modifier
.sib
)
6329 /* Do not verify operands when there are none. */
6332 if (VEX_check_encoding (t
))
6334 specific_error
= i
.error
;
6338 /* We've found a match; break out of loop. */
6342 if (!t
->opcode_modifier
.jump
6343 || t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)
6345 /* There should be only one Disp operand. */
6346 for (j
= 0; j
< MAX_OPERANDS
; j
++)
6347 if (operand_type_check (operand_types
[j
], disp
))
6349 if (j
< MAX_OPERANDS
)
6351 bfd_boolean override
= (i
.prefix
[ADDR_PREFIX
] != 0);
6353 addr_prefix_disp
= j
;
6355 /* Address size prefix will turn Disp64/Disp32S/Disp32/Disp16
6356 operand into Disp32/Disp32/Disp16/Disp32 operand. */
6360 override
= !override
;
6363 if (operand_types
[j
].bitfield
.disp32
6364 && operand_types
[j
].bitfield
.disp16
)
6366 operand_types
[j
].bitfield
.disp16
= override
;
6367 operand_types
[j
].bitfield
.disp32
= !override
;
6369 operand_types
[j
].bitfield
.disp32s
= 0;
6370 operand_types
[j
].bitfield
.disp64
= 0;
6374 if (operand_types
[j
].bitfield
.disp32s
6375 || operand_types
[j
].bitfield
.disp64
)
6377 operand_types
[j
].bitfield
.disp64
&= !override
;
6378 operand_types
[j
].bitfield
.disp32s
&= !override
;
6379 operand_types
[j
].bitfield
.disp32
= override
;
6381 operand_types
[j
].bitfield
.disp16
= 0;
6387 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
6388 if (i
.reloc
[0] == BFD_RELOC_386_GOT32
6389 && t
->base_opcode
== 0xa0
6390 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
)
6393 /* We check register size if needed. */
6394 if (t
->opcode_modifier
.checkregsize
)
6396 check_register
= (1 << t
->operands
) - 1;
6398 check_register
&= ~(1 << i
.broadcast
->operand
);
6403 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
6404 switch (t
->operands
)
6407 if (!operand_type_match (overlap0
, i
.types
[0]))
6411 /* xchg %eax, %eax is a special case. It is an alias for nop
6412 only in 32bit mode and we can use opcode 0x90. In 64bit
6413 mode, we can't use 0x90 for xchg %eax, %eax since it should
6414 zero-extend %eax to %rax. */
6415 if (flag_code
== CODE_64BIT
6416 && t
->base_opcode
== 0x90
6417 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
6418 && i
.types
[0].bitfield
.instance
== Accum
6419 && i
.types
[0].bitfield
.dword
6420 && i
.types
[1].bitfield
.instance
== Accum
6421 && i
.types
[1].bitfield
.dword
)
6423 /* xrelease mov %eax, <disp> is another special case. It must not
6424 match the accumulator-only encoding of mov. */
6425 if (flag_code
!= CODE_64BIT
6427 && t
->base_opcode
== 0xa0
6428 && t
->opcode_modifier
.opcodespace
== SPACE_BASE
6429 && i
.types
[0].bitfield
.instance
== Accum
6430 && (i
.flags
[1] & Operand_Mem
))
6435 if (!(size_match
& MATCH_STRAIGHT
))
6437 /* Reverse direction of operands if swapping is possible in the first
6438 place (operands need to be symmetric) and
6439 - the load form is requested, and the template is a store form,
6440 - the store form is requested, and the template is a load form,
6441 - the non-default (swapped) form is requested. */
6442 overlap1
= operand_type_and (operand_types
[0], operand_types
[1]);
6443 if (t
->opcode_modifier
.d
&& i
.reg_operands
== i
.operands
6444 && !operand_type_all_zero (&overlap1
))
6445 switch (i
.dir_encoding
)
6447 case dir_encoding_load
:
6448 if (operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6449 || t
->opcode_modifier
.regmem
)
6453 case dir_encoding_store
:
6454 if (!operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6455 && !t
->opcode_modifier
.regmem
)
6459 case dir_encoding_swap
:
6462 case dir_encoding_default
:
6465 /* If we want store form, we skip the current load. */
6466 if ((i
.dir_encoding
== dir_encoding_store
6467 || i
.dir_encoding
== dir_encoding_swap
)
6468 && i
.mem_operands
== 0
6469 && t
->opcode_modifier
.load
)
6474 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
6475 if (!operand_type_match (overlap0
, i
.types
[0])
6476 || !operand_type_match (overlap1
, i
.types
[1])
6477 || ((check_register
& 3) == 3
6478 && !operand_type_register_match (i
.types
[0],
6483 /* Check if other direction is valid ... */
6484 if (!t
->opcode_modifier
.d
)
6488 if (!(size_match
& MATCH_REVERSE
))
6490 /* Try reversing direction of operands. */
6491 overlap0
= operand_type_and (i
.types
[0], operand_types
[i
.operands
- 1]);
6492 overlap1
= operand_type_and (i
.types
[i
.operands
- 1], operand_types
[0]);
6493 if (!operand_type_match (overlap0
, i
.types
[0])
6494 || !operand_type_match (overlap1
, i
.types
[i
.operands
- 1])
6496 && !operand_type_register_match (i
.types
[0],
6497 operand_types
[i
.operands
- 1],
6498 i
.types
[i
.operands
- 1],
6501 /* Does not match either direction. */
6504 /* found_reverse_match holds which of D or FloatR
6506 if (!t
->opcode_modifier
.d
)
6507 found_reverse_match
= 0;
6508 else if (operand_types
[0].bitfield
.tbyte
)
6509 found_reverse_match
= Opcode_FloatD
;
6510 else if (operand_types
[0].bitfield
.xmmword
6511 || operand_types
[i
.operands
- 1].bitfield
.xmmword
6512 || operand_types
[0].bitfield
.class == RegMMX
6513 || operand_types
[i
.operands
- 1].bitfield
.class == RegMMX
6514 || is_any_vex_encoding(t
))
6515 found_reverse_match
= (t
->base_opcode
& 0xee) != 0x6e
6516 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
6518 found_reverse_match
= Opcode_D
;
6519 if (t
->opcode_modifier
.floatr
)
6520 found_reverse_match
|= Opcode_FloatR
;
6524 /* Found a forward 2 operand match here. */
6525 switch (t
->operands
)
6528 overlap4
= operand_type_and (i
.types
[4],
6532 overlap3
= operand_type_and (i
.types
[3],
6536 overlap2
= operand_type_and (i
.types
[2],
6541 switch (t
->operands
)
6544 if (!operand_type_match (overlap4
, i
.types
[4])
6545 || !operand_type_register_match (i
.types
[3],
6552 if (!operand_type_match (overlap3
, i
.types
[3])
6553 || ((check_register
& 0xa) == 0xa
6554 && !operand_type_register_match (i
.types
[1],
6558 || ((check_register
& 0xc) == 0xc
6559 && !operand_type_register_match (i
.types
[2],
6566 /* Here we make use of the fact that there are no
6567 reverse match 3 operand instructions. */
6568 if (!operand_type_match (overlap2
, i
.types
[2])
6569 || ((check_register
& 5) == 5
6570 && !operand_type_register_match (i
.types
[0],
6574 || ((check_register
& 6) == 6
6575 && !operand_type_register_match (i
.types
[1],
6583 /* Found either forward/reverse 2, 3 or 4 operand match here:
6584 slip through to break. */
6587 /* Check if vector operands are valid. */
6588 if (check_VecOperands (t
))
6590 specific_error
= i
.error
;
6594 /* Check if VEX/EVEX encoding requirements can be satisfied. */
6595 if (VEX_check_encoding (t
))
6597 specific_error
= i
.error
;
6601 /* We've found a match; break out of loop. */
6605 if (t
== current_templates
->end
)
6607 /* We found no match. */
6608 const char *err_msg
;
6609 switch (specific_error
? specific_error
: i
.error
)
6613 case operand_size_mismatch
:
6614 err_msg
= _("operand size mismatch");
6616 case operand_type_mismatch
:
6617 err_msg
= _("operand type mismatch");
6619 case register_type_mismatch
:
6620 err_msg
= _("register type mismatch");
6622 case number_of_operands_mismatch
:
6623 err_msg
= _("number of operands mismatch");
6625 case invalid_instruction_suffix
:
6626 err_msg
= _("invalid instruction suffix");
6629 err_msg
= _("constant doesn't fit in 4 bits");
6631 case unsupported_with_intel_mnemonic
:
6632 err_msg
= _("unsupported with Intel mnemonic");
6634 case unsupported_syntax
:
6635 err_msg
= _("unsupported syntax");
6638 as_bad (_("unsupported instruction `%s'"),
6639 current_templates
->start
->name
);
6641 case invalid_sib_address
:
6642 err_msg
= _("invalid SIB address");
6644 case invalid_vsib_address
:
6645 err_msg
= _("invalid VSIB address");
6647 case invalid_vector_register_set
:
6648 err_msg
= _("mask, index, and destination registers must be distinct");
6650 case invalid_tmm_register_set
:
6651 err_msg
= _("all tmm registers must be distinct");
6653 case unsupported_vector_index_register
:
6654 err_msg
= _("unsupported vector index register");
6656 case unsupported_broadcast
:
6657 err_msg
= _("unsupported broadcast");
6659 case broadcast_needed
:
6660 err_msg
= _("broadcast is needed for operand of such type");
6662 case unsupported_masking
:
6663 err_msg
= _("unsupported masking");
6665 case mask_not_on_destination
:
6666 err_msg
= _("mask not on destination operand");
6668 case no_default_mask
:
6669 err_msg
= _("default mask isn't allowed");
6671 case unsupported_rc_sae
:
6672 err_msg
= _("unsupported static rounding/sae");
6674 case rc_sae_operand_not_last_imm
:
6676 err_msg
= _("RC/SAE operand must precede immediate operands");
6678 err_msg
= _("RC/SAE operand must follow immediate operands");
6680 case invalid_register_operand
:
6681 err_msg
= _("invalid register operand");
6684 as_bad (_("%s for `%s'"), err_msg
,
6685 current_templates
->start
->name
);
6689 if (!quiet_warnings
)
6692 && (i
.jumpabsolute
!= (t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)))
6693 as_warn (_("indirect %s without `*'"), t
->name
);
6695 if (t
->opcode_modifier
.isprefix
6696 && t
->opcode_modifier
.mnemonicsize
== IGNORESIZE
)
6698 /* Warn them that a data or address size prefix doesn't
6699 affect assembly of the next line of code. */
6700 as_warn (_("stand-alone `%s' prefix"), t
->name
);
6704 /* Copy the template we found. */
6705 install_template (t
);
6707 if (addr_prefix_disp
!= -1)
6708 i
.tm
.operand_types
[addr_prefix_disp
]
6709 = operand_types
[addr_prefix_disp
];
6711 if (found_reverse_match
)
6713 /* If we found a reverse match we must alter the opcode direction
6714 bit and clear/flip the regmem modifier one. found_reverse_match
6715 holds bits to change (different for int & float insns). */
6717 i
.tm
.base_opcode
^= found_reverse_match
;
6719 i
.tm
.operand_types
[0] = operand_types
[i
.operands
- 1];
6720 i
.tm
.operand_types
[i
.operands
- 1] = operand_types
[0];
6722 /* Certain SIMD insns have their load forms specified in the opcode
6723 table, and hence we need to _set_ RegMem instead of clearing it.
6724 We need to avoid setting the bit though on insns like KMOVW. */
6725 i
.tm
.opcode_modifier
.regmem
6726 = i
.tm
.opcode_modifier
.modrm
&& i
.tm
.opcode_modifier
.d
6727 && i
.tm
.operands
> 2U - i
.tm
.opcode_modifier
.sse2avx
6728 && !i
.tm
.opcode_modifier
.regmem
;
6737 unsigned int es_op
= i
.tm
.opcode_modifier
.isstring
- IS_STRING_ES_OP0
;
6738 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.baseindex
? es_op
: 0;
6740 if (i
.seg
[op
] != NULL
&& i
.seg
[op
] != &es
)
6742 as_bad (_("`%s' operand %u must use `%ses' segment"),
6744 intel_syntax
? i
.tm
.operands
- es_op
: es_op
+ 1,
6749 /* There's only ever one segment override allowed per instruction.
6750 This instruction possibly has a legal segment override on the
6751 second operand, so copy the segment to where non-string
6752 instructions store it, allowing common code. */
6753 i
.seg
[op
] = i
.seg
[1];
6759 process_suffix (void)
6761 bfd_boolean is_crc32
= FALSE
, is_movx
= FALSE
;
6763 /* If matched instruction specifies an explicit instruction mnemonic
6765 if (i
.tm
.opcode_modifier
.size
== SIZE16
)
6766 i
.suffix
= WORD_MNEM_SUFFIX
;
6767 else if (i
.tm
.opcode_modifier
.size
== SIZE32
)
6768 i
.suffix
= LONG_MNEM_SUFFIX
;
6769 else if (i
.tm
.opcode_modifier
.size
== SIZE64
)
6770 i
.suffix
= QWORD_MNEM_SUFFIX
;
6771 else if (i
.reg_operands
6772 && (i
.operands
> 1 || i
.types
[0].bitfield
.class == Reg
)
6773 && !i
.tm
.opcode_modifier
.addrprefixopreg
)
6775 unsigned int numop
= i
.operands
;
6778 is_movx
= (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
6779 && (i
.tm
.base_opcode
| 8) == 0xbe)
6780 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
6781 && i
.tm
.base_opcode
== 0x63
6782 && i
.tm
.cpu_flags
.bitfield
.cpu64
);
6785 is_crc32
= (i
.tm
.base_opcode
== 0xf0
6786 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
6787 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
);
6789 /* movsx/movzx want only their source operand considered here, for the
6790 ambiguity checking below. The suffix will be replaced afterwards
6791 to represent the destination (register). */
6792 if (is_movx
&& (i
.tm
.opcode_modifier
.w
|| i
.tm
.base_opcode
== 0x63))
6795 /* crc32 needs REX.W set regardless of suffix / source operand size. */
6796 if (is_crc32
&& i
.tm
.operand_types
[1].bitfield
.qword
)
6799 /* If there's no instruction mnemonic suffix we try to invent one
6800 based on GPR operands. */
6803 /* We take i.suffix from the last register operand specified,
6804 Destination register type is more significant than source
6805 register type. crc32 in SSE4.2 prefers source register
6807 unsigned int op
= is_crc32
? 1 : i
.operands
;
6810 if (i
.tm
.operand_types
[op
].bitfield
.instance
== InstanceNone
6811 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6813 if (i
.types
[op
].bitfield
.class != Reg
)
6815 if (i
.types
[op
].bitfield
.byte
)
6816 i
.suffix
= BYTE_MNEM_SUFFIX
;
6817 else if (i
.types
[op
].bitfield
.word
)
6818 i
.suffix
= WORD_MNEM_SUFFIX
;
6819 else if (i
.types
[op
].bitfield
.dword
)
6820 i
.suffix
= LONG_MNEM_SUFFIX
;
6821 else if (i
.types
[op
].bitfield
.qword
)
6822 i
.suffix
= QWORD_MNEM_SUFFIX
;
6828 /* As an exception, movsx/movzx silently default to a byte source
6830 if (is_movx
&& i
.tm
.opcode_modifier
.w
&& !i
.suffix
&& !intel_syntax
)
6831 i
.suffix
= BYTE_MNEM_SUFFIX
;
6833 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
6836 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
6837 && i
.tm
.opcode_modifier
.no_bsuf
)
6839 else if (!check_byte_reg ())
6842 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
6845 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
6846 && i
.tm
.opcode_modifier
.no_lsuf
6847 && !i
.tm
.opcode_modifier
.todword
6848 && !i
.tm
.opcode_modifier
.toqword
)
6850 else if (!check_long_reg ())
6853 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
6856 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
6857 && i
.tm
.opcode_modifier
.no_qsuf
6858 && !i
.tm
.opcode_modifier
.todword
6859 && !i
.tm
.opcode_modifier
.toqword
)
6861 else if (!check_qword_reg ())
6864 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
6867 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
6868 && i
.tm
.opcode_modifier
.no_wsuf
)
6870 else if (!check_word_reg ())
6873 else if (intel_syntax
6874 && i
.tm
.opcode_modifier
.mnemonicsize
== IGNORESIZE
)
6875 /* Do nothing if the instruction is going to ignore the prefix. */
6880 /* Undo the movsx/movzx change done above. */
6883 else if (i
.tm
.opcode_modifier
.mnemonicsize
== DEFAULTSIZE
6886 i
.suffix
= stackop_size
;
6887 if (stackop_size
== LONG_MNEM_SUFFIX
)
6889 /* stackop_size is set to LONG_MNEM_SUFFIX for the
6890 .code16gcc directive to support 16-bit mode with
6891 32-bit address. For IRET without a suffix, generate
6892 16-bit IRET (opcode 0xcf) to return from an interrupt
6894 if (i
.tm
.base_opcode
== 0xcf)
6896 i
.suffix
= WORD_MNEM_SUFFIX
;
6897 as_warn (_("generating 16-bit `iret' for .code16gcc directive"));
6899 /* Warn about changed behavior for segment register push/pop. */
6900 else if ((i
.tm
.base_opcode
| 1) == 0x07)
6901 as_warn (_("generating 32-bit `%s', unlike earlier gas versions"),
6906 && (i
.tm
.opcode_modifier
.jump
== JUMP_ABSOLUTE
6907 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
6908 || i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
6909 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
6910 && i
.tm
.base_opcode
== 0x01 /* [ls][gi]dt */
6911 && i
.tm
.extension_opcode
<= 3)))
6916 if (!i
.tm
.opcode_modifier
.no_qsuf
)
6918 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
6919 || i
.tm
.opcode_modifier
.no_lsuf
)
6920 i
.suffix
= QWORD_MNEM_SUFFIX
;
6925 if (!i
.tm
.opcode_modifier
.no_lsuf
)
6926 i
.suffix
= LONG_MNEM_SUFFIX
;
6929 if (!i
.tm
.opcode_modifier
.no_wsuf
)
6930 i
.suffix
= WORD_MNEM_SUFFIX
;
6936 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
6937 /* Also cover lret/retf/iret in 64-bit mode. */
6938 || (flag_code
== CODE_64BIT
6939 && !i
.tm
.opcode_modifier
.no_lsuf
6940 && !i
.tm
.opcode_modifier
.no_qsuf
))
6941 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
6942 /* Explicit sizing prefixes are assumed to disambiguate insns. */
6943 && !i
.prefix
[DATA_PREFIX
] && !(i
.prefix
[REX_PREFIX
] & REX_W
)
6944 /* Accept FLDENV et al without suffix. */
6945 && (i
.tm
.opcode_modifier
.no_ssuf
|| i
.tm
.opcode_modifier
.floatmf
))
6947 unsigned int suffixes
, evex
= 0;
6949 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
6950 if (!i
.tm
.opcode_modifier
.no_wsuf
)
6952 if (!i
.tm
.opcode_modifier
.no_lsuf
)
6954 if (!i
.tm
.opcode_modifier
.no_ldsuf
)
6956 if (!i
.tm
.opcode_modifier
.no_ssuf
)
6958 if (flag_code
== CODE_64BIT
&& !i
.tm
.opcode_modifier
.no_qsuf
)
6961 /* For [XYZ]MMWORD operands inspect operand sizes. While generally
6962 also suitable for AT&T syntax mode, it was requested that this be
6963 restricted to just Intel syntax. */
6964 if (intel_syntax
&& is_any_vex_encoding (&i
.tm
) && !i
.broadcast
)
6968 for (op
= 0; op
< i
.tm
.operands
; ++op
)
6970 if (is_evex_encoding (&i
.tm
)
6971 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
6973 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
6974 i
.tm
.operand_types
[op
].bitfield
.xmmword
= 0;
6975 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
6976 i
.tm
.operand_types
[op
].bitfield
.ymmword
= 0;
6977 if (!i
.tm
.opcode_modifier
.evex
6978 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
6979 i
.tm
.opcode_modifier
.evex
= EVEX512
;
6982 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
6983 + i
.tm
.operand_types
[op
].bitfield
.ymmword
6984 + i
.tm
.operand_types
[op
].bitfield
.zmmword
< 2)
6987 /* Any properly sized operand disambiguates the insn. */
6988 if (i
.types
[op
].bitfield
.xmmword
6989 || i
.types
[op
].bitfield
.ymmword
6990 || i
.types
[op
].bitfield
.zmmword
)
6992 suffixes
&= ~(7 << 6);
6997 if ((i
.flags
[op
] & Operand_Mem
)
6998 && i
.tm
.operand_types
[op
].bitfield
.unspecified
)
7000 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
)
7002 if (i
.tm
.operand_types
[op
].bitfield
.ymmword
)
7004 if (i
.tm
.operand_types
[op
].bitfield
.zmmword
)
7006 if (is_evex_encoding (&i
.tm
))
7012 /* Are multiple suffixes / operand sizes allowed? */
7013 if (suffixes
& (suffixes
- 1))
7016 && (i
.tm
.opcode_modifier
.mnemonicsize
!= DEFAULTSIZE
7017 || operand_check
== check_error
))
7019 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
7022 if (operand_check
== check_error
)
7024 as_bad (_("no instruction mnemonic suffix given and "
7025 "no register operands; can't size `%s'"), i
.tm
.name
);
7028 if (operand_check
== check_warning
)
7029 as_warn (_("%s; using default for `%s'"),
7031 ? _("ambiguous operand size")
7032 : _("no instruction mnemonic suffix given and "
7033 "no register operands"),
7036 if (i
.tm
.opcode_modifier
.floatmf
)
7037 i
.suffix
= SHORT_MNEM_SUFFIX
;
7039 /* handled below */;
7041 i
.tm
.opcode_modifier
.evex
= evex
;
7042 else if (flag_code
== CODE_16BIT
)
7043 i
.suffix
= WORD_MNEM_SUFFIX
;
7044 else if (!i
.tm
.opcode_modifier
.no_lsuf
)
7045 i
.suffix
= LONG_MNEM_SUFFIX
;
7047 i
.suffix
= QWORD_MNEM_SUFFIX
;
7053 /* In Intel syntax, movsx/movzx must have a "suffix" (checked above).
7054 In AT&T syntax, if there is no suffix (warned about above), the default
7055 will be byte extension. */
7056 if (i
.tm
.opcode_modifier
.w
&& i
.suffix
&& i
.suffix
!= BYTE_MNEM_SUFFIX
)
7057 i
.tm
.base_opcode
|= 1;
7059 /* For further processing, the suffix should represent the destination
7060 (register). This is already the case when one was used with
7061 mov[sz][bw]*, but we need to replace it for mov[sz]x, or if there was
7062 no suffix to begin with. */
7063 if (i
.tm
.opcode_modifier
.w
|| i
.tm
.base_opcode
== 0x63 || !i
.suffix
)
7065 if (i
.types
[1].bitfield
.word
)
7066 i
.suffix
= WORD_MNEM_SUFFIX
;
7067 else if (i
.types
[1].bitfield
.qword
)
7068 i
.suffix
= QWORD_MNEM_SUFFIX
;
7070 i
.suffix
= LONG_MNEM_SUFFIX
;
7072 i
.tm
.opcode_modifier
.w
= 0;
7076 if (!i
.tm
.opcode_modifier
.modrm
&& i
.reg_operands
&& i
.tm
.operands
< 3)
7077 i
.short_form
= (i
.tm
.operand_types
[0].bitfield
.class == Reg
)
7078 != (i
.tm
.operand_types
[1].bitfield
.class == Reg
);
7080 /* Change the opcode based on the operand size given by i.suffix. */
7083 /* Size floating point instruction. */
7084 case LONG_MNEM_SUFFIX
:
7085 if (i
.tm
.opcode_modifier
.floatmf
)
7087 i
.tm
.base_opcode
^= 4;
7091 case WORD_MNEM_SUFFIX
:
7092 case QWORD_MNEM_SUFFIX
:
7093 /* It's not a byte, select word/dword operation. */
7094 if (i
.tm
.opcode_modifier
.w
)
7097 i
.tm
.base_opcode
|= 8;
7099 i
.tm
.base_opcode
|= 1;
7102 case SHORT_MNEM_SUFFIX
:
7103 /* Now select between word & dword operations via the operand
7104 size prefix, except for instructions that will ignore this
7106 if (i
.suffix
!= QWORD_MNEM_SUFFIX
7107 && i
.tm
.opcode_modifier
.mnemonicsize
!= IGNORESIZE
7108 && !i
.tm
.opcode_modifier
.floatmf
7109 && !is_any_vex_encoding (&i
.tm
)
7110 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
7111 || (flag_code
== CODE_64BIT
7112 && i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)))
7114 unsigned int prefix
= DATA_PREFIX_OPCODE
;
7116 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
) /* jcxz, loop */
7117 prefix
= ADDR_PREFIX_OPCODE
;
7119 if (!add_prefix (prefix
))
7123 /* Set mode64 for an operand. */
7124 if (i
.suffix
== QWORD_MNEM_SUFFIX
7125 && flag_code
== CODE_64BIT
7126 && !i
.tm
.opcode_modifier
.norex64
7127 && !i
.tm
.opcode_modifier
.vexw
7128 /* Special case for xchg %rax,%rax. It is NOP and doesn't
7130 && ! (i
.operands
== 2
7131 && i
.tm
.base_opcode
== 0x90
7132 && i
.tm
.extension_opcode
== None
7133 && i
.types
[0].bitfield
.instance
== Accum
7134 && i
.types
[0].bitfield
.qword
7135 && i
.types
[1].bitfield
.instance
== Accum
7136 && i
.types
[1].bitfield
.qword
))
7142 /* Select word/dword/qword operation with explicit data sizing prefix
7143 when there are no suitable register operands. */
7144 if (i
.tm
.opcode_modifier
.w
7145 && (i
.prefix
[DATA_PREFIX
] || (i
.prefix
[REX_PREFIX
] & REX_W
))
7147 || (i
.reg_operands
== 1
7149 && (i
.tm
.operand_types
[0].bitfield
.instance
== RegC
7151 || i
.tm
.operand_types
[0].bitfield
.instance
== RegD
7152 || i
.tm
.operand_types
[1].bitfield
.instance
== RegD
7155 i
.tm
.base_opcode
|= 1;
7159 if (i
.tm
.opcode_modifier
.addrprefixopreg
)
7161 gas_assert (!i
.suffix
);
7162 gas_assert (i
.reg_operands
);
7164 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7167 /* The address size override prefix changes the size of the
7169 if (flag_code
== CODE_64BIT
7170 && i
.op
[0].regs
->reg_type
.bitfield
.word
)
7172 as_bad (_("16-bit addressing unavailable for `%s'"),
7177 if ((flag_code
== CODE_32BIT
7178 ? i
.op
[0].regs
->reg_type
.bitfield
.word
7179 : i
.op
[0].regs
->reg_type
.bitfield
.dword
)
7180 && !add_prefix (ADDR_PREFIX_OPCODE
))
7185 /* Check invalid register operand when the address size override
7186 prefix changes the size of register operands. */
7188 enum { need_word
, need_dword
, need_qword
} need
;
7190 /* Check the register operand for the address size prefix if
7191 the memory operand has no real registers, like symbol, DISP
7192 or bogus (x32-only) symbol(%rip) when symbol(%eip) is meant. */
7193 if (i
.mem_operands
== 1
7194 && i
.reg_operands
== 1
7196 && i
.types
[1].bitfield
.class == Reg
7197 && (flag_code
== CODE_32BIT
7198 ? i
.op
[1].regs
->reg_type
.bitfield
.word
7199 : i
.op
[1].regs
->reg_type
.bitfield
.dword
)
7200 && ((i
.base_reg
== NULL
&& i
.index_reg
== NULL
)
7201 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7202 || (x86_elf_abi
== X86_64_X32_ABI
7204 && i
.base_reg
->reg_num
== RegIP
7205 && i
.base_reg
->reg_type
.bitfield
.qword
))
7209 && !add_prefix (ADDR_PREFIX_OPCODE
))
7212 if (flag_code
== CODE_32BIT
)
7213 need
= i
.prefix
[ADDR_PREFIX
] ? need_word
: need_dword
;
7214 else if (i
.prefix
[ADDR_PREFIX
])
7217 need
= flag_code
== CODE_64BIT
? need_qword
: need_word
;
7219 for (op
= 0; op
< i
.operands
; op
++)
7221 if (i
.types
[op
].bitfield
.class != Reg
)
7227 if (i
.op
[op
].regs
->reg_type
.bitfield
.word
)
7231 if (i
.op
[op
].regs
->reg_type
.bitfield
.dword
)
7235 if (i
.op
[op
].regs
->reg_type
.bitfield
.qword
)
7240 as_bad (_("invalid register operand size for `%s'"),
7251 check_byte_reg (void)
7255 for (op
= i
.operands
; --op
>= 0;)
7257 /* Skip non-register operands. */
7258 if (i
.types
[op
].bitfield
.class != Reg
)
7261 /* If this is an eight bit register, it's OK. If it's the 16 or
7262 32 bit version of an eight bit register, we will just use the
7263 low portion, and that's OK too. */
7264 if (i
.types
[op
].bitfield
.byte
)
7267 /* I/O port address operands are OK too. */
7268 if (i
.tm
.operand_types
[op
].bitfield
.instance
== RegD
7269 && i
.tm
.operand_types
[op
].bitfield
.word
)
7272 /* crc32 only wants its source operand checked here. */
7273 if (i
.tm
.base_opcode
== 0xf0
7274 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
7275 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_0XF2
7279 /* Any other register is bad. */
7280 as_bad (_("`%s%s' not allowed with `%s%c'"),
7281 register_prefix
, i
.op
[op
].regs
->reg_name
,
7282 i
.tm
.name
, i
.suffix
);
7289 check_long_reg (void)
7293 for (op
= i
.operands
; --op
>= 0;)
7294 /* Skip non-register operands. */
7295 if (i
.types
[op
].bitfield
.class != Reg
)
7297 /* Reject eight bit registers, except where the template requires
7298 them. (eg. movzb) */
7299 else if (i
.types
[op
].bitfield
.byte
7300 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7301 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7302 && (i
.tm
.operand_types
[op
].bitfield
.word
7303 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7305 as_bad (_("`%s%s' not allowed with `%s%c'"),
7307 i
.op
[op
].regs
->reg_name
,
7312 /* Error if the e prefix on a general reg is missing. */
7313 else if (i
.types
[op
].bitfield
.word
7314 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7315 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7316 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7318 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7319 register_prefix
, i
.op
[op
].regs
->reg_name
,
7323 /* Warn if the r prefix on a general reg is present. */
7324 else if (i
.types
[op
].bitfield
.qword
7325 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7326 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7327 && i
.tm
.operand_types
[op
].bitfield
.dword
)
7330 && i
.tm
.opcode_modifier
.toqword
7331 && i
.types
[0].bitfield
.class != RegSIMD
)
7333 /* Convert to QWORD. We want REX byte. */
7334 i
.suffix
= QWORD_MNEM_SUFFIX
;
7338 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7339 register_prefix
, i
.op
[op
].regs
->reg_name
,
7348 check_qword_reg (void)
7352 for (op
= i
.operands
; --op
>= 0; )
7353 /* Skip non-register operands. */
7354 if (i
.types
[op
].bitfield
.class != Reg
)
7356 /* Reject eight bit registers, except where the template requires
7357 them. (eg. movzb) */
7358 else if (i
.types
[op
].bitfield
.byte
7359 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7360 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7361 && (i
.tm
.operand_types
[op
].bitfield
.word
7362 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7364 as_bad (_("`%s%s' not allowed with `%s%c'"),
7366 i
.op
[op
].regs
->reg_name
,
7371 /* Warn if the r prefix on a general reg is missing. */
7372 else if ((i
.types
[op
].bitfield
.word
7373 || i
.types
[op
].bitfield
.dword
)
7374 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7375 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7376 && i
.tm
.operand_types
[op
].bitfield
.qword
)
7378 /* Prohibit these changes in the 64bit mode, since the
7379 lowering is more complicated. */
7381 && i
.tm
.opcode_modifier
.todword
7382 && i
.types
[0].bitfield
.class != RegSIMD
)
7384 /* Convert to DWORD. We don't want REX byte. */
7385 i
.suffix
= LONG_MNEM_SUFFIX
;
7389 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7390 register_prefix
, i
.op
[op
].regs
->reg_name
,
7399 check_word_reg (void)
7402 for (op
= i
.operands
; --op
>= 0;)
7403 /* Skip non-register operands. */
7404 if (i
.types
[op
].bitfield
.class != Reg
)
7406 /* Reject eight bit registers, except where the template requires
7407 them. (eg. movzb) */
7408 else if (i
.types
[op
].bitfield
.byte
7409 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7410 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7411 && (i
.tm
.operand_types
[op
].bitfield
.word
7412 || i
.tm
.operand_types
[op
].bitfield
.dword
))
7414 as_bad (_("`%s%s' not allowed with `%s%c'"),
7416 i
.op
[op
].regs
->reg_name
,
7421 /* Error if the e or r prefix on a general reg is present. */
7422 else if ((i
.types
[op
].bitfield
.dword
7423 || i
.types
[op
].bitfield
.qword
)
7424 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
7425 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
7426 && i
.tm
.operand_types
[op
].bitfield
.word
)
7428 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7429 register_prefix
, i
.op
[op
].regs
->reg_name
,
7437 update_imm (unsigned int j
)
7439 i386_operand_type overlap
= i
.types
[j
];
7440 if ((overlap
.bitfield
.imm8
7441 || overlap
.bitfield
.imm8s
7442 || overlap
.bitfield
.imm16
7443 || overlap
.bitfield
.imm32
7444 || overlap
.bitfield
.imm32s
7445 || overlap
.bitfield
.imm64
)
7446 && !operand_type_equal (&overlap
, &imm8
)
7447 && !operand_type_equal (&overlap
, &imm8s
)
7448 && !operand_type_equal (&overlap
, &imm16
)
7449 && !operand_type_equal (&overlap
, &imm32
)
7450 && !operand_type_equal (&overlap
, &imm32s
)
7451 && !operand_type_equal (&overlap
, &imm64
))
7455 i386_operand_type temp
;
7457 operand_type_set (&temp
, 0);
7458 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
7460 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
7461 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
7463 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
7464 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
7465 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
7467 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
7468 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
7471 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
7474 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
7475 || operand_type_equal (&overlap
, &imm16_32
)
7476 || operand_type_equal (&overlap
, &imm16_32s
))
7478 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
7483 else if (i
.prefix
[REX_PREFIX
] & REX_W
)
7484 overlap
= operand_type_and (overlap
, imm32s
);
7485 else if (i
.prefix
[DATA_PREFIX
])
7486 overlap
= operand_type_and (overlap
,
7487 flag_code
!= CODE_16BIT
? imm16
: imm32
);
7488 if (!operand_type_equal (&overlap
, &imm8
)
7489 && !operand_type_equal (&overlap
, &imm8s
)
7490 && !operand_type_equal (&overlap
, &imm16
)
7491 && !operand_type_equal (&overlap
, &imm32
)
7492 && !operand_type_equal (&overlap
, &imm32s
)
7493 && !operand_type_equal (&overlap
, &imm64
))
7495 as_bad (_("no instruction mnemonic suffix given; "
7496 "can't determine immediate size"));
7500 i
.types
[j
] = overlap
;
7510 /* Update the first 2 immediate operands. */
7511 n
= i
.operands
> 2 ? 2 : i
.operands
;
7514 for (j
= 0; j
< n
; j
++)
7515 if (update_imm (j
) == 0)
7518 /* The 3rd operand can't be immediate operand. */
7519 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
7526 process_operands (void)
7528 /* Default segment register this instruction will use for memory
7529 accesses. 0 means unknown. This is only for optimizing out
7530 unnecessary segment overrides. */
7531 const seg_entry
*default_seg
= 0;
7533 if (i
.tm
.opcode_modifier
.sse2avx
)
7535 /* Legacy encoded insns allow explicit REX prefixes, so these prefixes
7537 i
.rex
|= i
.prefix
[REX_PREFIX
] & (REX_W
| REX_R
| REX_X
| REX_B
);
7538 i
.prefix
[REX_PREFIX
] = 0;
7541 /* ImmExt should be processed after SSE2AVX. */
7542 else if (i
.tm
.opcode_modifier
.immext
)
7545 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
7547 unsigned int dupl
= i
.operands
;
7548 unsigned int dest
= dupl
- 1;
7551 /* The destination must be an xmm register. */
7552 gas_assert (i
.reg_operands
7553 && MAX_OPERANDS
> dupl
7554 && operand_type_equal (&i
.types
[dest
], ®xmm
));
7556 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7557 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7559 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
7561 /* Keep xmm0 for instructions with VEX prefix and 3
7563 i
.tm
.operand_types
[0].bitfield
.instance
= InstanceNone
;
7564 i
.tm
.operand_types
[0].bitfield
.class = RegSIMD
;
7569 /* We remove the first xmm0 and keep the number of
7570 operands unchanged, which in fact duplicates the
7572 for (j
= 1; j
< i
.operands
; j
++)
7574 i
.op
[j
- 1] = i
.op
[j
];
7575 i
.types
[j
- 1] = i
.types
[j
];
7576 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7577 i
.flags
[j
- 1] = i
.flags
[j
];
7581 else if (i
.tm
.opcode_modifier
.implicit1stxmm0
)
7583 gas_assert ((MAX_OPERANDS
- 1) > dupl
7584 && (i
.tm
.opcode_modifier
.vexsources
7587 /* Add the implicit xmm0 for instructions with VEX prefix
7589 for (j
= i
.operands
; j
> 0; j
--)
7591 i
.op
[j
] = i
.op
[j
- 1];
7592 i
.types
[j
] = i
.types
[j
- 1];
7593 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
7594 i
.flags
[j
] = i
.flags
[j
- 1];
7597 = (const reg_entry
*) str_hash_find (reg_hash
, "xmm0");
7598 i
.types
[0] = regxmm
;
7599 i
.tm
.operand_types
[0] = regxmm
;
7602 i
.reg_operands
+= 2;
7607 i
.op
[dupl
] = i
.op
[dest
];
7608 i
.types
[dupl
] = i
.types
[dest
];
7609 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7610 i
.flags
[dupl
] = i
.flags
[dest
];
7619 i
.op
[dupl
] = i
.op
[dest
];
7620 i
.types
[dupl
] = i
.types
[dest
];
7621 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7622 i
.flags
[dupl
] = i
.flags
[dest
];
7625 if (i
.tm
.opcode_modifier
.immext
)
7628 else if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7629 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7633 for (j
= 1; j
< i
.operands
; j
++)
7635 i
.op
[j
- 1] = i
.op
[j
];
7636 i
.types
[j
- 1] = i
.types
[j
];
7638 /* We need to adjust fields in i.tm since they are used by
7639 build_modrm_byte. */
7640 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7642 i
.flags
[j
- 1] = i
.flags
[j
];
7649 else if (i
.tm
.opcode_modifier
.implicitquadgroup
)
7651 unsigned int regnum
, first_reg_in_group
, last_reg_in_group
;
7653 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
7654 gas_assert (i
.operands
>= 2 && i
.types
[1].bitfield
.class == RegSIMD
);
7655 regnum
= register_number (i
.op
[1].regs
);
7656 first_reg_in_group
= regnum
& ~3;
7657 last_reg_in_group
= first_reg_in_group
+ 3;
7658 if (regnum
!= first_reg_in_group
)
7659 as_warn (_("source register `%s%s' implicitly denotes"
7660 " `%s%.3s%u' to `%s%.3s%u' source group in `%s'"),
7661 register_prefix
, i
.op
[1].regs
->reg_name
,
7662 register_prefix
, i
.op
[1].regs
->reg_name
, first_reg_in_group
,
7663 register_prefix
, i
.op
[1].regs
->reg_name
, last_reg_in_group
,
7666 else if (i
.tm
.opcode_modifier
.regkludge
)
7668 /* The imul $imm, %reg instruction is converted into
7669 imul $imm, %reg, %reg, and the clr %reg instruction
7670 is converted into xor %reg, %reg. */
7672 unsigned int first_reg_op
;
7674 if (operand_type_check (i
.types
[0], reg
))
7678 /* Pretend we saw the extra register operand. */
7679 gas_assert (i
.reg_operands
== 1
7680 && i
.op
[first_reg_op
+ 1].regs
== 0);
7681 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
7682 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
7687 if (i
.tm
.opcode_modifier
.modrm
)
7689 /* The opcode is completed (modulo i.tm.extension_opcode which
7690 must be put into the modrm byte). Now, we make the modrm and
7691 index base bytes based on all the info we've collected. */
7693 default_seg
= build_modrm_byte ();
7695 else if (i
.types
[0].bitfield
.class == SReg
)
7697 if (flag_code
!= CODE_64BIT
7698 ? i
.tm
.base_opcode
== POP_SEG_SHORT
7699 && i
.op
[0].regs
->reg_num
== 1
7700 : (i
.tm
.base_opcode
| 1) == (POP_SEG386_SHORT
& 0xff)
7701 && i
.op
[0].regs
->reg_num
< 4)
7703 as_bad (_("you can't `%s %s%s'"),
7704 i
.tm
.name
, register_prefix
, i
.op
[0].regs
->reg_name
);
7707 if (i
.op
[0].regs
->reg_num
> 3
7708 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
)
7710 i
.tm
.base_opcode
^= (POP_SEG_SHORT
^ POP_SEG386_SHORT
) & 0xff;
7711 i
.tm
.opcode_modifier
.opcodespace
= SPACE_0F
;
7713 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
7715 else if (i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
7716 && (i
.tm
.base_opcode
& ~3) == MOV_AX_DISP32
)
7720 else if (i
.tm
.opcode_modifier
.isstring
)
7722 /* For the string instructions that allow a segment override
7723 on one of their operands, the default segment is ds. */
7726 else if (i
.short_form
)
7728 /* The register or float register operand is in operand
7730 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.class != Reg
;
7732 /* Register goes in low 3 bits of opcode. */
7733 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
7734 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7736 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
7738 /* Warn about some common errors, but press on regardless.
7739 The first case can be generated by gcc (<= 2.8.1). */
7740 if (i
.operands
== 2)
7742 /* Reversed arguments on faddp, fsubp, etc. */
7743 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
7744 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
7745 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
7749 /* Extraneous `l' suffix on fp insn. */
7750 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
7751 register_prefix
, i
.op
[0].regs
->reg_name
);
7756 if ((i
.seg
[0] || i
.prefix
[SEG_PREFIX
])
7757 && i
.tm
.base_opcode
== 0x8d /* lea */
7758 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
7759 && !is_any_vex_encoding(&i
.tm
))
7761 if (!quiet_warnings
)
7762 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
7766 i
.prefix
[SEG_PREFIX
] = 0;
7770 /* If a segment was explicitly specified, and the specified segment
7771 is neither the default nor the one already recorded from a prefix,
7772 use an opcode prefix to select it. If we never figured out what
7773 the default segment is, then default_seg will be zero at this
7774 point, and the specified segment prefix will always be used. */
7776 && i
.seg
[0] != default_seg
7777 && i
.seg
[0]->seg_prefix
!= i
.prefix
[SEG_PREFIX
])
7779 if (!add_prefix (i
.seg
[0]->seg_prefix
))
/* Record the REX/VREX encoding attributes required by register R in the
   global instruction state `i'.  REX_BIT is the REX bit (REX_B, REX_X or
   REX_R) that R occupies in the encoding being built.  DO_SSE2AVX is set
   when a legacy SSE template is being converted to its AVX form.  */
static INLINE void set_rex_vrex (const reg_entry *r, unsigned int rex_bit,
				 bfd_boolean do_sse2avx)
{
  if (r->reg_flags & RegRex)
    {
      /* R is one of the extended registers (needs a REX bit).  Diagnose
	 the case where the same bit was already established.  */
      if (i.rex & rex_bit)
	as_bad (_("same type of prefix used twice"));
      i.rex |= rex_bit;
    }
  else if (do_sse2avx && (i.rex & rex_bit) && i.vex.register_specifier)
    {
      /* SSE2AVX conversion: a REX bit set by an explicit prefix has to be
	 folded into the VEX register specifier instead (selecting the
	 upper register bank, i.e. regs 8..15).  */
      gas_assert (i.vex.register_specifier == r);
      i.vex.register_specifier += 8;
    }

  /* Registers 16..31 additionally need the corresponding EVEX
     extension bit.  */
  if (r->reg_flags & RegVRex)
    i.vrex |= rex_bit;
}
7804 static const seg_entry
*
7805 build_modrm_byte (void)
7807 const seg_entry
*default_seg
= 0;
7808 unsigned int source
, dest
;
7811 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
7814 unsigned int nds
, reg_slot
;
7817 dest
= i
.operands
- 1;
7820 /* There are 2 kinds of instructions:
7821 1. 5 operands: 4 register operands or 3 register operands
7822 plus 1 memory operand plus one Imm4 operand, VexXDS, and
7823 VexW0 or VexW1. The destination must be either XMM, YMM or
7825 2. 4 operands: 4 register operands or 3 register operands
7826 plus 1 memory operand, with VexXDS. */
7827 gas_assert ((i
.reg_operands
== 4
7828 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
7829 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7830 && i
.tm
.opcode_modifier
.vexw
7831 && i
.tm
.operand_types
[dest
].bitfield
.class == RegSIMD
);
7833 /* If VexW1 is set, the first non-immediate operand is the source and
7834 the second non-immediate one is encoded in the immediate operand. */
7835 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
7837 source
= i
.imm_operands
;
7838 reg_slot
= i
.imm_operands
+ 1;
7842 source
= i
.imm_operands
+ 1;
7843 reg_slot
= i
.imm_operands
;
7846 if (i
.imm_operands
== 0)
7848 /* When there is no immediate operand, generate an 8bit
7849 immediate operand to encode the first operand. */
7850 exp
= &im_expressions
[i
.imm_operands
++];
7851 i
.op
[i
.operands
].imms
= exp
;
7852 i
.types
[i
.operands
] = imm8
;
7855 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
7856 exp
->X_op
= O_constant
;
7857 exp
->X_add_number
= register_number (i
.op
[reg_slot
].regs
) << 4;
7858 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
7862 gas_assert (i
.imm_operands
== 1);
7863 gas_assert (fits_in_imm4 (i
.op
[0].imms
->X_add_number
));
7864 gas_assert (!i
.tm
.opcode_modifier
.immext
);
7866 /* Turn on Imm8 again so that output_imm will generate it. */
7867 i
.types
[0].bitfield
.imm8
= 1;
7869 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
7870 i
.op
[0].imms
->X_add_number
7871 |= register_number (i
.op
[reg_slot
].regs
) << 4;
7872 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
7875 gas_assert (i
.tm
.operand_types
[nds
].bitfield
.class == RegSIMD
);
7876 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
7881 /* i.reg_operands MUST be the number of real register operands;
7882 implicit registers do not count. If there are 3 register
7883 operands, it must be a instruction with VexNDS. For a
7884 instruction with VexNDD, the destination register is encoded
7885 in VEX prefix. If there are 4 register operands, it must be
7886 a instruction with VEX prefix and 3 sources. */
7887 if (i
.mem_operands
== 0
7888 && ((i
.reg_operands
== 2
7889 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
7890 || (i
.reg_operands
== 3
7891 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7892 || (i
.reg_operands
== 4 && vex_3_sources
)))
7900 /* When there are 3 operands, one of them may be immediate,
7901 which may be the first or the last operand. Otherwise,
7902 the first operand must be shift count register (cl) or it
7903 is an instruction with VexNDS. */
7904 gas_assert (i
.imm_operands
== 1
7905 || (i
.imm_operands
== 0
7906 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7907 || (i
.types
[0].bitfield
.instance
== RegC
7908 && i
.types
[0].bitfield
.byte
))));
7909 if (operand_type_check (i
.types
[0], imm
)
7910 || (i
.types
[0].bitfield
.instance
== RegC
7911 && i
.types
[0].bitfield
.byte
))
7917 /* When there are 4 operands, the first two must be 8bit
7918 immediate operands. The source operand will be the 3rd
7921 For instructions with VexNDS, if the first operand
7922 an imm8, the source operand is the 2nd one. If the last
7923 operand is imm8, the source operand is the first one. */
7924 gas_assert ((i
.imm_operands
== 2
7925 && i
.types
[0].bitfield
.imm8
7926 && i
.types
[1].bitfield
.imm8
)
7927 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7928 && i
.imm_operands
== 1
7929 && (i
.types
[0].bitfield
.imm8
7930 || i
.types
[i
.operands
- 1].bitfield
.imm8
7932 if (i
.imm_operands
== 2)
7936 if (i
.types
[0].bitfield
.imm8
)
7943 if (is_evex_encoding (&i
.tm
))
7945 /* For EVEX instructions, when there are 5 operands, the
7946 first one must be immediate operand. If the second one
7947 is immediate operand, the source operand is the 3th
7948 one. If the last one is immediate operand, the source
7949 operand is the 2nd one. */
7950 gas_assert (i
.imm_operands
== 2
7951 && i
.tm
.opcode_modifier
.sae
7952 && operand_type_check (i
.types
[0], imm
));
7953 if (operand_type_check (i
.types
[1], imm
))
7955 else if (operand_type_check (i
.types
[4], imm
))
7969 /* RC/SAE operand could be between DEST and SRC. That happens
7970 when one operand is GPR and the other one is XMM/YMM/ZMM
7972 if (i
.rounding
&& i
.rounding
->operand
== dest
)
7975 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7977 /* For instructions with VexNDS, the register-only source
7978 operand must be a 32/64bit integer, XMM, YMM, ZMM, or mask
7979 register. It is encoded in VEX prefix. */
7981 i386_operand_type op
;
7984 /* Swap two source operands if needed. */
7985 if (i
.tm
.opcode_modifier
.swapsources
)
7993 op
= i
.tm
.operand_types
[vvvv
];
7994 if ((dest
+ 1) >= i
.operands
7995 || ((op
.bitfield
.class != Reg
7996 || (!op
.bitfield
.dword
&& !op
.bitfield
.qword
))
7997 && op
.bitfield
.class != RegSIMD
7998 && !operand_type_equal (&op
, ®mask
)))
8000 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
8006 /* One of the register operands will be encoded in the i.rm.reg
8007 field, the other in the combined i.rm.mode and i.rm.regmem
8008 fields. If no form of this instruction supports a memory
8009 destination operand, then we assume the source operand may
8010 sometimes be a memory operand and so we need to store the
8011 destination in the i.rm.reg field. */
8012 if (!i
.tm
.opcode_modifier
.regmem
8013 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
8015 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
8016 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
8017 set_rex_vrex (i
.op
[dest
].regs
, REX_R
, i
.tm
.opcode_modifier
.sse2avx
);
8018 set_rex_vrex (i
.op
[source
].regs
, REX_B
, FALSE
);
8022 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
8023 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
8024 set_rex_vrex (i
.op
[dest
].regs
, REX_B
, i
.tm
.opcode_modifier
.sse2avx
);
8025 set_rex_vrex (i
.op
[source
].regs
, REX_R
, FALSE
);
8027 if (flag_code
!= CODE_64BIT
&& (i
.rex
& REX_R
))
8029 if (i
.types
[!i
.tm
.opcode_modifier
.regmem
].bitfield
.class != RegCR
)
8032 add_prefix (LOCK_PREFIX_OPCODE
);
8036 { /* If it's not 2 reg operands... */
8041 unsigned int fake_zero_displacement
= 0;
8044 for (op
= 0; op
< i
.operands
; op
++)
8045 if (i
.flags
[op
] & Operand_Mem
)
8047 gas_assert (op
< i
.operands
);
8049 if (i
.tm
.opcode_modifier
.sib
)
8051 /* The index register of VSIB shouldn't be RegIZ. */
8052 if (i
.tm
.opcode_modifier
.sib
!= SIBMEM
8053 && i
.index_reg
->reg_num
== RegIZ
)
8056 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8059 i
.sib
.base
= NO_BASE_REGISTER
;
8060 i
.sib
.scale
= i
.log2_scale_factor
;
8061 i
.types
[op
].bitfield
.disp8
= 0;
8062 i
.types
[op
].bitfield
.disp16
= 0;
8063 i
.types
[op
].bitfield
.disp64
= 0;
8064 if (flag_code
!= CODE_64BIT
|| i
.prefix
[ADDR_PREFIX
])
8066 /* Must be 32 bit */
8067 i
.types
[op
].bitfield
.disp32
= 1;
8068 i
.types
[op
].bitfield
.disp32s
= 0;
8072 i
.types
[op
].bitfield
.disp32
= 0;
8073 i
.types
[op
].bitfield
.disp32s
= 1;
8077 /* Since the mandatory SIB always has index register, so
8078 the code logic remains unchanged. The non-mandatory SIB
8079 without index register is allowed and will be handled
8083 if (i
.index_reg
->reg_num
== RegIZ
)
8084 i
.sib
.index
= NO_INDEX_REGISTER
;
8086 i
.sib
.index
= i
.index_reg
->reg_num
;
8087 set_rex_vrex (i
.index_reg
, REX_X
, FALSE
);
8093 if (i
.base_reg
== 0)
8096 if (!i
.disp_operands
)
8097 fake_zero_displacement
= 1;
8098 if (i
.index_reg
== 0)
8100 i386_operand_type newdisp
;
8102 /* Both check for VSIB and mandatory non-vector SIB. */
8103 gas_assert (!i
.tm
.opcode_modifier
.sib
8104 || i
.tm
.opcode_modifier
.sib
== SIBMEM
);
8105 /* Operand is just <disp> */
8106 if (flag_code
== CODE_64BIT
)
8108 /* 64bit mode overwrites the 32bit absolute
8109 addressing by RIP relative addressing and
8110 absolute addressing is encoded by one of the
8111 redundant SIB forms. */
8112 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8113 i
.sib
.base
= NO_BASE_REGISTER
;
8114 i
.sib
.index
= NO_INDEX_REGISTER
;
8115 newdisp
= (!i
.prefix
[ADDR_PREFIX
] ? disp32s
: disp32
);
8117 else if ((flag_code
== CODE_16BIT
)
8118 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
8120 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
8125 i
.rm
.regmem
= NO_BASE_REGISTER
;
8128 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
8129 i
.types
[op
] = operand_type_or (i
.types
[op
], newdisp
);
8131 else if (!i
.tm
.opcode_modifier
.sib
)
8133 /* !i.base_reg && i.index_reg */
8134 if (i
.index_reg
->reg_num
== RegIZ
)
8135 i
.sib
.index
= NO_INDEX_REGISTER
;
8137 i
.sib
.index
= i
.index_reg
->reg_num
;
8138 i
.sib
.base
= NO_BASE_REGISTER
;
8139 i
.sib
.scale
= i
.log2_scale_factor
;
8140 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8141 i
.types
[op
].bitfield
.disp8
= 0;
8142 i
.types
[op
].bitfield
.disp16
= 0;
8143 i
.types
[op
].bitfield
.disp64
= 0;
8144 if (flag_code
!= CODE_64BIT
|| i
.prefix
[ADDR_PREFIX
])
8146 /* Must be 32 bit */
8147 i
.types
[op
].bitfield
.disp32
= 1;
8148 i
.types
[op
].bitfield
.disp32s
= 0;
8152 i
.types
[op
].bitfield
.disp32
= 0;
8153 i
.types
[op
].bitfield
.disp32s
= 1;
8155 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
8159 /* RIP addressing for 64bit mode. */
8160 else if (i
.base_reg
->reg_num
== RegIP
)
8162 gas_assert (!i
.tm
.opcode_modifier
.sib
);
8163 i
.rm
.regmem
= NO_BASE_REGISTER
;
8164 i
.types
[op
].bitfield
.disp8
= 0;
8165 i
.types
[op
].bitfield
.disp16
= 0;
8166 i
.types
[op
].bitfield
.disp32
= 0;
8167 i
.types
[op
].bitfield
.disp32s
= 1;
8168 i
.types
[op
].bitfield
.disp64
= 0;
8169 i
.flags
[op
] |= Operand_PCrel
;
8170 if (! i
.disp_operands
)
8171 fake_zero_displacement
= 1;
8173 else if (i
.base_reg
->reg_type
.bitfield
.word
)
8175 gas_assert (!i
.tm
.opcode_modifier
.sib
);
8176 switch (i
.base_reg
->reg_num
)
8179 if (i
.index_reg
== 0)
8181 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
8182 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
8186 if (i
.index_reg
== 0)
8189 if (operand_type_check (i
.types
[op
], disp
) == 0)
8191 /* fake (%bp) into 0(%bp) */
8192 if (i
.disp_encoding
== disp_encoding_16bit
)
8193 i
.types
[op
].bitfield
.disp16
= 1;
8195 i
.types
[op
].bitfield
.disp8
= 1;
8196 fake_zero_displacement
= 1;
8199 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
8200 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
8202 default: /* (%si) -> 4 or (%di) -> 5 */
8203 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
8205 if (!fake_zero_displacement
8209 fake_zero_displacement
= 1;
8210 if (i
.disp_encoding
== disp_encoding_8bit
)
8211 i
.types
[op
].bitfield
.disp8
= 1;
8213 i
.types
[op
].bitfield
.disp16
= 1;
8215 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8217 else /* i.base_reg and 32/64 bit mode */
8219 if (flag_code
== CODE_64BIT
8220 && operand_type_check (i
.types
[op
], disp
))
8222 i
.types
[op
].bitfield
.disp16
= 0;
8223 i
.types
[op
].bitfield
.disp64
= 0;
8224 if (i
.prefix
[ADDR_PREFIX
] == 0)
8226 i
.types
[op
].bitfield
.disp32
= 0;
8227 i
.types
[op
].bitfield
.disp32s
= 1;
8231 i
.types
[op
].bitfield
.disp32
= 1;
8232 i
.types
[op
].bitfield
.disp32s
= 0;
8236 if (!i
.tm
.opcode_modifier
.sib
)
8237 i
.rm
.regmem
= i
.base_reg
->reg_num
;
8238 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
8240 i
.sib
.base
= i
.base_reg
->reg_num
;
8241 /* x86-64 ignores REX prefix bit here to avoid decoder
8243 if (!(i
.base_reg
->reg_flags
& RegRex
)
8244 && (i
.base_reg
->reg_num
== EBP_REG_NUM
8245 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
8247 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
8249 fake_zero_displacement
= 1;
8250 if (i
.disp_encoding
== disp_encoding_32bit
)
8251 i
.types
[op
].bitfield
.disp32
= 1;
8253 i
.types
[op
].bitfield
.disp8
= 1;
8255 i
.sib
.scale
= i
.log2_scale_factor
;
8256 if (i
.index_reg
== 0)
8258 /* Only check for VSIB. */
8259 gas_assert (i
.tm
.opcode_modifier
.sib
!= VECSIB128
8260 && i
.tm
.opcode_modifier
.sib
!= VECSIB256
8261 && i
.tm
.opcode_modifier
.sib
!= VECSIB512
);
8263 /* <disp>(%esp) becomes two byte modrm with no index
8264 register. We've already stored the code for esp
8265 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
8266 Any base register besides %esp will not use the
8267 extra modrm byte. */
8268 i
.sib
.index
= NO_INDEX_REGISTER
;
8270 else if (!i
.tm
.opcode_modifier
.sib
)
8272 if (i
.index_reg
->reg_num
== RegIZ
)
8273 i
.sib
.index
= NO_INDEX_REGISTER
;
8275 i
.sib
.index
= i
.index_reg
->reg_num
;
8276 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
8277 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
8282 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
8283 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
8287 if (!fake_zero_displacement
8291 fake_zero_displacement
= 1;
8292 if (i
.disp_encoding
== disp_encoding_8bit
)
8293 i
.types
[op
].bitfield
.disp8
= 1;
8295 i
.types
[op
].bitfield
.disp32
= 1;
8297 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
8301 if (fake_zero_displacement
)
8303 /* Fakes a zero displacement assuming that i.types[op]
8304 holds the correct displacement size. */
8307 gas_assert (i
.op
[op
].disps
== 0);
8308 exp
= &disp_expressions
[i
.disp_operands
++];
8309 i
.op
[op
].disps
= exp
;
8310 exp
->X_op
= O_constant
;
8311 exp
->X_add_number
= 0;
8312 exp
->X_add_symbol
= (symbolS
*) 0;
8313 exp
->X_op_symbol
= (symbolS
*) 0;
8321 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
8323 if (operand_type_check (i
.types
[0], imm
))
8324 i
.vex
.register_specifier
= NULL
;
8327 /* VEX.vvvv encodes one of the sources when the first
8328 operand is not an immediate. */
8329 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
8330 i
.vex
.register_specifier
= i
.op
[0].regs
;
8332 i
.vex
.register_specifier
= i
.op
[1].regs
;
8335 /* Destination is a XMM register encoded in the ModRM.reg
8337 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
8338 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
8341 /* ModRM.rm and VEX.B encodes the other source. */
8342 if (!i
.mem_operands
)
8346 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
8347 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
8349 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
8351 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
8355 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
8357 i
.vex
.register_specifier
= i
.op
[2].regs
;
8358 if (!i
.mem_operands
)
8361 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
8362 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
8366 /* Fill in i.rm.reg or i.rm.regmem field with register operand
8367 (if any) based on i.tm.extension_opcode. Again, we must be
8368 careful to make sure that segment/control/debug/test/MMX
8369 registers are coded into the i.rm.reg field. */
8370 else if (i
.reg_operands
)
8373 unsigned int vex_reg
= ~0;
8375 for (op
= 0; op
< i
.operands
; op
++)
8376 if (i
.types
[op
].bitfield
.class == Reg
8377 || i
.types
[op
].bitfield
.class == RegBND
8378 || i
.types
[op
].bitfield
.class == RegMask
8379 || i
.types
[op
].bitfield
.class == SReg
8380 || i
.types
[op
].bitfield
.class == RegCR
8381 || i
.types
[op
].bitfield
.class == RegDR
8382 || i
.types
[op
].bitfield
.class == RegTR
8383 || i
.types
[op
].bitfield
.class == RegSIMD
8384 || i
.types
[op
].bitfield
.class == RegMMX
)
8389 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
8391 /* For instructions with VexNDS, the register-only
8392 source operand is encoded in VEX prefix. */
8393 gas_assert (mem
!= (unsigned int) ~0);
8398 gas_assert (op
< i
.operands
);
8402 /* Check register-only source operand when two source
8403 operands are swapped. */
8404 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
8405 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
8409 gas_assert (mem
== (vex_reg
+ 1)
8410 && op
< i
.operands
);
8415 gas_assert (vex_reg
< i
.operands
);
8419 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
8421 /* For instructions with VexNDD, the register destination
8422 is encoded in VEX prefix. */
8423 if (i
.mem_operands
== 0)
8425 /* There is no memory operand. */
8426 gas_assert ((op
+ 2) == i
.operands
);
8431 /* There are only 2 non-immediate operands. */
8432 gas_assert (op
< i
.imm_operands
+ 2
8433 && i
.operands
== i
.imm_operands
+ 2);
8434 vex_reg
= i
.imm_operands
+ 1;
8438 gas_assert (op
< i
.operands
);
8440 if (vex_reg
!= (unsigned int) ~0)
8442 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
8444 if ((type
->bitfield
.class != Reg
8445 || (!type
->bitfield
.dword
&& !type
->bitfield
.qword
))
8446 && type
->bitfield
.class != RegSIMD
8447 && !operand_type_equal (type
, ®mask
))
8450 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
8453 /* Don't set OP operand twice. */
8456 /* If there is an extension opcode to put here, the
8457 register number must be put into the regmem field. */
8458 if (i
.tm
.extension_opcode
!= None
)
8460 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
8461 set_rex_vrex (i
.op
[op
].regs
, REX_B
,
8462 i
.tm
.opcode_modifier
.sse2avx
);
8466 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
8467 set_rex_vrex (i
.op
[op
].regs
, REX_R
,
8468 i
.tm
.opcode_modifier
.sse2avx
);
8472 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
8473 must set it to 3 to indicate this is a register operand
8474 in the regmem field. */
8475 if (!i
.mem_operands
)
8479 /* Fill in i.rm.reg field with extension opcode (if any). */
8480 if (i
.tm
.extension_opcode
!= None
)
8481 i
.rm
.reg
= i
.tm
.extension_opcode
;
/* Emit one opcode/prefix byte into the current frag, or — when assembling
   into the absolute section, where no bytes are actually produced — just
   advance the absolute-section offset so addresses stay correct.  */
static void
frag_opcode_byte (unsigned char byte)
{
  if (now_seg != absolute_section)
    FRAG_APPEND_1_CHAR (byte);
  else
    ++abs_section_offset;
}
/* Helper for branch output when an operand-size prefix is present.
   Return CODE16 if the (single) operand's displacement type contradicts
   the current CODE16 setting — i.e. the prefix flips the displacement
   width — and 0 otherwise.  The caller XORs the result into its code16
   flag.  REX.W pins the displacement to 32 bits, so nothing flips then.  */
static int
flip_code16 (unsigned int code16)
{
  gas_assert (i.tm.operands == 1);

  return !(i.prefix[REX_PREFIX] & REX_W)
	 && (code16 ? i.tm.operand_types[0].bitfield.disp32
		      || i.tm.operand_types[0].bitfield.disp32s
		    : i.tm.operand_types[0].bitfield.disp16)
	 ? CODE16 : 0;
}
/* Output a relaxable branch (conditional or unconditional jump to a
   symbolic target): emit any prefixes plus the first opcode byte into the
   fixed part of the frag, then create a machine-dependent relaxation
   record so md_convert_frag can later pick the short or near form.  */
static void
output_branch (void)
{
  char *p;
  int size;
  int code16;
  int prefix;
  relax_substateT subtype;
  symbolS *sym;
  offsetT off;

  /* Relaxation needs real frags; the absolute section has none.  */
  if (now_seg == absolute_section)
    {
      as_bad (_("relaxable branches not supported in absolute section"));
      return;
    }

  code16 = flag_code == CODE_16BIT ? CODE16 : 0;
  size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;

  /* PREFIX counts the prefix bytes that will precede the opcode.  */
  prefix = 0;
  if (i.prefix[DATA_PREFIX] != 0)
    {
      prefix = 1;
      i.prefixes -= 1;
      /* The operand-size prefix may flip the displacement width.  */
      code16 ^= flip_code16(code16);
    }
  /* Pentium4 branch hints.  */
  if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
      || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
    {
      prefix++;
      i.prefixes -= 1;
    }
  if (i.prefix[REX_PREFIX] != 0)
    {
      prefix++;
      i.prefixes -= 1;
    }

  /* BND prefixed jump.  */
  if (i.prefix[BND_PREFIX] != 0)
    {
      prefix++;
      i.prefixes -= 1;
    }

  /* Any prefixes still accounted for at this point are not emitted.  */
  if (i.prefixes != 0)
    as_warn (_("skipping prefixes on `%s'"), i.tm.name);

  /* It's always a symbol;  End frag & setup for relax.
     Make sure there is enough room in this frag for the largest
     instruction we may generate in md_convert_frag.  This is 2
     bytes for the opcode and room for the prefix and largest
     displacement.  */
  frag_grow (prefix + 2 + 4);
  /* Prefix and 1 opcode byte go in fr_fix.  */
  p = frag_more (prefix + 1);
  if (i.prefix[DATA_PREFIX] != 0)
    *p++ = DATA_PREFIX_OPCODE;
  if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
      || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
    *p++ = i.prefix[SEG_PREFIX];
  if (i.prefix[BND_PREFIX] != 0)
    *p++ = BND_PREFIX_OPCODE;
  if (i.prefix[REX_PREFIX] != 0)
    *p++ = i.prefix[REX_PREFIX];
  *p = i.tm.base_opcode;

  /* Classify the branch for the relaxation machinery: unconditional JMP,
     i386+ Jcc (rel8/rel32), or pre-386 Jcc (rel8 only, needs rewriting).  */
  if ((unsigned char) *p == JUMP_PC_RELATIVE)
    subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
  else if (cpu_arch_flags.bitfield.cpui386)
    subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
  else
    subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
  subtype |= code16;

  sym = i.op[0].disps->X_add_symbol;
  off = i.op[0].disps->X_add_number;

  if (i.op[0].disps->X_op != O_constant
      && i.op[0].disps->X_op != O_symbol)
    {
      /* Handle complex expressions.  */
      sym = make_expr_symbol (i.op[0].disps);
      off = 0;
    }

  /* 1 possible extra opcode + 4 byte displacement go in var part.
     Pass reloc in fr_var.  */
  frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
}
8600 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8601 /* Return TRUE iff PLT32 relocation should be used for branching to
/* Return TRUE iff a PLT32 relocation should be used when branching to
   symbol S (32-bit PC-relative branch on x86-64 ELF).  */
static bfd_boolean
need_plt32_p (symbolS *s)
{
  /* PLT32 relocation is ELF only.  */
  if (!IS_ELF)
    return FALSE;

#ifdef TE_SOLARIS
  /* Don't emit PLT32 relocation on Solaris: neither native linker nor
     krtld support it.  */
  return FALSE;
#endif

  /* Since there is no need to prepare for PLT branch on x86-64, we
     can generate R_X86_64_PLT32, instead of R_X86_64_PC32, which can
     be used as a marker for 32-bit PC-relative branches.  */
  if (!object_64bit)
    return FALSE;

  if (s == NULL)
    return FALSE;

  /* Weak or undefined symbol need PLT32 relocation.  */
  if (S_IS_WEAK (s) || !S_IS_DEFINED (s))
    return TRUE;

  /* Non-global symbol doesn't need PLT32 relocation.  */
  if (! S_IS_EXTERNAL (s))
    return FALSE;

  /* Other global symbols need PLT32 relocation.  NB: Symbol with
     non-default visibilities are treated as normal global symbol
     so that PLT32 relocation can be used as a marker for 32-bit
     PC-relative branches.  It is useful for linker relaxation.  */
  return TRUE;
}
8648 bfd_reloc_code_real_type jump_reloc
= i
.reloc
[0];
8650 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)
8652 /* This is a loop or jecxz type instruction. */
8654 if (i
.prefix
[ADDR_PREFIX
] != 0)
8656 frag_opcode_byte (ADDR_PREFIX_OPCODE
);
8659 /* Pentium4 branch hints. */
8660 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8661 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8663 frag_opcode_byte (i
.prefix
[SEG_PREFIX
]);
8672 if (flag_code
== CODE_16BIT
)
8675 if (i
.prefix
[DATA_PREFIX
] != 0)
8677 frag_opcode_byte (DATA_PREFIX_OPCODE
);
8679 code16
^= flip_code16(code16
);
8687 /* BND prefixed jump. */
8688 if (i
.prefix
[BND_PREFIX
] != 0)
8690 frag_opcode_byte (i
.prefix
[BND_PREFIX
]);
8694 if (i
.prefix
[REX_PREFIX
] != 0)
8696 frag_opcode_byte (i
.prefix
[REX_PREFIX
]);
8700 if (i
.prefixes
!= 0)
8701 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8703 if (now_seg
== absolute_section
)
8705 abs_section_offset
+= i
.opcode_length
+ size
;
8709 p
= frag_more (i
.opcode_length
+ size
);
8710 switch (i
.opcode_length
)
8713 *p
++ = i
.tm
.base_opcode
>> 8;
8716 *p
++ = i
.tm
.base_opcode
;
8722 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8724 && jump_reloc
== NO_RELOC
8725 && need_plt32_p (i
.op
[0].disps
->X_add_symbol
))
8726 jump_reloc
= BFD_RELOC_X86_64_PLT32
;
8729 jump_reloc
= reloc (size
, 1, 1, jump_reloc
);
8731 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8732 i
.op
[0].disps
, 1, jump_reloc
);
8734 /* All jumps handled here are signed, but don't use a signed limit
8735 check for 32 and 16 bit jumps as we want to allow wrap around at
8736 4G and 64k respectively. */
8738 fixP
->fx_signed
= 1;
8742 output_interseg_jump (void)
8750 if (flag_code
== CODE_16BIT
)
8754 if (i
.prefix
[DATA_PREFIX
] != 0)
8761 gas_assert (!i
.prefix
[REX_PREFIX
]);
8767 if (i
.prefixes
!= 0)
8768 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8770 if (now_seg
== absolute_section
)
8772 abs_section_offset
+= prefix
+ 1 + 2 + size
;
8776 /* 1 opcode; 2 segment; offset */
8777 p
= frag_more (prefix
+ 1 + 2 + size
);
8779 if (i
.prefix
[DATA_PREFIX
] != 0)
8780 *p
++ = DATA_PREFIX_OPCODE
;
8782 if (i
.prefix
[REX_PREFIX
] != 0)
8783 *p
++ = i
.prefix
[REX_PREFIX
];
8785 *p
++ = i
.tm
.base_opcode
;
8786 if (i
.op
[1].imms
->X_op
== O_constant
)
8788 offsetT n
= i
.op
[1].imms
->X_add_number
;
8791 && !fits_in_unsigned_word (n
)
8792 && !fits_in_signed_word (n
))
8794 as_bad (_("16-bit jump out of range"));
8797 md_number_to_chars (p
, n
, size
);
8800 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8801 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
8804 if (i
.op
[0].imms
->X_op
== O_constant
)
8805 md_number_to_chars (p
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
8807 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, 2,
8808 i
.op
[0].imms
, 0, reloc (2, 0, 0, i
.reloc
[0]));
8811 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8816 asection
*seg
= now_seg
;
8817 subsegT subseg
= now_subseg
;
8819 unsigned int alignment
, align_size_1
;
8820 unsigned int isa_1_descsz
, feature_2_descsz
, descsz
;
8821 unsigned int isa_1_descsz_raw
, feature_2_descsz_raw
;
8822 unsigned int padding
;
8824 if (!IS_ELF
|| !x86_used_note
)
8827 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X86
;
8829 /* The .note.gnu.property section layout:
8831 Field Length Contents
8834 n_descsz 4 The note descriptor size
8835 n_type 4 NT_GNU_PROPERTY_TYPE_0
8837 n_desc n_descsz The program property array
8841 /* Create the .note.gnu.property section. */
8842 sec
= subseg_new (NOTE_GNU_PROPERTY_SECTION_NAME
, 0);
8843 bfd_set_section_flags (sec
,
8850 if (get_elf_backend_data (stdoutput
)->s
->elfclass
== ELFCLASS64
)
8861 bfd_set_section_alignment (sec
, alignment
);
8862 elf_section_type (sec
) = SHT_NOTE
;
8864 /* GNU_PROPERTY_X86_ISA_1_USED: 4-byte type + 4-byte data size
8866 isa_1_descsz_raw
= 4 + 4 + 4;
8867 /* Align GNU_PROPERTY_X86_ISA_1_USED. */
8868 isa_1_descsz
= (isa_1_descsz_raw
+ align_size_1
) & ~align_size_1
;
8870 feature_2_descsz_raw
= isa_1_descsz
;
8871 /* GNU_PROPERTY_X86_FEATURE_2_USED: 4-byte type + 4-byte data size
8873 feature_2_descsz_raw
+= 4 + 4 + 4;
8874 /* Align GNU_PROPERTY_X86_FEATURE_2_USED. */
8875 feature_2_descsz
= ((feature_2_descsz_raw
+ align_size_1
)
8878 descsz
= feature_2_descsz
;
8879 /* Section size: n_namsz + n_descsz + n_type + n_name + n_descsz. */
8880 p
= frag_more (4 + 4 + 4 + 4 + descsz
);
8882 /* Write n_namsz. */
8883 md_number_to_chars (p
, (valueT
) 4, 4);
8885 /* Write n_descsz. */
8886 md_number_to_chars (p
+ 4, (valueT
) descsz
, 4);
8889 md_number_to_chars (p
+ 4 * 2, (valueT
) NT_GNU_PROPERTY_TYPE_0
, 4);
8892 memcpy (p
+ 4 * 3, "GNU", 4);
8894 /* Write 4-byte type. */
8895 md_number_to_chars (p
+ 4 * 4,
8896 (valueT
) GNU_PROPERTY_X86_ISA_1_USED
, 4);
8898 /* Write 4-byte data size. */
8899 md_number_to_chars (p
+ 4 * 5, (valueT
) 4, 4);
8901 /* Write 4-byte data. */
8902 md_number_to_chars (p
+ 4 * 6, (valueT
) x86_isa_1_used
, 4);
8904 /* Zero out paddings. */
8905 padding
= isa_1_descsz
- isa_1_descsz_raw
;
8907 memset (p
+ 4 * 7, 0, padding
);
8909 /* Write 4-byte type. */
8910 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 4,
8911 (valueT
) GNU_PROPERTY_X86_FEATURE_2_USED
, 4);
8913 /* Write 4-byte data size. */
8914 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 5, (valueT
) 4, 4);
8916 /* Write 4-byte data. */
8917 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 6,
8918 (valueT
) x86_feature_2_used
, 4);
8920 /* Zero out paddings. */
8921 padding
= feature_2_descsz
- feature_2_descsz_raw
;
8923 memset (p
+ isa_1_descsz
+ 4 * 7, 0, padding
);
8925 /* We probably can't restore the current segment, for there likely
8928 subseg_set (seg
, subseg
);
8933 encoding_length (const fragS
*start_frag
, offsetT start_off
,
8934 const char *frag_now_ptr
)
8936 unsigned int len
= 0;
8938 if (start_frag
!= frag_now
)
8940 const fragS
*fr
= start_frag
;
8945 } while (fr
&& fr
!= frag_now
);
8948 return len
- start_off
+ (frag_now_ptr
- frag_now
->fr_literal
);
8951 /* Return 1 for test, and, cmp, add, sub, inc and dec which may
8952 be macro-fused with conditional jumps.
8953 NB: If TEST/AND/CMP/ADD/SUB/INC/DEC is of RIP relative address,
8954 or is one of the following format:
8967 maybe_fused_with_jcc_p (enum mf_cmp_kind
* mf_cmp_p
)
8969 /* No RIP address. */
8970 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
8973 /* No opcodes outside of base encoding space. */
8974 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
8977 /* add, sub without add/sub m, imm. */
8978 if (i
.tm
.base_opcode
<= 5
8979 || (i
.tm
.base_opcode
>= 0x28 && i
.tm
.base_opcode
<= 0x2d)
8980 || ((i
.tm
.base_opcode
| 3) == 0x83
8981 && (i
.tm
.extension_opcode
== 0x5
8982 || i
.tm
.extension_opcode
== 0x0)))
8984 *mf_cmp_p
= mf_cmp_alu_cmp
;
8985 return !(i
.mem_operands
&& i
.imm_operands
);
8988 /* and without and m, imm. */
8989 if ((i
.tm
.base_opcode
>= 0x20 && i
.tm
.base_opcode
<= 0x25)
8990 || ((i
.tm
.base_opcode
| 3) == 0x83
8991 && i
.tm
.extension_opcode
== 0x4))
8993 *mf_cmp_p
= mf_cmp_test_and
;
8994 return !(i
.mem_operands
&& i
.imm_operands
);
8997 /* test without test m imm. */
8998 if ((i
.tm
.base_opcode
| 1) == 0x85
8999 || (i
.tm
.base_opcode
| 1) == 0xa9
9000 || ((i
.tm
.base_opcode
| 1) == 0xf7
9001 && i
.tm
.extension_opcode
== 0))
9003 *mf_cmp_p
= mf_cmp_test_and
;
9004 return !(i
.mem_operands
&& i
.imm_operands
);
9007 /* cmp without cmp m, imm. */
9008 if ((i
.tm
.base_opcode
>= 0x38 && i
.tm
.base_opcode
<= 0x3d)
9009 || ((i
.tm
.base_opcode
| 3) == 0x83
9010 && (i
.tm
.extension_opcode
== 0x7)))
9012 *mf_cmp_p
= mf_cmp_alu_cmp
;
9013 return !(i
.mem_operands
&& i
.imm_operands
);
9016 /* inc, dec without inc/dec m. */
9017 if ((i
.tm
.cpu_flags
.bitfield
.cpuno64
9018 && (i
.tm
.base_opcode
| 0xf) == 0x4f)
9019 || ((i
.tm
.base_opcode
| 1) == 0xff
9020 && i
.tm
.extension_opcode
<= 0x1))
9022 *mf_cmp_p
= mf_cmp_incdec
;
9023 return !i
.mem_operands
;
9029 /* Return 1 if a FUSED_JCC_PADDING frag should be generated. */
9032 add_fused_jcc_padding_frag_p (enum mf_cmp_kind
* mf_cmp_p
)
9034 /* NB: Don't work with COND_JUMP86 without i386. */
9035 if (!align_branch_power
9036 || now_seg
== absolute_section
9037 || !cpu_arch_flags
.bitfield
.cpui386
9038 || !(align_branch
& align_branch_fused_bit
))
9041 if (maybe_fused_with_jcc_p (mf_cmp_p
))
9043 if (last_insn
.kind
== last_insn_other
9044 || last_insn
.seg
!= now_seg
)
9047 as_warn_where (last_insn
.file
, last_insn
.line
,
9048 _("`%s` skips -malign-branch-boundary on `%s`"),
9049 last_insn
.name
, i
.tm
.name
);
9055 /* Return 1 if a BRANCH_PREFIX frag should be generated. */
9058 add_branch_prefix_frag_p (void)
9060 /* NB: Don't work with COND_JUMP86 without i386. Don't add prefix
9061 to PadLock instructions since they include prefixes in opcode. */
9062 if (!align_branch_power
9063 || !align_branch_prefix_size
9064 || now_seg
== absolute_section
9065 || i
.tm
.cpu_flags
.bitfield
.cpupadlock
9066 || !cpu_arch_flags
.bitfield
.cpui386
)
9069 /* Don't add prefix if it is a prefix or there is no operand in case
9070 that segment prefix is special. */
9071 if (!i
.operands
|| i
.tm
.opcode_modifier
.isprefix
)
9074 if (last_insn
.kind
== last_insn_other
9075 || last_insn
.seg
!= now_seg
)
9079 as_warn_where (last_insn
.file
, last_insn
.line
,
9080 _("`%s` skips -malign-branch-boundary on `%s`"),
9081 last_insn
.name
, i
.tm
.name
);
9086 /* Return 1 if a BRANCH_PADDING frag should be generated. */
9089 add_branch_padding_frag_p (enum align_branch_kind
*branch_p
,
9090 enum mf_jcc_kind
*mf_jcc_p
)
9094 /* NB: Don't work with COND_JUMP86 without i386. */
9095 if (!align_branch_power
9096 || now_seg
== absolute_section
9097 || !cpu_arch_flags
.bitfield
.cpui386
9098 || i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
9103 /* Check for jcc and direct jmp. */
9104 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
9106 if (i
.tm
.base_opcode
== JUMP_PC_RELATIVE
)
9108 *branch_p
= align_branch_jmp
;
9109 add_padding
= align_branch
& align_branch_jmp_bit
;
9113 /* Because J<cc> and JN<cc> share same group in macro-fusible table,
9114 igore the lowest bit. */
9115 *mf_jcc_p
= (i
.tm
.base_opcode
& 0x0e) >> 1;
9116 *branch_p
= align_branch_jcc
;
9117 if ((align_branch
& align_branch_jcc_bit
))
9121 else if ((i
.tm
.base_opcode
| 1) == 0xc3)
9124 *branch_p
= align_branch_ret
;
9125 if ((align_branch
& align_branch_ret_bit
))
9130 /* Check for indirect jmp, direct and indirect calls. */
9131 if (i
.tm
.base_opcode
== 0xe8)
9134 *branch_p
= align_branch_call
;
9135 if ((align_branch
& align_branch_call_bit
))
9138 else if (i
.tm
.base_opcode
== 0xff
9139 && (i
.tm
.extension_opcode
== 2
9140 || i
.tm
.extension_opcode
== 4))
9142 /* Indirect call and jmp. */
9143 *branch_p
= align_branch_indirect
;
9144 if ((align_branch
& align_branch_indirect_bit
))
9151 && (i
.op
[0].disps
->X_op
== O_symbol
9152 || (i
.op
[0].disps
->X_op
== O_subtract
9153 && i
.op
[0].disps
->X_op_symbol
== GOT_symbol
)))
9155 symbolS
*s
= i
.op
[0].disps
->X_add_symbol
;
9156 /* No padding to call to global or undefined tls_get_addr. */
9157 if ((S_IS_EXTERNAL (s
) || !S_IS_DEFINED (s
))
9158 && strcmp (S_GET_NAME (s
), tls_get_addr
) == 0)
9164 && last_insn
.kind
!= last_insn_other
9165 && last_insn
.seg
== now_seg
)
9168 as_warn_where (last_insn
.file
, last_insn
.line
,
9169 _("`%s` skips -malign-branch-boundary on `%s`"),
9170 last_insn
.name
, i
.tm
.name
);
9180 fragS
*insn_start_frag
;
9181 offsetT insn_start_off
;
9182 fragS
*fragP
= NULL
;
9183 enum align_branch_kind branch
= align_branch_none
;
9184 /* The initializer is arbitrary just to avoid uninitialized error.
9185 it's actually either assigned in add_branch_padding_frag_p
9186 or never be used. */
9187 enum mf_jcc_kind mf_jcc
= mf_jcc_jo
;
9189 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9190 if (IS_ELF
&& x86_used_note
&& now_seg
!= absolute_section
)
9192 if ((i
.xstate
& xstate_tmm
) == xstate_tmm
9193 || i
.tm
.cpu_flags
.bitfield
.cpuamx_tile
)
9194 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_TMM
;
9196 if (i
.tm
.cpu_flags
.bitfield
.cpu8087
9197 || i
.tm
.cpu_flags
.bitfield
.cpu287
9198 || i
.tm
.cpu_flags
.bitfield
.cpu387
9199 || i
.tm
.cpu_flags
.bitfield
.cpu687
9200 || i
.tm
.cpu_flags
.bitfield
.cpufisttp
)
9201 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X87
;
9203 if ((i
.xstate
& xstate_mmx
)
9204 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
9205 && !is_any_vex_encoding (&i
.tm
)
9206 && (i
.tm
.base_opcode
== 0x77 /* emms */
9207 || i
.tm
.base_opcode
== 0x0e /* femms */)))
9208 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MMX
;
9212 if (i
.index_reg
->reg_type
.bitfield
.zmmword
)
9213 i
.xstate
|= xstate_zmm
;
9214 else if (i
.index_reg
->reg_type
.bitfield
.ymmword
)
9215 i
.xstate
|= xstate_ymm
;
9216 else if (i
.index_reg
->reg_type
.bitfield
.xmmword
)
9217 i
.xstate
|= xstate_xmm
;
9220 /* vzeroall / vzeroupper */
9221 if (i
.tm
.base_opcode
== 0x77 && i
.tm
.cpu_flags
.bitfield
.cpuavx
)
9222 i
.xstate
|= xstate_ymm
;
9224 if ((i
.xstate
& xstate_xmm
)
9225 /* ldmxcsr / stmxcsr / vldmxcsr / vstmxcsr */
9226 || (i
.tm
.base_opcode
== 0xae
9227 && (i
.tm
.cpu_flags
.bitfield
.cpusse
9228 || i
.tm
.cpu_flags
.bitfield
.cpuavx
))
9229 || i
.tm
.cpu_flags
.bitfield
.cpuwidekl
9230 || i
.tm
.cpu_flags
.bitfield
.cpukl
)
9231 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XMM
;
9233 if ((i
.xstate
& xstate_ymm
) == xstate_ymm
)
9234 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_YMM
;
9235 if ((i
.xstate
& xstate_zmm
) == xstate_zmm
)
9236 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_ZMM
;
9237 if (i
.mask
.reg
|| (i
.xstate
& xstate_mask
) == xstate_mask
)
9238 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MASK
;
9239 if (i
.tm
.cpu_flags
.bitfield
.cpufxsr
)
9240 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_FXSR
;
9241 if (i
.tm
.cpu_flags
.bitfield
.cpuxsave
)
9242 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVE
;
9243 if (i
.tm
.cpu_flags
.bitfield
.cpuxsaveopt
)
9244 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
;
9245 if (i
.tm
.cpu_flags
.bitfield
.cpuxsavec
)
9246 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEC
;
9248 if (x86_feature_2_used
9249 || i
.tm
.cpu_flags
.bitfield
.cpucmov
9250 || i
.tm
.cpu_flags
.bitfield
.cpusyscall
9251 || (i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F
9252 && i
.tm
.base_opcode
== 0xc7
9253 && i
.tm
.opcode_modifier
.opcodeprefix
== PREFIX_NONE
9254 && i
.tm
.extension_opcode
== 1) /* cmpxchg8b */)
9255 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_BASELINE
;
9256 if (i
.tm
.cpu_flags
.bitfield
.cpusse3
9257 || i
.tm
.cpu_flags
.bitfield
.cpussse3
9258 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
9259 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
9260 || i
.tm
.cpu_flags
.bitfield
.cpucx16
9261 || i
.tm
.cpu_flags
.bitfield
.cpupopcnt
9262 /* LAHF-SAHF insns in 64-bit mode. */
9263 || (flag_code
== CODE_64BIT
9264 && (i
.tm
.base_opcode
| 1) == 0x9f
9265 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
))
9266 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V2
;
9267 if (i
.tm
.cpu_flags
.bitfield
.cpuavx
9268 || i
.tm
.cpu_flags
.bitfield
.cpuavx2
9269 /* Any VEX encoded insns execpt for CpuAVX512F, CpuAVX512BW,
9270 CpuAVX512DQ, LPW, TBM and AMX. */
9271 || (i
.tm
.opcode_modifier
.vex
9272 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512f
9273 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
9274 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
9275 && !i
.tm
.cpu_flags
.bitfield
.cpulwp
9276 && !i
.tm
.cpu_flags
.bitfield
.cputbm
9277 && !(x86_feature_2_used
& GNU_PROPERTY_X86_FEATURE_2_TMM
))
9278 || i
.tm
.cpu_flags
.bitfield
.cpuf16c
9279 || i
.tm
.cpu_flags
.bitfield
.cpufma
9280 || i
.tm
.cpu_flags
.bitfield
.cpulzcnt
9281 || i
.tm
.cpu_flags
.bitfield
.cpumovbe
9282 || i
.tm
.cpu_flags
.bitfield
.cpuxsaves
9283 || (x86_feature_2_used
9284 & (GNU_PROPERTY_X86_FEATURE_2_XSAVE
9285 | GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
9286 | GNU_PROPERTY_X86_FEATURE_2_XSAVEC
)) != 0)
9287 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V3
;
9288 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512f
9289 || i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
9290 || i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
9291 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
9292 /* Any EVEX encoded insns except for AVX512ER, AVX512PF and
9294 || (i
.tm
.opcode_modifier
.evex
9295 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512er
9296 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512pf
9297 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512_4vnniw
))
9298 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_V4
;
9302 /* Tie dwarf2 debug info to the address at the start of the insn.
9303 We can't do this after the insn has been output as the current
9304 frag may have been closed off. eg. by frag_var. */
9305 dwarf2_emit_insn (0);
9307 insn_start_frag
= frag_now
;
9308 insn_start_off
= frag_now_fix ();
9310 if (add_branch_padding_frag_p (&branch
, &mf_jcc
))
9313 /* Branch can be 8 bytes. Leave some room for prefixes. */
9314 unsigned int max_branch_padding_size
= 14;
9316 /* Align section to boundary. */
9317 record_alignment (now_seg
, align_branch_power
);
9319 /* Make room for padding. */
9320 frag_grow (max_branch_padding_size
);
9322 /* Start of the padding. */
9327 frag_var (rs_machine_dependent
, max_branch_padding_size
, 0,
9328 ENCODE_RELAX_STATE (BRANCH_PADDING
, 0),
9331 fragP
->tc_frag_data
.mf_type
= mf_jcc
;
9332 fragP
->tc_frag_data
.branch_type
= branch
;
9333 fragP
->tc_frag_data
.max_bytes
= max_branch_padding_size
;
9337 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
9339 else if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
9340 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
9342 else if (i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
)
9343 output_interseg_jump ();
9346 /* Output normal instructions here. */
9350 enum mf_cmp_kind mf_cmp
;
9353 && (i
.tm
.base_opcode
== 0xaee8
9354 || i
.tm
.base_opcode
== 0xaef0
9355 || i
.tm
.base_opcode
== 0xaef8))
9357 /* Encode lfence, mfence, and sfence as
9358 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
9359 if (now_seg
!= absolute_section
)
9361 offsetT val
= 0x240483f0ULL
;
9364 md_number_to_chars (p
, val
, 5);
9367 abs_section_offset
+= 5;
9371 /* Some processors fail on LOCK prefix. This options makes
9372 assembler ignore LOCK prefix and serves as a workaround. */
9373 if (omit_lock_prefix
)
9375 if (i
.tm
.base_opcode
== LOCK_PREFIX_OPCODE
9376 && i
.tm
.opcode_modifier
.isprefix
)
9378 i
.prefix
[LOCK_PREFIX
] = 0;
9382 /* Skip if this is a branch. */
9384 else if (add_fused_jcc_padding_frag_p (&mf_cmp
))
9386 /* Make room for padding. */
9387 frag_grow (MAX_FUSED_JCC_PADDING_SIZE
);
9392 frag_var (rs_machine_dependent
, MAX_FUSED_JCC_PADDING_SIZE
, 0,
9393 ENCODE_RELAX_STATE (FUSED_JCC_PADDING
, 0),
9396 fragP
->tc_frag_data
.mf_type
= mf_cmp
;
9397 fragP
->tc_frag_data
.branch_type
= align_branch_fused
;
9398 fragP
->tc_frag_data
.max_bytes
= MAX_FUSED_JCC_PADDING_SIZE
;
9400 else if (add_branch_prefix_frag_p ())
9402 unsigned int max_prefix_size
= align_branch_prefix_size
;
9404 /* Make room for padding. */
9405 frag_grow (max_prefix_size
);
9410 frag_var (rs_machine_dependent
, max_prefix_size
, 0,
9411 ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0),
9414 fragP
->tc_frag_data
.max_bytes
= max_prefix_size
;
9417 /* Since the VEX/EVEX prefix contains the implicit prefix, we
9418 don't need the explicit prefix. */
9419 if (!i
.tm
.opcode_modifier
.vex
&& !i
.tm
.opcode_modifier
.evex
)
9421 switch (i
.tm
.opcode_modifier
.opcodeprefix
)
9430 if (!i
.tm
.cpu_flags
.bitfield
.cpupadlock
9431 || (i
.prefix
[REP_PREFIX
] != 0xf3))
9435 switch (i
.opcode_length
)
9440 /* Check for pseudo prefixes. */
9441 if (!i
.tm
.opcode_modifier
.isprefix
|| i
.tm
.base_opcode
)
9443 as_bad_where (insn_start_frag
->fr_file
,
9444 insn_start_frag
->fr_line
,
9445 _("pseudo prefix without instruction"));
9455 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9456 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
9457 R_X86_64_GOTTPOFF relocation so that linker can safely
9458 perform IE->LE optimization. A dummy REX_OPCODE prefix
9459 is also needed for lea with R_X86_64_GOTPC32_TLSDESC
9460 relocation for GDesc -> IE/LE optimization. */
9461 if (x86_elf_abi
== X86_64_X32_ABI
9463 && (i
.reloc
[0] == BFD_RELOC_X86_64_GOTTPOFF
9464 || i
.reloc
[0] == BFD_RELOC_X86_64_GOTPC32_TLSDESC
)
9465 && i
.prefix
[REX_PREFIX
] == 0)
9466 add_prefix (REX_OPCODE
);
9469 /* The prefix bytes. */
9470 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
9472 frag_opcode_byte (*q
);
9476 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
9482 frag_opcode_byte (*q
);
9485 /* There should be no other prefixes for instructions
9490 /* For EVEX instructions i.vrex should become 0 after
9491 build_evex_prefix. For VEX instructions upper 16 registers
9492 aren't available, so VREX should be 0. */
9495 /* Now the VEX prefix. */
9496 if (now_seg
!= absolute_section
)
9498 p
= frag_more (i
.vex
.length
);
9499 for (j
= 0; j
< i
.vex
.length
; j
++)
9500 p
[j
] = i
.vex
.bytes
[j
];
9503 abs_section_offset
+= i
.vex
.length
;
9506 /* Now the opcode; be careful about word order here! */
9507 j
= i
.opcode_length
;
9509 switch (i
.tm
.opcode_modifier
.opcodespace
)
9524 if (now_seg
== absolute_section
)
9525 abs_section_offset
+= j
;
9528 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
9534 && i
.tm
.opcode_modifier
.opcodespace
!= SPACE_BASE
)
9537 if (i
.tm
.opcode_modifier
.opcodespace
!= SPACE_0F
)
9538 *p
++ = i
.tm
.opcode_modifier
.opcodespace
== SPACE_0F38
9542 switch (i
.opcode_length
)
9545 /* Put out high byte first: can't use md_number_to_chars! */
9546 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
9549 *p
= i
.tm
.base_opcode
& 0xff;
9558 /* Now the modrm byte and sib byte (if present). */
9559 if (i
.tm
.opcode_modifier
.modrm
)
9561 frag_opcode_byte ((i
.rm
.regmem
<< 0)
9563 | (i
.rm
.mode
<< 6));
9564 /* If i.rm.regmem == ESP (4)
9565 && i.rm.mode != (Register mode)
9567 ==> need second modrm byte. */
9568 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
9570 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.word
))
9571 frag_opcode_byte ((i
.sib
.base
<< 0)
9572 | (i
.sib
.index
<< 3)
9573 | (i
.sib
.scale
<< 6));
9576 if (i
.disp_operands
)
9577 output_disp (insn_start_frag
, insn_start_off
);
9580 output_imm (insn_start_frag
, insn_start_off
);
9583 * frag_now_fix () returning plain abs_section_offset when we're in the
9584 * absolute section, and abs_section_offset not getting updated as data
9585 * gets added to the frag breaks the logic below.
9587 if (now_seg
!= absolute_section
)
9589 j
= encoding_length (insn_start_frag
, insn_start_off
, frag_more (0));
9591 as_warn (_("instruction length of %u bytes exceeds the limit of 15"),
9595 /* NB: Don't add prefix with GOTPC relocation since
9596 output_disp() above depends on the fixed encoding
9597 length. Can't add prefix with TLS relocation since
9598 it breaks TLS linker optimization. */
9599 unsigned int max
= i
.has_gotpc_tls_reloc
? 0 : 15 - j
;
9600 /* Prefix count on the current instruction. */
9601 unsigned int count
= i
.vex
.length
;
9603 for (k
= 0; k
< ARRAY_SIZE (i
.prefix
); k
++)
9604 /* REX byte is encoded in VEX/EVEX prefix. */
9605 if (i
.prefix
[k
] && (k
!= REX_PREFIX
|| !i
.vex
.length
))
9608 /* Count prefixes for extended opcode maps. */
9610 switch (i
.tm
.opcode_modifier
.opcodespace
)
9625 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
9628 /* Set the maximum prefix size in BRANCH_PREFIX
9630 if (fragP
->tc_frag_data
.max_bytes
> max
)
9631 fragP
->tc_frag_data
.max_bytes
= max
;
9632 if (fragP
->tc_frag_data
.max_bytes
> count
)
9633 fragP
->tc_frag_data
.max_bytes
-= count
;
9635 fragP
->tc_frag_data
.max_bytes
= 0;
9639 /* Remember the maximum prefix size in FUSED_JCC_PADDING
9641 unsigned int max_prefix_size
;
9642 if (align_branch_prefix_size
> max
)
9643 max_prefix_size
= max
;
9645 max_prefix_size
= align_branch_prefix_size
;
9646 if (max_prefix_size
> count
)
9647 fragP
->tc_frag_data
.max_prefix_length
9648 = max_prefix_size
- count
;
9651 /* Use existing segment prefix if possible. Use CS
9652 segment prefix in 64-bit mode. In 32-bit mode, use SS
9653 segment prefix with ESP/EBP base register and use DS
9654 segment prefix without ESP/EBP base register. */
9655 if (i
.prefix
[SEG_PREFIX
])
9656 fragP
->tc_frag_data
.default_prefix
= i
.prefix
[SEG_PREFIX
];
9657 else if (flag_code
== CODE_64BIT
)
9658 fragP
->tc_frag_data
.default_prefix
= CS_PREFIX_OPCODE
;
9660 && (i
.base_reg
->reg_num
== 4
9661 || i
.base_reg
->reg_num
== 5))
9662 fragP
->tc_frag_data
.default_prefix
= SS_PREFIX_OPCODE
;
9664 fragP
->tc_frag_data
.default_prefix
= DS_PREFIX_OPCODE
;
9669 /* NB: Don't work with COND_JUMP86 without i386. */
9670 if (align_branch_power
9671 && now_seg
!= absolute_section
9672 && cpu_arch_flags
.bitfield
.cpui386
)
9674 /* Terminate each frag so that we can add prefix and check for
9676 frag_wane (frag_now
);
9683 pi ("" /*line*/, &i
);
9685 #endif /* DEBUG386 */
9688 /* Return the size of the displacement operand N. */
9691 disp_size (unsigned int n
)
9695 if (i
.types
[n
].bitfield
.disp64
)
9697 else if (i
.types
[n
].bitfield
.disp8
)
9699 else if (i
.types
[n
].bitfield
.disp16
)
9704 /* Return the size of the immediate operand N. */
9707 imm_size (unsigned int n
)
9710 if (i
.types
[n
].bitfield
.imm64
)
9712 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
9714 else if (i
.types
[n
].bitfield
.imm16
)
9720 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
9725 for (n
= 0; n
< i
.operands
; n
++)
9727 if (operand_type_check (i
.types
[n
], disp
))
9729 int size
= disp_size (n
);
9731 if (now_seg
== absolute_section
)
9732 abs_section_offset
+= size
;
9733 else if (i
.op
[n
].disps
->X_op
== O_constant
)
9735 offsetT val
= i
.op
[n
].disps
->X_add_number
;
9737 val
= offset_in_range (val
>> (size
== 1 ? i
.memshift
: 0),
9739 p
= frag_more (size
);
9740 md_number_to_chars (p
, val
, size
);
9744 enum bfd_reloc_code_real reloc_type
;
9745 int sign
= i
.types
[n
].bitfield
.disp32s
;
9746 int pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
9749 /* We can't have 8 bit displacement here. */
9750 gas_assert (!i
.types
[n
].bitfield
.disp8
);
9752 /* The PC relative address is computed relative
9753 to the instruction boundary, so in case immediate
9754 fields follows, we need to adjust the value. */
9755 if (pcrel
&& i
.imm_operands
)
9760 for (n1
= 0; n1
< i
.operands
; n1
++)
9761 if (operand_type_check (i
.types
[n1
], imm
))
9763 /* Only one immediate is allowed for PC
9764 relative address. */
9765 gas_assert (sz
== 0);
9767 i
.op
[n
].disps
->X_add_number
-= sz
;
9769 /* We should find the immediate. */
9770 gas_assert (sz
!= 0);
9773 p
= frag_more (size
);
9774 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
9776 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
9777 && (((reloc_type
== BFD_RELOC_32
9778 || reloc_type
== BFD_RELOC_X86_64_32S
9779 || (reloc_type
== BFD_RELOC_64
9781 && (i
.op
[n
].disps
->X_op
== O_symbol
9782 || (i
.op
[n
].disps
->X_op
== O_add
9783 && ((symbol_get_value_expression
9784 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
9786 || reloc_type
== BFD_RELOC_32_PCREL
))
9790 reloc_type
= BFD_RELOC_386_GOTPC
;
9791 i
.has_gotpc_tls_reloc
= TRUE
;
9792 i
.op
[n
].imms
->X_add_number
+=
9793 encoding_length (insn_start_frag
, insn_start_off
, p
);
9795 else if (reloc_type
== BFD_RELOC_64
)
9796 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
9798 /* Don't do the adjustment for x86-64, as there
9799 the pcrel addressing is relative to the _next_
9800 insn, and that is taken care of in other code. */
9801 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
9803 else if (align_branch_power
)
9807 case BFD_RELOC_386_TLS_GD
:
9808 case BFD_RELOC_386_TLS_LDM
:
9809 case BFD_RELOC_386_TLS_IE
:
9810 case BFD_RELOC_386_TLS_IE_32
:
9811 case BFD_RELOC_386_TLS_GOTIE
:
9812 case BFD_RELOC_386_TLS_GOTDESC
:
9813 case BFD_RELOC_386_TLS_DESC_CALL
:
9814 case BFD_RELOC_X86_64_TLSGD
:
9815 case BFD_RELOC_X86_64_TLSLD
:
9816 case BFD_RELOC_X86_64_GOTTPOFF
:
9817 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
9818 case BFD_RELOC_X86_64_TLSDESC_CALL
:
9819 i
.has_gotpc_tls_reloc
= TRUE
;
9824 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
,
9825 size
, i
.op
[n
].disps
, pcrel
,
9827 /* Check for "call/jmp *mem", "mov mem, %reg",
9828 "test %reg, mem" and "binop mem, %reg" where binop
9829 is one of adc, add, and, cmp, or, sbb, sub, xor
9830 instructions without data prefix. Always generate
9831 R_386_GOT32X for "sym*GOT" operand in 32-bit mode. */
9832 if (i
.prefix
[DATA_PREFIX
] == 0
9833 && (generate_relax_relocations
9836 && i
.rm
.regmem
== 5))
9838 || (i
.rm
.mode
== 0 && i
.rm
.regmem
== 5))
9839 && i
.tm
.opcode_modifier
.opcodespace
== SPACE_BASE
9840 && ((i
.operands
== 1
9841 && i
.tm
.base_opcode
== 0xff
9842 && (i
.rm
.reg
== 2 || i
.rm
.reg
== 4))
9844 && (i
.tm
.base_opcode
== 0x8b
9845 || i
.tm
.base_opcode
== 0x85
9846 || (i
.tm
.base_opcode
& ~0x38) == 0x03))))
9850 fixP
->fx_tcbit
= i
.rex
!= 0;
9852 && (i
.base_reg
->reg_num
== RegIP
))
9853 fixP
->fx_tcbit2
= 1;
9856 fixP
->fx_tcbit2
= 1;
9864 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
9869 for (n
= 0; n
< i
.operands
; n
++)
9871 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
9872 if (i
.rounding
&& n
== i
.rounding
->operand
)
9875 if (operand_type_check (i
.types
[n
], imm
))
9877 int size
= imm_size (n
);
9879 if (now_seg
== absolute_section
)
9880 abs_section_offset
+= size
;
9881 else if (i
.op
[n
].imms
->X_op
== O_constant
)
9885 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
9887 p
= frag_more (size
);
9888 md_number_to_chars (p
, val
, size
);
9892 /* Not absolute_section.
9893 Need a 32-bit fixup (don't support 8bit
9894 non-absolute imms). Try to support other
9896 enum bfd_reloc_code_real reloc_type
;
9899 if (i
.types
[n
].bitfield
.imm32s
9900 && (i
.suffix
== QWORD_MNEM_SUFFIX
9901 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)))
9906 p
= frag_more (size
);
9907 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
9909 /* This is tough to explain. We end up with this one if we
9910 * have operands that look like
9911 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
9912 * obtain the absolute address of the GOT, and it is strongly
9913 * preferable from a performance point of view to avoid using
9914 * a runtime relocation for this. The actual sequence of
9915 * instructions often look something like:
9920 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
9922 * The call and pop essentially return the absolute address
9923 * of the label .L66 and store it in %ebx. The linker itself
9924 * will ultimately change the first operand of the addl so
9925 * that %ebx points to the GOT, but to keep things simple, the
9926 * .o file must have this operand set so that it generates not
9927 * the absolute address of .L66, but the absolute address of
9928 * itself. This allows the linker itself simply treat a GOTPC
9929 * relocation as asking for a pcrel offset to the GOT to be
9930 * added in, and the addend of the relocation is stored in the
9931 * operand field for the instruction itself.
9933 * Our job here is to fix the operand so that it would add
9934 * the correct offset so that %ebx would point to itself. The
9935 * thing that is tricky is that .-.L66 will point to the
9936 * beginning of the instruction, so we need to further modify
9937 * the operand so that it will point to itself. There are
9938 * other cases where you have something like:
9940 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
9942 * and here no correction would be required. Internally in
9943 * the assembler we treat operands of this form as not being
9944 * pcrel since the '.' is explicitly mentioned, and I wonder
9945 * whether it would simplify matters to do it this way. Who
9946 * knows. In earlier versions of the PIC patches, the
9947 * pcrel_adjust field was used to store the correction, but
9948 * since the expression is not pcrel, I felt it would be
9949 * confusing to do it this way. */
9951 if ((reloc_type
== BFD_RELOC_32
9952 || reloc_type
== BFD_RELOC_X86_64_32S
9953 || reloc_type
== BFD_RELOC_64
)
9955 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
9956 && (i
.op
[n
].imms
->X_op
== O_symbol
9957 || (i
.op
[n
].imms
->X_op
== O_add
9958 && ((symbol_get_value_expression
9959 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
9963 reloc_type
= BFD_RELOC_386_GOTPC
;
9965 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
9967 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
9968 i
.has_gotpc_tls_reloc
= TRUE
;
9969 i
.op
[n
].imms
->X_add_number
+=
9970 encoding_length (insn_start_frag
, insn_start_off
, p
);
9972 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
9973 i
.op
[n
].imms
, 0, reloc_type
);
9979 /* x86_cons_fix_new is called via the expression parsing code when a
9980 reloc is needed. We use this hook to get the correct .got reloc. */
9981 static int cons_sign
= -1;
9984 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
9985 expressionS
*exp
, bfd_reloc_code_real_type r
)
9987 r
= reloc (len
, 0, cons_sign
, r
);
9990 if (exp
->X_op
== O_secrel
)
9992 exp
->X_op
= O_symbol
;
9993 r
= BFD_RELOC_32_SECREL
;
9997 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
10000 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
10001 purpose of the `.dc.a' internal pseudo-op. */
10004 x86_address_bytes (void)
10006 if ((stdoutput
->arch_info
->mach
& bfd_mach_x64_32
))
10008 return stdoutput
->arch_info
->bits_per_address
/ 8;
10011 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
10012 || defined (LEX_AT)
10013 # define lex_got(reloc, adjust, types) NULL
10015 /* Parse operands of the form
10016 <symbol>@GOTOFF+<nnn>
10017 and similar .plt or .got references.
10019 If we find one, set up the correct relocation in RELOC and copy the
10020 input string, minus the `@GOTOFF' into a malloc'd buffer for
10021 parsing by the calling routine. Return this buffer, and if ADJUST
10022 is non-null set it to the length of the string we removed from the
10023 input line. Otherwise return NULL. */
10025 lex_got (enum bfd_reloc_code_real
*rel
,
10027 i386_operand_type
*types
)
10029 /* Some of the relocations depend on the size of what field is to
10030 be relocated. But in our callers i386_immediate and i386_displacement
10031 we don't yet know the operand size (this will be set by insn
10032 matching). Hence we record the word32 relocation here,
10033 and adjust the reloc according to the real size in reloc(). */
10034 static const struct {
10037 const enum bfd_reloc_code_real rel
[2];
10038 const i386_operand_type types64
;
10039 bfd_boolean need_GOT_symbol
;
10041 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10042 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32
,
10043 BFD_RELOC_SIZE32
},
10044 OPERAND_TYPE_IMM32_64
, FALSE
},
10046 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real
,
10047 BFD_RELOC_X86_64_PLTOFF64
},
10048 OPERAND_TYPE_IMM64
, TRUE
},
10049 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32
,
10050 BFD_RELOC_X86_64_PLT32
},
10051 OPERAND_TYPE_IMM32_32S_DISP32
, FALSE
},
10052 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real
,
10053 BFD_RELOC_X86_64_GOTPLT64
},
10054 OPERAND_TYPE_IMM64_DISP64
, TRUE
},
10055 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF
,
10056 BFD_RELOC_X86_64_GOTOFF64
},
10057 OPERAND_TYPE_IMM64_DISP64
, TRUE
},
10058 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real
,
10059 BFD_RELOC_X86_64_GOTPCREL
},
10060 OPERAND_TYPE_IMM32_32S_DISP32
, TRUE
},
10061 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD
,
10062 BFD_RELOC_X86_64_TLSGD
},
10063 OPERAND_TYPE_IMM32_32S_DISP32
, TRUE
},
10064 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM
,
10065 _dummy_first_bfd_reloc_code_real
},
10066 OPERAND_TYPE_NONE
, TRUE
},
10067 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real
,
10068 BFD_RELOC_X86_64_TLSLD
},
10069 OPERAND_TYPE_IMM32_32S_DISP32
, TRUE
},
10070 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32
,
10071 BFD_RELOC_X86_64_GOTTPOFF
},
10072 OPERAND_TYPE_IMM32_32S_DISP32
, TRUE
},
10073 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32
,
10074 BFD_RELOC_X86_64_TPOFF32
},
10075 OPERAND_TYPE_IMM32_32S_64_DISP32_64
, TRUE
},
10076 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE
,
10077 _dummy_first_bfd_reloc_code_real
},
10078 OPERAND_TYPE_NONE
, TRUE
},
10079 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32
,
10080 BFD_RELOC_X86_64_DTPOFF32
},
10081 OPERAND_TYPE_IMM32_32S_64_DISP32_64
, TRUE
},
10082 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE
,
10083 _dummy_first_bfd_reloc_code_real
},
10084 OPERAND_TYPE_NONE
, TRUE
},
10085 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE
,
10086 _dummy_first_bfd_reloc_code_real
},
10087 OPERAND_TYPE_NONE
, TRUE
},
10088 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32
,
10089 BFD_RELOC_X86_64_GOT32
},
10090 OPERAND_TYPE_IMM32_32S_64_DISP32
, TRUE
},
10091 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC
,
10092 BFD_RELOC_X86_64_GOTPC32_TLSDESC
},
10093 OPERAND_TYPE_IMM32_32S_DISP32
, TRUE
},
10094 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL
,
10095 BFD_RELOC_X86_64_TLSDESC_CALL
},
10096 OPERAND_TYPE_IMM32_32S_DISP32
, TRUE
},
10101 #if defined (OBJ_MAYBE_ELF)
10106 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
10107 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
10110 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
10112 int len
= gotrel
[j
].len
;
10113 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
10115 if (gotrel
[j
].rel
[object_64bit
] != 0)
10118 char *tmpbuf
, *past_reloc
;
10120 *rel
= gotrel
[j
].rel
[object_64bit
];
10124 if (flag_code
!= CODE_64BIT
)
10126 types
->bitfield
.imm32
= 1;
10127 types
->bitfield
.disp32
= 1;
10130 *types
= gotrel
[j
].types64
;
10133 if (gotrel
[j
].need_GOT_symbol
&& GOT_symbol
== NULL
)
10134 GOT_symbol
= symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME
);
10136 /* The length of the first part of our input line. */
10137 first
= cp
- input_line_pointer
;
10139 /* The second part goes from after the reloc token until
10140 (and including) an end_of_line char or comma. */
10141 past_reloc
= cp
+ 1 + len
;
10143 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
10145 second
= cp
+ 1 - past_reloc
;
10147 /* Allocate and copy string. The trailing NUL shouldn't
10148 be necessary, but be safe. */
10149 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
10150 memcpy (tmpbuf
, input_line_pointer
, first
);
10151 if (second
!= 0 && *past_reloc
!= ' ')
10152 /* Replace the relocation token with ' ', so that
10153 errors like foo@GOTOFF1 will be detected. */
10154 tmpbuf
[first
++] = ' ';
10156 /* Increment length by 1 if the relocation token is
10161 memcpy (tmpbuf
+ first
, past_reloc
, second
);
10162 tmpbuf
[first
+ second
] = '\0';
10166 as_bad (_("@%s reloc is not supported with %d-bit output format"),
10167 gotrel
[j
].str
, 1 << (5 + object_64bit
));
10172 /* Might be a symbol version string. Don't as_bad here. */
10181 /* Parse operands of the form
10182 <symbol>@SECREL32+<nnn>
10184 If we find one, set up the correct relocation in RELOC and copy the
10185 input string, minus the `@SECREL32' into a malloc'd buffer for
10186 parsing by the calling routine. Return this buffer, and if ADJUST
10187 is non-null set it to the length of the string we removed from the
10188 input line. Otherwise return NULL.
10190 This function is copied from the ELF version above adjusted for PE targets. */
10193 lex_got (enum bfd_reloc_code_real
*rel ATTRIBUTE_UNUSED
,
10194 int *adjust ATTRIBUTE_UNUSED
,
10195 i386_operand_type
*types
)
10197 static const struct
10201 const enum bfd_reloc_code_real rel
[2];
10202 const i386_operand_type types64
;
10206 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL
,
10207 BFD_RELOC_32_SECREL
},
10208 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
10214 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
10215 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
10218 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
10220 int len
= gotrel
[j
].len
;
10222 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
10224 if (gotrel
[j
].rel
[object_64bit
] != 0)
10227 char *tmpbuf
, *past_reloc
;
10229 *rel
= gotrel
[j
].rel
[object_64bit
];
10235 if (flag_code
!= CODE_64BIT
)
10237 types
->bitfield
.imm32
= 1;
10238 types
->bitfield
.disp32
= 1;
10241 *types
= gotrel
[j
].types64
;
10244 /* The length of the first part of our input line. */
10245 first
= cp
- input_line_pointer
;
10247 /* The second part goes from after the reloc token until
10248 (and including) an end_of_line char or comma. */
10249 past_reloc
= cp
+ 1 + len
;
10251 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
10253 second
= cp
+ 1 - past_reloc
;
10255 /* Allocate and copy string. The trailing NUL shouldn't
10256 be necessary, but be safe. */
10257 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
10258 memcpy (tmpbuf
, input_line_pointer
, first
);
10259 if (second
!= 0 && *past_reloc
!= ' ')
10260 /* Replace the relocation token with ' ', so that
10261 errors like foo@SECLREL321 will be detected. */
10262 tmpbuf
[first
++] = ' ';
10263 memcpy (tmpbuf
+ first
, past_reloc
, second
);
10264 tmpbuf
[first
+ second
] = '\0';
10268 as_bad (_("@%s reloc is not supported with %d-bit output format"),
10269 gotrel
[j
].str
, 1 << (5 + object_64bit
));
10274 /* Might be a symbol version string. Don't as_bad here. */
10280 bfd_reloc_code_real_type
10281 x86_cons (expressionS
*exp
, int size
)
10283 bfd_reloc_code_real_type got_reloc
= NO_RELOC
;
10285 intel_syntax
= -intel_syntax
;
10288 if (size
== 4 || (object_64bit
&& size
== 8))
10290 /* Handle @GOTOFF and the like in an expression. */
10292 char *gotfree_input_line
;
10295 save
= input_line_pointer
;
10296 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
);
10297 if (gotfree_input_line
)
10298 input_line_pointer
= gotfree_input_line
;
10302 if (gotfree_input_line
)
10304 /* expression () has merrily parsed up to the end of line,
10305 or a comma - in the wrong buffer. Transfer how far
10306 input_line_pointer has moved to the right buffer. */
10307 input_line_pointer
= (save
10308 + (input_line_pointer
- gotfree_input_line
)
10310 free (gotfree_input_line
);
10311 if (exp
->X_op
== O_constant
10312 || exp
->X_op
== O_absent
10313 || exp
->X_op
== O_illegal
10314 || exp
->X_op
== O_register
10315 || exp
->X_op
== O_big
)
10317 char c
= *input_line_pointer
;
10318 *input_line_pointer
= 0;
10319 as_bad (_("missing or invalid expression `%s'"), save
);
10320 *input_line_pointer
= c
;
10322 else if ((got_reloc
== BFD_RELOC_386_PLT32
10323 || got_reloc
== BFD_RELOC_X86_64_PLT32
)
10324 && exp
->X_op
!= O_symbol
)
10326 char c
= *input_line_pointer
;
10327 *input_line_pointer
= 0;
10328 as_bad (_("invalid PLT expression `%s'"), save
);
10329 *input_line_pointer
= c
;
10336 intel_syntax
= -intel_syntax
;
10339 i386_intel_simplify (exp
);
10345 signed_cons (int size
)
10347 if (flag_code
== CODE_64BIT
)
10355 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
10362 if (exp
.X_op
== O_symbol
)
10363 exp
.X_op
= O_secrel
;
10365 emit_expr (&exp
, 4);
10367 while (*input_line_pointer
++ == ',');
10369 input_line_pointer
--;
10370 demand_empty_rest_of_line ();
10374 /* Handle Vector operations. */
10377 check_VecOperations (char *op_string
, char *op_end
)
10379 const reg_entry
*mask
;
10384 && (op_end
== NULL
|| op_string
< op_end
))
10387 if (*op_string
== '{')
10391 /* Check broadcasts. */
10392 if (strncmp (op_string
, "1to", 3) == 0)
10397 goto duplicated_vec_op
;
10400 if (*op_string
== '8')
10402 else if (*op_string
== '4')
10404 else if (*op_string
== '2')
10406 else if (*op_string
== '1'
10407 && *(op_string
+1) == '6')
10414 as_bad (_("Unsupported broadcast: `%s'"), saved
);
10419 broadcast_op
.type
= bcst_type
;
10420 broadcast_op
.operand
= this_operand
;
10421 broadcast_op
.bytes
= 0;
10422 i
.broadcast
= &broadcast_op
;
10424 /* Check masking operation. */
10425 else if ((mask
= parse_register (op_string
, &end_op
)) != NULL
)
10427 if (mask
== &bad_reg
)
10430 /* k0 can't be used for write mask. */
10431 if (mask
->reg_type
.bitfield
.class != RegMask
|| !mask
->reg_num
)
10433 as_bad (_("`%s%s' can't be used for write mask"),
10434 register_prefix
, mask
->reg_name
);
10441 i
.mask
.operand
= this_operand
;
10443 else if (i
.mask
.reg
->reg_num
)
10444 goto duplicated_vec_op
;
10449 /* Only "{z}" is allowed here. No need to check
10450 zeroing mask explicitly. */
10451 if (i
.mask
.operand
!= (unsigned int) this_operand
)
10453 as_bad (_("invalid write mask `%s'"), saved
);
10458 op_string
= end_op
;
10460 /* Check zeroing-flag for masking operation. */
10461 else if (*op_string
== 'z')
10465 i
.mask
.reg
= reg_k0
;
10466 i
.mask
.zeroing
= 1;
10467 i
.mask
.operand
= this_operand
;
10471 if (i
.mask
.zeroing
)
10474 as_bad (_("duplicated `%s'"), saved
);
10478 i
.mask
.zeroing
= 1;
10480 /* Only "{%k}" is allowed here. No need to check mask
10481 register explicitly. */
10482 if (i
.mask
.operand
!= (unsigned int) this_operand
)
10484 as_bad (_("invalid zeroing-masking `%s'"),
10493 goto unknown_vec_op
;
10495 if (*op_string
!= '}')
10497 as_bad (_("missing `}' in `%s'"), saved
);
10502 /* Strip whitespace since the addition of pseudo prefixes
10503 changed how the scrubber treats '{'. */
10504 if (is_space_char (*op_string
))
10510 /* We don't know this one. */
10511 as_bad (_("unknown vector operation: `%s'"), saved
);
10515 if (i
.mask
.reg
&& i
.mask
.zeroing
&& !i
.mask
.reg
->reg_num
)
10517 as_bad (_("zeroing-masking only allowed with write mask"));
10525 i386_immediate (char *imm_start
)
10527 char *save_input_line_pointer
;
10528 char *gotfree_input_line
;
10531 i386_operand_type types
;
10533 operand_type_set (&types
, ~0);
10535 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
10537 as_bad (_("at most %d immediate operands are allowed"),
10538 MAX_IMMEDIATE_OPERANDS
);
10542 exp
= &im_expressions
[i
.imm_operands
++];
10543 i
.op
[this_operand
].imms
= exp
;
10545 if (is_space_char (*imm_start
))
10548 save_input_line_pointer
= input_line_pointer
;
10549 input_line_pointer
= imm_start
;
10551 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
10552 if (gotfree_input_line
)
10553 input_line_pointer
= gotfree_input_line
;
10555 exp_seg
= expression (exp
);
10557 SKIP_WHITESPACE ();
10559 /* Handle vector operations. */
10560 if (*input_line_pointer
== '{')
10562 input_line_pointer
= check_VecOperations (input_line_pointer
,
10564 if (input_line_pointer
== NULL
)
10568 if (*input_line_pointer
)
10569 as_bad (_("junk `%s' after expression"), input_line_pointer
);
10571 input_line_pointer
= save_input_line_pointer
;
10572 if (gotfree_input_line
)
10574 free (gotfree_input_line
);
10576 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
10577 exp
->X_op
= O_illegal
;
10580 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
10584 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
10585 i386_operand_type types
, const char *imm_start
)
10587 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
10590 as_bad (_("missing or invalid immediate expression `%s'"),
10594 else if (exp
->X_op
== O_constant
)
10596 /* Size it properly later. */
10597 i
.types
[this_operand
].bitfield
.imm64
= 1;
10598 /* If not 64bit, sign extend val. */
10599 if (flag_code
!= CODE_64BIT
10600 && (exp
->X_add_number
& ~(((addressT
) 2 << 31) - 1)) == 0)
10602 = (exp
->X_add_number
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
10604 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10605 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
10606 && exp_seg
!= absolute_section
10607 && exp_seg
!= text_section
10608 && exp_seg
!= data_section
10609 && exp_seg
!= bss_section
10610 && exp_seg
!= undefined_section
10611 && !bfd_is_com_section (exp_seg
))
10613 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
10617 else if (!intel_syntax
&& exp_seg
== reg_section
)
10620 as_bad (_("illegal immediate register operand %s"), imm_start
);
10625 /* This is an address. The size of the address will be
10626 determined later, depending on destination register,
10627 suffix, or the default for the section. */
10628 i
.types
[this_operand
].bitfield
.imm8
= 1;
10629 i
.types
[this_operand
].bitfield
.imm16
= 1;
10630 i
.types
[this_operand
].bitfield
.imm32
= 1;
10631 i
.types
[this_operand
].bitfield
.imm32s
= 1;
10632 i
.types
[this_operand
].bitfield
.imm64
= 1;
10633 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
10641 i386_scale (char *scale
)
10644 char *save
= input_line_pointer
;
10646 input_line_pointer
= scale
;
10647 val
= get_absolute_expression ();
10652 i
.log2_scale_factor
= 0;
10655 i
.log2_scale_factor
= 1;
10658 i
.log2_scale_factor
= 2;
10661 i
.log2_scale_factor
= 3;
10665 char sep
= *input_line_pointer
;
10667 *input_line_pointer
= '\0';
10668 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
10670 *input_line_pointer
= sep
;
10671 input_line_pointer
= save
;
10675 if (i
.log2_scale_factor
!= 0 && i
.index_reg
== 0)
10677 as_warn (_("scale factor of %d without an index register"),
10678 1 << i
.log2_scale_factor
);
10679 i
.log2_scale_factor
= 0;
10681 scale
= input_line_pointer
;
10682 input_line_pointer
= save
;
10687 i386_displacement (char *disp_start
, char *disp_end
)
10691 char *save_input_line_pointer
;
10692 char *gotfree_input_line
;
10694 i386_operand_type bigdisp
, types
= anydisp
;
10697 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
10699 as_bad (_("at most %d displacement operands are allowed"),
10700 MAX_MEMORY_OPERANDS
);
10704 operand_type_set (&bigdisp
, 0);
10706 || i
.types
[this_operand
].bitfield
.baseindex
10707 || (current_templates
->start
->opcode_modifier
.jump
!= JUMP
10708 && current_templates
->start
->opcode_modifier
.jump
!= JUMP_DWORD
))
10710 i386_addressing_mode ();
10711 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
10712 if (flag_code
== CODE_64BIT
)
10716 bigdisp
.bitfield
.disp32s
= 1;
10717 bigdisp
.bitfield
.disp64
= 1;
10720 bigdisp
.bitfield
.disp32
= 1;
10722 else if ((flag_code
== CODE_16BIT
) ^ override
)
10723 bigdisp
.bitfield
.disp16
= 1;
10725 bigdisp
.bitfield
.disp32
= 1;
10729 /* For PC-relative branches, the width of the displacement may be
10730 dependent upon data size, but is never dependent upon address size.
10731 Also make sure to not unintentionally match against a non-PC-relative
10732 branch template. */
10733 static templates aux_templates
;
10734 const insn_template
*t
= current_templates
->start
;
10735 bfd_boolean has_intel64
= FALSE
;
10737 aux_templates
.start
= t
;
10738 while (++t
< current_templates
->end
)
10740 if (t
->opcode_modifier
.jump
10741 != current_templates
->start
->opcode_modifier
.jump
)
10743 if ((t
->opcode_modifier
.isa64
>= INTEL64
))
10744 has_intel64
= TRUE
;
10746 if (t
< current_templates
->end
)
10748 aux_templates
.end
= t
;
10749 current_templates
= &aux_templates
;
10752 override
= (i
.prefix
[DATA_PREFIX
] != 0);
10753 if (flag_code
== CODE_64BIT
)
10755 if ((override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
10756 && (!intel64
|| !has_intel64
))
10757 bigdisp
.bitfield
.disp16
= 1;
10759 bigdisp
.bitfield
.disp32s
= 1;
10764 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
10766 : LONG_MNEM_SUFFIX
));
10767 bigdisp
.bitfield
.disp32
= 1;
10768 if ((flag_code
== CODE_16BIT
) ^ override
)
10770 bigdisp
.bitfield
.disp32
= 0;
10771 bigdisp
.bitfield
.disp16
= 1;
10775 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
10778 exp
= &disp_expressions
[i
.disp_operands
];
10779 i
.op
[this_operand
].disps
= exp
;
10781 save_input_line_pointer
= input_line_pointer
;
10782 input_line_pointer
= disp_start
;
10783 END_STRING_AND_SAVE (disp_end
);
10785 #ifndef GCC_ASM_O_HACK
10786 #define GCC_ASM_O_HACK 0
10789 END_STRING_AND_SAVE (disp_end
+ 1);
10790 if (i
.types
[this_operand
].bitfield
.baseIndex
10791 && displacement_string_end
[-1] == '+')
10793 /* This hack is to avoid a warning when using the "o"
10794 constraint within gcc asm statements.
10797 #define _set_tssldt_desc(n,addr,limit,type) \
10798 __asm__ __volatile__ ( \
10799 "movw %w2,%0\n\t" \
10800 "movw %w1,2+%0\n\t" \
10801 "rorl $16,%1\n\t" \
10802 "movb %b1,4+%0\n\t" \
10803 "movb %4,5+%0\n\t" \
10804 "movb $0,6+%0\n\t" \
10805 "movb %h1,7+%0\n\t" \
10807 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
10809 This works great except that the output assembler ends
10810 up looking a bit weird if it turns out that there is
10811 no offset. You end up producing code that looks like:
10824 So here we provide the missing zero. */
10826 *displacement_string_end
= '0';
10829 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
10830 if (gotfree_input_line
)
10831 input_line_pointer
= gotfree_input_line
;
10833 exp_seg
= expression (exp
);
10835 SKIP_WHITESPACE ();
10836 if (*input_line_pointer
)
10837 as_bad (_("junk `%s' after expression"), input_line_pointer
);
10839 RESTORE_END_STRING (disp_end
+ 1);
10841 input_line_pointer
= save_input_line_pointer
;
10842 if (gotfree_input_line
)
10844 free (gotfree_input_line
);
10846 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
10847 exp
->X_op
= O_illegal
;
10850 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
10852 RESTORE_END_STRING (disp_end
);
10858 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
10859 i386_operand_type types
, const char *disp_start
)
10861 i386_operand_type bigdisp
;
10864 /* We do this to make sure that the section symbol is in
10865 the symbol table. We will ultimately change the relocation
10866 to be relative to the beginning of the section. */
10867 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
10868 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
10869 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
10871 if (exp
->X_op
!= O_symbol
)
10874 if (S_IS_LOCAL (exp
->X_add_symbol
)
10875 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
10876 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
10877 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
10878 exp
->X_op
= O_subtract
;
10879 exp
->X_op_symbol
= GOT_symbol
;
10880 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
10881 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
10882 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
10883 i
.reloc
[this_operand
] = BFD_RELOC_64
;
10885 i
.reloc
[this_operand
] = BFD_RELOC_32
;
10888 else if (exp
->X_op
== O_absent
10889 || exp
->X_op
== O_illegal
10890 || exp
->X_op
== O_big
)
10893 as_bad (_("missing or invalid displacement expression `%s'"),
10898 else if (flag_code
== CODE_64BIT
10899 && !i
.prefix
[ADDR_PREFIX
]
10900 && exp
->X_op
== O_constant
)
10902 /* Since displacement is signed extended to 64bit, don't allow
10903 disp32 and turn off disp32s if they are out of range. */
10904 i
.types
[this_operand
].bitfield
.disp32
= 0;
10905 if (!fits_in_signed_long (exp
->X_add_number
))
10907 i
.types
[this_operand
].bitfield
.disp32s
= 0;
10908 if (i
.types
[this_operand
].bitfield
.baseindex
)
10910 as_bad (_("0x%lx out range of signed 32bit displacement"),
10911 (long) exp
->X_add_number
);
10917 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10918 else if (exp
->X_op
!= O_constant
10919 && OUTPUT_FLAVOR
== bfd_target_aout_flavour
10920 && exp_seg
!= absolute_section
10921 && exp_seg
!= text_section
10922 && exp_seg
!= data_section
10923 && exp_seg
!= bss_section
10924 && exp_seg
!= undefined_section
10925 && !bfd_is_com_section (exp_seg
))
10927 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
10932 if (current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
10933 /* Constants get taken care of by optimize_disp(). */
10934 && exp
->X_op
!= O_constant
)
10935 i
.types
[this_operand
].bitfield
.disp8
= 1;
10937 /* Check if this is a displacement only operand. */
10938 bigdisp
= i
.types
[this_operand
];
10939 bigdisp
.bitfield
.disp8
= 0;
10940 bigdisp
.bitfield
.disp16
= 0;
10941 bigdisp
.bitfield
.disp32
= 0;
10942 bigdisp
.bitfield
.disp32s
= 0;
10943 bigdisp
.bitfield
.disp64
= 0;
10944 if (operand_type_all_zero (&bigdisp
))
10945 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
10951 /* Return the active addressing mode, taking address override and
10952 registers forming the address into consideration. Update the
10953 address override prefix if necessary. */
10955 static enum flag_code
10956 i386_addressing_mode (void)
10958 enum flag_code addr_mode
;
10960 if (i
.prefix
[ADDR_PREFIX
])
10961 addr_mode
= flag_code
== CODE_32BIT
? CODE_16BIT
: CODE_32BIT
;
10962 else if (flag_code
== CODE_16BIT
10963 && current_templates
->start
->cpu_flags
.bitfield
.cpumpx
10964 /* Avoid replacing the "16-bit addressing not allowed" diagnostic
10965 from md_assemble() by "is not a valid base/index expression"
10966 when there is a base and/or index. */
10967 && !i
.types
[this_operand
].bitfield
.baseindex
)
10969 /* MPX insn memory operands with neither base nor index must be forced
10970 to use 32-bit addressing in 16-bit mode. */
10971 addr_mode
= CODE_32BIT
;
10972 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
10974 gas_assert (!i
.types
[this_operand
].bitfield
.disp16
);
10975 gas_assert (!i
.types
[this_operand
].bitfield
.disp32
);
10979 addr_mode
= flag_code
;
10981 #if INFER_ADDR_PREFIX
10982 if (i
.mem_operands
== 0)
10984 /* Infer address prefix from the first memory operand. */
10985 const reg_entry
*addr_reg
= i
.base_reg
;
10987 if (addr_reg
== NULL
)
10988 addr_reg
= i
.index_reg
;
10992 if (addr_reg
->reg_type
.bitfield
.dword
)
10993 addr_mode
= CODE_32BIT
;
10994 else if (flag_code
!= CODE_64BIT
10995 && addr_reg
->reg_type
.bitfield
.word
)
10996 addr_mode
= CODE_16BIT
;
10998 if (addr_mode
!= flag_code
)
11000 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
11002 /* Change the size of any displacement too. At most one
11003 of Disp16 or Disp32 is set.
11004 FIXME. There doesn't seem to be any real need for
11005 separate Disp16 and Disp32 flags. The same goes for
11006 Imm16 and Imm32. Removing them would probably clean
11007 up the code quite a lot. */
11008 if (flag_code
!= CODE_64BIT
11009 && (i
.types
[this_operand
].bitfield
.disp16
11010 || i
.types
[this_operand
].bitfield
.disp32
))
11011 i
.types
[this_operand
]
11012 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
11022 /* Make sure the memory operand we've been dealt is valid.
11023 Return 1 on success, 0 on a failure. */
11026 i386_index_check (const char *operand_string
)
11028 const char *kind
= "base/index";
11029 enum flag_code addr_mode
= i386_addressing_mode ();
11030 const insn_template
*t
= current_templates
->start
;
11032 if (t
->opcode_modifier
.isstring
11033 && !t
->cpu_flags
.bitfield
.cpupadlock
11034 && (current_templates
->end
[-1].opcode_modifier
.isstring
11035 || i
.mem_operands
))
11037 /* Memory operands of string insns are special in that they only allow
11038 a single register (rDI, rSI, or rBX) as their memory address. */
11039 const reg_entry
*expected_reg
;
11040 static const char *di_si
[][2] =
11046 static const char *bx
[] = { "ebx", "bx", "rbx" };
11048 kind
= "string address";
11050 if (t
->opcode_modifier
.prefixok
== PrefixRep
)
11052 int es_op
= current_templates
->end
[-1].opcode_modifier
.isstring
11053 - IS_STRING_ES_OP0
;
11056 if (!current_templates
->end
[-1].operand_types
[0].bitfield
.baseindex
11057 || ((!i
.mem_operands
!= !intel_syntax
)
11058 && current_templates
->end
[-1].operand_types
[1]
11059 .bitfield
.baseindex
))
11062 = (const reg_entry
*) str_hash_find (reg_hash
,
11063 di_si
[addr_mode
][op
== es_op
]);
11067 = (const reg_entry
*)str_hash_find (reg_hash
, bx
[addr_mode
]);
11069 if (i
.base_reg
!= expected_reg
11071 || operand_type_check (i
.types
[this_operand
], disp
))
11073 /* The second memory operand must have the same size as
11077 && !((addr_mode
== CODE_64BIT
11078 && i
.base_reg
->reg_type
.bitfield
.qword
)
11079 || (addr_mode
== CODE_32BIT
11080 ? i
.base_reg
->reg_type
.bitfield
.dword
11081 : i
.base_reg
->reg_type
.bitfield
.word
)))
11084 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
11086 intel_syntax
? '[' : '(',
11088 expected_reg
->reg_name
,
11089 intel_syntax
? ']' : ')');
11096 as_bad (_("`%s' is not a valid %s expression"),
11097 operand_string
, kind
);
11102 if (addr_mode
!= CODE_16BIT
)
11104 /* 32-bit/64-bit checks. */
11105 if (i
.disp_encoding
== disp_encoding_16bit
)
11108 as_bad (_("invalid `%s' prefix"),
11109 addr_mode
== CODE_16BIT
? "{disp32}" : "{disp16}");
11114 && ((addr_mode
== CODE_64BIT
11115 ? !i
.base_reg
->reg_type
.bitfield
.qword
11116 : !i
.base_reg
->reg_type
.bitfield
.dword
)
11117 || (i
.index_reg
&& i
.base_reg
->reg_num
== RegIP
)
11118 || i
.base_reg
->reg_num
== RegIZ
))
11120 && !i
.index_reg
->reg_type
.bitfield
.xmmword
11121 && !i
.index_reg
->reg_type
.bitfield
.ymmword
11122 && !i
.index_reg
->reg_type
.bitfield
.zmmword
11123 && ((addr_mode
== CODE_64BIT
11124 ? !i
.index_reg
->reg_type
.bitfield
.qword
11125 : !i
.index_reg
->reg_type
.bitfield
.dword
)
11126 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
11129 /* bndmk, bndldx, bndstx and mandatory non-vector SIB have special restrictions. */
11130 if ((t
->opcode_modifier
.opcodeprefix
== PREFIX_0XF3
11131 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11132 && t
->base_opcode
== 0x1b)
11133 || (t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
11134 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11135 && (t
->base_opcode
& ~1) == 0x1a)
11136 || t
->opcode_modifier
.sib
== SIBMEM
)
11138 /* They cannot use RIP-relative addressing. */
11139 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
11141 as_bad (_("`%s' cannot be used here"), operand_string
);
11145 /* bndldx and bndstx ignore their scale factor. */
11146 if (t
->opcode_modifier
.opcodeprefix
== PREFIX_NONE
11147 && t
->opcode_modifier
.opcodespace
== SPACE_0F
11148 && (t
->base_opcode
& ~1) == 0x1a
11149 && i
.log2_scale_factor
)
11150 as_warn (_("register scaling is being ignored here"));
11155 /* 16-bit checks. */
11156 if (i
.disp_encoding
== disp_encoding_32bit
)
11160 && (!i
.base_reg
->reg_type
.bitfield
.word
11161 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
11163 && (!i
.index_reg
->reg_type
.bitfield
.word
11164 || !i
.index_reg
->reg_type
.bitfield
.baseindex
11166 && i
.base_reg
->reg_num
< 6
11167 && i
.index_reg
->reg_num
>= 6
11168 && i
.log2_scale_factor
== 0))))
11175 /* Handle vector immediates. */
11178 RC_SAE_immediate (const char *imm_start
)
11180 unsigned int match_found
, j
;
11181 const char *pstr
= imm_start
;
11189 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); j
++)
11191 if (!strncmp (pstr
, RC_NamesTable
[j
].name
, RC_NamesTable
[j
].len
))
11195 rc_op
.type
= RC_NamesTable
[j
].type
;
11196 rc_op
.operand
= this_operand
;
11197 i
.rounding
= &rc_op
;
11201 as_bad (_("duplicated `%s'"), imm_start
);
11204 pstr
+= RC_NamesTable
[j
].len
;
11212 if (*pstr
++ != '}')
11214 as_bad (_("Missing '}': '%s'"), imm_start
);
11217 /* RC/SAE immediate string should contain nothing more. */;
11220 as_bad (_("Junk after '}': '%s'"), imm_start
);
11224 exp
= &im_expressions
[i
.imm_operands
++];
11225 i
.op
[this_operand
].imms
= exp
;
11227 exp
->X_op
= O_constant
;
11228 exp
->X_add_number
= 0;
11229 exp
->X_add_symbol
= (symbolS
*) 0;
11230 exp
->X_op_symbol
= (symbolS
*) 0;
11232 i
.types
[this_operand
].bitfield
.imm8
= 1;
11236 /* Only string instructions can have a second memory operand, so
11237 reduce current_templates to just those if it contains any. */
11239 maybe_adjust_templates (void)
11241 const insn_template
*t
;
11243 gas_assert (i
.mem_operands
== 1);
11245 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
11246 if (t
->opcode_modifier
.isstring
)
11249 if (t
< current_templates
->end
)
11251 static templates aux_templates
;
11252 bfd_boolean recheck
;
11254 aux_templates
.start
= t
;
11255 for (; t
< current_templates
->end
; ++t
)
11256 if (!t
->opcode_modifier
.isstring
)
11258 aux_templates
.end
= t
;
11260 /* Determine whether to re-check the first memory operand. */
11261 recheck
= (aux_templates
.start
!= current_templates
->start
11262 || t
!= current_templates
->end
);
11264 current_templates
= &aux_templates
;
11268 i
.mem_operands
= 0;
11269 if (i
.memop1_string
!= NULL
11270 && i386_index_check (i
.memop1_string
) == 0)
11272 i
.mem_operands
= 1;
11279 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
11283 i386_att_operand (char *operand_string
)
11285 const reg_entry
*r
;
11287 char *op_string
= operand_string
;
11289 if (is_space_char (*op_string
))
11292 /* We check for an absolute prefix (differentiating,
11293 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
11294 if (*op_string
== ABSOLUTE_PREFIX
)
11297 if (is_space_char (*op_string
))
11299 i
.jumpabsolute
= TRUE
;
11302 /* Check if operand is a register. */
11303 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
11305 i386_operand_type temp
;
11310 /* Check for a segment override by searching for ':' after a
11311 segment register. */
11312 op_string
= end_op
;
11313 if (is_space_char (*op_string
))
11315 if (*op_string
== ':' && r
->reg_type
.bitfield
.class == SReg
)
11317 switch (r
->reg_num
)
11320 i
.seg
[i
.mem_operands
] = &es
;
11323 i
.seg
[i
.mem_operands
] = &cs
;
11326 i
.seg
[i
.mem_operands
] = &ss
;
11329 i
.seg
[i
.mem_operands
] = &ds
;
11332 i
.seg
[i
.mem_operands
] = &fs
;
11335 i
.seg
[i
.mem_operands
] = &gs
;
11339 /* Skip the ':' and whitespace. */
11341 if (is_space_char (*op_string
))
11344 if (!is_digit_char (*op_string
)
11345 && !is_identifier_char (*op_string
)
11346 && *op_string
!= '('
11347 && *op_string
!= ABSOLUTE_PREFIX
)
11349 as_bad (_("bad memory operand `%s'"), op_string
);
11352 /* Handle case of %es:*foo. */
11353 if (*op_string
== ABSOLUTE_PREFIX
)
11356 if (is_space_char (*op_string
))
11358 i
.jumpabsolute
= TRUE
;
11360 goto do_memory_reference
;
11363 /* Handle vector operations. */
11364 if (*op_string
== '{')
11366 op_string
= check_VecOperations (op_string
, NULL
);
11367 if (op_string
== NULL
)
11373 as_bad (_("junk `%s' after register"), op_string
);
11376 temp
= r
->reg_type
;
11377 temp
.bitfield
.baseindex
= 0;
11378 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
11380 i
.types
[this_operand
].bitfield
.unspecified
= 0;
11381 i
.op
[this_operand
].regs
= r
;
11384 else if (*op_string
== REGISTER_PREFIX
)
11386 as_bad (_("bad register name `%s'"), op_string
);
11389 else if (*op_string
== IMMEDIATE_PREFIX
)
11392 if (i
.jumpabsolute
)
11394 as_bad (_("immediate operand illegal with absolute jump"));
11397 if (!i386_immediate (op_string
))
11400 else if (RC_SAE_immediate (operand_string
))
11402 /* If it is a RC or SAE immediate, do nothing. */
11405 else if (is_digit_char (*op_string
)
11406 || is_identifier_char (*op_string
)
11407 || *op_string
== '"'
11408 || *op_string
== '(')
11410 /* This is a memory reference of some sort. */
11413 /* Start and end of displacement string expression (if found). */
11414 char *displacement_string_start
;
11415 char *displacement_string_end
;
11418 do_memory_reference
:
11419 if (i
.mem_operands
== 1 && !maybe_adjust_templates ())
11421 if ((i
.mem_operands
== 1
11422 && !current_templates
->start
->opcode_modifier
.isstring
)
11423 || i
.mem_operands
== 2)
11425 as_bad (_("too many memory references for `%s'"),
11426 current_templates
->start
->name
);
11430 /* Check for base index form. We detect the base index form by
11431 looking for an ')' at the end of the operand, searching
11432 for the '(' matching it, and finding a REGISTER_PREFIX or ','
11434 base_string
= op_string
+ strlen (op_string
);
11436 /* Handle vector operations. */
11437 vop_start
= strchr (op_string
, '{');
11438 if (vop_start
&& vop_start
< base_string
)
11440 if (check_VecOperations (vop_start
, base_string
) == NULL
)
11442 base_string
= vop_start
;
11446 if (is_space_char (*base_string
))
11449 /* If we only have a displacement, set-up for it to be parsed later. */
11450 displacement_string_start
= op_string
;
11451 displacement_string_end
= base_string
+ 1;
11453 if (*base_string
== ')')
11456 unsigned int parens_balanced
= 1;
11457 /* We've already checked that the number of left & right ()'s are
11458 equal, so this loop will not be infinite. */
11462 if (*base_string
== ')')
11464 if (*base_string
== '(')
11467 while (parens_balanced
);
11469 temp_string
= base_string
;
11471 /* Skip past '(' and whitespace. */
11473 if (is_space_char (*base_string
))
11476 if (*base_string
== ','
11477 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
11480 displacement_string_end
= temp_string
;
11482 i
.types
[this_operand
].bitfield
.baseindex
= 1;
11486 if (i
.base_reg
== &bad_reg
)
11488 base_string
= end_op
;
11489 if (is_space_char (*base_string
))
11493 /* There may be an index reg or scale factor here. */
11494 if (*base_string
== ',')
11497 if (is_space_char (*base_string
))
11500 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
11503 if (i
.index_reg
== &bad_reg
)
11505 base_string
= end_op
;
11506 if (is_space_char (*base_string
))
11508 if (*base_string
== ',')
11511 if (is_space_char (*base_string
))
11514 else if (*base_string
!= ')')
11516 as_bad (_("expecting `,' or `)' "
11517 "after index register in `%s'"),
11522 else if (*base_string
== REGISTER_PREFIX
)
11524 end_op
= strchr (base_string
, ',');
11527 as_bad (_("bad register name `%s'"), base_string
);
11531 /* Check for scale factor. */
11532 if (*base_string
!= ')')
11534 char *end_scale
= i386_scale (base_string
);
11539 base_string
= end_scale
;
11540 if (is_space_char (*base_string
))
11542 if (*base_string
!= ')')
11544 as_bad (_("expecting `)' "
11545 "after scale factor in `%s'"),
11550 else if (!i
.index_reg
)
11552 as_bad (_("expecting index register or scale factor "
11553 "after `,'; got '%c'"),
11558 else if (*base_string
!= ')')
11560 as_bad (_("expecting `,' or `)' "
11561 "after base register in `%s'"),
11566 else if (*base_string
== REGISTER_PREFIX
)
11568 end_op
= strchr (base_string
, ',');
11571 as_bad (_("bad register name `%s'"), base_string
);
11576 /* If there's an expression beginning the operand, parse it,
11577 assuming displacement_string_start and
11578 displacement_string_end are meaningful. */
11579 if (displacement_string_start
!= displacement_string_end
)
11581 if (!i386_displacement (displacement_string_start
,
11582 displacement_string_end
))
11586 /* Special case for (%dx) while doing input/output op. */
11588 && i
.base_reg
->reg_type
.bitfield
.instance
== RegD
11589 && i
.base_reg
->reg_type
.bitfield
.word
11590 && i
.index_reg
== 0
11591 && i
.log2_scale_factor
== 0
11592 && i
.seg
[i
.mem_operands
] == 0
11593 && !operand_type_check (i
.types
[this_operand
], disp
))
11595 i
.types
[this_operand
] = i
.base_reg
->reg_type
;
11599 if (i386_index_check (operand_string
) == 0)
11601 i
.flags
[this_operand
] |= Operand_Mem
;
11602 if (i
.mem_operands
== 0)
11603 i
.memop1_string
= xstrdup (operand_string
);
11608 /* It's not a memory operand; argh! */
11609 as_bad (_("invalid char %s beginning operand %d `%s'"),
11610 output_invalid (*op_string
),
11615 return 1; /* Normal return. */
/* NOTE(review): the text of this chunk appears garbled by extraction --
   each original source line is split across several lines and the old
   line numbers are fused into the text.  Code is preserved verbatim;
   only comments are added.  */
11618 /* Calculate the maximum variable size (i.e., excluding fr_fix)
11619 that an rs_machine_dependent frag may reach. */
/* Worst-case growth of a relaxable frag: 4 bytes for an unconditional
   jump (opcode stays one byte), 5 for other jumps (conditional jumps
   grow a second opcode byte).  */
11622 i386_frag_max_var (fragS
*frag
)
11624 /* The only relaxable frags are for jumps.
11625 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
11626 gas_assert (frag
->fr_type
== rs_machine_dependent
);
11627 return TYPE_FROM_RELAX_STATE (frag
->fr_subtype
) == UNCOND_JUMP
? 4 : 5;
/* Decide whether FR_SYMBOL is guaranteed to resolve within the current
   segment (so a jump to it can be relaxed without keeping a reloc).
   FR_VAR carries the pending reloc type, if any.  ELF-only.
   NOTE(review): source text is line-split by extraction; code kept
   verbatim.  */
11630 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11632 elf_symbol_resolved_in_segment_p (symbolS
*fr_symbol
, offsetT fr_var
)
11634 /* STT_GNU_IFUNC symbol must go through PLT. */
11635 if ((symbol_get_bfdsym (fr_symbol
)->flags
11636 & BSF_GNU_INDIRECT_FUNCTION
) != 0)
11639 if (!S_IS_EXTERNAL (fr_symbol
))
11640 /* Symbol may be weak or local. */
11641 return !S_IS_WEAK (fr_symbol
);
11643 /* Global symbols with non-default visibility can't be preempted. */
11644 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol
)) != STV_DEFAULT
)
/* An explicit PLT reloc means the call goes through the PLT and the
   symbol may be preempted at run time.  */
11647 if (fr_var
!= NO_RELOC
)
11648 switch ((enum bfd_reloc_code_real
) fr_var
)
11650 case BFD_RELOC_386_PLT32
:
11651 case BFD_RELOC_X86_64_PLT32
:
11652 /* Symbol with PLT relocation may be preempted. */
11658 /* Global symbols with default visibility in a shared library may be
11659 preempted by another definition. */
11664 /* Table 3-2. Macro-Fusible Instructions in Haswell Microarchitecture
11665 Note also work for Skylake and Cascadelake.
11666 ---------------------------------------------------------------------
11667 | JCC | ADD/SUB/CMP | INC/DEC | TEST/AND |
11668 | ------ | ----------- | ------- | -------- |
11670 | Jno | N | N | Y |
11671 | Jc/Jb | Y | N | Y |
11672 | Jae/Jnb | Y | N | Y |
11673 | Je/Jz | Y | Y | Y |
11674 | Jne/Jnz | Y | Y | Y |
11675 | Jna/Jbe | Y | N | Y |
11676 | Ja/Jnbe | Y | N | Y |
11678 | Jns | N | N | Y |
11679 | Jp/Jpe | N | N | Y |
11680 | Jnp/Jpo | N | N | Y |
11681 | Jl/Jnge | Y | Y | Y |
11682 | Jge/Jnl | Y | Y | Y |
11683 | Jle/Jng | Y | Y | Y |
11684 | Jg/Jnle | Y | Y | Y |
11685 --------------------------------------------------------------------- */
/* Return nonzero when the CMP-like instruction kind MF_CMP can
   macro-fuse with the conditional jump kind MF_JCC, per the table
   above (TEST/AND fuses with every jcc, hence the unconditional
   mf_cmp_test_and branch at the end).  */
11687 i386_macro_fusible_p (enum mf_cmp_kind mf_cmp
, enum mf_jcc_kind mf_jcc
)
11689 if (mf_cmp
== mf_cmp_alu_cmp
)
11690 return ((mf_jcc
>= mf_jcc_jc
&& mf_jcc
<= mf_jcc_jna
)
11691 || mf_jcc
== mf_jcc_jl
|| mf_jcc
== mf_jcc_jle
);
11692 if (mf_cmp
== mf_cmp_incdec
)
11693 return (mf_jcc
== mf_jcc_je
|| mf_jcc
== mf_jcc_jl
11694 || mf_jcc
== mf_jcc_jle
);
11695 if (mf_cmp
== mf_cmp_test_and
)
11700 /* Return the next non-empty frag. */
/* Starting from the frag after FRAGP, skip over zero-length rs_fill
   frags and return the first frag with content.  */
11703 i386_next_non_empty_frag (fragS
*fragP
)
11705 /* There may be a frag with a ".fill 0" when there is no room in
11706 the current frag for frag_grow in output_insn. */
11707 for (fragP
= fragP
->fr_next
;
11709 && fragP
->fr_type
== rs_fill
11710 && fragP
->fr_fix
== 0);
11711 fragP
= fragP
->fr_next
)
11716 /* Return the next jcc frag after BRANCH_PADDING. */
/* If PAD_FRAGP is a BRANCH_PADDING frag, look past it for a COND_JUMP
   frag that can macro-fuse with the CMP-like instruction ending in
   MAYBE_CMP_FRAGP; return that branch frag, otherwise fall through
   (returning NULL in the elided tail -- TODO confirm against full
   source; lines are missing here).  */
11719 i386_next_fusible_jcc_frag (fragS
*maybe_cmp_fragP
, fragS
*pad_fragP
)
11721 fragS
*branch_fragP
;
11725 if (pad_fragP
->fr_type
== rs_machine_dependent
11726 && (TYPE_FROM_RELAX_STATE (pad_fragP
->fr_subtype
)
11727 == BRANCH_PADDING
))
11729 branch_fragP
= i386_next_non_empty_frag (pad_fragP
);
11730 if (branch_fragP
->fr_type
!= rs_machine_dependent
)
11732 if (TYPE_FROM_RELAX_STATE (branch_fragP
->fr_subtype
) == COND_JUMP
11733 && i386_macro_fusible_p (maybe_cmp_fragP
->tc_frag_data
.mf_type
,
11734 pad_fragP
->tc_frag_data
.mf_type
))
11735 return branch_fragP
;
11741 /* Classify BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags. */
/* Two passes over the frag chain starting at FRAGP:
   1) link each padding frag to the branch it pads (converting
      FUSED_JCC_PADDING to BRANCH_PREFIX or waning it when fusion
      does not apply);
   2) accumulate the total BRANCH_PREFIX room available before each
      padding frag into tc_frag_data.max_prefix_length.
   Idempotent: bails out early via tc_frag_data.classified.
   NOTE(review): source text is line-split by extraction and some
   original lines are missing; code kept verbatim.  */
11744 i386_classify_machine_dependent_frag (fragS
*fragP
)
11748 fragS
*branch_fragP
;
11750 unsigned int max_prefix_length
;
/* Already processed this chain -- nothing to do.  */
11752 if (fragP
->tc_frag_data
.classified
)
11755 /* First scan for BRANCH_PADDING and FUSED_JCC_PADDING. Convert
11756 FUSED_JCC_PADDING and merge BRANCH_PADDING. */
11757 for (next_fragP
= fragP
;
11758 next_fragP
!= NULL
;
11759 next_fragP
= next_fragP
->fr_next
)
11761 next_fragP
->tc_frag_data
.classified
= 1;
11762 if (next_fragP
->fr_type
== rs_machine_dependent
)
11763 switch (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
))
11765 case BRANCH_PADDING
:
11766 /* The BRANCH_PADDING frag must be followed by a branch
11768 branch_fragP
= i386_next_non_empty_frag (next_fragP
);
11769 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
11771 case FUSED_JCC_PADDING
:
11772 /* Check if this is a fused jcc:
11774 CMP like instruction
11778 cmp_fragP
= i386_next_non_empty_frag (next_fragP
);
11779 pad_fragP
= i386_next_non_empty_frag (cmp_fragP
);
11780 branch_fragP
= i386_next_fusible_jcc_frag (next_fragP
, pad_fragP
);
11783 /* The BRANCH_PADDING frag is merged with the
11784 FUSED_JCC_PADDING frag. */
11785 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
11786 /* CMP like instruction size. */
11787 next_fragP
->tc_frag_data
.cmp_size
= cmp_fragP
->fr_fix
;
11788 frag_wane (pad_fragP
);
11789 /* Skip to branch_fragP. */
11790 next_fragP
= branch_fragP
;
11792 else if (next_fragP
->tc_frag_data
.max_prefix_length
)
11794 /* Turn FUSED_JCC_PADDING into BRANCH_PREFIX if it isn't
11796 next_fragP
->fr_subtype
11797 = ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0);
11798 next_fragP
->tc_frag_data
.max_bytes
11799 = next_fragP
->tc_frag_data
.max_prefix_length
;
11800 /* This will be updated in the BRANCH_PREFIX scan. */
11801 next_fragP
->tc_frag_data
.max_prefix_length
= 0;
11804 frag_wane (next_fragP
);
11809 /* Stop if there is no BRANCH_PREFIX. */
11810 if (!align_branch_prefix_size
)
11813 /* Scan for BRANCH_PREFIX. */
11814 for (; fragP
!= NULL
; fragP
= fragP
->fr_next
)
11816 if (fragP
->fr_type
!= rs_machine_dependent
11817 || (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
11821 /* Count all BRANCH_PREFIX frags before BRANCH_PADDING and
11822 COND_JUMP_PREFIX. */
11823 max_prefix_length
= 0;
11824 for (next_fragP
= fragP
;
11825 next_fragP
!= NULL
;
11826 next_fragP
= next_fragP
->fr_next
)
11828 if (next_fragP
->fr_type
== rs_fill
)
11829 /* Skip rs_fill frags. */
11831 else if (next_fragP
->fr_type
!= rs_machine_dependent
)
11832 /* Stop for all other frags. */
11835 /* rs_machine_dependent frags. */
11836 if (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11839 /* Count BRANCH_PREFIX frags. */
/* Clamp: prefixes beyond MAX_FUSED_JCC_PADDING_SIZE are useless,
   so extra BRANCH_PREFIX frags are waned.  */
11840 if (max_prefix_length
>= MAX_FUSED_JCC_PADDING_SIZE
)
11842 max_prefix_length
= MAX_FUSED_JCC_PADDING_SIZE
;
11843 frag_wane (next_fragP
);
11847 += next_fragP
->tc_frag_data
.max_bytes
;
11849 else if ((TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11851 || (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11852 == FUSED_JCC_PADDING
))
11854 /* Stop at BRANCH_PADDING and FUSED_JCC_PADDING. */
11855 fragP
->tc_frag_data
.u
.padding_fragP
= next_fragP
;
11859 /* Stop for other rs_machine_dependent frags. */
11863 fragP
->tc_frag_data
.max_prefix_length
= max_prefix_length
;
11865 /* Skip to the next frag. */
11866 fragP
= next_fragP
;
11870 /* Compute padding size for
11873 CMP like instruction
11875 COND_JUMP/UNCOND_JUMP
11880 COND_JUMP/UNCOND_JUMP
/* Compute how many padding bytes are needed before FRAGP so that the
   (fused CMP +) branch it precedes does not cross an
   (1 << align_branch_power)-byte boundary.  ADDRESS biases the frag's
   own address (0 when the real fr_address should be used).  Result
   must fit in a byte because it is stored in tc_frag_data.length.  */
11884 i386_branch_padding_size (fragS
*fragP
, offsetT address
)
11886 unsigned int offset
, size
, padding_size
;
11887 fragS
*branch_fragP
= fragP
->tc_frag_data
.u
.branch_fragP
;
11889 /* The start address of the BRANCH_PADDING or FUSED_JCC_PADDING frag. */
11891 address
= fragP
->fr_address
;
11892 address
+= fragP
->fr_fix
;
11894 /* CMP like instruction size. */
11895 size
= fragP
->tc_frag_data
.cmp_size
;
11897 /* The base size of the branch frag. */
11898 size
+= branch_fragP
->fr_fix
;
11900 /* Add opcode and displacement bytes for the rs_machine_dependent
11902 if (branch_fragP
->fr_type
== rs_machine_dependent
)
11903 size
+= md_relax_table
[branch_fragP
->fr_subtype
].rlx_length
;
11905 /* Check if branch is within boundary and doesn't end at the last
11907 offset
= address
& ((1U << align_branch_power
) - 1);
11908 if ((offset
+ size
) >= (1U << align_branch_power
))
11909 /* Padding needed to avoid crossing boundary. */
11910 padding_size
= (1U << align_branch_power
) - offset
;
11912 /* No padding needed. */
11915 /* The return value may be saved in tc_frag_data.length which is
11917 if (!fits_in_unsigned_byte (padding_size
))
11920 return padding_size
;
11923 /* i386_generic_table_relax_frag()
11925 Handle BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags to
11926 grow/shrink padding to align branch frags. Hand others to
/* Return value is the growth (may be negative) of the frag for this
   relax iteration; ordinary frags are delegated to relax_frag at the
   end.  NOTE(review): source text is line-split by extraction and some
   original lines are missing; code kept verbatim.  */
11930 i386_generic_table_relax_frag (segT segment
, fragS
*fragP
, long stretch
)
11932 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
11933 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
11935 long padding_size
= i386_branch_padding_size (fragP
, 0);
11936 long grow
= padding_size
- fragP
->tc_frag_data
.length
;
11938 /* When the BRANCH_PREFIX frag is used, the computed address
11939 must match the actual address and there should be no padding. */
11940 if (fragP
->tc_frag_data
.padding_address
11941 && (fragP
->tc_frag_data
.padding_address
!= fragP
->fr_address
11945 /* Update the padding size. */
11947 fragP
->tc_frag_data
.length
= padding_size
;
11951 else if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
11953 fragS
*padding_fragP
, *next_fragP
;
11954 long padding_size
, left_size
, last_size
;
11956 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
11957 if (!padding_fragP
)
11958 /* Use the padding set by the leading BRANCH_PREFIX frag. */
11959 return (fragP
->tc_frag_data
.length
11960 - fragP
->tc_frag_data
.last_length
);
11962 /* Compute the relative address of the padding frag in the very
11963 first time where the BRANCH_PREFIX frag sizes are zero. */
11964 if (!fragP
->tc_frag_data
.padding_address
)
11965 fragP
->tc_frag_data
.padding_address
11966 = padding_fragP
->fr_address
- (fragP
->fr_address
- stretch
);
11968 /* First update the last length from the previous iteration. */
11969 left_size
= fragP
->tc_frag_data
.prefix_length
;
11970 for (next_fragP
= fragP
;
11971 next_fragP
!= padding_fragP
;
11972 next_fragP
= next_fragP
->fr_next
)
11973 if (next_fragP
->fr_type
== rs_machine_dependent
11974 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11979 int max
= next_fragP
->tc_frag_data
.max_bytes
;
11983 if (max
> left_size
)
11988 next_fragP
->tc_frag_data
.last_length
= size
;
11992 next_fragP
->tc_frag_data
.last_length
= 0;
11995 /* Check the padding size for the padding frag. */
11996 padding_size
= i386_branch_padding_size
11997 (padding_fragP
, (fragP
->fr_address
11998 + fragP
->tc_frag_data
.padding_address
));
12000 last_size
= fragP
->tc_frag_data
.prefix_length
;
12001 /* Check if there is change from the last iteration. */
12002 if (padding_size
== last_size
)
12004 /* Update the expected address of the padding frag. */
12005 padding_fragP
->tc_frag_data
.padding_address
12006 = (fragP
->fr_address
+ padding_size
12007 + fragP
->tc_frag_data
.padding_address
);
12011 if (padding_size
> fragP
->tc_frag_data
.max_prefix_length
)
12013 /* No padding if there is no sufficient room. Clear the
12014 expected address of the padding frag. */
12015 padding_fragP
->tc_frag_data
.padding_address
= 0;
12019 /* Store the expected address of the padding frag. */
12020 padding_fragP
->tc_frag_data
.padding_address
12021 = (fragP
->fr_address
+ padding_size
12022 + fragP
->tc_frag_data
.padding_address
);
12024 fragP
->tc_frag_data
.prefix_length
= padding_size
;
12026 /* Update the length for the current iteration. */
12027 left_size
= padding_size
;
12028 for (next_fragP
= fragP
;
12029 next_fragP
!= padding_fragP
;
12030 next_fragP
= next_fragP
->fr_next
)
12031 if (next_fragP
->fr_type
== rs_machine_dependent
12032 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
12037 int max
= next_fragP
->tc_frag_data
.max_bytes
;
12041 if (max
> left_size
)
12046 next_fragP
->tc_frag_data
.length
= size
;
12050 next_fragP
->tc_frag_data
.length
= 0;
12053 return (fragP
->tc_frag_data
.length
12054 - fragP
->tc_frag_data
.last_length
);
/* Not an alignment frag -- use the generic relaxation machinery.  */
12056 return relax_frag (segment
, fragP
, stretch
);
12059 /* md_estimate_size_before_relax()
12061 Called just before relax() for rs_machine_dependent frags. The x86
12062 assembler uses these frags to handle variable size jump
12065 Any symbol that is now undefined will not become defined.
12066 Return the correct fr_subtype in the frag.
12067 Return the initial "guess for variable size of frag" to caller.
12068 The guess is actually the growth beyond the fixed part. Whatever
12069 we do to grow the fixed or variable part contributes to our
/* NOTE(review): source text is line-split by extraction and some
   original lines are missing; code kept verbatim.  */
12073 md_estimate_size_before_relax (fragS
*fragP
, segT segment
)
/* Alignment frags are sized by the classifier, not by relaxation.  */
12075 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
12076 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
12077 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
12079 i386_classify_machine_dependent_frag (fragP
);
12080 return fragP
->tc_frag_data
.length
;
12083 /* We've already got fragP->fr_subtype right; all we have to do is
12084 check for un-relaxable symbols. On an ELF system, we can't relax
12085 an externally visible symbol, because it may be overridden by a
12087 if (S_GET_SEGMENT (fragP
->fr_symbol
) != segment
12088 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12090 && !elf_symbol_resolved_in_segment_p (fragP
->fr_symbol
,
12093 #if defined (OBJ_COFF) && defined (TE_PE)
12094 || (OUTPUT_FLAVOR
== bfd_target_coff_flavour
12095 && S_IS_WEAK (fragP
->fr_symbol
))
12099 /* Symbol is undefined in this segment, or we need to keep a
12100 reloc so that weak symbols can be overridden. */
12101 int size
= (fragP
->fr_subtype
& CODE16
) ? 2 : 4;
12102 enum bfd_reloc_code_real reloc_type
;
12103 unsigned char *opcode
;
/* Pick the widest PC-relative reloc suitable for the code size.  */
12106 if (fragP
->fr_var
!= NO_RELOC
)
12107 reloc_type
= (enum bfd_reloc_code_real
) fragP
->fr_var
;
12108 else if (size
== 2)
12109 reloc_type
= BFD_RELOC_16_PCREL
;
12110 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12111 else if (need_plt32_p (fragP
->fr_symbol
))
12112 reloc_type
= BFD_RELOC_X86_64_PLT32
;
12115 reloc_type
= BFD_RELOC_32_PCREL
;
12117 old_fr_fix
= fragP
->fr_fix
;
12118 opcode
= (unsigned char *) fragP
->fr_opcode
;
12120 switch (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
))
12123 /* Make jmp (0xeb) a (d)word displacement jump. */
12125 fragP
->fr_fix
+= size
;
12126 fix_new (fragP
, old_fr_fix
, size
,
12128 fragP
->fr_offset
, 1,
12134 && (!no_cond_jump_promotion
|| fragP
->fr_var
!= NO_RELOC
))
12136 /* Negate the condition, and branch past an
12137 unconditional jump. */
12140 /* Insert an unconditional jump. */
12142 /* We added two extra opcode bytes, and have a two byte
12144 fragP
->fr_fix
+= 2 + 2;
12145 fix_new (fragP
, old_fr_fix
+ 2, 2,
12147 fragP
->fr_offset
, 1,
12151 /* Fall through. */
12154 if (no_cond_jump_promotion
&& fragP
->fr_var
== NO_RELOC
)
12158 fragP
->fr_fix
+= 1;
12159 fixP
= fix_new (fragP
, old_fr_fix
, 1,
12161 fragP
->fr_offset
, 1,
12162 BFD_RELOC_8_PCREL
);
12163 fixP
->fx_signed
= 1;
12167 /* This changes the byte-displacement jump 0x7N
12168 to the (d)word-displacement jump 0x0f,0x8N. */
12169 opcode
[1] = opcode
[0] + 0x10;
12170 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12171 /* We've added an opcode byte. */
12172 fragP
->fr_fix
+= 1 + size
;
12173 fix_new (fragP
, old_fr_fix
+ 1, size
,
12175 fragP
->fr_offset
, 1,
12180 BAD_CASE (fragP
->fr_subtype
);
/* Growth is everything added beyond the previous fixed size.  */
12184 return fragP
->fr_fix
- old_fr_fix
;
12187 /* Guess size depending on current relax state. Initially the relax
12188 state will correspond to a short jump and we return 1, because
12189 the variable part of the frag (the branch offset) is one byte
12190 long. However, we can relax a section more than once and in that
12191 case we must either set fr_subtype back to the unrelaxed state,
12192 or return the value for the appropriate branch. */
12193 return md_relax_table
[fragP
->fr_subtype
].rlx_length
;
12196 /* Called after relax() is finished.
12198 In: Address of frag.
12199 fr_type == rs_machine_dependent.
12200 fr_subtype is what the address relaxed to.
12202 Out: Any fixSs and constants are set up.
12203 Caller will turn frag into a ".space 0". */
/* NOTE(review): source text is line-split by extraction and some
   original lines are missing; code kept verbatim.  The runtime string
   "indiret branch" below is a typo ("indirect") also present upstream;
   left untouched here since strings are program behavior.  */
12206 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
12209 unsigned char *opcode
;
12210 unsigned char *where_to_put_displacement
= NULL
;
12211 offsetT target_address
;
12212 offsetT opcode_address
;
12213 unsigned int extension
= 0;
12214 offsetT displacement_from_opcode_start
;
/* Alignment frags: emit prefix bytes or NOPs instead of a jump.  */
12216 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
12217 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
12218 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12220 /* Generate nop padding. */
12221 unsigned int size
= fragP
->tc_frag_data
.length
;
12224 if (size
> fragP
->tc_frag_data
.max_bytes
)
12230 const char *branch
= "branch";
12231 const char *prefix
= "";
12232 fragS
*padding_fragP
;
12233 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
12236 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
/* Map the segment-override prefix byte to its name for the
   diagnostic message (cases elided by extraction).  */
12237 switch (fragP
->tc_frag_data
.default_prefix
)
12242 case CS_PREFIX_OPCODE
:
12245 case DS_PREFIX_OPCODE
:
12248 case ES_PREFIX_OPCODE
:
12251 case FS_PREFIX_OPCODE
:
12254 case GS_PREFIX_OPCODE
:
12257 case SS_PREFIX_OPCODE
:
12262 msg
= _("%s:%u: add %d%s at 0x%llx to align "
12263 "%s within %d-byte boundary\n");
12265 msg
= _("%s:%u: add additional %d%s at 0x%llx to "
12266 "align %s within %d-byte boundary\n");
12270 padding_fragP
= fragP
;
12271 msg
= _("%s:%u: add %d%s-byte nop at 0x%llx to align "
12272 "%s within %d-byte boundary\n");
12276 switch (padding_fragP
->tc_frag_data
.branch_type
)
12278 case align_branch_jcc
:
12281 case align_branch_fused
:
12282 branch
= "fused jcc";
12284 case align_branch_jmp
:
12287 case align_branch_call
:
12290 case align_branch_indirect
:
12291 branch
= "indiret branch";
12293 case align_branch_ret
:
12300 fprintf (stdout
, msg
,
12301 fragP
->fr_file
, fragP
->fr_line
, size
, prefix
,
12302 (long long) fragP
->fr_address
, branch
,
12303 1 << align_branch_power
);
/* BRANCH_PREFIX repeats the chosen prefix byte; the other kinds
   are filled with NOPs.  */
12305 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
12306 memset (fragP
->fr_opcode
,
12307 fragP
->tc_frag_data
.default_prefix
, size
);
12309 i386_generate_nops (fragP
, (char *) fragP
->fr_opcode
,
12311 fragP
->fr_fix
+= size
;
12316 opcode
= (unsigned char *) fragP
->fr_opcode
;
12318 /* Address we want to reach in file space. */
12319 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
12321 /* Address opcode resides at in file space. */
12322 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
12324 /* Displacement from opcode start to fill into instruction. */
12325 displacement_from_opcode_start
= target_address
- opcode_address
;
12327 if ((fragP
->fr_subtype
& BIG
) == 0)
12329 /* Don't have to change opcode. */
12330 extension
= 1; /* 1 opcode + 1 displacement */
12331 where_to_put_displacement
= &opcode
[1];
12335 if (no_cond_jump_promotion
12336 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
12337 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
12338 _("long jump required"));
12340 switch (fragP
->fr_subtype
)
12342 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
12343 extension
= 4; /* 1 opcode + 4 displacement */
12345 where_to_put_displacement
= &opcode
[1];
12348 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
12349 extension
= 2; /* 1 opcode + 2 displacement */
12351 where_to_put_displacement
= &opcode
[1];
12354 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
12355 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
12356 extension
= 5; /* 2 opcode + 4 displacement */
/* Promote byte-displacement jcc 0x7N to 0x0f,0x8N.  */
12357 opcode
[1] = opcode
[0] + 0x10;
12358 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12359 where_to_put_displacement
= &opcode
[2];
12362 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
12363 extension
= 3; /* 2 opcode + 2 displacement */
12364 opcode
[1] = opcode
[0] + 0x10;
12365 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
12366 where_to_put_displacement
= &opcode
[2];
12369 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
12374 where_to_put_displacement
= &opcode
[3];
12378 BAD_CASE (fragP
->fr_subtype
);
12383 /* If size is less than four we are sure that the operand fits,
12384 but if it's 4, then it could be that the displacement is larger
/* Range check: a 4-byte displacement must fit in signed 32 bits.  */
12386 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
12388 && ((addressT
) (displacement_from_opcode_start
- extension
12389 + ((addressT
) 1 << 31))
12390 > (((addressT
) 2 << 31) - 1)))
12392 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
12393 _("jump target out of range"));
12394 /* Make us emit 0. */
12395 displacement_from_opcode_start
= extension
;
12397 /* Now put displacement after opcode. */
12398 md_number_to_chars ((char *) where_to_put_displacement
,
12399 (valueT
) (displacement_from_opcode_start
- extension
),
12400 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
12401 fragP
->fr_fix
+= extension
;
12404 /* Apply a fixup (fixP) to segment data, once it has been determined
12405 by our caller that we have all the info we need to fix it up.
12407 Parameter valP is the pointer to the value of the bits.
12409 On the 386, immediates, displacements, and data pointers are all in
12410 the same (little-endian) format, so we don't need to care about which
12411 we are handling. */
/* NOTE(review): source text is line-split by extraction and some
   original lines are missing; code kept verbatim.  */
12414 md_apply_fix (fixS
*fixP
, valueT
*valP
, segT seg ATTRIBUTE_UNUSED
)
12416 char *p
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
12417 valueT value
= *valP
;
12419 #if !defined (TE_Mach)
/* Normalize absolute reloc types to their PC-relative forms when the
   fixup itself is PC-relative.  */
12420 if (fixP
->fx_pcrel
)
12422 switch (fixP
->fx_r_type
)
12428 fixP
->fx_r_type
= BFD_RELOC_64_PCREL
;
12431 case BFD_RELOC_X86_64_32S
:
12432 fixP
->fx_r_type
= BFD_RELOC_32_PCREL
;
12435 fixP
->fx_r_type
= BFD_RELOC_16_PCREL
;
12438 fixP
->fx_r_type
= BFD_RELOC_8_PCREL
;
12443 if (fixP
->fx_addsy
!= NULL
12444 && (fixP
->fx_r_type
== BFD_RELOC_32_PCREL
12445 || fixP
->fx_r_type
== BFD_RELOC_64_PCREL
12446 || fixP
->fx_r_type
== BFD_RELOC_16_PCREL
12447 || fixP
->fx_r_type
== BFD_RELOC_8_PCREL
)
12448 && !use_rela_relocations
)
12450 /* This is a hack. There should be a better way to handle this.
12451 This covers for the fact that bfd_install_relocation will
12452 subtract the current location (for partial_inplace, PC relative
12453 relocations); see more below. */
12457 || OUTPUT_FLAVOR
== bfd_target_coff_flavour
12460 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
12462 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12465 segT sym_seg
= S_GET_SEGMENT (fixP
->fx_addsy
);
12467 if ((sym_seg
== seg
12468 || (symbol_section_p (fixP
->fx_addsy
)
12469 && sym_seg
!= absolute_section
))
12470 && !generic_force_reloc (fixP
))
12472 /* Yes, we add the values in twice. This is because
12473 bfd_install_relocation subtracts them out again. I think
12474 bfd_install_relocation is broken, but I don't dare change
12476 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
12480 #if defined (OBJ_COFF) && defined (TE_PE)
12481 /* For some reason, the PE format does not store a
12482 section address offset for a PC relative symbol. */
12483 if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
12484 || S_IS_WEAK (fixP
->fx_addsy
))
12485 value
+= md_pcrel_from (fixP
);
12488 #if defined (OBJ_COFF) && defined (TE_PE)
12489 if (fixP
->fx_addsy
!= NULL
12490 && S_IS_WEAK (fixP
->fx_addsy
)
12491 /* PR 16858: Do not modify weak function references. */
12492 && ! fixP
->fx_pcrel
)
12494 #if !defined (TE_PEP)
12495 /* For x86 PE weak function symbols are neither PC-relative
12496 nor do they set S_IS_FUNCTION. So the only reliable way
12497 to detect them is to check the flags of their containing
12499 if (S_GET_SEGMENT (fixP
->fx_addsy
) != NULL
12500 && S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_CODE
)
12504 value
-= S_GET_VALUE (fixP
->fx_addsy
);
12508 /* Fix a few things - the dynamic linker expects certain values here,
12509 and we must not disappoint it. */
12510 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12511 if (IS_ELF
&& fixP
->fx_addsy
)
12512 switch (fixP
->fx_r_type
)
12514 case BFD_RELOC_386_PLT32
:
12515 case BFD_RELOC_X86_64_PLT32
:
12516 /* Make the jump instruction point to the address of the operand.
12517 At runtime we merely add the offset to the actual PLT entry.
12518 NB: Subtract the offset size only for jump instructions. */
12519 if (fixP
->fx_pcrel
)
12523 case BFD_RELOC_386_TLS_GD
:
12524 case BFD_RELOC_386_TLS_LDM
:
12525 case BFD_RELOC_386_TLS_IE_32
:
12526 case BFD_RELOC_386_TLS_IE
:
12527 case BFD_RELOC_386_TLS_GOTIE
:
12528 case BFD_RELOC_386_TLS_GOTDESC
:
12529 case BFD_RELOC_X86_64_TLSGD
:
12530 case BFD_RELOC_X86_64_TLSLD
:
12531 case BFD_RELOC_X86_64_GOTTPOFF
:
12532 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
12533 value
= 0; /* Fully resolved at runtime. No addend. */
12535 case BFD_RELOC_386_TLS_LE
:
12536 case BFD_RELOC_386_TLS_LDO_32
:
12537 case BFD_RELOC_386_TLS_LE_32
:
12538 case BFD_RELOC_X86_64_DTPOFF32
:
12539 case BFD_RELOC_X86_64_DTPOFF64
:
12540 case BFD_RELOC_X86_64_TPOFF32
:
12541 case BFD_RELOC_X86_64_TPOFF64
:
12542 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
12545 case BFD_RELOC_386_TLS_DESC_CALL
:
12546 case BFD_RELOC_X86_64_TLSDESC_CALL
:
12547 value
= 0; /* Fully resolved at runtime. No addend. */
12548 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
12552 case BFD_RELOC_VTABLE_INHERIT
:
12553 case BFD_RELOC_VTABLE_ENTRY
:
12560 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
12562 #endif /* !defined (TE_Mach) */
12564 /* Are we finished with this relocation now? */
12565 if (fixP
->fx_addsy
== NULL
)
12567 #if defined (OBJ_COFF) && defined (TE_PE)
12568 else if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
12571 /* Remember value for tc_gen_reloc. */
12572 fixP
->fx_addnumber
= value
;
12573 /* Clear out the frag for now. */
12577 else if (use_rela_relocations
)
12579 fixP
->fx_no_overflow
= 1;
12580 /* Remember value for tc_gen_reloc. */
12581 fixP
->fx_addnumber
= value
;
/* Finally patch the computed value into the frag, little-endian.  */
12585 md_number_to_chars (p
, value
, fixP
->fx_size
);
/* Convert an ASCII floating-point literal to the target's binary
   representation by delegating to the generic IEEE helper; the FALSE
   argument requests little-endian LITTLENUM order.  */
12589 md_atof (int type
, char *litP
, int *sizeP
)
12591 /* This outputs the LITTLENUMs in REVERSE order;
12592 in accord with the bigendian 386. */
12593 return ieee_md_atof (type
, litP
, sizeP
, FALSE
);
/* Scratch buffer for output_invalid: large enough for a quoted char
   or a "(0xNN)" rendering plus a NUL.  */
12596 static char output_invalid_buf
[sizeof (unsigned char) * 2 + 6];
/* Render character C for use in diagnostics: printable characters one
   way, unprintable ones as "(0xNN)" (the printable branch's format
   string is elided by extraction).  Returns a pointer to a static
   buffer -- not reentrant.  */
12599 output_invalid (int c
)
12602 snprintf (output_invalid_buf
, sizeof (output_invalid_buf
),
12605 snprintf (output_invalid_buf
, sizeof (output_invalid_buf
),
12606 "(0x%x)", (unsigned char) c
);
12607 return output_invalid_buf
;
12610 /* Verify that @r can be used in the current context. */
/* Gate register R against the active CPU architecture flags and code
   mode (flag_code): e.g. 32-bit regs need i386+, MMX/SSE/AVX/AVX-512/
   AMX register classes need their respective CPU feature bits, upper
   vector registers need EVEX in 64-bit mode.  May force i.vec_encoding
   to EVEX (or to error on conflict) as a side effect.
   NOTE(review): source text is line-split by extraction; code kept
   verbatim.  */
12612 static bfd_boolean
check_register (const reg_entry
*r
)
12614 if (allow_pseudo_reg
)
12617 if (operand_type_all_zero (&r
->reg_type
))
12620 if ((r
->reg_type
.bitfield
.dword
12621 || (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
> 3)
12622 || r
->reg_type
.bitfield
.class == RegCR
12623 || r
->reg_type
.bitfield
.class == RegDR
)
12624 && !cpu_arch_flags
.bitfield
.cpui386
)
12627 if (r
->reg_type
.bitfield
.class == RegTR
12628 && (flag_code
== CODE_64BIT
12629 || !cpu_arch_flags
.bitfield
.cpui386
12630 || cpu_arch_isa_flags
.bitfield
.cpui586
12631 || cpu_arch_isa_flags
.bitfield
.cpui686
))
12634 if (r
->reg_type
.bitfield
.class == RegMMX
&& !cpu_arch_flags
.bitfield
.cpummx
)
12637 if (!cpu_arch_flags
.bitfield
.cpuavx512f
)
12639 if (r
->reg_type
.bitfield
.zmmword
12640 || r
->reg_type
.bitfield
.class == RegMask
)
12643 if (!cpu_arch_flags
.bitfield
.cpuavx
)
12645 if (r
->reg_type
.bitfield
.ymmword
)
12648 if (!cpu_arch_flags
.bitfield
.cpusse
&& r
->reg_type
.bitfield
.xmmword
)
12653 if (r
->reg_type
.bitfield
.tmmword
12654 && (!cpu_arch_flags
.bitfield
.cpuamx_tile
12655 || flag_code
!= CODE_64BIT
))
12658 if (r
->reg_type
.bitfield
.class == RegBND
&& !cpu_arch_flags
.bitfield
.cpumpx
)
12661 /* Don't allow fake index register unless allow_index_reg isn't 0. */
12662 if (!allow_index_reg
&& r
->reg_num
== RegIZ
)
12665 /* Upper 16 vector registers are only available with VREX in 64bit
12666 mode, and require EVEX encoding. */
12667 if (r
->reg_flags
& RegVRex
)
12669 if (!cpu_arch_flags
.bitfield
.cpuavx512f
12670 || flag_code
!= CODE_64BIT
)
/* Side effect: force EVEX encoding; conflicting explicit request
   becomes an error.  */
12673 if (i
.vec_encoding
== vex_encoding_default
)
12674 i
.vec_encoding
= vex_encoding_evex
;
12675 else if (i
.vec_encoding
!= vex_encoding_evex
)
12676 i
.vec_encoding
= vex_encoding_error
;
12679 if (((r
->reg_flags
& (RegRex64
| RegRex
)) || r
->reg_type
.bitfield
.qword
)
12680 && (!cpu_arch_flags
.bitfield
.cpulm
|| r
->reg_type
.bitfield
.class != RegCR
)
12681 && flag_code
!= CODE_64BIT
)
12684 if (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
== RegFlat
12691 /* REG_STRING starts *before* REGISTER_PREFIX. */
/* Parse a literal register name at REG_STRING (optionally preceded by
   '%').  On success return its reg_entry and set *END_OP to the first
   character after the register; the "%st(i)" FPU form allows spaces
   inside the parentheses.  Returns NULL when the text is not a valid
   or currently-usable register (see check_register).
   NOTE(review): source text is line-split by extraction; code kept
   verbatim.  */
12693 static const reg_entry
*
12694 parse_real_register (char *reg_string
, char **end_op
)
12696 char *s
= reg_string
;
12698 char reg_name_given
[MAX_REG_NAME_SIZE
+ 1];
12699 const reg_entry
*r
;
12701 /* Skip possible REGISTER_PREFIX and possible whitespace. */
12702 if (*s
== REGISTER_PREFIX
)
12705 if (is_space_char (*s
))
/* Copy the canonicalized name (via register_chars) into the local
   buffer, bailing out if it would overflow.  */
12708 p
= reg_name_given
;
12709 while ((*p
++ = register_chars
[(unsigned char) *s
]) != '\0')
12711 if (p
>= reg_name_given
+ MAX_REG_NAME_SIZE
)
12712 return (const reg_entry
*) NULL
;
12716 /* For naked regs, make sure that we are not dealing with an identifier.
12717 This prevents confusing an identifier like `eax_var' with register
12719 if (allow_naked_reg
&& identifier_chars
[(unsigned char) *s
])
12720 return (const reg_entry
*) NULL
;
12724 r
= (const reg_entry
*) str_hash_find (reg_hash
, reg_name_given
)
;
12726 /* Handle floating point regs, allowing spaces in the (i) part. */
12727 if (r
== i386_regtab
/* %st is first entry of table */)
12729 if (!cpu_arch_flags
.bitfield
.cpu8087
12730 && !cpu_arch_flags
.bitfield
.cpu287
12731 && !cpu_arch_flags
.bitfield
.cpu387
12732 && !allow_pseudo_reg
)
12733 return (const reg_entry
*) NULL
;
12735 if (is_space_char (*s
))
12740 if (is_space_char (*s
))
12742 if (*s
>= '0' && *s
<= '7')
12744 int fpr
= *s
- '0';
12746 if (is_space_char (*s
))
12751 r
= (const reg_entry
*) str_hash_find (reg_hash
, "st(0)");
12756 /* We have "%st(" then garbage. */
12757 return (const reg_entry
*) NULL
;
/* Final availability check against the current CPU/mode.  */
12761 return r
&& check_register (r
) ? r
: NULL
;
12764 /* REG_STRING starts *before* REGISTER_PREFIX. */
12766 static const reg_entry
*
12767 parse_register (char *reg_string
, char **end_op
)
12769 const reg_entry
*r
;
12771 if (*reg_string
== REGISTER_PREFIX
|| allow_naked_reg
)
12772 r
= parse_real_register (reg_string
, end_op
);
12777 char *save
= input_line_pointer
;
12781 input_line_pointer
= reg_string
;
12782 c
= get_symbol_name (®_string
);
12783 symbolP
= symbol_find (reg_string
);
12784 if (symbolP
&& S_GET_SEGMENT (symbolP
) == reg_section
)
12786 const expressionS
*e
= symbol_get_value_expression (symbolP
);
12788 know (e
->X_op
== O_register
);
12789 know (e
->X_add_number
>= 0
12790 && (valueT
) e
->X_add_number
< i386_regtab_size
);
12791 r
= i386_regtab
+ e
->X_add_number
;
12792 if (!check_register (r
))
12794 as_bad (_("register '%s%s' cannot be used here"),
12795 register_prefix
, r
->reg_name
);
12798 *end_op
= input_line_pointer
;
12800 *input_line_pointer
= c
;
12801 input_line_pointer
= save
;
12807 i386_parse_name (char *name
, expressionS
*e
, char *nextcharP
)
12809 const reg_entry
*r
;
12810 char *end
= input_line_pointer
;
12813 r
= parse_register (name
, &input_line_pointer
);
12814 if (r
&& end
<= input_line_pointer
)
12816 *nextcharP
= *input_line_pointer
;
12817 *input_line_pointer
= 0;
12820 e
->X_op
= O_register
;
12821 e
->X_add_number
= r
- i386_regtab
;
12824 e
->X_op
= O_illegal
;
12827 input_line_pointer
= end
;
12829 return intel_syntax
? i386_intel_parse_name (name
, e
) : 0;
12833 md_operand (expressionS
*e
)
12836 const reg_entry
*r
;
12838 switch (*input_line_pointer
)
12840 case REGISTER_PREFIX
:
12841 r
= parse_real_register (input_line_pointer
, &end
);
12844 e
->X_op
= O_register
;
12845 e
->X_add_number
= r
- i386_regtab
;
12846 input_line_pointer
= end
;
12851 gas_assert (intel_syntax
);
12852 end
= input_line_pointer
++;
12854 if (*input_line_pointer
== ']')
12856 ++input_line_pointer
;
12857 e
->X_op_symbol
= make_expr_symbol (e
);
12858 e
->X_add_symbol
= NULL
;
12859 e
->X_add_number
= 0;
12864 e
->X_op
= O_absent
;
12865 input_line_pointer
= end
;
12872 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12873 const char *md_shortopts
= "kVQ:sqnO::";
12875 const char *md_shortopts
= "qnO::";
12878 #define OPTION_32 (OPTION_MD_BASE + 0)
12879 #define OPTION_64 (OPTION_MD_BASE + 1)
12880 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
12881 #define OPTION_MARCH (OPTION_MD_BASE + 3)
12882 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
12883 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
12884 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
12885 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
12886 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
12887 #define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 9)
12888 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
12889 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
12890 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
12891 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
12892 #define OPTION_X32 (OPTION_MD_BASE + 14)
12893 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
12894 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
12895 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
12896 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
12897 #define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
12898 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
12899 #define OPTION_MSHARED (OPTION_MD_BASE + 21)
12900 #define OPTION_MAMD64 (OPTION_MD_BASE + 22)
12901 #define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
12902 #define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
12903 #define OPTION_X86_USED_NOTE (OPTION_MD_BASE + 25)
12904 #define OPTION_MVEXWIG (OPTION_MD_BASE + 26)
12905 #define OPTION_MALIGN_BRANCH_BOUNDARY (OPTION_MD_BASE + 27)
12906 #define OPTION_MALIGN_BRANCH_PREFIX_SIZE (OPTION_MD_BASE + 28)
12907 #define OPTION_MALIGN_BRANCH (OPTION_MD_BASE + 29)
12908 #define OPTION_MBRANCHES_WITH_32B_BOUNDARIES (OPTION_MD_BASE + 30)
12909 #define OPTION_MLFENCE_AFTER_LOAD (OPTION_MD_BASE + 31)
12910 #define OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH (OPTION_MD_BASE + 32)
12911 #define OPTION_MLFENCE_BEFORE_RET (OPTION_MD_BASE + 33)
12913 struct option md_longopts
[] =
12915 {"32", no_argument
, NULL
, OPTION_32
},
12916 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
12917 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
12918 {"64", no_argument
, NULL
, OPTION_64
},
12920 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12921 {"x32", no_argument
, NULL
, OPTION_X32
},
12922 {"mshared", no_argument
, NULL
, OPTION_MSHARED
},
12923 {"mx86-used-note", required_argument
, NULL
, OPTION_X86_USED_NOTE
},
12925 {"divide", no_argument
, NULL
, OPTION_DIVIDE
},
12926 {"march", required_argument
, NULL
, OPTION_MARCH
},
12927 {"mtune", required_argument
, NULL
, OPTION_MTUNE
},
12928 {"mmnemonic", required_argument
, NULL
, OPTION_MMNEMONIC
},
12929 {"msyntax", required_argument
, NULL
, OPTION_MSYNTAX
},
12930 {"mindex-reg", no_argument
, NULL
, OPTION_MINDEX_REG
},
12931 {"mnaked-reg", no_argument
, NULL
, OPTION_MNAKED_REG
},
12932 {"msse2avx", no_argument
, NULL
, OPTION_MSSE2AVX
},
12933 {"msse-check", required_argument
, NULL
, OPTION_MSSE_CHECK
},
12934 {"moperand-check", required_argument
, NULL
, OPTION_MOPERAND_CHECK
},
12935 {"mavxscalar", required_argument
, NULL
, OPTION_MAVXSCALAR
},
12936 {"mvexwig", required_argument
, NULL
, OPTION_MVEXWIG
},
12937 {"madd-bnd-prefix", no_argument
, NULL
, OPTION_MADD_BND_PREFIX
},
12938 {"mevexlig", required_argument
, NULL
, OPTION_MEVEXLIG
},
12939 {"mevexwig", required_argument
, NULL
, OPTION_MEVEXWIG
},
12940 # if defined (TE_PE) || defined (TE_PEP)
12941 {"mbig-obj", no_argument
, NULL
, OPTION_MBIG_OBJ
},
12943 {"momit-lock-prefix", required_argument
, NULL
, OPTION_MOMIT_LOCK_PREFIX
},
12944 {"mfence-as-lock-add", required_argument
, NULL
, OPTION_MFENCE_AS_LOCK_ADD
},
12945 {"mrelax-relocations", required_argument
, NULL
, OPTION_MRELAX_RELOCATIONS
},
12946 {"mevexrcig", required_argument
, NULL
, OPTION_MEVEXRCIG
},
12947 {"malign-branch-boundary", required_argument
, NULL
, OPTION_MALIGN_BRANCH_BOUNDARY
},
12948 {"malign-branch-prefix-size", required_argument
, NULL
, OPTION_MALIGN_BRANCH_PREFIX_SIZE
},
12949 {"malign-branch", required_argument
, NULL
, OPTION_MALIGN_BRANCH
},
12950 {"mbranches-within-32B-boundaries", no_argument
, NULL
, OPTION_MBRANCHES_WITH_32B_BOUNDARIES
},
12951 {"mlfence-after-load", required_argument
, NULL
, OPTION_MLFENCE_AFTER_LOAD
},
12952 {"mlfence-before-indirect-branch", required_argument
, NULL
,
12953 OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
},
12954 {"mlfence-before-ret", required_argument
, NULL
, OPTION_MLFENCE_BEFORE_RET
},
12955 {"mamd64", no_argument
, NULL
, OPTION_MAMD64
},
12956 {"mintel64", no_argument
, NULL
, OPTION_MINTEL64
},
12957 {NULL
, no_argument
, NULL
, 0}
12959 size_t md_longopts_size
= sizeof (md_longopts
);
12962 md_parse_option (int c
, const char *arg
)
12965 char *arch
, *next
, *saved
, *type
;
12970 optimize_align_code
= 0;
12974 quiet_warnings
= 1;
12977 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12978 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
12979 should be emitted or not. FIXME: Not implemented. */
12981 if ((arg
[0] != 'y' && arg
[0] != 'n') || arg
[1])
12985 /* -V: SVR4 argument to print version ID. */
12987 print_version_id ();
12990 /* -k: Ignore for FreeBSD compatibility. */
12995 /* -s: On i386 Solaris, this tells the native assembler to use
12996 .stab instead of .stab.excl. We always use .stab anyhow. */
12999 case OPTION_MSHARED
:
13003 case OPTION_X86_USED_NOTE
:
13004 if (strcasecmp (arg
, "yes") == 0)
13006 else if (strcasecmp (arg
, "no") == 0)
13009 as_fatal (_("invalid -mx86-used-note= option: `%s'"), arg
);
13014 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13015 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
13018 const char **list
, **l
;
13020 list
= bfd_target_list ();
13021 for (l
= list
; *l
!= NULL
; l
++)
13022 if (startswith (*l
, "elf64-x86-64")
13023 || strcmp (*l
, "coff-x86-64") == 0
13024 || strcmp (*l
, "pe-x86-64") == 0
13025 || strcmp (*l
, "pei-x86-64") == 0
13026 || strcmp (*l
, "mach-o-x86-64") == 0)
13028 default_arch
= "x86_64";
13032 as_fatal (_("no compiled in support for x86_64"));
13038 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13042 const char **list
, **l
;
13044 list
= bfd_target_list ();
13045 for (l
= list
; *l
!= NULL
; l
++)
13046 if (startswith (*l
, "elf32-x86-64"))
13048 default_arch
= "x86_64:32";
13052 as_fatal (_("no compiled in support for 32bit x86_64"));
13056 as_fatal (_("32bit x86_64 is only supported for ELF"));
13061 default_arch
= "i386";
13064 case OPTION_DIVIDE
:
13065 #ifdef SVR4_COMMENT_CHARS
13070 n
= XNEWVEC (char, strlen (i386_comment_chars
) + 1);
13072 for (s
= i386_comment_chars
; *s
!= '\0'; s
++)
13076 i386_comment_chars
= n
;
13082 saved
= xstrdup (arg
);
13084 /* Allow -march=+nosse. */
13090 as_fatal (_("invalid -march= option: `%s'"), arg
);
13091 next
= strchr (arch
, '+');
13094 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13096 if (strcmp (arch
, cpu_arch
[j
].name
) == 0)
13099 if (! cpu_arch
[j
].flags
.bitfield
.cpui386
)
13102 cpu_arch_name
= cpu_arch
[j
].name
;
13103 cpu_sub_arch_name
= NULL
;
13104 cpu_arch_flags
= cpu_arch
[j
].flags
;
13105 cpu_arch_isa
= cpu_arch
[j
].type
;
13106 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
13107 if (!cpu_arch_tune_set
)
13109 cpu_arch_tune
= cpu_arch_isa
;
13110 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
13114 else if (*cpu_arch
[j
].name
== '.'
13115 && strcmp (arch
, cpu_arch
[j
].name
+ 1) == 0)
13117 /* ISA extension. */
13118 i386_cpu_flags flags
;
13120 flags
= cpu_flags_or (cpu_arch_flags
,
13121 cpu_arch
[j
].flags
);
13123 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
13125 if (cpu_sub_arch_name
)
13127 char *name
= cpu_sub_arch_name
;
13128 cpu_sub_arch_name
= concat (name
,
13130 (const char *) NULL
);
13134 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
13135 cpu_arch_flags
= flags
;
13136 cpu_arch_isa_flags
= flags
;
13140 = cpu_flags_or (cpu_arch_isa_flags
,
13141 cpu_arch
[j
].flags
);
13146 if (j
>= ARRAY_SIZE (cpu_arch
))
13148 /* Disable an ISA extension. */
13149 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
13150 if (strcmp (arch
, cpu_noarch
[j
].name
) == 0)
13152 i386_cpu_flags flags
;
13154 flags
= cpu_flags_and_not (cpu_arch_flags
,
13155 cpu_noarch
[j
].flags
);
13156 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
13158 if (cpu_sub_arch_name
)
13160 char *name
= cpu_sub_arch_name
;
13161 cpu_sub_arch_name
= concat (arch
,
13162 (const char *) NULL
);
13166 cpu_sub_arch_name
= xstrdup (arch
);
13167 cpu_arch_flags
= flags
;
13168 cpu_arch_isa_flags
= flags
;
13173 if (j
>= ARRAY_SIZE (cpu_noarch
))
13174 j
= ARRAY_SIZE (cpu_arch
);
13177 if (j
>= ARRAY_SIZE (cpu_arch
))
13178 as_fatal (_("invalid -march= option: `%s'"), arg
);
13182 while (next
!= NULL
);
13188 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
13189 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13191 if (strcmp (arg
, cpu_arch
[j
].name
) == 0)
13193 cpu_arch_tune_set
= 1;
13194 cpu_arch_tune
= cpu_arch
[j
].type
;
13195 cpu_arch_tune_flags
= cpu_arch
[j
].flags
;
13199 if (j
>= ARRAY_SIZE (cpu_arch
))
13200 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
13203 case OPTION_MMNEMONIC
:
13204 if (strcasecmp (arg
, "att") == 0)
13205 intel_mnemonic
= 0;
13206 else if (strcasecmp (arg
, "intel") == 0)
13207 intel_mnemonic
= 1;
13209 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg
);
13212 case OPTION_MSYNTAX
:
13213 if (strcasecmp (arg
, "att") == 0)
13215 else if (strcasecmp (arg
, "intel") == 0)
13218 as_fatal (_("invalid -msyntax= option: `%s'"), arg
);
13221 case OPTION_MINDEX_REG
:
13222 allow_index_reg
= 1;
13225 case OPTION_MNAKED_REG
:
13226 allow_naked_reg
= 1;
13229 case OPTION_MSSE2AVX
:
13233 case OPTION_MSSE_CHECK
:
13234 if (strcasecmp (arg
, "error") == 0)
13235 sse_check
= check_error
;
13236 else if (strcasecmp (arg
, "warning") == 0)
13237 sse_check
= check_warning
;
13238 else if (strcasecmp (arg
, "none") == 0)
13239 sse_check
= check_none
;
13241 as_fatal (_("invalid -msse-check= option: `%s'"), arg
);
13244 case OPTION_MOPERAND_CHECK
:
13245 if (strcasecmp (arg
, "error") == 0)
13246 operand_check
= check_error
;
13247 else if (strcasecmp (arg
, "warning") == 0)
13248 operand_check
= check_warning
;
13249 else if (strcasecmp (arg
, "none") == 0)
13250 operand_check
= check_none
;
13252 as_fatal (_("invalid -moperand-check= option: `%s'"), arg
);
13255 case OPTION_MAVXSCALAR
:
13256 if (strcasecmp (arg
, "128") == 0)
13257 avxscalar
= vex128
;
13258 else if (strcasecmp (arg
, "256") == 0)
13259 avxscalar
= vex256
;
13261 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg
);
13264 case OPTION_MVEXWIG
:
13265 if (strcmp (arg
, "0") == 0)
13267 else if (strcmp (arg
, "1") == 0)
13270 as_fatal (_("invalid -mvexwig= option: `%s'"), arg
);
13273 case OPTION_MADD_BND_PREFIX
:
13274 add_bnd_prefix
= 1;
13277 case OPTION_MEVEXLIG
:
13278 if (strcmp (arg
, "128") == 0)
13279 evexlig
= evexl128
;
13280 else if (strcmp (arg
, "256") == 0)
13281 evexlig
= evexl256
;
13282 else if (strcmp (arg
, "512") == 0)
13283 evexlig
= evexl512
;
13285 as_fatal (_("invalid -mevexlig= option: `%s'"), arg
);
13288 case OPTION_MEVEXRCIG
:
13289 if (strcmp (arg
, "rne") == 0)
13291 else if (strcmp (arg
, "rd") == 0)
13293 else if (strcmp (arg
, "ru") == 0)
13295 else if (strcmp (arg
, "rz") == 0)
13298 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg
);
13301 case OPTION_MEVEXWIG
:
13302 if (strcmp (arg
, "0") == 0)
13304 else if (strcmp (arg
, "1") == 0)
13307 as_fatal (_("invalid -mevexwig= option: `%s'"), arg
);
13310 # if defined (TE_PE) || defined (TE_PEP)
13311 case OPTION_MBIG_OBJ
:
13316 case OPTION_MOMIT_LOCK_PREFIX
:
13317 if (strcasecmp (arg
, "yes") == 0)
13318 omit_lock_prefix
= 1;
13319 else if (strcasecmp (arg
, "no") == 0)
13320 omit_lock_prefix
= 0;
13322 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg
);
13325 case OPTION_MFENCE_AS_LOCK_ADD
:
13326 if (strcasecmp (arg
, "yes") == 0)
13328 else if (strcasecmp (arg
, "no") == 0)
13331 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg
);
13334 case OPTION_MLFENCE_AFTER_LOAD
:
13335 if (strcasecmp (arg
, "yes") == 0)
13336 lfence_after_load
= 1;
13337 else if (strcasecmp (arg
, "no") == 0)
13338 lfence_after_load
= 0;
13340 as_fatal (_("invalid -mlfence-after-load= option: `%s'"), arg
);
13343 case OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH
:
13344 if (strcasecmp (arg
, "all") == 0)
13346 lfence_before_indirect_branch
= lfence_branch_all
;
13347 if (lfence_before_ret
== lfence_before_ret_none
)
13348 lfence_before_ret
= lfence_before_ret_shl
;
13350 else if (strcasecmp (arg
, "memory") == 0)
13351 lfence_before_indirect_branch
= lfence_branch_memory
;
13352 else if (strcasecmp (arg
, "register") == 0)
13353 lfence_before_indirect_branch
= lfence_branch_register
;
13354 else if (strcasecmp (arg
, "none") == 0)
13355 lfence_before_indirect_branch
= lfence_branch_none
;
13357 as_fatal (_("invalid -mlfence-before-indirect-branch= option: `%s'"),
13361 case OPTION_MLFENCE_BEFORE_RET
:
13362 if (strcasecmp (arg
, "or") == 0)
13363 lfence_before_ret
= lfence_before_ret_or
;
13364 else if (strcasecmp (arg
, "not") == 0)
13365 lfence_before_ret
= lfence_before_ret_not
;
13366 else if (strcasecmp (arg
, "shl") == 0 || strcasecmp (arg
, "yes") == 0)
13367 lfence_before_ret
= lfence_before_ret_shl
;
13368 else if (strcasecmp (arg
, "none") == 0)
13369 lfence_before_ret
= lfence_before_ret_none
;
13371 as_fatal (_("invalid -mlfence-before-ret= option: `%s'"),
13375 case OPTION_MRELAX_RELOCATIONS
:
13376 if (strcasecmp (arg
, "yes") == 0)
13377 generate_relax_relocations
= 1;
13378 else if (strcasecmp (arg
, "no") == 0)
13379 generate_relax_relocations
= 0;
13381 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg
);
13384 case OPTION_MALIGN_BRANCH_BOUNDARY
:
13387 long int align
= strtoul (arg
, &end
, 0);
13392 align_branch_power
= 0;
13395 else if (align
>= 16)
13398 for (align_power
= 0;
13400 align
>>= 1, align_power
++)
13402 /* Limit alignment power to 31. */
13403 if (align
== 1 && align_power
< 32)
13405 align_branch_power
= align_power
;
13410 as_fatal (_("invalid -malign-branch-boundary= value: %s"), arg
);
13414 case OPTION_MALIGN_BRANCH_PREFIX_SIZE
:
13417 int align
= strtoul (arg
, &end
, 0);
13418 /* Some processors only support 5 prefixes. */
13419 if (*end
== '\0' && align
>= 0 && align
< 6)
13421 align_branch_prefix_size
= align
;
13424 as_fatal (_("invalid -malign-branch-prefix-size= value: %s"),
13429 case OPTION_MALIGN_BRANCH
:
13431 saved
= xstrdup (arg
);
13435 next
= strchr (type
, '+');
13438 if (strcasecmp (type
, "jcc") == 0)
13439 align_branch
|= align_branch_jcc_bit
;
13440 else if (strcasecmp (type
, "fused") == 0)
13441 align_branch
|= align_branch_fused_bit
;
13442 else if (strcasecmp (type
, "jmp") == 0)
13443 align_branch
|= align_branch_jmp_bit
;
13444 else if (strcasecmp (type
, "call") == 0)
13445 align_branch
|= align_branch_call_bit
;
13446 else if (strcasecmp (type
, "ret") == 0)
13447 align_branch
|= align_branch_ret_bit
;
13448 else if (strcasecmp (type
, "indirect") == 0)
13449 align_branch
|= align_branch_indirect_bit
;
13451 as_fatal (_("invalid -malign-branch= option: `%s'"), arg
);
13454 while (next
!= NULL
);
13458 case OPTION_MBRANCHES_WITH_32B_BOUNDARIES
:
13459 align_branch_power
= 5;
13460 align_branch_prefix_size
= 5;
13461 align_branch
= (align_branch_jcc_bit
13462 | align_branch_fused_bit
13463 | align_branch_jmp_bit
);
13466 case OPTION_MAMD64
:
13470 case OPTION_MINTEL64
:
13478 /* Turn off -Os. */
13479 optimize_for_space
= 0;
13481 else if (*arg
== 's')
13483 optimize_for_space
= 1;
13484 /* Turn on all encoding optimizations. */
13485 optimize
= INT_MAX
;
13489 optimize
= atoi (arg
);
13490 /* Turn off -Os. */
13491 optimize_for_space
= 0;
13501 #define MESSAGE_TEMPLATE \
13505 output_message (FILE *stream
, char *p
, char *message
, char *start
,
13506 int *left_p
, const char *name
, int len
)
13508 int size
= sizeof (MESSAGE_TEMPLATE
);
13509 int left
= *left_p
;
13511 /* Reserve 2 spaces for ", " or ",\0" */
13514 /* Check if there is any room. */
13522 p
= mempcpy (p
, name
, len
);
13526 /* Output the current message now and start a new one. */
13529 fprintf (stream
, "%s\n", message
);
13531 left
= size
- (start
- message
) - len
- 2;
13533 gas_assert (left
>= 0);
13535 p
= mempcpy (p
, name
, len
);
13543 show_arch (FILE *stream
, int ext
, int check
)
13545 static char message
[] = MESSAGE_TEMPLATE
;
13546 char *start
= message
+ 27;
13548 int size
= sizeof (MESSAGE_TEMPLATE
);
13555 left
= size
- (start
- message
);
13556 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
13558 /* Should it be skipped? */
13559 if (cpu_arch
[j
].skip
)
13562 name
= cpu_arch
[j
].name
;
13563 len
= cpu_arch
[j
].len
;
13566 /* It is an extension. Skip if we aren't asked to show it. */
13577 /* It is an processor. Skip if we show only extension. */
13580 else if (check
&& ! cpu_arch
[j
].flags
.bitfield
.cpui386
)
13582 /* It is an impossible processor - skip. */
13586 p
= output_message (stream
, p
, message
, start
, &left
, name
, len
);
13589 /* Display disabled extensions. */
13591 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
13593 name
= cpu_noarch
[j
].name
;
13594 len
= cpu_noarch
[j
].len
;
13595 p
= output_message (stream
, p
, message
, start
, &left
, name
,
13600 fprintf (stream
, "%s\n", message
);
13604 md_show_usage (FILE *stream
)
13606 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13607 fprintf (stream
, _("\
13608 -Qy, -Qn ignored\n\
13609 -V print assembler version number\n\
13612 fprintf (stream
, _("\
13613 -n Do not optimize code alignment\n\
13614 -q quieten some warnings\n"));
13615 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13616 fprintf (stream
, _("\
13619 #if defined BFD64 && (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13620 || defined (TE_PE) || defined (TE_PEP))
13621 fprintf (stream
, _("\
13622 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
13624 #ifdef SVR4_COMMENT_CHARS
13625 fprintf (stream
, _("\
13626 --divide do not treat `/' as a comment character\n"));
13628 fprintf (stream
, _("\
13629 --divide ignored\n"));
13631 fprintf (stream
, _("\
13632 -march=CPU[,+EXTENSION...]\n\
13633 generate code for CPU and EXTENSION, CPU is one of:\n"));
13634 show_arch (stream
, 0, 1);
13635 fprintf (stream
, _("\
13636 EXTENSION is combination of:\n"));
13637 show_arch (stream
, 1, 0);
13638 fprintf (stream
, _("\
13639 -mtune=CPU optimize for CPU, CPU is one of:\n"));
13640 show_arch (stream
, 0, 0);
13641 fprintf (stream
, _("\
13642 -msse2avx encode SSE instructions with VEX prefix\n"));
13643 fprintf (stream
, _("\
13644 -msse-check=[none|error|warning] (default: warning)\n\
13645 check SSE instructions\n"));
13646 fprintf (stream
, _("\
13647 -moperand-check=[none|error|warning] (default: warning)\n\
13648 check operand combinations for validity\n"));
13649 fprintf (stream
, _("\
13650 -mavxscalar=[128|256] (default: 128)\n\
13651 encode scalar AVX instructions with specific vector\n\
13653 fprintf (stream
, _("\
13654 -mvexwig=[0|1] (default: 0)\n\
13655 encode VEX instructions with specific VEX.W value\n\
13656 for VEX.W bit ignored instructions\n"));
13657 fprintf (stream
, _("\
13658 -mevexlig=[128|256|512] (default: 128)\n\
13659 encode scalar EVEX instructions with specific vector\n\
13661 fprintf (stream
, _("\
13662 -mevexwig=[0|1] (default: 0)\n\
13663 encode EVEX instructions with specific EVEX.W value\n\
13664 for EVEX.W bit ignored instructions\n"));
13665 fprintf (stream
, _("\
13666 -mevexrcig=[rne|rd|ru|rz] (default: rne)\n\
13667 encode EVEX instructions with specific EVEX.RC value\n\
13668 for SAE-only ignored instructions\n"));
13669 fprintf (stream
, _("\
13670 -mmnemonic=[att|intel] "));
13671 if (SYSV386_COMPAT
)
13672 fprintf (stream
, _("(default: att)\n"));
13674 fprintf (stream
, _("(default: intel)\n"));
13675 fprintf (stream
, _("\
13676 use AT&T/Intel mnemonic\n"));
13677 fprintf (stream
, _("\
13678 -msyntax=[att|intel] (default: att)\n\
13679 use AT&T/Intel syntax\n"));
13680 fprintf (stream
, _("\
13681 -mindex-reg support pseudo index registers\n"));
13682 fprintf (stream
, _("\
13683 -mnaked-reg don't require `%%' prefix for registers\n"));
13684 fprintf (stream
, _("\
13685 -madd-bnd-prefix add BND prefix for all valid branches\n"));
13686 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13687 fprintf (stream
, _("\
13688 -mshared disable branch optimization for shared code\n"));
13689 fprintf (stream
, _("\
13690 -mx86-used-note=[no|yes] "));
13691 if (DEFAULT_X86_USED_NOTE
)
13692 fprintf (stream
, _("(default: yes)\n"));
13694 fprintf (stream
, _("(default: no)\n"));
13695 fprintf (stream
, _("\
13696 generate x86 used ISA and feature properties\n"));
13698 #if defined (TE_PE) || defined (TE_PEP)
13699 fprintf (stream
, _("\
13700 -mbig-obj generate big object files\n"));
13702 fprintf (stream
, _("\
13703 -momit-lock-prefix=[no|yes] (default: no)\n\
13704 strip all lock prefixes\n"));
13705 fprintf (stream
, _("\
13706 -mfence-as-lock-add=[no|yes] (default: no)\n\
13707 encode lfence, mfence and sfence as\n\
13708 lock addl $0x0, (%%{re}sp)\n"));
13709 fprintf (stream
, _("\
13710 -mrelax-relocations=[no|yes] "));
13711 if (DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
)
13712 fprintf (stream
, _("(default: yes)\n"));
13714 fprintf (stream
, _("(default: no)\n"));
13715 fprintf (stream
, _("\
13716 generate relax relocations\n"));
13717 fprintf (stream
, _("\
13718 -malign-branch-boundary=NUM (default: 0)\n\
13719 align branches within NUM byte boundary\n"));
13720 fprintf (stream
, _("\
13721 -malign-branch=TYPE[+TYPE...] (default: jcc+fused+jmp)\n\
13722 TYPE is combination of jcc, fused, jmp, call, ret,\n\
13724 specify types of branches to align\n"));
13725 fprintf (stream
, _("\
13726 -malign-branch-prefix-size=NUM (default: 5)\n\
13727 align branches with NUM prefixes per instruction\n"));
13728 fprintf (stream
, _("\
13729 -mbranches-within-32B-boundaries\n\
13730 align branches within 32 byte boundary\n"));
13731 fprintf (stream
, _("\
13732 -mlfence-after-load=[no|yes] (default: no)\n\
13733 generate lfence after load\n"));
13734 fprintf (stream
, _("\
13735 -mlfence-before-indirect-branch=[none|all|register|memory] (default: none)\n\
13736 generate lfence before indirect near branch\n"));
13737 fprintf (stream
, _("\
13738 -mlfence-before-ret=[none|or|not|shl|yes] (default: none)\n\
13739 generate lfence before ret\n"));
13740 fprintf (stream
, _("\
13741 -mamd64 accept only AMD64 ISA [default]\n"));
13742 fprintf (stream
, _("\
13743 -mintel64 accept only Intel64 ISA\n"));
13746 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
13747 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13748 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
13750 /* Pick the target format to use. */
13753 i386_target_format (void)
13755 if (!strncmp (default_arch
, "x86_64", 6))
13757 update_code_flag (CODE_64BIT
, 1);
13758 if (default_arch
[6] == '\0')
13759 x86_elf_abi
= X86_64_ABI
;
13761 x86_elf_abi
= X86_64_X32_ABI
;
13763 else if (!strcmp (default_arch
, "i386"))
13764 update_code_flag (CODE_32BIT
, 1);
13765 else if (!strcmp (default_arch
, "iamcu"))
13767 update_code_flag (CODE_32BIT
, 1);
13768 if (cpu_arch_isa
== PROCESSOR_UNKNOWN
)
13770 static const i386_cpu_flags iamcu_flags
= CPU_IAMCU_FLAGS
;
13771 cpu_arch_name
= "iamcu";
13772 cpu_sub_arch_name
= NULL
;
13773 cpu_arch_flags
= iamcu_flags
;
13774 cpu_arch_isa
= PROCESSOR_IAMCU
;
13775 cpu_arch_isa_flags
= iamcu_flags
;
13776 if (!cpu_arch_tune_set
)
13778 cpu_arch_tune
= cpu_arch_isa
;
13779 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
13782 else if (cpu_arch_isa
!= PROCESSOR_IAMCU
)
13783 as_fatal (_("Intel MCU doesn't support `%s' architecture"),
13787 as_fatal (_("unknown architecture"));
13789 if (cpu_flags_all_zero (&cpu_arch_isa_flags
))
13790 cpu_arch_isa_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
13791 if (cpu_flags_all_zero (&cpu_arch_tune_flags
))
13792 cpu_arch_tune_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
13794 switch (OUTPUT_FLAVOR
)
13796 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
13797 case bfd_target_aout_flavour
:
13798 return AOUT_TARGET_FORMAT
;
13800 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
13801 # if defined (TE_PE) || defined (TE_PEP)
13802 case bfd_target_coff_flavour
:
13803 if (flag_code
== CODE_64BIT
)
13804 return use_big_obj
? "pe-bigobj-x86-64" : "pe-x86-64";
13806 return use_big_obj
? "pe-bigobj-i386" : "pe-i386";
13807 # elif defined (TE_GO32)
13808 case bfd_target_coff_flavour
:
13809 return "coff-go32";
13811 case bfd_target_coff_flavour
:
13812 return "coff-i386";
13815 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
13816 case bfd_target_elf_flavour
:
13818 const char *format
;
13820 switch (x86_elf_abi
)
13823 format
= ELF_TARGET_FORMAT
;
13825 tls_get_addr
= "___tls_get_addr";
13829 use_rela_relocations
= 1;
13832 tls_get_addr
= "__tls_get_addr";
13834 format
= ELF_TARGET_FORMAT64
;
13836 case X86_64_X32_ABI
:
13837 use_rela_relocations
= 1;
13840 tls_get_addr
= "__tls_get_addr";
13842 disallow_64bit_reloc
= 1;
13843 format
= ELF_TARGET_FORMAT32
;
13846 if (cpu_arch_isa
== PROCESSOR_L1OM
)
13848 if (x86_elf_abi
!= X86_64_ABI
)
13849 as_fatal (_("Intel L1OM is 64bit only"));
13850 return ELF_TARGET_L1OM_FORMAT
;
13852 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
13854 if (x86_elf_abi
!= X86_64_ABI
)
13855 as_fatal (_("Intel K1OM is 64bit only"));
13856 return ELF_TARGET_K1OM_FORMAT
;
13858 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
13860 if (x86_elf_abi
!= I386_ABI
)
13861 as_fatal (_("Intel MCU is 32bit only"));
13862 return ELF_TARGET_IAMCU_FORMAT
;
13868 #if defined (OBJ_MACH_O)
13869 case bfd_target_mach_o_flavour
:
13870 if (flag_code
== CODE_64BIT
)
13872 use_rela_relocations
= 1;
13874 return "mach-o-x86-64";
13877 return "mach-o-i386";
13885 #endif /* OBJ_MAYBE_ more than one */
13888 md_undefined_symbol (char *name
)
13890 if (name
[0] == GLOBAL_OFFSET_TABLE_NAME
[0]
13891 && name
[1] == GLOBAL_OFFSET_TABLE_NAME
[1]
13892 && name
[2] == GLOBAL_OFFSET_TABLE_NAME
[2]
13893 && strcmp (name
, GLOBAL_OFFSET_TABLE_NAME
) == 0)
13897 if (symbol_find (name
))
13898 as_bad (_("GOT already in symbol table"));
13899 GOT_symbol
= symbol_new (name
, undefined_section
,
13900 &zero_address_frag
, 0);
13907 /* Round up a section size to the appropriate boundary. */
13910 md_section_align (segT segment ATTRIBUTE_UNUSED
, valueT size
)
13912 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
13913 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
13915 /* For a.out, force the section size to be aligned. If we don't do
13916 this, BFD will align it for us, but it will not write out the
13917 final bytes of the section. This may be a bug in BFD, but it is
13918 easier to fix it here since that is how the other a.out targets
13922 align
= bfd_section_alignment (segment
);
13923 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
13930 /* On the i386, PC-relative offsets are relative to the start of the
13931 next instruction. That is, the address of the offset, plus its
13932 size, since the offset is always the last part of the insn. */
13935 md_pcrel_from (fixS
*fixP
)
13937 return fixP
->fx_size
+ fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
13943 s_bss (int ignore ATTRIBUTE_UNUSED
)
13947 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13949 obj_elf_section_change_hook ();
13951 temp
= get_absolute_expression ();
13952 subseg_set (bss_section
, (subsegT
) temp
);
13953 demand_empty_rest_of_line ();
13958 /* Remember constant directive. */
13961 i386_cons_align (int ignore ATTRIBUTE_UNUSED
)
13963 if (last_insn
.kind
!= last_insn_directive
13964 && (bfd_section_flags (now_seg
) & SEC_CODE
))
13966 last_insn
.seg
= now_seg
;
13967 last_insn
.kind
= last_insn_directive
;
13968 last_insn
.name
= "constant directive";
13969 last_insn
.file
= as_where (&last_insn
.line
);
13970 if (lfence_before_ret
!= lfence_before_ret_none
)
13972 if (lfence_before_indirect_branch
!= lfence_branch_none
)
13973 as_warn (_("constant directive skips -mlfence-before-ret "
13974 "and -mlfence-before-indirect-branch"));
13976 as_warn (_("constant directive skips -mlfence-before-ret"));
13978 else if (lfence_before_indirect_branch
!= lfence_branch_none
)
13979 as_warn (_("constant directive skips -mlfence-before-indirect-branch"));
/* Final sanity/adjustment pass over a fix-up before relocation
   generation: fold "sym - _GLOBAL_OFFSET_TABLE_" differences into
   GOT-relative relocation types, and retype PLT32 relocs that ended
   up against section symbols.  */

void
i386_validate_fix (fixS *fixp)
{
  if (fixp->fx_subsy)
    {
      if (fixp->fx_subsy == GOT_symbol)
	{
	  if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
	    {
	      if (!object_64bit)
		abort ();
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
	      /* NOTE(review): fx_tcbit2/fx_tcbit appear to mark loads
		 eligible for GOTPCREL relaxation (REX vs non-REX
		 form) — confirm against the md_apply_fix setters.  */
	      if (fixp->fx_tcbit2)
		fixp->fx_r_type = (fixp->fx_tcbit
				   ? BFD_RELOC_X86_64_REX_GOTPCRELX
				   : BFD_RELOC_X86_64_GOTPCRELX);
	      else
#endif
		fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
	    }
	  else
	    {
	      if (!object_64bit)
		fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
	      else
		fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
	    }
	  /* The GOT symbol is now implied by the reloc type; drop the
	     subtrahend so the fix is no longer a difference.  */
	  fixp->fx_subsy = 0;
	}
    }
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  else
    {
      /* NB: Commit 292676c1 resolved PLT32 reloc aganst local symbol
	 to section.  Since PLT32 relocation must be against symbols,
	 turn such PLT32 relocation into PC32 relocation.  */
      if (fixp->fx_addsy
	  && (fixp->fx_r_type == BFD_RELOC_386_PLT32
	      || fixp->fx_r_type == BFD_RELOC_X86_64_PLT32)
	  && symbol_section_p (fixp->fx_addsy))
	fixp->fx_r_type = BFD_RELOC_32_PCREL;

      if (!object_64bit)
	{
	  /* fx_tcbit2 on a 386 GOT32 fix selects the relaxable
	     GOT32X variant.  */
	  if (fixp->fx_r_type == BFD_RELOC_386_GOT32
	      && fixp->fx_tcbit2)
	    fixp->fx_r_type = BFD_RELOC_386_GOT32X;
	}
    }
#endif
}
/* Translate an internal fix-up into a BFD relocation entry.  Returns
   the new arelent, or NULL when the fix was fully resolved here (size
   relocs against local symbols).  */

arelent *
tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
{
  arelent *rel;
  bfd_reloc_code_real_type code;

  /* Map the fix's reloc type to the BFD code to emit.  */
  switch (fixp->fx_r_type)
    {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case BFD_RELOC_SIZE32:
    case BFD_RELOC_SIZE64:
      if (S_IS_DEFINED (fixp->fx_addsy)
	  && !S_IS_EXTERNAL (fixp->fx_addsy))
	{
	  /* Resolve size relocation against local symbol to size of
	     the symbol plus addend.  */
	  valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;

	  if (fixp->fx_r_type == BFD_RELOC_SIZE32
	      && !fits_in_unsigned_long (value))
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("symbol size computation overflow"));
	  fixp->fx_addsy = NULL;
	  fixp->fx_subsy = NULL;
	  md_apply_fix (fixp, (valueT *) &value, NULL);
	  /* Fully resolved — no relocation entry is emitted.  */
	  return NULL;
	}
#endif
      /* Fall through.  */

      /* These reloc types pass through unchanged.  */
    case BFD_RELOC_X86_64_PLT32:
    case BFD_RELOC_X86_64_GOT32:
    case BFD_RELOC_X86_64_GOTPCREL:
    case BFD_RELOC_X86_64_GOTPCRELX:
    case BFD_RELOC_X86_64_REX_GOTPCRELX:
    case BFD_RELOC_386_PLT32:
    case BFD_RELOC_386_GOT32:
    case BFD_RELOC_386_GOT32X:
    case BFD_RELOC_386_GOTOFF:
    case BFD_RELOC_386_GOTPC:
    case BFD_RELOC_386_TLS_GD:
    case BFD_RELOC_386_TLS_LDM:
    case BFD_RELOC_386_TLS_LDO_32:
    case BFD_RELOC_386_TLS_IE_32:
    case BFD_RELOC_386_TLS_IE:
    case BFD_RELOC_386_TLS_GOTIE:
    case BFD_RELOC_386_TLS_LE_32:
    case BFD_RELOC_386_TLS_LE:
    case BFD_RELOC_386_TLS_GOTDESC:
    case BFD_RELOC_386_TLS_DESC_CALL:
    case BFD_RELOC_X86_64_TLSGD:
    case BFD_RELOC_X86_64_TLSLD:
    case BFD_RELOC_X86_64_DTPOFF32:
    case BFD_RELOC_X86_64_DTPOFF64:
    case BFD_RELOC_X86_64_GOTTPOFF:
    case BFD_RELOC_X86_64_TPOFF32:
    case BFD_RELOC_X86_64_TPOFF64:
    case BFD_RELOC_X86_64_GOTOFF64:
    case BFD_RELOC_X86_64_GOTPC32:
    case BFD_RELOC_X86_64_GOT64:
    case BFD_RELOC_X86_64_GOTPCREL64:
    case BFD_RELOC_X86_64_GOTPC64:
    case BFD_RELOC_X86_64_GOTPLT64:
    case BFD_RELOC_X86_64_PLTOFF64:
    case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
    case BFD_RELOC_X86_64_TLSDESC_CALL:
    case BFD_RELOC_RVA:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_X86_64_32S:
      if (!fixp->fx_pcrel)
	{
	  /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32.  */
	  code = fixp->fx_r_type;
	  break;
	}
      /* Fall through.  */

    default:
      /* Generic relocs: pick by size and pc-relativity.  */
      if (fixp->fx_pcrel)
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte pc-relative relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32_PCREL;
	      break;
	    case 1: code = BFD_RELOC_8_PCREL; break;
	    case 2: code = BFD_RELOC_16_PCREL; break;
	    case 4: code = BFD_RELOC_32_PCREL; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64_PCREL; break;
#endif
	    }
	}
      else
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32;
	      break;
	    case 1: code = BFD_RELOC_8; break;
	    case 2: code = BFD_RELOC_16; break;
	    case 4: code = BFD_RELOC_32; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64; break;
#endif
	    }
	}
      break;
    }

  /* A plain 32-bit reloc against _GLOBAL_OFFSET_TABLE_ really means
     "offset of GOT from this place" — use the GOTPC types.  */
  if ((code == BFD_RELOC_32
       || code == BFD_RELOC_32_PCREL
       || code == BFD_RELOC_X86_64_32S)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      if (!object_64bit)
	code = BFD_RELOC_386_GOTPC;
      else
	code = BFD_RELOC_X86_64_GOTPC32;
    }
  if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_X86_64_GOTPC64;
    }

  rel = XNEW (arelent);
  rel->sym_ptr_ptr = XNEW (asymbol *);
  *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);

  rel->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (!use_rela_relocations)
    {
      /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
	 vtable entry to be used in the relocation's section offset.  */
      if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
	rel->address = fixp->fx_offset;
#if defined (OBJ_COFF) && defined (TE_PE)
      else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
	rel->addend = fixp->fx_addnumber
		      - (S_GET_VALUE (fixp->fx_addsy) * 2);
      else
#endif
      rel->addend = 0;
    }
  /* Use the rela in 64bit mode.  */
  else
    {
      /* x32 objects cannot carry 64-bit relocation types.  */
      if (disallow_64bit_reloc)
	switch (code)
	  {
	  case BFD_RELOC_X86_64_DTPOFF64:
	  case BFD_RELOC_X86_64_TPOFF64:
	  case BFD_RELOC_64_PCREL:
	  case BFD_RELOC_X86_64_GOTOFF64:
	  case BFD_RELOC_X86_64_GOT64:
	  case BFD_RELOC_X86_64_GOTPCREL64:
	  case BFD_RELOC_X86_64_GOTPC64:
	  case BFD_RELOC_X86_64_GOTPLT64:
	  case BFD_RELOC_X86_64_PLTOFF64:
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("cannot represent relocation type %s in x32 mode"),
			  bfd_get_reloc_code_name (code));
	    break;
	  default:
	    break;
	  }

      if (!fixp->fx_pcrel)
	rel->addend = fixp->fx_offset;
      else
	switch (code)
	  {
	    /* For these relocs the addend is adjusted by the field
	       size, since the reloc applies from the field's own
	       address rather than the end of the insn.  */
	  case BFD_RELOC_X86_64_PLT32:
	  case BFD_RELOC_X86_64_GOT32:
	  case BFD_RELOC_X86_64_GOTPCREL:
	  case BFD_RELOC_X86_64_GOTPCRELX:
	  case BFD_RELOC_X86_64_REX_GOTPCRELX:
	  case BFD_RELOC_X86_64_TLSGD:
	  case BFD_RELOC_X86_64_TLSLD:
	  case BFD_RELOC_X86_64_GOTTPOFF:
	  case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	  case BFD_RELOC_X86_64_TLSDESC_CALL:
	    rel->addend = fixp->fx_offset - fixp->fx_size;
	    break;
	  default:
	    rel->addend = (section->vma
			   - fixp->fx_size
			   + fixp->fx_addnumber
			   + md_pcrel_from (fixp));
	    break;
	  }
    }

  rel->howto = bfd_reloc_type_lookup (stdoutput, code);
  if (rel->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent relocation type %s"),
		    bfd_get_reloc_code_name (code));
      /* Set howto to a garbage value so that we can keep going.  */
      rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
      gas_assert (rel->howto != NULL);
    }

  return rel;
}
14255 #include "tc-i386-intel.c"
/* Parse a register name at input_line_pointer into EXP and, on
   success, replace it with the register's DWARF2 number.  EXP becomes
   O_constant holding the dw2 regnum, or O_illegal when the register
   has no mapping; any non-register expression is left as evaluated.  */

void
tc_x86_parse_to_dw2regnum (expressionS *exp)
{
  int saved_naked_reg;
  char saved_register_dot;

  /* Temporarily accept bare register names (no '%') and '.' inside
     register tokens, then restore the scanner state afterwards.  */
  saved_naked_reg = allow_naked_reg;
  allow_naked_reg = 1;
  saved_register_dot = register_chars['.'];
  register_chars['.'] = '.';
  allow_pseudo_reg = 1;
  expression_and_evaluate (exp);
  allow_pseudo_reg = 0;
  register_chars['.'] = saved_register_dot;
  allow_naked_reg = saved_naked_reg;

  if (exp->X_op == O_register && exp->X_add_number >= 0)
    {
      if ((addressT) exp->X_add_number < i386_regtab_size)
	{
	  /* flag_code >> 1 selects the 32- vs 64-bit dw2_regnum
	     column (CODE_16BIT/CODE_32BIT -> 0, CODE_64BIT -> 1).  */
	  exp->X_op = O_constant;
	  exp->X_add_number = i386_regtab[exp->X_add_number]
			      .dw2_regnum[flag_code >> 1];
	}
      else
	exp->X_op = O_illegal;
    }
}
/* Emit the initial CFI instructions for a frame: CFA is the stack
   pointer at -x86_cie_data_alignment, and the return address lives at
   the CFA.  */

void
tc_x86_frame_initial_instructions (void)
{
  /* DWARF regnum of the stack pointer, cached per mode
     (index flag_code >> 1: 0 = 32-bit "esp", 1 = 64-bit "rsp").  */
  static unsigned int sp_regno[2];

  if (!sp_regno[flag_code >> 1])
    {
      char *saved_input = input_line_pointer;
      char sp[][4] = {"esp", "rsp"};
      expressionS exp;

      /* Reuse the register parser by pointing the input scanner at a
	 literal register name, then restore it.  */
      input_line_pointer = sp[flag_code >> 1];
      tc_x86_parse_to_dw2regnum (&exp);
      gas_assert (exp.X_op == O_constant);
      sp_regno[flag_code >> 1] = exp.X_add_number;
      input_line_pointer = saved_input;
    }

  cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
  cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
}
/* Return the size in bytes of a DWARF2 address: 4 for the x32 ABI,
   otherwise whatever the output BFD's address width implies.  */

int
x86_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  /* x32 uses 64-bit insns but 32-bit addresses.  */
  if (x86_elf_abi == X86_64_X32_ABI)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}
14319 i386_elf_section_type (const char *str
, size_t len
)
14321 if (flag_code
== CODE_64BIT
14322 && len
== sizeof ("unwind") - 1
14323 && strncmp (str
, "unwind", 6) == 0)
14324 return SHT_X86_64_UNWIND
;
14331 i386_solaris_fix_up_eh_frame (segT sec
)
14333 if (flag_code
== CODE_64BIT
)
14334 elf_section_type (sec
) = SHT_X86_64_UNWIND
;
14340 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
14344 exp
.X_op
= O_secrel
;
14345 exp
.X_add_symbol
= symbol
;
14346 exp
.X_add_number
= 0;
14347 emit_expr (&exp
, size
);
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* For ELF on x86-64, add support for SHF_X86_64_LARGE.  */

/* Translate a .section flag LETTER into a section flag bit; only 'l'
   (large) in 64-bit mode is recognized here.  On failure, point
   *PTR_MSG at an error string and return -1.  */

bfd_vma
x86_64_section_letter (int letter, const char **ptr_msg)
{
  if (flag_code == CODE_64BIT)
    {
      if (letter == 'l')
	return SHF_X86_64_LARGE;

      /* Mention 'l' only when it could have been accepted.  */
      *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
    }
  else
    *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
  return -1;
}
14370 x86_64_section_word (char *str
, size_t len
)
14372 if (len
== 5 && flag_code
== CODE_64BIT
&& startswith (str
, "large"))
14373 return SHF_X86_64_LARGE
;
/* Handle the .largecomm directive.  Outside 64-bit mode it degrades
   to an ordinary .comm with a warning; in 64-bit mode it parses the
   common symbol with the large-common section machinery so the data
   lands in .lbss / the large common section.  */

static void
handle_large_common (int small ATTRIBUTE_UNUSED)
{
  if (flag_code != CODE_64BIT)
    {
      s_comm_internal (0, elf_common_parse);
      as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
    }
  else
    {
      /* Lazily-created section for local .largecomm symbols; persists
	 across calls.  */
      static segT lbss_section;
      asection *saved_com_section_ptr = elf_com_section_ptr;
      asection *saved_bss_section = bss_section;

      if (lbss_section == NULL)
	{
	  flagword applicable;
	  segT seg = now_seg;
	  subsegT subseg = now_subseg;

	  /* The .lbss section is for local .largecomm symbols.  */
	  lbss_section = subseg_new (".lbss", 0);
	  applicable = bfd_applicable_section_flags (stdoutput);
	  bfd_set_section_flags (lbss_section, applicable & SEC_ALLOC);
	  seg_info (lbss_section)->bss = 1;

	  /* subseg_new switched sections; restore the caller's.  */
	  subseg_set (seg, subseg);
	}

      /* Temporarily redirect common/bss handling at the large
	 sections for the duration of the parse, then restore.  */
      elf_com_section_ptr = &_bfd_elf_large_com_section;
      bss_section = lbss_section;

      s_comm_internal (0, elf_common_parse);

      elf_com_section_ptr = saved_com_section_ptr;
      bss_section = saved_bss_section;
    }
}
#endif /* OBJ_ELF || OBJ_MAYBE_ELF */