1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2020 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
39 #ifdef HAVE_SYS_PARAM_H
40 #include <sys/param.h>
43 #define INT_MAX (int) (((unsigned) (-1)) >> 1)
47 #ifndef REGISTER_WARNINGS
48 #define REGISTER_WARNINGS 1
51 #ifndef INFER_ADDR_PREFIX
52 #define INFER_ADDR_PREFIX 1
56 #define DEFAULT_ARCH "i386"
61 #define INLINE __inline__
67 /* Prefixes will be emitted in the order defined below.
68 WAIT_PREFIX must be the first prefix since FWAIT is really an
69 instruction, and so must come before any prefixes.
70 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
71 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
/* HLE and BND prefixes share the REP prefix slot, since they are
   mutually exclusive with REP/REPNE on any one instruction.  */
#define HLE_PREFIX	REP_PREFIX
#define BND_PREFIX	REP_PREFIX
#define REX_PREFIX	6	/* must come last.  */
#define MAX_PREFIXES	7	/* max prefixes per opcode */
/* We define the syntax here (modulo base, index, scale syntax).  */
#define REGISTER_PREFIX '%'
#define IMMEDIATE_PREFIX '$'
#define ABSOLUTE_PREFIX '*'
/* These are the instruction mnemonic suffixes in AT&T syntax or
   memory operand size in Intel syntax.  */
#define WORD_MNEM_SUFFIX  'w'
#define BYTE_MNEM_SUFFIX  'b'
#define SHORT_MNEM_SUFFIX 's'
#define LONG_MNEM_SUFFIX  'l'
#define QWORD_MNEM_SUFFIX  'q'
/* Intel Syntax.  Use a non-ascii letter since it never appears
   in instructions.  */
#define LONG_DOUBLE_MNEM_SUFFIX '\1'

#define END_OF_INSN '\0'
101 /* This matches the C -> StaticRounding alias in the opcode table. */
102 #define commutative staticrounding
105 'templates' is for grouping together 'template' structures for opcodes
106 of the same name. This is only used for storing the insns in the grand
107 ole hash table of insns.
108 The templates themselves start at START and range up to (but not including)
113 const insn_template
*start
;
114 const insn_template
*end
;
118 /* 386 operand encoding bytes: see 386 book for details of this. */
121 unsigned int regmem
; /* codes register or memory operand */
122 unsigned int reg
; /* codes register operand (or extended opcode) */
123 unsigned int mode
; /* how to interpret regmem & reg */
127 /* x86-64 extension prefix. */
128 typedef int rex_byte
;
130 /* 386 opcode byte to code indirect addressing. */
139 /* x86 arch names, types and features */
142 const char *name
; /* arch name */
143 unsigned int len
; /* arch string length */
144 enum processor_type type
; /* arch type */
145 i386_cpu_flags flags
; /* cpu feature flags */
146 unsigned int skip
; /* show_arch should skip this. */
150 /* Used to turn off indicated flags. */
153 const char *name
; /* arch name */
154 unsigned int len
; /* arch string length */
155 i386_cpu_flags flags
; /* cpu feature flags */
159 static void update_code_flag (int, int);
160 static void set_code_flag (int);
161 static void set_16bit_gcc_code_flag (int);
162 static void set_intel_syntax (int);
163 static void set_intel_mnemonic (int);
164 static void set_allow_index_reg (int);
165 static void set_check (int);
166 static void set_cpu_arch (int);
168 static void pe_directive_secrel (int);
170 static void signed_cons (int);
171 static char *output_invalid (int c
);
172 static int i386_finalize_immediate (segT
, expressionS
*, i386_operand_type
,
174 static int i386_finalize_displacement (segT
, expressionS
*, i386_operand_type
,
176 static int i386_att_operand (char *);
177 static int i386_intel_operand (char *, int);
178 static int i386_intel_simplify (expressionS
*);
179 static int i386_intel_parse_name (const char *, expressionS
*);
180 static const reg_entry
*parse_register (char *, char **);
181 static char *parse_insn (char *, char *);
182 static char *parse_operands (char *, const char *);
183 static void swap_operands (void);
184 static void swap_2_operands (int, int);
185 static enum flag_code
i386_addressing_mode (void);
186 static void optimize_imm (void);
187 static void optimize_disp (void);
188 static const insn_template
*match_template (char);
189 static int check_string (void);
190 static int process_suffix (void);
191 static int check_byte_reg (void);
192 static int check_long_reg (void);
193 static int check_qword_reg (void);
194 static int check_word_reg (void);
195 static int finalize_imm (void);
196 static int process_operands (void);
197 static const seg_entry
*build_modrm_byte (void);
198 static void output_insn (void);
199 static void output_imm (fragS
*, offsetT
);
200 static void output_disp (fragS
*, offsetT
);
202 static void s_bss (int);
204 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
205 static void handle_large_common (int small ATTRIBUTE_UNUSED
);
207 /* GNU_PROPERTY_X86_ISA_1_USED. */
208 static unsigned int x86_isa_1_used
;
209 /* GNU_PROPERTY_X86_FEATURE_2_USED. */
210 static unsigned int x86_feature_2_used
;
211 /* Generate x86 used ISA and feature properties. */
212 static unsigned int x86_used_note
= DEFAULT_X86_USED_NOTE
;
215 static const char *default_arch
= DEFAULT_ARCH
;
217 /* This struct describes rounding control and SAE in the instruction. */
231 static struct RC_Operation rc_op
;
233 /* The struct describes masking, applied to OPERAND in the instruction.
234 MASK is a pointer to the corresponding mask register. ZEROING tells
235 whether merging or zeroing mask is used. */
236 struct Mask_Operation
238 const reg_entry
*mask
;
239 unsigned int zeroing
;
240 /* The operand where this operation is associated. */
244 static struct Mask_Operation mask_op
;
246 /* The struct describes broadcasting, applied to OPERAND. FACTOR is
248 struct Broadcast_Operation
250 /* Type of broadcast: {1to2}, {1to4}, {1to8}, or {1to16}. */
253 /* Index of broadcasted operand. */
256 /* Number of bytes to broadcast. */
260 static struct Broadcast_Operation broadcast_op
;
265 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
266 unsigned char bytes
[4];
268 /* Destination or source register specifier. */
269 const reg_entry
*register_specifier
;
272 /* 'md_assemble ()' gathers together information and puts it into a
279 const reg_entry
*regs
;
284 operand_size_mismatch
,
285 operand_type_mismatch
,
286 register_type_mismatch
,
287 number_of_operands_mismatch
,
288 invalid_instruction_suffix
,
290 unsupported_with_intel_mnemonic
,
293 invalid_vsib_address
,
294 invalid_vector_register_set
,
295 unsupported_vector_index_register
,
296 unsupported_broadcast
,
299 mask_not_on_destination
,
302 rc_sae_operand_not_last_imm
,
303 invalid_register_operand
,
308 /* TM holds the template for the insn were currently assembling. */
311 /* SUFFIX holds the instruction size suffix for byte, word, dword
312 or qword, if given. */
315 /* OPERANDS gives the number of given operands. */
316 unsigned int operands
;
318 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
319 of given register, displacement, memory operands and immediate
321 unsigned int reg_operands
, disp_operands
, mem_operands
, imm_operands
;
323 /* TYPES [i] is the type (see above #defines) which tells us how to
324 use OP[i] for the corresponding operand. */
325 i386_operand_type types
[MAX_OPERANDS
];
327 /* Displacement expression, immediate expression, or register for each
329 union i386_op op
[MAX_OPERANDS
];
331 /* Flags for operands. */
332 unsigned int flags
[MAX_OPERANDS
];
333 #define Operand_PCrel 1
334 #define Operand_Mem 2
336 /* Relocation type for operand */
337 enum bfd_reloc_code_real reloc
[MAX_OPERANDS
];
339 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
340 the base index byte below. */
341 const reg_entry
*base_reg
;
342 const reg_entry
*index_reg
;
343 unsigned int log2_scale_factor
;
345 /* SEG gives the seg_entries of this insn. They are zero unless
346 explicit segment overrides are given. */
347 const seg_entry
*seg
[2];
349 /* Copied first memory operand string, for re-checking. */
352 /* PREFIX holds all the given prefix opcodes (usually null).
353 PREFIXES is the number of prefix opcodes. */
354 unsigned int prefixes
;
355 unsigned char prefix
[MAX_PREFIXES
];
357 /* Register is in low 3 bits of opcode. */
358 bfd_boolean short_form
;
360 /* The operand to a branch insn indicates an absolute branch. */
361 bfd_boolean jumpabsolute
;
363 /* Has MMX register operands. */
364 bfd_boolean has_regmmx
;
366 /* Has XMM register operands. */
367 bfd_boolean has_regxmm
;
369 /* Has YMM register operands. */
370 bfd_boolean has_regymm
;
372 /* Has ZMM register operands. */
373 bfd_boolean has_regzmm
;
375 /* Has GOTPC or TLS relocation. */
376 bfd_boolean has_gotpc_tls_reloc
;
378 /* RM and SIB are the modrm byte and the sib byte where the
379 addressing modes of this insn are encoded. */
386 /* Masking attributes. */
387 struct Mask_Operation
*mask
;
389 /* Rounding control and SAE attributes. */
390 struct RC_Operation
*rounding
;
392 /* Broadcasting attributes. */
393 struct Broadcast_Operation
*broadcast
;
395 /* Compressed disp8*N attribute. */
396 unsigned int memshift
;
398 /* Prefer load or store in encoding. */
401 dir_encoding_default
= 0,
407 /* Prefer 8bit or 32bit displacement in encoding. */
410 disp_encoding_default
= 0,
415 /* Prefer the REX byte in encoding. */
416 bfd_boolean rex_encoding
;
418 /* Disable instruction size optimization. */
419 bfd_boolean no_optimize
;
421 /* How to encode vector instructions. */
424 vex_encoding_default
= 0,
431 const char *rep_prefix
;
434 const char *hle_prefix
;
436 /* Have BND prefix. */
437 const char *bnd_prefix
;
439 /* Have NOTRACK prefix. */
440 const char *notrack_prefix
;
443 enum i386_error error
;
446 typedef struct _i386_insn i386_insn
;
448 /* Link RC type with corresponding string, that'll be looked for in
457 static const struct RC_name RC_NamesTable
[] =
459 { rne
, STRING_COMMA_LEN ("rn-sae") },
460 { rd
, STRING_COMMA_LEN ("rd-sae") },
461 { ru
, STRING_COMMA_LEN ("ru-sae") },
462 { rz
, STRING_COMMA_LEN ("rz-sae") },
463 { saeonly
, STRING_COMMA_LEN ("sae") },
466 /* List of chars besides those in app.c:symbol_chars that can start an
467 operand. Used to prevent the scrubber eating vital white-space. */
468 const char extra_symbol_chars
[] = "*%-([{}"
477 #if (defined (TE_I386AIX) \
478 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
479 && !defined (TE_GNU) \
480 && !defined (TE_LINUX) \
481 && !defined (TE_NACL) \
482 && !defined (TE_FreeBSD) \
483 && !defined (TE_DragonFly) \
484 && !defined (TE_NetBSD)))
485 /* This array holds the chars that always start a comment. If the
486 pre-processor is disabled, these aren't very useful. The option
487 --divide will remove '/' from this list. */
488 const char *i386_comment_chars
= "#/";
489 #define SVR4_COMMENT_CHARS 1
490 #define PREFIX_SEPARATOR '\\'
493 const char *i386_comment_chars
= "#";
494 #define PREFIX_SEPARATOR '/'
497 /* This array holds the chars that only start a comment at the beginning of
498 a line. If the line seems to have the form '# 123 filename'
499 .line and .file directives will appear in the pre-processed output.
500 Note that input_file.c hand checks for '#' at the beginning of the
501 first line of the input file. This is because the compiler outputs
502 #NO_APP at the beginning of its output.
503 Also note that comments started like this one will always work if
504 '/' isn't otherwise defined. */
505 const char line_comment_chars
[] = "#/";
507 const char line_separator_chars
[] = ";";
509 /* Chars that can be used to separate mant from exp in floating point
511 const char EXP_CHARS
[] = "eE";
513 /* Chars that mean this number is a floating point constant
516 const char FLT_CHARS
[] = "fFdDxX";
/* Tables for lexical analysis.  Each table is a 256-entry map indexed
   by an unsigned character: a nonzero entry means the character belongs
   to that lexical class.  The tables are filled in at initialization
   time (not visible in this chunk) — here they start out all-zero.  */
static char mnemonic_chars[256];
static char register_chars[256];
static char operand_chars[256];
static char identifier_chars[256];
static char digit_chars[256];

/* Lexical macros.  Cast to unsigned char so that negative plain-char
   values cannot index before the start of the tables.  */
#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
#define is_operand_char(x) (operand_chars[(unsigned char) x])
#define is_register_char(x) (register_chars[(unsigned char) x])
#define is_space_char(x) ((x) == ' ')
#define is_identifier_char(x) (identifier_chars[(unsigned char) x])
#define is_digit_char(x) (digit_chars[(unsigned char) x])

/* All non-digit non-letter characters that may occur in an operand.  */
static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
/* md_assemble() always leaves the strings it's passed unaltered.  To
   effect this we maintain a stack of saved characters that we've smashed
   with '\0's (indicating end of strings for various sub-fields of the
   assembler instruction).  */
static char save_stack[32];
static char *save_stack_p;

/* Push *(s) onto the save stack and terminate the string at s.
   Must be paired with RESTORE_END_STRING on the same pointer, in
   strict LIFO order.  */
#define END_STRING_AND_SAVE(s) \
	do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
/* Undo the most recent END_STRING_AND_SAVE.  */
#define RESTORE_END_STRING(s) \
	do { *(s) = *--save_stack_p; } while (0)
547 /* The instruction we're assembling. */
550 /* Possible templates for current insn. */
551 static const templates
*current_templates
;
553 /* Per instruction expressionS buffers: max displacements & immediates. */
554 static expressionS disp_expressions
[MAX_MEMORY_OPERANDS
];
555 static expressionS im_expressions
[MAX_IMMEDIATE_OPERANDS
];
/* Current operand we are working on; -1 means no operand is being
   parsed at the moment.  */
static int this_operand = -1;
560 /* We support four different modes. FLAG_CODE variable is used to distinguish
568 static enum flag_code flag_code
;
569 static unsigned int object_64bit
;
570 static unsigned int disallow_64bit_reloc
;
571 static int use_rela_relocations
= 0;
572 /* __tls_get_addr/___tls_get_addr symbol for TLS. */
573 static const char *tls_get_addr
;
575 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
576 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
577 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
579 /* The ELF ABI to use. */
587 static enum x86_elf_abi x86_elf_abi
= I386_ABI
;
590 #if defined (TE_PE) || defined (TE_PEP)
591 /* Use big object file format. */
592 static int use_big_obj
= 0;
595 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
596 /* 1 if generating code for a shared library. */
597 static int shared
= 0;
600 /* 1 for intel syntax,
602 static int intel_syntax
= 0;
604 static enum x86_64_isa
606 amd64
= 1, /* AMD64 ISA. */
607 intel64
/* Intel64 ISA. */
610 /* 1 for intel mnemonic,
611 0 if att mnemonic. */
612 static int intel_mnemonic
= !SYSV386_COMPAT
;
614 /* 1 if pseudo registers are permitted. */
615 static int allow_pseudo_reg
= 0;
617 /* 1 if register prefix % not required. */
618 static int allow_naked_reg
= 0;
620 /* 1 if the assembler should add BND prefix for all control-transferring
621 instructions supporting it, even if this prefix wasn't specified
623 static int add_bnd_prefix
= 0;
625 /* 1 if pseudo index register, eiz/riz, is allowed.  */
626 static int allow_index_reg
= 0;
628 /* 1 if the assembler should ignore LOCK prefix, even if it was
629 specified explicitly. */
630 static int omit_lock_prefix
= 0;
632 /* 1 if the assembler should encode lfence, mfence, and sfence as
633 "lock addl $0, (%{re}sp)". */
634 static int avoid_fence
= 0;
636 /* Type of the previous instruction. */
651 /* 1 if the assembler should generate relax relocations. */
653 static int generate_relax_relocations
654 = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
;
656 static enum check_kind
662 sse_check
, operand_check
= check_warning
;
664 /* Non-zero if branches should be aligned within power of 2 boundary. */
665 static int align_branch_power
= 0;
667 /* Types of branches to align. */
668 enum align_branch_kind
670 align_branch_none
= 0,
671 align_branch_jcc
= 1,
672 align_branch_fused
= 2,
673 align_branch_jmp
= 3,
674 align_branch_call
= 4,
675 align_branch_indirect
= 5,
679 /* Type bits of branches to align. */
680 enum align_branch_bit
682 align_branch_jcc_bit
= 1 << align_branch_jcc
,
683 align_branch_fused_bit
= 1 << align_branch_fused
,
684 align_branch_jmp_bit
= 1 << align_branch_jmp
,
685 align_branch_call_bit
= 1 << align_branch_call
,
686 align_branch_indirect_bit
= 1 << align_branch_indirect
,
687 align_branch_ret_bit
= 1 << align_branch_ret
690 static unsigned int align_branch
= (align_branch_jcc_bit
691 | align_branch_fused_bit
692 | align_branch_jmp_bit
);
694 /* The maximum padding size for fused jcc. CMP like instruction can
695 be 9 bytes and jcc can be 6 bytes. Leave room just in case for
697 #define MAX_FUSED_JCC_PADDING_SIZE 20
699 /* The maximum number of prefixes added for an instruction. */
700 static unsigned int align_branch_prefix_size
= 5;
703 1. Clear the REX_W bit with register operand if possible.
704 2. Above plus use 128bit vector instruction to clear the full vector
707 static int optimize
= 0;
710 1. Clear the REX_W bit with register operand if possible.
711 2. Above plus use 128bit vector instruction to clear the full vector
713 3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to
716 static int optimize_for_space
= 0;
718 /* Register prefix used for error message. */
719 static const char *register_prefix
= "%";
721 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
722 leave, push, and pop instructions so that gcc has the same stack
723 frame as in 32 bit mode. */
724 static char stackop_size
= '\0';
726 /* Non-zero to optimize code alignment. */
727 int optimize_align_code
= 1;
729 /* Non-zero to quieten some warnings. */
730 static int quiet_warnings
= 0;
733 static const char *cpu_arch_name
= NULL
;
734 static char *cpu_sub_arch_name
= NULL
;
736 /* CPU feature flags. */
737 static i386_cpu_flags cpu_arch_flags
= CPU_UNKNOWN_FLAGS
;
739 /* If we have selected a cpu we are generating instructions for. */
740 static int cpu_arch_tune_set
= 0;
742 /* Cpu we are generating instructions for. */
743 enum processor_type cpu_arch_tune
= PROCESSOR_UNKNOWN
;
745 /* CPU feature flags of cpu we are generating instructions for. */
746 static i386_cpu_flags cpu_arch_tune_flags
;
748 /* CPU instruction set architecture used. */
749 enum processor_type cpu_arch_isa
= PROCESSOR_UNKNOWN
;
751 /* CPU feature flags of instruction set architecture used. */
752 i386_cpu_flags cpu_arch_isa_flags
;
754 /* If set, conditional jumps are not automatically promoted to handle
755 larger than a byte offset. */
756 static unsigned int no_cond_jump_promotion
= 0;
758 /* Encode SSE instructions with VEX prefix. */
759 static unsigned int sse2avx
;
761 /* Encode scalar AVX instructions with specific vector length. */
768 /* Encode VEX WIG instructions with specific vex.w. */
775 /* Encode scalar EVEX LIG instructions with specific vector length. */
783 /* Encode EVEX WIG instructions with specific evex.w. */
790 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
791 static enum rc_type evexrcig
= rne
;
793 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
794 static symbolS
*GOT_symbol
;
796 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
797 unsigned int x86_dwarf2_return_column
;
799 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
800 int x86_cie_data_alignment
;
802 /* Interface to relax_segment.
803 There are 3 major relax states for 386 jump insns because the
804 different types of jumps add different sizes to frags when we're
805 figuring out what sort of jump to choose to reach a given label.
807 BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING are used to align
808 branches which are handled by md_estimate_size_before_relax() and
809 i386_generic_table_relax_frag(). */
812 #define UNCOND_JUMP 0
814 #define COND_JUMP86 2
815 #define BRANCH_PADDING 3
816 #define BRANCH_PREFIX 4
817 #define FUSED_JCC_PADDING 5
822 #define SMALL16 (SMALL | CODE16)
824 #define BIG16 (BIG | CODE16)
828 #define INLINE __inline__
834 #define ENCODE_RELAX_STATE(type, size) \
835 ((relax_substateT) (((type) << 2) | (size)))
836 #define TYPE_FROM_RELAX_STATE(s) \
838 #define DISP_SIZE_FROM_RELAX_STATE(s) \
839 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
841 /* This table is used by relax_frag to promote short jumps to long
842 ones where necessary. SMALL (short) jumps may be promoted to BIG
843 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
844 don't allow a short jump in a 32 bit code segment to be promoted to
845 a 16 bit offset jump because it's slower (requires data size
846 prefix), and doesn't work, unless the destination is in the bottom
847 64k of the code segment (The top 16 bits of eip are zeroed). */
849 const relax_typeS md_relax_table
[] =
852 1) most positive reach of this state,
853 2) most negative reach of this state,
854 3) how many bytes this mode will have in the variable part of the frag
855 4) which index into the table to try if we can't fit into this one. */
857 /* UNCOND_JUMP states. */
858 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
)},
859 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
)},
860 /* dword jmp adds 4 bytes to frag:
861 0 extra opcode bytes, 4 displacement bytes. */
863 /* word jmp adds 2 bytes to frag:
864 0 extra opcode bytes, 2 displacement bytes. */
867 /* COND_JUMP states. */
868 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG
)},
869 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP
, BIG16
)},
870 /* dword conditionals adds 5 bytes to frag:
871 1 extra opcode byte, 4 displacement bytes. */
873 /* word conditionals add 3 bytes to frag:
874 1 extra opcode byte, 2 displacement bytes. */
877 /* COND_JUMP86 states. */
878 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG
)},
879 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
)},
880 /* dword conditionals adds 5 bytes to frag:
881 1 extra opcode byte, 4 displacement bytes. */
883 /* word conditionals add 4 bytes to frag:
884 1 displacement byte and a 3 byte long branch insn. */
888 static const arch_entry cpu_arch
[] =
890 /* Do not replace the first two entries - i386_target_format()
891 relies on them being there in this order. */
892 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32
,
893 CPU_GENERIC32_FLAGS
, 0 },
894 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64
,
895 CPU_GENERIC64_FLAGS
, 0 },
896 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN
,
898 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN
,
900 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN
,
902 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386
,
904 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486
,
906 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM
,
908 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO
,
910 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM
,
912 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO
,
913 CPU_PENTIUMPRO_FLAGS
, 0 },
914 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO
,
916 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO
,
918 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4
,
920 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA
,
922 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA
,
923 CPU_NOCONA_FLAGS
, 0 },
924 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE
,
926 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE
,
928 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2
,
929 CPU_CORE2_FLAGS
, 1 },
930 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2
,
931 CPU_CORE2_FLAGS
, 0 },
932 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7
,
933 CPU_COREI7_FLAGS
, 0 },
934 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM
,
936 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM
,
938 { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU
,
939 CPU_IAMCU_FLAGS
, 0 },
940 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6
,
942 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6
,
944 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON
,
945 CPU_ATHLON_FLAGS
, 0 },
946 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8
,
948 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8
,
950 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8
,
952 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10
,
953 CPU_AMDFAM10_FLAGS
, 0 },
954 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD
,
955 CPU_BDVER1_FLAGS
, 0 },
956 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD
,
957 CPU_BDVER2_FLAGS
, 0 },
958 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD
,
959 CPU_BDVER3_FLAGS
, 0 },
960 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD
,
961 CPU_BDVER4_FLAGS
, 0 },
962 { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER
,
963 CPU_ZNVER1_FLAGS
, 0 },
964 { STRING_COMMA_LEN ("znver2"), PROCESSOR_ZNVER
,
965 CPU_ZNVER2_FLAGS
, 0 },
966 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT
,
967 CPU_BTVER1_FLAGS
, 0 },
968 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT
,
969 CPU_BTVER2_FLAGS
, 0 },
970 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN
,
972 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN
,
974 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN
,
976 { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN
,
978 { STRING_COMMA_LEN (".cmov"), PROCESSOR_UNKNOWN
,
980 { STRING_COMMA_LEN (".fxsr"), PROCESSOR_UNKNOWN
,
982 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN
,
984 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN
,
986 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN
,
988 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN
,
990 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN
,
991 CPU_SSSE3_FLAGS
, 0 },
992 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN
,
993 CPU_SSE4_1_FLAGS
, 0 },
994 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN
,
995 CPU_SSE4_2_FLAGS
, 0 },
996 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN
,
997 CPU_SSE4_2_FLAGS
, 0 },
998 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN
,
1000 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN
,
1001 CPU_AVX2_FLAGS
, 0 },
1002 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN
,
1003 CPU_AVX512F_FLAGS
, 0 },
1004 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN
,
1005 CPU_AVX512CD_FLAGS
, 0 },
1006 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN
,
1007 CPU_AVX512ER_FLAGS
, 0 },
1008 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN
,
1009 CPU_AVX512PF_FLAGS
, 0 },
1010 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN
,
1011 CPU_AVX512DQ_FLAGS
, 0 },
1012 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN
,
1013 CPU_AVX512BW_FLAGS
, 0 },
1014 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN
,
1015 CPU_AVX512VL_FLAGS
, 0 },
1016 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN
,
1018 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN
,
1019 CPU_VMFUNC_FLAGS
, 0 },
1020 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN
,
1022 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN
,
1023 CPU_XSAVE_FLAGS
, 0 },
1024 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN
,
1025 CPU_XSAVEOPT_FLAGS
, 0 },
1026 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN
,
1027 CPU_XSAVEC_FLAGS
, 0 },
1028 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN
,
1029 CPU_XSAVES_FLAGS
, 0 },
1030 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN
,
1032 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN
,
1033 CPU_PCLMUL_FLAGS
, 0 },
1034 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN
,
1035 CPU_PCLMUL_FLAGS
, 1 },
1036 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN
,
1037 CPU_FSGSBASE_FLAGS
, 0 },
1038 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN
,
1039 CPU_RDRND_FLAGS
, 0 },
1040 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN
,
1041 CPU_F16C_FLAGS
, 0 },
1042 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN
,
1043 CPU_BMI2_FLAGS
, 0 },
1044 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN
,
1046 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN
,
1047 CPU_FMA4_FLAGS
, 0 },
1048 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN
,
1050 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN
,
1052 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN
,
1053 CPU_MOVBE_FLAGS
, 0 },
1054 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN
,
1055 CPU_CX16_FLAGS
, 0 },
1056 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN
,
1058 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN
,
1059 CPU_LZCNT_FLAGS
, 0 },
1060 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN
,
1062 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN
,
1064 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN
,
1065 CPU_INVPCID_FLAGS
, 0 },
1066 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN
,
1067 CPU_CLFLUSH_FLAGS
, 0 },
1068 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN
,
1070 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN
,
1071 CPU_SYSCALL_FLAGS
, 0 },
1072 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN
,
1073 CPU_RDTSCP_FLAGS
, 0 },
1074 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN
,
1075 CPU_3DNOW_FLAGS
, 0 },
1076 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN
,
1077 CPU_3DNOWA_FLAGS
, 0 },
1078 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN
,
1079 CPU_PADLOCK_FLAGS
, 0 },
1080 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN
,
1081 CPU_SVME_FLAGS
, 1 },
1082 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN
,
1083 CPU_SVME_FLAGS
, 0 },
1084 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN
,
1085 CPU_SSE4A_FLAGS
, 0 },
1086 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN
,
1088 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN
,
1090 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN
,
1092 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN
,
1094 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN
,
1095 CPU_RDSEED_FLAGS
, 0 },
1096 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN
,
1097 CPU_PRFCHW_FLAGS
, 0 },
1098 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN
,
1099 CPU_SMAP_FLAGS
, 0 },
1100 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN
,
1102 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN
,
1104 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN
,
1105 CPU_CLFLUSHOPT_FLAGS
, 0 },
1106 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN
,
1107 CPU_PREFETCHWT1_FLAGS
, 0 },
1108 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN
,
1110 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN
,
1111 CPU_CLWB_FLAGS
, 0 },
1112 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN
,
1113 CPU_AVX512IFMA_FLAGS
, 0 },
1114 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN
,
1115 CPU_AVX512VBMI_FLAGS
, 0 },
1116 { STRING_COMMA_LEN (".avx512_4fmaps"), PROCESSOR_UNKNOWN
,
1117 CPU_AVX512_4FMAPS_FLAGS
, 0 },
1118 { STRING_COMMA_LEN (".avx512_4vnniw"), PROCESSOR_UNKNOWN
,
1119 CPU_AVX512_4VNNIW_FLAGS
, 0 },
1120 { STRING_COMMA_LEN (".avx512_vpopcntdq"), PROCESSOR_UNKNOWN
,
1121 CPU_AVX512_VPOPCNTDQ_FLAGS
, 0 },
1122 { STRING_COMMA_LEN (".avx512_vbmi2"), PROCESSOR_UNKNOWN
,
1123 CPU_AVX512_VBMI2_FLAGS
, 0 },
1124 { STRING_COMMA_LEN (".avx512_vnni"), PROCESSOR_UNKNOWN
,
1125 CPU_AVX512_VNNI_FLAGS
, 0 },
1126 { STRING_COMMA_LEN (".avx512_bitalg"), PROCESSOR_UNKNOWN
,
1127 CPU_AVX512_BITALG_FLAGS
, 0 },
1128 { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN
,
1129 CPU_CLZERO_FLAGS
, 0 },
1130 { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN
,
1131 CPU_MWAITX_FLAGS
, 0 },
1132 { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN
,
1133 CPU_OSPKE_FLAGS
, 0 },
1134 { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN
,
1135 CPU_RDPID_FLAGS
, 0 },
1136 { STRING_COMMA_LEN (".ptwrite"), PROCESSOR_UNKNOWN
,
1137 CPU_PTWRITE_FLAGS
, 0 },
1138 { STRING_COMMA_LEN (".ibt"), PROCESSOR_UNKNOWN
,
1140 { STRING_COMMA_LEN (".shstk"), PROCESSOR_UNKNOWN
,
1141 CPU_SHSTK_FLAGS
, 0 },
1142 { STRING_COMMA_LEN (".gfni"), PROCESSOR_UNKNOWN
,
1143 CPU_GFNI_FLAGS
, 0 },
1144 { STRING_COMMA_LEN (".vaes"), PROCESSOR_UNKNOWN
,
1145 CPU_VAES_FLAGS
, 0 },
1146 { STRING_COMMA_LEN (".vpclmulqdq"), PROCESSOR_UNKNOWN
,
1147 CPU_VPCLMULQDQ_FLAGS
, 0 },
1148 { STRING_COMMA_LEN (".wbnoinvd"), PROCESSOR_UNKNOWN
,
1149 CPU_WBNOINVD_FLAGS
, 0 },
1150 { STRING_COMMA_LEN (".pconfig"), PROCESSOR_UNKNOWN
,
1151 CPU_PCONFIG_FLAGS
, 0 },
1152 { STRING_COMMA_LEN (".waitpkg"), PROCESSOR_UNKNOWN
,
1153 CPU_WAITPKG_FLAGS
, 0 },
1154 { STRING_COMMA_LEN (".cldemote"), PROCESSOR_UNKNOWN
,
1155 CPU_CLDEMOTE_FLAGS
, 0 },
1156 { STRING_COMMA_LEN (".movdiri"), PROCESSOR_UNKNOWN
,
1157 CPU_MOVDIRI_FLAGS
, 0 },
1158 { STRING_COMMA_LEN (".movdir64b"), PROCESSOR_UNKNOWN
,
1159 CPU_MOVDIR64B_FLAGS
, 0 },
1160 { STRING_COMMA_LEN (".avx512_bf16"), PROCESSOR_UNKNOWN
,
1161 CPU_AVX512_BF16_FLAGS
, 0 },
1162 { STRING_COMMA_LEN (".avx512_vp2intersect"), PROCESSOR_UNKNOWN
,
1163 CPU_AVX512_VP2INTERSECT_FLAGS
, 0 },
1164 { STRING_COMMA_LEN (".enqcmd"), PROCESSOR_UNKNOWN
,
1165 CPU_ENQCMD_FLAGS
, 0 },
1166 { STRING_COMMA_LEN (".rdpru"), PROCESSOR_UNKNOWN
,
1167 CPU_RDPRU_FLAGS
, 0 },
1168 { STRING_COMMA_LEN (".mcommit"), PROCESSOR_UNKNOWN
,
1169 CPU_MCOMMIT_FLAGS
, 0 },
1172 static const noarch_entry cpu_noarch
[] =
1174 { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS
},
1175 { STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS
},
1176 { STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS
},
1177 { STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS
},
1178 { STRING_COMMA_LEN ("nocmov"), CPU_ANY_CMOV_FLAGS
},
1179 { STRING_COMMA_LEN ("nofxsr"), CPU_ANY_FXSR_FLAGS
},
1180 { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS
},
1181 { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS
},
1182 { STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS
},
1183 { STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS
},
1184 { STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS
},
1185 { STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS
},
1186 { STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS
},
1187 { STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS
},
1188 { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS
},
1189 { STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS
},
1190 { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS
},
1191 { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS
},
1192 { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS
},
1193 { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS
},
1194 { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS
},
1195 { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS
},
1196 { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS
},
1197 { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS
},
1198 { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS
},
1199 { STRING_COMMA_LEN ("noavx512_4fmaps"), CPU_ANY_AVX512_4FMAPS_FLAGS
},
1200 { STRING_COMMA_LEN ("noavx512_4vnniw"), CPU_ANY_AVX512_4VNNIW_FLAGS
},
1201 { STRING_COMMA_LEN ("noavx512_vpopcntdq"), CPU_ANY_AVX512_VPOPCNTDQ_FLAGS
},
1202 { STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS
},
1203 { STRING_COMMA_LEN ("noavx512_vnni"), CPU_ANY_AVX512_VNNI_FLAGS
},
1204 { STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS
},
1205 { STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS
},
1206 { STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS
},
1207 { STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS
},
1208 { STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS
},
1209 { STRING_COMMA_LEN ("noavx512_bf16"), CPU_ANY_AVX512_BF16_FLAGS
},
1210 { STRING_COMMA_LEN ("noavx512_vp2intersect"), CPU_ANY_SHSTK_FLAGS
},
1211 { STRING_COMMA_LEN ("noenqcmd"), CPU_ANY_ENQCMD_FLAGS
},
1215 /* Like s_lcomm_internal in gas/read.c but the alignment string
1216 is allowed to be optional. */
1219 pe_lcomm_internal (int needs_align
, symbolS
*symbolP
, addressT size
)
1226 && *input_line_pointer
== ',')
1228 align
= parse_align (needs_align
- 1);
1230 if (align
== (addressT
) -1)
1245 bss_alloc (symbolP
, size
, align
);
1250 pe_lcomm (int needs_align
)
1252 s_comm_internal (needs_align
* 2, pe_lcomm_internal
);
1256 const pseudo_typeS md_pseudo_table
[] =
1258 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1259 {"align", s_align_bytes
, 0},
1261 {"align", s_align_ptwo
, 0},
1263 {"arch", set_cpu_arch
, 0},
1267 {"lcomm", pe_lcomm
, 1},
1269 {"ffloat", float_cons
, 'f'},
1270 {"dfloat", float_cons
, 'd'},
1271 {"tfloat", float_cons
, 'x'},
1273 {"slong", signed_cons
, 4},
1274 {"noopt", s_ignore
, 0},
1275 {"optim", s_ignore
, 0},
1276 {"code16gcc", set_16bit_gcc_code_flag
, CODE_16BIT
},
1277 {"code16", set_code_flag
, CODE_16BIT
},
1278 {"code32", set_code_flag
, CODE_32BIT
},
1280 {"code64", set_code_flag
, CODE_64BIT
},
1282 {"intel_syntax", set_intel_syntax
, 1},
1283 {"att_syntax", set_intel_syntax
, 0},
1284 {"intel_mnemonic", set_intel_mnemonic
, 1},
1285 {"att_mnemonic", set_intel_mnemonic
, 0},
1286 {"allow_index_reg", set_allow_index_reg
, 1},
1287 {"disallow_index_reg", set_allow_index_reg
, 0},
1288 {"sse_check", set_check
, 0},
1289 {"operand_check", set_check
, 1},
1290 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1291 {"largecomm", handle_large_common
, 0},
1293 {"file", dwarf2_directive_file
, 0},
1294 {"loc", dwarf2_directive_loc
, 0},
1295 {"loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0},
1298 {"secrel32", pe_directive_secrel
, 0},
1303 /* For interface with expression (). */
1304 extern char *input_line_pointer
;
1306 /* Hash table for instruction mnemonic lookup. */
1307 static struct hash_control
*op_hash
;
1309 /* Hash table for register lookup. */
1310 static struct hash_control
*reg_hash
;
1312 /* Various efficient no-op patterns for aligning code labels.
1313 Note: Don't try to assemble the instructions in the comments.
1314 0L and 0w are not legal. */
1315 static const unsigned char f32_1
[] =
1317 static const unsigned char f32_2
[] =
1318 {0x66,0x90}; /* xchg %ax,%ax */
1319 static const unsigned char f32_3
[] =
1320 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
1321 static const unsigned char f32_4
[] =
1322 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1323 static const unsigned char f32_6
[] =
1324 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
1325 static const unsigned char f32_7
[] =
1326 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1327 static const unsigned char f16_3
[] =
1328 {0x8d,0x74,0x00}; /* lea 0(%si),%si */
1329 static const unsigned char f16_4
[] =
1330 {0x8d,0xb4,0x00,0x00}; /* lea 0W(%si),%si */
1331 static const unsigned char jump_disp8
[] =
1332 {0xeb}; /* jmp disp8 */
1333 static const unsigned char jump32_disp32
[] =
1334 {0xe9}; /* jmp disp32 */
1335 static const unsigned char jump16_disp32
[] =
1336 {0x66,0xe9}; /* jmp disp32 */
1337 /* 32-bit NOPs patterns. */
1338 static const unsigned char *const f32_patt
[] = {
1339 f32_1
, f32_2
, f32_3
, f32_4
, NULL
, f32_6
, f32_7
1341 /* 16-bit NOPs patterns. */
1342 static const unsigned char *const f16_patt
[] = {
1343 f32_1
, f32_2
, f16_3
, f16_4
1345 /* nopl (%[re]ax) */
1346 static const unsigned char alt_3
[] =
1348 /* nopl 0(%[re]ax) */
1349 static const unsigned char alt_4
[] =
1350 {0x0f,0x1f,0x40,0x00};
1351 /* nopl 0(%[re]ax,%[re]ax,1) */
1352 static const unsigned char alt_5
[] =
1353 {0x0f,0x1f,0x44,0x00,0x00};
1354 /* nopw 0(%[re]ax,%[re]ax,1) */
1355 static const unsigned char alt_6
[] =
1356 {0x66,0x0f,0x1f,0x44,0x00,0x00};
1357 /* nopl 0L(%[re]ax) */
1358 static const unsigned char alt_7
[] =
1359 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1360 /* nopl 0L(%[re]ax,%[re]ax,1) */
1361 static const unsigned char alt_8
[] =
1362 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1363 /* nopw 0L(%[re]ax,%[re]ax,1) */
1364 static const unsigned char alt_9
[] =
1365 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1366 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1367 static const unsigned char alt_10
[] =
1368 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1369 /* data16 nopw %cs:0L(%eax,%eax,1) */
1370 static const unsigned char alt_11
[] =
1371 {0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1372 /* 32-bit and 64-bit NOPs patterns. */
1373 static const unsigned char *const alt_patt
[] = {
1374 f32_1
, f32_2
, alt_3
, alt_4
, alt_5
, alt_6
, alt_7
, alt_8
,
1375 alt_9
, alt_10
, alt_11
/* Generate COUNT bytes of NOPs to WHERE from PATT with the maximum
   size of a single NOP instruction MAX_SINGLE_NOP_SIZE.  */

static void
i386_output_nops (char *where, const unsigned char *const *patt,
		  int count, int max_single_nop_size)

{
  /* Place the longer NOP first.  */
  int last;
  int offset;
  const unsigned char *nops;

  if (max_single_nop_size < 1)
    {
      as_fatal (_("i386_output_nops called to generate nops of at most %d bytes!"),
		max_single_nop_size);
      return;
    }

  nops = patt[max_single_nop_size - 1];

  /* Use the smaller one if the requested one isn't available.  */
  if (nops == NULL)
    {
      max_single_nop_size--;
      nops = patt[max_single_nop_size - 1];
    }

  last = count % max_single_nop_size;

  count -= last;
  for (offset = 0; offset < count; offset += max_single_nop_size)
    memcpy (where + offset, nops, max_single_nop_size);

  if (last)
    {
      nops = patt[last - 1];
      if (nops == NULL)
	{
	  /* Use the smaller one plus one-byte NOP if the needed one
	     isn't available.  */
	  last--;
	  nops = patt[last - 1];
	  memcpy (where + offset, nops, last);
	  where[offset + last] = *patt[0];
	}
      else
	memcpy (where + offset, nops, last);
    }
}
1431 fits_in_imm7 (offsetT num
)
1433 return (num
& 0x7f) == num
;
1437 fits_in_imm31 (offsetT num
)
1439 return (num
& 0x7fffffff) == num
;
1442 /* Genenerate COUNT bytes of NOPs to WHERE with the maximum size of a
1443 single NOP instruction LIMIT. */
1446 i386_generate_nops (fragS
*fragP
, char *where
, offsetT count
, int limit
)
1448 const unsigned char *const *patt
= NULL
;
1449 int max_single_nop_size
;
1450 /* Maximum number of NOPs before switching to jump over NOPs. */
1451 int max_number_of_nops
;
1453 switch (fragP
->fr_type
)
1458 case rs_machine_dependent
:
1459 /* Allow NOP padding for jumps and calls. */
1460 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
1461 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
1468 /* We need to decide which NOP sequence to use for 32bit and
1469 64bit. When -mtune= is used:
1471 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1472 PROCESSOR_GENERIC32, f32_patt will be used.
1473 2. For the rest, alt_patt will be used.
1475 When -mtune= isn't used, alt_patt will be used if
1476 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1479 When -march= or .arch is used, we can't use anything beyond
1480 cpu_arch_isa_flags. */
1482 if (flag_code
== CODE_16BIT
)
1485 max_single_nop_size
= sizeof (f16_patt
) / sizeof (f16_patt
[0]);
1486 /* Limit number of NOPs to 2 in 16-bit mode. */
1487 max_number_of_nops
= 2;
1491 if (fragP
->tc_frag_data
.isa
== PROCESSOR_UNKNOWN
)
1493 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1494 switch (cpu_arch_tune
)
1496 case PROCESSOR_UNKNOWN
:
1497 /* We use cpu_arch_isa_flags to check if we SHOULD
1498 optimize with nops. */
1499 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1504 case PROCESSOR_PENTIUM4
:
1505 case PROCESSOR_NOCONA
:
1506 case PROCESSOR_CORE
:
1507 case PROCESSOR_CORE2
:
1508 case PROCESSOR_COREI7
:
1509 case PROCESSOR_L1OM
:
1510 case PROCESSOR_K1OM
:
1511 case PROCESSOR_GENERIC64
:
1513 case PROCESSOR_ATHLON
:
1515 case PROCESSOR_AMDFAM10
:
1517 case PROCESSOR_ZNVER
:
1521 case PROCESSOR_I386
:
1522 case PROCESSOR_I486
:
1523 case PROCESSOR_PENTIUM
:
1524 case PROCESSOR_PENTIUMPRO
:
1525 case PROCESSOR_IAMCU
:
1526 case PROCESSOR_GENERIC32
:
1533 switch (fragP
->tc_frag_data
.tune
)
1535 case PROCESSOR_UNKNOWN
:
1536 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1537 PROCESSOR_UNKNOWN. */
1541 case PROCESSOR_I386
:
1542 case PROCESSOR_I486
:
1543 case PROCESSOR_PENTIUM
:
1544 case PROCESSOR_IAMCU
:
1546 case PROCESSOR_ATHLON
:
1548 case PROCESSOR_AMDFAM10
:
1550 case PROCESSOR_ZNVER
:
1552 case PROCESSOR_GENERIC32
:
1553 /* We use cpu_arch_isa_flags to check if we CAN optimize
1555 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1560 case PROCESSOR_PENTIUMPRO
:
1561 case PROCESSOR_PENTIUM4
:
1562 case PROCESSOR_NOCONA
:
1563 case PROCESSOR_CORE
:
1564 case PROCESSOR_CORE2
:
1565 case PROCESSOR_COREI7
:
1566 case PROCESSOR_L1OM
:
1567 case PROCESSOR_K1OM
:
1568 if (fragP
->tc_frag_data
.isa_flags
.bitfield
.cpunop
)
1573 case PROCESSOR_GENERIC64
:
1579 if (patt
== f32_patt
)
1581 max_single_nop_size
= sizeof (f32_patt
) / sizeof (f32_patt
[0]);
1582 /* Limit number of NOPs to 2 for older processors. */
1583 max_number_of_nops
= 2;
1587 max_single_nop_size
= sizeof (alt_patt
) / sizeof (alt_patt
[0]);
1588 /* Limit number of NOPs to 7 for newer processors. */
1589 max_number_of_nops
= 7;
1594 limit
= max_single_nop_size
;
1596 if (fragP
->fr_type
== rs_fill_nop
)
1598 /* Output NOPs for .nop directive. */
1599 if (limit
> max_single_nop_size
)
1601 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1602 _("invalid single nop size: %d "
1603 "(expect within [0, %d])"),
1604 limit
, max_single_nop_size
);
1608 else if (fragP
->fr_type
!= rs_machine_dependent
)
1609 fragP
->fr_var
= count
;
1611 if ((count
/ max_single_nop_size
) > max_number_of_nops
)
1613 /* Generate jump over NOPs. */
1614 offsetT disp
= count
- 2;
1615 if (fits_in_imm7 (disp
))
1617 /* Use "jmp disp8" if possible. */
1619 where
[0] = jump_disp8
[0];
1625 unsigned int size_of_jump
;
1627 if (flag_code
== CODE_16BIT
)
1629 where
[0] = jump16_disp32
[0];
1630 where
[1] = jump16_disp32
[1];
1635 where
[0] = jump32_disp32
[0];
1639 count
-= size_of_jump
+ 4;
1640 if (!fits_in_imm31 (count
))
1642 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
1643 _("jump over nop padding out of range"));
1647 md_number_to_chars (where
+ size_of_jump
, count
, 4);
1648 where
+= size_of_jump
+ 4;
1652 /* Generate multiple NOPs. */
1653 i386_output_nops (where
, patt
, count
, limit
);
1657 operand_type_all_zero (const union i386_operand_type
*x
)
1659 switch (ARRAY_SIZE(x
->array
))
1670 return !x
->array
[0];
1677 operand_type_set (union i386_operand_type
*x
, unsigned int v
)
1679 switch (ARRAY_SIZE(x
->array
))
1695 x
->bitfield
.class = ClassNone
;
1696 x
->bitfield
.instance
= InstanceNone
;
1700 operand_type_equal (const union i386_operand_type
*x
,
1701 const union i386_operand_type
*y
)
1703 switch (ARRAY_SIZE(x
->array
))
1706 if (x
->array
[2] != y
->array
[2])
1710 if (x
->array
[1] != y
->array
[1])
1714 return x
->array
[0] == y
->array
[0];
1722 cpu_flags_all_zero (const union i386_cpu_flags
*x
)
1724 switch (ARRAY_SIZE(x
->array
))
1739 return !x
->array
[0];
1746 cpu_flags_equal (const union i386_cpu_flags
*x
,
1747 const union i386_cpu_flags
*y
)
1749 switch (ARRAY_SIZE(x
->array
))
1752 if (x
->array
[3] != y
->array
[3])
1756 if (x
->array
[2] != y
->array
[2])
1760 if (x
->array
[1] != y
->array
[1])
1764 return x
->array
[0] == y
->array
[0];
1772 cpu_flags_check_cpu64 (i386_cpu_flags f
)
1774 return !((flag_code
== CODE_64BIT
&& f
.bitfield
.cpuno64
)
1775 || (flag_code
!= CODE_64BIT
&& f
.bitfield
.cpu64
));
1778 static INLINE i386_cpu_flags
1779 cpu_flags_and (i386_cpu_flags x
, i386_cpu_flags y
)
1781 switch (ARRAY_SIZE (x
.array
))
1784 x
.array
[3] &= y
.array
[3];
1787 x
.array
[2] &= y
.array
[2];
1790 x
.array
[1] &= y
.array
[1];
1793 x
.array
[0] &= y
.array
[0];
1801 static INLINE i386_cpu_flags
1802 cpu_flags_or (i386_cpu_flags x
, i386_cpu_flags y
)
1804 switch (ARRAY_SIZE (x
.array
))
1807 x
.array
[3] |= y
.array
[3];
1810 x
.array
[2] |= y
.array
[2];
1813 x
.array
[1] |= y
.array
[1];
1816 x
.array
[0] |= y
.array
[0];
1824 static INLINE i386_cpu_flags
1825 cpu_flags_and_not (i386_cpu_flags x
, i386_cpu_flags y
)
1827 switch (ARRAY_SIZE (x
.array
))
1830 x
.array
[3] &= ~y
.array
[3];
1833 x
.array
[2] &= ~y
.array
[2];
1836 x
.array
[1] &= ~y
.array
[1];
1839 x
.array
[0] &= ~y
.array
[0];
1847 #define CPU_FLAGS_ARCH_MATCH 0x1
1848 #define CPU_FLAGS_64BIT_MATCH 0x2
1850 #define CPU_FLAGS_PERFECT_MATCH \
1851 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_64BIT_MATCH)
1853 /* Return CPU flags match bits. */
1856 cpu_flags_match (const insn_template
*t
)
1858 i386_cpu_flags x
= t
->cpu_flags
;
1859 int match
= cpu_flags_check_cpu64 (x
) ? CPU_FLAGS_64BIT_MATCH
: 0;
1861 x
.bitfield
.cpu64
= 0;
1862 x
.bitfield
.cpuno64
= 0;
1864 if (cpu_flags_all_zero (&x
))
1866 /* This instruction is available on all archs. */
1867 match
|= CPU_FLAGS_ARCH_MATCH
;
1871 /* This instruction is available only on some archs. */
1872 i386_cpu_flags cpu
= cpu_arch_flags
;
1874 /* AVX512VL is no standalone feature - match it and then strip it. */
1875 if (x
.bitfield
.cpuavx512vl
&& !cpu
.bitfield
.cpuavx512vl
)
1877 x
.bitfield
.cpuavx512vl
= 0;
1879 cpu
= cpu_flags_and (x
, cpu
);
1880 if (!cpu_flags_all_zero (&cpu
))
1882 if (x
.bitfield
.cpuavx
)
1884 /* We need to check a few extra flags with AVX. */
1885 if (cpu
.bitfield
.cpuavx
1886 && (!t
->opcode_modifier
.sse2avx
|| sse2avx
)
1887 && (!x
.bitfield
.cpuaes
|| cpu
.bitfield
.cpuaes
)
1888 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1889 && (!x
.bitfield
.cpupclmul
|| cpu
.bitfield
.cpupclmul
))
1890 match
|= CPU_FLAGS_ARCH_MATCH
;
1892 else if (x
.bitfield
.cpuavx512f
)
1894 /* We need to check a few extra flags with AVX512F. */
1895 if (cpu
.bitfield
.cpuavx512f
1896 && (!x
.bitfield
.cpugfni
|| cpu
.bitfield
.cpugfni
)
1897 && (!x
.bitfield
.cpuvaes
|| cpu
.bitfield
.cpuvaes
)
1898 && (!x
.bitfield
.cpuvpclmulqdq
|| cpu
.bitfield
.cpuvpclmulqdq
))
1899 match
|= CPU_FLAGS_ARCH_MATCH
;
1902 match
|= CPU_FLAGS_ARCH_MATCH
;
1908 static INLINE i386_operand_type
1909 operand_type_and (i386_operand_type x
, i386_operand_type y
)
1911 if (x
.bitfield
.class != y
.bitfield
.class)
1912 x
.bitfield
.class = ClassNone
;
1913 if (x
.bitfield
.instance
!= y
.bitfield
.instance
)
1914 x
.bitfield
.instance
= InstanceNone
;
1916 switch (ARRAY_SIZE (x
.array
))
1919 x
.array
[2] &= y
.array
[2];
1922 x
.array
[1] &= y
.array
[1];
1925 x
.array
[0] &= y
.array
[0];
1933 static INLINE i386_operand_type
1934 operand_type_and_not (i386_operand_type x
, i386_operand_type y
)
1936 gas_assert (y
.bitfield
.class == ClassNone
);
1937 gas_assert (y
.bitfield
.instance
== InstanceNone
);
1939 switch (ARRAY_SIZE (x
.array
))
1942 x
.array
[2] &= ~y
.array
[2];
1945 x
.array
[1] &= ~y
.array
[1];
1948 x
.array
[0] &= ~y
.array
[0];
1956 static INLINE i386_operand_type
1957 operand_type_or (i386_operand_type x
, i386_operand_type y
)
1959 gas_assert (x
.bitfield
.class == ClassNone
||
1960 y
.bitfield
.class == ClassNone
||
1961 x
.bitfield
.class == y
.bitfield
.class);
1962 gas_assert (x
.bitfield
.instance
== InstanceNone
||
1963 y
.bitfield
.instance
== InstanceNone
||
1964 x
.bitfield
.instance
== y
.bitfield
.instance
);
1966 switch (ARRAY_SIZE (x
.array
))
1969 x
.array
[2] |= y
.array
[2];
1972 x
.array
[1] |= y
.array
[1];
1975 x
.array
[0] |= y
.array
[0];
1983 static INLINE i386_operand_type
1984 operand_type_xor (i386_operand_type x
, i386_operand_type y
)
1986 gas_assert (y
.bitfield
.class == ClassNone
);
1987 gas_assert (y
.bitfield
.instance
== InstanceNone
);
1989 switch (ARRAY_SIZE (x
.array
))
1992 x
.array
[2] ^= y
.array
[2];
1995 x
.array
[1] ^= y
.array
[1];
1998 x
.array
[0] ^= y
.array
[0];
2006 static const i386_operand_type disp16
= OPERAND_TYPE_DISP16
;
2007 static const i386_operand_type disp32
= OPERAND_TYPE_DISP32
;
2008 static const i386_operand_type disp32s
= OPERAND_TYPE_DISP32S
;
2009 static const i386_operand_type disp16_32
= OPERAND_TYPE_DISP16_32
;
2010 static const i386_operand_type anydisp
= OPERAND_TYPE_ANYDISP
;
2011 static const i386_operand_type anyimm
= OPERAND_TYPE_ANYIMM
;
2012 static const i386_operand_type regxmm
= OPERAND_TYPE_REGXMM
;
2013 static const i386_operand_type regmask
= OPERAND_TYPE_REGMASK
;
2014 static const i386_operand_type imm8
= OPERAND_TYPE_IMM8
;
2015 static const i386_operand_type imm8s
= OPERAND_TYPE_IMM8S
;
2016 static const i386_operand_type imm16
= OPERAND_TYPE_IMM16
;
2017 static const i386_operand_type imm32
= OPERAND_TYPE_IMM32
;
2018 static const i386_operand_type imm32s
= OPERAND_TYPE_IMM32S
;
2019 static const i386_operand_type imm64
= OPERAND_TYPE_IMM64
;
2020 static const i386_operand_type imm16_32
= OPERAND_TYPE_IMM16_32
;
2021 static const i386_operand_type imm16_32s
= OPERAND_TYPE_IMM16_32S
;
2022 static const i386_operand_type imm16_32_32s
= OPERAND_TYPE_IMM16_32_32S
;
2033 operand_type_check (i386_operand_type t
, enum operand_type c
)
2038 return t
.bitfield
.class == Reg
;
2041 return (t
.bitfield
.imm8
2045 || t
.bitfield
.imm32s
2046 || t
.bitfield
.imm64
);
2049 return (t
.bitfield
.disp8
2050 || t
.bitfield
.disp16
2051 || t
.bitfield
.disp32
2052 || t
.bitfield
.disp32s
2053 || t
.bitfield
.disp64
);
2056 return (t
.bitfield
.disp8
2057 || t
.bitfield
.disp16
2058 || t
.bitfield
.disp32
2059 || t
.bitfield
.disp32s
2060 || t
.bitfield
.disp64
2061 || t
.bitfield
.baseindex
);
2070 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
2071 between operand GIVEN and opeand WANTED for instruction template T. */
2074 match_operand_size (const insn_template
*t
, unsigned int wanted
,
2077 return !((i
.types
[given
].bitfield
.byte
2078 && !t
->operand_types
[wanted
].bitfield
.byte
)
2079 || (i
.types
[given
].bitfield
.word
2080 && !t
->operand_types
[wanted
].bitfield
.word
)
2081 || (i
.types
[given
].bitfield
.dword
2082 && !t
->operand_types
[wanted
].bitfield
.dword
)
2083 || (i
.types
[given
].bitfield
.qword
2084 && !t
->operand_types
[wanted
].bitfield
.qword
)
2085 || (i
.types
[given
].bitfield
.tbyte
2086 && !t
->operand_types
[wanted
].bitfield
.tbyte
));
2089 /* Return 1 if there is no conflict in SIMD register between operand
2090 GIVEN and opeand WANTED for instruction template T. */
2093 match_simd_size (const insn_template
*t
, unsigned int wanted
,
2096 return !((i
.types
[given
].bitfield
.xmmword
2097 && !t
->operand_types
[wanted
].bitfield
.xmmword
)
2098 || (i
.types
[given
].bitfield
.ymmword
2099 && !t
->operand_types
[wanted
].bitfield
.ymmword
)
2100 || (i
.types
[given
].bitfield
.zmmword
2101 && !t
->operand_types
[wanted
].bitfield
.zmmword
));
2104 /* Return 1 if there is no conflict in any size between operand GIVEN
2105 and opeand WANTED for instruction template T. */
2108 match_mem_size (const insn_template
*t
, unsigned int wanted
,
2111 return (match_operand_size (t
, wanted
, given
)
2112 && !((i
.types
[given
].bitfield
.unspecified
2114 && !t
->operand_types
[wanted
].bitfield
.unspecified
)
2115 || (i
.types
[given
].bitfield
.fword
2116 && !t
->operand_types
[wanted
].bitfield
.fword
)
2117 /* For scalar opcode templates to allow register and memory
2118 operands at the same time, some special casing is needed
2119 here. Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
2120 down-conversion vpmov*. */
2121 || ((t
->operand_types
[wanted
].bitfield
.class == RegSIMD
2122 && !t
->opcode_modifier
.broadcast
2123 && (t
->operand_types
[wanted
].bitfield
.byte
2124 || t
->operand_types
[wanted
].bitfield
.word
2125 || t
->operand_types
[wanted
].bitfield
.dword
2126 || t
->operand_types
[wanted
].bitfield
.qword
))
2127 ? (i
.types
[given
].bitfield
.xmmword
2128 || i
.types
[given
].bitfield
.ymmword
2129 || i
.types
[given
].bitfield
.zmmword
)
2130 : !match_simd_size(t
, wanted
, given
))));
2133 /* Return value has MATCH_STRAIGHT set if there is no size conflict on any
2134 operands for instruction template T, and it has MATCH_REVERSE set if there
2135 is no size conflict on any operands for the template with operands reversed
2136 (and the template allows for reversing in the first place). */
2138 #define MATCH_STRAIGHT 1
2139 #define MATCH_REVERSE 2
2141 static INLINE
unsigned int
2142 operand_size_match (const insn_template
*t
)
2144 unsigned int j
, match
= MATCH_STRAIGHT
;
2146 /* Don't check non-absolute jump instructions. */
2147 if (t
->opcode_modifier
.jump
2148 && t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
2151 /* Check memory and accumulator operand size. */
2152 for (j
= 0; j
< i
.operands
; j
++)
2154 if (i
.types
[j
].bitfield
.class != Reg
2155 && i
.types
[j
].bitfield
.class != RegSIMD
2156 && t
->opcode_modifier
.anysize
)
2159 if (t
->operand_types
[j
].bitfield
.class == Reg
2160 && !match_operand_size (t
, j
, j
))
2166 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2167 && !match_simd_size (t
, j
, j
))
2173 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2174 && (!match_operand_size (t
, j
, j
) || !match_simd_size (t
, j
, j
)))
2180 if ((i
.flags
[j
] & Operand_Mem
) && !match_mem_size (t
, j
, j
))
2187 if (!t
->opcode_modifier
.d
)
2191 i
.error
= operand_size_mismatch
;
2195 /* Check reverse. */
2196 gas_assert (i
.operands
>= 2 && i
.operands
<= 3);
2198 for (j
= 0; j
< i
.operands
; j
++)
2200 unsigned int given
= i
.operands
- j
- 1;
2202 if (t
->operand_types
[j
].bitfield
.class == Reg
2203 && !match_operand_size (t
, j
, given
))
2206 if (t
->operand_types
[j
].bitfield
.class == RegSIMD
2207 && !match_simd_size (t
, j
, given
))
2210 if (t
->operand_types
[j
].bitfield
.instance
== Accum
2211 && (!match_operand_size (t
, j
, given
)
2212 || !match_simd_size (t
, j
, given
)))
2215 if ((i
.flags
[given
] & Operand_Mem
) && !match_mem_size (t
, j
, given
))
2219 return match
| MATCH_REVERSE
;
2223 operand_type_match (i386_operand_type overlap
,
2224 i386_operand_type given
)
2226 i386_operand_type temp
= overlap
;
2228 temp
.bitfield
.unspecified
= 0;
2229 temp
.bitfield
.byte
= 0;
2230 temp
.bitfield
.word
= 0;
2231 temp
.bitfield
.dword
= 0;
2232 temp
.bitfield
.fword
= 0;
2233 temp
.bitfield
.qword
= 0;
2234 temp
.bitfield
.tbyte
= 0;
2235 temp
.bitfield
.xmmword
= 0;
2236 temp
.bitfield
.ymmword
= 0;
2237 temp
.bitfield
.zmmword
= 0;
2238 if (operand_type_all_zero (&temp
))
2241 if (given
.bitfield
.baseindex
== overlap
.bitfield
.baseindex
)
2245 i
.error
= operand_type_mismatch
;
2249 /* If given types g0 and g1 are registers they must be of the same type
2250 unless the expected operand type register overlap is null.
2251 Memory operand size of certain SIMD instructions is also being checked
2255 operand_type_register_match (i386_operand_type g0
,
2256 i386_operand_type t0
,
2257 i386_operand_type g1
,
2258 i386_operand_type t1
)
2260 if (g0
.bitfield
.class != Reg
2261 && g0
.bitfield
.class != RegSIMD
2262 && (!operand_type_check (g0
, anymem
)
2263 || g0
.bitfield
.unspecified
2264 || t0
.bitfield
.class != RegSIMD
))
2267 if (g1
.bitfield
.class != Reg
2268 && g1
.bitfield
.class != RegSIMD
2269 && (!operand_type_check (g1
, anymem
)
2270 || g1
.bitfield
.unspecified
2271 || t1
.bitfield
.class != RegSIMD
))
2274 if (g0
.bitfield
.byte
== g1
.bitfield
.byte
2275 && g0
.bitfield
.word
== g1
.bitfield
.word
2276 && g0
.bitfield
.dword
== g1
.bitfield
.dword
2277 && g0
.bitfield
.qword
== g1
.bitfield
.qword
2278 && g0
.bitfield
.xmmword
== g1
.bitfield
.xmmword
2279 && g0
.bitfield
.ymmword
== g1
.bitfield
.ymmword
2280 && g0
.bitfield
.zmmword
== g1
.bitfield
.zmmword
)
2283 if (!(t0
.bitfield
.byte
& t1
.bitfield
.byte
)
2284 && !(t0
.bitfield
.word
& t1
.bitfield
.word
)
2285 && !(t0
.bitfield
.dword
& t1
.bitfield
.dword
)
2286 && !(t0
.bitfield
.qword
& t1
.bitfield
.qword
)
2287 && !(t0
.bitfield
.xmmword
& t1
.bitfield
.xmmword
)
2288 && !(t0
.bitfield
.ymmword
& t1
.bitfield
.ymmword
)
2289 && !(t0
.bitfield
.zmmword
& t1
.bitfield
.zmmword
))
2292 i
.error
= register_type_mismatch
;
2297 static INLINE
unsigned int
2298 register_number (const reg_entry
*r
)
2300 unsigned int nr
= r
->reg_num
;
2302 if (r
->reg_flags
& RegRex
)
2305 if (r
->reg_flags
& RegVRex
)
2311 static INLINE
unsigned int
2312 mode_from_disp_size (i386_operand_type t
)
2314 if (t
.bitfield
.disp8
)
2316 else if (t
.bitfield
.disp16
2317 || t
.bitfield
.disp32
2318 || t
.bitfield
.disp32s
)
2325 fits_in_signed_byte (addressT num
)
2327 return num
+ 0x80 <= 0xff;
2331 fits_in_unsigned_byte (addressT num
)
2337 fits_in_unsigned_word (addressT num
)
2339 return num
<= 0xffff;
2343 fits_in_signed_word (addressT num
)
2345 return num
+ 0x8000 <= 0xffff;
2349 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED
)
2354 return num
+ 0x80000000 <= 0xffffffff;
2356 } /* fits_in_signed_long() */
2359 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED
)
2364 return num
<= 0xffffffff;
2366 } /* fits_in_unsigned_long() */
2369 fits_in_disp8 (offsetT num
)
2371 int shift
= i
.memshift
;
2377 mask
= (1 << shift
) - 1;
2379 /* Return 0 if NUM isn't properly aligned. */
2383 /* Check if NUM will fit in 8bit after shift. */
2384 return fits_in_signed_byte (num
>> shift
);
2388 fits_in_imm4 (offsetT num
)
2390 return (num
& 0xf) == num
;
2393 static i386_operand_type
2394 smallest_imm_type (offsetT num
)
2396 i386_operand_type t
;
2398 operand_type_set (&t
, 0);
2399 t
.bitfield
.imm64
= 1;
2401 if (cpu_arch_tune
!= PROCESSOR_I486
&& num
== 1)
2403 /* This code is disabled on the 486 because all the Imm1 forms
2404 in the opcode table are slower on the i486. They're the
2405 versions with the implicitly specified single-position
2406 displacement, which has another syntax if you really want to
2408 t
.bitfield
.imm1
= 1;
2409 t
.bitfield
.imm8
= 1;
2410 t
.bitfield
.imm8s
= 1;
2411 t
.bitfield
.imm16
= 1;
2412 t
.bitfield
.imm32
= 1;
2413 t
.bitfield
.imm32s
= 1;
2415 else if (fits_in_signed_byte (num
))
2417 t
.bitfield
.imm8
= 1;
2418 t
.bitfield
.imm8s
= 1;
2419 t
.bitfield
.imm16
= 1;
2420 t
.bitfield
.imm32
= 1;
2421 t
.bitfield
.imm32s
= 1;
2423 else if (fits_in_unsigned_byte (num
))
2425 t
.bitfield
.imm8
= 1;
2426 t
.bitfield
.imm16
= 1;
2427 t
.bitfield
.imm32
= 1;
2428 t
.bitfield
.imm32s
= 1;
2430 else if (fits_in_signed_word (num
) || fits_in_unsigned_word (num
))
2432 t
.bitfield
.imm16
= 1;
2433 t
.bitfield
.imm32
= 1;
2434 t
.bitfield
.imm32s
= 1;
2436 else if (fits_in_signed_long (num
))
2438 t
.bitfield
.imm32
= 1;
2439 t
.bitfield
.imm32s
= 1;
2441 else if (fits_in_unsigned_long (num
))
2442 t
.bitfield
.imm32
= 1;
2448 offset_in_range (offsetT val
, int size
)
2454 case 1: mask
= ((addressT
) 1 << 8) - 1; break;
2455 case 2: mask
= ((addressT
) 1 << 16) - 1; break;
2456 case 4: mask
= ((addressT
) 2 << 31) - 1; break;
2458 case 8: mask
= ((addressT
) 2 << 63) - 1; break;
2464 /* If BFD64, sign extend val for 32bit address mode. */
2465 if (flag_code
!= CODE_64BIT
2466 || i
.prefix
[ADDR_PREFIX
])
2467 if ((val
& ~(((addressT
) 2 << 31) - 1)) == 0)
2468 val
= (val
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
2471 if ((val
& ~mask
) != 0 && (val
& ~mask
) != ~mask
)
2473 char buf1
[40], buf2
[40];
2475 sprint_value (buf1
, val
);
2476 sprint_value (buf2
, val
& mask
);
2477 as_warn (_("%s shortened to %s"), buf1
, buf2
);
2492 a. PREFIX_EXIST if attempting to add a prefix where one from the
2493 same class already exists.
2494 b. PREFIX_LOCK if lock prefix is added.
2495 c. PREFIX_REP if rep/repne prefix is added.
2496 d. PREFIX_DS if ds prefix is added.
2497 e. PREFIX_OTHER if other prefix is added.
2500 static enum PREFIX_GROUP
2501 add_prefix (unsigned int prefix
)
2503 enum PREFIX_GROUP ret
= PREFIX_OTHER
;
2506 if (prefix
>= REX_OPCODE
&& prefix
< REX_OPCODE
+ 16
2507 && flag_code
== CODE_64BIT
)
2509 if ((i
.prefix
[REX_PREFIX
] & prefix
& REX_W
)
2510 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_R
)
2511 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_X
)
2512 || (i
.prefix
[REX_PREFIX
] & prefix
& REX_B
))
2523 case DS_PREFIX_OPCODE
:
2526 case CS_PREFIX_OPCODE
:
2527 case ES_PREFIX_OPCODE
:
2528 case FS_PREFIX_OPCODE
:
2529 case GS_PREFIX_OPCODE
:
2530 case SS_PREFIX_OPCODE
:
2534 case REPNE_PREFIX_OPCODE
:
2535 case REPE_PREFIX_OPCODE
:
2540 case LOCK_PREFIX_OPCODE
:
2549 case ADDR_PREFIX_OPCODE
:
2553 case DATA_PREFIX_OPCODE
:
2557 if (i
.prefix
[q
] != 0)
2565 i
.prefix
[q
] |= prefix
;
2568 as_bad (_("same type of prefix used twice"));
2574 update_code_flag (int value
, int check
)
2576 PRINTF_LIKE ((*as_error
));
2578 flag_code
= (enum flag_code
) value
;
2579 if (flag_code
== CODE_64BIT
)
2581 cpu_arch_flags
.bitfield
.cpu64
= 1;
2582 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2586 cpu_arch_flags
.bitfield
.cpu64
= 0;
2587 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2589 if (value
== CODE_64BIT
&& !cpu_arch_flags
.bitfield
.cpulm
)
2592 as_error
= as_fatal
;
2595 (*as_error
) (_("64bit mode not supported on `%s'."),
2596 cpu_arch_name
? cpu_arch_name
: default_arch
);
2598 if (value
== CODE_32BIT
&& !cpu_arch_flags
.bitfield
.cpui386
)
2601 as_error
= as_fatal
;
2604 (*as_error
) (_("32bit mode not supported on `%s'."),
2605 cpu_arch_name
? cpu_arch_name
: default_arch
);
2607 stackop_size
= '\0';
2611 set_code_flag (int value
)
2613 update_code_flag (value
, 0);
2617 set_16bit_gcc_code_flag (int new_code_flag
)
2619 flag_code
= (enum flag_code
) new_code_flag
;
2620 if (flag_code
!= CODE_16BIT
)
2622 cpu_arch_flags
.bitfield
.cpu64
= 0;
2623 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2624 stackop_size
= LONG_MNEM_SUFFIX
;
2628 set_intel_syntax (int syntax_flag
)
2630 /* Find out if register prefixing is specified. */
2631 int ask_naked_reg
= 0;
2634 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2637 int e
= get_symbol_name (&string
);
2639 if (strcmp (string
, "prefix") == 0)
2641 else if (strcmp (string
, "noprefix") == 0)
2644 as_bad (_("bad argument to syntax directive."));
2645 (void) restore_line_pointer (e
);
2647 demand_empty_rest_of_line ();
2649 intel_syntax
= syntax_flag
;
2651 if (ask_naked_reg
== 0)
2652 allow_naked_reg
= (intel_syntax
2653 && (bfd_get_symbol_leading_char (stdoutput
) != '\0'));
2655 allow_naked_reg
= (ask_naked_reg
< 0);
2657 expr_set_rank (O_full_ptr
, syntax_flag
? 10 : 0);
2659 identifier_chars
['%'] = intel_syntax
&& allow_naked_reg
? '%' : 0;
2660 identifier_chars
['$'] = intel_syntax
? '$' : 0;
2661 register_prefix
= allow_naked_reg
? "" : "%";
2665 set_intel_mnemonic (int mnemonic_flag
)
2667 intel_mnemonic
= mnemonic_flag
;
2671 set_allow_index_reg (int flag
)
2673 allow_index_reg
= flag
;
2677 set_check (int what
)
2679 enum check_kind
*kind
;
2684 kind
= &operand_check
;
2695 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2698 int e
= get_symbol_name (&string
);
2700 if (strcmp (string
, "none") == 0)
2702 else if (strcmp (string
, "warning") == 0)
2703 *kind
= check_warning
;
2704 else if (strcmp (string
, "error") == 0)
2705 *kind
= check_error
;
2707 as_bad (_("bad argument to %s_check directive."), str
);
2708 (void) restore_line_pointer (e
);
2711 as_bad (_("missing argument for %s_check directive"), str
);
2713 demand_empty_rest_of_line ();
2717 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED
,
2718 i386_cpu_flags new_flag ATTRIBUTE_UNUSED
)
2720 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2721 static const char *arch
;
2723 /* Intel LIOM is only supported on ELF. */
2729 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2730 use default_arch. */
2731 arch
= cpu_arch_name
;
2733 arch
= default_arch
;
2736 /* If we are targeting Intel MCU, we must enable it. */
2737 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_IAMCU
2738 || new_flag
.bitfield
.cpuiamcu
)
2741 /* If we are targeting Intel L1OM, we must enable it. */
2742 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_L1OM
2743 || new_flag
.bitfield
.cpul1om
)
2746 /* If we are targeting Intel K1OM, we must enable it. */
2747 if (get_elf_backend_data (stdoutput
)->elf_machine_code
!= EM_K1OM
2748 || new_flag
.bitfield
.cpuk1om
)
2751 as_bad (_("`%s' is not supported on `%s'"), name
, arch
);
2756 set_cpu_arch (int dummy ATTRIBUTE_UNUSED
)
2760 if (!is_end_of_line
[(unsigned char) *input_line_pointer
])
2763 int e
= get_symbol_name (&string
);
2765 i386_cpu_flags flags
;
2767 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
2769 if (strcmp (string
, cpu_arch
[j
].name
) == 0)
2771 check_cpu_arch_compatible (string
, cpu_arch
[j
].flags
);
2775 cpu_arch_name
= cpu_arch
[j
].name
;
2776 cpu_sub_arch_name
= NULL
;
2777 cpu_arch_flags
= cpu_arch
[j
].flags
;
2778 if (flag_code
== CODE_64BIT
)
2780 cpu_arch_flags
.bitfield
.cpu64
= 1;
2781 cpu_arch_flags
.bitfield
.cpuno64
= 0;
2785 cpu_arch_flags
.bitfield
.cpu64
= 0;
2786 cpu_arch_flags
.bitfield
.cpuno64
= 1;
2788 cpu_arch_isa
= cpu_arch
[j
].type
;
2789 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
2790 if (!cpu_arch_tune_set
)
2792 cpu_arch_tune
= cpu_arch_isa
;
2793 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
2798 flags
= cpu_flags_or (cpu_arch_flags
,
2801 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2803 if (cpu_sub_arch_name
)
2805 char *name
= cpu_sub_arch_name
;
2806 cpu_sub_arch_name
= concat (name
,
2808 (const char *) NULL
);
2812 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
2813 cpu_arch_flags
= flags
;
2814 cpu_arch_isa_flags
= flags
;
2818 = cpu_flags_or (cpu_arch_isa_flags
,
2820 (void) restore_line_pointer (e
);
2821 demand_empty_rest_of_line ();
2826 if (*string
== '.' && j
>= ARRAY_SIZE (cpu_arch
))
2828 /* Disable an ISA extension. */
2829 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
2830 if (strcmp (string
+ 1, cpu_noarch
[j
].name
) == 0)
2832 flags
= cpu_flags_and_not (cpu_arch_flags
,
2833 cpu_noarch
[j
].flags
);
2834 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
2836 if (cpu_sub_arch_name
)
2838 char *name
= cpu_sub_arch_name
;
2839 cpu_sub_arch_name
= concat (name
, string
,
2840 (const char *) NULL
);
2844 cpu_sub_arch_name
= xstrdup (string
);
2845 cpu_arch_flags
= flags
;
2846 cpu_arch_isa_flags
= flags
;
2848 (void) restore_line_pointer (e
);
2849 demand_empty_rest_of_line ();
2853 j
= ARRAY_SIZE (cpu_arch
);
2856 if (j
>= ARRAY_SIZE (cpu_arch
))
2857 as_bad (_("no such architecture: `%s'"), string
);
2859 *input_line_pointer
= e
;
2862 as_bad (_("missing cpu architecture"));
2864 no_cond_jump_promotion
= 0;
2865 if (*input_line_pointer
== ','
2866 && !is_end_of_line
[(unsigned char) input_line_pointer
[1]])
2871 ++input_line_pointer
;
2872 e
= get_symbol_name (&string
);
2874 if (strcmp (string
, "nojumps") == 0)
2875 no_cond_jump_promotion
= 1;
2876 else if (strcmp (string
, "jumps") == 0)
2879 as_bad (_("no such architecture modifier: `%s'"), string
);
2881 (void) restore_line_pointer (e
);
2884 demand_empty_rest_of_line ();
2887 enum bfd_architecture
2890 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2892 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2893 || flag_code
!= CODE_64BIT
)
2894 as_fatal (_("Intel L1OM is 64bit ELF only"));
2895 return bfd_arch_l1om
;
2897 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2899 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2900 || flag_code
!= CODE_64BIT
)
2901 as_fatal (_("Intel K1OM is 64bit ELF only"));
2902 return bfd_arch_k1om
;
2904 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2906 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2907 || flag_code
== CODE_64BIT
)
2908 as_fatal (_("Intel MCU is 32bit ELF only"));
2909 return bfd_arch_iamcu
;
2912 return bfd_arch_i386
;
2918 if (!strncmp (default_arch
, "x86_64", 6))
2920 if (cpu_arch_isa
== PROCESSOR_L1OM
)
2922 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2923 || default_arch
[6] != '\0')
2924 as_fatal (_("Intel L1OM is 64bit ELF only"));
2925 return bfd_mach_l1om
;
2927 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
2929 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
2930 || default_arch
[6] != '\0')
2931 as_fatal (_("Intel K1OM is 64bit ELF only"));
2932 return bfd_mach_k1om
;
2934 else if (default_arch
[6] == '\0')
2935 return bfd_mach_x86_64
;
2937 return bfd_mach_x64_32
;
2939 else if (!strcmp (default_arch
, "i386")
2940 || !strcmp (default_arch
, "iamcu"))
2942 if (cpu_arch_isa
== PROCESSOR_IAMCU
)
2944 if (OUTPUT_FLAVOR
!= bfd_target_elf_flavour
)
2945 as_fatal (_("Intel MCU is 32bit ELF only"));
2946 return bfd_mach_i386_iamcu
;
2949 return bfd_mach_i386_i386
;
2952 as_fatal (_("unknown architecture"));
2958 const char *hash_err
;
2960 /* Support pseudo prefixes like {disp32}. */
2961 lex_type
['{'] = LEX_BEGIN_NAME
;
2963 /* Initialize op_hash hash table. */
2964 op_hash
= hash_new ();
2967 const insn_template
*optab
;
2968 templates
*core_optab
;
2970 /* Setup for loop. */
2972 core_optab
= XNEW (templates
);
2973 core_optab
->start
= optab
;
2978 if (optab
->name
== NULL
2979 || strcmp (optab
->name
, (optab
- 1)->name
) != 0)
2981 /* different name --> ship out current template list;
2982 add to hash table; & begin anew. */
2983 core_optab
->end
= optab
;
2984 hash_err
= hash_insert (op_hash
,
2986 (void *) core_optab
);
2989 as_fatal (_("can't hash %s: %s"),
2993 if (optab
->name
== NULL
)
2995 core_optab
= XNEW (templates
);
2996 core_optab
->start
= optab
;
3001 /* Initialize reg_hash hash table. */
3002 reg_hash
= hash_new ();
3004 const reg_entry
*regtab
;
3005 unsigned int regtab_size
= i386_regtab_size
;
3007 for (regtab
= i386_regtab
; regtab_size
--; regtab
++)
3009 hash_err
= hash_insert (reg_hash
, regtab
->reg_name
, (void *) regtab
);
3011 as_fatal (_("can't hash %s: %s"),
3017 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
3022 for (c
= 0; c
< 256; c
++)
3027 mnemonic_chars
[c
] = c
;
3028 register_chars
[c
] = c
;
3029 operand_chars
[c
] = c
;
3031 else if (ISLOWER (c
))
3033 mnemonic_chars
[c
] = c
;
3034 register_chars
[c
] = c
;
3035 operand_chars
[c
] = c
;
3037 else if (ISUPPER (c
))
3039 mnemonic_chars
[c
] = TOLOWER (c
);
3040 register_chars
[c
] = mnemonic_chars
[c
];
3041 operand_chars
[c
] = c
;
3043 else if (c
== '{' || c
== '}')
3045 mnemonic_chars
[c
] = c
;
3046 operand_chars
[c
] = c
;
3049 if (ISALPHA (c
) || ISDIGIT (c
))
3050 identifier_chars
[c
] = c
;
3053 identifier_chars
[c
] = c
;
3054 operand_chars
[c
] = c
;
3059 identifier_chars
['@'] = '@';
3062 identifier_chars
['?'] = '?';
3063 operand_chars
['?'] = '?';
3065 digit_chars
['-'] = '-';
3066 mnemonic_chars
['_'] = '_';
3067 mnemonic_chars
['-'] = '-';
3068 mnemonic_chars
['.'] = '.';
3069 identifier_chars
['_'] = '_';
3070 identifier_chars
['.'] = '.';
3072 for (p
= operand_special_chars
; *p
!= '\0'; p
++)
3073 operand_chars
[(unsigned char) *p
] = *p
;
3076 if (flag_code
== CODE_64BIT
)
3078 #if defined (OBJ_COFF) && defined (TE_PE)
3079 x86_dwarf2_return_column
= (OUTPUT_FLAVOR
== bfd_target_coff_flavour
3082 x86_dwarf2_return_column
= 16;
3084 x86_cie_data_alignment
= -8;
3088 x86_dwarf2_return_column
= 8;
3089 x86_cie_data_alignment
= -4;
3092 /* NB: FUSED_JCC_PADDING frag must have sufficient room so that it
3093 can be turned into BRANCH_PREFIX frag. */
3094 if (align_branch_prefix_size
> MAX_FUSED_JCC_PADDING_SIZE
)
3099 i386_print_statistics (FILE *file
)
3101 hash_print_statistics (file
, "i386 opcode", op_hash
);
3102 hash_print_statistics (file
, "i386 register", reg_hash
);
3107 /* Debugging routines for md_assemble. */
3108 static void pte (insn_template
*);
3109 static void pt (i386_operand_type
);
3110 static void pe (expressionS
*);
3111 static void ps (symbolS
*);
3114 pi (const char *line
, i386_insn
*x
)
3118 fprintf (stdout
, "%s: template ", line
);
3120 fprintf (stdout
, " address: base %s index %s scale %x\n",
3121 x
->base_reg
? x
->base_reg
->reg_name
: "none",
3122 x
->index_reg
? x
->index_reg
->reg_name
: "none",
3123 x
->log2_scale_factor
);
3124 fprintf (stdout
, " modrm: mode %x reg %x reg/mem %x\n",
3125 x
->rm
.mode
, x
->rm
.reg
, x
->rm
.regmem
);
3126 fprintf (stdout
, " sib: base %x index %x scale %x\n",
3127 x
->sib
.base
, x
->sib
.index
, x
->sib
.scale
);
3128 fprintf (stdout
, " rex: 64bit %x extX %x extY %x extZ %x\n",
3129 (x
->rex
& REX_W
) != 0,
3130 (x
->rex
& REX_R
) != 0,
3131 (x
->rex
& REX_X
) != 0,
3132 (x
->rex
& REX_B
) != 0);
3133 for (j
= 0; j
< x
->operands
; j
++)
3135 fprintf (stdout
, " #%d: ", j
+ 1);
3137 fprintf (stdout
, "\n");
3138 if (x
->types
[j
].bitfield
.class == Reg
3139 || x
->types
[j
].bitfield
.class == RegMMX
3140 || x
->types
[j
].bitfield
.class == RegSIMD
3141 || x
->types
[j
].bitfield
.class == SReg
3142 || x
->types
[j
].bitfield
.class == RegCR
3143 || x
->types
[j
].bitfield
.class == RegDR
3144 || x
->types
[j
].bitfield
.class == RegTR
)
3145 fprintf (stdout
, "%s\n", x
->op
[j
].regs
->reg_name
);
3146 if (operand_type_check (x
->types
[j
], imm
))
3148 if (operand_type_check (x
->types
[j
], disp
))
3149 pe (x
->op
[j
].disps
);
3154 pte (insn_template
*t
)
3157 fprintf (stdout
, " %d operands ", t
->operands
);
3158 fprintf (stdout
, "opcode %x ", t
->base_opcode
);
3159 if (t
->extension_opcode
!= None
)
3160 fprintf (stdout
, "ext %x ", t
->extension_opcode
);
3161 if (t
->opcode_modifier
.d
)
3162 fprintf (stdout
, "D");
3163 if (t
->opcode_modifier
.w
)
3164 fprintf (stdout
, "W");
3165 fprintf (stdout
, "\n");
3166 for (j
= 0; j
< t
->operands
; j
++)
3168 fprintf (stdout
, " #%d type ", j
+ 1);
3169 pt (t
->operand_types
[j
]);
3170 fprintf (stdout
, "\n");
3177 fprintf (stdout
, " operation %d\n", e
->X_op
);
3178 fprintf (stdout
, " add_number %ld (%lx)\n",
3179 (long) e
->X_add_number
, (long) e
->X_add_number
);
3180 if (e
->X_add_symbol
)
3182 fprintf (stdout
, " add_symbol ");
3183 ps (e
->X_add_symbol
);
3184 fprintf (stdout
, "\n");
3188 fprintf (stdout
, " op_symbol ");
3189 ps (e
->X_op_symbol
);
3190 fprintf (stdout
, "\n");
3197 fprintf (stdout
, "%s type %s%s",
3199 S_IS_EXTERNAL (s
) ? "EXTERNAL " : "",
3200 segment_name (S_GET_SEGMENT (s
)));
3203 static struct type_name
3205 i386_operand_type mask
;
3208 const type_names
[] =
3210 { OPERAND_TYPE_REG8
, "r8" },
3211 { OPERAND_TYPE_REG16
, "r16" },
3212 { OPERAND_TYPE_REG32
, "r32" },
3213 { OPERAND_TYPE_REG64
, "r64" },
3214 { OPERAND_TYPE_ACC8
, "acc8" },
3215 { OPERAND_TYPE_ACC16
, "acc16" },
3216 { OPERAND_TYPE_ACC32
, "acc32" },
3217 { OPERAND_TYPE_ACC64
, "acc64" },
3218 { OPERAND_TYPE_IMM8
, "i8" },
3219 { OPERAND_TYPE_IMM8
, "i8s" },
3220 { OPERAND_TYPE_IMM16
, "i16" },
3221 { OPERAND_TYPE_IMM32
, "i32" },
3222 { OPERAND_TYPE_IMM32S
, "i32s" },
3223 { OPERAND_TYPE_IMM64
, "i64" },
3224 { OPERAND_TYPE_IMM1
, "i1" },
3225 { OPERAND_TYPE_BASEINDEX
, "BaseIndex" },
3226 { OPERAND_TYPE_DISP8
, "d8" },
3227 { OPERAND_TYPE_DISP16
, "d16" },
3228 { OPERAND_TYPE_DISP32
, "d32" },
3229 { OPERAND_TYPE_DISP32S
, "d32s" },
3230 { OPERAND_TYPE_DISP64
, "d64" },
3231 { OPERAND_TYPE_INOUTPORTREG
, "InOutPortReg" },
3232 { OPERAND_TYPE_SHIFTCOUNT
, "ShiftCount" },
3233 { OPERAND_TYPE_CONTROL
, "control reg" },
3234 { OPERAND_TYPE_TEST
, "test reg" },
3235 { OPERAND_TYPE_DEBUG
, "debug reg" },
3236 { OPERAND_TYPE_FLOATREG
, "FReg" },
3237 { OPERAND_TYPE_FLOATACC
, "FAcc" },
3238 { OPERAND_TYPE_SREG
, "SReg" },
3239 { OPERAND_TYPE_REGMMX
, "rMMX" },
3240 { OPERAND_TYPE_REGXMM
, "rXMM" },
3241 { OPERAND_TYPE_REGYMM
, "rYMM" },
3242 { OPERAND_TYPE_REGZMM
, "rZMM" },
3243 { OPERAND_TYPE_REGMASK
, "Mask reg" },
3247 pt (i386_operand_type t
)
3250 i386_operand_type a
;
3252 for (j
= 0; j
< ARRAY_SIZE (type_names
); j
++)
3254 a
= operand_type_and (t
, type_names
[j
].mask
);
3255 if (operand_type_equal (&a
, &type_names
[j
].mask
))
3256 fprintf (stdout
, "%s, ", type_names
[j
].name
);
3261 #endif /* DEBUG386 */
3263 static bfd_reloc_code_real_type
3264 reloc (unsigned int size
,
3267 bfd_reloc_code_real_type other
)
3269 if (other
!= NO_RELOC
)
3271 reloc_howto_type
*rel
;
3276 case BFD_RELOC_X86_64_GOT32
:
3277 return BFD_RELOC_X86_64_GOT64
;
3279 case BFD_RELOC_X86_64_GOTPLT64
:
3280 return BFD_RELOC_X86_64_GOTPLT64
;
3282 case BFD_RELOC_X86_64_PLTOFF64
:
3283 return BFD_RELOC_X86_64_PLTOFF64
;
3285 case BFD_RELOC_X86_64_GOTPC32
:
3286 other
= BFD_RELOC_X86_64_GOTPC64
;
3288 case BFD_RELOC_X86_64_GOTPCREL
:
3289 other
= BFD_RELOC_X86_64_GOTPCREL64
;
3291 case BFD_RELOC_X86_64_TPOFF32
:
3292 other
= BFD_RELOC_X86_64_TPOFF64
;
3294 case BFD_RELOC_X86_64_DTPOFF32
:
3295 other
= BFD_RELOC_X86_64_DTPOFF64
;
3301 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3302 if (other
== BFD_RELOC_SIZE32
)
3305 other
= BFD_RELOC_SIZE64
;
3308 as_bad (_("there are no pc-relative size relocations"));
3314 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
3315 if (size
== 4 && (flag_code
!= CODE_64BIT
|| disallow_64bit_reloc
))
3318 rel
= bfd_reloc_type_lookup (stdoutput
, other
);
3320 as_bad (_("unknown relocation (%u)"), other
);
3321 else if (size
!= bfd_get_reloc_size (rel
))
3322 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3323 bfd_get_reloc_size (rel
),
3325 else if (pcrel
&& !rel
->pc_relative
)
3326 as_bad (_("non-pc-relative relocation for pc-relative field"));
3327 else if ((rel
->complain_on_overflow
== complain_overflow_signed
3329 || (rel
->complain_on_overflow
== complain_overflow_unsigned
3331 as_bad (_("relocated field and relocation type differ in signedness"));
3340 as_bad (_("there are no unsigned pc-relative relocations"));
3343 case 1: return BFD_RELOC_8_PCREL
;
3344 case 2: return BFD_RELOC_16_PCREL
;
3345 case 4: return BFD_RELOC_32_PCREL
;
3346 case 8: return BFD_RELOC_64_PCREL
;
3348 as_bad (_("cannot do %u byte pc-relative relocation"), size
);
3355 case 4: return BFD_RELOC_X86_64_32S
;
3360 case 1: return BFD_RELOC_8
;
3361 case 2: return BFD_RELOC_16
;
3362 case 4: return BFD_RELOC_32
;
3363 case 8: return BFD_RELOC_64
;
3365 as_bad (_("cannot do %s %u byte relocation"),
3366 sign
> 0 ? "signed" : "unsigned", size
);
3372 /* Here we decide which fixups can be adjusted to make them relative to
3373 the beginning of the section instead of the symbol. Basically we need
3374 to make sure that the dynamic relocations are done correctly, so in
3375 some cases we force the original symbol to be used. */
3378 tc_i386_fix_adjustable (fixS
*fixP ATTRIBUTE_UNUSED
)
3380 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3384 /* Don't adjust pc-relative references to merge sections in 64-bit
3386 if (use_rela_relocations
3387 && (S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_MERGE
) != 0
3391 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3392 and changed later by validate_fix. */
3393 if (GOT_symbol
&& fixP
->fx_subsy
== GOT_symbol
3394 && fixP
->fx_r_type
== BFD_RELOC_32_PCREL
)
3397 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3398 for size relocations. */
3399 if (fixP
->fx_r_type
== BFD_RELOC_SIZE32
3400 || fixP
->fx_r_type
== BFD_RELOC_SIZE64
3401 || fixP
->fx_r_type
== BFD_RELOC_386_GOTOFF
3402 || fixP
->fx_r_type
== BFD_RELOC_386_PLT32
3403 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32
3404 || fixP
->fx_r_type
== BFD_RELOC_386_GOT32X
3405 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GD
3406 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDM
3407 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LDO_32
3408 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE_32
3409 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_IE
3410 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTIE
3411 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE_32
3412 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_LE
3413 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_GOTDESC
3414 || fixP
->fx_r_type
== BFD_RELOC_386_TLS_DESC_CALL
3415 || fixP
->fx_r_type
== BFD_RELOC_X86_64_PLT32
3416 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOT32
3417 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCREL
3418 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPCRELX
3419 || fixP
->fx_r_type
== BFD_RELOC_X86_64_REX_GOTPCRELX
3420 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSGD
3421 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSLD
3422 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF32
3423 || fixP
->fx_r_type
== BFD_RELOC_X86_64_DTPOFF64
3424 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTTPOFF
3425 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF32
3426 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TPOFF64
3427 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTOFF64
3428 || fixP
->fx_r_type
== BFD_RELOC_X86_64_GOTPC32_TLSDESC
3429 || fixP
->fx_r_type
== BFD_RELOC_X86_64_TLSDESC_CALL
3430 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
3431 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
3438 intel_float_operand (const char *mnemonic
)
3440 /* Note that the value returned is meaningful only for opcodes with (memory)
3441 operands, hence the code here is free to improperly handle opcodes that
3442 have no operands (for better performance and smaller code). */
3444 if (mnemonic
[0] != 'f')
3445 return 0; /* non-math */
3447 switch (mnemonic
[1])
3449 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
3450 the fs segment override prefix not currently handled because no
3451 call path can make opcodes without operands get here */
3453 return 2 /* integer op */;
3455 if (mnemonic
[2] == 'd' && (mnemonic
[3] == 'c' || mnemonic
[3] == 'e'))
3456 return 3; /* fldcw/fldenv */
3459 if (mnemonic
[2] != 'o' /* fnop */)
3460 return 3; /* non-waiting control op */
3463 if (mnemonic
[2] == 's')
3464 return 3; /* frstor/frstpm */
3467 if (mnemonic
[2] == 'a')
3468 return 3; /* fsave */
3469 if (mnemonic
[2] == 't')
3471 switch (mnemonic
[3])
3473 case 'c': /* fstcw */
3474 case 'd': /* fstdw */
3475 case 'e': /* fstenv */
3476 case 's': /* fsts[gw] */
3482 if (mnemonic
[2] == 'r' || mnemonic
[2] == 's')
3483 return 0; /* fxsave/fxrstor are not really math ops */
3490 /* Build the VEX prefix. */
3493 build_vex_prefix (const insn_template
*t
)
3495 unsigned int register_specifier
;
3496 unsigned int implied_prefix
;
3497 unsigned int vector_length
;
3500 /* Check register specifier. */
3501 if (i
.vex
.register_specifier
)
3503 register_specifier
=
3504 ~register_number (i
.vex
.register_specifier
) & 0xf;
3505 gas_assert ((i
.vex
.register_specifier
->reg_flags
& RegVRex
) == 0);
3508 register_specifier
= 0xf;
3510 /* Use 2-byte VEX prefix by swapping destination and source operand
3511 if there are more than 1 register operand. */
3512 if (i
.reg_operands
> 1
3513 && i
.vec_encoding
!= vex_encoding_vex3
3514 && i
.dir_encoding
== dir_encoding_default
3515 && i
.operands
== i
.reg_operands
3516 && operand_type_equal (&i
.types
[0], &i
.types
[i
.operands
- 1])
3517 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3518 && (i
.tm
.opcode_modifier
.load
|| i
.tm
.opcode_modifier
.d
)
3521 unsigned int xchg
= i
.operands
- 1;
3522 union i386_op temp_op
;
3523 i386_operand_type temp_type
;
3525 temp_type
= i
.types
[xchg
];
3526 i
.types
[xchg
] = i
.types
[0];
3527 i
.types
[0] = temp_type
;
3528 temp_op
= i
.op
[xchg
];
3529 i
.op
[xchg
] = i
.op
[0];
3532 gas_assert (i
.rm
.mode
== 3);
3536 i
.rm
.regmem
= i
.rm
.reg
;
3539 if (i
.tm
.opcode_modifier
.d
)
3540 i
.tm
.base_opcode
^= (i
.tm
.base_opcode
& 0xee) != 0x6e
3541 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
3542 else /* Use the next insn. */
3546 /* Use 2-byte VEX prefix by swapping commutative source operands if there
3547 are no memory operands and at least 3 register ones. */
3548 if (i
.reg_operands
>= 3
3549 && i
.vec_encoding
!= vex_encoding_vex3
3550 && i
.reg_operands
== i
.operands
- i
.imm_operands
3551 && i
.tm
.opcode_modifier
.vex
3552 && i
.tm
.opcode_modifier
.commutative
3553 && (i
.tm
.opcode_modifier
.sse2avx
|| optimize
> 1)
3555 && i
.vex
.register_specifier
3556 && !(i
.vex
.register_specifier
->reg_flags
& RegRex
))
3558 unsigned int xchg
= i
.operands
- i
.reg_operands
;
3559 union i386_op temp_op
;
3560 i386_operand_type temp_type
;
3562 gas_assert (i
.tm
.opcode_modifier
.vexopcode
== VEX0F
);
3563 gas_assert (!i
.tm
.opcode_modifier
.sae
);
3564 gas_assert (operand_type_equal (&i
.types
[i
.operands
- 2],
3565 &i
.types
[i
.operands
- 3]));
3566 gas_assert (i
.rm
.mode
== 3);
3568 temp_type
= i
.types
[xchg
];
3569 i
.types
[xchg
] = i
.types
[xchg
+ 1];
3570 i
.types
[xchg
+ 1] = temp_type
;
3571 temp_op
= i
.op
[xchg
];
3572 i
.op
[xchg
] = i
.op
[xchg
+ 1];
3573 i
.op
[xchg
+ 1] = temp_op
;
3576 xchg
= i
.rm
.regmem
| 8;
3577 i
.rm
.regmem
= ~register_specifier
& 0xf;
3578 gas_assert (!(i
.rm
.regmem
& 8));
3579 i
.vex
.register_specifier
+= xchg
- i
.rm
.regmem
;
3580 register_specifier
= ~xchg
& 0xf;
3583 if (i
.tm
.opcode_modifier
.vex
== VEXScalar
)
3584 vector_length
= avxscalar
;
3585 else if (i
.tm
.opcode_modifier
.vex
== VEX256
)
3591 /* Determine vector length from the last multi-length vector
3594 for (op
= t
->operands
; op
--;)
3595 if (t
->operand_types
[op
].bitfield
.xmmword
3596 && t
->operand_types
[op
].bitfield
.ymmword
3597 && i
.types
[op
].bitfield
.ymmword
)
3604 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3609 case DATA_PREFIX_OPCODE
:
3612 case REPE_PREFIX_OPCODE
:
3615 case REPNE_PREFIX_OPCODE
:
3622 /* Check the REX.W bit and VEXW. */
3623 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3624 w
= (vexwig
== vexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3625 else if (i
.tm
.opcode_modifier
.vexw
)
3626 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3628 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: vexwig
== vexw1
) ? 1 : 0;
3630 /* Use 2-byte VEX prefix if possible. */
3632 && i
.vec_encoding
!= vex_encoding_vex3
3633 && i
.tm
.opcode_modifier
.vexopcode
== VEX0F
3634 && (i
.rex
& (REX_W
| REX_X
| REX_B
)) == 0)
3636 /* 2-byte VEX prefix. */
3640 i
.vex
.bytes
[0] = 0xc5;
3642 /* Check the REX.R bit. */
3643 r
= (i
.rex
& REX_R
) ? 0 : 1;
3644 i
.vex
.bytes
[1] = (r
<< 7
3645 | register_specifier
<< 3
3646 | vector_length
<< 2
3651 /* 3-byte VEX prefix. */
3656 switch (i
.tm
.opcode_modifier
.vexopcode
)
3660 i
.vex
.bytes
[0] = 0xc4;
3664 i
.vex
.bytes
[0] = 0xc4;
3668 i
.vex
.bytes
[0] = 0xc4;
3672 i
.vex
.bytes
[0] = 0x8f;
3676 i
.vex
.bytes
[0] = 0x8f;
3680 i
.vex
.bytes
[0] = 0x8f;
3686 /* The high 3 bits of the second VEX byte are 1's compliment
3687 of RXB bits from REX. */
3688 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3690 i
.vex
.bytes
[2] = (w
<< 7
3691 | register_specifier
<< 3
3692 | vector_length
<< 2
3697 static INLINE bfd_boolean
3698 is_evex_encoding (const insn_template
*t
)
3700 return t
->opcode_modifier
.evex
|| t
->opcode_modifier
.disp8memshift
3701 || t
->opcode_modifier
.broadcast
|| t
->opcode_modifier
.masking
3702 || t
->opcode_modifier
.sae
;
3705 static INLINE bfd_boolean
3706 is_any_vex_encoding (const insn_template
*t
)
3708 return t
->opcode_modifier
.vex
|| t
->opcode_modifier
.vexopcode
3709 || is_evex_encoding (t
);
3712 /* Build the EVEX prefix. */
3715 build_evex_prefix (void)
3717 unsigned int register_specifier
;
3718 unsigned int implied_prefix
;
3720 rex_byte vrex_used
= 0;
3722 /* Check register specifier. */
3723 if (i
.vex
.register_specifier
)
3725 gas_assert ((i
.vrex
& REX_X
) == 0);
3727 register_specifier
= i
.vex
.register_specifier
->reg_num
;
3728 if ((i
.vex
.register_specifier
->reg_flags
& RegRex
))
3729 register_specifier
+= 8;
3730 /* The upper 16 registers are encoded in the fourth byte of the
3732 if (!(i
.vex
.register_specifier
->reg_flags
& RegVRex
))
3733 i
.vex
.bytes
[3] = 0x8;
3734 register_specifier
= ~register_specifier
& 0xf;
3738 register_specifier
= 0xf;
3740 /* Encode upper 16 vector index register in the fourth byte of
3742 if (!(i
.vrex
& REX_X
))
3743 i
.vex
.bytes
[3] = 0x8;
3748 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
3753 case DATA_PREFIX_OPCODE
:
3756 case REPE_PREFIX_OPCODE
:
3759 case REPNE_PREFIX_OPCODE
:
3766 /* 4 byte EVEX prefix. */
3768 i
.vex
.bytes
[0] = 0x62;
3771 switch (i
.tm
.opcode_modifier
.vexopcode
)
3787 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3789 i
.vex
.bytes
[1] = (~i
.rex
& 0x7) << 5 | m
;
3791 /* The fifth bit of the second EVEX byte is 1's compliment of the
3792 REX_R bit in VREX. */
3793 if (!(i
.vrex
& REX_R
))
3794 i
.vex
.bytes
[1] |= 0x10;
3798 if ((i
.reg_operands
+ i
.imm_operands
) == i
.operands
)
3800 /* When all operands are registers, the REX_X bit in REX is not
3801 used. We reuse it to encode the upper 16 registers, which is
3802 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3803 as 1's compliment. */
3804 if ((i
.vrex
& REX_B
))
3807 i
.vex
.bytes
[1] &= ~0x40;
3811 /* EVEX instructions shouldn't need the REX prefix. */
3812 i
.vrex
&= ~vrex_used
;
3813 gas_assert (i
.vrex
== 0);
3815 /* Check the REX.W bit and VEXW. */
3816 if (i
.tm
.opcode_modifier
.vexw
== VEXWIG
)
3817 w
= (evexwig
== evexw1
|| (i
.rex
& REX_W
)) ? 1 : 0;
3818 else if (i
.tm
.opcode_modifier
.vexw
)
3819 w
= i
.tm
.opcode_modifier
.vexw
== VEXW1
? 1 : 0;
3821 w
= (flag_code
== CODE_64BIT
? i
.rex
& REX_W
: evexwig
== evexw1
) ? 1 : 0;
3823 /* Encode the U bit. */
3824 implied_prefix
|= 0x4;
3826 /* The third byte of the EVEX prefix. */
3827 i
.vex
.bytes
[2] = (w
<< 7 | register_specifier
<< 3 | implied_prefix
);
3829 /* The fourth byte of the EVEX prefix. */
3830 /* The zeroing-masking bit. */
3831 if (i
.mask
&& i
.mask
->zeroing
)
3832 i
.vex
.bytes
[3] |= 0x80;
3834 /* Don't always set the broadcast bit if there is no RC. */
3837 /* Encode the vector length. */
3838 unsigned int vec_length
;
3840 if (!i
.tm
.opcode_modifier
.evex
3841 || i
.tm
.opcode_modifier
.evex
== EVEXDYN
)
3845 /* Determine vector length from the last multi-length vector
3848 for (op
= i
.operands
; op
--;)
3849 if (i
.tm
.operand_types
[op
].bitfield
.xmmword
3850 + i
.tm
.operand_types
[op
].bitfield
.ymmword
3851 + i
.tm
.operand_types
[op
].bitfield
.zmmword
> 1)
3853 if (i
.types
[op
].bitfield
.zmmword
)
3855 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3858 else if (i
.types
[op
].bitfield
.ymmword
)
3860 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3863 else if (i
.types
[op
].bitfield
.xmmword
)
3865 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3868 else if (i
.broadcast
&& (int) op
== i
.broadcast
->operand
)
3870 switch (i
.broadcast
->bytes
)
3873 i
.tm
.opcode_modifier
.evex
= EVEX512
;
3876 i
.tm
.opcode_modifier
.evex
= EVEX256
;
3879 i
.tm
.opcode_modifier
.evex
= EVEX128
;
3888 if (op
>= MAX_OPERANDS
)
3892 switch (i
.tm
.opcode_modifier
.evex
)
3894 case EVEXLIG
: /* LL' is ignored */
3895 vec_length
= evexlig
<< 5;
3898 vec_length
= 0 << 5;
3901 vec_length
= 1 << 5;
3904 vec_length
= 2 << 5;
3910 i
.vex
.bytes
[3] |= vec_length
;
3911 /* Encode the broadcast bit. */
3913 i
.vex
.bytes
[3] |= 0x10;
3917 if (i
.rounding
->type
!= saeonly
)
3918 i
.vex
.bytes
[3] |= 0x10 | (i
.rounding
->type
<< 5);
3920 i
.vex
.bytes
[3] |= 0x10 | (evexrcig
<< 5);
3923 if (i
.mask
&& i
.mask
->mask
)
3924 i
.vex
.bytes
[3] |= i
.mask
->mask
->reg_num
;
3928 process_immext (void)
3932 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3933 which is coded in the same place as an 8-bit immediate field
3934 would be. Here we fake an 8-bit immediate operand from the
3935 opcode suffix stored in tm.extension_opcode.
3937 AVX instructions also use this encoding, for some of
3938 3 argument instructions. */
3940 gas_assert (i
.imm_operands
<= 1
3942 || (is_any_vex_encoding (&i
.tm
)
3943 && i
.operands
<= 4)));
3945 exp
= &im_expressions
[i
.imm_operands
++];
3946 i
.op
[i
.operands
].imms
= exp
;
3947 i
.types
[i
.operands
] = imm8
;
3949 exp
->X_op
= O_constant
;
3950 exp
->X_add_number
= i
.tm
.extension_opcode
;
3951 i
.tm
.extension_opcode
= None
;
3958 switch (i
.tm
.opcode_modifier
.hleprefixok
)
3963 as_bad (_("invalid instruction `%s' after `%s'"),
3964 i
.tm
.name
, i
.hle_prefix
);
3967 if (i
.prefix
[LOCK_PREFIX
])
3969 as_bad (_("missing `lock' with `%s'"), i
.hle_prefix
);
3973 case HLEPrefixRelease
:
3974 if (i
.prefix
[HLE_PREFIX
] != XRELEASE_PREFIX_OPCODE
)
3976 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3980 if (i
.mem_operands
== 0 || !(i
.flags
[i
.operands
- 1] & Operand_Mem
))
3982 as_bad (_("memory destination needed for instruction `%s'"
3983 " after `xrelease'"), i
.tm
.name
);
3990 /* Try the shortest encoding by shortening operand size. */
3993 optimize_encoding (void)
3997 if (optimize_for_space
3998 && !is_any_vex_encoding (&i
.tm
)
3999 && i
.reg_operands
== 1
4000 && i
.imm_operands
== 1
4001 && !i
.types
[1].bitfield
.byte
4002 && i
.op
[0].imms
->X_op
== O_constant
4003 && fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4004 && (i
.tm
.base_opcode
== 0xa8
4005 || (i
.tm
.base_opcode
== 0xf6
4006 && i
.tm
.extension_opcode
== 0x0)))
4009 test $imm7, %r64/%r32/%r16 -> test $imm7, %r8
4011 unsigned int base_regnum
= i
.op
[1].regs
->reg_num
;
4012 if (flag_code
== CODE_64BIT
|| base_regnum
< 4)
4014 i
.types
[1].bitfield
.byte
= 1;
4015 /* Ignore the suffix. */
4017 /* Convert to byte registers. */
4018 if (i
.types
[1].bitfield
.word
)
4020 else if (i
.types
[1].bitfield
.dword
)
4024 if (!(i
.op
[1].regs
->reg_flags
& RegRex
) && base_regnum
< 4)
4029 else if (flag_code
== CODE_64BIT
4030 && !is_any_vex_encoding (&i
.tm
)
4031 && ((i
.types
[1].bitfield
.qword
4032 && i
.reg_operands
== 1
4033 && i
.imm_operands
== 1
4034 && i
.op
[0].imms
->X_op
== O_constant
4035 && ((i
.tm
.base_opcode
== 0xb8
4036 && i
.tm
.extension_opcode
== None
4037 && fits_in_unsigned_long (i
.op
[0].imms
->X_add_number
))
4038 || (fits_in_imm31 (i
.op
[0].imms
->X_add_number
)
4039 && ((i
.tm
.base_opcode
== 0x24
4040 || i
.tm
.base_opcode
== 0xa8)
4041 || (i
.tm
.base_opcode
== 0x80
4042 && i
.tm
.extension_opcode
== 0x4)
4043 || ((i
.tm
.base_opcode
== 0xf6
4044 || (i
.tm
.base_opcode
| 1) == 0xc7)
4045 && i
.tm
.extension_opcode
== 0x0)))
4046 || (fits_in_imm7 (i
.op
[0].imms
->X_add_number
)
4047 && i
.tm
.base_opcode
== 0x83
4048 && i
.tm
.extension_opcode
== 0x4)))
4049 || (i
.types
[0].bitfield
.qword
4050 && ((i
.reg_operands
== 2
4051 && i
.op
[0].regs
== i
.op
[1].regs
4052 && (i
.tm
.base_opcode
== 0x30
4053 || i
.tm
.base_opcode
== 0x28))
4054 || (i
.reg_operands
== 1
4056 && i
.tm
.base_opcode
== 0x30)))))
4059 andq $imm31, %r64 -> andl $imm31, %r32
4060 andq $imm7, %r64 -> andl $imm7, %r32
4061 testq $imm31, %r64 -> testl $imm31, %r32
4062 xorq %r64, %r64 -> xorl %r32, %r32
4063 subq %r64, %r64 -> subl %r32, %r32
4064 movq $imm31, %r64 -> movl $imm31, %r32
4065 movq $imm32, %r64 -> movl $imm32, %r32
4067 i
.tm
.opcode_modifier
.norex64
= 1;
4068 if (i
.tm
.base_opcode
== 0xb8 || (i
.tm
.base_opcode
| 1) == 0xc7)
4071 movq $imm31, %r64 -> movl $imm31, %r32
4072 movq $imm32, %r64 -> movl $imm32, %r32
4074 i
.tm
.operand_types
[0].bitfield
.imm32
= 1;
4075 i
.tm
.operand_types
[0].bitfield
.imm32s
= 0;
4076 i
.tm
.operand_types
[0].bitfield
.imm64
= 0;
4077 i
.types
[0].bitfield
.imm32
= 1;
4078 i
.types
[0].bitfield
.imm32s
= 0;
4079 i
.types
[0].bitfield
.imm64
= 0;
4080 i
.types
[1].bitfield
.dword
= 1;
4081 i
.types
[1].bitfield
.qword
= 0;
4082 if ((i
.tm
.base_opcode
| 1) == 0xc7)
4085 movq $imm31, %r64 -> movl $imm31, %r32
4087 i
.tm
.base_opcode
= 0xb8;
4088 i
.tm
.extension_opcode
= None
;
4089 i
.tm
.opcode_modifier
.w
= 0;
4090 i
.tm
.opcode_modifier
.modrm
= 0;
4094 else if (optimize
> 1
4095 && !optimize_for_space
4096 && !is_any_vex_encoding (&i
.tm
)
4097 && i
.reg_operands
== 2
4098 && i
.op
[0].regs
== i
.op
[1].regs
4099 && ((i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x8
4100 || (i
.tm
.base_opcode
& ~(Opcode_D
| 1)) == 0x20)
4101 && (flag_code
!= CODE_64BIT
|| !i
.types
[0].bitfield
.dword
))
4104 andb %rN, %rN -> testb %rN, %rN
4105 andw %rN, %rN -> testw %rN, %rN
4106 andq %rN, %rN -> testq %rN, %rN
4107 orb %rN, %rN -> testb %rN, %rN
4108 orw %rN, %rN -> testw %rN, %rN
4109 orq %rN, %rN -> testq %rN, %rN
4111 and outside of 64-bit mode
4113 andl %rN, %rN -> testl %rN, %rN
4114 orl %rN, %rN -> testl %rN, %rN
4116 i
.tm
.base_opcode
= 0x84 | (i
.tm
.base_opcode
& 1);
4118 else if (i
.reg_operands
== 3
4119 && i
.op
[0].regs
== i
.op
[1].regs
4120 && !i
.types
[2].bitfield
.xmmword
4121 && (i
.tm
.opcode_modifier
.vex
4122 || ((!i
.mask
|| i
.mask
->zeroing
)
4124 && is_evex_encoding (&i
.tm
)
4125 && (i
.vec_encoding
!= vex_encoding_evex
4126 || cpu_arch_isa_flags
.bitfield
.cpuavx512vl
4127 || i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
4128 || (i
.tm
.operand_types
[2].bitfield
.zmmword
4129 && i
.types
[2].bitfield
.ymmword
))))
4130 && ((i
.tm
.base_opcode
== 0x55
4131 || i
.tm
.base_opcode
== 0x6655
4132 || i
.tm
.base_opcode
== 0x66df
4133 || i
.tm
.base_opcode
== 0x57
4134 || i
.tm
.base_opcode
== 0x6657
4135 || i
.tm
.base_opcode
== 0x66ef
4136 || i
.tm
.base_opcode
== 0x66f8
4137 || i
.tm
.base_opcode
== 0x66f9
4138 || i
.tm
.base_opcode
== 0x66fa
4139 || i
.tm
.base_opcode
== 0x66fb
4140 || i
.tm
.base_opcode
== 0x42
4141 || i
.tm
.base_opcode
== 0x6642
4142 || i
.tm
.base_opcode
== 0x47
4143 || i
.tm
.base_opcode
== 0x6647)
4144 && i
.tm
.extension_opcode
== None
))
4147 VOP, one of vandnps, vandnpd, vxorps, vxorpd, vpsubb, vpsubd,
4149 EVEX VOP %zmmM, %zmmM, %zmmN
4150 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4151 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4152 EVEX VOP %ymmM, %ymmM, %ymmN
4153 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4154 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4155 VEX VOP %ymmM, %ymmM, %ymmN
4156 -> VEX VOP %xmmM, %xmmM, %xmmN
4157 VOP, one of vpandn and vpxor:
4158 VEX VOP %ymmM, %ymmM, %ymmN
4159 -> VEX VOP %xmmM, %xmmM, %xmmN
4160 VOP, one of vpandnd and vpandnq:
4161 EVEX VOP %zmmM, %zmmM, %zmmN
4162 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4163 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4164 EVEX VOP %ymmM, %ymmM, %ymmN
4165 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4166 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4167 VOP, one of vpxord and vpxorq:
4168 EVEX VOP %zmmM, %zmmM, %zmmN
4169 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4170 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4171 EVEX VOP %ymmM, %ymmM, %ymmN
4172 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4173 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4174 VOP, one of kxord and kxorq:
4175 VEX VOP %kM, %kM, %kN
4176 -> VEX kxorw %kM, %kM, %kN
4177 VOP, one of kandnd and kandnq:
4178 VEX VOP %kM, %kM, %kN
4179 -> VEX kandnw %kM, %kM, %kN
4181 if (is_evex_encoding (&i
.tm
))
4183 if (i
.vec_encoding
!= vex_encoding_evex
)
4185 i
.tm
.opcode_modifier
.vex
= VEX128
;
4186 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4187 i
.tm
.opcode_modifier
.evex
= 0;
4189 else if (optimize
> 1)
4190 i
.tm
.opcode_modifier
.evex
= EVEX128
;
4194 else if (i
.tm
.operand_types
[0].bitfield
.class == RegMask
)
4196 i
.tm
.base_opcode
&= 0xff;
4197 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4200 i
.tm
.opcode_modifier
.vex
= VEX128
;
4202 if (i
.tm
.opcode_modifier
.vex
)
4203 for (j
= 0; j
< 3; j
++)
4205 i
.types
[j
].bitfield
.xmmword
= 1;
4206 i
.types
[j
].bitfield
.ymmword
= 0;
4209 else if (i
.vec_encoding
!= vex_encoding_evex
4210 && !i
.types
[0].bitfield
.zmmword
4211 && !i
.types
[1].bitfield
.zmmword
4214 && is_evex_encoding (&i
.tm
)
4215 && ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0x666f
4216 || (i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0xf36f
4217 || (i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0xf26f
4218 || (i
.tm
.base_opcode
& ~4) == 0x66db
4219 || (i
.tm
.base_opcode
& ~4) == 0x66eb)
4220 && i
.tm
.extension_opcode
== None
)
4223 VOP, one of vmovdqa32, vmovdqa64, vmovdqu8, vmovdqu16,
4224 vmovdqu32 and vmovdqu64:
4225 EVEX VOP %xmmM, %xmmN
4226 -> VEX vmovdqa|vmovdqu %xmmM, %xmmN (M and N < 16)
4227 EVEX VOP %ymmM, %ymmN
4228 -> VEX vmovdqa|vmovdqu %ymmM, %ymmN (M and N < 16)
4230 -> VEX vmovdqa|vmovdqu %xmmM, mem (M < 16)
4232 -> VEX vmovdqa|vmovdqu %ymmM, mem (M < 16)
4234 -> VEX mvmovdqa|vmovdquem, %xmmN (N < 16)
4236 -> VEX vmovdqa|vmovdqu mem, %ymmN (N < 16)
4237 VOP, one of vpand, vpandn, vpor, vpxor:
4238 EVEX VOP{d,q} %xmmL, %xmmM, %xmmN
4239 -> VEX VOP %xmmL, %xmmM, %xmmN (L, M, and N < 16)
4240 EVEX VOP{d,q} %ymmL, %ymmM, %ymmN
4241 -> VEX VOP %ymmL, %ymmM, %ymmN (L, M, and N < 16)
4242 EVEX VOP{d,q} mem, %xmmM, %xmmN
4243 -> VEX VOP mem, %xmmM, %xmmN (M and N < 16)
4244 EVEX VOP{d,q} mem, %ymmM, %ymmN
4245 -> VEX VOP mem, %ymmM, %ymmN (M and N < 16)
4247 for (j
= 0; j
< i
.operands
; j
++)
4248 if (operand_type_check (i
.types
[j
], disp
)
4249 && i
.op
[j
].disps
->X_op
== O_constant
)
4251 /* Since the VEX prefix has 2 or 3 bytes, the EVEX prefix
4252 has 4 bytes, EVEX Disp8 has 1 byte and VEX Disp32 has 4
4253 bytes, we choose EVEX Disp8 over VEX Disp32. */
4254 int evex_disp8
, vex_disp8
;
4255 unsigned int memshift
= i
.memshift
;
4256 offsetT n
= i
.op
[j
].disps
->X_add_number
;
4258 evex_disp8
= fits_in_disp8 (n
);
4260 vex_disp8
= fits_in_disp8 (n
);
4261 if (evex_disp8
!= vex_disp8
)
4263 i
.memshift
= memshift
;
4267 i
.types
[j
].bitfield
.disp8
= vex_disp8
;
4270 if ((i
.tm
.base_opcode
& ~Opcode_SIMD_IntD
) == 0xf26f)
4271 i
.tm
.base_opcode
^= 0xf36f ^ 0xf26f;
4272 i
.tm
.opcode_modifier
.vex
4273 = i
.types
[0].bitfield
.ymmword
? VEX256
: VEX128
;
4274 i
.tm
.opcode_modifier
.vexw
= VEXW0
;
4275 /* VPAND, VPOR, and VPXOR are commutative. */
4276 if (i
.reg_operands
== 3 && i
.tm
.base_opcode
!= 0x66df)
4277 i
.tm
.opcode_modifier
.commutative
= 1;
4278 i
.tm
.opcode_modifier
.evex
= 0;
4279 i
.tm
.opcode_modifier
.masking
= 0;
4280 i
.tm
.opcode_modifier
.broadcast
= 0;
4281 i
.tm
.opcode_modifier
.disp8memshift
= 0;
4284 i
.types
[j
].bitfield
.disp8
4285 = fits_in_disp8 (i
.op
[j
].disps
->X_add_number
);
4289 /* This is the guts of the machine-dependent assembler. LINE points to a
4290 machine dependent instruction. This function is supposed to emit
4291 the frags/bytes it assembles to. */
4294 md_assemble (char *line
)
4297 char mnemonic
[MAX_MNEM_SIZE
], mnem_suffix
;
4298 const insn_template
*t
;
4300 /* Initialize globals. */
4301 memset (&i
, '\0', sizeof (i
));
4302 for (j
= 0; j
< MAX_OPERANDS
; j
++)
4303 i
.reloc
[j
] = NO_RELOC
;
4304 memset (disp_expressions
, '\0', sizeof (disp_expressions
));
4305 memset (im_expressions
, '\0', sizeof (im_expressions
));
4306 save_stack_p
= save_stack
;
4308 /* First parse an instruction mnemonic & call i386_operand for the operands.
4309 We assume that the scrubber has arranged it so that line[0] is the valid
4310 start of a (possibly prefixed) mnemonic. */
4312 line
= parse_insn (line
, mnemonic
);
4315 mnem_suffix
= i
.suffix
;
4317 line
= parse_operands (line
, mnemonic
);
4319 xfree (i
.memop1_string
);
4320 i
.memop1_string
= NULL
;
4324 /* Now we've parsed the mnemonic into a set of templates, and have the
4325 operands at hand. */
4327 /* All intel opcodes have reversed operands except for "bound" and
4328 "enter". We also don't reverse intersegment "jmp" and "call"
4329 instructions with 2 immediate operands so that the immediate segment
4330 precedes the offset, as it does when in AT&T mode. */
4333 && (strcmp (mnemonic
, "bound") != 0)
4334 && (strcmp (mnemonic
, "invlpga") != 0)
4335 && !(operand_type_check (i
.types
[0], imm
)
4336 && operand_type_check (i
.types
[1], imm
)))
4339 /* The order of the immediates should be reversed
4340 for 2 immediates extrq and insertq instructions */
4341 if (i
.imm_operands
== 2
4342 && (strcmp (mnemonic
, "extrq") == 0
4343 || strcmp (mnemonic
, "insertq") == 0))
4344 swap_2_operands (0, 1);
4349 /* Don't optimize displacement for movabs since it only takes 64bit
4352 && i
.disp_encoding
!= disp_encoding_32bit
4353 && (flag_code
!= CODE_64BIT
4354 || strcmp (mnemonic
, "movabs") != 0))
4357 /* Next, we find a template that matches the given insn,
4358 making sure the overlap of the given operands types is consistent
4359 with the template operand types. */
4361 if (!(t
= match_template (mnem_suffix
)))
4364 if (sse_check
!= check_none
4365 && !i
.tm
.opcode_modifier
.noavx
4366 && !i
.tm
.cpu_flags
.bitfield
.cpuavx
4367 && !i
.tm
.cpu_flags
.bitfield
.cpuavx512f
4368 && (i
.tm
.cpu_flags
.bitfield
.cpusse
4369 || i
.tm
.cpu_flags
.bitfield
.cpusse2
4370 || i
.tm
.cpu_flags
.bitfield
.cpusse3
4371 || i
.tm
.cpu_flags
.bitfield
.cpussse3
4372 || i
.tm
.cpu_flags
.bitfield
.cpusse4_1
4373 || i
.tm
.cpu_flags
.bitfield
.cpusse4_2
4374 || i
.tm
.cpu_flags
.bitfield
.cpusse4a
4375 || i
.tm
.cpu_flags
.bitfield
.cpupclmul
4376 || i
.tm
.cpu_flags
.bitfield
.cpuaes
4377 || i
.tm
.cpu_flags
.bitfield
.cpusha
4378 || i
.tm
.cpu_flags
.bitfield
.cpugfni
))
4380 (sse_check
== check_warning
4382 : as_bad
) (_("SSE instruction `%s' is used"), i
.tm
.name
);
4385 /* Zap movzx and movsx suffix. The suffix has been set from
4386 "word ptr" or "byte ptr" on the source operand in Intel syntax
4387 or extracted from mnemonic in AT&T syntax. But we'll use
4388 the destination register to choose the suffix for encoding. */
4389 if ((i
.tm
.base_opcode
& ~9) == 0x0fb6)
4391 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
4392 there is no suffix, the default will be byte extension. */
4393 if (i
.reg_operands
!= 2
4396 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
4401 if (i
.tm
.opcode_modifier
.fwait
)
4402 if (!add_prefix (FWAIT_OPCODE
))
4405 /* Check if REP prefix is OK. */
4406 if (i
.rep_prefix
&& !i
.tm
.opcode_modifier
.repprefixok
)
4408 as_bad (_("invalid instruction `%s' after `%s'"),
4409 i
.tm
.name
, i
.rep_prefix
);
4413 /* Check for lock without a lockable instruction. Destination operand
4414 must be memory unless it is xchg (0x86). */
4415 if (i
.prefix
[LOCK_PREFIX
]
4416 && (!i
.tm
.opcode_modifier
.islockable
4417 || i
.mem_operands
== 0
4418 || (i
.tm
.base_opcode
!= 0x86
4419 && !(i
.flags
[i
.operands
- 1] & Operand_Mem
))))
4421 as_bad (_("expecting lockable instruction after `lock'"));
4425 /* Check for data size prefix on VEX/XOP/EVEX encoded insns. */
4426 if (i
.prefix
[DATA_PREFIX
] && is_any_vex_encoding (&i
.tm
))
4428 as_bad (_("data size prefix invalid with `%s'"), i
.tm
.name
);
4432 /* Check if HLE prefix is OK. */
4433 if (i
.hle_prefix
&& !check_hle ())
4436 /* Check BND prefix. */
4437 if (i
.bnd_prefix
&& !i
.tm
.opcode_modifier
.bndprefixok
)
4438 as_bad (_("expecting valid branch instruction after `bnd'"));
4440 /* Check NOTRACK prefix. */
4441 if (i
.notrack_prefix
&& !i
.tm
.opcode_modifier
.notrackprefixok
)
4442 as_bad (_("expecting indirect branch instruction after `notrack'"));
4444 if (i
.tm
.cpu_flags
.bitfield
.cpumpx
)
4446 if (flag_code
== CODE_64BIT
&& i
.prefix
[ADDR_PREFIX
])
4447 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
4448 else if (flag_code
!= CODE_16BIT
4449 ? i
.prefix
[ADDR_PREFIX
]
4450 : i
.mem_operands
&& !i
.prefix
[ADDR_PREFIX
])
4451 as_bad (_("16-bit address isn't allowed in MPX instructions"));
4454 /* Insert BND prefix. */
4455 if (add_bnd_prefix
&& i
.tm
.opcode_modifier
.bndprefixok
)
4457 if (!i
.prefix
[BND_PREFIX
])
4458 add_prefix (BND_PREFIX_OPCODE
);
4459 else if (i
.prefix
[BND_PREFIX
] != BND_PREFIX_OPCODE
)
4461 as_warn (_("replacing `rep'/`repe' prefix by `bnd'"));
4462 i
.prefix
[BND_PREFIX
] = BND_PREFIX_OPCODE
;
4466 /* Check string instruction segment overrides. */
4467 if (i
.tm
.opcode_modifier
.isstring
>= IS_STRING_ES_OP0
)
4469 gas_assert (i
.mem_operands
);
4470 if (!check_string ())
4472 i
.disp_operands
= 0;
4475 if (optimize
&& !i
.no_optimize
&& i
.tm
.opcode_modifier
.optimize
)
4476 optimize_encoding ();
4478 if (!process_suffix ())
4481 /* Update operand types. */
4482 for (j
= 0; j
< i
.operands
; j
++)
4483 i
.types
[j
] = operand_type_and (i
.types
[j
], i
.tm
.operand_types
[j
]);
4485 /* Make still unresolved immediate matches conform to size of immediate
4486 given in i.suffix. */
4487 if (!finalize_imm ())
4490 if (i
.types
[0].bitfield
.imm1
)
4491 i
.imm_operands
= 0; /* kludge for shift insns. */
4493 /* We only need to check those implicit registers for instructions
4494 with 3 operands or less. */
4495 if (i
.operands
<= 3)
4496 for (j
= 0; j
< i
.operands
; j
++)
4497 if (i
.types
[j
].bitfield
.instance
!= InstanceNone
4498 && !i
.types
[j
].bitfield
.xmmword
)
4501 /* ImmExt should be processed after SSE2AVX. */
4502 if (!i
.tm
.opcode_modifier
.sse2avx
4503 && i
.tm
.opcode_modifier
.immext
)
4506 /* For insns with operands there are more diddles to do to the opcode. */
4509 if (!process_operands ())
4512 else if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
4514 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
4515 as_warn (_("translating to `%sp'"), i
.tm
.name
);
4518 if (is_any_vex_encoding (&i
.tm
))
4520 if (!cpu_arch_flags
.bitfield
.cpui286
)
4522 as_bad (_("instruction `%s' isn't supported outside of protected mode."),
4527 if (i
.tm
.opcode_modifier
.vex
)
4528 build_vex_prefix (t
);
4530 build_evex_prefix ();
4533 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
4534 instructions may define INT_OPCODE as well, so avoid this corner
4535 case for those instructions that use MODRM. */
4536 if (i
.tm
.base_opcode
== INT_OPCODE
4537 && !i
.tm
.opcode_modifier
.modrm
4538 && i
.op
[0].imms
->X_add_number
== 3)
4540 i
.tm
.base_opcode
= INT3_OPCODE
;
4544 if ((i
.tm
.opcode_modifier
.jump
== JUMP
4545 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
4546 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
4547 && i
.op
[0].disps
->X_op
== O_constant
)
4549 /* Convert "jmp constant" (and "call constant") to a jump (call) to
4550 the absolute address given by the constant. Since ix86 jumps and
4551 calls are pc relative, we need to generate a reloc. */
4552 i
.op
[0].disps
->X_add_symbol
= &abs_symbol
;
4553 i
.op
[0].disps
->X_op
= O_symbol
;
4556 if (i
.tm
.opcode_modifier
.rex64
)
4559 /* For 8 bit registers we need an empty rex prefix. Also if the
4560 instruction already has a prefix, we need to convert old
4561 registers to new ones. */
4563 if ((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
4564 && (i
.op
[0].regs
->reg_flags
& RegRex64
) != 0)
4565 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
4566 && (i
.op
[1].regs
->reg_flags
& RegRex64
) != 0)
4567 || (((i
.types
[0].bitfield
.class == Reg
&& i
.types
[0].bitfield
.byte
)
4568 || (i
.types
[1].bitfield
.class == Reg
&& i
.types
[1].bitfield
.byte
))
4573 i
.rex
|= REX_OPCODE
;
4574 for (x
= 0; x
< 2; x
++)
4576 /* Look for 8 bit operand that uses old registers. */
4577 if (i
.types
[x
].bitfield
.class == Reg
&& i
.types
[x
].bitfield
.byte
4578 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0)
4580 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
4581 /* In case it is "hi" register, give up. */
4582 if (i
.op
[x
].regs
->reg_num
> 3)
4583 as_bad (_("can't encode register '%s%s' in an "
4584 "instruction requiring REX prefix."),
4585 register_prefix
, i
.op
[x
].regs
->reg_name
);
4587 /* Otherwise it is equivalent to the extended register.
4588 Since the encoding doesn't change this is merely
4589 cosmetic cleanup for debug output. */
4591 i
.op
[x
].regs
= i
.op
[x
].regs
+ 8;
4596 if (i
.rex
== 0 && i
.rex_encoding
)
4598 /* Check if we can add a REX_OPCODE byte. Look for 8 bit operand
4599 that uses legacy register. If it is "hi" register, don't add
4600 the REX_OPCODE byte. */
4602 for (x
= 0; x
< 2; x
++)
4603 if (i
.types
[x
].bitfield
.class == Reg
4604 && i
.types
[x
].bitfield
.byte
4605 && (i
.op
[x
].regs
->reg_flags
& RegRex64
) == 0
4606 && i
.op
[x
].regs
->reg_num
> 3)
4608 gas_assert (!(i
.op
[x
].regs
->reg_flags
& RegRex
));
4609 i
.rex_encoding
= FALSE
;
4618 add_prefix (REX_OPCODE
| i
.rex
);
4620 /* We are ready to output the insn. */
4623 last_insn
.seg
= now_seg
;
4625 if (i
.tm
.opcode_modifier
.isprefix
)
4627 last_insn
.kind
= last_insn_prefix
;
4628 last_insn
.name
= i
.tm
.name
;
4629 last_insn
.file
= as_where (&last_insn
.line
);
4632 last_insn
.kind
= last_insn_other
;
4636 parse_insn (char *line
, char *mnemonic
)
4639 char *token_start
= l
;
4642 const insn_template
*t
;
4648 while ((*mnem_p
= mnemonic_chars
[(unsigned char) *l
]) != 0)
4653 if (mnem_p
>= mnemonic
+ MAX_MNEM_SIZE
)
4655 as_bad (_("no such instruction: `%s'"), token_start
);
4660 if (!is_space_char (*l
)
4661 && *l
!= END_OF_INSN
4663 || (*l
!= PREFIX_SEPARATOR
4666 as_bad (_("invalid character %s in mnemonic"),
4667 output_invalid (*l
));
4670 if (token_start
== l
)
4672 if (!intel_syntax
&& *l
== PREFIX_SEPARATOR
)
4673 as_bad (_("expecting prefix; got nothing"));
4675 as_bad (_("expecting mnemonic; got nothing"));
4679 /* Look up instruction (or prefix) via hash table. */
4680 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
4682 if (*l
!= END_OF_INSN
4683 && (!is_space_char (*l
) || l
[1] != END_OF_INSN
)
4684 && current_templates
4685 && current_templates
->start
->opcode_modifier
.isprefix
)
4687 if (!cpu_flags_check_cpu64 (current_templates
->start
->cpu_flags
))
4689 as_bad ((flag_code
!= CODE_64BIT
4690 ? _("`%s' is only supported in 64-bit mode")
4691 : _("`%s' is not supported in 64-bit mode")),
4692 current_templates
->start
->name
);
4695 /* If we are in 16-bit mode, do not allow addr16 or data16.
4696 Similarly, in 32-bit mode, do not allow addr32 or data32. */
4697 if ((current_templates
->start
->opcode_modifier
.size
== SIZE16
4698 || current_templates
->start
->opcode_modifier
.size
== SIZE32
)
4699 && flag_code
!= CODE_64BIT
4700 && ((current_templates
->start
->opcode_modifier
.size
== SIZE32
)
4701 ^ (flag_code
== CODE_16BIT
)))
4703 as_bad (_("redundant %s prefix"),
4704 current_templates
->start
->name
);
4707 if (current_templates
->start
->opcode_length
== 0)
4709 /* Handle pseudo prefixes. */
4710 switch (current_templates
->start
->base_opcode
)
4714 i
.disp_encoding
= disp_encoding_8bit
;
4718 i
.disp_encoding
= disp_encoding_32bit
;
4722 i
.dir_encoding
= dir_encoding_load
;
4726 i
.dir_encoding
= dir_encoding_store
;
4730 i
.vec_encoding
= vex_encoding_vex
;
4734 i
.vec_encoding
= vex_encoding_vex3
;
4738 i
.vec_encoding
= vex_encoding_evex
;
4742 i
.rex_encoding
= TRUE
;
4746 i
.no_optimize
= TRUE
;
4754 /* Add prefix, checking for repeated prefixes. */
4755 switch (add_prefix (current_templates
->start
->base_opcode
))
4760 if (current_templates
->start
->cpu_flags
.bitfield
.cpuibt
)
4761 i
.notrack_prefix
= current_templates
->start
->name
;
4764 if (current_templates
->start
->cpu_flags
.bitfield
.cpuhle
)
4765 i
.hle_prefix
= current_templates
->start
->name
;
4766 else if (current_templates
->start
->cpu_flags
.bitfield
.cpumpx
)
4767 i
.bnd_prefix
= current_templates
->start
->name
;
4769 i
.rep_prefix
= current_templates
->start
->name
;
4775 /* Skip past PREFIX_SEPARATOR and reset token_start. */
4782 if (!current_templates
)
4784 /* Deprecated functionality (new code should use pseudo-prefixes instead):
4785 Check if we should swap operand or force 32bit displacement in
4787 if (mnem_p
- 2 == dot_p
&& dot_p
[1] == 's')
4788 i
.dir_encoding
= dir_encoding_swap
;
4789 else if (mnem_p
- 3 == dot_p
4792 i
.disp_encoding
= disp_encoding_8bit
;
4793 else if (mnem_p
- 4 == dot_p
4797 i
.disp_encoding
= disp_encoding_32bit
;
4802 current_templates
= (const templates
*) hash_find (op_hash
, mnemonic
);
4805 if (!current_templates
)
4808 if (mnem_p
> mnemonic
)
4810 /* See if we can get a match by trimming off a suffix. */
4813 case WORD_MNEM_SUFFIX
:
4814 if (intel_syntax
&& (intel_float_operand (mnemonic
) & 2))
4815 i
.suffix
= SHORT_MNEM_SUFFIX
;
4818 case BYTE_MNEM_SUFFIX
:
4819 case QWORD_MNEM_SUFFIX
:
4820 i
.suffix
= mnem_p
[-1];
4822 current_templates
= (const templates
*) hash_find (op_hash
,
4825 case SHORT_MNEM_SUFFIX
:
4826 case LONG_MNEM_SUFFIX
:
4829 i
.suffix
= mnem_p
[-1];
4831 current_templates
= (const templates
*) hash_find (op_hash
,
4840 if (intel_float_operand (mnemonic
) == 1)
4841 i
.suffix
= SHORT_MNEM_SUFFIX
;
4843 i
.suffix
= LONG_MNEM_SUFFIX
;
4845 current_templates
= (const templates
*) hash_find (op_hash
,
4852 if (!current_templates
)
4854 as_bad (_("no such instruction: `%s'"), token_start
);
4859 if (current_templates
->start
->opcode_modifier
.jump
== JUMP
4860 || current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
)
4862 /* Check for a branch hint. We allow ",pt" and ",pn" for
4863 predict taken and predict not taken respectively.
4864 I'm not sure that branch hints actually do anything on loop
4865 and jcxz insns (JumpByte) for current Pentium4 chips. They
4866 may work in the future and it doesn't hurt to accept them
4868 if (l
[0] == ',' && l
[1] == 'p')
4872 if (!add_prefix (DS_PREFIX_OPCODE
))
4876 else if (l
[2] == 'n')
4878 if (!add_prefix (CS_PREFIX_OPCODE
))
4884 /* Any other comma loses. */
4887 as_bad (_("invalid character %s in mnemonic"),
4888 output_invalid (*l
));
4892 /* Check if instruction is supported on specified architecture. */
4894 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
4896 supported
|= cpu_flags_match (t
);
4897 if (supported
== CPU_FLAGS_PERFECT_MATCH
)
4899 if (!cpu_arch_flags
.bitfield
.cpui386
&& (flag_code
!= CODE_16BIT
))
4900 as_warn (_("use .code16 to ensure correct addressing mode"));
4906 if (!(supported
& CPU_FLAGS_64BIT_MATCH
))
4907 as_bad (flag_code
== CODE_64BIT
4908 ? _("`%s' is not supported in 64-bit mode")
4909 : _("`%s' is only supported in 64-bit mode"),
4910 current_templates
->start
->name
);
4912 as_bad (_("`%s' is not supported on `%s%s'"),
4913 current_templates
->start
->name
,
4914 cpu_arch_name
? cpu_arch_name
: default_arch
,
4915 cpu_sub_arch_name
? cpu_sub_arch_name
: "");
4921 parse_operands (char *l
, const char *mnemonic
)
4925 /* 1 if operand is pending after ','. */
4926 unsigned int expecting_operand
= 0;
4928 /* Non-zero if operand parens not balanced. */
4929 unsigned int paren_not_balanced
;
4931 while (*l
!= END_OF_INSN
)
4933 /* Skip optional white space before operand. */
4934 if (is_space_char (*l
))
4936 if (!is_operand_char (*l
) && *l
!= END_OF_INSN
&& *l
!= '"')
4938 as_bad (_("invalid character %s before operand %d"),
4939 output_invalid (*l
),
4943 token_start
= l
; /* After white space. */
4944 paren_not_balanced
= 0;
4945 while (paren_not_balanced
|| *l
!= ',')
4947 if (*l
== END_OF_INSN
)
4949 if (paren_not_balanced
)
4952 as_bad (_("unbalanced parenthesis in operand %d."),
4955 as_bad (_("unbalanced brackets in operand %d."),
4960 break; /* we are done */
4962 else if (!is_operand_char (*l
) && !is_space_char (*l
) && *l
!= '"')
4964 as_bad (_("invalid character %s in operand %d"),
4965 output_invalid (*l
),
4972 ++paren_not_balanced
;
4974 --paren_not_balanced
;
4979 ++paren_not_balanced
;
4981 --paren_not_balanced
;
4985 if (l
!= token_start
)
4986 { /* Yes, we've read in another operand. */
4987 unsigned int operand_ok
;
4988 this_operand
= i
.operands
++;
4989 if (i
.operands
> MAX_OPERANDS
)
4991 as_bad (_("spurious operands; (%d operands/instruction max)"),
4995 i
.types
[this_operand
].bitfield
.unspecified
= 1;
4996 /* Now parse operand adding info to 'i' as we go along. */
4997 END_STRING_AND_SAVE (l
);
4999 if (i
.mem_operands
> 1)
5001 as_bad (_("too many memory references for `%s'"),
5008 i386_intel_operand (token_start
,
5009 intel_float_operand (mnemonic
));
5011 operand_ok
= i386_att_operand (token_start
);
5013 RESTORE_END_STRING (l
);
5019 if (expecting_operand
)
5021 expecting_operand_after_comma
:
5022 as_bad (_("expecting operand after ','; got nothing"));
5027 as_bad (_("expecting operand before ','; got nothing"));
5032 /* Now *l must be either ',' or END_OF_INSN. */
5035 if (*++l
== END_OF_INSN
)
5037 /* Just skip it, if it's \n complain. */
5038 goto expecting_operand_after_comma
;
5040 expecting_operand
= 1;
5047 swap_2_operands (int xchg1
, int xchg2
)
5049 union i386_op temp_op
;
5050 i386_operand_type temp_type
;
5051 unsigned int temp_flags
;
5052 enum bfd_reloc_code_real temp_reloc
;
5054 temp_type
= i
.types
[xchg2
];
5055 i
.types
[xchg2
] = i
.types
[xchg1
];
5056 i
.types
[xchg1
] = temp_type
;
5058 temp_flags
= i
.flags
[xchg2
];
5059 i
.flags
[xchg2
] = i
.flags
[xchg1
];
5060 i
.flags
[xchg1
] = temp_flags
;
5062 temp_op
= i
.op
[xchg2
];
5063 i
.op
[xchg2
] = i
.op
[xchg1
];
5064 i
.op
[xchg1
] = temp_op
;
5066 temp_reloc
= i
.reloc
[xchg2
];
5067 i
.reloc
[xchg2
] = i
.reloc
[xchg1
];
5068 i
.reloc
[xchg1
] = temp_reloc
;
5072 if (i
.mask
->operand
== xchg1
)
5073 i
.mask
->operand
= xchg2
;
5074 else if (i
.mask
->operand
== xchg2
)
5075 i
.mask
->operand
= xchg1
;
5079 if (i
.broadcast
->operand
== xchg1
)
5080 i
.broadcast
->operand
= xchg2
;
5081 else if (i
.broadcast
->operand
== xchg2
)
5082 i
.broadcast
->operand
= xchg1
;
5086 if (i
.rounding
->operand
== xchg1
)
5087 i
.rounding
->operand
= xchg2
;
5088 else if (i
.rounding
->operand
== xchg2
)
5089 i
.rounding
->operand
= xchg1
;
5094 swap_operands (void)
5100 swap_2_operands (1, i
.operands
- 2);
5104 swap_2_operands (0, i
.operands
- 1);
5110 if (i
.mem_operands
== 2)
5112 const seg_entry
*temp_seg
;
5113 temp_seg
= i
.seg
[0];
5114 i
.seg
[0] = i
.seg
[1];
5115 i
.seg
[1] = temp_seg
;
5119 /* Try to ensure constant immediates are represented in the smallest
5124 char guess_suffix
= 0;
5128 guess_suffix
= i
.suffix
;
5129 else if (i
.reg_operands
)
5131 /* Figure out a suffix from the last register operand specified.
5132 We can't do this properly yet, i.e. excluding special register
5133 instances, but the following works for instructions with
5134 immediates. In any case, we can't set i.suffix yet. */
5135 for (op
= i
.operands
; --op
>= 0;)
5136 if (i
.types
[op
].bitfield
.class != Reg
)
5138 else if (i
.types
[op
].bitfield
.byte
)
5140 guess_suffix
= BYTE_MNEM_SUFFIX
;
5143 else if (i
.types
[op
].bitfield
.word
)
5145 guess_suffix
= WORD_MNEM_SUFFIX
;
5148 else if (i
.types
[op
].bitfield
.dword
)
5150 guess_suffix
= LONG_MNEM_SUFFIX
;
5153 else if (i
.types
[op
].bitfield
.qword
)
5155 guess_suffix
= QWORD_MNEM_SUFFIX
;
5159 else if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
5160 guess_suffix
= WORD_MNEM_SUFFIX
;
5162 for (op
= i
.operands
; --op
>= 0;)
5163 if (operand_type_check (i
.types
[op
], imm
))
5165 switch (i
.op
[op
].imms
->X_op
)
5168 /* If a suffix is given, this operand may be shortened. */
5169 switch (guess_suffix
)
5171 case LONG_MNEM_SUFFIX
:
5172 i
.types
[op
].bitfield
.imm32
= 1;
5173 i
.types
[op
].bitfield
.imm64
= 1;
5175 case WORD_MNEM_SUFFIX
:
5176 i
.types
[op
].bitfield
.imm16
= 1;
5177 i
.types
[op
].bitfield
.imm32
= 1;
5178 i
.types
[op
].bitfield
.imm32s
= 1;
5179 i
.types
[op
].bitfield
.imm64
= 1;
5181 case BYTE_MNEM_SUFFIX
:
5182 i
.types
[op
].bitfield
.imm8
= 1;
5183 i
.types
[op
].bitfield
.imm8s
= 1;
5184 i
.types
[op
].bitfield
.imm16
= 1;
5185 i
.types
[op
].bitfield
.imm32
= 1;
5186 i
.types
[op
].bitfield
.imm32s
= 1;
5187 i
.types
[op
].bitfield
.imm64
= 1;
5191 /* If this operand is at most 16 bits, convert it
5192 to a signed 16 bit number before trying to see
5193 whether it will fit in an even smaller size.
5194 This allows a 16-bit operand such as $0xffe0 to
5195 be recognised as within Imm8S range. */
5196 if ((i
.types
[op
].bitfield
.imm16
)
5197 && (i
.op
[op
].imms
->X_add_number
& ~(offsetT
) 0xffff) == 0)
5199 i
.op
[op
].imms
->X_add_number
=
5200 (((i
.op
[op
].imms
->X_add_number
& 0xffff) ^ 0x8000) - 0x8000);
5203 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
5204 if ((i
.types
[op
].bitfield
.imm32
)
5205 && ((i
.op
[op
].imms
->X_add_number
& ~(((offsetT
) 2 << 31) - 1))
5208 i
.op
[op
].imms
->X_add_number
= ((i
.op
[op
].imms
->X_add_number
5209 ^ ((offsetT
) 1 << 31))
5210 - ((offsetT
) 1 << 31));
5214 = operand_type_or (i
.types
[op
],
5215 smallest_imm_type (i
.op
[op
].imms
->X_add_number
));
5217 /* We must avoid matching of Imm32 templates when 64bit
5218 only immediate is available. */
5219 if (guess_suffix
== QWORD_MNEM_SUFFIX
)
5220 i
.types
[op
].bitfield
.imm32
= 0;
5227 /* Symbols and expressions. */
5229 /* Convert symbolic operand to proper sizes for matching, but don't
5230 prevent matching a set of insns that only supports sizes other
5231 than those matching the insn suffix. */
5233 i386_operand_type mask
, allowed
;
5234 const insn_template
*t
;
5236 operand_type_set (&mask
, 0);
5237 operand_type_set (&allowed
, 0);
5239 for (t
= current_templates
->start
;
5240 t
< current_templates
->end
;
5243 allowed
= operand_type_or (allowed
, t
->operand_types
[op
]);
5244 allowed
= operand_type_and (allowed
, anyimm
);
5246 switch (guess_suffix
)
5248 case QWORD_MNEM_SUFFIX
:
5249 mask
.bitfield
.imm64
= 1;
5250 mask
.bitfield
.imm32s
= 1;
5252 case LONG_MNEM_SUFFIX
:
5253 mask
.bitfield
.imm32
= 1;
5255 case WORD_MNEM_SUFFIX
:
5256 mask
.bitfield
.imm16
= 1;
5258 case BYTE_MNEM_SUFFIX
:
5259 mask
.bitfield
.imm8
= 1;
5264 allowed
= operand_type_and (mask
, allowed
);
5265 if (!operand_type_all_zero (&allowed
))
5266 i
.types
[op
] = operand_type_and (i
.types
[op
], mask
);
5273 /* Try to use the smallest displacement type too. */
5275 optimize_disp (void)
5279 for (op
= i
.operands
; --op
>= 0;)
5280 if (operand_type_check (i
.types
[op
], disp
))
5282 if (i
.op
[op
].disps
->X_op
== O_constant
)
5284 offsetT op_disp
= i
.op
[op
].disps
->X_add_number
;
5286 if (i
.types
[op
].bitfield
.disp16
5287 && (op_disp
& ~(offsetT
) 0xffff) == 0)
5289 /* If this operand is at most 16 bits, convert
5290 to a signed 16 bit number and don't use 64bit
5292 op_disp
= (((op_disp
& 0xffff) ^ 0x8000) - 0x8000);
5293 i
.types
[op
].bitfield
.disp64
= 0;
5296 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
5297 if (i
.types
[op
].bitfield
.disp32
5298 && (op_disp
& ~(((offsetT
) 2 << 31) - 1)) == 0)
5300 /* If this operand is at most 32 bits, convert
5301 to a signed 32 bit number and don't use 64bit
5303 op_disp
&= (((offsetT
) 2 << 31) - 1);
5304 op_disp
= (op_disp
^ ((offsetT
) 1 << 31)) - ((addressT
) 1 << 31);
5305 i
.types
[op
].bitfield
.disp64
= 0;
5308 if (!op_disp
&& i
.types
[op
].bitfield
.baseindex
)
5310 i
.types
[op
].bitfield
.disp8
= 0;
5311 i
.types
[op
].bitfield
.disp16
= 0;
5312 i
.types
[op
].bitfield
.disp32
= 0;
5313 i
.types
[op
].bitfield
.disp32s
= 0;
5314 i
.types
[op
].bitfield
.disp64
= 0;
5318 else if (flag_code
== CODE_64BIT
)
5320 if (fits_in_signed_long (op_disp
))
5322 i
.types
[op
].bitfield
.disp64
= 0;
5323 i
.types
[op
].bitfield
.disp32s
= 1;
5325 if (i
.prefix
[ADDR_PREFIX
]
5326 && fits_in_unsigned_long (op_disp
))
5327 i
.types
[op
].bitfield
.disp32
= 1;
5329 if ((i
.types
[op
].bitfield
.disp32
5330 || i
.types
[op
].bitfield
.disp32s
5331 || i
.types
[op
].bitfield
.disp16
)
5332 && fits_in_disp8 (op_disp
))
5333 i
.types
[op
].bitfield
.disp8
= 1;
5335 else if (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
5336 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
)
5338 fix_new_exp (frag_now
, frag_more (0) - frag_now
->fr_literal
, 0,
5339 i
.op
[op
].disps
, 0, i
.reloc
[op
]);
5340 i
.types
[op
].bitfield
.disp8
= 0;
5341 i
.types
[op
].bitfield
.disp16
= 0;
5342 i
.types
[op
].bitfield
.disp32
= 0;
5343 i
.types
[op
].bitfield
.disp32s
= 0;
5344 i
.types
[op
].bitfield
.disp64
= 0;
5347 /* We only support 64bit displacement on constants. */
5348 i
.types
[op
].bitfield
.disp64
= 0;
5352 /* Return 1 if there is a match in broadcast bytes between operand
5353 GIVEN and instruction template T. */
5356 match_broadcast_size (const insn_template
*t
, unsigned int given
)
5358 return ((t
->opcode_modifier
.broadcast
== BYTE_BROADCAST
5359 && i
.types
[given
].bitfield
.byte
)
5360 || (t
->opcode_modifier
.broadcast
== WORD_BROADCAST
5361 && i
.types
[given
].bitfield
.word
)
5362 || (t
->opcode_modifier
.broadcast
== DWORD_BROADCAST
5363 && i
.types
[given
].bitfield
.dword
)
5364 || (t
->opcode_modifier
.broadcast
== QWORD_BROADCAST
5365 && i
.types
[given
].bitfield
.qword
));
5368 /* Check if operands are valid for the instruction. */
5371 check_VecOperands (const insn_template
*t
)
5375 static const i386_cpu_flags avx512
= CPU_ANY_AVX512F_FLAGS
;
5377 /* Templates allowing for ZMMword as well as YMMword and/or XMMword for
5378 any one operand are implicity requiring AVX512VL support if the actual
5379 operand size is YMMword or XMMword. Since this function runs after
5380 template matching, there's no need to check for YMMword/XMMword in
5382 cpu
= cpu_flags_and (t
->cpu_flags
, avx512
);
5383 if (!cpu_flags_all_zero (&cpu
)
5384 && !t
->cpu_flags
.bitfield
.cpuavx512vl
5385 && !cpu_arch_flags
.bitfield
.cpuavx512vl
)
5387 for (op
= 0; op
< t
->operands
; ++op
)
5389 if (t
->operand_types
[op
].bitfield
.zmmword
5390 && (i
.types
[op
].bitfield
.ymmword
5391 || i
.types
[op
].bitfield
.xmmword
))
5393 i
.error
= unsupported
;
5399 /* Without VSIB byte, we can't have a vector register for index. */
5400 if (!t
->opcode_modifier
.vecsib
5402 && (i
.index_reg
->reg_type
.bitfield
.xmmword
5403 || i
.index_reg
->reg_type
.bitfield
.ymmword
5404 || i
.index_reg
->reg_type
.bitfield
.zmmword
))
5406 i
.error
= unsupported_vector_index_register
;
5410 /* Check if default mask is allowed. */
5411 if (t
->opcode_modifier
.nodefmask
5412 && (!i
.mask
|| i
.mask
->mask
->reg_num
== 0))
5414 i
.error
= no_default_mask
;
5418 /* For VSIB byte, we need a vector register for index, and all vector
5419 registers must be distinct. */
5420 if (t
->opcode_modifier
.vecsib
)
5423 || !((t
->opcode_modifier
.vecsib
== VecSIB128
5424 && i
.index_reg
->reg_type
.bitfield
.xmmword
)
5425 || (t
->opcode_modifier
.vecsib
== VecSIB256
5426 && i
.index_reg
->reg_type
.bitfield
.ymmword
)
5427 || (t
->opcode_modifier
.vecsib
== VecSIB512
5428 && i
.index_reg
->reg_type
.bitfield
.zmmword
)))
5430 i
.error
= invalid_vsib_address
;
5434 gas_assert (i
.reg_operands
== 2 || i
.mask
);
5435 if (i
.reg_operands
== 2 && !i
.mask
)
5437 gas_assert (i
.types
[0].bitfield
.class == RegSIMD
);
5438 gas_assert (i
.types
[0].bitfield
.xmmword
5439 || i
.types
[0].bitfield
.ymmword
);
5440 gas_assert (i
.types
[2].bitfield
.class == RegSIMD
);
5441 gas_assert (i
.types
[2].bitfield
.xmmword
5442 || i
.types
[2].bitfield
.ymmword
);
5443 if (operand_check
== check_none
)
5445 if (register_number (i
.op
[0].regs
)
5446 != register_number (i
.index_reg
)
5447 && register_number (i
.op
[2].regs
)
5448 != register_number (i
.index_reg
)
5449 && register_number (i
.op
[0].regs
)
5450 != register_number (i
.op
[2].regs
))
5452 if (operand_check
== check_error
)
5454 i
.error
= invalid_vector_register_set
;
5457 as_warn (_("mask, index, and destination registers should be distinct"));
5459 else if (i
.reg_operands
== 1 && i
.mask
)
5461 if (i
.types
[1].bitfield
.class == RegSIMD
5462 && (i
.types
[1].bitfield
.xmmword
5463 || i
.types
[1].bitfield
.ymmword
5464 || i
.types
[1].bitfield
.zmmword
)
5465 && (register_number (i
.op
[1].regs
)
5466 == register_number (i
.index_reg
)))
5468 if (operand_check
== check_error
)
5470 i
.error
= invalid_vector_register_set
;
5473 if (operand_check
!= check_none
)
5474 as_warn (_("index and destination registers should be distinct"));
5479 /* Check if broadcast is supported by the instruction and is applied
5480 to the memory operand. */
5483 i386_operand_type type
, overlap
;
5485 /* Check if specified broadcast is supported in this instruction,
5486 and its broadcast bytes match the memory operand. */
5487 op
= i
.broadcast
->operand
;
5488 if (!t
->opcode_modifier
.broadcast
5489 || !(i
.flags
[op
] & Operand_Mem
)
5490 || (!i
.types
[op
].bitfield
.unspecified
5491 && !match_broadcast_size (t
, op
)))
5494 i
.error
= unsupported_broadcast
;
5498 i
.broadcast
->bytes
= ((1 << (t
->opcode_modifier
.broadcast
- 1))
5499 * i
.broadcast
->type
);
5500 operand_type_set (&type
, 0);
5501 switch (i
.broadcast
->bytes
)
5504 type
.bitfield
.word
= 1;
5507 type
.bitfield
.dword
= 1;
5510 type
.bitfield
.qword
= 1;
5513 type
.bitfield
.xmmword
= 1;
5516 type
.bitfield
.ymmword
= 1;
5519 type
.bitfield
.zmmword
= 1;
5525 overlap
= operand_type_and (type
, t
->operand_types
[op
]);
5526 if (operand_type_all_zero (&overlap
))
5529 if (t
->opcode_modifier
.checkregsize
)
5533 type
.bitfield
.baseindex
= 1;
5534 for (j
= 0; j
< i
.operands
; ++j
)
5537 && !operand_type_register_match(i
.types
[j
],
5538 t
->operand_types
[j
],
5540 t
->operand_types
[op
]))
5545 /* If broadcast is supported in this instruction, we need to check if
5546 operand of one-element size isn't specified without broadcast. */
5547 else if (t
->opcode_modifier
.broadcast
&& i
.mem_operands
)
5549 /* Find memory operand. */
5550 for (op
= 0; op
< i
.operands
; op
++)
5551 if (i
.flags
[op
] & Operand_Mem
)
5553 gas_assert (op
< i
.operands
);
5554 /* Check size of the memory operand. */
5555 if (match_broadcast_size (t
, op
))
5557 i
.error
= broadcast_needed
;
5562 op
= MAX_OPERANDS
- 1; /* Avoid uninitialized variable warning. */
5564 /* Check if requested masking is supported. */
5567 switch (t
->opcode_modifier
.masking
)
5571 case MERGING_MASKING
:
5572 if (i
.mask
->zeroing
)
5575 i
.error
= unsupported_masking
;
5579 case DYNAMIC_MASKING
:
5580 /* Memory destinations allow only merging masking. */
5581 if (i
.mask
->zeroing
&& i
.mem_operands
)
5583 /* Find memory operand. */
5584 for (op
= 0; op
< i
.operands
; op
++)
5585 if (i
.flags
[op
] & Operand_Mem
)
5587 gas_assert (op
< i
.operands
);
5588 if (op
== i
.operands
- 1)
5590 i
.error
= unsupported_masking
;
5600 /* Check if masking is applied to dest operand. */
5601 if (i
.mask
&& (i
.mask
->operand
!= (int) (i
.operands
- 1)))
5603 i
.error
= mask_not_on_destination
;
5610 if (!t
->opcode_modifier
.sae
5611 || (i
.rounding
->type
!= saeonly
&& !t
->opcode_modifier
.staticrounding
))
5613 i
.error
= unsupported_rc_sae
;
5616 /* If the instruction has several immediate operands and one of
5617 them is rounding, the rounding operand should be the last
5618 immediate operand. */
5619 if (i
.imm_operands
> 1
5620 && i
.rounding
->operand
!= (int) (i
.imm_operands
- 1))
5622 i
.error
= rc_sae_operand_not_last_imm
;
5627 /* Check vector Disp8 operand. */
5628 if (t
->opcode_modifier
.disp8memshift
5629 && i
.disp_encoding
!= disp_encoding_32bit
)
5632 i
.memshift
= t
->opcode_modifier
.broadcast
- 1;
5633 else if (t
->opcode_modifier
.disp8memshift
!= DISP8_SHIFT_VL
)
5634 i
.memshift
= t
->opcode_modifier
.disp8memshift
;
5637 const i386_operand_type
*type
= NULL
;
5640 for (op
= 0; op
< i
.operands
; op
++)
5641 if (i
.flags
[op
] & Operand_Mem
)
5643 if (t
->opcode_modifier
.evex
== EVEXLIG
)
5644 i
.memshift
= 2 + (i
.suffix
== QWORD_MNEM_SUFFIX
);
5645 else if (t
->operand_types
[op
].bitfield
.xmmword
5646 + t
->operand_types
[op
].bitfield
.ymmword
5647 + t
->operand_types
[op
].bitfield
.zmmword
<= 1)
5648 type
= &t
->operand_types
[op
];
5649 else if (!i
.types
[op
].bitfield
.unspecified
)
5650 type
= &i
.types
[op
];
5652 else if (i
.types
[op
].bitfield
.class == RegSIMD
5653 && t
->opcode_modifier
.evex
!= EVEXLIG
)
5655 if (i
.types
[op
].bitfield
.zmmword
)
5657 else if (i
.types
[op
].bitfield
.ymmword
&& i
.memshift
< 5)
5659 else if (i
.types
[op
].bitfield
.xmmword
&& i
.memshift
< 4)
5665 if (type
->bitfield
.zmmword
)
5667 else if (type
->bitfield
.ymmword
)
5669 else if (type
->bitfield
.xmmword
)
5673 /* For the check in fits_in_disp8(). */
5674 if (i
.memshift
== 0)
5678 for (op
= 0; op
< i
.operands
; op
++)
5679 if (operand_type_check (i
.types
[op
], disp
)
5680 && i
.op
[op
].disps
->X_op
== O_constant
)
5682 if (fits_in_disp8 (i
.op
[op
].disps
->X_add_number
))
5684 i
.types
[op
].bitfield
.disp8
= 1;
5687 i
.types
[op
].bitfield
.disp8
= 0;
5696 /* Check if operands are valid for the instruction. Update VEX
5700 VEX_check_operands (const insn_template
*t
)
5702 if (i
.vec_encoding
== vex_encoding_evex
)
5704 /* This instruction must be encoded with EVEX prefix. */
5705 if (!is_evex_encoding (t
))
5707 i
.error
= unsupported
;
5713 if (!t
->opcode_modifier
.vex
)
5715 /* This instruction template doesn't have VEX prefix. */
5716 if (i
.vec_encoding
!= vex_encoding_default
)
5718 i
.error
= unsupported
;
5724 /* Check the special Imm4 cases; must be the first operand. */
5725 if (t
->cpu_flags
.bitfield
.cpuxop
&& t
->operands
== 5)
5727 if (i
.op
[0].imms
->X_op
!= O_constant
5728 || !fits_in_imm4 (i
.op
[0].imms
->X_add_number
))
5734 /* Turn off Imm<N> so that update_imm won't complain. */
5735 operand_type_set (&i
.types
[0], 0);
5741 static const insn_template
*
5742 match_template (char mnem_suffix
)
5744 /* Points to template once we've found it. */
5745 const insn_template
*t
;
5746 i386_operand_type overlap0
, overlap1
, overlap2
, overlap3
;
5747 i386_operand_type overlap4
;
5748 unsigned int found_reverse_match
;
5749 i386_opcode_modifier suffix_check
;
5750 i386_operand_type operand_types
[MAX_OPERANDS
];
5751 int addr_prefix_disp
;
5752 unsigned int j
, size_match
, check_register
;
5753 enum i386_error specific_error
= 0;
5755 #if MAX_OPERANDS != 5
5756 # error "MAX_OPERANDS must be 5."
5759 found_reverse_match
= 0;
5760 addr_prefix_disp
= -1;
5762 /* Prepare for mnemonic suffix check. */
5763 memset (&suffix_check
, 0, sizeof (suffix_check
));
5764 switch (mnem_suffix
)
5766 case BYTE_MNEM_SUFFIX
:
5767 suffix_check
.no_bsuf
= 1;
5769 case WORD_MNEM_SUFFIX
:
5770 suffix_check
.no_wsuf
= 1;
5772 case SHORT_MNEM_SUFFIX
:
5773 suffix_check
.no_ssuf
= 1;
5775 case LONG_MNEM_SUFFIX
:
5776 suffix_check
.no_lsuf
= 1;
5778 case QWORD_MNEM_SUFFIX
:
5779 suffix_check
.no_qsuf
= 1;
5782 /* NB: In Intel syntax, normally we can check for memory operand
5783 size when there is no mnemonic suffix. But jmp and call have
5784 2 different encodings with Dword memory operand size, one with
5785 No_ldSuf and the other without. i.suffix is set to
5786 LONG_DOUBLE_MNEM_SUFFIX to skip the one with No_ldSuf. */
5787 if (i
.suffix
== LONG_DOUBLE_MNEM_SUFFIX
)
5788 suffix_check
.no_ldsuf
= 1;
5791 /* Must have right number of operands. */
5792 i
.error
= number_of_operands_mismatch
;
5794 for (t
= current_templates
->start
; t
< current_templates
->end
; t
++)
5796 addr_prefix_disp
= -1;
5797 found_reverse_match
= 0;
5799 if (i
.operands
!= t
->operands
)
5802 /* Check processor support. */
5803 i
.error
= unsupported
;
5804 if (cpu_flags_match (t
) != CPU_FLAGS_PERFECT_MATCH
)
5807 /* Check AT&T mnemonic. */
5808 i
.error
= unsupported_with_intel_mnemonic
;
5809 if (intel_mnemonic
&& t
->opcode_modifier
.attmnemonic
)
5812 /* Check AT&T/Intel syntax. */
5813 i
.error
= unsupported_syntax
;
5814 if ((intel_syntax
&& t
->opcode_modifier
.attsyntax
)
5815 || (!intel_syntax
&& t
->opcode_modifier
.intelsyntax
))
5818 /* Check Intel64/AMD64 ISA. */
5822 /* Default: Don't accept Intel64. */
5823 if (t
->opcode_modifier
.isa64
== INTEL64
)
5827 /* -mamd64: Don't accept Intel64 and Intel64 only. */
5828 if (t
->opcode_modifier
.isa64
>= INTEL64
)
5832 /* -mintel64: Don't accept AMD64. */
5833 if (t
->opcode_modifier
.isa64
== AMD64
)
5838 /* Check the suffix. */
5839 i
.error
= invalid_instruction_suffix
;
5840 if ((t
->opcode_modifier
.no_bsuf
&& suffix_check
.no_bsuf
)
5841 || (t
->opcode_modifier
.no_wsuf
&& suffix_check
.no_wsuf
)
5842 || (t
->opcode_modifier
.no_lsuf
&& suffix_check
.no_lsuf
)
5843 || (t
->opcode_modifier
.no_ssuf
&& suffix_check
.no_ssuf
)
5844 || (t
->opcode_modifier
.no_qsuf
&& suffix_check
.no_qsuf
)
5845 || (t
->opcode_modifier
.no_ldsuf
&& suffix_check
.no_ldsuf
))
5848 size_match
= operand_size_match (t
);
5852 /* This is intentionally not
5854 if (i.jumpabsolute != (t->opcode_modifier.jump == JUMP_ABSOLUTE))
5856 as the case of a missing * on the operand is accepted (perhaps with
5857 a warning, issued further down). */
5858 if (i
.jumpabsolute
&& t
->opcode_modifier
.jump
!= JUMP_ABSOLUTE
)
5860 i
.error
= operand_type_mismatch
;
5864 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5865 operand_types
[j
] = t
->operand_types
[j
];
5867 /* In general, don't allow 64-bit operands in 32-bit mode. */
5868 if (i
.suffix
== QWORD_MNEM_SUFFIX
5869 && flag_code
!= CODE_64BIT
5871 ? (!t
->opcode_modifier
.ignoresize
5872 && !t
->opcode_modifier
.broadcast
5873 && !intel_float_operand (t
->name
))
5874 : intel_float_operand (t
->name
) != 2)
5875 && ((operand_types
[0].bitfield
.class != RegMMX
5876 && operand_types
[0].bitfield
.class != RegSIMD
)
5877 || (operand_types
[t
->operands
> 1].bitfield
.class != RegMMX
5878 && operand_types
[t
->operands
> 1].bitfield
.class != RegSIMD
))
5879 && (t
->base_opcode
!= 0x0fc7
5880 || t
->extension_opcode
!= 1 /* cmpxchg8b */))
5883 /* In general, don't allow 32-bit operands on pre-386. */
5884 else if (i
.suffix
== LONG_MNEM_SUFFIX
5885 && !cpu_arch_flags
.bitfield
.cpui386
5887 ? (!t
->opcode_modifier
.ignoresize
5888 && !intel_float_operand (t
->name
))
5889 : intel_float_operand (t
->name
) != 2)
5890 && ((operand_types
[0].bitfield
.class != RegMMX
5891 && operand_types
[0].bitfield
.class != RegSIMD
)
5892 || (operand_types
[t
->operands
> 1].bitfield
.class != RegMMX
5893 && operand_types
[t
->operands
> 1].bitfield
.class
5897 /* Do not verify operands when there are none. */
5901 /* We've found a match; break out of loop. */
5905 if (!t
->opcode_modifier
.jump
5906 || t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)
5908 /* There should be only one Disp operand. */
5909 for (j
= 0; j
< MAX_OPERANDS
; j
++)
5910 if (operand_type_check (operand_types
[j
], disp
))
5912 if (j
< MAX_OPERANDS
)
5914 bfd_boolean override
= (i
.prefix
[ADDR_PREFIX
] != 0);
5916 addr_prefix_disp
= j
;
5918 /* Address size prefix will turn Disp64/Disp32S/Disp32/Disp16
5919 operand into Disp32/Disp32/Disp16/Disp32 operand. */
5923 override
= !override
;
5926 if (operand_types
[j
].bitfield
.disp32
5927 && operand_types
[j
].bitfield
.disp16
)
5929 operand_types
[j
].bitfield
.disp16
= override
;
5930 operand_types
[j
].bitfield
.disp32
= !override
;
5932 operand_types
[j
].bitfield
.disp32s
= 0;
5933 operand_types
[j
].bitfield
.disp64
= 0;
5937 if (operand_types
[j
].bitfield
.disp32s
5938 || operand_types
[j
].bitfield
.disp64
)
5940 operand_types
[j
].bitfield
.disp64
&= !override
;
5941 operand_types
[j
].bitfield
.disp32s
&= !override
;
5942 operand_types
[j
].bitfield
.disp32
= override
;
5944 operand_types
[j
].bitfield
.disp16
= 0;
5950 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
5951 if (i
.reloc
[0] == BFD_RELOC_386_GOT32
&& t
->base_opcode
== 0xa0)
5954 /* We check register size if needed. */
5955 if (t
->opcode_modifier
.checkregsize
)
5957 check_register
= (1 << t
->operands
) - 1;
5959 check_register
&= ~(1 << i
.broadcast
->operand
);
5964 overlap0
= operand_type_and (i
.types
[0], operand_types
[0]);
5965 switch (t
->operands
)
5968 if (!operand_type_match (overlap0
, i
.types
[0]))
5972 /* xchg %eax, %eax is a special case. It is an alias for nop
5973 only in 32bit mode and we can use opcode 0x90. In 64bit
5974 mode, we can't use 0x90 for xchg %eax, %eax since it should
5975 zero-extend %eax to %rax. */
5976 if (flag_code
== CODE_64BIT
5977 && t
->base_opcode
== 0x90
5978 && i
.types
[0].bitfield
.instance
== Accum
5979 && i
.types
[0].bitfield
.dword
5980 && i
.types
[1].bitfield
.instance
== Accum
5981 && i
.types
[1].bitfield
.dword
)
5983 /* xrelease mov %eax, <disp> is another special case. It must not
5984 match the accumulator-only encoding of mov. */
5985 if (flag_code
!= CODE_64BIT
5987 && t
->base_opcode
== 0xa0
5988 && i
.types
[0].bitfield
.instance
== Accum
5989 && (i
.flags
[1] & Operand_Mem
))
5994 if (!(size_match
& MATCH_STRAIGHT
))
5996 /* Reverse direction of operands if swapping is possible in the first
5997 place (operands need to be symmetric) and
5998 - the load form is requested, and the template is a store form,
5999 - the store form is requested, and the template is a load form,
6000 - the non-default (swapped) form is requested. */
6001 overlap1
= operand_type_and (operand_types
[0], operand_types
[1]);
6002 if (t
->opcode_modifier
.d
&& i
.reg_operands
== i
.operands
6003 && !operand_type_all_zero (&overlap1
))
6004 switch (i
.dir_encoding
)
6006 case dir_encoding_load
:
6007 if (operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6008 || t
->opcode_modifier
.regmem
)
6012 case dir_encoding_store
:
6013 if (!operand_type_check (operand_types
[i
.operands
- 1], anymem
)
6014 && !t
->opcode_modifier
.regmem
)
6018 case dir_encoding_swap
:
6021 case dir_encoding_default
:
6024 /* If we want store form, we skip the current load. */
6025 if ((i
.dir_encoding
== dir_encoding_store
6026 || i
.dir_encoding
== dir_encoding_swap
)
6027 && i
.mem_operands
== 0
6028 && t
->opcode_modifier
.load
)
6033 overlap1
= operand_type_and (i
.types
[1], operand_types
[1]);
6034 if (!operand_type_match (overlap0
, i
.types
[0])
6035 || !operand_type_match (overlap1
, i
.types
[1])
6036 || ((check_register
& 3) == 3
6037 && !operand_type_register_match (i
.types
[0],
6042 /* Check if other direction is valid ... */
6043 if (!t
->opcode_modifier
.d
)
6047 if (!(size_match
& MATCH_REVERSE
))
6049 /* Try reversing direction of operands. */
6050 overlap0
= operand_type_and (i
.types
[0], operand_types
[i
.operands
- 1]);
6051 overlap1
= operand_type_and (i
.types
[i
.operands
- 1], operand_types
[0]);
6052 if (!operand_type_match (overlap0
, i
.types
[0])
6053 || !operand_type_match (overlap1
, i
.types
[i
.operands
- 1])
6055 && !operand_type_register_match (i
.types
[0],
6056 operand_types
[i
.operands
- 1],
6057 i
.types
[i
.operands
- 1],
6060 /* Does not match either direction. */
6063 /* found_reverse_match holds which of D or FloatR
6065 if (!t
->opcode_modifier
.d
)
6066 found_reverse_match
= 0;
6067 else if (operand_types
[0].bitfield
.tbyte
)
6068 found_reverse_match
= Opcode_FloatD
;
6069 else if (operand_types
[0].bitfield
.xmmword
6070 || operand_types
[i
.operands
- 1].bitfield
.xmmword
6071 || operand_types
[0].bitfield
.class == RegMMX
6072 || operand_types
[i
.operands
- 1].bitfield
.class == RegMMX
6073 || is_any_vex_encoding(t
))
6074 found_reverse_match
= (t
->base_opcode
& 0xee) != 0x6e
6075 ? Opcode_SIMD_FloatD
: Opcode_SIMD_IntD
;
6077 found_reverse_match
= Opcode_D
;
6078 if (t
->opcode_modifier
.floatr
)
6079 found_reverse_match
|= Opcode_FloatR
;
6083 /* Found a forward 2 operand match here. */
6084 switch (t
->operands
)
6087 overlap4
= operand_type_and (i
.types
[4],
6091 overlap3
= operand_type_and (i
.types
[3],
6095 overlap2
= operand_type_and (i
.types
[2],
6100 switch (t
->operands
)
6103 if (!operand_type_match (overlap4
, i
.types
[4])
6104 || !operand_type_register_match (i
.types
[3],
6111 if (!operand_type_match (overlap3
, i
.types
[3])
6112 || ((check_register
& 0xa) == 0xa
6113 && !operand_type_register_match (i
.types
[1],
6117 || ((check_register
& 0xc) == 0xc
6118 && !operand_type_register_match (i
.types
[2],
6125 /* Here we make use of the fact that there are no
6126 reverse match 3 operand instructions. */
6127 if (!operand_type_match (overlap2
, i
.types
[2])
6128 || ((check_register
& 5) == 5
6129 && !operand_type_register_match (i
.types
[0],
6133 || ((check_register
& 6) == 6
6134 && !operand_type_register_match (i
.types
[1],
6142 /* Found either forward/reverse 2, 3 or 4 operand match here:
6143 slip through to break. */
6146 /* Check if vector and VEX operands are valid. */
6147 if (check_VecOperands (t
) || VEX_check_operands (t
))
6149 specific_error
= i
.error
;
6153 /* We've found a match; break out of loop. */
6157 if (t
== current_templates
->end
)
6159 /* We found no match. */
6160 const char *err_msg
;
6161 switch (specific_error
? specific_error
: i
.error
)
6165 case operand_size_mismatch
:
6166 err_msg
= _("operand size mismatch");
6168 case operand_type_mismatch
:
6169 err_msg
= _("operand type mismatch");
6171 case register_type_mismatch
:
6172 err_msg
= _("register type mismatch");
6174 case number_of_operands_mismatch
:
6175 err_msg
= _("number of operands mismatch");
6177 case invalid_instruction_suffix
:
6178 err_msg
= _("invalid instruction suffix");
6181 err_msg
= _("constant doesn't fit in 4 bits");
6183 case unsupported_with_intel_mnemonic
:
6184 err_msg
= _("unsupported with Intel mnemonic");
6186 case unsupported_syntax
:
6187 err_msg
= _("unsupported syntax");
6190 as_bad (_("unsupported instruction `%s'"),
6191 current_templates
->start
->name
);
6193 case invalid_vsib_address
:
6194 err_msg
= _("invalid VSIB address");
6196 case invalid_vector_register_set
:
6197 err_msg
= _("mask, index, and destination registers must be distinct");
6199 case unsupported_vector_index_register
:
6200 err_msg
= _("unsupported vector index register");
6202 case unsupported_broadcast
:
6203 err_msg
= _("unsupported broadcast");
6205 case broadcast_needed
:
6206 err_msg
= _("broadcast is needed for operand of such type");
6208 case unsupported_masking
:
6209 err_msg
= _("unsupported masking");
6211 case mask_not_on_destination
:
6212 err_msg
= _("mask not on destination operand");
6214 case no_default_mask
:
6215 err_msg
= _("default mask isn't allowed");
6217 case unsupported_rc_sae
:
6218 err_msg
= _("unsupported static rounding/sae");
6220 case rc_sae_operand_not_last_imm
:
6222 err_msg
= _("RC/SAE operand must precede immediate operands");
6224 err_msg
= _("RC/SAE operand must follow immediate operands");
6226 case invalid_register_operand
:
6227 err_msg
= _("invalid register operand");
6230 as_bad (_("%s for `%s'"), err_msg
,
6231 current_templates
->start
->name
);
6235 if (!quiet_warnings
)
6238 && (i
.jumpabsolute
!= (t
->opcode_modifier
.jump
== JUMP_ABSOLUTE
)))
6239 as_warn (_("indirect %s without `*'"), t
->name
);
6241 if (t
->opcode_modifier
.isprefix
6242 && t
->opcode_modifier
.ignoresize
)
6244 /* Warn them that a data or address size prefix doesn't
6245 affect assembly of the next line of code. */
6246 as_warn (_("stand-alone `%s' prefix"), t
->name
);
6250 /* Copy the template we found. */
6253 if (addr_prefix_disp
!= -1)
6254 i
.tm
.operand_types
[addr_prefix_disp
]
6255 = operand_types
[addr_prefix_disp
];
6257 if (found_reverse_match
)
6259 /* If we found a reverse match we must alter the opcode direction
6260 bit and clear/flip the regmem modifier one. found_reverse_match
6261 holds bits to change (different for int & float insns). */
6263 i
.tm
.base_opcode
^= found_reverse_match
;
6265 i
.tm
.operand_types
[0] = operand_types
[i
.operands
- 1];
6266 i
.tm
.operand_types
[i
.operands
- 1] = operand_types
[0];
6268 /* Certain SIMD insns have their load forms specified in the opcode
6269 table, and hence we need to _set_ RegMem instead of clearing it.
6270 We need to avoid setting the bit though on insns like KMOVW. */
6271 i
.tm
.opcode_modifier
.regmem
6272 = i
.tm
.opcode_modifier
.modrm
&& i
.tm
.opcode_modifier
.d
6273 && i
.tm
.operands
> 2U - i
.tm
.opcode_modifier
.sse2avx
6274 && !i
.tm
.opcode_modifier
.regmem
;
6283 unsigned int es_op
= i
.tm
.opcode_modifier
.isstring
- IS_STRING_ES_OP0
;
6284 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.baseindex
? es_op
: 0;
6286 if (i
.seg
[op
] != NULL
&& i
.seg
[op
] != &es
)
6288 as_bad (_("`%s' operand %u must use `%ses' segment"),
6290 intel_syntax
? i
.tm
.operands
- es_op
: es_op
+ 1,
6295 /* There's only ever one segment override allowed per instruction.
6296 This instruction possibly has a legal segment override on the
6297 second operand, so copy the segment to where non-string
6298 instructions store it, allowing common code. */
6299 i
.seg
[op
] = i
.seg
[1];
6305 process_suffix (void)
6307 /* If matched instruction specifies an explicit instruction mnemonic
6309 if (i
.tm
.opcode_modifier
.size
== SIZE16
)
6310 i
.suffix
= WORD_MNEM_SUFFIX
;
6311 else if (i
.tm
.opcode_modifier
.size
== SIZE32
)
6312 i
.suffix
= LONG_MNEM_SUFFIX
;
6313 else if (i
.tm
.opcode_modifier
.size
== SIZE64
)
6314 i
.suffix
= QWORD_MNEM_SUFFIX
;
6315 else if (i
.reg_operands
6316 && (i
.operands
> 1 || i
.types
[0].bitfield
.class == Reg
))
6318 /* If there's no instruction mnemonic suffix we try to invent one
6319 based on GPR operands. */
6322 /* We take i.suffix from the last register operand specified,
6323 Destination register type is more significant than source
6324 register type. crc32 in SSE4.2 prefers source register
6326 unsigned int op
= i
.tm
.base_opcode
!= 0xf20f38f0 ? i
.operands
: 1;
6329 if (i
.tm
.operand_types
[op
].bitfield
.instance
== InstanceNone
6330 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6332 if (i
.types
[op
].bitfield
.class != Reg
)
6334 if (i
.types
[op
].bitfield
.byte
)
6335 i
.suffix
= BYTE_MNEM_SUFFIX
;
6336 else if (i
.types
[op
].bitfield
.word
)
6337 i
.suffix
= WORD_MNEM_SUFFIX
;
6338 else if (i
.types
[op
].bitfield
.dword
)
6339 i
.suffix
= LONG_MNEM_SUFFIX
;
6340 else if (i
.types
[op
].bitfield
.qword
)
6341 i
.suffix
= QWORD_MNEM_SUFFIX
;
6347 else if (i
.suffix
== BYTE_MNEM_SUFFIX
)
6350 && i
.tm
.opcode_modifier
.ignoresize
6351 && i
.tm
.opcode_modifier
.no_bsuf
)
6353 else if (!check_byte_reg ())
6356 else if (i
.suffix
== LONG_MNEM_SUFFIX
)
6359 && i
.tm
.opcode_modifier
.ignoresize
6360 && i
.tm
.opcode_modifier
.no_lsuf
6361 && !i
.tm
.opcode_modifier
.todword
6362 && !i
.tm
.opcode_modifier
.toqword
)
6364 else if (!check_long_reg ())
6367 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
6370 && i
.tm
.opcode_modifier
.ignoresize
6371 && i
.tm
.opcode_modifier
.no_qsuf
6372 && !i
.tm
.opcode_modifier
.todword
6373 && !i
.tm
.opcode_modifier
.toqword
)
6375 else if (!check_qword_reg ())
6378 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
6381 && i
.tm
.opcode_modifier
.ignoresize
6382 && i
.tm
.opcode_modifier
.no_wsuf
)
6384 else if (!check_word_reg ())
6387 else if (intel_syntax
&& i
.tm
.opcode_modifier
.ignoresize
)
6388 /* Do nothing if the instruction is going to ignore the prefix. */
6393 else if (i
.tm
.opcode_modifier
.defaultsize
&& !i
.suffix
)
6395 i
.suffix
= stackop_size
;
6396 if (stackop_size
== LONG_MNEM_SUFFIX
)
6398 /* stackop_size is set to LONG_MNEM_SUFFIX for the
6399 .code16gcc directive to support 16-bit mode with
6400 32-bit address. For IRET without a suffix, generate
6401 16-bit IRET (opcode 0xcf) to return from an interrupt
6403 if (i
.tm
.base_opcode
== 0xcf)
6405 i
.suffix
= WORD_MNEM_SUFFIX
;
6406 as_warn (_("generating 16-bit `iret' for .code16gcc directive"));
6408 /* Warn about changed behavior for segment register push/pop. */
6409 else if ((i
.tm
.base_opcode
| 1) == 0x07)
6410 as_warn (_("generating 32-bit `%s', unlike earlier gas versions"),
6415 && (i
.tm
.opcode_modifier
.jump
== JUMP_ABSOLUTE
6416 || i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
6417 || i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
6418 || (i
.tm
.base_opcode
== 0x0f01 /* [ls][gi]dt */
6419 && i
.tm
.extension_opcode
<= 3)))
6424 if (!i
.tm
.opcode_modifier
.no_qsuf
)
6426 i
.suffix
= QWORD_MNEM_SUFFIX
;
6431 if (!i
.tm
.opcode_modifier
.no_lsuf
)
6432 i
.suffix
= LONG_MNEM_SUFFIX
;
6435 if (!i
.tm
.opcode_modifier
.no_wsuf
)
6436 i
.suffix
= WORD_MNEM_SUFFIX
;
6442 && (!i
.tm
.opcode_modifier
.defaultsize
6443 /* Also cover lret/retf/iret in 64-bit mode. */
6444 || (flag_code
== CODE_64BIT
6445 && !i
.tm
.opcode_modifier
.no_lsuf
6446 && !i
.tm
.opcode_modifier
.no_qsuf
))
6447 && !i
.tm
.opcode_modifier
.ignoresize
6448 /* Accept FLDENV et al without suffix. */
6449 && (i
.tm
.opcode_modifier
.no_ssuf
|| i
.tm
.opcode_modifier
.floatmf
))
6451 unsigned int suffixes
;
6453 suffixes
= !i
.tm
.opcode_modifier
.no_bsuf
;
6454 if (!i
.tm
.opcode_modifier
.no_wsuf
)
6456 if (!i
.tm
.opcode_modifier
.no_lsuf
)
6458 if (!i
.tm
.opcode_modifier
.no_ldsuf
)
6460 if (!i
.tm
.opcode_modifier
.no_ssuf
)
6462 if (flag_code
== CODE_64BIT
&& !i
.tm
.opcode_modifier
.no_qsuf
)
6465 /* Are multiple suffixes allowed? */
6466 if (suffixes
& (suffixes
- 1))
6469 && (!i
.tm
.opcode_modifier
.defaultsize
6470 || operand_check
== check_error
))
6472 as_bad (_("ambiguous operand size for `%s'"), i
.tm
.name
);
6475 if (operand_check
== check_error
)
6477 as_bad (_("no instruction mnemonic suffix given and "
6478 "no register operands; can't size `%s'"), i
.tm
.name
);
6481 if (operand_check
== check_warning
)
6482 as_warn (_("%s; using default for `%s'"),
6484 ? _("ambiguous operand size")
6485 : _("no instruction mnemonic suffix given and "
6486 "no register operands"),
6489 if (i
.tm
.opcode_modifier
.floatmf
)
6490 i
.suffix
= SHORT_MNEM_SUFFIX
;
6491 else if (flag_code
== CODE_16BIT
)
6492 i
.suffix
= WORD_MNEM_SUFFIX
;
6493 else if (!i
.tm
.opcode_modifier
.no_lsuf
)
6494 i
.suffix
= LONG_MNEM_SUFFIX
;
6496 i
.suffix
= QWORD_MNEM_SUFFIX
;
6500 if (!i
.tm
.opcode_modifier
.modrm
&& i
.reg_operands
&& i
.tm
.operands
< 3)
6501 i
.short_form
= (i
.tm
.operand_types
[0].bitfield
.class == Reg
)
6502 != (i
.tm
.operand_types
[1].bitfield
.class == Reg
);
6504 /* Change the opcode based on the operand size given by i.suffix. */
6507 /* Size floating point instruction. */
6508 case LONG_MNEM_SUFFIX
:
6509 if (i
.tm
.opcode_modifier
.floatmf
)
6511 i
.tm
.base_opcode
^= 4;
6515 case WORD_MNEM_SUFFIX
:
6516 case QWORD_MNEM_SUFFIX
:
6517 /* It's not a byte, select word/dword operation. */
6518 if (i
.tm
.opcode_modifier
.w
)
6521 i
.tm
.base_opcode
|= 8;
6523 i
.tm
.base_opcode
|= 1;
6526 case SHORT_MNEM_SUFFIX
:
6527 /* Now select between word & dword operations via the operand
6528 size prefix, except for instructions that will ignore this
6530 if (i
.reg_operands
> 0
6531 && i
.types
[0].bitfield
.class == Reg
6532 && i
.tm
.opcode_modifier
.addrprefixopreg
6533 && (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
6534 || i
.operands
== 1))
6536 /* The address size override prefix changes the size of the
6538 if ((flag_code
== CODE_32BIT
6539 && i
.op
[0].regs
->reg_type
.bitfield
.word
)
6540 || (flag_code
!= CODE_32BIT
6541 && i
.op
[0].regs
->reg_type
.bitfield
.dword
))
6542 if (!add_prefix (ADDR_PREFIX_OPCODE
))
6545 else if (i
.suffix
!= QWORD_MNEM_SUFFIX
6546 && !i
.tm
.opcode_modifier
.ignoresize
6547 && !i
.tm
.opcode_modifier
.floatmf
6548 && !is_any_vex_encoding (&i
.tm
)
6549 && ((i
.suffix
== LONG_MNEM_SUFFIX
) == (flag_code
== CODE_16BIT
)
6550 || (flag_code
== CODE_64BIT
6551 && i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)))
6553 unsigned int prefix
= DATA_PREFIX_OPCODE
;
6555 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
) /* jcxz, loop */
6556 prefix
= ADDR_PREFIX_OPCODE
;
6558 if (!add_prefix (prefix
))
6562 /* Set mode64 for an operand. */
6563 if (i
.suffix
== QWORD_MNEM_SUFFIX
6564 && flag_code
== CODE_64BIT
6565 && !i
.tm
.opcode_modifier
.norex64
6566 /* Special case for xchg %rax,%rax. It is NOP and doesn't
6568 && ! (i
.operands
== 2
6569 && i
.tm
.base_opcode
== 0x90
6570 && i
.tm
.extension_opcode
== None
6571 && i
.types
[0].bitfield
.instance
== Accum
6572 && i
.types
[0].bitfield
.qword
6573 && i
.types
[1].bitfield
.instance
== Accum
6574 && i
.types
[1].bitfield
.qword
))
6580 if (i
.reg_operands
!= 0
6582 && i
.tm
.opcode_modifier
.addrprefixopreg
6583 && i
.tm
.operand_types
[0].bitfield
.instance
!= Accum
)
6585 /* Check invalid register operand when the address size override
6586 prefix changes the size of register operands. */
6588 enum { need_word
, need_dword
, need_qword
} need
;
6590 if (flag_code
== CODE_32BIT
)
6591 need
= i
.prefix
[ADDR_PREFIX
] ? need_word
: need_dword
;
6594 if (i
.prefix
[ADDR_PREFIX
])
6597 need
= flag_code
== CODE_64BIT
? need_qword
: need_word
;
6600 for (op
= 0; op
< i
.operands
; op
++)
6601 if (i
.types
[op
].bitfield
.class == Reg
6602 && ((need
== need_word
6603 && !i
.op
[op
].regs
->reg_type
.bitfield
.word
)
6604 || (need
== need_dword
6605 && !i
.op
[op
].regs
->reg_type
.bitfield
.dword
)
6606 || (need
== need_qword
6607 && !i
.op
[op
].regs
->reg_type
.bitfield
.qword
)))
6609 as_bad (_("invalid register operand size for `%s'"),
6619 check_byte_reg (void)
6623 for (op
= i
.operands
; --op
>= 0;)
6625 /* Skip non-register operands. */
6626 if (i
.types
[op
].bitfield
.class != Reg
)
6629 /* If this is an eight bit register, it's OK. If it's the 16 or
6630 32 bit version of an eight bit register, we will just use the
6631 low portion, and that's OK too. */
6632 if (i
.types
[op
].bitfield
.byte
)
6635 /* I/O port address operands are OK too. */
6636 if (i
.tm
.operand_types
[op
].bitfield
.instance
== RegD
6637 && i
.tm
.operand_types
[op
].bitfield
.word
)
6640 /* crc32 doesn't generate this warning. */
6641 if (i
.tm
.base_opcode
== 0xf20f38f0)
6644 if ((i
.types
[op
].bitfield
.word
6645 || i
.types
[op
].bitfield
.dword
6646 || i
.types
[op
].bitfield
.qword
)
6647 && i
.op
[op
].regs
->reg_num
< 4
6648 /* Prohibit these changes in 64bit mode, since the lowering
6649 would be more complicated. */
6650 && flag_code
!= CODE_64BIT
)
6652 #if REGISTER_WARNINGS
6653 if (!quiet_warnings
)
6654 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
6656 (i
.op
[op
].regs
+ (i
.types
[op
].bitfield
.word
6657 ? REGNAM_AL
- REGNAM_AX
6658 : REGNAM_AL
- REGNAM_EAX
))->reg_name
,
6660 i
.op
[op
].regs
->reg_name
,
6665 /* Any other register is bad. */
6666 if (i
.types
[op
].bitfield
.class == Reg
6667 || i
.types
[op
].bitfield
.class == RegMMX
6668 || i
.types
[op
].bitfield
.class == RegSIMD
6669 || i
.types
[op
].bitfield
.class == SReg
6670 || i
.types
[op
].bitfield
.class == RegCR
6671 || i
.types
[op
].bitfield
.class == RegDR
6672 || i
.types
[op
].bitfield
.class == RegTR
)
6674 as_bad (_("`%s%s' not allowed with `%s%c'"),
6676 i
.op
[op
].regs
->reg_name
,
6686 check_long_reg (void)
6690 for (op
= i
.operands
; --op
>= 0;)
6691 /* Skip non-register operands. */
6692 if (i
.types
[op
].bitfield
.class != Reg
)
6694 /* Reject eight bit registers, except where the template requires
6695 them. (eg. movzb) */
6696 else if (i
.types
[op
].bitfield
.byte
6697 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6698 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6699 && (i
.tm
.operand_types
[op
].bitfield
.word
6700 || i
.tm
.operand_types
[op
].bitfield
.dword
))
6702 as_bad (_("`%s%s' not allowed with `%s%c'"),
6704 i
.op
[op
].regs
->reg_name
,
6709 /* Error if the e prefix on a general reg is missing. */
6710 else if (i
.types
[op
].bitfield
.word
6711 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6712 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6713 && i
.tm
.operand_types
[op
].bitfield
.dword
)
6715 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6716 register_prefix
, i
.op
[op
].regs
->reg_name
,
6720 /* Warn if the r prefix on a general reg is present. */
6721 else if (i
.types
[op
].bitfield
.qword
6722 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6723 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6724 && i
.tm
.operand_types
[op
].bitfield
.dword
)
6727 && (i
.tm
.opcode_modifier
.toqword
6728 /* Also convert to QWORD for MOVSXD. */
6729 || i
.tm
.base_opcode
== 0x63)
6730 && i
.types
[0].bitfield
.class != RegSIMD
)
6732 /* Convert to QWORD. We want REX byte. */
6733 i
.suffix
= QWORD_MNEM_SUFFIX
;
6737 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6738 register_prefix
, i
.op
[op
].regs
->reg_name
,
6747 check_qword_reg (void)
6751 for (op
= i
.operands
; --op
>= 0; )
6752 /* Skip non-register operands. */
6753 if (i
.types
[op
].bitfield
.class != Reg
)
6755 /* Reject eight bit registers, except where the template requires
6756 them. (eg. movzb) */
6757 else if (i
.types
[op
].bitfield
.byte
6758 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6759 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6760 && (i
.tm
.operand_types
[op
].bitfield
.word
6761 || i
.tm
.operand_types
[op
].bitfield
.dword
))
6763 as_bad (_("`%s%s' not allowed with `%s%c'"),
6765 i
.op
[op
].regs
->reg_name
,
6770 /* Warn if the r prefix on a general reg is missing. */
6771 else if ((i
.types
[op
].bitfield
.word
6772 || i
.types
[op
].bitfield
.dword
)
6773 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6774 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6775 && i
.tm
.operand_types
[op
].bitfield
.qword
)
6777 /* Prohibit these changes in the 64bit mode, since the
6778 lowering is more complicated. */
6780 && i
.tm
.opcode_modifier
.todword
6781 && i
.types
[0].bitfield
.class != RegSIMD
)
6783 /* Convert to DWORD. We don't want REX byte. */
6784 i
.suffix
= LONG_MNEM_SUFFIX
;
6788 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6789 register_prefix
, i
.op
[op
].regs
->reg_name
,
6798 check_word_reg (void)
6801 for (op
= i
.operands
; --op
>= 0;)
6802 /* Skip non-register operands. */
6803 if (i
.types
[op
].bitfield
.class != Reg
)
6805 /* Reject eight bit registers, except where the template requires
6806 them. (eg. movzb) */
6807 else if (i
.types
[op
].bitfield
.byte
6808 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6809 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6810 && (i
.tm
.operand_types
[op
].bitfield
.word
6811 || i
.tm
.operand_types
[op
].bitfield
.dword
))
6813 as_bad (_("`%s%s' not allowed with `%s%c'"),
6815 i
.op
[op
].regs
->reg_name
,
6820 /* Warn if the e or r prefix on a general reg is present. */
6821 else if ((!quiet_warnings
|| flag_code
== CODE_64BIT
)
6822 && (i
.types
[op
].bitfield
.dword
6823 || i
.types
[op
].bitfield
.qword
)
6824 && (i
.tm
.operand_types
[op
].bitfield
.class == Reg
6825 || i
.tm
.operand_types
[op
].bitfield
.instance
== Accum
)
6826 && i
.tm
.operand_types
[op
].bitfield
.word
)
6828 /* Prohibit these changes in the 64bit mode, since the
6829 lowering is more complicated. */
6830 if (flag_code
== CODE_64BIT
)
6832 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
6833 register_prefix
, i
.op
[op
].regs
->reg_name
,
6837 #if REGISTER_WARNINGS
6838 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
6840 (i
.op
[op
].regs
+ REGNAM_AX
- REGNAM_EAX
)->reg_name
,
6841 register_prefix
, i
.op
[op
].regs
->reg_name
, i
.suffix
);
6848 update_imm (unsigned int j
)
6850 i386_operand_type overlap
= i
.types
[j
];
6851 if ((overlap
.bitfield
.imm8
6852 || overlap
.bitfield
.imm8s
6853 || overlap
.bitfield
.imm16
6854 || overlap
.bitfield
.imm32
6855 || overlap
.bitfield
.imm32s
6856 || overlap
.bitfield
.imm64
)
6857 && !operand_type_equal (&overlap
, &imm8
)
6858 && !operand_type_equal (&overlap
, &imm8s
)
6859 && !operand_type_equal (&overlap
, &imm16
)
6860 && !operand_type_equal (&overlap
, &imm32
)
6861 && !operand_type_equal (&overlap
, &imm32s
)
6862 && !operand_type_equal (&overlap
, &imm64
))
6866 i386_operand_type temp
;
6868 operand_type_set (&temp
, 0);
6869 if (i
.suffix
== BYTE_MNEM_SUFFIX
)
6871 temp
.bitfield
.imm8
= overlap
.bitfield
.imm8
;
6872 temp
.bitfield
.imm8s
= overlap
.bitfield
.imm8s
;
6874 else if (i
.suffix
== WORD_MNEM_SUFFIX
)
6875 temp
.bitfield
.imm16
= overlap
.bitfield
.imm16
;
6876 else if (i
.suffix
== QWORD_MNEM_SUFFIX
)
6878 temp
.bitfield
.imm64
= overlap
.bitfield
.imm64
;
6879 temp
.bitfield
.imm32s
= overlap
.bitfield
.imm32s
;
6882 temp
.bitfield
.imm32
= overlap
.bitfield
.imm32
;
6885 else if (operand_type_equal (&overlap
, &imm16_32_32s
)
6886 || operand_type_equal (&overlap
, &imm16_32
)
6887 || operand_type_equal (&overlap
, &imm16_32s
))
6889 if ((flag_code
== CODE_16BIT
) ^ (i
.prefix
[DATA_PREFIX
] != 0))
6894 if (!operand_type_equal (&overlap
, &imm8
)
6895 && !operand_type_equal (&overlap
, &imm8s
)
6896 && !operand_type_equal (&overlap
, &imm16
)
6897 && !operand_type_equal (&overlap
, &imm32
)
6898 && !operand_type_equal (&overlap
, &imm32s
)
6899 && !operand_type_equal (&overlap
, &imm64
))
6901 as_bad (_("no instruction mnemonic suffix given; "
6902 "can't determine immediate size"));
6906 i
.types
[j
] = overlap
;
6916 /* Update the first 2 immediate operands. */
6917 n
= i
.operands
> 2 ? 2 : i
.operands
;
6920 for (j
= 0; j
< n
; j
++)
6921 if (update_imm (j
) == 0)
6924 /* The 3rd operand can't be immediate operand. */
6925 gas_assert (operand_type_check (i
.types
[2], imm
) == 0);
6932 process_operands (void)
6934 /* Default segment register this instruction will use for memory
6935 accesses. 0 means unknown. This is only for optimizing out
6936 unnecessary segment overrides. */
6937 const seg_entry
*default_seg
= 0;
6939 if (i
.tm
.opcode_modifier
.sse2avx
&& i
.tm
.opcode_modifier
.vexvvvv
)
6941 unsigned int dupl
= i
.operands
;
6942 unsigned int dest
= dupl
- 1;
6945 /* The destination must be an xmm register. */
6946 gas_assert (i
.reg_operands
6947 && MAX_OPERANDS
> dupl
6948 && operand_type_equal (&i
.types
[dest
], ®xmm
));
6950 if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
6951 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
6953 if (i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
)
6955 /* Keep xmm0 for instructions with VEX prefix and 3
6957 i
.tm
.operand_types
[0].bitfield
.instance
= InstanceNone
;
6958 i
.tm
.operand_types
[0].bitfield
.class = RegSIMD
;
6963 /* We remove the first xmm0 and keep the number of
6964 operands unchanged, which in fact duplicates the
6966 for (j
= 1; j
< i
.operands
; j
++)
6968 i
.op
[j
- 1] = i
.op
[j
];
6969 i
.types
[j
- 1] = i
.types
[j
];
6970 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
6971 i
.flags
[j
- 1] = i
.flags
[j
];
6975 else if (i
.tm
.opcode_modifier
.implicit1stxmm0
)
6977 gas_assert ((MAX_OPERANDS
- 1) > dupl
6978 && (i
.tm
.opcode_modifier
.vexsources
6981 /* Add the implicit xmm0 for instructions with VEX prefix
6983 for (j
= i
.operands
; j
> 0; j
--)
6985 i
.op
[j
] = i
.op
[j
- 1];
6986 i
.types
[j
] = i
.types
[j
- 1];
6987 i
.tm
.operand_types
[j
] = i
.tm
.operand_types
[j
- 1];
6988 i
.flags
[j
] = i
.flags
[j
- 1];
6991 = (const reg_entry
*) hash_find (reg_hash
, "xmm0");
6992 i
.types
[0] = regxmm
;
6993 i
.tm
.operand_types
[0] = regxmm
;
6996 i
.reg_operands
+= 2;
7001 i
.op
[dupl
] = i
.op
[dest
];
7002 i
.types
[dupl
] = i
.types
[dest
];
7003 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7004 i
.flags
[dupl
] = i
.flags
[dest
];
7013 i
.op
[dupl
] = i
.op
[dest
];
7014 i
.types
[dupl
] = i
.types
[dest
];
7015 i
.tm
.operand_types
[dupl
] = i
.tm
.operand_types
[dest
];
7016 i
.flags
[dupl
] = i
.flags
[dest
];
7019 if (i
.tm
.opcode_modifier
.immext
)
7022 else if (i
.tm
.operand_types
[0].bitfield
.instance
== Accum
7023 && i
.tm
.operand_types
[0].bitfield
.xmmword
)
7027 for (j
= 1; j
< i
.operands
; j
++)
7029 i
.op
[j
- 1] = i
.op
[j
];
7030 i
.types
[j
- 1] = i
.types
[j
];
7032 /* We need to adjust fields in i.tm since they are used by
7033 build_modrm_byte. */
7034 i
.tm
.operand_types
[j
- 1] = i
.tm
.operand_types
[j
];
7036 i
.flags
[j
- 1] = i
.flags
[j
];
7043 else if (i
.tm
.opcode_modifier
.implicitquadgroup
)
7045 unsigned int regnum
, first_reg_in_group
, last_reg_in_group
;
7047 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
7048 gas_assert (i
.operands
>= 2 && i
.types
[1].bitfield
.class == RegSIMD
);
7049 regnum
= register_number (i
.op
[1].regs
);
7050 first_reg_in_group
= regnum
& ~3;
7051 last_reg_in_group
= first_reg_in_group
+ 3;
7052 if (regnum
!= first_reg_in_group
)
7053 as_warn (_("source register `%s%s' implicitly denotes"
7054 " `%s%.3s%u' to `%s%.3s%u' source group in `%s'"),
7055 register_prefix
, i
.op
[1].regs
->reg_name
,
7056 register_prefix
, i
.op
[1].regs
->reg_name
, first_reg_in_group
,
7057 register_prefix
, i
.op
[1].regs
->reg_name
, last_reg_in_group
,
7060 else if (i
.tm
.opcode_modifier
.regkludge
)
7062 /* The imul $imm, %reg instruction is converted into
7063 imul $imm, %reg, %reg, and the clr %reg instruction
7064 is converted into xor %reg, %reg. */
7066 unsigned int first_reg_op
;
7068 if (operand_type_check (i
.types
[0], reg
))
7072 /* Pretend we saw the extra register operand. */
7073 gas_assert (i
.reg_operands
== 1
7074 && i
.op
[first_reg_op
+ 1].regs
== 0);
7075 i
.op
[first_reg_op
+ 1].regs
= i
.op
[first_reg_op
].regs
;
7076 i
.types
[first_reg_op
+ 1] = i
.types
[first_reg_op
];
7081 if (i
.tm
.opcode_modifier
.modrm
)
7083 /* The opcode is completed (modulo i.tm.extension_opcode which
7084 must be put into the modrm byte). Now, we make the modrm and
7085 index base bytes based on all the info we've collected. */
7087 default_seg
= build_modrm_byte ();
7089 else if (i
.types
[0].bitfield
.class == SReg
)
7091 if (flag_code
!= CODE_64BIT
7092 ? i
.tm
.base_opcode
== POP_SEG_SHORT
7093 && i
.op
[0].regs
->reg_num
== 1
7094 : (i
.tm
.base_opcode
| 1) == POP_SEG386_SHORT
7095 && i
.op
[0].regs
->reg_num
< 4)
7097 as_bad (_("you can't `%s %s%s'"),
7098 i
.tm
.name
, register_prefix
, i
.op
[0].regs
->reg_name
);
7101 if ( i
.op
[0].regs
->reg_num
> 3 && i
.tm
.opcode_length
== 1 )
7103 i
.tm
.base_opcode
^= POP_SEG_SHORT
^ POP_SEG386_SHORT
;
7104 i
.tm
.opcode_length
= 2;
7106 i
.tm
.base_opcode
|= (i
.op
[0].regs
->reg_num
<< 3);
7108 else if ((i
.tm
.base_opcode
& ~0x3) == MOV_AX_DISP32
)
7112 else if (i
.tm
.opcode_modifier
.isstring
)
7114 /* For the string instructions that allow a segment override
7115 on one of their operands, the default segment is ds. */
7118 else if (i
.short_form
)
7120 /* The register or float register operand is in operand
7122 unsigned int op
= i
.tm
.operand_types
[0].bitfield
.class != Reg
;
7124 /* Register goes in low 3 bits of opcode. */
7125 i
.tm
.base_opcode
|= i
.op
[op
].regs
->reg_num
;
7126 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7128 if (!quiet_warnings
&& i
.tm
.opcode_modifier
.ugh
)
7130 /* Warn about some common errors, but press on regardless.
7131 The first case can be generated by gcc (<= 2.8.1). */
7132 if (i
.operands
== 2)
7134 /* Reversed arguments on faddp, fsubp, etc. */
7135 as_warn (_("translating to `%s %s%s,%s%s'"), i
.tm
.name
,
7136 register_prefix
, i
.op
[!intel_syntax
].regs
->reg_name
,
7137 register_prefix
, i
.op
[intel_syntax
].regs
->reg_name
);
7141 /* Extraneous `l' suffix on fp insn. */
7142 as_warn (_("translating to `%s %s%s'"), i
.tm
.name
,
7143 register_prefix
, i
.op
[0].regs
->reg_name
);
7148 if (i
.tm
.base_opcode
== 0x8d /* lea */
7151 as_warn (_("segment override on `%s' is ineffectual"), i
.tm
.name
);
7153 /* If a segment was explicitly specified, and the specified segment
7154 is not the default, use an opcode prefix to select it. If we
7155 never figured out what the default segment is, then default_seg
7156 will be zero at this point, and the specified segment prefix will
7158 if ((i
.seg
[0]) && (i
.seg
[0] != default_seg
))
7160 if (!add_prefix (i
.seg
[0]->seg_prefix
))
7166 static const seg_entry
*
7167 build_modrm_byte (void)
7169 const seg_entry
*default_seg
= 0;
7170 unsigned int source
, dest
;
7173 vex_3_sources
= i
.tm
.opcode_modifier
.vexsources
== VEX3SOURCES
;
7176 unsigned int nds
, reg_slot
;
7179 dest
= i
.operands
- 1;
7182 /* There are 2 kinds of instructions:
7183 1. 5 operands: 4 register operands or 3 register operands
7184 plus 1 memory operand plus one Imm4 operand, VexXDS, and
7185 VexW0 or VexW1. The destination must be either XMM, YMM or
7187 2. 4 operands: 4 register operands or 3 register operands
7188 plus 1 memory operand, with VexXDS. */
7189 gas_assert ((i
.reg_operands
== 4
7190 || (i
.reg_operands
== 3 && i
.mem_operands
== 1))
7191 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7192 && i
.tm
.opcode_modifier
.vexw
7193 && i
.tm
.operand_types
[dest
].bitfield
.class == RegSIMD
);
7195 /* If VexW1 is set, the first non-immediate operand is the source and
7196 the second non-immediate one is encoded in the immediate operand. */
7197 if (i
.tm
.opcode_modifier
.vexw
== VEXW1
)
7199 source
= i
.imm_operands
;
7200 reg_slot
= i
.imm_operands
+ 1;
7204 source
= i
.imm_operands
+ 1;
7205 reg_slot
= i
.imm_operands
;
7208 if (i
.imm_operands
== 0)
7210 /* When there is no immediate operand, generate an 8bit
7211 immediate operand to encode the first operand. */
7212 exp
= &im_expressions
[i
.imm_operands
++];
7213 i
.op
[i
.operands
].imms
= exp
;
7214 i
.types
[i
.operands
] = imm8
;
7217 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
7218 exp
->X_op
= O_constant
;
7219 exp
->X_add_number
= register_number (i
.op
[reg_slot
].regs
) << 4;
7220 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
7224 gas_assert (i
.imm_operands
== 1);
7225 gas_assert (fits_in_imm4 (i
.op
[0].imms
->X_add_number
));
7226 gas_assert (!i
.tm
.opcode_modifier
.immext
);
7228 /* Turn on Imm8 again so that output_imm will generate it. */
7229 i
.types
[0].bitfield
.imm8
= 1;
7231 gas_assert (i
.tm
.operand_types
[reg_slot
].bitfield
.class == RegSIMD
);
7232 i
.op
[0].imms
->X_add_number
7233 |= register_number (i
.op
[reg_slot
].regs
) << 4;
7234 gas_assert ((i
.op
[reg_slot
].regs
->reg_flags
& RegVRex
) == 0);
7237 gas_assert (i
.tm
.operand_types
[nds
].bitfield
.class == RegSIMD
);
7238 i
.vex
.register_specifier
= i
.op
[nds
].regs
;
7243 /* i.reg_operands MUST be the number of real register operands;
7244 implicit registers do not count. If there are 3 register
7245 operands, it must be a instruction with VexNDS. For a
7246 instruction with VexNDD, the destination register is encoded
7247 in VEX prefix. If there are 4 register operands, it must be
7248 a instruction with VEX prefix and 3 sources. */
7249 if (i
.mem_operands
== 0
7250 && ((i
.reg_operands
== 2
7251 && i
.tm
.opcode_modifier
.vexvvvv
<= VEXXDS
)
7252 || (i
.reg_operands
== 3
7253 && i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7254 || (i
.reg_operands
== 4 && vex_3_sources
)))
7262 /* When there are 3 operands, one of them may be immediate,
7263 which may be the first or the last operand. Otherwise,
7264 the first operand must be shift count register (cl) or it
7265 is an instruction with VexNDS. */
7266 gas_assert (i
.imm_operands
== 1
7267 || (i
.imm_operands
== 0
7268 && (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7269 || (i
.types
[0].bitfield
.instance
== RegC
7270 && i
.types
[0].bitfield
.byte
))));
7271 if (operand_type_check (i
.types
[0], imm
)
7272 || (i
.types
[0].bitfield
.instance
== RegC
7273 && i
.types
[0].bitfield
.byte
))
7279 /* When there are 4 operands, the first two must be 8bit
7280 immediate operands. The source operand will be the 3rd
7283 For instructions with VexNDS, if the first operand
7284 an imm8, the source operand is the 2nd one. If the last
7285 operand is imm8, the source operand is the first one. */
7286 gas_assert ((i
.imm_operands
== 2
7287 && i
.types
[0].bitfield
.imm8
7288 && i
.types
[1].bitfield
.imm8
)
7289 || (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
7290 && i
.imm_operands
== 1
7291 && (i
.types
[0].bitfield
.imm8
7292 || i
.types
[i
.operands
- 1].bitfield
.imm8
7294 if (i
.imm_operands
== 2)
7298 if (i
.types
[0].bitfield
.imm8
)
7305 if (is_evex_encoding (&i
.tm
))
7307 /* For EVEX instructions, when there are 5 operands, the
7308 first one must be immediate operand. If the second one
7309 is immediate operand, the source operand is the 3th
7310 one. If the last one is immediate operand, the source
7311 operand is the 2nd one. */
7312 gas_assert (i
.imm_operands
== 2
7313 && i
.tm
.opcode_modifier
.sae
7314 && operand_type_check (i
.types
[0], imm
));
7315 if (operand_type_check (i
.types
[1], imm
))
7317 else if (operand_type_check (i
.types
[4], imm
))
7331 /* RC/SAE operand could be between DEST and SRC. That happens
7332 when one operand is GPR and the other one is XMM/YMM/ZMM
7334 if (i
.rounding
&& i
.rounding
->operand
== (int) dest
)
7337 if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7339 /* For instructions with VexNDS, the register-only source
7340 operand must be a 32/64bit integer, XMM, YMM, ZMM, or mask
7341 register. It is encoded in VEX prefix. */
7343 i386_operand_type op
;
7346 /* Check register-only source operand when two source
7347 operands are swapped. */
7348 if (!i
.tm
.operand_types
[source
].bitfield
.baseindex
7349 && i
.tm
.operand_types
[dest
].bitfield
.baseindex
)
7357 op
= i
.tm
.operand_types
[vvvv
];
7358 if ((dest
+ 1) >= i
.operands
7359 || ((op
.bitfield
.class != Reg
7360 || (!op
.bitfield
.dword
&& !op
.bitfield
.qword
))
7361 && op
.bitfield
.class != RegSIMD
7362 && !operand_type_equal (&op
, ®mask
)))
7364 i
.vex
.register_specifier
= i
.op
[vvvv
].regs
;
7370 /* One of the register operands will be encoded in the i.rm.reg
7371 field, the other in the combined i.rm.mode and i.rm.regmem
7372 fields. If no form of this instruction supports a memory
7373 destination operand, then we assume the source operand may
7374 sometimes be a memory operand and so we need to store the
7375 destination in the i.rm.reg field. */
7376 if (!i
.tm
.opcode_modifier
.regmem
7377 && operand_type_check (i
.tm
.operand_types
[dest
], anymem
) == 0)
7379 i
.rm
.reg
= i
.op
[dest
].regs
->reg_num
;
7380 i
.rm
.regmem
= i
.op
[source
].regs
->reg_num
;
7381 if (i
.op
[dest
].regs
->reg_type
.bitfield
.class == RegMMX
7382 || i
.op
[source
].regs
->reg_type
.bitfield
.class == RegMMX
)
7383 i
.has_regmmx
= TRUE
;
7384 else if (i
.op
[dest
].regs
->reg_type
.bitfield
.class == RegSIMD
7385 || i
.op
[source
].regs
->reg_type
.bitfield
.class == RegSIMD
)
7387 if (i
.types
[dest
].bitfield
.zmmword
7388 || i
.types
[source
].bitfield
.zmmword
)
7389 i
.has_regzmm
= TRUE
;
7390 else if (i
.types
[dest
].bitfield
.ymmword
7391 || i
.types
[source
].bitfield
.ymmword
)
7392 i
.has_regymm
= TRUE
;
7394 i
.has_regxmm
= TRUE
;
7396 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
7398 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
7400 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
7402 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
7407 i
.rm
.reg
= i
.op
[source
].regs
->reg_num
;
7408 i
.rm
.regmem
= i
.op
[dest
].regs
->reg_num
;
7409 if ((i
.op
[dest
].regs
->reg_flags
& RegRex
) != 0)
7411 if ((i
.op
[dest
].regs
->reg_flags
& RegVRex
) != 0)
7413 if ((i
.op
[source
].regs
->reg_flags
& RegRex
) != 0)
7415 if ((i
.op
[source
].regs
->reg_flags
& RegVRex
) != 0)
7418 if (flag_code
!= CODE_64BIT
&& (i
.rex
& REX_R
))
7420 if (i
.types
[!i
.tm
.opcode_modifier
.regmem
].bitfield
.class != RegCR
)
7423 add_prefix (LOCK_PREFIX_OPCODE
);
7427 { /* If it's not 2 reg operands... */
7432 unsigned int fake_zero_displacement
= 0;
7435 for (op
= 0; op
< i
.operands
; op
++)
7436 if (i
.flags
[op
] & Operand_Mem
)
7438 gas_assert (op
< i
.operands
);
7440 if (i
.tm
.opcode_modifier
.vecsib
)
7442 if (i
.index_reg
->reg_num
== RegIZ
)
7445 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7448 i
.sib
.base
= NO_BASE_REGISTER
;
7449 i
.sib
.scale
= i
.log2_scale_factor
;
7450 i
.types
[op
].bitfield
.disp8
= 0;
7451 i
.types
[op
].bitfield
.disp16
= 0;
7452 i
.types
[op
].bitfield
.disp64
= 0;
7453 if (flag_code
!= CODE_64BIT
|| i
.prefix
[ADDR_PREFIX
])
7455 /* Must be 32 bit */
7456 i
.types
[op
].bitfield
.disp32
= 1;
7457 i
.types
[op
].bitfield
.disp32s
= 0;
7461 i
.types
[op
].bitfield
.disp32
= 0;
7462 i
.types
[op
].bitfield
.disp32s
= 1;
7465 i
.sib
.index
= i
.index_reg
->reg_num
;
7466 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
7468 if ((i
.index_reg
->reg_flags
& RegVRex
) != 0)
7474 if (i
.base_reg
== 0)
7477 if (!i
.disp_operands
)
7478 fake_zero_displacement
= 1;
7479 if (i
.index_reg
== 0)
7481 i386_operand_type newdisp
;
7483 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7484 /* Operand is just <disp> */
7485 if (flag_code
== CODE_64BIT
)
7487 /* 64bit mode overwrites the 32bit absolute
7488 addressing by RIP relative addressing and
7489 absolute addressing is encoded by one of the
7490 redundant SIB forms. */
7491 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7492 i
.sib
.base
= NO_BASE_REGISTER
;
7493 i
.sib
.index
= NO_INDEX_REGISTER
;
7494 newdisp
= (!i
.prefix
[ADDR_PREFIX
] ? disp32s
: disp32
);
7496 else if ((flag_code
== CODE_16BIT
)
7497 ^ (i
.prefix
[ADDR_PREFIX
] != 0))
7499 i
.rm
.regmem
= NO_BASE_REGISTER_16
;
7504 i
.rm
.regmem
= NO_BASE_REGISTER
;
7507 i
.types
[op
] = operand_type_and_not (i
.types
[op
], anydisp
);
7508 i
.types
[op
] = operand_type_or (i
.types
[op
], newdisp
);
7510 else if (!i
.tm
.opcode_modifier
.vecsib
)
7512 /* !i.base_reg && i.index_reg */
7513 if (i
.index_reg
->reg_num
== RegIZ
)
7514 i
.sib
.index
= NO_INDEX_REGISTER
;
7516 i
.sib
.index
= i
.index_reg
->reg_num
;
7517 i
.sib
.base
= NO_BASE_REGISTER
;
7518 i
.sib
.scale
= i
.log2_scale_factor
;
7519 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7520 i
.types
[op
].bitfield
.disp8
= 0;
7521 i
.types
[op
].bitfield
.disp16
= 0;
7522 i
.types
[op
].bitfield
.disp64
= 0;
7523 if (flag_code
!= CODE_64BIT
|| i
.prefix
[ADDR_PREFIX
])
7525 /* Must be 32 bit */
7526 i
.types
[op
].bitfield
.disp32
= 1;
7527 i
.types
[op
].bitfield
.disp32s
= 0;
7531 i
.types
[op
].bitfield
.disp32
= 0;
7532 i
.types
[op
].bitfield
.disp32s
= 1;
7534 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
7538 /* RIP addressing for 64bit mode. */
7539 else if (i
.base_reg
->reg_num
== RegIP
)
7541 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7542 i
.rm
.regmem
= NO_BASE_REGISTER
;
7543 i
.types
[op
].bitfield
.disp8
= 0;
7544 i
.types
[op
].bitfield
.disp16
= 0;
7545 i
.types
[op
].bitfield
.disp32
= 0;
7546 i
.types
[op
].bitfield
.disp32s
= 1;
7547 i
.types
[op
].bitfield
.disp64
= 0;
7548 i
.flags
[op
] |= Operand_PCrel
;
7549 if (! i
.disp_operands
)
7550 fake_zero_displacement
= 1;
7552 else if (i
.base_reg
->reg_type
.bitfield
.word
)
7554 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7555 switch (i
.base_reg
->reg_num
)
7558 if (i
.index_reg
== 0)
7560 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
7561 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6;
7565 if (i
.index_reg
== 0)
7568 if (operand_type_check (i
.types
[op
], disp
) == 0)
7570 /* fake (%bp) into 0(%bp) */
7571 i
.types
[op
].bitfield
.disp8
= 1;
7572 fake_zero_displacement
= 1;
7575 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
7576 i
.rm
.regmem
= i
.index_reg
->reg_num
- 6 + 2;
7578 default: /* (%si) -> 4 or (%di) -> 5 */
7579 i
.rm
.regmem
= i
.base_reg
->reg_num
- 6 + 4;
7581 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
7583 else /* i.base_reg and 32/64 bit mode */
7585 if (flag_code
== CODE_64BIT
7586 && operand_type_check (i
.types
[op
], disp
))
7588 i
.types
[op
].bitfield
.disp16
= 0;
7589 i
.types
[op
].bitfield
.disp64
= 0;
7590 if (i
.prefix
[ADDR_PREFIX
] == 0)
7592 i
.types
[op
].bitfield
.disp32
= 0;
7593 i
.types
[op
].bitfield
.disp32s
= 1;
7597 i
.types
[op
].bitfield
.disp32
= 1;
7598 i
.types
[op
].bitfield
.disp32s
= 0;
7602 if (!i
.tm
.opcode_modifier
.vecsib
)
7603 i
.rm
.regmem
= i
.base_reg
->reg_num
;
7604 if ((i
.base_reg
->reg_flags
& RegRex
) != 0)
7606 i
.sib
.base
= i
.base_reg
->reg_num
;
7607 /* x86-64 ignores REX prefix bit here to avoid decoder
7609 if (!(i
.base_reg
->reg_flags
& RegRex
)
7610 && (i
.base_reg
->reg_num
== EBP_REG_NUM
7611 || i
.base_reg
->reg_num
== ESP_REG_NUM
))
7613 if (i
.base_reg
->reg_num
== 5 && i
.disp_operands
== 0)
7615 fake_zero_displacement
= 1;
7616 i
.types
[op
].bitfield
.disp8
= 1;
7618 i
.sib
.scale
= i
.log2_scale_factor
;
7619 if (i
.index_reg
== 0)
7621 gas_assert (!i
.tm
.opcode_modifier
.vecsib
);
7622 /* <disp>(%esp) becomes two byte modrm with no index
7623 register. We've already stored the code for esp
7624 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
7625 Any base register besides %esp will not use the
7626 extra modrm byte. */
7627 i
.sib
.index
= NO_INDEX_REGISTER
;
7629 else if (!i
.tm
.opcode_modifier
.vecsib
)
7631 if (i
.index_reg
->reg_num
== RegIZ
)
7632 i
.sib
.index
= NO_INDEX_REGISTER
;
7634 i
.sib
.index
= i
.index_reg
->reg_num
;
7635 i
.rm
.regmem
= ESCAPE_TO_TWO_BYTE_ADDRESSING
;
7636 if ((i
.index_reg
->reg_flags
& RegRex
) != 0)
7641 && (i
.reloc
[op
] == BFD_RELOC_386_TLS_DESC_CALL
7642 || i
.reloc
[op
] == BFD_RELOC_X86_64_TLSDESC_CALL
))
7646 if (!fake_zero_displacement
7650 fake_zero_displacement
= 1;
7651 if (i
.disp_encoding
== disp_encoding_8bit
)
7652 i
.types
[op
].bitfield
.disp8
= 1;
7654 i
.types
[op
].bitfield
.disp32
= 1;
7656 i
.rm
.mode
= mode_from_disp_size (i
.types
[op
]);
7660 if (fake_zero_displacement
)
7662 /* Fakes a zero displacement assuming that i.types[op]
7663 holds the correct displacement size. */
7666 gas_assert (i
.op
[op
].disps
== 0);
7667 exp
= &disp_expressions
[i
.disp_operands
++];
7668 i
.op
[op
].disps
= exp
;
7669 exp
->X_op
= O_constant
;
7670 exp
->X_add_number
= 0;
7671 exp
->X_add_symbol
= (symbolS
*) 0;
7672 exp
->X_op_symbol
= (symbolS
*) 0;
7680 if (i
.tm
.opcode_modifier
.vexsources
== XOP2SOURCES
)
7682 if (operand_type_check (i
.types
[0], imm
))
7683 i
.vex
.register_specifier
= NULL
;
7686 /* VEX.vvvv encodes one of the sources when the first
7687 operand is not an immediate. */
7688 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
7689 i
.vex
.register_specifier
= i
.op
[0].regs
;
7691 i
.vex
.register_specifier
= i
.op
[1].regs
;
7694 /* Destination is a XMM register encoded in the ModRM.reg
7696 i
.rm
.reg
= i
.op
[2].regs
->reg_num
;
7697 if ((i
.op
[2].regs
->reg_flags
& RegRex
) != 0)
7700 /* ModRM.rm and VEX.B encodes the other source. */
7701 if (!i
.mem_operands
)
7705 if (i
.tm
.opcode_modifier
.vexw
== VEXW0
)
7706 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
7708 i
.rm
.regmem
= i
.op
[0].regs
->reg_num
;
7710 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
7714 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXLWP
)
7716 i
.vex
.register_specifier
= i
.op
[2].regs
;
7717 if (!i
.mem_operands
)
7720 i
.rm
.regmem
= i
.op
[1].regs
->reg_num
;
7721 if ((i
.op
[1].regs
->reg_flags
& RegRex
) != 0)
7725 /* Fill in i.rm.reg or i.rm.regmem field with register operand
7726 (if any) based on i.tm.extension_opcode. Again, we must be
7727 careful to make sure that segment/control/debug/test/MMX
7728 registers are coded into the i.rm.reg field. */
7729 else if (i
.reg_operands
)
7732 unsigned int vex_reg
= ~0;
7734 for (op
= 0; op
< i
.operands
; op
++)
7736 if (i
.types
[op
].bitfield
.class == Reg
7737 || i
.types
[op
].bitfield
.class == RegBND
7738 || i
.types
[op
].bitfield
.class == RegMask
7739 || i
.types
[op
].bitfield
.class == SReg
7740 || i
.types
[op
].bitfield
.class == RegCR
7741 || i
.types
[op
].bitfield
.class == RegDR
7742 || i
.types
[op
].bitfield
.class == RegTR
)
7744 if (i
.types
[op
].bitfield
.class == RegSIMD
)
7746 if (i
.types
[op
].bitfield
.zmmword
)
7747 i
.has_regzmm
= TRUE
;
7748 else if (i
.types
[op
].bitfield
.ymmword
)
7749 i
.has_regymm
= TRUE
;
7751 i
.has_regxmm
= TRUE
;
7754 if (i
.types
[op
].bitfield
.class == RegMMX
)
7756 i
.has_regmmx
= TRUE
;
7763 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXXDS
)
7765 /* For instructions with VexNDS, the register-only
7766 source operand is encoded in VEX prefix. */
7767 gas_assert (mem
!= (unsigned int) ~0);
7772 gas_assert (op
< i
.operands
);
7776 /* Check register-only source operand when two source
7777 operands are swapped. */
7778 if (!i
.tm
.operand_types
[op
].bitfield
.baseindex
7779 && i
.tm
.operand_types
[op
+ 1].bitfield
.baseindex
)
7783 gas_assert (mem
== (vex_reg
+ 1)
7784 && op
< i
.operands
);
7789 gas_assert (vex_reg
< i
.operands
);
7793 else if (i
.tm
.opcode_modifier
.vexvvvv
== VEXNDD
)
7795 /* For instructions with VexNDD, the register destination
7796 is encoded in VEX prefix. */
7797 if (i
.mem_operands
== 0)
7799 /* There is no memory operand. */
7800 gas_assert ((op
+ 2) == i
.operands
);
7805 /* There are only 2 non-immediate operands. */
7806 gas_assert (op
< i
.imm_operands
+ 2
7807 && i
.operands
== i
.imm_operands
+ 2);
7808 vex_reg
= i
.imm_operands
+ 1;
7812 gas_assert (op
< i
.operands
);
7814 if (vex_reg
!= (unsigned int) ~0)
7816 i386_operand_type
*type
= &i
.tm
.operand_types
[vex_reg
];
7818 if ((type
->bitfield
.class != Reg
7819 || (!type
->bitfield
.dword
&& !type
->bitfield
.qword
))
7820 && type
->bitfield
.class != RegSIMD
7821 && !operand_type_equal (type
, ®mask
))
7824 i
.vex
.register_specifier
= i
.op
[vex_reg
].regs
;
7827 /* Don't set OP operand twice. */
7830 /* If there is an extension opcode to put here, the
7831 register number must be put into the regmem field. */
7832 if (i
.tm
.extension_opcode
!= None
)
7834 i
.rm
.regmem
= i
.op
[op
].regs
->reg_num
;
7835 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7837 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
7842 i
.rm
.reg
= i
.op
[op
].regs
->reg_num
;
7843 if ((i
.op
[op
].regs
->reg_flags
& RegRex
) != 0)
7845 if ((i
.op
[op
].regs
->reg_flags
& RegVRex
) != 0)
7850 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
7851 must set it to 3 to indicate this is a register operand
7852 in the regmem field. */
7853 if (!i
.mem_operands
)
7857 /* Fill in i.rm.reg field with extension opcode (if any). */
7858 if (i
.tm
.extension_opcode
!= None
)
7859 i
.rm
.reg
= i
.tm
.extension_opcode
;
7865 flip_code16 (unsigned int code16
)
7867 gas_assert (i
.tm
.operands
== 1);
7869 return !(i
.prefix
[REX_PREFIX
] & REX_W
)
7870 && (code16
? i
.tm
.operand_types
[0].bitfield
.disp32
7871 || i
.tm
.operand_types
[0].bitfield
.disp32s
7872 : i
.tm
.operand_types
[0].bitfield
.disp16
)
7877 output_branch (void)
7883 relax_substateT subtype
;
7887 code16
= flag_code
== CODE_16BIT
? CODE16
: 0;
7888 size
= i
.disp_encoding
== disp_encoding_32bit
? BIG
: SMALL
;
7891 if (i
.prefix
[DATA_PREFIX
] != 0)
7895 code16
^= flip_code16(code16
);
7897 /* Pentium4 branch hints. */
7898 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
7899 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
7904 if (i
.prefix
[REX_PREFIX
] != 0)
7910 /* BND prefixed jump. */
7911 if (i
.prefix
[BND_PREFIX
] != 0)
7917 if (i
.prefixes
!= 0)
7918 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
7920 /* It's always a symbol; End frag & setup for relax.
7921 Make sure there is enough room in this frag for the largest
7922 instruction we may generate in md_convert_frag. This is 2
7923 bytes for the opcode and room for the prefix and largest
7925 frag_grow (prefix
+ 2 + 4);
7926 /* Prefix and 1 opcode byte go in fr_fix. */
7927 p
= frag_more (prefix
+ 1);
7928 if (i
.prefix
[DATA_PREFIX
] != 0)
7929 *p
++ = DATA_PREFIX_OPCODE
;
7930 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
7931 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
)
7932 *p
++ = i
.prefix
[SEG_PREFIX
];
7933 if (i
.prefix
[BND_PREFIX
] != 0)
7934 *p
++ = BND_PREFIX_OPCODE
;
7935 if (i
.prefix
[REX_PREFIX
] != 0)
7936 *p
++ = i
.prefix
[REX_PREFIX
];
7937 *p
= i
.tm
.base_opcode
;
7939 if ((unsigned char) *p
== JUMP_PC_RELATIVE
)
7940 subtype
= ENCODE_RELAX_STATE (UNCOND_JUMP
, size
);
7941 else if (cpu_arch_flags
.bitfield
.cpui386
)
7942 subtype
= ENCODE_RELAX_STATE (COND_JUMP
, size
);
7944 subtype
= ENCODE_RELAX_STATE (COND_JUMP86
, size
);
7947 sym
= i
.op
[0].disps
->X_add_symbol
;
7948 off
= i
.op
[0].disps
->X_add_number
;
7950 if (i
.op
[0].disps
->X_op
!= O_constant
7951 && i
.op
[0].disps
->X_op
!= O_symbol
)
7953 /* Handle complex expressions. */
7954 sym
= make_expr_symbol (i
.op
[0].disps
);
7958 /* 1 possible extra opcode + 4 byte displacement go in var part.
7959 Pass reloc in fr_var. */
7960 frag_var (rs_machine_dependent
, 5, i
.reloc
[0], subtype
, sym
, off
, p
);
7963 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7964 /* Return TRUE iff PLT32 relocation should be used for branching to
7968 need_plt32_p (symbolS
*s
)
7970 /* PLT32 relocation is ELF only. */
7975 /* Don't emit PLT32 relocation on Solaris: neither native linker nor
7976 krtld support it. */
7980 /* Since there is no need to prepare for PLT branch on x86-64, we
7981 can generate R_X86_64_PLT32, instead of R_X86_64_PC32, which can
7982 be used as a marker for 32-bit PC-relative branches. */
7986 /* Weak or undefined symbol need PLT32 relocation. */
7987 if (S_IS_WEAK (s
) || !S_IS_DEFINED (s
))
7990 /* Non-global symbol doesn't need PLT32 relocation. */
7991 if (! S_IS_EXTERNAL (s
))
7994 /* Other global symbols need PLT32 relocation. NB: Symbol with
7995 non-default visibilities are treated as normal global symbol
7996 so that PLT32 relocation can be used as a marker for 32-bit
7997 PC-relative branches. It is useful for linker relaxation. */
8008 bfd_reloc_code_real_type jump_reloc
= i
.reloc
[0];
8010 if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
)
8012 /* This is a loop or jecxz type instruction. */
8014 if (i
.prefix
[ADDR_PREFIX
] != 0)
8016 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE
);
8019 /* Pentium4 branch hints. */
8020 if (i
.prefix
[SEG_PREFIX
] == CS_PREFIX_OPCODE
/* not taken */
8021 || i
.prefix
[SEG_PREFIX
] == DS_PREFIX_OPCODE
/* taken */)
8023 FRAG_APPEND_1_CHAR (i
.prefix
[SEG_PREFIX
]);
8032 if (flag_code
== CODE_16BIT
)
8035 if (i
.prefix
[DATA_PREFIX
] != 0)
8037 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE
);
8039 code16
^= flip_code16(code16
);
8047 /* BND prefixed jump. */
8048 if (i
.prefix
[BND_PREFIX
] != 0)
8050 FRAG_APPEND_1_CHAR (i
.prefix
[BND_PREFIX
]);
8054 if (i
.prefix
[REX_PREFIX
] != 0)
8056 FRAG_APPEND_1_CHAR (i
.prefix
[REX_PREFIX
]);
8060 if (i
.prefixes
!= 0)
8061 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8063 p
= frag_more (i
.tm
.opcode_length
+ size
);
8064 switch (i
.tm
.opcode_length
)
8067 *p
++ = i
.tm
.base_opcode
>> 8;
8070 *p
++ = i
.tm
.base_opcode
;
8076 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8078 && jump_reloc
== NO_RELOC
8079 && need_plt32_p (i
.op
[0].disps
->X_add_symbol
))
8080 jump_reloc
= BFD_RELOC_X86_64_PLT32
;
8083 jump_reloc
= reloc (size
, 1, 1, jump_reloc
);
8085 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8086 i
.op
[0].disps
, 1, jump_reloc
);
8088 /* All jumps handled here are signed, but don't use a signed limit
8089 check for 32 and 16 bit jumps as we want to allow wrap around at
8090 4G and 64k respectively. */
8092 fixP
->fx_signed
= 1;
8096 output_interseg_jump (void)
8104 if (flag_code
== CODE_16BIT
)
8108 if (i
.prefix
[DATA_PREFIX
] != 0)
8115 gas_assert (!i
.prefix
[REX_PREFIX
]);
8121 if (i
.prefixes
!= 0)
8122 as_warn (_("skipping prefixes on `%s'"), i
.tm
.name
);
8124 /* 1 opcode; 2 segment; offset */
8125 p
= frag_more (prefix
+ 1 + 2 + size
);
8127 if (i
.prefix
[DATA_PREFIX
] != 0)
8128 *p
++ = DATA_PREFIX_OPCODE
;
8130 if (i
.prefix
[REX_PREFIX
] != 0)
8131 *p
++ = i
.prefix
[REX_PREFIX
];
8133 *p
++ = i
.tm
.base_opcode
;
8134 if (i
.op
[1].imms
->X_op
== O_constant
)
8136 offsetT n
= i
.op
[1].imms
->X_add_number
;
8139 && !fits_in_unsigned_word (n
)
8140 && !fits_in_signed_word (n
))
8142 as_bad (_("16-bit jump out of range"));
8145 md_number_to_chars (p
, n
, size
);
8148 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
8149 i
.op
[1].imms
, 0, reloc (size
, 0, 0, i
.reloc
[1]));
8150 if (i
.op
[0].imms
->X_op
!= O_constant
)
8151 as_bad (_("can't handle non absolute segment in `%s'"),
8153 md_number_to_chars (p
+ size
, (valueT
) i
.op
[0].imms
->X_add_number
, 2);
8156 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8161 asection
*seg
= now_seg
;
8162 subsegT subseg
= now_subseg
;
8164 unsigned int alignment
, align_size_1
;
8165 unsigned int isa_1_descsz
, feature_2_descsz
, descsz
;
8166 unsigned int isa_1_descsz_raw
, feature_2_descsz_raw
;
8167 unsigned int padding
;
8169 if (!IS_ELF
|| !x86_used_note
)
8172 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X86
;
8174 /* The .note.gnu.property section layout:
8176 Field Length Contents
8179 n_descsz 4 The note descriptor size
8180 n_type 4 NT_GNU_PROPERTY_TYPE_0
8182 n_desc n_descsz The program property array
8186 /* Create the .note.gnu.property section. */
8187 sec
= subseg_new (NOTE_GNU_PROPERTY_SECTION_NAME
, 0);
8188 bfd_set_section_flags (sec
,
8195 if (get_elf_backend_data (stdoutput
)->s
->elfclass
== ELFCLASS64
)
8206 bfd_set_section_alignment (sec
, alignment
);
8207 elf_section_type (sec
) = SHT_NOTE
;
8209 /* GNU_PROPERTY_X86_ISA_1_USED: 4-byte type + 4-byte data size
8211 isa_1_descsz_raw
= 4 + 4 + 4;
8212 /* Align GNU_PROPERTY_X86_ISA_1_USED. */
8213 isa_1_descsz
= (isa_1_descsz_raw
+ align_size_1
) & ~align_size_1
;
8215 feature_2_descsz_raw
= isa_1_descsz
;
8216 /* GNU_PROPERTY_X86_FEATURE_2_USED: 4-byte type + 4-byte data size
8218 feature_2_descsz_raw
+= 4 + 4 + 4;
8219 /* Align GNU_PROPERTY_X86_FEATURE_2_USED. */
8220 feature_2_descsz
= ((feature_2_descsz_raw
+ align_size_1
)
8223 descsz
= feature_2_descsz
;
8224 /* Section size: n_namsz + n_descsz + n_type + n_name + n_descsz. */
8225 p
= frag_more (4 + 4 + 4 + 4 + descsz
);
8227 /* Write n_namsz. */
8228 md_number_to_chars (p
, (valueT
) 4, 4);
8230 /* Write n_descsz. */
8231 md_number_to_chars (p
+ 4, (valueT
) descsz
, 4);
8234 md_number_to_chars (p
+ 4 * 2, (valueT
) NT_GNU_PROPERTY_TYPE_0
, 4);
8237 memcpy (p
+ 4 * 3, "GNU", 4);
8239 /* Write 4-byte type. */
8240 md_number_to_chars (p
+ 4 * 4,
8241 (valueT
) GNU_PROPERTY_X86_ISA_1_USED
, 4);
8243 /* Write 4-byte data size. */
8244 md_number_to_chars (p
+ 4 * 5, (valueT
) 4, 4);
8246 /* Write 4-byte data. */
8247 md_number_to_chars (p
+ 4 * 6, (valueT
) x86_isa_1_used
, 4);
8249 /* Zero out paddings. */
8250 padding
= isa_1_descsz
- isa_1_descsz_raw
;
8252 memset (p
+ 4 * 7, 0, padding
);
8254 /* Write 4-byte type. */
8255 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 4,
8256 (valueT
) GNU_PROPERTY_X86_FEATURE_2_USED
, 4);
8258 /* Write 4-byte data size. */
8259 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 5, (valueT
) 4, 4);
8261 /* Write 4-byte data. */
8262 md_number_to_chars (p
+ isa_1_descsz
+ 4 * 6,
8263 (valueT
) x86_feature_2_used
, 4);
8265 /* Zero out paddings. */
8266 padding
= feature_2_descsz
- feature_2_descsz_raw
;
8268 memset (p
+ isa_1_descsz
+ 4 * 7, 0, padding
);
8270 /* We probably can't restore the current segment, for there likely
8273 subseg_set (seg
, subseg
);
8278 encoding_length (const fragS
*start_frag
, offsetT start_off
,
8279 const char *frag_now_ptr
)
8281 unsigned int len
= 0;
8283 if (start_frag
!= frag_now
)
8285 const fragS
*fr
= start_frag
;
8290 } while (fr
&& fr
!= frag_now
);
8293 return len
- start_off
+ (frag_now_ptr
- frag_now
->fr_literal
);
8296 /* Return 1 for test, and, cmp, add, sub, inc and dec which may
8297 be macro-fused with conditional jumps. */
8300 maybe_fused_with_jcc_p (void)
8302 /* No RIP address. */
8303 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
8306 /* No VEX/EVEX encoding. */
8307 if (is_any_vex_encoding (&i
.tm
))
8310 /* and, add, sub with destination register. */
8311 if ((i
.tm
.base_opcode
>= 0x20 && i
.tm
.base_opcode
<= 0x25)
8312 || i
.tm
.base_opcode
<= 5
8313 || (i
.tm
.base_opcode
>= 0x28 && i
.tm
.base_opcode
<= 0x2d)
8314 || ((i
.tm
.base_opcode
| 3) == 0x83
8315 && ((i
.tm
.extension_opcode
| 1) == 0x5
8316 || i
.tm
.extension_opcode
== 0x0)))
8317 return (i
.types
[1].bitfield
.class == Reg
8318 || i
.types
[1].bitfield
.instance
== Accum
);
8320 /* test, cmp with any register. */
8321 if ((i
.tm
.base_opcode
| 1) == 0x85
8322 || (i
.tm
.base_opcode
| 1) == 0xa9
8323 || ((i
.tm
.base_opcode
| 1) == 0xf7
8324 && i
.tm
.extension_opcode
== 0)
8325 || (i
.tm
.base_opcode
>= 0x38 && i
.tm
.base_opcode
<= 0x3d)
8326 || ((i
.tm
.base_opcode
| 3) == 0x83
8327 && (i
.tm
.extension_opcode
== 0x7)))
8328 return (i
.types
[0].bitfield
.class == Reg
8329 || i
.types
[0].bitfield
.instance
== Accum
8330 || i
.types
[1].bitfield
.class == Reg
8331 || i
.types
[1].bitfield
.instance
== Accum
);
8333 /* inc, dec with any register. */
8334 if ((i
.tm
.cpu_flags
.bitfield
.cpuno64
8335 && (i
.tm
.base_opcode
| 0xf) == 0x4f)
8336 || ((i
.tm
.base_opcode
| 1) == 0xff
8337 && i
.tm
.extension_opcode
<= 0x1))
8338 return (i
.types
[0].bitfield
.class == Reg
8339 || i
.types
[0].bitfield
.instance
== Accum
);
8344 /* Return 1 if a FUSED_JCC_PADDING frag should be generated. */
8347 add_fused_jcc_padding_frag_p (void)
8349 /* NB: Don't work with COND_JUMP86 without i386. */
8350 if (!align_branch_power
8351 || now_seg
== absolute_section
8352 || !cpu_arch_flags
.bitfield
.cpui386
8353 || !(align_branch
& align_branch_fused_bit
))
8356 if (maybe_fused_with_jcc_p ())
8358 if (last_insn
.kind
== last_insn_other
8359 || last_insn
.seg
!= now_seg
)
8362 as_warn_where (last_insn
.file
, last_insn
.line
,
8363 _("`%s` skips -malign-branch-boundary on `%s`"),
8364 last_insn
.name
, i
.tm
.name
);
8370 /* Return 1 if a BRANCH_PREFIX frag should be generated. */
8373 add_branch_prefix_frag_p (void)
8375 /* NB: Don't work with COND_JUMP86 without i386. Don't add prefix
8376 to PadLock instructions since they include prefixes in opcode. */
8377 if (!align_branch_power
8378 || !align_branch_prefix_size
8379 || now_seg
== absolute_section
8380 || i
.tm
.cpu_flags
.bitfield
.cpupadlock
8381 || !cpu_arch_flags
.bitfield
.cpui386
)
8384 /* Don't add prefix if it is a prefix or there is no operand in case
8385 that segment prefix is special. */
8386 if (!i
.operands
|| i
.tm
.opcode_modifier
.isprefix
)
8389 if (last_insn
.kind
== last_insn_other
8390 || last_insn
.seg
!= now_seg
)
8394 as_warn_where (last_insn
.file
, last_insn
.line
,
8395 _("`%s` skips -malign-branch-boundary on `%s`"),
8396 last_insn
.name
, i
.tm
.name
);
8401 /* Return 1 if a BRANCH_PADDING frag should be generated. */
8404 add_branch_padding_frag_p (enum align_branch_kind
*branch_p
)
8408 /* NB: Don't work with COND_JUMP86 without i386. */
8409 if (!align_branch_power
8410 || now_seg
== absolute_section
8411 || !cpu_arch_flags
.bitfield
.cpui386
)
8416 /* Check for jcc and direct jmp. */
8417 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
8419 if (i
.tm
.base_opcode
== JUMP_PC_RELATIVE
)
8421 *branch_p
= align_branch_jmp
;
8422 add_padding
= align_branch
& align_branch_jmp_bit
;
8426 *branch_p
= align_branch_jcc
;
8427 if ((align_branch
& align_branch_jcc_bit
))
8431 else if (is_any_vex_encoding (&i
.tm
))
8433 else if ((i
.tm
.base_opcode
| 1) == 0xc3)
8436 *branch_p
= align_branch_ret
;
8437 if ((align_branch
& align_branch_ret_bit
))
8442 /* Check for indirect jmp, direct and indirect calls. */
8443 if (i
.tm
.base_opcode
== 0xe8)
8446 *branch_p
= align_branch_call
;
8447 if ((align_branch
& align_branch_call_bit
))
8450 else if (i
.tm
.base_opcode
== 0xff
8451 && (i
.tm
.extension_opcode
== 2
8452 || i
.tm
.extension_opcode
== 4))
8454 /* Indirect call and jmp. */
8455 *branch_p
= align_branch_indirect
;
8456 if ((align_branch
& align_branch_indirect_bit
))
8463 && (i
.op
[0].disps
->X_op
== O_symbol
8464 || (i
.op
[0].disps
->X_op
== O_subtract
8465 && i
.op
[0].disps
->X_op_symbol
== GOT_symbol
)))
8467 symbolS
*s
= i
.op
[0].disps
->X_add_symbol
;
8468 /* No padding to call to global or undefined tls_get_addr. */
8469 if ((S_IS_EXTERNAL (s
) || !S_IS_DEFINED (s
))
8470 && strcmp (S_GET_NAME (s
), tls_get_addr
) == 0)
8476 && last_insn
.kind
!= last_insn_other
8477 && last_insn
.seg
== now_seg
)
8480 as_warn_where (last_insn
.file
, last_insn
.line
,
8481 _("`%s` skips -malign-branch-boundary on `%s`"),
8482 last_insn
.name
, i
.tm
.name
);
8492 fragS
*insn_start_frag
;
8493 offsetT insn_start_off
;
8494 fragS
*fragP
= NULL
;
8495 enum align_branch_kind branch
= align_branch_none
;
8497 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8498 if (IS_ELF
&& x86_used_note
)
8500 if (i
.tm
.cpu_flags
.bitfield
.cpucmov
)
8501 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_CMOV
;
8502 if (i
.tm
.cpu_flags
.bitfield
.cpusse
)
8503 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE
;
8504 if (i
.tm
.cpu_flags
.bitfield
.cpusse2
)
8505 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE2
;
8506 if (i
.tm
.cpu_flags
.bitfield
.cpusse3
)
8507 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE3
;
8508 if (i
.tm
.cpu_flags
.bitfield
.cpussse3
)
8509 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSSE3
;
8510 if (i
.tm
.cpu_flags
.bitfield
.cpusse4_1
)
8511 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE4_1
;
8512 if (i
.tm
.cpu_flags
.bitfield
.cpusse4_2
)
8513 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_SSE4_2
;
8514 if (i
.tm
.cpu_flags
.bitfield
.cpuavx
)
8515 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX
;
8516 if (i
.tm
.cpu_flags
.bitfield
.cpuavx2
)
8517 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX2
;
8518 if (i
.tm
.cpu_flags
.bitfield
.cpufma
)
8519 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_FMA
;
8520 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512f
)
8521 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512F
;
8522 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512cd
)
8523 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512CD
;
8524 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512er
)
8525 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512ER
;
8526 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512pf
)
8527 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512PF
;
8528 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512vl
)
8529 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512VL
;
8530 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512dq
)
8531 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512DQ
;
8532 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512bw
)
8533 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512BW
;
8534 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_4fmaps
)
8535 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_4FMAPS
;
8536 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_4vnniw
)
8537 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_4VNNIW
;
8538 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_bitalg
)
8539 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_BITALG
;
8540 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512ifma
)
8541 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_IFMA
;
8542 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512vbmi
)
8543 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI
;
8544 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_vbmi2
)
8545 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI2
;
8546 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_vnni
)
8547 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_VNNI
;
8548 if (i
.tm
.cpu_flags
.bitfield
.cpuavx512_bf16
)
8549 x86_isa_1_used
|= GNU_PROPERTY_X86_ISA_1_AVX512_BF16
;
8551 if (i
.tm
.cpu_flags
.bitfield
.cpu8087
8552 || i
.tm
.cpu_flags
.bitfield
.cpu287
8553 || i
.tm
.cpu_flags
.bitfield
.cpu387
8554 || i
.tm
.cpu_flags
.bitfield
.cpu687
8555 || i
.tm
.cpu_flags
.bitfield
.cpufisttp
)
8556 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_X87
;
8558 || i
.tm
.base_opcode
== 0xf77 /* emms */
8559 || i
.tm
.base_opcode
== 0xf0e /* femms */)
8560 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_MMX
;
8562 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XMM
;
8564 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_YMM
;
8566 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_ZMM
;
8567 if (i
.tm
.cpu_flags
.bitfield
.cpufxsr
)
8568 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_FXSR
;
8569 if (i
.tm
.cpu_flags
.bitfield
.cpuxsave
)
8570 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVE
;
8571 if (i
.tm
.cpu_flags
.bitfield
.cpuxsaveopt
)
8572 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT
;
8573 if (i
.tm
.cpu_flags
.bitfield
.cpuxsavec
)
8574 x86_feature_2_used
|= GNU_PROPERTY_X86_FEATURE_2_XSAVEC
;
8578 /* Tie dwarf2 debug info to the address at the start of the insn.
8579 We can't do this after the insn has been output as the current
8580 frag may have been closed off. eg. by frag_var. */
8581 dwarf2_emit_insn (0);
8583 insn_start_frag
= frag_now
;
8584 insn_start_off
= frag_now_fix ();
8586 if (add_branch_padding_frag_p (&branch
))
8589 /* Branch can be 8 bytes. Leave some room for prefixes. */
8590 unsigned int max_branch_padding_size
= 14;
8592 /* Align section to boundary. */
8593 record_alignment (now_seg
, align_branch_power
);
8595 /* Make room for padding. */
8596 frag_grow (max_branch_padding_size
);
8598 /* Start of the padding. */
8603 frag_var (rs_machine_dependent
, max_branch_padding_size
, 0,
8604 ENCODE_RELAX_STATE (BRANCH_PADDING
, 0),
8607 fragP
->tc_frag_data
.branch_type
= branch
;
8608 fragP
->tc_frag_data
.max_bytes
= max_branch_padding_size
;
8612 if (i
.tm
.opcode_modifier
.jump
== JUMP
)
8614 else if (i
.tm
.opcode_modifier
.jump
== JUMP_BYTE
8615 || i
.tm
.opcode_modifier
.jump
== JUMP_DWORD
)
8617 else if (i
.tm
.opcode_modifier
.jump
== JUMP_INTERSEGMENT
)
8618 output_interseg_jump ();
8621 /* Output normal instructions here. */
8625 unsigned int prefix
;
8628 && (i
.tm
.base_opcode
== 0xfaee8
8629 || i
.tm
.base_opcode
== 0xfaef0
8630 || i
.tm
.base_opcode
== 0xfaef8))
8632 /* Encode lfence, mfence, and sfence as
8633 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
8634 offsetT val
= 0x240483f0ULL
;
8636 md_number_to_chars (p
, val
, 5);
8640 /* Some processors fail on LOCK prefix. This options makes
8641 assembler ignore LOCK prefix and serves as a workaround. */
8642 if (omit_lock_prefix
)
8644 if (i
.tm
.base_opcode
== LOCK_PREFIX_OPCODE
)
8646 i
.prefix
[LOCK_PREFIX
] = 0;
8650 /* Skip if this is a branch. */
8652 else if (add_fused_jcc_padding_frag_p ())
8654 /* Make room for padding. */
8655 frag_grow (MAX_FUSED_JCC_PADDING_SIZE
);
8660 frag_var (rs_machine_dependent
, MAX_FUSED_JCC_PADDING_SIZE
, 0,
8661 ENCODE_RELAX_STATE (FUSED_JCC_PADDING
, 0),
8664 fragP
->tc_frag_data
.branch_type
= align_branch_fused
;
8665 fragP
->tc_frag_data
.max_bytes
= MAX_FUSED_JCC_PADDING_SIZE
;
8667 else if (add_branch_prefix_frag_p ())
8669 unsigned int max_prefix_size
= align_branch_prefix_size
;
8671 /* Make room for padding. */
8672 frag_grow (max_prefix_size
);
8677 frag_var (rs_machine_dependent
, max_prefix_size
, 0,
8678 ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0),
8681 fragP
->tc_frag_data
.max_bytes
= max_prefix_size
;
8684 /* Since the VEX/EVEX prefix contains the implicit prefix, we
8685 don't need the explicit prefix. */
8686 if (!i
.tm
.opcode_modifier
.vex
&& !i
.tm
.opcode_modifier
.evex
)
8688 switch (i
.tm
.opcode_length
)
8691 if (i
.tm
.base_opcode
& 0xff000000)
8693 prefix
= (i
.tm
.base_opcode
>> 24) & 0xff;
8694 if (!i
.tm
.cpu_flags
.bitfield
.cpupadlock
8695 || prefix
!= REPE_PREFIX_OPCODE
8696 || (i
.prefix
[REP_PREFIX
] != REPE_PREFIX_OPCODE
))
8697 add_prefix (prefix
);
8701 if ((i
.tm
.base_opcode
& 0xff0000) != 0)
8703 prefix
= (i
.tm
.base_opcode
>> 16) & 0xff;
8704 add_prefix (prefix
);
8710 /* Check for pseudo prefixes. */
8711 as_bad_where (insn_start_frag
->fr_file
,
8712 insn_start_frag
->fr_line
,
8713 _("pseudo prefix without instruction"));
8719 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8720 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
8721 R_X86_64_GOTTPOFF relocation so that linker can safely
8722 perform IE->LE optimization. A dummy REX_OPCODE prefix
8723 is also needed for lea with R_X86_64_GOTPC32_TLSDESC
8724 relocation for GDesc -> IE/LE optimization. */
8725 if (x86_elf_abi
== X86_64_X32_ABI
8727 && (i
.reloc
[0] == BFD_RELOC_X86_64_GOTTPOFF
8728 || i
.reloc
[0] == BFD_RELOC_X86_64_GOTPC32_TLSDESC
)
8729 && i
.prefix
[REX_PREFIX
] == 0)
8730 add_prefix (REX_OPCODE
);
8733 /* The prefix bytes. */
8734 for (j
= ARRAY_SIZE (i
.prefix
), q
= i
.prefix
; j
> 0; j
--, q
++)
8736 FRAG_APPEND_1_CHAR (*q
);
8740 for (j
= 0, q
= i
.prefix
; j
< ARRAY_SIZE (i
.prefix
); j
++, q
++)
8745 /* REX byte is encoded in VEX prefix. */
8749 FRAG_APPEND_1_CHAR (*q
);
8752 /* There should be no other prefixes for instructions
8757 /* For EVEX instructions i.vrex should become 0 after
8758 build_evex_prefix. For VEX instructions upper 16 registers
8759 aren't available, so VREX should be 0. */
8762 /* Now the VEX prefix. */
8763 p
= frag_more (i
.vex
.length
);
8764 for (j
= 0; j
< i
.vex
.length
; j
++)
8765 p
[j
] = i
.vex
.bytes
[j
];
8768 /* Now the opcode; be careful about word order here! */
8769 if (i
.tm
.opcode_length
== 1)
8771 FRAG_APPEND_1_CHAR (i
.tm
.base_opcode
);
8775 switch (i
.tm
.opcode_length
)
8779 *p
++ = (i
.tm
.base_opcode
>> 24) & 0xff;
8780 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
8784 *p
++ = (i
.tm
.base_opcode
>> 16) & 0xff;
8794 /* Put out high byte first: can't use md_number_to_chars! */
8795 *p
++ = (i
.tm
.base_opcode
>> 8) & 0xff;
8796 *p
= i
.tm
.base_opcode
& 0xff;
8799 /* Now the modrm byte and sib byte (if present). */
8800 if (i
.tm
.opcode_modifier
.modrm
)
8802 FRAG_APPEND_1_CHAR ((i
.rm
.regmem
<< 0
8805 /* If i.rm.regmem == ESP (4)
8806 && i.rm.mode != (Register mode)
8808 ==> need second modrm byte. */
8809 if (i
.rm
.regmem
== ESCAPE_TO_TWO_BYTE_ADDRESSING
8811 && !(i
.base_reg
&& i
.base_reg
->reg_type
.bitfield
.word
))
8812 FRAG_APPEND_1_CHAR ((i
.sib
.base
<< 0
8814 | i
.sib
.scale
<< 6));
8817 if (i
.disp_operands
)
8818 output_disp (insn_start_frag
, insn_start_off
);
8821 output_imm (insn_start_frag
, insn_start_off
);
8824 * frag_now_fix () returning plain abs_section_offset when we're in the
8825 * absolute section, and abs_section_offset not getting updated as data
8826 * gets added to the frag breaks the logic below.
8828 if (now_seg
!= absolute_section
)
8830 j
= encoding_length (insn_start_frag
, insn_start_off
, frag_more (0));
8832 as_warn (_("instruction length of %u bytes exceeds the limit of 15"),
8836 /* NB: Don't add prefix with GOTPC relocation since
8837 output_disp() above depends on the fixed encoding
8838 length. Can't add prefix with TLS relocation since
8839 it breaks TLS linker optimization. */
8840 unsigned int max
= i
.has_gotpc_tls_reloc
? 0 : 15 - j
;
8841 /* Prefix count on the current instruction. */
8842 unsigned int count
= i
.vex
.length
;
8844 for (k
= 0; k
< ARRAY_SIZE (i
.prefix
); k
++)
8845 /* REX byte is encoded in VEX/EVEX prefix. */
8846 if (i
.prefix
[k
] && (k
!= REX_PREFIX
|| !i
.vex
.length
))
8849 /* Count prefixes for extended opcode maps. */
8851 switch (i
.tm
.opcode_length
)
8854 if (((i
.tm
.base_opcode
>> 16) & 0xff) == 0xf)
8857 switch ((i
.tm
.base_opcode
>> 8) & 0xff)
8869 if (((i
.tm
.base_opcode
>> 8) & 0xff) == 0xf)
8878 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
8881 /* Set the maximum prefix size in BRANCH_PREFIX
8883 if (fragP
->tc_frag_data
.max_bytes
> max
)
8884 fragP
->tc_frag_data
.max_bytes
= max
;
8885 if (fragP
->tc_frag_data
.max_bytes
> count
)
8886 fragP
->tc_frag_data
.max_bytes
-= count
;
8888 fragP
->tc_frag_data
.max_bytes
= 0;
8892 /* Remember the maximum prefix size in FUSED_JCC_PADDING
8894 unsigned int max_prefix_size
;
8895 if (align_branch_prefix_size
> max
)
8896 max_prefix_size
= max
;
8898 max_prefix_size
= align_branch_prefix_size
;
8899 if (max_prefix_size
> count
)
8900 fragP
->tc_frag_data
.max_prefix_length
8901 = max_prefix_size
- count
;
8904 /* Use existing segment prefix if possible. Use CS
8905 segment prefix in 64-bit mode. In 32-bit mode, use SS
8906 segment prefix with ESP/EBP base register and use DS
8907 segment prefix without ESP/EBP base register. */
8908 if (i
.prefix
[SEG_PREFIX
])
8909 fragP
->tc_frag_data
.default_prefix
= i
.prefix
[SEG_PREFIX
];
8910 else if (flag_code
== CODE_64BIT
)
8911 fragP
->tc_frag_data
.default_prefix
= CS_PREFIX_OPCODE
;
8913 && (i
.base_reg
->reg_num
== 4
8914 || i
.base_reg
->reg_num
== 5))
8915 fragP
->tc_frag_data
.default_prefix
= SS_PREFIX_OPCODE
;
8917 fragP
->tc_frag_data
.default_prefix
= DS_PREFIX_OPCODE
;
8922 /* NB: Don't work with COND_JUMP86 without i386. */
8923 if (align_branch_power
8924 && now_seg
!= absolute_section
8925 && cpu_arch_flags
.bitfield
.cpui386
)
8927 /* Terminate each frag so that we can add prefix and check for
8929 frag_wane (frag_now
);
8936 pi ("" /*line*/, &i
);
8938 #endif /* DEBUG386 */
8941 /* Return the size of the displacement operand N. */
8944 disp_size (unsigned int n
)
8948 if (i
.types
[n
].bitfield
.disp64
)
8950 else if (i
.types
[n
].bitfield
.disp8
)
8952 else if (i
.types
[n
].bitfield
.disp16
)
8957 /* Return the size of the immediate operand N. */
8960 imm_size (unsigned int n
)
8963 if (i
.types
[n
].bitfield
.imm64
)
8965 else if (i
.types
[n
].bitfield
.imm8
|| i
.types
[n
].bitfield
.imm8s
)
8967 else if (i
.types
[n
].bitfield
.imm16
)
8973 output_disp (fragS
*insn_start_frag
, offsetT insn_start_off
)
8978 for (n
= 0; n
< i
.operands
; n
++)
8980 if (operand_type_check (i
.types
[n
], disp
))
8982 if (i
.op
[n
].disps
->X_op
== O_constant
)
8984 int size
= disp_size (n
);
8985 offsetT val
= i
.op
[n
].disps
->X_add_number
;
8987 val
= offset_in_range (val
>> (size
== 1 ? i
.memshift
: 0),
8989 p
= frag_more (size
);
8990 md_number_to_chars (p
, val
, size
);
8994 enum bfd_reloc_code_real reloc_type
;
8995 int size
= disp_size (n
);
8996 int sign
= i
.types
[n
].bitfield
.disp32s
;
8997 int pcrel
= (i
.flags
[n
] & Operand_PCrel
) != 0;
9000 /* We can't have 8 bit displacement here. */
9001 gas_assert (!i
.types
[n
].bitfield
.disp8
);
9003 /* The PC relative address is computed relative
9004 to the instruction boundary, so in case immediate
9005 fields follows, we need to adjust the value. */
9006 if (pcrel
&& i
.imm_operands
)
9011 for (n1
= 0; n1
< i
.operands
; n1
++)
9012 if (operand_type_check (i
.types
[n1
], imm
))
9014 /* Only one immediate is allowed for PC
9015 relative address. */
9016 gas_assert (sz
== 0);
9018 i
.op
[n
].disps
->X_add_number
-= sz
;
9020 /* We should find the immediate. */
9021 gas_assert (sz
!= 0);
9024 p
= frag_more (size
);
9025 reloc_type
= reloc (size
, pcrel
, sign
, i
.reloc
[n
]);
9027 && GOT_symbol
== i
.op
[n
].disps
->X_add_symbol
9028 && (((reloc_type
== BFD_RELOC_32
9029 || reloc_type
== BFD_RELOC_X86_64_32S
9030 || (reloc_type
== BFD_RELOC_64
9032 && (i
.op
[n
].disps
->X_op
== O_symbol
9033 || (i
.op
[n
].disps
->X_op
== O_add
9034 && ((symbol_get_value_expression
9035 (i
.op
[n
].disps
->X_op_symbol
)->X_op
)
9037 || reloc_type
== BFD_RELOC_32_PCREL
))
9041 reloc_type
= BFD_RELOC_386_GOTPC
;
9042 i
.has_gotpc_tls_reloc
= TRUE
;
9043 i
.op
[n
].imms
->X_add_number
+=
9044 encoding_length (insn_start_frag
, insn_start_off
, p
);
9046 else if (reloc_type
== BFD_RELOC_64
)
9047 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
9049 /* Don't do the adjustment for x86-64, as there
9050 the pcrel addressing is relative to the _next_
9051 insn, and that is taken care of in other code. */
9052 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
9054 else if (align_branch_power
)
9058 case BFD_RELOC_386_TLS_GD
:
9059 case BFD_RELOC_386_TLS_LDM
:
9060 case BFD_RELOC_386_TLS_IE
:
9061 case BFD_RELOC_386_TLS_IE_32
:
9062 case BFD_RELOC_386_TLS_GOTIE
:
9063 case BFD_RELOC_386_TLS_GOTDESC
:
9064 case BFD_RELOC_386_TLS_DESC_CALL
:
9065 case BFD_RELOC_X86_64_TLSGD
:
9066 case BFD_RELOC_X86_64_TLSLD
:
9067 case BFD_RELOC_X86_64_GOTTPOFF
:
9068 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
9069 case BFD_RELOC_X86_64_TLSDESC_CALL
:
9070 i
.has_gotpc_tls_reloc
= TRUE
;
9075 fixP
= fix_new_exp (frag_now
, p
- frag_now
->fr_literal
,
9076 size
, i
.op
[n
].disps
, pcrel
,
9078 /* Check for "call/jmp *mem", "mov mem, %reg",
9079 "test %reg, mem" and "binop mem, %reg" where binop
9080 is one of adc, add, and, cmp, or, sbb, sub, xor
9081 instructions without data prefix. Always generate
9082 R_386_GOT32X for "sym*GOT" operand in 32-bit mode. */
9083 if (i
.prefix
[DATA_PREFIX
] == 0
9084 && (generate_relax_relocations
9087 && i
.rm
.regmem
== 5))
9089 || (i
.rm
.mode
== 0 && i
.rm
.regmem
== 5))
9090 && !is_any_vex_encoding(&i
.tm
)
9091 && ((i
.operands
== 1
9092 && i
.tm
.base_opcode
== 0xff
9093 && (i
.rm
.reg
== 2 || i
.rm
.reg
== 4))
9095 && (i
.tm
.base_opcode
== 0x8b
9096 || i
.tm
.base_opcode
== 0x85
9097 || (i
.tm
.base_opcode
& ~0x38) == 0x03))))
9101 fixP
->fx_tcbit
= i
.rex
!= 0;
9103 && (i
.base_reg
->reg_num
== RegIP
))
9104 fixP
->fx_tcbit2
= 1;
9107 fixP
->fx_tcbit2
= 1;
9115 output_imm (fragS
*insn_start_frag
, offsetT insn_start_off
)
9120 for (n
= 0; n
< i
.operands
; n
++)
9122 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
9123 if (i
.rounding
&& (int) n
== i
.rounding
->operand
)
9126 if (operand_type_check (i
.types
[n
], imm
))
9128 if (i
.op
[n
].imms
->X_op
== O_constant
)
9130 int size
= imm_size (n
);
9133 val
= offset_in_range (i
.op
[n
].imms
->X_add_number
,
9135 p
= frag_more (size
);
9136 md_number_to_chars (p
, val
, size
);
9140 /* Not absolute_section.
9141 Need a 32-bit fixup (don't support 8bit
9142 non-absolute imms). Try to support other
9144 enum bfd_reloc_code_real reloc_type
;
9145 int size
= imm_size (n
);
9148 if (i
.types
[n
].bitfield
.imm32s
9149 && (i
.suffix
== QWORD_MNEM_SUFFIX
9150 || (!i
.suffix
&& i
.tm
.opcode_modifier
.no_lsuf
)))
9155 p
= frag_more (size
);
9156 reloc_type
= reloc (size
, 0, sign
, i
.reloc
[n
]);
9158 /* This is tough to explain. We end up with this one if we
9159 * have operands that look like
9160 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
9161 * obtain the absolute address of the GOT, and it is strongly
9162 * preferable from a performance point of view to avoid using
9163 * a runtime relocation for this. The actual sequence of
9164 * instructions often look something like:
9169 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
9171 * The call and pop essentially return the absolute address
9172 * of the label .L66 and store it in %ebx. The linker itself
9173 * will ultimately change the first operand of the addl so
9174 * that %ebx points to the GOT, but to keep things simple, the
9175 * .o file must have this operand set so that it generates not
9176 * the absolute address of .L66, but the absolute address of
9177 * itself. This allows the linker itself simply treat a GOTPC
9178 * relocation as asking for a pcrel offset to the GOT to be
9179 * added in, and the addend of the relocation is stored in the
9180 * operand field for the instruction itself.
9182 * Our job here is to fix the operand so that it would add
9183 * the correct offset so that %ebx would point to itself. The
9184 * thing that is tricky is that .-.L66 will point to the
9185 * beginning of the instruction, so we need to further modify
9186 * the operand so that it will point to itself. There are
9187 * other cases where you have something like:
9189 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
9191 * and here no correction would be required. Internally in
9192 * the assembler we treat operands of this form as not being
9193 * pcrel since the '.' is explicitly mentioned, and I wonder
9194 * whether it would simplify matters to do it this way. Who
9195 * knows. In earlier versions of the PIC patches, the
9196 * pcrel_adjust field was used to store the correction, but
9197 * since the expression is not pcrel, I felt it would be
9198 * confusing to do it this way. */
9200 if ((reloc_type
== BFD_RELOC_32
9201 || reloc_type
== BFD_RELOC_X86_64_32S
9202 || reloc_type
== BFD_RELOC_64
)
9204 && GOT_symbol
== i
.op
[n
].imms
->X_add_symbol
9205 && (i
.op
[n
].imms
->X_op
== O_symbol
9206 || (i
.op
[n
].imms
->X_op
== O_add
9207 && ((symbol_get_value_expression
9208 (i
.op
[n
].imms
->X_op_symbol
)->X_op
)
9212 reloc_type
= BFD_RELOC_386_GOTPC
;
9214 reloc_type
= BFD_RELOC_X86_64_GOTPC32
;
9216 reloc_type
= BFD_RELOC_X86_64_GOTPC64
;
9217 i
.has_gotpc_tls_reloc
= TRUE
;
9218 i
.op
[n
].imms
->X_add_number
+=
9219 encoding_length (insn_start_frag
, insn_start_off
, p
);
9221 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
, size
,
9222 i
.op
[n
].imms
, 0, reloc_type
);
9228 /* x86_cons_fix_new is called via the expression parsing code when a
9229 reloc is needed. We use this hook to get the correct .got reloc. */
9230 static int cons_sign
= -1;
9233 x86_cons_fix_new (fragS
*frag
, unsigned int off
, unsigned int len
,
9234 expressionS
*exp
, bfd_reloc_code_real_type r
)
9236 r
= reloc (len
, 0, cons_sign
, r
);
9239 if (exp
->X_op
== O_secrel
)
9241 exp
->X_op
= O_symbol
;
9242 r
= BFD_RELOC_32_SECREL
;
9246 fix_new_exp (frag
, off
, len
, exp
, 0, r
);
9249 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
9250 purpose of the `.dc.a' internal pseudo-op. */
9253 x86_address_bytes (void)
9255 if ((stdoutput
->arch_info
->mach
& bfd_mach_x64_32
))
9257 return stdoutput
->arch_info
->bits_per_address
/ 8;
9260 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
9262 # define lex_got(reloc, adjust, types) NULL
9264 /* Parse operands of the form
9265 <symbol>@GOTOFF+<nnn>
9266 and similar .plt or .got references.
9268 If we find one, set up the correct relocation in RELOC and copy the
9269 input string, minus the `@GOTOFF' into a malloc'd buffer for
9270 parsing by the calling routine. Return this buffer, and if ADJUST
9271 is non-null set it to the length of the string we removed from the
9272 input line. Otherwise return NULL. */
9274 lex_got (enum bfd_reloc_code_real
*rel
,
9276 i386_operand_type
*types
)
9278 /* Some of the relocations depend on the size of what field is to
9279 be relocated. But in our callers i386_immediate and i386_displacement
9280 we don't yet know the operand size (this will be set by insn
9281 matching). Hence we record the word32 relocation here,
9282 and adjust the reloc according to the real size in reloc(). */
9283 static const struct {
9286 const enum bfd_reloc_code_real rel
[2];
9287 const i386_operand_type types64
;
9289 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9290 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32
,
9292 OPERAND_TYPE_IMM32_64
},
9294 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real
,
9295 BFD_RELOC_X86_64_PLTOFF64
},
9296 OPERAND_TYPE_IMM64
},
9297 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32
,
9298 BFD_RELOC_X86_64_PLT32
},
9299 OPERAND_TYPE_IMM32_32S_DISP32
},
9300 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real
,
9301 BFD_RELOC_X86_64_GOTPLT64
},
9302 OPERAND_TYPE_IMM64_DISP64
},
9303 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF
,
9304 BFD_RELOC_X86_64_GOTOFF64
},
9305 OPERAND_TYPE_IMM64_DISP64
},
9306 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real
,
9307 BFD_RELOC_X86_64_GOTPCREL
},
9308 OPERAND_TYPE_IMM32_32S_DISP32
},
9309 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD
,
9310 BFD_RELOC_X86_64_TLSGD
},
9311 OPERAND_TYPE_IMM32_32S_DISP32
},
9312 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM
,
9313 _dummy_first_bfd_reloc_code_real
},
9314 OPERAND_TYPE_NONE
},
9315 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real
,
9316 BFD_RELOC_X86_64_TLSLD
},
9317 OPERAND_TYPE_IMM32_32S_DISP32
},
9318 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32
,
9319 BFD_RELOC_X86_64_GOTTPOFF
},
9320 OPERAND_TYPE_IMM32_32S_DISP32
},
9321 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32
,
9322 BFD_RELOC_X86_64_TPOFF32
},
9323 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
9324 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE
,
9325 _dummy_first_bfd_reloc_code_real
},
9326 OPERAND_TYPE_NONE
},
9327 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32
,
9328 BFD_RELOC_X86_64_DTPOFF32
},
9329 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
9330 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE
,
9331 _dummy_first_bfd_reloc_code_real
},
9332 OPERAND_TYPE_NONE
},
9333 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE
,
9334 _dummy_first_bfd_reloc_code_real
},
9335 OPERAND_TYPE_NONE
},
9336 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32
,
9337 BFD_RELOC_X86_64_GOT32
},
9338 OPERAND_TYPE_IMM32_32S_64_DISP32
},
9339 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC
,
9340 BFD_RELOC_X86_64_GOTPC32_TLSDESC
},
9341 OPERAND_TYPE_IMM32_32S_DISP32
},
9342 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL
,
9343 BFD_RELOC_X86_64_TLSDESC_CALL
},
9344 OPERAND_TYPE_IMM32_32S_DISP32
},
9349 #if defined (OBJ_MAYBE_ELF)
9354 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
9355 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
9358 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
9360 int len
= gotrel
[j
].len
;
9361 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
9363 if (gotrel
[j
].rel
[object_64bit
] != 0)
9366 char *tmpbuf
, *past_reloc
;
9368 *rel
= gotrel
[j
].rel
[object_64bit
];
9372 if (flag_code
!= CODE_64BIT
)
9374 types
->bitfield
.imm32
= 1;
9375 types
->bitfield
.disp32
= 1;
9378 *types
= gotrel
[j
].types64
;
9381 if (j
!= 0 && GOT_symbol
== NULL
)
9382 GOT_symbol
= symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME
);
9384 /* The length of the first part of our input line. */
9385 first
= cp
- input_line_pointer
;
9387 /* The second part goes from after the reloc token until
9388 (and including) an end_of_line char or comma. */
9389 past_reloc
= cp
+ 1 + len
;
9391 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
9393 second
= cp
+ 1 - past_reloc
;
9395 /* Allocate and copy string. The trailing NUL shouldn't
9396 be necessary, but be safe. */
9397 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
9398 memcpy (tmpbuf
, input_line_pointer
, first
);
9399 if (second
!= 0 && *past_reloc
!= ' ')
9400 /* Replace the relocation token with ' ', so that
9401 errors like foo@GOTOFF1 will be detected. */
9402 tmpbuf
[first
++] = ' ';
9404 /* Increment length by 1 if the relocation token is
9409 memcpy (tmpbuf
+ first
, past_reloc
, second
);
9410 tmpbuf
[first
+ second
] = '\0';
9414 as_bad (_("@%s reloc is not supported with %d-bit output format"),
9415 gotrel
[j
].str
, 1 << (5 + object_64bit
));
9420 /* Might be a symbol version string. Don't as_bad here. */
9429 /* Parse operands of the form
9430 <symbol>@SECREL32+<nnn>
9432 If we find one, set up the correct relocation in RELOC and copy the
9433 input string, minus the `@SECREL32' into a malloc'd buffer for
9434 parsing by the calling routine. Return this buffer, and if ADJUST
9435 is non-null set it to the length of the string we removed from the
9436 input line. Otherwise return NULL.
9438 This function is copied from the ELF version above adjusted for PE targets. */
9441 lex_got (enum bfd_reloc_code_real
*rel ATTRIBUTE_UNUSED
,
9442 int *adjust ATTRIBUTE_UNUSED
,
9443 i386_operand_type
*types
)
9449 const enum bfd_reloc_code_real rel
[2];
9450 const i386_operand_type types64
;
9454 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL
,
9455 BFD_RELOC_32_SECREL
},
9456 OPERAND_TYPE_IMM32_32S_64_DISP32_64
},
9462 for (cp
= input_line_pointer
; *cp
!= '@'; cp
++)
9463 if (is_end_of_line
[(unsigned char) *cp
] || *cp
== ',')
9466 for (j
= 0; j
< ARRAY_SIZE (gotrel
); j
++)
9468 int len
= gotrel
[j
].len
;
9470 if (strncasecmp (cp
+ 1, gotrel
[j
].str
, len
) == 0)
9472 if (gotrel
[j
].rel
[object_64bit
] != 0)
9475 char *tmpbuf
, *past_reloc
;
9477 *rel
= gotrel
[j
].rel
[object_64bit
];
9483 if (flag_code
!= CODE_64BIT
)
9485 types
->bitfield
.imm32
= 1;
9486 types
->bitfield
.disp32
= 1;
9489 *types
= gotrel
[j
].types64
;
9492 /* The length of the first part of our input line. */
9493 first
= cp
- input_line_pointer
;
9495 /* The second part goes from after the reloc token until
9496 (and including) an end_of_line char or comma. */
9497 past_reloc
= cp
+ 1 + len
;
9499 while (!is_end_of_line
[(unsigned char) *cp
] && *cp
!= ',')
9501 second
= cp
+ 1 - past_reloc
;
9503 /* Allocate and copy string. The trailing NUL shouldn't
9504 be necessary, but be safe. */
9505 tmpbuf
= XNEWVEC (char, first
+ second
+ 2);
9506 memcpy (tmpbuf
, input_line_pointer
, first
);
9507 if (second
!= 0 && *past_reloc
!= ' ')
9508 /* Replace the relocation token with ' ', so that
9509 errors like foo@SECLREL321 will be detected. */
9510 tmpbuf
[first
++] = ' ';
9511 memcpy (tmpbuf
+ first
, past_reloc
, second
);
9512 tmpbuf
[first
+ second
] = '\0';
9516 as_bad (_("@%s reloc is not supported with %d-bit output format"),
9517 gotrel
[j
].str
, 1 << (5 + object_64bit
));
9522 /* Might be a symbol version string. Don't as_bad here. */
9528 bfd_reloc_code_real_type
9529 x86_cons (expressionS
*exp
, int size
)
9531 bfd_reloc_code_real_type got_reloc
= NO_RELOC
;
9533 intel_syntax
= -intel_syntax
;
9536 if (size
== 4 || (object_64bit
&& size
== 8))
9538 /* Handle @GOTOFF and the like in an expression. */
9540 char *gotfree_input_line
;
9543 save
= input_line_pointer
;
9544 gotfree_input_line
= lex_got (&got_reloc
, &adjust
, NULL
);
9545 if (gotfree_input_line
)
9546 input_line_pointer
= gotfree_input_line
;
9550 if (gotfree_input_line
)
9552 /* expression () has merrily parsed up to the end of line,
9553 or a comma - in the wrong buffer. Transfer how far
9554 input_line_pointer has moved to the right buffer. */
9555 input_line_pointer
= (save
9556 + (input_line_pointer
- gotfree_input_line
)
9558 free (gotfree_input_line
);
9559 if (exp
->X_op
== O_constant
9560 || exp
->X_op
== O_absent
9561 || exp
->X_op
== O_illegal
9562 || exp
->X_op
== O_register
9563 || exp
->X_op
== O_big
)
9565 char c
= *input_line_pointer
;
9566 *input_line_pointer
= 0;
9567 as_bad (_("missing or invalid expression `%s'"), save
);
9568 *input_line_pointer
= c
;
9570 else if ((got_reloc
== BFD_RELOC_386_PLT32
9571 || got_reloc
== BFD_RELOC_X86_64_PLT32
)
9572 && exp
->X_op
!= O_symbol
)
9574 char c
= *input_line_pointer
;
9575 *input_line_pointer
= 0;
9576 as_bad (_("invalid PLT expression `%s'"), save
);
9577 *input_line_pointer
= c
;
9584 intel_syntax
= -intel_syntax
;
9587 i386_intel_simplify (exp
);
9593 signed_cons (int size
)
9595 if (flag_code
== CODE_64BIT
)
9603 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
9610 if (exp
.X_op
== O_symbol
)
9611 exp
.X_op
= O_secrel
;
9613 emit_expr (&exp
, 4);
9615 while (*input_line_pointer
++ == ',');
9617 input_line_pointer
--;
9618 demand_empty_rest_of_line ();
9622 /* Handle Vector operations. */
9625 check_VecOperations (char *op_string
, char *op_end
)
9627 const reg_entry
*mask
;
9632 && (op_end
== NULL
|| op_string
< op_end
))
9635 if (*op_string
== '{')
9639 /* Check broadcasts. */
9640 if (strncmp (op_string
, "1to", 3) == 0)
9645 goto duplicated_vec_op
;
9648 if (*op_string
== '8')
9650 else if (*op_string
== '4')
9652 else if (*op_string
== '2')
9654 else if (*op_string
== '1'
9655 && *(op_string
+1) == '6')
9662 as_bad (_("Unsupported broadcast: `%s'"), saved
);
9667 broadcast_op
.type
= bcst_type
;
9668 broadcast_op
.operand
= this_operand
;
9669 broadcast_op
.bytes
= 0;
9670 i
.broadcast
= &broadcast_op
;
9672 /* Check masking operation. */
9673 else if ((mask
= parse_register (op_string
, &end_op
)) != NULL
)
9675 /* k0 can't be used for write mask. */
9676 if (mask
->reg_type
.bitfield
.class != RegMask
|| !mask
->reg_num
)
9678 as_bad (_("`%s%s' can't be used for write mask"),
9679 register_prefix
, mask
->reg_name
);
9685 mask_op
.mask
= mask
;
9686 mask_op
.zeroing
= 0;
9687 mask_op
.operand
= this_operand
;
9693 goto duplicated_vec_op
;
9695 i
.mask
->mask
= mask
;
9697 /* Only "{z}" is allowed here. No need to check
9698 zeroing mask explicitly. */
9699 if (i
.mask
->operand
!= this_operand
)
9701 as_bad (_("invalid write mask `%s'"), saved
);
9708 /* Check zeroing-flag for masking operation. */
9709 else if (*op_string
== 'z')
9713 mask_op
.mask
= NULL
;
9714 mask_op
.zeroing
= 1;
9715 mask_op
.operand
= this_operand
;
9720 if (i
.mask
->zeroing
)
9723 as_bad (_("duplicated `%s'"), saved
);
9727 i
.mask
->zeroing
= 1;
9729 /* Only "{%k}" is allowed here. No need to check mask
9730 register explicitly. */
9731 if (i
.mask
->operand
!= this_operand
)
9733 as_bad (_("invalid zeroing-masking `%s'"),
9742 goto unknown_vec_op
;
9744 if (*op_string
!= '}')
9746 as_bad (_("missing `}' in `%s'"), saved
);
9751 /* Strip whitespace since the addition of pseudo prefixes
9752 changed how the scrubber treats '{'. */
9753 if (is_space_char (*op_string
))
9759 /* We don't know this one. */
9760 as_bad (_("unknown vector operation: `%s'"), saved
);
9764 if (i
.mask
&& i
.mask
->zeroing
&& !i
.mask
->mask
)
9766 as_bad (_("zeroing-masking only allowed with write mask"));
9774 i386_immediate (char *imm_start
)
9776 char *save_input_line_pointer
;
9777 char *gotfree_input_line
;
9780 i386_operand_type types
;
9782 operand_type_set (&types
, ~0);
9784 if (i
.imm_operands
== MAX_IMMEDIATE_OPERANDS
)
9786 as_bad (_("at most %d immediate operands are allowed"),
9787 MAX_IMMEDIATE_OPERANDS
);
9791 exp
= &im_expressions
[i
.imm_operands
++];
9792 i
.op
[this_operand
].imms
= exp
;
9794 if (is_space_char (*imm_start
))
9797 save_input_line_pointer
= input_line_pointer
;
9798 input_line_pointer
= imm_start
;
9800 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
9801 if (gotfree_input_line
)
9802 input_line_pointer
= gotfree_input_line
;
9804 exp_seg
= expression (exp
);
9808 /* Handle vector operations. */
9809 if (*input_line_pointer
== '{')
9811 input_line_pointer
= check_VecOperations (input_line_pointer
,
9813 if (input_line_pointer
== NULL
)
9817 if (*input_line_pointer
)
9818 as_bad (_("junk `%s' after expression"), input_line_pointer
);
9820 input_line_pointer
= save_input_line_pointer
;
9821 if (gotfree_input_line
)
9823 free (gotfree_input_line
);
9825 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
9826 exp
->X_op
= O_illegal
;
9829 return i386_finalize_immediate (exp_seg
, exp
, types
, imm_start
);
9833 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
9834 i386_operand_type types
, const char *imm_start
)
9836 if (exp
->X_op
== O_absent
|| exp
->X_op
== O_illegal
|| exp
->X_op
== O_big
)
9839 as_bad (_("missing or invalid immediate expression `%s'"),
9843 else if (exp
->X_op
== O_constant
)
9845 /* Size it properly later. */
9846 i
.types
[this_operand
].bitfield
.imm64
= 1;
9847 /* If not 64bit, sign extend val. */
9848 if (flag_code
!= CODE_64BIT
9849 && (exp
->X_add_number
& ~(((addressT
) 2 << 31) - 1)) == 0)
9851 = (exp
->X_add_number
^ ((addressT
) 1 << 31)) - ((addressT
) 1 << 31);
9853 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
9854 else if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
9855 && exp_seg
!= absolute_section
9856 && exp_seg
!= text_section
9857 && exp_seg
!= data_section
9858 && exp_seg
!= bss_section
9859 && exp_seg
!= undefined_section
9860 && !bfd_is_com_section (exp_seg
))
9862 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
9866 else if (!intel_syntax
&& exp_seg
== reg_section
)
9869 as_bad (_("illegal immediate register operand %s"), imm_start
);
9874 /* This is an address. The size of the address will be
9875 determined later, depending on destination register,
9876 suffix, or the default for the section. */
9877 i
.types
[this_operand
].bitfield
.imm8
= 1;
9878 i
.types
[this_operand
].bitfield
.imm16
= 1;
9879 i
.types
[this_operand
].bitfield
.imm32
= 1;
9880 i
.types
[this_operand
].bitfield
.imm32s
= 1;
9881 i
.types
[this_operand
].bitfield
.imm64
= 1;
9882 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
9890 i386_scale (char *scale
)
9893 char *save
= input_line_pointer
;
9895 input_line_pointer
= scale
;
9896 val
= get_absolute_expression ();
9901 i
.log2_scale_factor
= 0;
9904 i
.log2_scale_factor
= 1;
9907 i
.log2_scale_factor
= 2;
9910 i
.log2_scale_factor
= 3;
9914 char sep
= *input_line_pointer
;
9916 *input_line_pointer
= '\0';
9917 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
9919 *input_line_pointer
= sep
;
9920 input_line_pointer
= save
;
9924 if (i
.log2_scale_factor
!= 0 && i
.index_reg
== 0)
9926 as_warn (_("scale factor of %d without an index register"),
9927 1 << i
.log2_scale_factor
);
9928 i
.log2_scale_factor
= 0;
9930 scale
= input_line_pointer
;
9931 input_line_pointer
= save
;
9936 i386_displacement (char *disp_start
, char *disp_end
)
9940 char *save_input_line_pointer
;
9941 char *gotfree_input_line
;
9943 i386_operand_type bigdisp
, types
= anydisp
;
9946 if (i
.disp_operands
== MAX_MEMORY_OPERANDS
)
9948 as_bad (_("at most %d displacement operands are allowed"),
9949 MAX_MEMORY_OPERANDS
);
9953 operand_type_set (&bigdisp
, 0);
9955 || i
.types
[this_operand
].bitfield
.baseindex
9956 || (current_templates
->start
->opcode_modifier
.jump
!= JUMP
9957 && current_templates
->start
->opcode_modifier
.jump
!= JUMP_DWORD
))
9959 i386_addressing_mode ();
9960 override
= (i
.prefix
[ADDR_PREFIX
] != 0);
9961 if (flag_code
== CODE_64BIT
)
9965 bigdisp
.bitfield
.disp32s
= 1;
9966 bigdisp
.bitfield
.disp64
= 1;
9969 bigdisp
.bitfield
.disp32
= 1;
9971 else if ((flag_code
== CODE_16BIT
) ^ override
)
9972 bigdisp
.bitfield
.disp16
= 1;
9974 bigdisp
.bitfield
.disp32
= 1;
9978 /* For PC-relative branches, the width of the displacement may be
9979 dependent upon data size, but is never dependent upon address size.
9980 Also make sure to not unintentionally match against a non-PC-relative
9982 static templates aux_templates
;
9983 const insn_template
*t
= current_templates
->start
;
9984 bfd_boolean has_intel64
= FALSE
;
9986 aux_templates
.start
= t
;
9987 while (++t
< current_templates
->end
)
9989 if (t
->opcode_modifier
.jump
9990 != current_templates
->start
->opcode_modifier
.jump
)
9992 if ((t
->opcode_modifier
.isa64
>= INTEL64
))
9995 if (t
< current_templates
->end
)
9997 aux_templates
.end
= t
;
9998 current_templates
= &aux_templates
;
10001 override
= (i
.prefix
[DATA_PREFIX
] != 0);
10002 if (flag_code
== CODE_64BIT
)
10004 if ((override
|| i
.suffix
== WORD_MNEM_SUFFIX
)
10005 && (!intel64
|| !has_intel64
))
10006 bigdisp
.bitfield
.disp16
= 1;
10008 bigdisp
.bitfield
.disp32s
= 1;
10013 override
= (i
.suffix
== (flag_code
!= CODE_16BIT
10015 : LONG_MNEM_SUFFIX
));
10016 bigdisp
.bitfield
.disp32
= 1;
10017 if ((flag_code
== CODE_16BIT
) ^ override
)
10019 bigdisp
.bitfield
.disp32
= 0;
10020 bigdisp
.bitfield
.disp16
= 1;
10024 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
10027 exp
= &disp_expressions
[i
.disp_operands
];
10028 i
.op
[this_operand
].disps
= exp
;
10030 save_input_line_pointer
= input_line_pointer
;
10031 input_line_pointer
= disp_start
;
10032 END_STRING_AND_SAVE (disp_end
);
10034 #ifndef GCC_ASM_O_HACK
10035 #define GCC_ASM_O_HACK 0
10038 END_STRING_AND_SAVE (disp_end
+ 1);
10039 if (i
.types
[this_operand
].bitfield
.baseIndex
10040 && displacement_string_end
[-1] == '+')
10042 /* This hack is to avoid a warning when using the "o"
10043 constraint within gcc asm statements.
10046 #define _set_tssldt_desc(n,addr,limit,type) \
10047 __asm__ __volatile__ ( \
10048 "movw %w2,%0\n\t" \
10049 "movw %w1,2+%0\n\t" \
10050 "rorl $16,%1\n\t" \
10051 "movb %b1,4+%0\n\t" \
10052 "movb %4,5+%0\n\t" \
10053 "movb $0,6+%0\n\t" \
10054 "movb %h1,7+%0\n\t" \
10056 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
10058 This works great except that the output assembler ends
10059 up looking a bit weird if it turns out that there is
10060 no offset. You end up producing code that looks like:
10073 So here we provide the missing zero. */
10075 *displacement_string_end
= '0';
10078 gotfree_input_line
= lex_got (&i
.reloc
[this_operand
], NULL
, &types
);
10079 if (gotfree_input_line
)
10080 input_line_pointer
= gotfree_input_line
;
10082 exp_seg
= expression (exp
);
10084 SKIP_WHITESPACE ();
10085 if (*input_line_pointer
)
10086 as_bad (_("junk `%s' after expression"), input_line_pointer
);
10088 RESTORE_END_STRING (disp_end
+ 1);
10090 input_line_pointer
= save_input_line_pointer
;
10091 if (gotfree_input_line
)
10093 free (gotfree_input_line
);
10095 if (exp
->X_op
== O_constant
|| exp
->X_op
== O_register
)
10096 exp
->X_op
= O_illegal
;
10099 ret
= i386_finalize_displacement (exp_seg
, exp
, types
, disp_start
);
10101 RESTORE_END_STRING (disp_end
);
10107 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED
, expressionS
*exp
,
10108 i386_operand_type types
, const char *disp_start
)
10110 i386_operand_type bigdisp
;
10113 /* We do this to make sure that the section symbol is in
10114 the symbol table. We will ultimately change the relocation
10115 to be relative to the beginning of the section. */
10116 if (i
.reloc
[this_operand
] == BFD_RELOC_386_GOTOFF
10117 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
10118 || i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
10120 if (exp
->X_op
!= O_symbol
)
10123 if (S_IS_LOCAL (exp
->X_add_symbol
)
10124 && S_GET_SEGMENT (exp
->X_add_symbol
) != undefined_section
10125 && S_GET_SEGMENT (exp
->X_add_symbol
) != expr_section
)
10126 section_symbol (S_GET_SEGMENT (exp
->X_add_symbol
));
10127 exp
->X_op
= O_subtract
;
10128 exp
->X_op_symbol
= GOT_symbol
;
10129 if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTPCREL
)
10130 i
.reloc
[this_operand
] = BFD_RELOC_32_PCREL
;
10131 else if (i
.reloc
[this_operand
] == BFD_RELOC_X86_64_GOTOFF64
)
10132 i
.reloc
[this_operand
] = BFD_RELOC_64
;
10134 i
.reloc
[this_operand
] = BFD_RELOC_32
;
10137 else if (exp
->X_op
== O_absent
10138 || exp
->X_op
== O_illegal
10139 || exp
->X_op
== O_big
)
10142 as_bad (_("missing or invalid displacement expression `%s'"),
10147 else if (flag_code
== CODE_64BIT
10148 && !i
.prefix
[ADDR_PREFIX
]
10149 && exp
->X_op
== O_constant
)
10151 /* Since displacement is signed extended to 64bit, don't allow
10152 disp32 and turn off disp32s if they are out of range. */
10153 i
.types
[this_operand
].bitfield
.disp32
= 0;
10154 if (!fits_in_signed_long (exp
->X_add_number
))
10156 i
.types
[this_operand
].bitfield
.disp32s
= 0;
10157 if (i
.types
[this_operand
].bitfield
.baseindex
)
10159 as_bad (_("0x%lx out range of signed 32bit displacement"),
10160 (long) exp
->X_add_number
);
10166 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10167 else if (exp
->X_op
!= O_constant
10168 && OUTPUT_FLAVOR
== bfd_target_aout_flavour
10169 && exp_seg
!= absolute_section
10170 && exp_seg
!= text_section
10171 && exp_seg
!= data_section
10172 && exp_seg
!= bss_section
10173 && exp_seg
!= undefined_section
10174 && !bfd_is_com_section (exp_seg
))
10176 as_bad (_("unimplemented segment %s in operand"), exp_seg
->name
);
10181 if (current_templates
->start
->opcode_modifier
.jump
== JUMP_BYTE
10182 /* Constants get taken care of by optimize_disp(). */
10183 && exp
->X_op
!= O_constant
)
10184 i
.types
[this_operand
].bitfield
.disp8
= 1;
10186 /* Check if this is a displacement only operand. */
10187 bigdisp
= i
.types
[this_operand
];
10188 bigdisp
.bitfield
.disp8
= 0;
10189 bigdisp
.bitfield
.disp16
= 0;
10190 bigdisp
.bitfield
.disp32
= 0;
10191 bigdisp
.bitfield
.disp32s
= 0;
10192 bigdisp
.bitfield
.disp64
= 0;
10193 if (operand_type_all_zero (&bigdisp
))
10194 i
.types
[this_operand
] = operand_type_and (i
.types
[this_operand
],
10200 /* Return the active addressing mode, taking address override and
10201 registers forming the address into consideration. Update the
10202 address override prefix if necessary. */
10204 static enum flag_code
10205 i386_addressing_mode (void)
10207 enum flag_code addr_mode
;
10209 if (i
.prefix
[ADDR_PREFIX
])
10210 addr_mode
= flag_code
== CODE_32BIT
? CODE_16BIT
: CODE_32BIT
;
10213 addr_mode
= flag_code
;
10215 #if INFER_ADDR_PREFIX
10216 if (i
.mem_operands
== 0)
10218 /* Infer address prefix from the first memory operand. */
10219 const reg_entry
*addr_reg
= i
.base_reg
;
10221 if (addr_reg
== NULL
)
10222 addr_reg
= i
.index_reg
;
10226 if (addr_reg
->reg_type
.bitfield
.dword
)
10227 addr_mode
= CODE_32BIT
;
10228 else if (flag_code
!= CODE_64BIT
10229 && addr_reg
->reg_type
.bitfield
.word
)
10230 addr_mode
= CODE_16BIT
;
10232 if (addr_mode
!= flag_code
)
10234 i
.prefix
[ADDR_PREFIX
] = ADDR_PREFIX_OPCODE
;
10236 /* Change the size of any displacement too. At most one
10237 of Disp16 or Disp32 is set.
10238 FIXME. There doesn't seem to be any real need for
10239 separate Disp16 and Disp32 flags. The same goes for
10240 Imm16 and Imm32. Removing them would probably clean
10241 up the code quite a lot. */
10242 if (flag_code
!= CODE_64BIT
10243 && (i
.types
[this_operand
].bitfield
.disp16
10244 || i
.types
[this_operand
].bitfield
.disp32
))
10245 i
.types
[this_operand
]
10246 = operand_type_xor (i
.types
[this_operand
], disp16_32
);
10256 /* Make sure the memory operand we've been dealt is valid.
10257 Return 1 on success, 0 on a failure. */
10260 i386_index_check (const char *operand_string
)
10262 const char *kind
= "base/index";
10263 enum flag_code addr_mode
= i386_addressing_mode ();
10265 if (current_templates
->start
->opcode_modifier
.isstring
10266 && !current_templates
->start
->cpu_flags
.bitfield
.cpupadlock
10267 && (current_templates
->end
[-1].opcode_modifier
.isstring
10268 || i
.mem_operands
))
10270 /* Memory operands of string insns are special in that they only allow
10271 a single register (rDI, rSI, or rBX) as their memory address. */
10272 const reg_entry
*expected_reg
;
10273 static const char *di_si
[][2] =
10279 static const char *bx
[] = { "ebx", "bx", "rbx" };
10281 kind
= "string address";
10283 if (current_templates
->start
->opcode_modifier
.repprefixok
)
10285 int es_op
= current_templates
->end
[-1].opcode_modifier
.isstring
10286 - IS_STRING_ES_OP0
;
10289 if (!current_templates
->end
[-1].operand_types
[0].bitfield
.baseindex
10290 || ((!i
.mem_operands
!= !intel_syntax
)
10291 && current_templates
->end
[-1].operand_types
[1]
10292 .bitfield
.baseindex
))
10294 expected_reg
= hash_find (reg_hash
, di_si
[addr_mode
][op
== es_op
]);
10297 expected_reg
= hash_find (reg_hash
, bx
[addr_mode
]);
10299 if (i
.base_reg
!= expected_reg
10301 || operand_type_check (i
.types
[this_operand
], disp
))
10303 /* The second memory operand must have the same size as
10307 && !((addr_mode
== CODE_64BIT
10308 && i
.base_reg
->reg_type
.bitfield
.qword
)
10309 || (addr_mode
== CODE_32BIT
10310 ? i
.base_reg
->reg_type
.bitfield
.dword
10311 : i
.base_reg
->reg_type
.bitfield
.word
)))
10314 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
10316 intel_syntax
? '[' : '(',
10318 expected_reg
->reg_name
,
10319 intel_syntax
? ']' : ')');
10326 as_bad (_("`%s' is not a valid %s expression"),
10327 operand_string
, kind
);
10332 if (addr_mode
!= CODE_16BIT
)
10334 /* 32-bit/64-bit checks. */
10336 && ((addr_mode
== CODE_64BIT
10337 ? !i
.base_reg
->reg_type
.bitfield
.qword
10338 : !i
.base_reg
->reg_type
.bitfield
.dword
)
10339 || (i
.index_reg
&& i
.base_reg
->reg_num
== RegIP
)
10340 || i
.base_reg
->reg_num
== RegIZ
))
10342 && !i
.index_reg
->reg_type
.bitfield
.xmmword
10343 && !i
.index_reg
->reg_type
.bitfield
.ymmword
10344 && !i
.index_reg
->reg_type
.bitfield
.zmmword
10345 && ((addr_mode
== CODE_64BIT
10346 ? !i
.index_reg
->reg_type
.bitfield
.qword
10347 : !i
.index_reg
->reg_type
.bitfield
.dword
)
10348 || !i
.index_reg
->reg_type
.bitfield
.baseindex
)))
10351 /* bndmk, bndldx, and bndstx have special restrictions. */
10352 if (current_templates
->start
->base_opcode
== 0xf30f1b
10353 || (current_templates
->start
->base_opcode
& ~1) == 0x0f1a)
10355 /* They cannot use RIP-relative addressing. */
10356 if (i
.base_reg
&& i
.base_reg
->reg_num
== RegIP
)
10358 as_bad (_("`%s' cannot be used here"), operand_string
);
10362 /* bndldx and bndstx ignore their scale factor. */
10363 if (current_templates
->start
->base_opcode
!= 0xf30f1b
10364 && i
.log2_scale_factor
)
10365 as_warn (_("register scaling is being ignored here"));
10370 /* 16-bit checks. */
10372 && (!i
.base_reg
->reg_type
.bitfield
.word
10373 || !i
.base_reg
->reg_type
.bitfield
.baseindex
))
10375 && (!i
.index_reg
->reg_type
.bitfield
.word
10376 || !i
.index_reg
->reg_type
.bitfield
.baseindex
10378 && i
.base_reg
->reg_num
< 6
10379 && i
.index_reg
->reg_num
>= 6
10380 && i
.log2_scale_factor
== 0))))
10387 /* Handle vector immediates. */
10390 RC_SAE_immediate (const char *imm_start
)
10392 unsigned int match_found
, j
;
10393 const char *pstr
= imm_start
;
10401 for (j
= 0; j
< ARRAY_SIZE (RC_NamesTable
); j
++)
10403 if (!strncmp (pstr
, RC_NamesTable
[j
].name
, RC_NamesTable
[j
].len
))
10407 rc_op
.type
= RC_NamesTable
[j
].type
;
10408 rc_op
.operand
= this_operand
;
10409 i
.rounding
= &rc_op
;
10413 as_bad (_("duplicated `%s'"), imm_start
);
10416 pstr
+= RC_NamesTable
[j
].len
;
10424 if (*pstr
++ != '}')
10426 as_bad (_("Missing '}': '%s'"), imm_start
);
10429 /* RC/SAE immediate string should contain nothing more. */;
10432 as_bad (_("Junk after '}': '%s'"), imm_start
);
10436 exp
= &im_expressions
[i
.imm_operands
++];
10437 i
.op
[this_operand
].imms
= exp
;
10439 exp
->X_op
= O_constant
;
10440 exp
->X_add_number
= 0;
10441 exp
->X_add_symbol
= (symbolS
*) 0;
10442 exp
->X_op_symbol
= (symbolS
*) 0;
10444 i
.types
[this_operand
].bitfield
.imm8
= 1;
10448 /* Only string instructions can have a second memory operand, so
10449 reduce current_templates to just those if it contains any. */
10451 maybe_adjust_templates (void)
10453 const insn_template
*t
;
10455 gas_assert (i
.mem_operands
== 1);
10457 for (t
= current_templates
->start
; t
< current_templates
->end
; ++t
)
10458 if (t
->opcode_modifier
.isstring
)
10461 if (t
< current_templates
->end
)
10463 static templates aux_templates
;
10464 bfd_boolean recheck
;
10466 aux_templates
.start
= t
;
10467 for (; t
< current_templates
->end
; ++t
)
10468 if (!t
->opcode_modifier
.isstring
)
10470 aux_templates
.end
= t
;
10472 /* Determine whether to re-check the first memory operand. */
10473 recheck
= (aux_templates
.start
!= current_templates
->start
10474 || t
!= current_templates
->end
);
10476 current_templates
= &aux_templates
;
10480 i
.mem_operands
= 0;
10481 if (i
.memop1_string
!= NULL
10482 && i386_index_check (i
.memop1_string
) == 0)
10484 i
.mem_operands
= 1;
10491 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
10495 i386_att_operand (char *operand_string
)
10497 const reg_entry
*r
;
10499 char *op_string
= operand_string
;
10501 if (is_space_char (*op_string
))
10504 /* We check for an absolute prefix (differentiating,
10505 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
10506 if (*op_string
== ABSOLUTE_PREFIX
)
10509 if (is_space_char (*op_string
))
10511 i
.jumpabsolute
= TRUE
;
10514 /* Check if operand is a register. */
10515 if ((r
= parse_register (op_string
, &end_op
)) != NULL
)
10517 i386_operand_type temp
;
10519 /* Check for a segment override by searching for ':' after a
10520 segment register. */
10521 op_string
= end_op
;
10522 if (is_space_char (*op_string
))
10524 if (*op_string
== ':' && r
->reg_type
.bitfield
.class == SReg
)
10526 switch (r
->reg_num
)
10529 i
.seg
[i
.mem_operands
] = &es
;
10532 i
.seg
[i
.mem_operands
] = &cs
;
10535 i
.seg
[i
.mem_operands
] = &ss
;
10538 i
.seg
[i
.mem_operands
] = &ds
;
10541 i
.seg
[i
.mem_operands
] = &fs
;
10544 i
.seg
[i
.mem_operands
] = &gs
;
10548 /* Skip the ':' and whitespace. */
10550 if (is_space_char (*op_string
))
10553 if (!is_digit_char (*op_string
)
10554 && !is_identifier_char (*op_string
)
10555 && *op_string
!= '('
10556 && *op_string
!= ABSOLUTE_PREFIX
)
10558 as_bad (_("bad memory operand `%s'"), op_string
);
10561 /* Handle case of %es:*foo. */
10562 if (*op_string
== ABSOLUTE_PREFIX
)
10565 if (is_space_char (*op_string
))
10567 i
.jumpabsolute
= TRUE
;
10569 goto do_memory_reference
;
10572 /* Handle vector operations. */
10573 if (*op_string
== '{')
10575 op_string
= check_VecOperations (op_string
, NULL
);
10576 if (op_string
== NULL
)
10582 as_bad (_("junk `%s' after register"), op_string
);
10585 temp
= r
->reg_type
;
10586 temp
.bitfield
.baseindex
= 0;
10587 i
.types
[this_operand
] = operand_type_or (i
.types
[this_operand
],
10589 i
.types
[this_operand
].bitfield
.unspecified
= 0;
10590 i
.op
[this_operand
].regs
= r
;
10593 else if (*op_string
== REGISTER_PREFIX
)
10595 as_bad (_("bad register name `%s'"), op_string
);
10598 else if (*op_string
== IMMEDIATE_PREFIX
)
10601 if (i
.jumpabsolute
)
10603 as_bad (_("immediate operand illegal with absolute jump"));
10606 if (!i386_immediate (op_string
))
10609 else if (RC_SAE_immediate (operand_string
))
10611 /* If it is a RC or SAE immediate, do nothing. */
10614 else if (is_digit_char (*op_string
)
10615 || is_identifier_char (*op_string
)
10616 || *op_string
== '"'
10617 || *op_string
== '(')
10619 /* This is a memory reference of some sort. */
10622 /* Start and end of displacement string expression (if found). */
10623 char *displacement_string_start
;
10624 char *displacement_string_end
;
10627 do_memory_reference
:
10628 if (i
.mem_operands
== 1 && !maybe_adjust_templates ())
10630 if ((i
.mem_operands
== 1
10631 && !current_templates
->start
->opcode_modifier
.isstring
)
10632 || i
.mem_operands
== 2)
10634 as_bad (_("too many memory references for `%s'"),
10635 current_templates
->start
->name
);
10639 /* Check for base index form. We detect the base index form by
10640 looking for an ')' at the end of the operand, searching
10641 for the '(' matching it, and finding a REGISTER_PREFIX or ','
10643 base_string
= op_string
+ strlen (op_string
);
10645 /* Handle vector operations. */
10646 vop_start
= strchr (op_string
, '{');
10647 if (vop_start
&& vop_start
< base_string
)
10649 if (check_VecOperations (vop_start
, base_string
) == NULL
)
10651 base_string
= vop_start
;
10655 if (is_space_char (*base_string
))
10658 /* If we only have a displacement, set-up for it to be parsed later. */
10659 displacement_string_start
= op_string
;
10660 displacement_string_end
= base_string
+ 1;
10662 if (*base_string
== ')')
10665 unsigned int parens_balanced
= 1;
10666 /* We've already checked that the number of left & right ()'s are
10667 equal, so this loop will not be infinite. */
10671 if (*base_string
== ')')
10673 if (*base_string
== '(')
10676 while (parens_balanced
);
10678 temp_string
= base_string
;
10680 /* Skip past '(' and whitespace. */
10682 if (is_space_char (*base_string
))
10685 if (*base_string
== ','
10686 || ((i
.base_reg
= parse_register (base_string
, &end_op
))
10689 displacement_string_end
= temp_string
;
10691 i
.types
[this_operand
].bitfield
.baseindex
= 1;
10695 base_string
= end_op
;
10696 if (is_space_char (*base_string
))
10700 /* There may be an index reg or scale factor here. */
10701 if (*base_string
== ',')
10704 if (is_space_char (*base_string
))
10707 if ((i
.index_reg
= parse_register (base_string
, &end_op
))
10710 base_string
= end_op
;
10711 if (is_space_char (*base_string
))
10713 if (*base_string
== ',')
10716 if (is_space_char (*base_string
))
10719 else if (*base_string
!= ')')
10721 as_bad (_("expecting `,' or `)' "
10722 "after index register in `%s'"),
10727 else if (*base_string
== REGISTER_PREFIX
)
10729 end_op
= strchr (base_string
, ',');
10732 as_bad (_("bad register name `%s'"), base_string
);
10736 /* Check for scale factor. */
10737 if (*base_string
!= ')')
10739 char *end_scale
= i386_scale (base_string
);
10744 base_string
= end_scale
;
10745 if (is_space_char (*base_string
))
10747 if (*base_string
!= ')')
10749 as_bad (_("expecting `)' "
10750 "after scale factor in `%s'"),
10755 else if (!i
.index_reg
)
10757 as_bad (_("expecting index register or scale factor "
10758 "after `,'; got '%c'"),
10763 else if (*base_string
!= ')')
10765 as_bad (_("expecting `,' or `)' "
10766 "after base register in `%s'"),
10771 else if (*base_string
== REGISTER_PREFIX
)
10773 end_op
= strchr (base_string
, ',');
10776 as_bad (_("bad register name `%s'"), base_string
);
10781 /* If there's an expression beginning the operand, parse it,
10782 assuming displacement_string_start and
10783 displacement_string_end are meaningful. */
10784 if (displacement_string_start
!= displacement_string_end
)
10786 if (!i386_displacement (displacement_string_start
,
10787 displacement_string_end
))
10791 /* Special case for (%dx) while doing input/output op. */
10793 && i
.base_reg
->reg_type
.bitfield
.instance
== RegD
10794 && i
.base_reg
->reg_type
.bitfield
.word
10795 && i
.index_reg
== 0
10796 && i
.log2_scale_factor
== 0
10797 && i
.seg
[i
.mem_operands
] == 0
10798 && !operand_type_check (i
.types
[this_operand
], disp
))
10800 i
.types
[this_operand
] = i
.base_reg
->reg_type
;
10804 if (i386_index_check (operand_string
) == 0)
10806 i
.flags
[this_operand
] |= Operand_Mem
;
10807 if (i
.mem_operands
== 0)
10808 i
.memop1_string
= xstrdup (operand_string
);
10813 /* It's not a memory operand; argh! */
10814 as_bad (_("invalid char %s beginning operand %d `%s'"),
10815 output_invalid (*op_string
),
10820 return 1; /* Normal return. */
10823 /* Calculate the maximum variable size (i.e., excluding fr_fix)
10824 that an rs_machine_dependent frag may reach. */
10827 i386_frag_max_var (fragS
*frag
)
10829 /* The only relaxable frags are for jumps.
10830 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
10831 gas_assert (frag
->fr_type
== rs_machine_dependent
);
10832 return TYPE_FROM_RELAX_STATE (frag
->fr_subtype
) == UNCOND_JUMP
? 4 : 5;
10835 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10837 elf_symbol_resolved_in_segment_p (symbolS
*fr_symbol
, offsetT fr_var
)
10839 /* STT_GNU_IFUNC symbol must go through PLT. */
10840 if ((symbol_get_bfdsym (fr_symbol
)->flags
10841 & BSF_GNU_INDIRECT_FUNCTION
) != 0)
10844 if (!S_IS_EXTERNAL (fr_symbol
))
10845 /* Symbol may be weak or local. */
10846 return !S_IS_WEAK (fr_symbol
);
10848 /* Global symbols with non-default visibility can't be preempted. */
10849 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol
)) != STV_DEFAULT
)
10852 if (fr_var
!= NO_RELOC
)
10853 switch ((enum bfd_reloc_code_real
) fr_var
)
10855 case BFD_RELOC_386_PLT32
:
10856 case BFD_RELOC_X86_64_PLT32
:
10857 /* Symbol with PLT relocation may be preempted. */
10863 /* Global symbols with default visibility in a shared library may be
10864 preempted by another definition. */
10869 /* Return the next non-empty frag. */
10872 i386_next_non_empty_frag (fragS
*fragP
)
10874 /* There may be a frag with a ".fill 0" when there is no room in
10875 the current frag for frag_grow in output_insn. */
10876 for (fragP
= fragP
->fr_next
;
10878 && fragP
->fr_type
== rs_fill
10879 && fragP
->fr_fix
== 0);
10880 fragP
= fragP
->fr_next
)
10885 /* Return the next jcc frag after BRANCH_PADDING. */
10888 i386_next_jcc_frag (fragS
*fragP
)
10893 if (fragP
->fr_type
== rs_machine_dependent
10894 && (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
10895 == BRANCH_PADDING
))
10897 fragP
= i386_next_non_empty_frag (fragP
);
10898 if (fragP
->fr_type
!= rs_machine_dependent
)
10900 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == COND_JUMP
)
10907 /* Classify BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags. */
10910 i386_classify_machine_dependent_frag (fragS
*fragP
)
10914 fragS
*branch_fragP
;
10916 unsigned int max_prefix_length
;
10918 if (fragP
->tc_frag_data
.classified
)
10921 /* First scan for BRANCH_PADDING and FUSED_JCC_PADDING. Convert
10922 FUSED_JCC_PADDING and merge BRANCH_PADDING. */
10923 for (next_fragP
= fragP
;
10924 next_fragP
!= NULL
;
10925 next_fragP
= next_fragP
->fr_next
)
10927 next_fragP
->tc_frag_data
.classified
= 1;
10928 if (next_fragP
->fr_type
== rs_machine_dependent
)
10929 switch (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
))
10931 case BRANCH_PADDING
:
10932 /* The BRANCH_PADDING frag must be followed by a branch
10934 branch_fragP
= i386_next_non_empty_frag (next_fragP
);
10935 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
10937 case FUSED_JCC_PADDING
:
10938 /* Check if this is a fused jcc:
10940 CMP like instruction
10944 cmp_fragP
= i386_next_non_empty_frag (next_fragP
);
10945 pad_fragP
= i386_next_non_empty_frag (cmp_fragP
);
10946 branch_fragP
= i386_next_jcc_frag (pad_fragP
);
10949 /* The BRANCH_PADDING frag is merged with the
10950 FUSED_JCC_PADDING frag. */
10951 next_fragP
->tc_frag_data
.u
.branch_fragP
= branch_fragP
;
10952 /* CMP like instruction size. */
10953 next_fragP
->tc_frag_data
.cmp_size
= cmp_fragP
->fr_fix
;
10954 frag_wane (pad_fragP
);
10955 /* Skip to branch_fragP. */
10956 next_fragP
= branch_fragP
;
10958 else if (next_fragP
->tc_frag_data
.max_prefix_length
)
10960 /* Turn FUSED_JCC_PADDING into BRANCH_PREFIX if it isn't
10962 next_fragP
->fr_subtype
10963 = ENCODE_RELAX_STATE (BRANCH_PREFIX
, 0);
10964 next_fragP
->tc_frag_data
.max_bytes
10965 = next_fragP
->tc_frag_data
.max_prefix_length
;
10966 /* This will be updated in the BRANCH_PREFIX scan. */
10967 next_fragP
->tc_frag_data
.max_prefix_length
= 0;
10970 frag_wane (next_fragP
);
10975 /* Stop if there is no BRANCH_PREFIX. */
10976 if (!align_branch_prefix_size
)
10979 /* Scan for BRANCH_PREFIX. */
10980 for (; fragP
!= NULL
; fragP
= fragP
->fr_next
)
10982 if (fragP
->fr_type
!= rs_machine_dependent
10983 || (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
10987 /* Count all BRANCH_PREFIX frags before BRANCH_PADDING and
10988 COND_JUMP_PREFIX. */
10989 max_prefix_length
= 0;
10990 for (next_fragP
= fragP
;
10991 next_fragP
!= NULL
;
10992 next_fragP
= next_fragP
->fr_next
)
10994 if (next_fragP
->fr_type
== rs_fill
)
10995 /* Skip rs_fill frags. */
10997 else if (next_fragP
->fr_type
!= rs_machine_dependent
)
10998 /* Stop for all other frags. */
11001 /* rs_machine_dependent frags. */
11002 if (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11005 /* Count BRANCH_PREFIX frags. */
11006 if (max_prefix_length
>= MAX_FUSED_JCC_PADDING_SIZE
)
11008 max_prefix_length
= MAX_FUSED_JCC_PADDING_SIZE
;
11009 frag_wane (next_fragP
);
11013 += next_fragP
->tc_frag_data
.max_bytes
;
11015 else if ((TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11017 || (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11018 == FUSED_JCC_PADDING
))
11020 /* Stop at BRANCH_PADDING and FUSED_JCC_PADDING. */
11021 fragP
->tc_frag_data
.u
.padding_fragP
= next_fragP
;
11025 /* Stop for other rs_machine_dependent frags. */
11029 fragP
->tc_frag_data
.max_prefix_length
= max_prefix_length
;
11031 /* Skip to the next frag. */
11032 fragP
= next_fragP
;
11036 /* Compute padding size for
11039 CMP like instruction
11041 COND_JUMP/UNCOND_JUMP
11046 COND_JUMP/UNCOND_JUMP
11050 i386_branch_padding_size (fragS
*fragP
, offsetT address
)
11052 unsigned int offset
, size
, padding_size
;
11053 fragS
*branch_fragP
= fragP
->tc_frag_data
.u
.branch_fragP
;
11055 /* The start address of the BRANCH_PADDING or FUSED_JCC_PADDING frag. */
11057 address
= fragP
->fr_address
;
11058 address
+= fragP
->fr_fix
;
11060 /* CMP like instrunction size. */
11061 size
= fragP
->tc_frag_data
.cmp_size
;
11063 /* The base size of the branch frag. */
11064 size
+= branch_fragP
->fr_fix
;
11066 /* Add opcode and displacement bytes for the rs_machine_dependent
11068 if (branch_fragP
->fr_type
== rs_machine_dependent
)
11069 size
+= md_relax_table
[branch_fragP
->fr_subtype
].rlx_length
;
11071 /* Check if branch is within boundary and doesn't end at the last
11073 offset
= address
& ((1U << align_branch_power
) - 1);
11074 if ((offset
+ size
) >= (1U << align_branch_power
))
11075 /* Padding needed to avoid crossing boundary. */
11076 padding_size
= (1U << align_branch_power
) - offset
;
11078 /* No padding needed. */
11081 /* The return value may be saved in tc_frag_data.length which is
11083 if (!fits_in_unsigned_byte (padding_size
))
11086 return padding_size
;
11089 /* i386_generic_table_relax_frag()
11091 Handle BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags to
11092 grow/shrink padding to align branch frags. Hand others to
11096 i386_generic_table_relax_frag (segT segment
, fragS
*fragP
, long stretch
)
11098 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
11099 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
11101 long padding_size
= i386_branch_padding_size (fragP
, 0);
11102 long grow
= padding_size
- fragP
->tc_frag_data
.length
;
11104 /* When the BRANCH_PREFIX frag is used, the computed address
11105 must match the actual address and there should be no padding. */
11106 if (fragP
->tc_frag_data
.padding_address
11107 && (fragP
->tc_frag_data
.padding_address
!= fragP
->fr_address
11111 /* Update the padding size. */
11113 fragP
->tc_frag_data
.length
= padding_size
;
11117 else if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
11119 fragS
*padding_fragP
, *next_fragP
;
11120 long padding_size
, left_size
, last_size
;
11122 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
11123 if (!padding_fragP
)
11124 /* Use the padding set by the leading BRANCH_PREFIX frag. */
11125 return (fragP
->tc_frag_data
.length
11126 - fragP
->tc_frag_data
.last_length
);
11128 /* Compute the relative address of the padding frag in the very
11129 first time where the BRANCH_PREFIX frag sizes are zero. */
11130 if (!fragP
->tc_frag_data
.padding_address
)
11131 fragP
->tc_frag_data
.padding_address
11132 = padding_fragP
->fr_address
- (fragP
->fr_address
- stretch
);
11134 /* First update the last length from the previous interation. */
11135 left_size
= fragP
->tc_frag_data
.prefix_length
;
11136 for (next_fragP
= fragP
;
11137 next_fragP
!= padding_fragP
;
11138 next_fragP
= next_fragP
->fr_next
)
11139 if (next_fragP
->fr_type
== rs_machine_dependent
11140 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11145 int max
= next_fragP
->tc_frag_data
.max_bytes
;
11149 if (max
> left_size
)
11154 next_fragP
->tc_frag_data
.last_length
= size
;
11158 next_fragP
->tc_frag_data
.last_length
= 0;
11161 /* Check the padding size for the padding frag. */
11162 padding_size
= i386_branch_padding_size
11163 (padding_fragP
, (fragP
->fr_address
11164 + fragP
->tc_frag_data
.padding_address
));
11166 last_size
= fragP
->tc_frag_data
.prefix_length
;
11167 /* Check if there is change from the last interation. */
11168 if (padding_size
== last_size
)
11170 /* Update the expected address of the padding frag. */
11171 padding_fragP
->tc_frag_data
.padding_address
11172 = (fragP
->fr_address
+ padding_size
11173 + fragP
->tc_frag_data
.padding_address
);
11177 if (padding_size
> fragP
->tc_frag_data
.max_prefix_length
)
11179 /* No padding if there is no sufficient room. Clear the
11180 expected address of the padding frag. */
11181 padding_fragP
->tc_frag_data
.padding_address
= 0;
11185 /* Store the expected address of the padding frag. */
11186 padding_fragP
->tc_frag_data
.padding_address
11187 = (fragP
->fr_address
+ padding_size
11188 + fragP
->tc_frag_data
.padding_address
);
11190 fragP
->tc_frag_data
.prefix_length
= padding_size
;
11192 /* Update the length for the current interation. */
11193 left_size
= padding_size
;
11194 for (next_fragP
= fragP
;
11195 next_fragP
!= padding_fragP
;
11196 next_fragP
= next_fragP
->fr_next
)
11197 if (next_fragP
->fr_type
== rs_machine_dependent
11198 && (TYPE_FROM_RELAX_STATE (next_fragP
->fr_subtype
)
11203 int max
= next_fragP
->tc_frag_data
.max_bytes
;
11207 if (max
> left_size
)
11212 next_fragP
->tc_frag_data
.length
= size
;
11216 next_fragP
->tc_frag_data
.length
= 0;
11219 return (fragP
->tc_frag_data
.length
11220 - fragP
->tc_frag_data
.last_length
);
11222 return relax_frag (segment
, fragP
, stretch
);
11225 /* md_estimate_size_before_relax()
11227 Called just before relax() for rs_machine_dependent frags. The x86
11228 assembler uses these frags to handle variable size jump
11231 Any symbol that is now undefined will not become defined.
11232 Return the correct fr_subtype in the frag.
11233 Return the initial "guess for variable size of frag" to caller.
11234 The guess is actually the growth beyond the fixed part. Whatever
11235 we do to grow the fixed or variable part contributes to our
11239 md_estimate_size_before_relax (fragS
*fragP
, segT segment
)
11241 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
11242 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
11243 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
)
11245 i386_classify_machine_dependent_frag (fragP
);
11246 return fragP
->tc_frag_data
.length
;
11249 /* We've already got fragP->fr_subtype right; all we have to do is
11250 check for un-relaxable symbols. On an ELF system, we can't relax
11251 an externally visible symbol, because it may be overridden by a
11253 if (S_GET_SEGMENT (fragP
->fr_symbol
) != segment
11254 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11256 && !elf_symbol_resolved_in_segment_p (fragP
->fr_symbol
,
11259 #if defined (OBJ_COFF) && defined (TE_PE)
11260 || (OUTPUT_FLAVOR
== bfd_target_coff_flavour
11261 && S_IS_WEAK (fragP
->fr_symbol
))
11265 /* Symbol is undefined in this segment, or we need to keep a
11266 reloc so that weak symbols can be overridden. */
11267 int size
= (fragP
->fr_subtype
& CODE16
) ? 2 : 4;
11268 enum bfd_reloc_code_real reloc_type
;
11269 unsigned char *opcode
;
11272 if (fragP
->fr_var
!= NO_RELOC
)
11273 reloc_type
= (enum bfd_reloc_code_real
) fragP
->fr_var
;
11274 else if (size
== 2)
11275 reloc_type
= BFD_RELOC_16_PCREL
;
11276 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11277 else if (need_plt32_p (fragP
->fr_symbol
))
11278 reloc_type
= BFD_RELOC_X86_64_PLT32
;
11281 reloc_type
= BFD_RELOC_32_PCREL
;
11283 old_fr_fix
= fragP
->fr_fix
;
11284 opcode
= (unsigned char *) fragP
->fr_opcode
;
11286 switch (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
))
11289 /* Make jmp (0xeb) a (d)word displacement jump. */
11291 fragP
->fr_fix
+= size
;
11292 fix_new (fragP
, old_fr_fix
, size
,
11294 fragP
->fr_offset
, 1,
11300 && (!no_cond_jump_promotion
|| fragP
->fr_var
!= NO_RELOC
))
11302 /* Negate the condition, and branch past an
11303 unconditional jump. */
11306 /* Insert an unconditional jump. */
11308 /* We added two extra opcode bytes, and have a two byte
11310 fragP
->fr_fix
+= 2 + 2;
11311 fix_new (fragP
, old_fr_fix
+ 2, 2,
11313 fragP
->fr_offset
, 1,
11317 /* Fall through. */
11320 if (no_cond_jump_promotion
&& fragP
->fr_var
== NO_RELOC
)
11324 fragP
->fr_fix
+= 1;
11325 fixP
= fix_new (fragP
, old_fr_fix
, 1,
11327 fragP
->fr_offset
, 1,
11328 BFD_RELOC_8_PCREL
);
11329 fixP
->fx_signed
= 1;
11333 /* This changes the byte-displacement jump 0x7N
11334 to the (d)word-displacement jump 0x0f,0x8N. */
11335 opcode
[1] = opcode
[0] + 0x10;
11336 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
11337 /* We've added an opcode byte. */
11338 fragP
->fr_fix
+= 1 + size
;
11339 fix_new (fragP
, old_fr_fix
+ 1, size
,
11341 fragP
->fr_offset
, 1,
11346 BAD_CASE (fragP
->fr_subtype
);
11350 return fragP
->fr_fix
- old_fr_fix
;
11353 /* Guess size depending on current relax state. Initially the relax
11354 state will correspond to a short jump and we return 1, because
11355 the variable part of the frag (the branch offset) is one byte
11356 long. However, we can relax a section more than once and in that
11357 case we must either set fr_subtype back to the unrelaxed state,
11358 or return the value for the appropriate branch. */
11359 return md_relax_table
[fragP
->fr_subtype
].rlx_length
;
11362 /* Called after relax() is finished.
11364 In: Address of frag.
11365 fr_type == rs_machine_dependent.
11366 fr_subtype is what the address relaxed to.
11368 Out: Any fixSs and constants are set up.
11369 Caller will turn frag into a ".space 0". */
11372 md_convert_frag (bfd
*abfd ATTRIBUTE_UNUSED
, segT sec ATTRIBUTE_UNUSED
,
11375 unsigned char *opcode
;
11376 unsigned char *where_to_put_displacement
= NULL
;
11377 offsetT target_address
;
11378 offsetT opcode_address
;
11379 unsigned int extension
= 0;
11380 offsetT displacement_from_opcode_start
;
11382 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PADDING
11383 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == FUSED_JCC_PADDING
11384 || TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
11386 /* Generate nop padding. */
11387 unsigned int size
= fragP
->tc_frag_data
.length
;
11390 if (size
> fragP
->tc_frag_data
.max_bytes
)
11396 const char *branch
= "branch";
11397 const char *prefix
= "";
11398 fragS
*padding_fragP
;
11399 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
)
11402 padding_fragP
= fragP
->tc_frag_data
.u
.padding_fragP
;
11403 switch (fragP
->tc_frag_data
.default_prefix
)
11408 case CS_PREFIX_OPCODE
:
11411 case DS_PREFIX_OPCODE
:
11414 case ES_PREFIX_OPCODE
:
11417 case FS_PREFIX_OPCODE
:
11420 case GS_PREFIX_OPCODE
:
11423 case SS_PREFIX_OPCODE
:
11428 msg
= _("%s:%u: add %d%s at 0x%llx to align "
11429 "%s within %d-byte boundary\n");
11431 msg
= _("%s:%u: add additional %d%s at 0x%llx to "
11432 "align %s within %d-byte boundary\n");
11436 padding_fragP
= fragP
;
11437 msg
= _("%s:%u: add %d%s-byte nop at 0x%llx to align "
11438 "%s within %d-byte boundary\n");
11442 switch (padding_fragP
->tc_frag_data
.branch_type
)
11444 case align_branch_jcc
:
11447 case align_branch_fused
:
11448 branch
= "fused jcc";
11450 case align_branch_jmp
:
11453 case align_branch_call
:
11456 case align_branch_indirect
:
11457 branch
= "indiret branch";
11459 case align_branch_ret
:
11466 fprintf (stdout
, msg
,
11467 fragP
->fr_file
, fragP
->fr_line
, size
, prefix
,
11468 (long long) fragP
->fr_address
, branch
,
11469 1 << align_branch_power
);
11471 if (TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) == BRANCH_PREFIX
)
11472 memset (fragP
->fr_opcode
,
11473 fragP
->tc_frag_data
.default_prefix
, size
);
11475 i386_generate_nops (fragP
, (char *) fragP
->fr_opcode
,
11477 fragP
->fr_fix
+= size
;
11482 opcode
= (unsigned char *) fragP
->fr_opcode
;
11484 /* Address we want to reach in file space. */
11485 target_address
= S_GET_VALUE (fragP
->fr_symbol
) + fragP
->fr_offset
;
11487 /* Address opcode resides at in file space. */
11488 opcode_address
= fragP
->fr_address
+ fragP
->fr_fix
;
11490 /* Displacement from opcode start to fill into instruction. */
11491 displacement_from_opcode_start
= target_address
- opcode_address
;
11493 if ((fragP
->fr_subtype
& BIG
) == 0)
11495 /* Don't have to change opcode. */
11496 extension
= 1; /* 1 opcode + 1 displacement */
11497 where_to_put_displacement
= &opcode
[1];
11501 if (no_cond_jump_promotion
11502 && TYPE_FROM_RELAX_STATE (fragP
->fr_subtype
) != UNCOND_JUMP
)
11503 as_warn_where (fragP
->fr_file
, fragP
->fr_line
,
11504 _("long jump required"));
11506 switch (fragP
->fr_subtype
)
11508 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG
):
11509 extension
= 4; /* 1 opcode + 4 displacement */
11511 where_to_put_displacement
= &opcode
[1];
11514 case ENCODE_RELAX_STATE (UNCOND_JUMP
, BIG16
):
11515 extension
= 2; /* 1 opcode + 2 displacement */
11517 where_to_put_displacement
= &opcode
[1];
11520 case ENCODE_RELAX_STATE (COND_JUMP
, BIG
):
11521 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG
):
11522 extension
= 5; /* 2 opcode + 4 displacement */
11523 opcode
[1] = opcode
[0] + 0x10;
11524 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
11525 where_to_put_displacement
= &opcode
[2];
11528 case ENCODE_RELAX_STATE (COND_JUMP
, BIG16
):
11529 extension
= 3; /* 2 opcode + 2 displacement */
11530 opcode
[1] = opcode
[0] + 0x10;
11531 opcode
[0] = TWO_BYTE_OPCODE_ESCAPE
;
11532 where_to_put_displacement
= &opcode
[2];
11535 case ENCODE_RELAX_STATE (COND_JUMP86
, BIG16
):
11540 where_to_put_displacement
= &opcode
[3];
11544 BAD_CASE (fragP
->fr_subtype
);
11549 /* If size if less then four we are sure that the operand fits,
11550 but if it's 4, then it could be that the displacement is larger
11552 if (DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
) == 4
11554 && ((addressT
) (displacement_from_opcode_start
- extension
11555 + ((addressT
) 1 << 31))
11556 > (((addressT
) 2 << 31) - 1)))
11558 as_bad_where (fragP
->fr_file
, fragP
->fr_line
,
11559 _("jump target out of range"));
11560 /* Make us emit 0. */
11561 displacement_from_opcode_start
= extension
;
11563 /* Now put displacement after opcode. */
11564 md_number_to_chars ((char *) where_to_put_displacement
,
11565 (valueT
) (displacement_from_opcode_start
- extension
),
11566 DISP_SIZE_FROM_RELAX_STATE (fragP
->fr_subtype
));
11567 fragP
->fr_fix
+= extension
;
11570 /* Apply a fixup (fixP) to segment data, once it has been determined
11571 by our caller that we have all the info we need to fix it up.
11573 Parameter valP is the pointer to the value of the bits.
11575 On the 386, immediates, displacements, and data pointers are all in
11576 the same (little-endian) format, so we don't need to care about which
11577 we are handling. */
11580 md_apply_fix (fixS
*fixP
, valueT
*valP
, segT seg ATTRIBUTE_UNUSED
)
11582 char *p
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
11583 valueT value
= *valP
;
11585 #if !defined (TE_Mach)
11586 if (fixP
->fx_pcrel
)
11588 switch (fixP
->fx_r_type
)
11594 fixP
->fx_r_type
= BFD_RELOC_64_PCREL
;
11597 case BFD_RELOC_X86_64_32S
:
11598 fixP
->fx_r_type
= BFD_RELOC_32_PCREL
;
11601 fixP
->fx_r_type
= BFD_RELOC_16_PCREL
;
11604 fixP
->fx_r_type
= BFD_RELOC_8_PCREL
;
11609 if (fixP
->fx_addsy
!= NULL
11610 && (fixP
->fx_r_type
== BFD_RELOC_32_PCREL
11611 || fixP
->fx_r_type
== BFD_RELOC_64_PCREL
11612 || fixP
->fx_r_type
== BFD_RELOC_16_PCREL
11613 || fixP
->fx_r_type
== BFD_RELOC_8_PCREL
)
11614 && !use_rela_relocations
)
11616 /* This is a hack. There should be a better way to handle this.
11617 This covers for the fact that bfd_install_relocation will
11618 subtract the current location (for partial_inplace, PC relative
11619 relocations); see more below. */
11623 || OUTPUT_FLAVOR
== bfd_target_coff_flavour
11626 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
11628 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11631 segT sym_seg
= S_GET_SEGMENT (fixP
->fx_addsy
);
11633 if ((sym_seg
== seg
11634 || (symbol_section_p (fixP
->fx_addsy
)
11635 && sym_seg
!= absolute_section
))
11636 && !generic_force_reloc (fixP
))
11638 /* Yes, we add the values in twice. This is because
11639 bfd_install_relocation subtracts them out again. I think
11640 bfd_install_relocation is broken, but I don't dare change
11642 value
+= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
11646 #if defined (OBJ_COFF) && defined (TE_PE)
11647 /* For some reason, the PE format does not store a
11648 section address offset for a PC relative symbol. */
11649 if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
11650 || S_IS_WEAK (fixP
->fx_addsy
))
11651 value
+= md_pcrel_from (fixP
);
11654 #if defined (OBJ_COFF) && defined (TE_PE)
11655 if (fixP
->fx_addsy
!= NULL
11656 && S_IS_WEAK (fixP
->fx_addsy
)
11657 /* PR 16858: Do not modify weak function references. */
11658 && ! fixP
->fx_pcrel
)
11660 #if !defined (TE_PEP)
11661 /* For x86 PE weak function symbols are neither PC-relative
11662 nor do they set S_IS_FUNCTION. So the only reliable way
11663 to detect them is to check the flags of their containing
11665 if (S_GET_SEGMENT (fixP
->fx_addsy
) != NULL
11666 && S_GET_SEGMENT (fixP
->fx_addsy
)->flags
& SEC_CODE
)
11670 value
-= S_GET_VALUE (fixP
->fx_addsy
);
11674 /* Fix a few things - the dynamic linker expects certain values here,
11675 and we must not disappoint it. */
11676 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11677 if (IS_ELF
&& fixP
->fx_addsy
)
11678 switch (fixP
->fx_r_type
)
11680 case BFD_RELOC_386_PLT32
:
11681 case BFD_RELOC_X86_64_PLT32
:
11682 /* Make the jump instruction point to the address of the operand.
11683 At runtime we merely add the offset to the actual PLT entry.
11684 NB: Subtract the offset size only for jump instructions. */
11685 if (fixP
->fx_pcrel
)
11689 case BFD_RELOC_386_TLS_GD
:
11690 case BFD_RELOC_386_TLS_LDM
:
11691 case BFD_RELOC_386_TLS_IE_32
:
11692 case BFD_RELOC_386_TLS_IE
:
11693 case BFD_RELOC_386_TLS_GOTIE
:
11694 case BFD_RELOC_386_TLS_GOTDESC
:
11695 case BFD_RELOC_X86_64_TLSGD
:
11696 case BFD_RELOC_X86_64_TLSLD
:
11697 case BFD_RELOC_X86_64_GOTTPOFF
:
11698 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
11699 value
= 0; /* Fully resolved at runtime. No addend. */
11701 case BFD_RELOC_386_TLS_LE
:
11702 case BFD_RELOC_386_TLS_LDO_32
:
11703 case BFD_RELOC_386_TLS_LE_32
:
11704 case BFD_RELOC_X86_64_DTPOFF32
:
11705 case BFD_RELOC_X86_64_DTPOFF64
:
11706 case BFD_RELOC_X86_64_TPOFF32
:
11707 case BFD_RELOC_X86_64_TPOFF64
:
11708 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
11711 case BFD_RELOC_386_TLS_DESC_CALL
:
11712 case BFD_RELOC_X86_64_TLSDESC_CALL
:
11713 value
= 0; /* Fully resolved at runtime. No addend. */
11714 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
11718 case BFD_RELOC_VTABLE_INHERIT
:
11719 case BFD_RELOC_VTABLE_ENTRY
:
11726 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
11728 #endif /* !defined (TE_Mach) */
11730 /* Are we finished with this relocation now? */
11731 if (fixP
->fx_addsy
== NULL
)
11733 #if defined (OBJ_COFF) && defined (TE_PE)
11734 else if (fixP
->fx_addsy
!= NULL
&& S_IS_WEAK (fixP
->fx_addsy
))
11737 /* Remember value for tc_gen_reloc. */
11738 fixP
->fx_addnumber
= value
;
11739 /* Clear out the frag for now. */
11743 else if (use_rela_relocations
)
11745 fixP
->fx_no_overflow
= 1;
11746 /* Remember value for tc_gen_reloc. */
11747 fixP
->fx_addnumber
= value
;
11751 md_number_to_chars (p
, value
, fixP
->fx_size
);
11755 md_atof (int type
, char *litP
, int *sizeP
)
11757 /* This outputs the LITTLENUMs in REVERSE order;
11758 in accord with the bigendian 386. */
11759 return ieee_md_atof (type
, litP
, sizeP
, FALSE
);
/* Scratch buffer for output_invalid: large enough for either a quoted
   character ('x') or a hex rendering ((0xNN)) plus the terminator.  */
static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];

/* Render the invalid input character C for a diagnostic message:
   printable characters are shown quoted, anything else as hex.
   Returns a pointer to a static buffer, overwritten on each call.  */

static char *
output_invalid (int c)
{
  const size_t bufsz = sizeof (output_invalid_buf);

  if (ISPRINT (c))
    snprintf (output_invalid_buf, bufsz, "'%c'", c);
  else
    snprintf (output_invalid_buf, bufsz, "(0x%x)", (unsigned char) c);

  return output_invalid_buf;
}
11776 /* REG_STRING starts *before* REGISTER_PREFIX. */
11778 static const reg_entry
*
11779 parse_real_register (char *reg_string
, char **end_op
)
11781 char *s
= reg_string
;
11783 char reg_name_given
[MAX_REG_NAME_SIZE
+ 1];
11784 const reg_entry
*r
;
11786 /* Skip possible REGISTER_PREFIX and possible whitespace. */
11787 if (*s
== REGISTER_PREFIX
)
11790 if (is_space_char (*s
))
11793 p
= reg_name_given
;
11794 while ((*p
++ = register_chars
[(unsigned char) *s
]) != '\0')
11796 if (p
>= reg_name_given
+ MAX_REG_NAME_SIZE
)
11797 return (const reg_entry
*) NULL
;
11801 /* For naked regs, make sure that we are not dealing with an identifier.
11802 This prevents confusing an identifier like `eax_var' with register
11804 if (allow_naked_reg
&& identifier_chars
[(unsigned char) *s
])
11805 return (const reg_entry
*) NULL
;
11809 r
= (const reg_entry
*) hash_find (reg_hash
, reg_name_given
);
11811 /* Handle floating point regs, allowing spaces in the (i) part. */
11812 if (r
== i386_regtab
/* %st is first entry of table */)
11814 if (!cpu_arch_flags
.bitfield
.cpu8087
11815 && !cpu_arch_flags
.bitfield
.cpu287
11816 && !cpu_arch_flags
.bitfield
.cpu387
)
11817 return (const reg_entry
*) NULL
;
11819 if (is_space_char (*s
))
11824 if (is_space_char (*s
))
11826 if (*s
>= '0' && *s
<= '7')
11828 int fpr
= *s
- '0';
11830 if (is_space_char (*s
))
11835 r
= (const reg_entry
*) hash_find (reg_hash
, "st(0)");
11840 /* We have "%st(" then garbage. */
11841 return (const reg_entry
*) NULL
;
11845 if (r
== NULL
|| allow_pseudo_reg
)
11848 if (operand_type_all_zero (&r
->reg_type
))
11849 return (const reg_entry
*) NULL
;
11851 if ((r
->reg_type
.bitfield
.dword
11852 || (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
> 3)
11853 || r
->reg_type
.bitfield
.class == RegCR
11854 || r
->reg_type
.bitfield
.class == RegDR
11855 || r
->reg_type
.bitfield
.class == RegTR
)
11856 && !cpu_arch_flags
.bitfield
.cpui386
)
11857 return (const reg_entry
*) NULL
;
11859 if (r
->reg_type
.bitfield
.class == RegMMX
&& !cpu_arch_flags
.bitfield
.cpummx
)
11860 return (const reg_entry
*) NULL
;
11862 if (!cpu_arch_flags
.bitfield
.cpuavx512f
)
11864 if (r
->reg_type
.bitfield
.zmmword
11865 || r
->reg_type
.bitfield
.class == RegMask
)
11866 return (const reg_entry
*) NULL
;
11868 if (!cpu_arch_flags
.bitfield
.cpuavx
)
11870 if (r
->reg_type
.bitfield
.ymmword
)
11871 return (const reg_entry
*) NULL
;
11873 if (!cpu_arch_flags
.bitfield
.cpusse
&& r
->reg_type
.bitfield
.xmmword
)
11874 return (const reg_entry
*) NULL
;
11878 if (r
->reg_type
.bitfield
.class == RegBND
&& !cpu_arch_flags
.bitfield
.cpumpx
)
11879 return (const reg_entry
*) NULL
;
11881 /* Don't allow fake index register unless allow_index_reg isn't 0. */
11882 if (!allow_index_reg
&& r
->reg_num
== RegIZ
)
11883 return (const reg_entry
*) NULL
;
11885 /* Upper 16 vector registers are only available with VREX in 64bit
11886 mode, and require EVEX encoding. */
11887 if (r
->reg_flags
& RegVRex
)
11889 if (!cpu_arch_flags
.bitfield
.cpuavx512f
11890 || flag_code
!= CODE_64BIT
)
11891 return (const reg_entry
*) NULL
;
11893 i
.vec_encoding
= vex_encoding_evex
;
11896 if (((r
->reg_flags
& (RegRex64
| RegRex
)) || r
->reg_type
.bitfield
.qword
)
11897 && (!cpu_arch_flags
.bitfield
.cpulm
|| r
->reg_type
.bitfield
.class != RegCR
)
11898 && flag_code
!= CODE_64BIT
)
11899 return (const reg_entry
*) NULL
;
11901 if (r
->reg_type
.bitfield
.class == SReg
&& r
->reg_num
== RegFlat
11903 return (const reg_entry
*) NULL
;
11908 /* REG_STRING starts *before* REGISTER_PREFIX. */
11910 static const reg_entry
*
11911 parse_register (char *reg_string
, char **end_op
)
11913 const reg_entry
*r
;
11915 if (*reg_string
== REGISTER_PREFIX
|| allow_naked_reg
)
11916 r
= parse_real_register (reg_string
, end_op
);
11921 char *save
= input_line_pointer
;
11925 input_line_pointer
= reg_string
;
11926 c
= get_symbol_name (®_string
);
11927 symbolP
= symbol_find (reg_string
);
11928 if (symbolP
&& S_GET_SEGMENT (symbolP
) == reg_section
)
11930 const expressionS
*e
= symbol_get_value_expression (symbolP
);
11932 know (e
->X_op
== O_register
);
11933 know (e
->X_add_number
>= 0
11934 && (valueT
) e
->X_add_number
< i386_regtab_size
);
11935 r
= i386_regtab
+ e
->X_add_number
;
11936 if ((r
->reg_flags
& RegVRex
))
11937 i
.vec_encoding
= vex_encoding_evex
;
11938 *end_op
= input_line_pointer
;
11940 *input_line_pointer
= c
;
11941 input_line_pointer
= save
;
11947 i386_parse_name (char *name
, expressionS
*e
, char *nextcharP
)
11949 const reg_entry
*r
;
11950 char *end
= input_line_pointer
;
11953 r
= parse_register (name
, &input_line_pointer
);
11954 if (r
&& end
<= input_line_pointer
)
11956 *nextcharP
= *input_line_pointer
;
11957 *input_line_pointer
= 0;
11958 e
->X_op
= O_register
;
11959 e
->X_add_number
= r
- i386_regtab
;
11962 input_line_pointer
= end
;
11964 return intel_syntax
? i386_intel_parse_name (name
, e
) : 0;
11968 md_operand (expressionS
*e
)
11971 const reg_entry
*r
;
11973 switch (*input_line_pointer
)
11975 case REGISTER_PREFIX
:
11976 r
= parse_real_register (input_line_pointer
, &end
);
11979 e
->X_op
= O_register
;
11980 e
->X_add_number
= r
- i386_regtab
;
11981 input_line_pointer
= end
;
11986 gas_assert (intel_syntax
);
11987 end
= input_line_pointer
++;
11989 if (*input_line_pointer
== ']')
11991 ++input_line_pointer
;
11992 e
->X_op_symbol
= make_expr_symbol (e
);
11993 e
->X_add_symbol
= NULL
;
11994 e
->X_add_number
= 0;
11999 e
->X_op
= O_absent
;
12000 input_line_pointer
= end
;
12007 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12008 const char *md_shortopts
= "kVQ:sqnO::";
12010 const char *md_shortopts
= "qnO::";
12013 #define OPTION_32 (OPTION_MD_BASE + 0)
12014 #define OPTION_64 (OPTION_MD_BASE + 1)
12015 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
12016 #define OPTION_MARCH (OPTION_MD_BASE + 3)
12017 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
12018 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
12019 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
12020 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
12021 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
12022 #define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 9)
12023 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
12024 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
12025 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
12026 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
12027 #define OPTION_X32 (OPTION_MD_BASE + 14)
12028 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
12029 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
12030 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
12031 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
12032 #define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
12033 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
12034 #define OPTION_MSHARED (OPTION_MD_BASE + 21)
12035 #define OPTION_MAMD64 (OPTION_MD_BASE + 22)
12036 #define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
12037 #define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
12038 #define OPTION_X86_USED_NOTE (OPTION_MD_BASE + 25)
12039 #define OPTION_MVEXWIG (OPTION_MD_BASE + 26)
12040 #define OPTION_MALIGN_BRANCH_BOUNDARY (OPTION_MD_BASE + 27)
12041 #define OPTION_MALIGN_BRANCH_PREFIX_SIZE (OPTION_MD_BASE + 28)
12042 #define OPTION_MALIGN_BRANCH (OPTION_MD_BASE + 29)
12043 #define OPTION_MBRANCHES_WITH_32B_BOUNDARIES (OPTION_MD_BASE + 30)
12045 struct option md_longopts
[] =
12047 {"32", no_argument
, NULL
, OPTION_32
},
12048 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
12049 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
12050 {"64", no_argument
, NULL
, OPTION_64
},
12052 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12053 {"x32", no_argument
, NULL
, OPTION_X32
},
12054 {"mshared", no_argument
, NULL
, OPTION_MSHARED
},
12055 {"mx86-used-note", required_argument
, NULL
, OPTION_X86_USED_NOTE
},
12057 {"divide", no_argument
, NULL
, OPTION_DIVIDE
},
12058 {"march", required_argument
, NULL
, OPTION_MARCH
},
12059 {"mtune", required_argument
, NULL
, OPTION_MTUNE
},
12060 {"mmnemonic", required_argument
, NULL
, OPTION_MMNEMONIC
},
12061 {"msyntax", required_argument
, NULL
, OPTION_MSYNTAX
},
12062 {"mindex-reg", no_argument
, NULL
, OPTION_MINDEX_REG
},
12063 {"mnaked-reg", no_argument
, NULL
, OPTION_MNAKED_REG
},
12064 {"msse2avx", no_argument
, NULL
, OPTION_MSSE2AVX
},
12065 {"msse-check", required_argument
, NULL
, OPTION_MSSE_CHECK
},
12066 {"moperand-check", required_argument
, NULL
, OPTION_MOPERAND_CHECK
},
12067 {"mavxscalar", required_argument
, NULL
, OPTION_MAVXSCALAR
},
12068 {"mvexwig", required_argument
, NULL
, OPTION_MVEXWIG
},
12069 {"madd-bnd-prefix", no_argument
, NULL
, OPTION_MADD_BND_PREFIX
},
12070 {"mevexlig", required_argument
, NULL
, OPTION_MEVEXLIG
},
12071 {"mevexwig", required_argument
, NULL
, OPTION_MEVEXWIG
},
12072 # if defined (TE_PE) || defined (TE_PEP)
12073 {"mbig-obj", no_argument
, NULL
, OPTION_MBIG_OBJ
},
12075 {"momit-lock-prefix", required_argument
, NULL
, OPTION_MOMIT_LOCK_PREFIX
},
12076 {"mfence-as-lock-add", required_argument
, NULL
, OPTION_MFENCE_AS_LOCK_ADD
},
12077 {"mrelax-relocations", required_argument
, NULL
, OPTION_MRELAX_RELOCATIONS
},
12078 {"mevexrcig", required_argument
, NULL
, OPTION_MEVEXRCIG
},
12079 {"malign-branch-boundary", required_argument
, NULL
, OPTION_MALIGN_BRANCH_BOUNDARY
},
12080 {"malign-branch-prefix-size", required_argument
, NULL
, OPTION_MALIGN_BRANCH_PREFIX_SIZE
},
12081 {"malign-branch", required_argument
, NULL
, OPTION_MALIGN_BRANCH
},
12082 {"mbranches-within-32B-boundaries", no_argument
, NULL
, OPTION_MBRANCHES_WITH_32B_BOUNDARIES
},
12083 {"mamd64", no_argument
, NULL
, OPTION_MAMD64
},
12084 {"mintel64", no_argument
, NULL
, OPTION_MINTEL64
},
12085 {NULL
, no_argument
, NULL
, 0}
12087 size_t md_longopts_size
= sizeof (md_longopts
);
12090 md_parse_option (int c
, const char *arg
)
12093 char *arch
, *next
, *saved
, *type
;
12098 optimize_align_code
= 0;
12102 quiet_warnings
= 1;
12105 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12106 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
12107 should be emitted or not. FIXME: Not implemented. */
12109 if ((arg
[0] != 'y' && arg
[0] != 'n') || arg
[1])
12113 /* -V: SVR4 argument to print version ID. */
12115 print_version_id ();
12118 /* -k: Ignore for FreeBSD compatibility. */
12123 /* -s: On i386 Solaris, this tells the native assembler to use
12124 .stab instead of .stab.excl. We always use .stab anyhow. */
12127 case OPTION_MSHARED
:
12131 case OPTION_X86_USED_NOTE
:
12132 if (strcasecmp (arg
, "yes") == 0)
12134 else if (strcasecmp (arg
, "no") == 0)
12137 as_fatal (_("invalid -mx86-used-note= option: `%s'"), arg
);
12142 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
12143 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
12146 const char **list
, **l
;
12148 list
= bfd_target_list ();
12149 for (l
= list
; *l
!= NULL
; l
++)
12150 if (CONST_STRNEQ (*l
, "elf64-x86-64")
12151 || strcmp (*l
, "coff-x86-64") == 0
12152 || strcmp (*l
, "pe-x86-64") == 0
12153 || strcmp (*l
, "pei-x86-64") == 0
12154 || strcmp (*l
, "mach-o-x86-64") == 0)
12156 default_arch
= "x86_64";
12160 as_fatal (_("no compiled in support for x86_64"));
12166 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12170 const char **list
, **l
;
12172 list
= bfd_target_list ();
12173 for (l
= list
; *l
!= NULL
; l
++)
12174 if (CONST_STRNEQ (*l
, "elf32-x86-64"))
12176 default_arch
= "x86_64:32";
12180 as_fatal (_("no compiled in support for 32bit x86_64"));
12184 as_fatal (_("32bit x86_64 is only supported for ELF"));
12189 default_arch
= "i386";
12192 case OPTION_DIVIDE
:
12193 #ifdef SVR4_COMMENT_CHARS
12198 n
= XNEWVEC (char, strlen (i386_comment_chars
) + 1);
12200 for (s
= i386_comment_chars
; *s
!= '\0'; s
++)
12204 i386_comment_chars
= n
;
12210 saved
= xstrdup (arg
);
12212 /* Allow -march=+nosse. */
12218 as_fatal (_("invalid -march= option: `%s'"), arg
);
12219 next
= strchr (arch
, '+');
12222 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
12224 if (strcmp (arch
, cpu_arch
[j
].name
) == 0)
12227 if (! cpu_arch
[j
].flags
.bitfield
.cpui386
)
12230 cpu_arch_name
= cpu_arch
[j
].name
;
12231 cpu_sub_arch_name
= NULL
;
12232 cpu_arch_flags
= cpu_arch
[j
].flags
;
12233 cpu_arch_isa
= cpu_arch
[j
].type
;
12234 cpu_arch_isa_flags
= cpu_arch
[j
].flags
;
12235 if (!cpu_arch_tune_set
)
12237 cpu_arch_tune
= cpu_arch_isa
;
12238 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
12242 else if (*cpu_arch
[j
].name
== '.'
12243 && strcmp (arch
, cpu_arch
[j
].name
+ 1) == 0)
12245 /* ISA extension. */
12246 i386_cpu_flags flags
;
12248 flags
= cpu_flags_or (cpu_arch_flags
,
12249 cpu_arch
[j
].flags
);
12251 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
12253 if (cpu_sub_arch_name
)
12255 char *name
= cpu_sub_arch_name
;
12256 cpu_sub_arch_name
= concat (name
,
12258 (const char *) NULL
);
12262 cpu_sub_arch_name
= xstrdup (cpu_arch
[j
].name
);
12263 cpu_arch_flags
= flags
;
12264 cpu_arch_isa_flags
= flags
;
12268 = cpu_flags_or (cpu_arch_isa_flags
,
12269 cpu_arch
[j
].flags
);
12274 if (j
>= ARRAY_SIZE (cpu_arch
))
12276 /* Disable an ISA extension. */
12277 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
12278 if (strcmp (arch
, cpu_noarch
[j
].name
) == 0)
12280 i386_cpu_flags flags
;
12282 flags
= cpu_flags_and_not (cpu_arch_flags
,
12283 cpu_noarch
[j
].flags
);
12284 if (!cpu_flags_equal (&flags
, &cpu_arch_flags
))
12286 if (cpu_sub_arch_name
)
12288 char *name
= cpu_sub_arch_name
;
12289 cpu_sub_arch_name
= concat (arch
,
12290 (const char *) NULL
);
12294 cpu_sub_arch_name
= xstrdup (arch
);
12295 cpu_arch_flags
= flags
;
12296 cpu_arch_isa_flags
= flags
;
12301 if (j
>= ARRAY_SIZE (cpu_noarch
))
12302 j
= ARRAY_SIZE (cpu_arch
);
12305 if (j
>= ARRAY_SIZE (cpu_arch
))
12306 as_fatal (_("invalid -march= option: `%s'"), arg
);
12310 while (next
!= NULL
);
12316 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
12317 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
12319 if (strcmp (arg
, cpu_arch
[j
].name
) == 0)
12321 cpu_arch_tune_set
= 1;
12322 cpu_arch_tune
= cpu_arch
[j
].type
;
12323 cpu_arch_tune_flags
= cpu_arch
[j
].flags
;
12327 if (j
>= ARRAY_SIZE (cpu_arch
))
12328 as_fatal (_("invalid -mtune= option: `%s'"), arg
);
12331 case OPTION_MMNEMONIC
:
12332 if (strcasecmp (arg
, "att") == 0)
12333 intel_mnemonic
= 0;
12334 else if (strcasecmp (arg
, "intel") == 0)
12335 intel_mnemonic
= 1;
12337 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg
);
12340 case OPTION_MSYNTAX
:
12341 if (strcasecmp (arg
, "att") == 0)
12343 else if (strcasecmp (arg
, "intel") == 0)
12346 as_fatal (_("invalid -msyntax= option: `%s'"), arg
);
12349 case OPTION_MINDEX_REG
:
12350 allow_index_reg
= 1;
12353 case OPTION_MNAKED_REG
:
12354 allow_naked_reg
= 1;
12357 case OPTION_MSSE2AVX
:
12361 case OPTION_MSSE_CHECK
:
12362 if (strcasecmp (arg
, "error") == 0)
12363 sse_check
= check_error
;
12364 else if (strcasecmp (arg
, "warning") == 0)
12365 sse_check
= check_warning
;
12366 else if (strcasecmp (arg
, "none") == 0)
12367 sse_check
= check_none
;
12369 as_fatal (_("invalid -msse-check= option: `%s'"), arg
);
12372 case OPTION_MOPERAND_CHECK
:
12373 if (strcasecmp (arg
, "error") == 0)
12374 operand_check
= check_error
;
12375 else if (strcasecmp (arg
, "warning") == 0)
12376 operand_check
= check_warning
;
12377 else if (strcasecmp (arg
, "none") == 0)
12378 operand_check
= check_none
;
12380 as_fatal (_("invalid -moperand-check= option: `%s'"), arg
);
12383 case OPTION_MAVXSCALAR
:
12384 if (strcasecmp (arg
, "128") == 0)
12385 avxscalar
= vex128
;
12386 else if (strcasecmp (arg
, "256") == 0)
12387 avxscalar
= vex256
;
12389 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg
);
12392 case OPTION_MVEXWIG
:
12393 if (strcmp (arg
, "0") == 0)
12395 else if (strcmp (arg
, "1") == 0)
12398 as_fatal (_("invalid -mvexwig= option: `%s'"), arg
);
12401 case OPTION_MADD_BND_PREFIX
:
12402 add_bnd_prefix
= 1;
12405 case OPTION_MEVEXLIG
:
12406 if (strcmp (arg
, "128") == 0)
12407 evexlig
= evexl128
;
12408 else if (strcmp (arg
, "256") == 0)
12409 evexlig
= evexl256
;
12410 else if (strcmp (arg
, "512") == 0)
12411 evexlig
= evexl512
;
12413 as_fatal (_("invalid -mevexlig= option: `%s'"), arg
);
12416 case OPTION_MEVEXRCIG
:
12417 if (strcmp (arg
, "rne") == 0)
12419 else if (strcmp (arg
, "rd") == 0)
12421 else if (strcmp (arg
, "ru") == 0)
12423 else if (strcmp (arg
, "rz") == 0)
12426 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg
);
12429 case OPTION_MEVEXWIG
:
12430 if (strcmp (arg
, "0") == 0)
12432 else if (strcmp (arg
, "1") == 0)
12435 as_fatal (_("invalid -mevexwig= option: `%s'"), arg
);
12438 # if defined (TE_PE) || defined (TE_PEP)
12439 case OPTION_MBIG_OBJ
:
12444 case OPTION_MOMIT_LOCK_PREFIX
:
12445 if (strcasecmp (arg
, "yes") == 0)
12446 omit_lock_prefix
= 1;
12447 else if (strcasecmp (arg
, "no") == 0)
12448 omit_lock_prefix
= 0;
12450 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg
);
12453 case OPTION_MFENCE_AS_LOCK_ADD
:
12454 if (strcasecmp (arg
, "yes") == 0)
12456 else if (strcasecmp (arg
, "no") == 0)
12459 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg
);
12462 case OPTION_MRELAX_RELOCATIONS
:
12463 if (strcasecmp (arg
, "yes") == 0)
12464 generate_relax_relocations
= 1;
12465 else if (strcasecmp (arg
, "no") == 0)
12466 generate_relax_relocations
= 0;
12468 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg
);
12471 case OPTION_MALIGN_BRANCH_BOUNDARY
:
12474 long int align
= strtoul (arg
, &end
, 0);
12479 align_branch_power
= 0;
12482 else if (align
>= 16)
12485 for (align_power
= 0;
12487 align
>>= 1, align_power
++)
12489 /* Limit alignment power to 31. */
12490 if (align
== 1 && align_power
< 32)
12492 align_branch_power
= align_power
;
12497 as_fatal (_("invalid -malign-branch-boundary= value: %s"), arg
);
12501 case OPTION_MALIGN_BRANCH_PREFIX_SIZE
:
12504 int align
= strtoul (arg
, &end
, 0);
12505 /* Some processors only support 5 prefixes. */
12506 if (*end
== '\0' && align
>= 0 && align
< 6)
12508 align_branch_prefix_size
= align
;
12511 as_fatal (_("invalid -malign-branch-prefix-size= value: %s"),
12516 case OPTION_MALIGN_BRANCH
:
12518 saved
= xstrdup (arg
);
12522 next
= strchr (type
, '+');
12525 if (strcasecmp (type
, "jcc") == 0)
12526 align_branch
|= align_branch_jcc_bit
;
12527 else if (strcasecmp (type
, "fused") == 0)
12528 align_branch
|= align_branch_fused_bit
;
12529 else if (strcasecmp (type
, "jmp") == 0)
12530 align_branch
|= align_branch_jmp_bit
;
12531 else if (strcasecmp (type
, "call") == 0)
12532 align_branch
|= align_branch_call_bit
;
12533 else if (strcasecmp (type
, "ret") == 0)
12534 align_branch
|= align_branch_ret_bit
;
12535 else if (strcasecmp (type
, "indirect") == 0)
12536 align_branch
|= align_branch_indirect_bit
;
12538 as_fatal (_("invalid -malign-branch= option: `%s'"), arg
);
12541 while (next
!= NULL
);
12545 case OPTION_MBRANCHES_WITH_32B_BOUNDARIES
:
12546 align_branch_power
= 5;
12547 align_branch_prefix_size
= 5;
12548 align_branch
= (align_branch_jcc_bit
12549 | align_branch_fused_bit
12550 | align_branch_jmp_bit
);
12553 case OPTION_MAMD64
:
12557 case OPTION_MINTEL64
:
12565 /* Turn off -Os. */
12566 optimize_for_space
= 0;
12568 else if (*arg
== 's')
12570 optimize_for_space
= 1;
12571 /* Turn on all encoding optimizations. */
12572 optimize
= INT_MAX
;
12576 optimize
= atoi (arg
);
12577 /* Turn off -Os. */
12578 optimize_for_space
= 0;
12588 #define MESSAGE_TEMPLATE \
12592 output_message (FILE *stream
, char *p
, char *message
, char *start
,
12593 int *left_p
, const char *name
, int len
)
12595 int size
= sizeof (MESSAGE_TEMPLATE
);
12596 int left
= *left_p
;
12598 /* Reserve 2 spaces for ", " or ",\0" */
12601 /* Check if there is any room. */
12609 p
= mempcpy (p
, name
, len
);
12613 /* Output the current message now and start a new one. */
12616 fprintf (stream
, "%s\n", message
);
12618 left
= size
- (start
- message
) - len
- 2;
12620 gas_assert (left
>= 0);
12622 p
= mempcpy (p
, name
, len
);
12630 show_arch (FILE *stream
, int ext
, int check
)
12632 static char message
[] = MESSAGE_TEMPLATE
;
12633 char *start
= message
+ 27;
12635 int size
= sizeof (MESSAGE_TEMPLATE
);
12642 left
= size
- (start
- message
);
12643 for (j
= 0; j
< ARRAY_SIZE (cpu_arch
); j
++)
12645 /* Should it be skipped? */
12646 if (cpu_arch
[j
].skip
)
12649 name
= cpu_arch
[j
].name
;
12650 len
= cpu_arch
[j
].len
;
12653 /* It is an extension. Skip if we aren't asked to show it. */
12664 /* It is an processor. Skip if we show only extension. */
12667 else if (check
&& ! cpu_arch
[j
].flags
.bitfield
.cpui386
)
12669 /* It is an impossible processor - skip. */
12673 p
= output_message (stream
, p
, message
, start
, &left
, name
, len
);
12676 /* Display disabled extensions. */
12678 for (j
= 0; j
< ARRAY_SIZE (cpu_noarch
); j
++)
12680 name
= cpu_noarch
[j
].name
;
12681 len
= cpu_noarch
[j
].len
;
12682 p
= output_message (stream
, p
, message
, start
, &left
, name
,
12687 fprintf (stream
, "%s\n", message
);
12691 md_show_usage (FILE *stream
)
12693 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12694 fprintf (stream
, _("\
12695 -Qy, -Qn ignored\n\
12696 -V print assembler version number\n\
12699 fprintf (stream
, _("\
12700 -n Do not optimize code alignment\n\
12701 -q quieten some warnings\n"));
12702 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12703 fprintf (stream
, _("\
12706 #if defined BFD64 && (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
12707 || defined (TE_PE) || defined (TE_PEP))
12708 fprintf (stream
, _("\
12709 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
12711 #ifdef SVR4_COMMENT_CHARS
12712 fprintf (stream
, _("\
12713 --divide do not treat `/' as a comment character\n"));
12715 fprintf (stream
, _("\
12716 --divide ignored\n"));
12718 fprintf (stream
, _("\
12719 -march=CPU[,+EXTENSION...]\n\
12720 generate code for CPU and EXTENSION, CPU is one of:\n"));
12721 show_arch (stream
, 0, 1);
12722 fprintf (stream
, _("\
12723 EXTENSION is combination of:\n"));
12724 show_arch (stream
, 1, 0);
12725 fprintf (stream
, _("\
12726 -mtune=CPU optimize for CPU, CPU is one of:\n"));
12727 show_arch (stream
, 0, 0);
12728 fprintf (stream
, _("\
12729 -msse2avx encode SSE instructions with VEX prefix\n"));
12730 fprintf (stream
, _("\
12731 -msse-check=[none|error|warning] (default: warning)\n\
12732 check SSE instructions\n"));
12733 fprintf (stream
, _("\
12734 -moperand-check=[none|error|warning] (default: warning)\n\
12735 check operand combinations for validity\n"));
12736 fprintf (stream
, _("\
12737 -mavxscalar=[128|256] (default: 128)\n\
12738 encode scalar AVX instructions with specific vector\n\
12740 fprintf (stream
, _("\
12741 -mvexwig=[0|1] (default: 0)\n\
12742 encode VEX instructions with specific VEX.W value\n\
12743 for VEX.W bit ignored instructions\n"));
12744 fprintf (stream
, _("\
12745 -mevexlig=[128|256|512] (default: 128)\n\
12746 encode scalar EVEX instructions with specific vector\n\
12748 fprintf (stream
, _("\
12749 -mevexwig=[0|1] (default: 0)\n\
12750 encode EVEX instructions with specific EVEX.W value\n\
12751 for EVEX.W bit ignored instructions\n"));
12752 fprintf (stream
, _("\
12753 -mevexrcig=[rne|rd|ru|rz] (default: rne)\n\
12754 encode EVEX instructions with specific EVEX.RC value\n\
12755 for SAE-only ignored instructions\n"));
12756 fprintf (stream
, _("\
12757 -mmnemonic=[att|intel] "));
12758 if (SYSV386_COMPAT
)
12759 fprintf (stream
, _("(default: att)\n"));
12761 fprintf (stream
, _("(default: intel)\n"));
12762 fprintf (stream
, _("\
12763 use AT&T/Intel mnemonic\n"));
12764 fprintf (stream
, _("\
12765 -msyntax=[att|intel] (default: att)\n\
12766 use AT&T/Intel syntax\n"));
12767 fprintf (stream
, _("\
12768 -mindex-reg support pseudo index registers\n"));
12769 fprintf (stream
, _("\
12770 -mnaked-reg don't require `%%' prefix for registers\n"));
12771 fprintf (stream
, _("\
12772 -madd-bnd-prefix add BND prefix for all valid branches\n"));
12773 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12774 fprintf (stream
, _("\
12775 -mshared disable branch optimization for shared code\n"));
12776 fprintf (stream
, _("\
12777 -mx86-used-note=[no|yes] "));
12778 if (DEFAULT_X86_USED_NOTE
)
12779 fprintf (stream
, _("(default: yes)\n"));
12781 fprintf (stream
, _("(default: no)\n"));
12782 fprintf (stream
, _("\
12783 generate x86 used ISA and feature properties\n"));
12785 #if defined (TE_PE) || defined (TE_PEP)
12786 fprintf (stream
, _("\
12787 -mbig-obj generate big object files\n"));
12789 fprintf (stream
, _("\
12790 -momit-lock-prefix=[no|yes] (default: no)\n\
12791 strip all lock prefixes\n"));
12792 fprintf (stream
, _("\
12793 -mfence-as-lock-add=[no|yes] (default: no)\n\
12794 encode lfence, mfence and sfence as\n\
12795 lock addl $0x0, (%%{re}sp)\n"));
12796 fprintf (stream
, _("\
12797 -mrelax-relocations=[no|yes] "));
12798 if (DEFAULT_GENERATE_X86_RELAX_RELOCATIONS
)
12799 fprintf (stream
, _("(default: yes)\n"));
12801 fprintf (stream
, _("(default: no)\n"));
12802 fprintf (stream
, _("\
12803 generate relax relocations\n"));
12804 fprintf (stream
, _("\
12805 -malign-branch-boundary=NUM (default: 0)\n\
12806 align branches within NUM byte boundary\n"));
12807 fprintf (stream
, _("\
12808 -malign-branch=TYPE[+TYPE...] (default: jcc+fused+jmp)\n\
12809 TYPE is combination of jcc, fused, jmp, call, ret,\n\
12811 specify types of branches to align\n"));
12812 fprintf (stream
, _("\
12813 -malign-branch-prefix-size=NUM (default: 5)\n\
12814 align branches with NUM prefixes per instruction\n"));
12815 fprintf (stream
, _("\
12816 -mbranches-within-32B-boundaries\n\
12817 align branches within 32 byte boundary\n"));
12818 fprintf (stream
, _("\
12819 -mamd64 accept only AMD64 ISA [default]\n"));
12820 fprintf (stream
, _("\
12821 -mintel64 accept only Intel64 ISA\n"));
12824 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
12825 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
12826 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
12828 /* Pick the target format to use. */
12831 i386_target_format (void)
12833 if (!strncmp (default_arch
, "x86_64", 6))
12835 update_code_flag (CODE_64BIT
, 1);
12836 if (default_arch
[6] == '\0')
12837 x86_elf_abi
= X86_64_ABI
;
12839 x86_elf_abi
= X86_64_X32_ABI
;
12841 else if (!strcmp (default_arch
, "i386"))
12842 update_code_flag (CODE_32BIT
, 1);
12843 else if (!strcmp (default_arch
, "iamcu"))
12845 update_code_flag (CODE_32BIT
, 1);
12846 if (cpu_arch_isa
== PROCESSOR_UNKNOWN
)
12848 static const i386_cpu_flags iamcu_flags
= CPU_IAMCU_FLAGS
;
12849 cpu_arch_name
= "iamcu";
12850 cpu_sub_arch_name
= NULL
;
12851 cpu_arch_flags
= iamcu_flags
;
12852 cpu_arch_isa
= PROCESSOR_IAMCU
;
12853 cpu_arch_isa_flags
= iamcu_flags
;
12854 if (!cpu_arch_tune_set
)
12856 cpu_arch_tune
= cpu_arch_isa
;
12857 cpu_arch_tune_flags
= cpu_arch_isa_flags
;
12860 else if (cpu_arch_isa
!= PROCESSOR_IAMCU
)
12861 as_fatal (_("Intel MCU doesn't support `%s' architecture"),
12865 as_fatal (_("unknown architecture"));
12867 if (cpu_flags_all_zero (&cpu_arch_isa_flags
))
12868 cpu_arch_isa_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
12869 if (cpu_flags_all_zero (&cpu_arch_tune_flags
))
12870 cpu_arch_tune_flags
= cpu_arch
[flag_code
== CODE_64BIT
].flags
;
12872 switch (OUTPUT_FLAVOR
)
12874 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
12875 case bfd_target_aout_flavour
:
12876 return AOUT_TARGET_FORMAT
;
12878 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
12879 # if defined (TE_PE) || defined (TE_PEP)
12880 case bfd_target_coff_flavour
:
12881 if (flag_code
== CODE_64BIT
)
12882 return use_big_obj
? "pe-bigobj-x86-64" : "pe-x86-64";
12885 # elif defined (TE_GO32)
12886 case bfd_target_coff_flavour
:
12887 return "coff-go32";
12889 case bfd_target_coff_flavour
:
12890 return "coff-i386";
12893 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
12894 case bfd_target_elf_flavour
:
12896 const char *format
;
12898 switch (x86_elf_abi
)
12901 format
= ELF_TARGET_FORMAT
;
12903 tls_get_addr
= "___tls_get_addr";
12907 use_rela_relocations
= 1;
12910 tls_get_addr
= "__tls_get_addr";
12912 format
= ELF_TARGET_FORMAT64
;
12914 case X86_64_X32_ABI
:
12915 use_rela_relocations
= 1;
12918 tls_get_addr
= "__tls_get_addr";
12920 disallow_64bit_reloc
= 1;
12921 format
= ELF_TARGET_FORMAT32
;
12924 if (cpu_arch_isa
== PROCESSOR_L1OM
)
12926 if (x86_elf_abi
!= X86_64_ABI
)
12927 as_fatal (_("Intel L1OM is 64bit only"));
12928 return ELF_TARGET_L1OM_FORMAT
;
12930 else if (cpu_arch_isa
== PROCESSOR_K1OM
)
12932 if (x86_elf_abi
!= X86_64_ABI
)
12933 as_fatal (_("Intel K1OM is 64bit only"));
12934 return ELF_TARGET_K1OM_FORMAT
;
12936 else if (cpu_arch_isa
== PROCESSOR_IAMCU
)
12938 if (x86_elf_abi
!= I386_ABI
)
12939 as_fatal (_("Intel MCU is 32bit only"));
12940 return ELF_TARGET_IAMCU_FORMAT
;
12946 #if defined (OBJ_MACH_O)
12947 case bfd_target_mach_o_flavour
:
12948 if (flag_code
== CODE_64BIT
)
12950 use_rela_relocations
= 1;
12952 return "mach-o-x86-64";
12955 return "mach-o-i386";
12963 #endif /* OBJ_MAYBE_ more than one */
12966 md_undefined_symbol (char *name
)
12968 if (name
[0] == GLOBAL_OFFSET_TABLE_NAME
[0]
12969 && name
[1] == GLOBAL_OFFSET_TABLE_NAME
[1]
12970 && name
[2] == GLOBAL_OFFSET_TABLE_NAME
[2]
12971 && strcmp (name
, GLOBAL_OFFSET_TABLE_NAME
) == 0)
12975 if (symbol_find (name
))
12976 as_bad (_("GOT already in symbol table"));
12977 GOT_symbol
= symbol_new (name
, undefined_section
,
12978 (valueT
) 0, &zero_address_frag
);
12985 /* Round up a section size to the appropriate boundary. */
12988 md_section_align (segT segment ATTRIBUTE_UNUSED
, valueT size
)
12990 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
12991 if (OUTPUT_FLAVOR
== bfd_target_aout_flavour
)
12993 /* For a.out, force the section size to be aligned. If we don't do
12994 this, BFD will align it for us, but it will not write out the
12995 final bytes of the section. This may be a bug in BFD, but it is
12996 easier to fix it here since that is how the other a.out targets
13000 align
= bfd_section_alignment (segment
);
13001 size
= ((size
+ (1 << align
) - 1) & (-((valueT
) 1 << align
)));
13008 /* On the i386, PC-relative offsets are relative to the start of the
13009 next instruction. That is, the address of the offset, plus its
13010 size, since the offset is always the last part of the insn. */
13013 md_pcrel_from (fixS
*fixP
)
13015 return fixP
->fx_size
+ fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
13021 s_bss (int ignore ATTRIBUTE_UNUSED
)
13025 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13027 obj_elf_section_change_hook ();
13029 temp
= get_absolute_expression ();
13030 subseg_set (bss_section
, (subsegT
) temp
);
13031 demand_empty_rest_of_line ();
13036 /* Remember constant directive. */
13039 i386_cons_align (int ignore ATTRIBUTE_UNUSED
)
13041 if (last_insn
.kind
!= last_insn_directive
13042 && (bfd_section_flags (now_seg
) & SEC_CODE
))
13044 last_insn
.seg
= now_seg
;
13045 last_insn
.kind
= last_insn_directive
;
13046 last_insn
.name
= "constant directive";
13047 last_insn
.file
= as_where (&last_insn
.line
);
13052 i386_validate_fix (fixS
*fixp
)
13054 if (fixp
->fx_subsy
)
13056 if (fixp
->fx_subsy
== GOT_symbol
)
13058 if (fixp
->fx_r_type
== BFD_RELOC_32_PCREL
)
13062 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13063 if (fixp
->fx_tcbit2
)
13064 fixp
->fx_r_type
= (fixp
->fx_tcbit
13065 ? BFD_RELOC_X86_64_REX_GOTPCRELX
13066 : BFD_RELOC_X86_64_GOTPCRELX
);
13069 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTPCREL
;
13074 fixp
->fx_r_type
= BFD_RELOC_386_GOTOFF
;
13076 fixp
->fx_r_type
= BFD_RELOC_X86_64_GOTOFF64
;
13078 fixp
->fx_subsy
= 0;
13081 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13082 else if (!object_64bit
)
13084 if (fixp
->fx_r_type
== BFD_RELOC_386_GOT32
13085 && fixp
->fx_tcbit2
)
13086 fixp
->fx_r_type
= BFD_RELOC_386_GOT32X
;
13092 tc_gen_reloc (asection
*section ATTRIBUTE_UNUSED
, fixS
*fixp
)
13095 bfd_reloc_code_real_type code
;
13097 switch (fixp
->fx_r_type
)
13099 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13100 case BFD_RELOC_SIZE32
:
13101 case BFD_RELOC_SIZE64
:
13102 if (S_IS_DEFINED (fixp
->fx_addsy
)
13103 && !S_IS_EXTERNAL (fixp
->fx_addsy
))
13105 /* Resolve size relocation against local symbol to size of
13106 the symbol plus addend. */
13107 valueT value
= S_GET_SIZE (fixp
->fx_addsy
) + fixp
->fx_offset
;
13108 if (fixp
->fx_r_type
== BFD_RELOC_SIZE32
13109 && !fits_in_unsigned_long (value
))
13110 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
13111 _("symbol size computation overflow"));
13112 fixp
->fx_addsy
= NULL
;
13113 fixp
->fx_subsy
= NULL
;
13114 md_apply_fix (fixp
, (valueT
*) &value
, NULL
);
13118 /* Fall through. */
13120 case BFD_RELOC_X86_64_PLT32
:
13121 case BFD_RELOC_X86_64_GOT32
:
13122 case BFD_RELOC_X86_64_GOTPCREL
:
13123 case BFD_RELOC_X86_64_GOTPCRELX
:
13124 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
13125 case BFD_RELOC_386_PLT32
:
13126 case BFD_RELOC_386_GOT32
:
13127 case BFD_RELOC_386_GOT32X
:
13128 case BFD_RELOC_386_GOTOFF
:
13129 case BFD_RELOC_386_GOTPC
:
13130 case BFD_RELOC_386_TLS_GD
:
13131 case BFD_RELOC_386_TLS_LDM
:
13132 case BFD_RELOC_386_TLS_LDO_32
:
13133 case BFD_RELOC_386_TLS_IE_32
:
13134 case BFD_RELOC_386_TLS_IE
:
13135 case BFD_RELOC_386_TLS_GOTIE
:
13136 case BFD_RELOC_386_TLS_LE_32
:
13137 case BFD_RELOC_386_TLS_LE
:
13138 case BFD_RELOC_386_TLS_GOTDESC
:
13139 case BFD_RELOC_386_TLS_DESC_CALL
:
13140 case BFD_RELOC_X86_64_TLSGD
:
13141 case BFD_RELOC_X86_64_TLSLD
:
13142 case BFD_RELOC_X86_64_DTPOFF32
:
13143 case BFD_RELOC_X86_64_DTPOFF64
:
13144 case BFD_RELOC_X86_64_GOTTPOFF
:
13145 case BFD_RELOC_X86_64_TPOFF32
:
13146 case BFD_RELOC_X86_64_TPOFF64
:
13147 case BFD_RELOC_X86_64_GOTOFF64
:
13148 case BFD_RELOC_X86_64_GOTPC32
:
13149 case BFD_RELOC_X86_64_GOT64
:
13150 case BFD_RELOC_X86_64_GOTPCREL64
:
13151 case BFD_RELOC_X86_64_GOTPC64
:
13152 case BFD_RELOC_X86_64_GOTPLT64
:
13153 case BFD_RELOC_X86_64_PLTOFF64
:
13154 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
13155 case BFD_RELOC_X86_64_TLSDESC_CALL
:
13156 case BFD_RELOC_RVA
:
13157 case BFD_RELOC_VTABLE_ENTRY
:
13158 case BFD_RELOC_VTABLE_INHERIT
:
13160 case BFD_RELOC_32_SECREL
:
13162 code
= fixp
->fx_r_type
;
13164 case BFD_RELOC_X86_64_32S
:
13165 if (!fixp
->fx_pcrel
)
13167 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
13168 code
= fixp
->fx_r_type
;
13171 /* Fall through. */
13173 if (fixp
->fx_pcrel
)
13175 switch (fixp
->fx_size
)
13178 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
13179 _("can not do %d byte pc-relative relocation"),
13181 code
= BFD_RELOC_32_PCREL
;
13183 case 1: code
= BFD_RELOC_8_PCREL
; break;
13184 case 2: code
= BFD_RELOC_16_PCREL
; break;
13185 case 4: code
= BFD_RELOC_32_PCREL
; break;
13187 case 8: code
= BFD_RELOC_64_PCREL
; break;
13193 switch (fixp
->fx_size
)
13196 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
13197 _("can not do %d byte relocation"),
13199 code
= BFD_RELOC_32
;
13201 case 1: code
= BFD_RELOC_8
; break;
13202 case 2: code
= BFD_RELOC_16
; break;
13203 case 4: code
= BFD_RELOC_32
; break;
13205 case 8: code
= BFD_RELOC_64
; break;
13212 if ((code
== BFD_RELOC_32
13213 || code
== BFD_RELOC_32_PCREL
13214 || code
== BFD_RELOC_X86_64_32S
)
13216 && fixp
->fx_addsy
== GOT_symbol
)
13219 code
= BFD_RELOC_386_GOTPC
;
13221 code
= BFD_RELOC_X86_64_GOTPC32
;
13223 if ((code
== BFD_RELOC_64
|| code
== BFD_RELOC_64_PCREL
)
13225 && fixp
->fx_addsy
== GOT_symbol
)
13227 code
= BFD_RELOC_X86_64_GOTPC64
;
13230 rel
= XNEW (arelent
);
13231 rel
->sym_ptr_ptr
= XNEW (asymbol
*);
13232 *rel
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
13234 rel
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
13236 if (!use_rela_relocations
)
13238 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
13239 vtable entry to be used in the relocation's section offset. */
13240 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
13241 rel
->address
= fixp
->fx_offset
;
13242 #if defined (OBJ_COFF) && defined (TE_PE)
13243 else if (fixp
->fx_addsy
&& S_IS_WEAK (fixp
->fx_addsy
))
13244 rel
->addend
= fixp
->fx_addnumber
- (S_GET_VALUE (fixp
->fx_addsy
) * 2);
13249 /* Use the rela in 64bit mode. */
13252 if (disallow_64bit_reloc
)
13255 case BFD_RELOC_X86_64_DTPOFF64
:
13256 case BFD_RELOC_X86_64_TPOFF64
:
13257 case BFD_RELOC_64_PCREL
:
13258 case BFD_RELOC_X86_64_GOTOFF64
:
13259 case BFD_RELOC_X86_64_GOT64
:
13260 case BFD_RELOC_X86_64_GOTPCREL64
:
13261 case BFD_RELOC_X86_64_GOTPC64
:
13262 case BFD_RELOC_X86_64_GOTPLT64
:
13263 case BFD_RELOC_X86_64_PLTOFF64
:
13264 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
13265 _("cannot represent relocation type %s in x32 mode"),
13266 bfd_get_reloc_code_name (code
));
13272 if (!fixp
->fx_pcrel
)
13273 rel
->addend
= fixp
->fx_offset
;
13277 case BFD_RELOC_X86_64_PLT32
:
13278 case BFD_RELOC_X86_64_GOT32
:
13279 case BFD_RELOC_X86_64_GOTPCREL
:
13280 case BFD_RELOC_X86_64_GOTPCRELX
:
13281 case BFD_RELOC_X86_64_REX_GOTPCRELX
:
13282 case BFD_RELOC_X86_64_TLSGD
:
13283 case BFD_RELOC_X86_64_TLSLD
:
13284 case BFD_RELOC_X86_64_GOTTPOFF
:
13285 case BFD_RELOC_X86_64_GOTPC32_TLSDESC
:
13286 case BFD_RELOC_X86_64_TLSDESC_CALL
:
13287 rel
->addend
= fixp
->fx_offset
- fixp
->fx_size
;
13290 rel
->addend
= (section
->vma
13292 + fixp
->fx_addnumber
13293 + md_pcrel_from (fixp
));
13298 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
13299 if (rel
->howto
== NULL
)
13301 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
13302 _("cannot represent relocation type %s"),
13303 bfd_get_reloc_code_name (code
));
13304 /* Set howto to a garbage value so that we can keep going. */
13305 rel
->howto
= bfd_reloc_type_lookup (stdoutput
, BFD_RELOC_32
);
13306 gas_assert (rel
->howto
!= NULL
);
13312 #include "tc-i386-intel.c"
13315 tc_x86_parse_to_dw2regnum (expressionS
*exp
)
13317 int saved_naked_reg
;
13318 char saved_register_dot
;
13320 saved_naked_reg
= allow_naked_reg
;
13321 allow_naked_reg
= 1;
13322 saved_register_dot
= register_chars
['.'];
13323 register_chars
['.'] = '.';
13324 allow_pseudo_reg
= 1;
13325 expression_and_evaluate (exp
);
13326 allow_pseudo_reg
= 0;
13327 register_chars
['.'] = saved_register_dot
;
13328 allow_naked_reg
= saved_naked_reg
;
13330 if (exp
->X_op
== O_register
&& exp
->X_add_number
>= 0)
13332 if ((addressT
) exp
->X_add_number
< i386_regtab_size
)
13334 exp
->X_op
= O_constant
;
13335 exp
->X_add_number
= i386_regtab
[exp
->X_add_number
]
13336 .dw2_regnum
[flag_code
>> 1];
13339 exp
->X_op
= O_illegal
;
13344 tc_x86_frame_initial_instructions (void)
13346 static unsigned int sp_regno
[2];
13348 if (!sp_regno
[flag_code
>> 1])
13350 char *saved_input
= input_line_pointer
;
13351 char sp
[][4] = {"esp", "rsp"};
13354 input_line_pointer
= sp
[flag_code
>> 1];
13355 tc_x86_parse_to_dw2regnum (&exp
);
13356 gas_assert (exp
.X_op
== O_constant
);
13357 sp_regno
[flag_code
>> 1] = exp
.X_add_number
;
13358 input_line_pointer
= saved_input
;
13361 cfi_add_CFA_def_cfa (sp_regno
[flag_code
>> 1], -x86_cie_data_alignment
);
13362 cfi_add_CFA_offset (x86_dwarf2_return_column
, x86_cie_data_alignment
);
13366 x86_dwarf2_addr_size (void)
13368 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
13369 if (x86_elf_abi
== X86_64_X32_ABI
)
13372 return bfd_arch_bits_per_address (stdoutput
) / 8;
13376 i386_elf_section_type (const char *str
, size_t len
)
13378 if (flag_code
== CODE_64BIT
13379 && len
== sizeof ("unwind") - 1
13380 && strncmp (str
, "unwind", 6) == 0)
13381 return SHT_X86_64_UNWIND
;
13388 i386_solaris_fix_up_eh_frame (segT sec
)
13390 if (flag_code
== CODE_64BIT
)
13391 elf_section_type (sec
) = SHT_X86_64_UNWIND
;
13397 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
13401 exp
.X_op
= O_secrel
;
13402 exp
.X_add_symbol
= symbol
;
13403 exp
.X_add_number
= 0;
13404 emit_expr (&exp
, size
);
13408 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13409 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
13412 x86_64_section_letter (int letter
, const char **ptr_msg
)
13414 if (flag_code
== CODE_64BIT
)
13417 return SHF_X86_64_LARGE
;
13419 *ptr_msg
= _("bad .section directive: want a,l,w,x,M,S,G,T in string");
13422 *ptr_msg
= _("bad .section directive: want a,w,x,M,S,G,T in string");
13427 x86_64_section_word (char *str
, size_t len
)
13429 if (len
== 5 && flag_code
== CODE_64BIT
&& CONST_STRNEQ (str
, "large"))
13430 return SHF_X86_64_LARGE
;
13436 handle_large_common (int small ATTRIBUTE_UNUSED
)
13438 if (flag_code
!= CODE_64BIT
)
13440 s_comm_internal (0, elf_common_parse
);
13441 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
13445 static segT lbss_section
;
13446 asection
*saved_com_section_ptr
= elf_com_section_ptr
;
13447 asection
*saved_bss_section
= bss_section
;
13449 if (lbss_section
== NULL
)
13451 flagword applicable
;
13452 segT seg
= now_seg
;
13453 subsegT subseg
= now_subseg
;
13455 /* The .lbss section is for local .largecomm symbols. */
13456 lbss_section
= subseg_new (".lbss", 0);
13457 applicable
= bfd_applicable_section_flags (stdoutput
);
13458 bfd_set_section_flags (lbss_section
, applicable
& SEC_ALLOC
);
13459 seg_info (lbss_section
)->bss
= 1;
13461 subseg_set (seg
, subseg
);
13464 elf_com_section_ptr
= &_bfd_elf_large_com_section
;
13465 bss_section
= lbss_section
;
13467 s_comm_internal (0, elf_common_parse
);
13469 elf_com_section_ptr
= saved_com_section_ptr
;
13470 bss_section
= saved_bss_section
;
13473 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */