/* tc-i386.c -- Assemble code for the Intel 80386
   Copyright (C) 1989-2020 Free Software Foundation, Inc.

   This file is part of GAS, the GNU Assembler.

   GAS is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GAS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GAS; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
   02110-1301, USA.  */

/* Intel 80386 machine specific gas.
   Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
   x86_64 support by Jan Hubicka (jh@suse.cz)
   VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
   Bugs & suggestions are completely welcome.  This is free software.
   Please help us make it better.  */

#include "as.h"
#include "safe-ctype.h"
#include "subsegs.h"
#include "dwarf2dbg.h"
#include "dw2gencfi.h"
#include "elf/x86-64.h"
#include "opcodes/i386-init.h"

#ifdef HAVE_LIMITS_H
#include <limits.h>
#else
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#ifndef INT_MAX
#define INT_MAX (int) (((unsigned) (-1)) >> 1)
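/* Illustrative note (not in the original source): on a machine with
   32-bit int, (unsigned) (-1) is 0xffffffff, so the shift above yields
   0x7fffffff, the largest signed int in two's complement.  */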
#endif
#endif

#ifndef INFER_ADDR_PREFIX
#define INFER_ADDR_PREFIX 1
#endif

#ifndef DEFAULT_ARCH
#define DEFAULT_ARCH "i386"
#endif

#ifndef INLINE
#if __GNUC__ >= 2
#define INLINE __inline__
#else
#define INLINE
#endif
#endif

/* Prefixes will be emitted in the order defined below.
   WAIT_PREFIX must be the first prefix since FWAIT really is an
   instruction, and so must come before any prefixes.
   The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
   REP_PREFIX/HLE_PREFIX, LOCK_PREFIX.  */
#define WAIT_PREFIX	0
#define SEG_PREFIX	1
#define ADDR_PREFIX	2
#define DATA_PREFIX	3
#define REP_PREFIX	4
#define HLE_PREFIX	REP_PREFIX
#define BND_PREFIX	REP_PREFIX
#define LOCK_PREFIX	5
#define REX_PREFIX	6	/* must come last.  */
#define MAX_PREFIXES	7	/* max prefixes per opcode */
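/* Illustrative example (not in the original source): for
   "lock addl %eax, %fs:(%ebx)" the 0x64 segment override is stored in
   slot SEG_PREFIX and 0xf0 in slot LOCK_PREFIX, so on output the
   override precedes LOCK regardless of their order in the source.  */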

/* we define the syntax here (modulo base,index,scale syntax) */
#define REGISTER_PREFIX '%'
#define IMMEDIATE_PREFIX '$'
#define ABSOLUTE_PREFIX '*'

/* these are the instruction mnemonic suffixes in AT&T syntax or
   memory operand size in Intel syntax.  */
#define WORD_MNEM_SUFFIX  'w'
#define BYTE_MNEM_SUFFIX  'b'
#define SHORT_MNEM_SUFFIX 's'
#define LONG_MNEM_SUFFIX  'l'
#define QWORD_MNEM_SUFFIX 'q'
/* Intel Syntax.  Use a non-ascii letter since it never appears
   in instructions.  */
#define LONG_DOUBLE_MNEM_SUFFIX '\1'

#define END_OF_INSN '\0'

/* This matches the C -> StaticRounding alias in the opcode table.  */
#define commutative staticrounding

/*
  'templates' is for grouping together 'template' structures for opcodes
  of the same name.  This is only used for storing the insns in the grand
  ole hash table of insns.
  The templates themselves start at START and range up to (but not including)
  END.
  */
typedef struct
{
  const insn_template *start;
  const insn_template *end;
}
templates;

/* 386 operand encoding bytes:  see 386 book for details of this.  */
typedef struct
{
  unsigned int regmem;	/* codes register or memory operand */
  unsigned int reg;	/* codes register operand (or extended opcode) */
  unsigned int mode;	/* how to interpret regmem & reg */
}
modrm_byte;
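/* Illustrative example (not in the original source): for
   "addl %ecx, %ebx" with opcode 0x01, the ModRM byte has mode = 3
   (register direct), reg = 1 (ECX, the source) and regmem = 3 (EBX),
   giving (3 << 6) | (1 << 3) | 3 = 0xcb.  */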

/* x86-64 extension prefix.  */
typedef int rex_byte;

/* 386 opcode byte to code indirect addressing.  */
typedef struct
{
  unsigned base;
  unsigned index;
  unsigned scale;
}
sib_byte;
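/* Illustrative example (not in the original source): for a memory
   operand (%eax,%ebx,4) the SIB byte encodes base = 0 (EAX),
   index = 3 (EBX) and scale = 2 (the log2 of 4), emitted as
   (2 << 6) | (3 << 3) | 0 = 0x98.  */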

/* x86 arch names, types and features */
typedef struct
{
  const char *name;		/* arch name */
  unsigned int len;		/* arch string length */
  enum processor_type type;	/* arch type */
  i386_cpu_flags flags;		/* cpu feature flags */
  unsigned int skip;		/* show_arch should skip this.  */
}
arch_entry;

/* Used to turn off indicated flags.  */
typedef struct
{
  const char *name;		/* arch name */
  unsigned int len;		/* arch string length */
  i386_cpu_flags flags;		/* cpu feature flags */
}
noarch_entry;

static void update_code_flag (int, int);
static void set_code_flag (int);
static void set_16bit_gcc_code_flag (int);
static void set_intel_syntax (int);
static void set_intel_mnemonic (int);
static void set_allow_index_reg (int);
static void set_check (int);
static void set_cpu_arch (int);
#ifdef TE_PE
static void pe_directive_secrel (int);
#endif
static void signed_cons (int);
static char *output_invalid (int c);
static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
				    const char *);
static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
				       const char *);
static int i386_att_operand (char *);
static int i386_intel_operand (char *, int);
static int i386_intel_simplify (expressionS *);
static int i386_intel_parse_name (const char *, expressionS *);
static const reg_entry *parse_register (char *, char **);
static char *parse_insn (char *, char *);
static char *parse_operands (char *, const char *);
static void swap_operands (void);
static void swap_2_operands (int, int);
static enum flag_code i386_addressing_mode (void);
static void optimize_imm (void);
static void optimize_disp (void);
static const insn_template *match_template (char);
static int check_string (void);
static int process_suffix (void);
static int check_byte_reg (void);
static int check_long_reg (void);
static int check_qword_reg (void);
static int check_word_reg (void);
static int finalize_imm (void);
static int process_operands (void);
static const seg_entry *build_modrm_byte (void);
static void output_insn (void);
static void output_imm (fragS *, offsetT);
static void output_disp (fragS *, offsetT);
#ifndef I386COFF
static void s_bss (int);
#endif
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
static void handle_large_common (int small ATTRIBUTE_UNUSED);

/* GNU_PROPERTY_X86_ISA_1_USED.  */
static unsigned int x86_isa_1_used;
/* GNU_PROPERTY_X86_FEATURE_2_USED.  */
static unsigned int x86_feature_2_used;
/* Generate x86 used ISA and feature properties.  */
static unsigned int x86_used_note = DEFAULT_X86_USED_NOTE;
#endif

static const char *default_arch = DEFAULT_ARCH;

/* parse_register() returns this when a register alias cannot be used.  */
static const reg_entry bad_reg = { "<bad>", OPERAND_TYPE_NONE, 0, 0,
				   { Dw2Inval, Dw2Inval } };

/* This struct describes rounding control and SAE in the instruction.  */
struct RC_Operation
{
  enum rc_type
    {
      rne = 0,
      rd,
      ru,
      rz,
      saeonly
    } type;
  int operand;
};

static struct RC_Operation rc_op;

/* The struct describes masking, applied to OPERAND in the instruction.
   MASK is a pointer to the corresponding mask register.  ZEROING tells
   whether merging or zeroing mask is used.  */
struct Mask_Operation
{
  const reg_entry *mask;
  unsigned int zeroing;
  /* The operand where this operation is associated.  */
  int operand;
};

static struct Mask_Operation mask_op;

/* The struct describes broadcasting, applied to OPERAND.  TYPE is the
   broadcast factor and BYTES is the element size being broadcast.  */
struct Broadcast_Operation
{
  /* Type of broadcast: {1to2}, {1to4}, {1to8}, or {1to16}.  */
  int type;

  /* Index of broadcasted operand.  */
  int operand;

  /* Number of bytes to broadcast.  */
  int bytes;
};

static struct Broadcast_Operation broadcast_op;
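/* Illustrative example (not in the original source): for AT&T syntax
   "vaddps (%rax){1to16}, %zmm1, %zmm2" a single 4-byte float is
   broadcast to all sixteen elements of the 512-bit source, so OPERAND
   refers to the memory operand and BYTES is 4.  */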

/* VEX prefix.  */
typedef struct
{
  /* VEX prefix is either 2 byte or 3 byte.  EVEX is 4 byte.  */
  unsigned char bytes[4];
  unsigned int length;
  /* Destination or source register specifier.  */
  const reg_entry *register_specifier;
} vex_prefix;

/* 'md_assemble ()' gathers together information and puts it into a
   i386_insn.  */

union i386_op
{
  expressionS *disps;
  expressionS *imms;
  const reg_entry *regs;
};

enum i386_error
{
  operand_size_mismatch,
  operand_type_mismatch,
  register_type_mismatch,
  number_of_operands_mismatch,
  invalid_instruction_suffix,
  bad_imm4,
  unsupported_with_intel_mnemonic,
  unsupported_syntax,
  unsupported,
  invalid_vsib_address,
  invalid_vector_register_set,
  unsupported_vector_index_register,
  unsupported_broadcast,
  broadcast_needed,
  unsupported_masking,
  mask_not_on_destination,
  no_default_mask,
  unsupported_rc_sae,
  rc_sae_operand_not_last_imm,
  invalid_register_operand,
};

struct _i386_insn
{
  /* TM holds the template for the insn we're currently assembling.  */
  insn_template tm;

  /* SUFFIX holds the instruction size suffix for byte, word, dword
     or qword, if given.  */
  char suffix;

  /* OPERANDS gives the number of given operands.  */
  unsigned int operands;

  /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
     of given register, displacement, memory operands and immediate
     operands.  */
  unsigned int reg_operands, disp_operands, mem_operands, imm_operands;

  /* TYPES [i] is the type (see above #defines) which tells us how to
     use OP[i] for the corresponding operand.  */
  i386_operand_type types[MAX_OPERANDS];

  /* Displacement expression, immediate expression, or register for each
     operand.  */
  union i386_op op[MAX_OPERANDS];

  /* Flags for operands.  */
  unsigned int flags[MAX_OPERANDS];
#define Operand_PCrel 1
#define Operand_Mem   2

  /* Relocation type for operand.  */
  enum bfd_reloc_code_real reloc[MAX_OPERANDS];

  /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
     the base index byte below.  */
  const reg_entry *base_reg;
  const reg_entry *index_reg;
  unsigned int log2_scale_factor;

  /* SEG gives the seg_entries of this insn.  They are zero unless
     explicit segment overrides are given.  */
  const seg_entry *seg[2];

  /* Copied first memory operand string, for re-checking.  */
  char *memop1_string;

  /* PREFIX holds all the given prefix opcodes (usually null).
     PREFIXES is the number of prefix opcodes.  */
  unsigned int prefixes;
  unsigned char prefix[MAX_PREFIXES];

  /* Register is in low 3 bits of opcode.  */
  bfd_boolean short_form;

  /* The operand to a branch insn indicates an absolute branch.  */
  bfd_boolean jumpabsolute;

  /* Has MMX register operands.  */
  bfd_boolean has_regmmx;

  /* Has XMM register operands.  */
  bfd_boolean has_regxmm;

  /* Has YMM register operands.  */
  bfd_boolean has_regymm;

  /* Has ZMM register operands.  */
  bfd_boolean has_regzmm;

  /* Has GOTPC or TLS relocation.  */
  bfd_boolean has_gotpc_tls_reloc;

  /* RM and SIB are the modrm byte and the sib byte where the
     addressing modes of this insn are encoded.  */
  modrm_byte rm;
  rex_byte rex;
  rex_byte vrex;
  sib_byte sib;
  vex_prefix vex;

  /* Masking attributes.  */
  struct Mask_Operation *mask;

  /* Rounding control and SAE attributes.  */
  struct RC_Operation *rounding;

  /* Broadcasting attributes.  */
  struct Broadcast_Operation *broadcast;

  /* Compressed disp8*N attribute.  */
  unsigned int memshift;

  /* Prefer load or store in encoding.  */
  enum
    {
      dir_encoding_default = 0,
      dir_encoding_load,
      dir_encoding_store,
      dir_encoding_swap
    } dir_encoding;

  /* Prefer 8bit or 32bit displacement in encoding.  */
  enum
    {
      disp_encoding_default = 0,
      disp_encoding_8bit,
      disp_encoding_32bit
    } disp_encoding;

  /* Prefer the REX byte in encoding.  */
  bfd_boolean rex_encoding;

  /* Disable instruction size optimization.  */
  bfd_boolean no_optimize;

  /* How to encode vector instructions.  */
  enum
    {
      vex_encoding_default = 0,
      vex_encoding_vex,
      vex_encoding_vex3,
      vex_encoding_evex,
      vex_encoding_error
    } vec_encoding;

  /* REP prefix.  */
  const char *rep_prefix;

  /* HLE prefix.  */
  const char *hle_prefix;

  /* Have BND prefix.  */
  const char *bnd_prefix;

  /* Have NOTRACK prefix.  */
  const char *notrack_prefix;

  /* Error message.  */
  enum i386_error error;
};

typedef struct _i386_insn i386_insn;

/* Link RC type with corresponding string, that'll be looked for in
   asm.  */
struct RC_name
{
  enum rc_type type;
  const char *name;
  unsigned int len;
};

static const struct RC_name RC_NamesTable[] =
{
  { rne, STRING_COMMA_LEN ("rn-sae") },
  { rd,  STRING_COMMA_LEN ("rd-sae") },
  { ru,  STRING_COMMA_LEN ("ru-sae") },
  { rz,  STRING_COMMA_LEN ("rz-sae") },
  { saeonly, STRING_COMMA_LEN ("sae") },
};
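/* Illustrative example (not in the original source): when an operand
   string such as "vaddpd {rz-sae}, %zmm4, %zmm5, %zmm6" is parsed, the
   table above maps the name "rz-sae" to rz, and rc_op records that
   type together with the operand the decoration applies to.  */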

/* List of chars besides those in app.c:symbol_chars that can start an
   operand.  Used to prevent the scrubber eating vital white-space.  */
const char extra_symbol_chars[] = "*%-([{}"
#ifdef LEX_AT
	"@"
#endif
#ifdef LEX_QM
	"?"
#endif
	;

#if (defined (TE_I386AIX)				\
     || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))	\
	 && !defined (TE_GNU)				\
	 && !defined (TE_LINUX)				\
	 && !defined (TE_NACL)				\
	 && !defined (TE_FreeBSD)			\
	 && !defined (TE_DragonFly)			\
	 && !defined (TE_NetBSD)))
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  The option
   --divide will remove '/' from this list.  */
const char *i386_comment_chars = "#/";
#define SVR4_COMMENT_CHARS 1
#define PREFIX_SEPARATOR '\\'

#else
const char *i386_comment_chars = "#";
#define PREFIX_SEPARATOR '/'
#endif

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.
   Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.
   Also note that comments started like this one will always work if
   '/' isn't otherwise defined.  */
const char line_comment_chars[] = "#/";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant from exp in floating point
   nums.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant
   As in 0f12.456
   or 0d1.2345e12.  */
const char FLT_CHARS[] = "fFdDxX";

/* Tables for lexical analysis.  */
static char mnemonic_chars[256];
static char register_chars[256];
static char operand_chars[256];
static char identifier_chars[256];
static char digit_chars[256];

/* Lexical macros.  */
#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
#define is_operand_char(x) (operand_chars[(unsigned char) x])
#define is_register_char(x) (register_chars[(unsigned char) x])
#define is_space_char(x) ((x) == ' ')
#define is_identifier_char(x) (identifier_chars[(unsigned char) x])
#define is_digit_char(x) (digit_chars[(unsigned char) x])

/* All non-digit non-letter characters that may occur in an operand.  */
static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";

/* md_assemble() always leaves the strings it's passed unaltered.  To
   effect this we maintain a stack of saved characters that we've smashed
   with '\0's (indicating end of strings for various sub-fields of the
   assembler instruction).  */
static char save_stack[32];
static char *save_stack_p;
#define END_STRING_AND_SAVE(s) \
	do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
#define RESTORE_END_STRING(s) \
	do { *(s) = *--save_stack_p; } while (0)
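/* Illustrative usage (not in the original source; end_of_mnemonic is a
   hypothetical pointer into the insn string):

     END_STRING_AND_SAVE (end_of_mnemonic);   (smash to '\0', save char)
     ... look up the now NUL-terminated mnemonic ...
     RESTORE_END_STRING (end_of_mnemonic);    (put the char back)

   Saves and restores must nest in LIFO order within one instruction.  */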

/* The instruction we're assembling.  */
static i386_insn i;

/* Possible templates for current insn.  */
static const templates *current_templates;

/* Per instruction expressionS buffers: max displacements & immediates.  */
static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];

/* Current operand we are working on.  */
static int this_operand = -1;

/* We support three code sizes, distinguished by the FLAG_CODE variable
   (the x32 variant of 64-bit mode is additionally distinguished via
   x86_elf_abi below).  */

enum flag_code {
	CODE_32BIT,
	CODE_16BIT,
	CODE_64BIT };

static enum flag_code flag_code;
static unsigned int object_64bit;
static unsigned int disallow_64bit_reloc;
static int use_rela_relocations = 0;
/* __tls_get_addr/___tls_get_addr symbol for TLS.  */
static const char *tls_get_addr;

#if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
     || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
     || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))

/* The ELF ABI to use.  */
enum x86_elf_abi
{
  I386_ABI,
  X86_64_ABI,
  X86_64_X32_ABI
};

static enum x86_elf_abi x86_elf_abi = I386_ABI;
#endif

#if defined (TE_PE) || defined (TE_PEP)
/* Use big object file format.  */
static int use_big_obj = 0;
#endif

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* 1 if generating code for a shared library.  */
static int shared = 0;
#endif

/* 1 for intel syntax,
   0 if att syntax.  */
static int intel_syntax = 0;

static enum x86_64_isa
{
  amd64 = 1,	/* AMD64 ISA.  */
  intel64	/* Intel64 ISA.  */
} isa64;

/* 1 for intel mnemonic,
   0 if att mnemonic.  */
static int intel_mnemonic = !SYSV386_COMPAT;

/* 1 if pseudo registers are permitted.  */
static int allow_pseudo_reg = 0;

/* 1 if register prefix % not required.  */
static int allow_naked_reg = 0;

/* 1 if the assembler should add BND prefix for all control-transferring
   instructions supporting it, even if this prefix wasn't specified
   explicitly.  */
static int add_bnd_prefix = 0;

/* 1 if pseudo index register, eiz/riz, is allowed.  */
static int allow_index_reg = 0;

/* 1 if the assembler should ignore LOCK prefix, even if it was
   specified explicitly.  */
static int omit_lock_prefix = 0;

/* 1 if the assembler should encode lfence, mfence, and sfence as
   "lock addl $0, (%{re}sp)".  */
static int avoid_fence = 0;

/* 1 if lfence should be inserted after every load.  */
static int lfence_after_load = 0;

/* Non-zero if lfence should be inserted before indirect branch.  */
static enum lfence_before_indirect_branch_kind
  {
    lfence_branch_none = 0,
    lfence_branch_register,
    lfence_branch_memory,
    lfence_branch_all
  }
lfence_before_indirect_branch;

/* Non-zero if lfence should be inserted before ret.  */
static enum lfence_before_ret_kind
  {
    lfence_before_ret_none = 0,
    lfence_before_ret_not,
    lfence_before_ret_or,
    lfence_before_ret_shl
  }
lfence_before_ret;

/* Records whether the previous item seen was an ordinary instruction,
   a data directive such as .byte, or a standalone prefix.  */
static struct
  {
    segT seg;
    const char *file;
    const char *name;
    unsigned int line;
    enum last_insn_kind
      {
	last_insn_other = 0,
	last_insn_directive,
	last_insn_prefix
      } kind;
  } last_insn;

/* 1 if the assembler should generate relax relocations.  */

static int generate_relax_relocations
  = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS;

static enum check_kind
  {
    check_none = 0,
    check_warning,
    check_error
  }
sse_check, operand_check = check_warning;

/* Non-zero if branches should be aligned within power of 2 boundary.  */
static int align_branch_power = 0;

/* Types of branches to align.  */
enum align_branch_kind
  {
    align_branch_none = 0,
    align_branch_jcc = 1,
    align_branch_fused = 2,
    align_branch_jmp = 3,
    align_branch_call = 4,
    align_branch_indirect = 5,
    align_branch_ret = 6
  };

/* Type bits of branches to align.  */
enum align_branch_bit
  {
    align_branch_jcc_bit = 1 << align_branch_jcc,
    align_branch_fused_bit = 1 << align_branch_fused,
    align_branch_jmp_bit = 1 << align_branch_jmp,
    align_branch_call_bit = 1 << align_branch_call,
    align_branch_indirect_bit = 1 << align_branch_indirect,
    align_branch_ret_bit = 1 << align_branch_ret
  };

static unsigned int align_branch = (align_branch_jcc_bit
				    | align_branch_fused_bit
				    | align_branch_jmp_bit);

/* Types of condition jump used by macro-fusion.  */
enum mf_jcc_kind
  {
    mf_jcc_jo = 0,  /* base opcode 0x70  */
    mf_jcc_jc,      /* base opcode 0x72  */
    mf_jcc_je,      /* base opcode 0x74  */
    mf_jcc_jna,     /* base opcode 0x76  */
    mf_jcc_js,      /* base opcode 0x78  */
    mf_jcc_jp,      /* base opcode 0x7a  */
    mf_jcc_jl,      /* base opcode 0x7c  */
    mf_jcc_jle,     /* base opcode 0x7e  */
  };

/* Types of compare flag-modifying instructions used by macro-fusion.  */
enum mf_cmp_kind
  {
    mf_cmp_test_and,  /* test/cmp */
    mf_cmp_alu_cmp,   /* add/sub/cmp */
    mf_cmp_incdec     /* inc/dec */
  };

/* The maximum padding size for fused jcc.  A CMP-like instruction can
   be 9 bytes and a jcc can be 6 bytes.  Leave room just in case for
   prefixes.  */
#define MAX_FUSED_JCC_PADDING_SIZE 20

/* The maximum number of prefixes added for an instruction.  */
static unsigned int align_branch_prefix_size = 5;

/* Optimization:
   1. Clear the REX_W bit with register operand if possible.
   2. Above plus use 128bit vector instruction to clear the full vector
      register.
 */
static int optimize = 0;

/* Optimization:
   1. Clear the REX_W bit with register operand if possible.
   2. Above plus use 128bit vector instruction to clear the full vector
      register.
   3. Above plus optimize "test{q,l,w} $imm8,%r{64,32,16}" to
      "testb $imm7,%r8".
 */
static int optimize_for_space = 0;

/* Register prefix used for error message.  */
static const char *register_prefix = "%";

/* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
   leave, push, and pop instructions so that gcc has the same stack
   frame as in 32 bit mode.  */
static char stackop_size = '\0';

/* Non-zero to optimize code alignment.  */
int optimize_align_code = 1;

/* Non-zero to quieten some warnings.  */
static int quiet_warnings = 0;

/* CPU name.  */
static const char *cpu_arch_name = NULL;
static char *cpu_sub_arch_name = NULL;

/* CPU feature flags.  */
static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;

/* If we have selected a cpu we are generating instructions for.  */
static int cpu_arch_tune_set = 0;

/* Cpu we are generating instructions for.  */
enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;

/* CPU feature flags of cpu we are generating instructions for.  */
static i386_cpu_flags cpu_arch_tune_flags;

/* CPU instruction set architecture used.  */
enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;

/* CPU feature flags of instruction set architecture used.  */
i386_cpu_flags cpu_arch_isa_flags;

/* If set, conditional jumps are not automatically promoted to handle
   larger than a byte offset.  */
static unsigned int no_cond_jump_promotion = 0;

/* Encode SSE instructions with VEX prefix.  */
static unsigned int sse2avx;

/* Encode scalar AVX instructions with specific vector length.  */
static enum
  {
    vex128 = 0,
    vex256
  } avxscalar;

/* Encode VEX WIG instructions with specific vex.w.  */
static enum
  {
    vexw0 = 0,
    vexw1
  } vexwig;

/* Encode scalar EVEX LIG instructions with specific vector length.  */
static enum
  {
    evexl128 = 0,
    evexl256,
    evexl512
  } evexlig;

/* Encode EVEX WIG instructions with specific evex.w.  */
static enum
  {
    evexw0 = 0,
    evexw1
  } evexwig;

/* Value to encode in EVEX RC bits, for SAE-only instructions.  */
static enum rc_type evexrcig = rne;

/* Pre-defined "_GLOBAL_OFFSET_TABLE_".  */
static symbolS *GOT_symbol;

/* The dwarf2 return column, adjusted for 32 or 64 bit.  */
unsigned int x86_dwarf2_return_column;

/* The dwarf2 data alignment, adjusted for 32 or 64 bit.  */
int x86_cie_data_alignment;

/* Interface to relax_segment.
   There are 3 major relax states for 386 jump insns because the
   different types of jumps add different sizes to frags when we're
   figuring out what sort of jump to choose to reach a given label.

   BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING are used to align
   branches which are handled by md_estimate_size_before_relax() and
   i386_generic_table_relax_frag().  */

/* Types.  */
#define UNCOND_JUMP 0
#define COND_JUMP 1
#define COND_JUMP86 2
#define BRANCH_PADDING 3
#define BRANCH_PREFIX 4
#define FUSED_JCC_PADDING 5

/* Sizes.  */
#define CODE16	1
#define SMALL	0
#define SMALL16 (SMALL | CODE16)
#define BIG	2
#define BIG16	(BIG | CODE16)

#ifndef INLINE
#ifdef __GNUC__
#define INLINE __inline__
#else
#define INLINE
#endif
#endif

#define ENCODE_RELAX_STATE(type, size) \
  ((relax_substateT) (((type) << 2) | (size)))
#define TYPE_FROM_RELAX_STATE(s) \
  ((s) >> 2)
#define DISP_SIZE_FROM_RELAX_STATE(s) \
    ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
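/* Illustrative example (not in the original source):
   ENCODE_RELAX_STATE (COND_JUMP, BIG) is (1 << 2) | 2 = 6;
   TYPE_FROM_RELAX_STATE (6) recovers 1 (COND_JUMP), and
   DISP_SIZE_FROM_RELAX_STATE (6) yields 4, the size in bytes of the
   32-bit displacement used by the promoted conditional jump.  */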

/* This table is used by relax_frag to promote short jumps to long
   ones where necessary.  SMALL (short) jumps may be promoted to BIG
   (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long).  We
   don't allow a short jump in a 32 bit code segment to be promoted to
   a 16 bit offset jump because it's slower (requires data size
   prefix), and doesn't work, unless the destination is in the bottom
   64k of the code segment (The top 16 bits of eip are zeroed).  */

const relax_typeS md_relax_table[] =
{
  /* The fields are:
     1) most positive reach of this state,
     2) most negative reach of this state,
     3) how many bytes this mode will have in the variable part of the frag
     4) which index into the table to try if we can't fit into this one.  */

  /* UNCOND_JUMP states.  */
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
  /* dword jmp adds 4 bytes to frag:
     0 extra opcode bytes, 4 displacement bytes.  */
  {0, 0, 4, 0},
  /* word jmp adds 2 bytes to frag:
     0 extra opcode bytes, 2 displacement bytes.  */
  {0, 0, 2, 0},

  /* COND_JUMP states.  */
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
  /* dword conditionals add 5 bytes to frag:
     1 extra opcode byte, 4 displacement bytes.  */
  {0, 0, 5, 0},
  /* word conditionals add 3 bytes to frag:
     1 extra opcode byte, 2 displacement bytes.  */
  {0, 0, 3, 0},

  /* COND_JUMP86 states.  */
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
  {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
  /* dword conditionals add 5 bytes to frag:
     1 extra opcode byte, 4 displacement bytes.  */
  {0, 0, 5, 0},
  /* word conditionals add 4 bytes to frag:
     1 displacement byte and a 3 byte long branch insn.  */
  {0, 0, 4, 0}
};

static const arch_entry cpu_arch[] =
{
  /* Do not replace the first two entries - i386_target_format()
     relies on them being there in this order.  */
  { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
    CPU_GENERIC32_FLAGS, 0 },
  { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
    CPU_GENERIC64_FLAGS, 0 },
  { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
    CPU_NONE_FLAGS, 0 },
  { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
    CPU_I186_FLAGS, 0 },
  { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
    CPU_I286_FLAGS, 0 },
  { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
    CPU_I386_FLAGS, 0 },
  { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
    CPU_I486_FLAGS, 0 },
  { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
    CPU_I586_FLAGS, 0 },
  { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
    CPU_I686_FLAGS, 0 },
  { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
    CPU_I586_FLAGS, 0 },
  { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
    CPU_PENTIUMPRO_FLAGS, 0 },
  { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
    CPU_P2_FLAGS, 0 },
  { STRING_COMMA_LEN ("pentiumiii"), PROCESSOR_PENTIUMPRO,
    CPU_P3_FLAGS, 0 },
  { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
    CPU_P4_FLAGS, 0 },
  { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
    CPU_CORE_FLAGS, 0 },
  { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
    CPU_NOCONA_FLAGS, 0 },
  { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
    CPU_CORE_FLAGS, 1 },
  { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
    CPU_CORE_FLAGS, 0 },
  { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
    CPU_CORE2_FLAGS, 1 },
  { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
    CPU_CORE2_FLAGS, 0 },
  { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
    CPU_COREI7_FLAGS, 0 },
  { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
    CPU_L1OM_FLAGS, 0 },
  { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
    CPU_K1OM_FLAGS, 0 },
  { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU,
    CPU_IAMCU_FLAGS, 0 },
  { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
    CPU_K6_FLAGS, 0 },
  { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
    CPU_K6_2_FLAGS, 0 },
  { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
    CPU_ATHLON_FLAGS, 0 },
  { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
    CPU_K8_FLAGS, 1 },
  { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
    CPU_K8_FLAGS, 0 },
  { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
    CPU_K8_FLAGS, 0 },
  { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
    CPU_AMDFAM10_FLAGS, 0 },
  { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
    CPU_BDVER1_FLAGS, 0 },
  { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
    CPU_BDVER2_FLAGS, 0 },
  { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
    CPU_BDVER3_FLAGS, 0 },
  { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD,
    CPU_BDVER4_FLAGS, 0 },
  { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER,
    CPU_ZNVER1_FLAGS, 0 },
  { STRING_COMMA_LEN ("znver2"), PROCESSOR_ZNVER,
    CPU_ZNVER2_FLAGS, 0 },
  { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
    CPU_BTVER1_FLAGS, 0 },
  { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
    CPU_BTVER2_FLAGS, 0 },
  { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
    CPU_8087_FLAGS, 0 },
  { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
    CPU_287_FLAGS, 0 },
  { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
    CPU_387_FLAGS, 0 },
  { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN,
    CPU_687_FLAGS, 0 },
  { STRING_COMMA_LEN (".cmov"), PROCESSOR_UNKNOWN,
    CPU_CMOV_FLAGS, 0 },
  { STRING_COMMA_LEN (".fxsr"), PROCESSOR_UNKNOWN,
    CPU_FXSR_FLAGS, 0 },
  { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
    CPU_MMX_FLAGS, 0 },
  { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
    CPU_SSE_FLAGS, 0 },
  { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
    CPU_SSE2_FLAGS, 0 },
  { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
    CPU_SSE3_FLAGS, 0 },
  { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
    CPU_SSE4A_FLAGS, 0 },
  { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
    CPU_SSSE3_FLAGS, 0 },
  { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
    CPU_SSE4_1_FLAGS, 0 },
  { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
    CPU_SSE4_2_FLAGS, 0 },
  { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
    CPU_SSE4_2_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
    CPU_AVX_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
    CPU_AVX2_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN,
    CPU_AVX512F_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN,
    CPU_AVX512CD_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN,
    CPU_AVX512ER_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN,
    CPU_AVX512PF_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN,
    CPU_AVX512DQ_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN,
    CPU_AVX512BW_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN,
    CPU_AVX512VL_FLAGS, 0 },
  { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
    CPU_VMX_FLAGS, 0 },
  { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
    CPU_VMFUNC_FLAGS, 0 },
  { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
    CPU_SMX_FLAGS, 0 },
  { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
    CPU_XSAVE_FLAGS, 0 },
  { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
    CPU_XSAVEOPT_FLAGS, 0 },
  { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN,
    CPU_XSAVEC_FLAGS, 0 },
  { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN,
    CPU_XSAVES_FLAGS, 0 },
  { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
    CPU_AES_FLAGS, 0 },
  { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
    CPU_PCLMUL_FLAGS, 0 },
  { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
    CPU_PCLMUL_FLAGS, 1 },
  { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
    CPU_FSGSBASE_FLAGS, 0 },
  { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
    CPU_RDRND_FLAGS, 0 },
  { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
    CPU_F16C_FLAGS, 0 },
  { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
    CPU_BMI2_FLAGS, 0 },
  { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
    CPU_FMA_FLAGS, 0 },
  { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
    CPU_FMA4_FLAGS, 0 },
  { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
    CPU_XOP_FLAGS, 0 },
  { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
    CPU_LWP_FLAGS, 0 },
  { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
    CPU_MOVBE_FLAGS, 0 },
  { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
    CPU_CX16_FLAGS, 0 },
  { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
    CPU_EPT_FLAGS, 0 },
  { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
    CPU_LZCNT_FLAGS, 0 },
  { STRING_COMMA_LEN (".popcnt"), PROCESSOR_UNKNOWN,
    CPU_POPCNT_FLAGS, 0 },
  { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
    CPU_HLE_FLAGS, 0 },
  { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
    CPU_RTM_FLAGS, 0 },
  { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
    CPU_INVPCID_FLAGS, 0 },
  { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
    CPU_CLFLUSH_FLAGS, 0 },
  { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
    CPU_NOP_FLAGS, 0 },
  { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
    CPU_SYSCALL_FLAGS, 0 },
  { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
    CPU_RDTSCP_FLAGS, 0 },
  { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
    CPU_3DNOW_FLAGS, 0 },
  { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
    CPU_3DNOWA_FLAGS, 0 },
  { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
    CPU_PADLOCK_FLAGS, 0 },
  { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
    CPU_SVME_FLAGS, 1 },
  { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
    CPU_SVME_FLAGS, 0 },
  { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
    CPU_SSE4A_FLAGS, 0 },
  { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
    CPU_ABM_FLAGS, 0 },
  { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
    CPU_BMI_FLAGS, 0 },
  { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
    CPU_TBM_FLAGS, 0 },
  { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
    CPU_ADX_FLAGS, 0 },
  { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
    CPU_RDSEED_FLAGS, 0 },
  { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
    CPU_PRFCHW_FLAGS, 0 },
  { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN,
    CPU_SMAP_FLAGS, 0 },
  { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN,
    CPU_MPX_FLAGS, 0 },
  { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN,
    CPU_SHA_FLAGS, 0 },
  { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN,
    CPU_CLFLUSHOPT_FLAGS, 0 },
  { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN,
    CPU_PREFETCHWT1_FLAGS, 0 },
  { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN,
    CPU_SE1_FLAGS, 0 },
  { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN,
    CPU_CLWB_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN,
    CPU_AVX512IFMA_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN,
    CPU_AVX512VBMI_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512_4fmaps"), PROCESSOR_UNKNOWN,
    CPU_AVX512_4FMAPS_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512_4vnniw"), PROCESSOR_UNKNOWN,
    CPU_AVX512_4VNNIW_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512_vpopcntdq"), PROCESSOR_UNKNOWN,
    CPU_AVX512_VPOPCNTDQ_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512_vbmi2"), PROCESSOR_UNKNOWN,
    CPU_AVX512_VBMI2_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512_vnni"), PROCESSOR_UNKNOWN,
    CPU_AVX512_VNNI_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512_bitalg"), PROCESSOR_UNKNOWN,
    CPU_AVX512_BITALG_FLAGS, 0 },
  { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN,
    CPU_CLZERO_FLAGS, 0 },
  { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN,
    CPU_MWAITX_FLAGS, 0 },
  { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN,
    CPU_OSPKE_FLAGS, 0 },
  { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN,
    CPU_RDPID_FLAGS, 0 },
  { STRING_COMMA_LEN (".ptwrite"), PROCESSOR_UNKNOWN,
    CPU_PTWRITE_FLAGS, 0 },
  { STRING_COMMA_LEN (".ibt"), PROCESSOR_UNKNOWN,
    CPU_IBT_FLAGS, 0 },
  { STRING_COMMA_LEN (".shstk"), PROCESSOR_UNKNOWN,
    CPU_SHSTK_FLAGS, 0 },
  { STRING_COMMA_LEN (".gfni"), PROCESSOR_UNKNOWN,
    CPU_GFNI_FLAGS, 0 },
  { STRING_COMMA_LEN (".vaes"), PROCESSOR_UNKNOWN,
    CPU_VAES_FLAGS, 0 },
  { STRING_COMMA_LEN (".vpclmulqdq"), PROCESSOR_UNKNOWN,
    CPU_VPCLMULQDQ_FLAGS, 0 },
  { STRING_COMMA_LEN (".wbnoinvd"), PROCESSOR_UNKNOWN,
    CPU_WBNOINVD_FLAGS, 0 },
  { STRING_COMMA_LEN (".pconfig"), PROCESSOR_UNKNOWN,
    CPU_PCONFIG_FLAGS, 0 },
  { STRING_COMMA_LEN (".waitpkg"), PROCESSOR_UNKNOWN,
    CPU_WAITPKG_FLAGS, 0 },
  { STRING_COMMA_LEN (".cldemote"), PROCESSOR_UNKNOWN,
    CPU_CLDEMOTE_FLAGS, 0 },
  { STRING_COMMA_LEN (".movdiri"), PROCESSOR_UNKNOWN,
    CPU_MOVDIRI_FLAGS, 0 },
  { STRING_COMMA_LEN (".movdir64b"), PROCESSOR_UNKNOWN,
    CPU_MOVDIR64B_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512_bf16"), PROCESSOR_UNKNOWN,
    CPU_AVX512_BF16_FLAGS, 0 },
  { STRING_COMMA_LEN (".avx512_vp2intersect"), PROCESSOR_UNKNOWN,
    CPU_AVX512_VP2INTERSECT_FLAGS, 0 },
  { STRING_COMMA_LEN (".enqcmd"), PROCESSOR_UNKNOWN,
    CPU_ENQCMD_FLAGS, 0 },
  { STRING_COMMA_LEN (".serialize"), PROCESSOR_UNKNOWN,
    CPU_SERIALIZE_FLAGS, 0 },
  { STRING_COMMA_LEN (".rdpru"), PROCESSOR_UNKNOWN,
    CPU_RDPRU_FLAGS, 0 },
  { STRING_COMMA_LEN (".mcommit"), PROCESSOR_UNKNOWN,
    CPU_MCOMMIT_FLAGS, 0 },
  { STRING_COMMA_LEN (".sev_es"), PROCESSOR_UNKNOWN,
    CPU_SEV_ES_FLAGS, 0 },
  { STRING_COMMA_LEN (".tsxldtrk"), PROCESSOR_UNKNOWN,
    CPU_TSXLDTRK_FLAGS, 0 },
};
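/* Illustrative usage (not in the original source): the table above
   backs the .arch directive, so ".arch i686" restricts subsequent code
   to PROCESSOR_PENTIUMPRO features, while a name starting with '.'
   extends the current base, e.g. ".arch .sse4.2".  */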

static const noarch_entry cpu_noarch[] =
{
  { STRING_COMMA_LEN ("no87"), CPU_ANY_X87_FLAGS },
  { STRING_COMMA_LEN ("no287"), CPU_ANY_287_FLAGS },
  { STRING_COMMA_LEN ("no387"), CPU_ANY_387_FLAGS },
  { STRING_COMMA_LEN ("no687"), CPU_ANY_687_FLAGS },
  { STRING_COMMA_LEN ("nocmov"), CPU_ANY_CMOV_FLAGS },
  { STRING_COMMA_LEN ("nofxsr"), CPU_ANY_FXSR_FLAGS },
  { STRING_COMMA_LEN ("nommx"), CPU_ANY_MMX_FLAGS },
  { STRING_COMMA_LEN ("nosse"), CPU_ANY_SSE_FLAGS },
  { STRING_COMMA_LEN ("nosse2"), CPU_ANY_SSE2_FLAGS },
  { STRING_COMMA_LEN ("nosse3"), CPU_ANY_SSE3_FLAGS },
  { STRING_COMMA_LEN ("nosse4a"), CPU_ANY_SSE4A_FLAGS },
  { STRING_COMMA_LEN ("nossse3"), CPU_ANY_SSSE3_FLAGS },
  { STRING_COMMA_LEN ("nosse4.1"), CPU_ANY_SSE4_1_FLAGS },
  { STRING_COMMA_LEN ("nosse4.2"), CPU_ANY_SSE4_2_FLAGS },
  { STRING_COMMA_LEN ("nosse4"), CPU_ANY_SSE4_1_FLAGS },
  { STRING_COMMA_LEN ("noavx"), CPU_ANY_AVX_FLAGS },
  { STRING_COMMA_LEN ("noavx2"), CPU_ANY_AVX2_FLAGS },
  { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS },
  { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS },
  { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS },
  { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS },
  { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS },
  { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS },
  { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS },
  { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS },
  { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS },
  { STRING_COMMA_LEN ("noavx512_4fmaps"), CPU_ANY_AVX512_4FMAPS_FLAGS },
  { STRING_COMMA_LEN ("noavx512_4vnniw"), CPU_ANY_AVX512_4VNNIW_FLAGS },
  { STRING_COMMA_LEN ("noavx512_vpopcntdq"), CPU_ANY_AVX512_VPOPCNTDQ_FLAGS },
  { STRING_COMMA_LEN ("noavx512_vbmi2"), CPU_ANY_AVX512_VBMI2_FLAGS },
  { STRING_COMMA_LEN ("noavx512_vnni"), CPU_ANY_AVX512_VNNI_FLAGS },
  { STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS },
  { STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS },
  { STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS },
  { STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS },
  { STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS },
  { STRING_COMMA_LEN ("noavx512_bf16"), CPU_ANY_AVX512_BF16_FLAGS },
  { STRING_COMMA_LEN ("noavx512_vp2intersect"),
    CPU_ANY_AVX512_VP2INTERSECT_FLAGS },
  { STRING_COMMA_LEN ("noenqcmd"), CPU_ANY_ENQCMD_FLAGS },
  { STRING_COMMA_LEN ("noserialize"), CPU_ANY_SERIALIZE_FLAGS },
  { STRING_COMMA_LEN ("notsxldtrk"), CPU_ANY_TSXLDTRK_FLAGS },
};

#ifdef I386COFF
/* Like s_lcomm_internal in gas/read.c but the alignment string
   is allowed to be optional.  */

static symbolS *
pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
{
  addressT align = 0;

  SKIP_WHITESPACE ();

  if (needs_align
      && *input_line_pointer == ',')
    {
      align = parse_align (needs_align - 1);

      if (align == (addressT) -1)
	return NULL;
    }
  else
    {
      if (size >= 8)
	align = 3;
      else if (size >= 4)
	align = 2;
      else if (size >= 2)
	align = 1;
      else
	align = 0;
    }

  bss_alloc (symbolP, size, align);
  return symbolP;
}

static void
pe_lcomm (int needs_align)
{
  s_comm_internal (needs_align * 2, pe_lcomm_internal);
}
#endif

const pseudo_typeS md_pseudo_table[] =
{
#if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
  {"align", s_align_bytes, 0},
#else
  {"align", s_align_ptwo, 0},
#endif
  {"arch", set_cpu_arch, 0},
#ifndef I386COFF
  {"bss", s_bss, 0},
#else
  {"lcomm", pe_lcomm, 1},
#endif
  {"ffloat", float_cons, 'f'},
  {"dfloat", float_cons, 'd'},
  {"tfloat", float_cons, 'x'},
  {"value", cons, 2},
  {"slong", signed_cons, 4},
  {"noopt", s_ignore, 0},
  {"optim", s_ignore, 0},
  {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
  {"code16", set_code_flag, CODE_16BIT},
  {"code32", set_code_flag, CODE_32BIT},
#ifdef BFD64
  {"code64", set_code_flag, CODE_64BIT},
#endif
  {"intel_syntax", set_intel_syntax, 1},
  {"att_syntax", set_intel_syntax, 0},
  {"intel_mnemonic", set_intel_mnemonic, 1},
  {"att_mnemonic", set_intel_mnemonic, 0},
  {"allow_index_reg", set_allow_index_reg, 1},
  {"disallow_index_reg", set_allow_index_reg, 0},
  {"sse_check", set_check, 0},
  {"operand_check", set_check, 1},
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  {"largecomm", handle_large_common, 0},
#else
  {"file", dwarf2_directive_file, 0},
  {"loc", dwarf2_directive_loc, 0},
  {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
#endif
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif
  {0, 0, 0}
};

/* For interface with expression ().  */
extern char *input_line_pointer;

/* Hash table for instruction mnemonic lookup.  */
static struct hash_control *op_hash;

/* Hash table for register lookup.  */
static struct hash_control *reg_hash;

/* Various efficient no-op patterns for aligning code labels.
   Note: Don't try to assemble the instructions in the comments.
   0L and 0w are not legal.  */
static const unsigned char f32_1[] =
  {0x90};				/* nop			*/
static const unsigned char f32_2[] =
  {0x66,0x90};				/* xchg %ax,%ax		*/
static const unsigned char f32_3[] =
  {0x8d,0x76,0x00};			/* leal 0(%esi),%esi	*/
static const unsigned char f32_4[] =
  {0x8d,0x74,0x26,0x00};		/* leal 0(%esi,1),%esi	*/
static const unsigned char f32_6[] =
  {0x8d,0xb6,0x00,0x00,0x00,0x00};	/* leal 0L(%esi),%esi	*/
static const unsigned char f32_7[] =
  {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};	/* leal 0L(%esi,1),%esi */
static const unsigned char f16_3[] =
  {0x8d,0x74,0x00};			/* lea 0(%si),%si	*/
static const unsigned char f16_4[] =
  {0x8d,0xb4,0x00,0x00};		/* lea 0W(%si),%si	*/
static const unsigned char jump_disp8[] =
  {0xeb};				/* jmp disp8	       */
static const unsigned char jump32_disp32[] =
  {0xe9};				/* jmp disp32	       */
static const unsigned char jump16_disp32[] =
  {0x66,0xe9};				/* jmp disp32	       */
/* 32-bit NOP patterns.  */
static const unsigned char *const f32_patt[] = {
  f32_1, f32_2, f32_3, f32_4, NULL, f32_6, f32_7
};
/* 16-bit NOP patterns.  */
static const unsigned char *const f16_patt[] = {
  f32_1, f32_2, f16_3, f16_4
};
/* nopl (%[re]ax) */
static const unsigned char alt_3[] =
  {0x0f,0x1f,0x00};
/* nopl 0(%[re]ax) */
static const unsigned char alt_4[] =
  {0x0f,0x1f,0x40,0x00};
/* nopl 0(%[re]ax,%[re]ax,1) */
static const unsigned char alt_5[] =
  {0x0f,0x1f,0x44,0x00,0x00};
/* nopw 0(%[re]ax,%[re]ax,1) */
static const unsigned char alt_6[] =
  {0x66,0x0f,0x1f,0x44,0x00,0x00};
/* nopl 0L(%[re]ax) */
static const unsigned char alt_7[] =
  {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
/* nopl 0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_8[] =
  {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* nopw 0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_9[] =
  {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* nopw %cs:0L(%[re]ax,%[re]ax,1) */
static const unsigned char alt_10[] =
  {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* data16 nopw %cs:0L(%eax,%eax,1) */
static const unsigned char alt_11[] =
  {0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
/* 32-bit and 64-bit NOP patterns.  */
static const unsigned char *const alt_patt[] = {
  f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
  alt_9, alt_10, alt_11
};

/* Generate COUNT bytes of NOPs to WHERE from PATT with the maximum
   size of a single NOP instruction MAX_SINGLE_NOP_SIZE.  */

static void
i386_output_nops (char *where, const unsigned char *const *patt,
		  int count, int max_single_nop_size)
{
  /* Place the longer NOP first.  */
  int last;
  int offset;
  const unsigned char *nops;

  if (max_single_nop_size < 1)
    {
      as_fatal (_("i386_output_nops called to generate nops of at most %d bytes!"),
		max_single_nop_size);
      return;
    }

  nops = patt[max_single_nop_size - 1];

  /* Use the smaller one if the requested one isn't available.  */
  if (nops == NULL)
    {
      max_single_nop_size--;
      nops = patt[max_single_nop_size - 1];
    }

  last = count % max_single_nop_size;

  count -= last;
  for (offset = 0; offset < count; offset += max_single_nop_size)
    memcpy (where + offset, nops, max_single_nop_size);

  if (last)
    {
      nops = patt[last - 1];
      if (nops == NULL)
	{
	  /* Use the smaller one plus one-byte NOP if the needed one
	     isn't available.  */
	  last--;
	  nops = patt[last - 1];
	  memcpy (where + offset, nops, last);
	  where[offset + last] = *patt[0];
	}
      else
	memcpy (where + offset, nops, last);
    }
}
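/* Illustrative example (not in the original source): with count = 10
   and max_single_nop_size = 4 using alt_patt, last = 10 % 4 = 2, so
   two 4-byte "nopl 0(%eax)" patterns are emitted followed by one
   2-byte "xchg %ax,%ax".  */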

static INLINE int
fits_in_imm7 (offsetT num)
{
  return (num & 0x7f) == num;
}

static INLINE int
fits_in_imm31 (offsetT num)
{
  return (num & 0x7fffffff) == num;
}
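/* Illustrative note (not in the original source): fits_in_imm7 (0x7f)
   is true while fits_in_imm7 (-1) is false, since -1 & 0x7f == 0x7f
   != -1; only small non-negative values qualify for the "jmp disp8"
   shortcut below.  */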

/* Generate COUNT bytes of NOPs to WHERE with the maximum size of a
   single NOP instruction LIMIT.  */

void
i386_generate_nops (fragS *fragP, char *where, offsetT count, int limit)
{
  const unsigned char *const *patt = NULL;
  int max_single_nop_size;
  /* Maximum number of NOPs before switching to jump over NOPs.  */
  int max_number_of_nops;

  switch (fragP->fr_type)
    {
    case rs_fill_nop:
    case rs_align_code:
      break;
    case rs_machine_dependent:
      /* Allow NOP padding for jumps and calls.  */
      if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PADDING
	  || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == FUSED_JCC_PADDING)
	break;
      /* Fall through.  */
    default:
      return;
    }

  /* We need to decide which NOP sequence to use for 32bit and
     64bit.  When -mtune= is used:

     1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
	PROCESSOR_GENERIC32, f32_patt will be used.
     2. For the rest, alt_patt will be used.

     When -mtune= isn't used, alt_patt will be used if
     cpu_arch_isa_flags has CpuNop.  Otherwise, f32_patt will
     be used.

     When -march= or .arch is used, we can't use anything beyond
     cpu_arch_isa_flags.  */

  if (flag_code == CODE_16BIT)
    {
      patt = f16_patt;
      max_single_nop_size = sizeof (f16_patt) / sizeof (f16_patt[0]);
      /* Limit number of NOPs to 2 in 16-bit mode.  */
      max_number_of_nops = 2;
    }
  else
    {
      if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
	{
	  /* PROCESSOR_UNKNOWN means that all ISAs may be used.  */
	  switch (cpu_arch_tune)
	    {
	    case PROCESSOR_UNKNOWN:
	      /* We use cpu_arch_isa_flags to check if we SHOULD
		 optimize with nops.  */
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_PENTIUM4:
	    case PROCESSOR_NOCONA:
	    case PROCESSOR_CORE:
	    case PROCESSOR_CORE2:
	    case PROCESSOR_COREI7:
	    case PROCESSOR_L1OM:
	    case PROCESSOR_K1OM:
	    case PROCESSOR_GENERIC64:
	    case PROCESSOR_K6:
	    case PROCESSOR_ATHLON:
	    case PROCESSOR_K8:
	    case PROCESSOR_AMDFAM10:
	    case PROCESSOR_BD:
	    case PROCESSOR_ZNVER:
	    case PROCESSOR_BT:
	      patt = alt_patt;
	      break;
	    case PROCESSOR_I386:
	    case PROCESSOR_I486:
	    case PROCESSOR_PENTIUM:
	    case PROCESSOR_PENTIUMPRO:
	    case PROCESSOR_IAMCU:
	    case PROCESSOR_GENERIC32:
	      patt = f32_patt;
	      break;
	    }
	}
      else
	{
	  switch (fragP->tc_frag_data.tune)
	    {
	    case PROCESSOR_UNKNOWN:
	      /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
		 PROCESSOR_UNKNOWN.  */
	      abort ();
	      break;

	    case PROCESSOR_I386:
	    case PROCESSOR_I486:
	    case PROCESSOR_PENTIUM:
	    case PROCESSOR_IAMCU:
	    case PROCESSOR_K6:
	    case PROCESSOR_ATHLON:
	    case PROCESSOR_K8:
	    case PROCESSOR_AMDFAM10:
	    case PROCESSOR_BD:
	    case PROCESSOR_ZNVER:
	    case PROCESSOR_BT:
	    case PROCESSOR_GENERIC32:
	      /* We use cpu_arch_isa_flags to check if we CAN optimize
		 with nops.  */
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_PENTIUMPRO:
	    case PROCESSOR_PENTIUM4:
	    case PROCESSOR_NOCONA:
	    case PROCESSOR_CORE:
	    case PROCESSOR_CORE2:
	    case PROCESSOR_COREI7:
	    case PROCESSOR_L1OM:
	    case PROCESSOR_K1OM:
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_patt;
	      else
		patt = f32_patt;
	      break;
	    case PROCESSOR_GENERIC64:
	      patt = alt_patt;
	      break;
	    }
	}

      if (patt == f32_patt)
	{
	  max_single_nop_size = sizeof (f32_patt) / sizeof (f32_patt[0]);
	  /* Limit number of NOPs to 2 for older processors.  */
	  max_number_of_nops = 2;
	}
      else
	{
	  max_single_nop_size = sizeof (alt_patt) / sizeof (alt_patt[0]);
	  /* Limit number of NOPs to 7 for newer processors.  */
	  max_number_of_nops = 7;
	}
    }

  if (limit == 0)
    limit = max_single_nop_size;

  if (fragP->fr_type == rs_fill_nop)
    {
      /* Output NOPs for .nop directive.  */
      if (limit > max_single_nop_size)
	{
	  as_bad_where (fragP->fr_file, fragP->fr_line,
			_("invalid single nop size: %d "
			  "(expect within [0, %d])"),
			limit, max_single_nop_size);
	  return;
	}
    }
  else if (fragP->fr_type != rs_machine_dependent)
    fragP->fr_var = count;

  if ((count / max_single_nop_size) > max_number_of_nops)
    {
      /* Generate jump over NOPs.  */
      offsetT disp = count - 2;
      if (fits_in_imm7 (disp))
	{
	  /* Use "jmp disp8" if possible.  */
	  count = disp;
	  where[0] = jump_disp8[0];
	  where[1] = count;
	  where += 2;
	}
      else
	{
	  unsigned int size_of_jump;

	  if (flag_code == CODE_16BIT)
	    {
	      where[0] = jump16_disp32[0];
	      where[1] = jump16_disp32[1];
	      size_of_jump = 2;
	    }
	  else
	    {
	      where[0] = jump32_disp32[0];
	      size_of_jump = 1;
	    }

	  count -= size_of_jump + 4;
	  if (!fits_in_imm31 (count))
	    {
	      as_bad_where (fragP->fr_file, fragP->fr_line,
			    _("jump over nop padding out of range"));
	      return;
	    }

	  md_number_to_chars (where + size_of_jump, count, 4);
	  where += size_of_jump + 4;
	}
    }

  /* Generate multiple NOPs.  */
  i386_output_nops (where, patt, count, limit);
}
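/* Illustrative example (not in the original source): a request for 100
   bytes of padding with max_single_nop_size = 11 and
   max_number_of_nops = 7 exceeds 7 * 11 = 77 executed NOP bytes, so
   disp = 98 (0x62) fits in imm7 and a 2-byte "jmp disp8" (0xeb 0x62)
   is emitted, with the 98 bytes behind it filled with NOP patterns
   that are never executed.  */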
1714
1715 static INLINE int
1716 operand_type_all_zero (const union i386_operand_type *x)
1717 {
1718 switch (ARRAY_SIZE(x->array))
1719 {
1720 case 3:
1721 if (x->array[2])
1722 return 0;
1723 /* Fall through. */
1724 case 2:
1725 if (x->array[1])
1726 return 0;
1727 /* Fall through. */
1728 case 1:
1729 return !x->array[0];
1730 default:
1731 abort ();
1732 }
1733 }
1734
1735 static INLINE void
1736 operand_type_set (union i386_operand_type *x, unsigned int v)
1737 {
1738 switch (ARRAY_SIZE(x->array))
1739 {
1740 case 3:
1741 x->array[2] = v;
1742 /* Fall through. */
1743 case 2:
1744 x->array[1] = v;
1745 /* Fall through. */
1746 case 1:
1747 x->array[0] = v;
1748 /* Fall through. */
1749 break;
1750 default:
1751 abort ();
1752 }
1753
1754 x->bitfield.class = ClassNone;
1755 x->bitfield.instance = InstanceNone;
1756 }
1757
1758 static INLINE int
1759 operand_type_equal (const union i386_operand_type *x,
1760 const union i386_operand_type *y)
1761 {
1762 switch (ARRAY_SIZE(x->array))
1763 {
1764 case 3:
1765 if (x->array[2] != y->array[2])
1766 return 0;
1767 /* Fall through. */
1768 case 2:
1769 if (x->array[1] != y->array[1])
1770 return 0;
1771 /* Fall through. */
1772 case 1:
1773 return x->array[0] == y->array[0];
1775 default:
1776 abort ();
1777 }
1778 }
1779
1780 static INLINE int
1781 cpu_flags_all_zero (const union i386_cpu_flags *x)
1782 {
1783 switch (ARRAY_SIZE(x->array))
1784 {
1785 case 4:
1786 if (x->array[3])
1787 return 0;
1788 /* Fall through. */
1789 case 3:
1790 if (x->array[2])
1791 return 0;
1792 /* Fall through. */
1793 case 2:
1794 if (x->array[1])
1795 return 0;
1796 /* Fall through. */
1797 case 1:
1798 return !x->array[0];
1799 default:
1800 abort ();
1801 }
1802 }
1803
1804 static INLINE int
1805 cpu_flags_equal (const union i386_cpu_flags *x,
1806 const union i386_cpu_flags *y)
1807 {
1808 switch (ARRAY_SIZE(x->array))
1809 {
1810 case 4:
1811 if (x->array[3] != y->array[3])
1812 return 0;
1813 /* Fall through. */
1814 case 3:
1815 if (x->array[2] != y->array[2])
1816 return 0;
1817 /* Fall through. */
1818 case 2:
1819 if (x->array[1] != y->array[1])
1820 return 0;
1821 /* Fall through. */
1822 case 1:
1823 return x->array[0] == y->array[0];
1825 default:
1826 abort ();
1827 }
1828 }
1829
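/* Return non-zero if flags F are compatible with the current code
   size: Cpu64 marks 64-bit-only templates, CpuNo64 marks templates
   unavailable in 64-bit mode. */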
1830 static INLINE int
1831 cpu_flags_check_cpu64 (i386_cpu_flags f)
1832 {
1833 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1834 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1835 }
1836
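/* Component-wise AND / OR / AND-NOT over the CPU flag words follow;
   these are used e.g. to accumulate or revoke ISA extensions while
   processing .arch directives. */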
1837 static INLINE i386_cpu_flags
1838 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1839 {
1840 switch (ARRAY_SIZE (x.array))
1841 {
1842 case 4:
1843 x.array [3] &= y.array [3];
1844 /* Fall through. */
1845 case 3:
1846 x.array [2] &= y.array [2];
1847 /* Fall through. */
1848 case 2:
1849 x.array [1] &= y.array [1];
1850 /* Fall through. */
1851 case 1:
1852 x.array [0] &= y.array [0];
1853 break;
1854 default:
1855 abort ();
1856 }
1857 return x;
1858 }
1859
1860 static INLINE i386_cpu_flags
1861 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1862 {
1863 switch (ARRAY_SIZE (x.array))
1864 {
1865 case 4:
1866 x.array [3] |= y.array [3];
1867 /* Fall through. */
1868 case 3:
1869 x.array [2] |= y.array [2];
1870 /* Fall through. */
1871 case 2:
1872 x.array [1] |= y.array [1];
1873 /* Fall through. */
1874 case 1:
1875 x.array [0] |= y.array [0];
1876 break;
1877 default:
1878 abort ();
1879 }
1880 return x;
1881 }
1882
1883 static INLINE i386_cpu_flags
1884 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1885 {
1886 switch (ARRAY_SIZE (x.array))
1887 {
1888 case 4:
1889 x.array [3] &= ~y.array [3];
1890 /* Fall through. */
1891 case 3:
1892 x.array [2] &= ~y.array [2];
1893 /* Fall through. */
1894 case 2:
1895 x.array [1] &= ~y.array [1];
1896 /* Fall through. */
1897 case 1:
1898 x.array [0] &= ~y.array [0];
1899 break;
1900 default:
1901 abort ();
1902 }
1903 return x;
1904 }
1905
1906 static const i386_cpu_flags avx512 = CPU_ANY_AVX512F_FLAGS;
1907
1908 #define CPU_FLAGS_ARCH_MATCH 0x1
1909 #define CPU_FLAGS_64BIT_MATCH 0x2
1910
1911 #define CPU_FLAGS_PERFECT_MATCH \
1912 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_64BIT_MATCH)
1913
1914 /* Return CPU flags match bits. */
1915
1916 static int
1917 cpu_flags_match (const insn_template *t)
1918 {
1919 i386_cpu_flags x = t->cpu_flags;
1920 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1921
1922 x.bitfield.cpu64 = 0;
1923 x.bitfield.cpuno64 = 0;
1924
1925 if (cpu_flags_all_zero (&x))
1926 {
1927 /* This instruction is available on all archs. */
1928 match |= CPU_FLAGS_ARCH_MATCH;
1929 }
1930 else
1931 {
1932 /* This instruction is available only on some archs. */
1933 i386_cpu_flags cpu = cpu_arch_flags;
1934
1935 /* AVX512VL is not a standalone feature - match it and then strip it. */
1936 if (x.bitfield.cpuavx512vl && !cpu.bitfield.cpuavx512vl)
1937 return match;
1938 x.bitfield.cpuavx512vl = 0;
1939
1940 cpu = cpu_flags_and (x, cpu);
1941 if (!cpu_flags_all_zero (&cpu))
1942 {
1943 if (x.bitfield.cpuavx)
1944 {
1945 /* We need to check a few extra flags with AVX. */
1946 if (cpu.bitfield.cpuavx
1947 && (!t->opcode_modifier.sse2avx
1948 || (sse2avx && !i.prefix[DATA_PREFIX]))
1949 && (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1950 && (!x.bitfield.cpugfni || cpu.bitfield.cpugfni)
1951 && (!x.bitfield.cpupclmul || cpu.bitfield.cpupclmul))
1952 match |= CPU_FLAGS_ARCH_MATCH;
1953 }
1954 else if (x.bitfield.cpuavx512f)
1955 {
1956 /* We need to check a few extra flags with AVX512F. */
1957 if (cpu.bitfield.cpuavx512f
1958 && (!x.bitfield.cpugfni || cpu.bitfield.cpugfni)
1959 && (!x.bitfield.cpuvaes || cpu.bitfield.cpuvaes)
1960 && (!x.bitfield.cpuvpclmulqdq || cpu.bitfield.cpuvpclmulqdq))
1961 match |= CPU_FLAGS_ARCH_MATCH;
1962 }
1963 else
1964 match |= CPU_FLAGS_ARCH_MATCH;
1965 }
1966 }
1967 return match;
1968 }
1969
1970 static INLINE i386_operand_type
1971 operand_type_and (i386_operand_type x, i386_operand_type y)
1972 {
1973 if (x.bitfield.class != y.bitfield.class)
1974 x.bitfield.class = ClassNone;
1975 if (x.bitfield.instance != y.bitfield.instance)
1976 x.bitfield.instance = InstanceNone;
1977
1978 switch (ARRAY_SIZE (x.array))
1979 {
1980 case 3:
1981 x.array [2] &= y.array [2];
1982 /* Fall through. */
1983 case 2:
1984 x.array [1] &= y.array [1];
1985 /* Fall through. */
1986 case 1:
1987 x.array [0] &= y.array [0];
1988 break;
1989 default:
1990 abort ();
1991 }
1992 return x;
1993 }
1994
1995 static INLINE i386_operand_type
1996 operand_type_and_not (i386_operand_type x, i386_operand_type y)
1997 {
1998 gas_assert (y.bitfield.class == ClassNone);
1999 gas_assert (y.bitfield.instance == InstanceNone);
2000
2001 switch (ARRAY_SIZE (x.array))
2002 {
2003 case 3:
2004 x.array [2] &= ~y.array [2];
2005 /* Fall through. */
2006 case 2:
2007 x.array [1] &= ~y.array [1];
2008 /* Fall through. */
2009 case 1:
2010 x.array [0] &= ~y.array [0];
2011 break;
2012 default:
2013 abort ();
2014 }
2015 return x;
2016 }
2017
2018 static INLINE i386_operand_type
2019 operand_type_or (i386_operand_type x, i386_operand_type y)
2020 {
2021 gas_assert (x.bitfield.class == ClassNone ||
2022 y.bitfield.class == ClassNone ||
2023 x.bitfield.class == y.bitfield.class);
2024 gas_assert (x.bitfield.instance == InstanceNone ||
2025 y.bitfield.instance == InstanceNone ||
2026 x.bitfield.instance == y.bitfield.instance);
2027
2028 switch (ARRAY_SIZE (x.array))
2029 {
2030 case 3:
2031 x.array [2] |= y.array [2];
2032 /* Fall through. */
2033 case 2:
2034 x.array [1] |= y.array [1];
2035 /* Fall through. */
2036 case 1:
2037 x.array [0] |= y.array [0];
2038 break;
2039 default:
2040 abort ();
2041 }
2042 return x;
2043 }
2044
2045 static INLINE i386_operand_type
2046 operand_type_xor (i386_operand_type x, i386_operand_type y)
2047 {
2048 gas_assert (y.bitfield.class == ClassNone);
2049 gas_assert (y.bitfield.instance == InstanceNone);
2050
2051 switch (ARRAY_SIZE (x.array))
2052 {
2053 case 3:
2054 x.array [2] ^= y.array [2];
2055 /* Fall through. */
2056 case 2:
2057 x.array [1] ^= y.array [1];
2058 /* Fall through. */
2059 case 1:
2060 x.array [0] ^= y.array [0];
2061 break;
2062 default:
2063 abort ();
2064 }
2065 return x;
2066 }
2067
2068 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
2069 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
2070 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
2071 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
2072 static const i386_operand_type anydisp = OPERAND_TYPE_ANYDISP;
2073 static const i386_operand_type anyimm = OPERAND_TYPE_ANYIMM;
2074 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
2075 static const i386_operand_type regmask = OPERAND_TYPE_REGMASK;
2076 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
2077 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
2078 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
2079 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
2080 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
2081 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
2082 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
2083 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
2084 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
2085
2086 enum operand_type
2087 {
2088 reg,
2089 imm,
2090 disp,
2091 anymem
2092 };
2093
2094 static INLINE int
2095 operand_type_check (i386_operand_type t, enum operand_type c)
2096 {
2097 switch (c)
2098 {
2099 case reg:
2100 return t.bitfield.class == Reg;
2101
2102 case imm:
2103 return (t.bitfield.imm8
2104 || t.bitfield.imm8s
2105 || t.bitfield.imm16
2106 || t.bitfield.imm32
2107 || t.bitfield.imm32s
2108 || t.bitfield.imm64);
2109
2110 case disp:
2111 return (t.bitfield.disp8
2112 || t.bitfield.disp16
2113 || t.bitfield.disp32
2114 || t.bitfield.disp32s
2115 || t.bitfield.disp64);
2116
2117 case anymem:
2118 return (t.bitfield.disp8
2119 || t.bitfield.disp16
2120 || t.bitfield.disp32
2121 || t.bitfield.disp32s
2122 || t.bitfield.disp64
2123 || t.bitfield.baseindex);
2124
2125 default:
2126 abort ();
2127 }
2128
2129 return 0;
2130 }
2131
2132 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit/80bit size
2133 between operand GIVEN and operand WANTED for instruction template T. */
2134
2135 static INLINE int
2136 match_operand_size (const insn_template *t, unsigned int wanted,
2137 unsigned int given)
2138 {
2139 return !((i.types[given].bitfield.byte
2140 && !t->operand_types[wanted].bitfield.byte)
2141 || (i.types[given].bitfield.word
2142 && !t->operand_types[wanted].bitfield.word)
2143 || (i.types[given].bitfield.dword
2144 && !t->operand_types[wanted].bitfield.dword)
2145 || (i.types[given].bitfield.qword
2146 && !t->operand_types[wanted].bitfield.qword)
2147 || (i.types[given].bitfield.tbyte
2148 && !t->operand_types[wanted].bitfield.tbyte));
2149 }
2150
2151 /* Return 1 if there is no conflict in SIMD register between operand
2152 GIVEN and operand WANTED for instruction template T. */
2153
2154 static INLINE int
2155 match_simd_size (const insn_template *t, unsigned int wanted,
2156 unsigned int given)
2157 {
2158 return !((i.types[given].bitfield.xmmword
2159 && !t->operand_types[wanted].bitfield.xmmword)
2160 || (i.types[given].bitfield.ymmword
2161 && !t->operand_types[wanted].bitfield.ymmword)
2162 || (i.types[given].bitfield.zmmword
2163 && !t->operand_types[wanted].bitfield.zmmword));
2164 }
2165
2166 /* Return 1 if there is no conflict in any size between operand GIVEN
2167 and operand WANTED for instruction template T. */
2168
2169 static INLINE int
2170 match_mem_size (const insn_template *t, unsigned int wanted,
2171 unsigned int given)
2172 {
2173 return (match_operand_size (t, wanted, given)
2174 && !((i.types[given].bitfield.unspecified
2175 && !i.broadcast
2176 && !t->operand_types[wanted].bitfield.unspecified)
2177 || (i.types[given].bitfield.fword
2178 && !t->operand_types[wanted].bitfield.fword)
2179 /* For scalar opcode templates to allow register and memory
2180 operands at the same time, some special casing is needed
2181 here. Also for v{,p}broadcast*, {,v}pmov{s,z}*, and
2182 down-conversion vpmov*. */
2183 || ((t->operand_types[wanted].bitfield.class == RegSIMD
2184 && t->operand_types[wanted].bitfield.byte
2185 + t->operand_types[wanted].bitfield.word
2186 + t->operand_types[wanted].bitfield.dword
2187 + t->operand_types[wanted].bitfield.qword
2188 > !!t->opcode_modifier.broadcast)
2189 ? (i.types[given].bitfield.xmmword
2190 || i.types[given].bitfield.ymmword
2191 || i.types[given].bitfield.zmmword)
2192 : !match_simd_size(t, wanted, given))));
2193 }
2194
2195 /* Return value has MATCH_STRAIGHT set if there is no size conflict on any
2196 operands for instruction template T, and it has MATCH_REVERSE set if there
2197 is no size conflict on any operands for the template with operands reversed
2198 (and the template allows for reversing in the first place). */
2199
2200 #define MATCH_STRAIGHT 1
2201 #define MATCH_REVERSE 2
2202
2203 static INLINE unsigned int
2204 operand_size_match (const insn_template *t)
2205 {
2206 unsigned int j, match = MATCH_STRAIGHT;
2207
2208 /* Don't check non-absolute jump instructions. */
2209 if (t->opcode_modifier.jump
2210 && t->opcode_modifier.jump != JUMP_ABSOLUTE)
2211 return match;
2212
2213 /* Check memory and accumulator operand size. */
2214 for (j = 0; j < i.operands; j++)
2215 {
2216 if (i.types[j].bitfield.class != Reg
2217 && i.types[j].bitfield.class != RegSIMD
2218 && t->opcode_modifier.anysize)
2219 continue;
2220
2221 if (t->operand_types[j].bitfield.class == Reg
2222 && !match_operand_size (t, j, j))
2223 {
2224 match = 0;
2225 break;
2226 }
2227
2228 if (t->operand_types[j].bitfield.class == RegSIMD
2229 && !match_simd_size (t, j, j))
2230 {
2231 match = 0;
2232 break;
2233 }
2234
2235 if (t->operand_types[j].bitfield.instance == Accum
2236 && (!match_operand_size (t, j, j) || !match_simd_size (t, j, j)))
2237 {
2238 match = 0;
2239 break;
2240 }
2241
2242 if ((i.flags[j] & Operand_Mem) && !match_mem_size (t, j, j))
2243 {
2244 match = 0;
2245 break;
2246 }
2247 }
2248
2249 if (!t->opcode_modifier.d)
2250 {
2251 mismatch:
2252 if (!match)
2253 i.error = operand_size_mismatch;
2254 return match;
2255 }
2256
2257 /* Check reverse. */
2258 gas_assert (i.operands >= 2 && i.operands <= 3);
2259
2260 for (j = 0; j < i.operands; j++)
2261 {
2262 unsigned int given = i.operands - j - 1;
2263
2264 if (t->operand_types[j].bitfield.class == Reg
2265 && !match_operand_size (t, j, given))
2266 goto mismatch;
2267
2268 if (t->operand_types[j].bitfield.class == RegSIMD
2269 && !match_simd_size (t, j, given))
2270 goto mismatch;
2271
2272 if (t->operand_types[j].bitfield.instance == Accum
2273 && (!match_operand_size (t, j, given)
2274 || !match_simd_size (t, j, given)))
2275 goto mismatch;
2276
2277 if ((i.flags[given] & Operand_Mem) && !match_mem_size (t, j, given))
2278 goto mismatch;
2279 }
2280
2281 return match | MATCH_REVERSE;
2282 }
2283
2284 static INLINE int
2285 operand_type_match (i386_operand_type overlap,
2286 i386_operand_type given)
2287 {
2288 i386_operand_type temp = overlap;
2289
2290 temp.bitfield.unspecified = 0;
2291 temp.bitfield.byte = 0;
2292 temp.bitfield.word = 0;
2293 temp.bitfield.dword = 0;
2294 temp.bitfield.fword = 0;
2295 temp.bitfield.qword = 0;
2296 temp.bitfield.tbyte = 0;
2297 temp.bitfield.xmmword = 0;
2298 temp.bitfield.ymmword = 0;
2299 temp.bitfield.zmmword = 0;
2300 if (operand_type_all_zero (&temp))
2301 goto mismatch;
2302
2303 if (given.bitfield.baseindex == overlap.bitfield.baseindex)
2304 return 1;
2305
2306 mismatch:
2307 i.error = operand_type_mismatch;
2308 return 0;
2309 }
2310
2311 /* If given types g0 and g1 are registers they must be of the same type
2312 unless the expected operand type register overlap is null.
2313 Some Intel syntax memory operand size checking also happens here. */
2314
2315 static INLINE int
2316 operand_type_register_match (i386_operand_type g0,
2317 i386_operand_type t0,
2318 i386_operand_type g1,
2319 i386_operand_type t1)
2320 {
2321 if (g0.bitfield.class != Reg
2322 && g0.bitfield.class != RegSIMD
2323 && (!operand_type_check (g0, anymem)
2324 || g0.bitfield.unspecified
2325 || (t0.bitfield.class != Reg
2326 && t0.bitfield.class != RegSIMD)))
2327 return 1;
2328
2329 if (g1.bitfield.class != Reg
2330 && g1.bitfield.class != RegSIMD
2331 && (!operand_type_check (g1, anymem)
2332 || g1.bitfield.unspecified
2333 || (t1.bitfield.class != Reg
2334 && t1.bitfield.class != RegSIMD)))
2335 return 1;
2336
2337 if (g0.bitfield.byte == g1.bitfield.byte
2338 && g0.bitfield.word == g1.bitfield.word
2339 && g0.bitfield.dword == g1.bitfield.dword
2340 && g0.bitfield.qword == g1.bitfield.qword
2341 && g0.bitfield.xmmword == g1.bitfield.xmmword
2342 && g0.bitfield.ymmword == g1.bitfield.ymmword
2343 && g0.bitfield.zmmword == g1.bitfield.zmmword)
2344 return 1;
2345
2346 if (!(t0.bitfield.byte & t1.bitfield.byte)
2347 && !(t0.bitfield.word & t1.bitfield.word)
2348 && !(t0.bitfield.dword & t1.bitfield.dword)
2349 && !(t0.bitfield.qword & t1.bitfield.qword)
2350 && !(t0.bitfield.xmmword & t1.bitfield.xmmword)
2351 && !(t0.bitfield.ymmword & t1.bitfield.ymmword)
2352 && !(t0.bitfield.zmmword & t1.bitfield.zmmword))
2353 return 1;
2354
2355 i.error = register_type_mismatch;
2356
2357 return 0;
2358 }
2359
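/* Return the full encoded register number: RegRex adds the REX
   extension (+8, r8..r15 etc.), RegVRex the EVEX extension (+16) to
   reach the upper vector registers. */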
2360 static INLINE unsigned int
2361 register_number (const reg_entry *r)
2362 {
2363 unsigned int nr = r->reg_num;
2364
2365 if (r->reg_flags & RegRex)
2366 nr += 8;
2367
2368 if (r->reg_flags & RegVRex)
2369 nr += 16;
2370
2371 return nr;
2372 }
2373
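/* Map a displacement size to the ModRM.mod value it implies:
   1 for disp8, 2 for disp16/disp32, 0 for no displacement. */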
2374 static INLINE unsigned int
2375 mode_from_disp_size (i386_operand_type t)
2376 {
2377 if (t.bitfield.disp8)
2378 return 1;
2379 else if (t.bitfield.disp16
2380 || t.bitfield.disp32
2381 || t.bitfield.disp32s)
2382 return 2;
2383 else
2384 return 0;
2385 }
2386
2387 static INLINE int
2388 fits_in_signed_byte (addressT num)
2389 {
2390 return num + 0x80 <= 0xff;
2391 }
2392
2393 static INLINE int
2394 fits_in_unsigned_byte (addressT num)
2395 {
2396 return num <= 0xff;
2397 }
2398
2399 static INLINE int
2400 fits_in_unsigned_word (addressT num)
2401 {
2402 return num <= 0xffff;
2403 }
2404
2405 static INLINE int
2406 fits_in_signed_word (addressT num)
2407 {
2408 return num + 0x8000 <= 0xffff;
2409 }
2410
2411 static INLINE int
2412 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED)
2413 {
2414 #ifndef BFD64
2415 return 1;
2416 #else
2417 return num + 0x80000000 <= 0xffffffff;
2418 #endif
2419 } /* fits_in_signed_long() */
2420
2421 static INLINE int
2422 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED)
2423 {
2424 #ifndef BFD64
2425 return 1;
2426 #else
2427 return num <= 0xffffffff;
2428 #endif
2429 } /* fits_in_unsigned_long() */
2430
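/* EVEX disp8*N: the stored 8-bit displacement is implicitly scaled by
   2**i.memshift, so e.g. a 64-byte zmm access (memshift 6) can encode
   byte offsets up to 127 * 64 in a single byte. */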
2431 static INLINE int
2432 fits_in_disp8 (offsetT num)
2433 {
2434 int shift = i.memshift;
2435 unsigned int mask;
2436
2437 if (shift == -1)
2438 abort ();
2439
2440 mask = (1 << shift) - 1;
2441
2442 /* Return 0 if NUM isn't properly aligned. */
2443 if ((num & mask))
2444 return 0;
2445
2446 /* Check if NUM will fit in 8bit after shift. */
2447 return fits_in_signed_byte (num >> shift);
2448 }
2449
2450 static INLINE int
2451 fits_in_imm4 (offsetT num)
2452 {
2453 return (num & 0xf) == num;
2454 }
2455
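/* Return the set of all immediate operand types NUM fits in; Imm64 is
   always included, narrower types are added as the value allows. */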
2456 static i386_operand_type
2457 smallest_imm_type (offsetT num)
2458 {
2459 i386_operand_type t;
2460
2461 operand_type_set (&t, 0);
2462 t.bitfield.imm64 = 1;
2463
2464 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
2465 {
2466 /* This code is disabled on the 486 because all the Imm1 forms
2467 in the opcode table are slower on the i486. They're the
2468 versions with the implicitly specified single-position
2469 displacement, which has another syntax if you really want to
2470 use that form. */
2471 t.bitfield.imm1 = 1;
2472 t.bitfield.imm8 = 1;
2473 t.bitfield.imm8s = 1;
2474 t.bitfield.imm16 = 1;
2475 t.bitfield.imm32 = 1;
2476 t.bitfield.imm32s = 1;
2477 }
2478 else if (fits_in_signed_byte (num))
2479 {
2480 t.bitfield.imm8 = 1;
2481 t.bitfield.imm8s = 1;
2482 t.bitfield.imm16 = 1;
2483 t.bitfield.imm32 = 1;
2484 t.bitfield.imm32s = 1;
2485 }
2486 else if (fits_in_unsigned_byte (num))
2487 {
2488 t.bitfield.imm8 = 1;
2489 t.bitfield.imm16 = 1;
2490 t.bitfield.imm32 = 1;
2491 t.bitfield.imm32s = 1;
2492 }
2493 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
2494 {
2495 t.bitfield.imm16 = 1;
2496 t.bitfield.imm32 = 1;
2497 t.bitfield.imm32s = 1;
2498 }
2499 else if (fits_in_signed_long (num))
2500 {
2501 t.bitfield.imm32 = 1;
2502 t.bitfield.imm32s = 1;
2503 }
2504 else if (fits_in_unsigned_long (num))
2505 t.bitfield.imm32 = 1;
2506
2507 return t;
2508 }
2509
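/* Truncate VAL to SIZE bytes, warning when significant bits are lost,
   i.e. when the discarded bits are neither all zero nor a plain sign
   extension. */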
2510 static offsetT
2511 offset_in_range (offsetT val, int size)
2512 {
2513 addressT mask;
2514
2515 switch (size)
2516 {
2517 case 1: mask = ((addressT) 1 << 8) - 1; break;
2518 case 2: mask = ((addressT) 1 << 16) - 1; break;
2519 case 4: mask = ((addressT) 2 << 31) - 1; break;
2520 #ifdef BFD64
2521 case 8: mask = ((addressT) 2 << 63) - 1; break;
2522 #endif
2523 default: abort ();
2524 }
2525
2526 #ifdef BFD64
2527 /* If BFD64, sign extend val for 32bit address mode. */
2528 if (flag_code != CODE_64BIT
2529 || i.prefix[ADDR_PREFIX])
2530 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
2531 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
2532 #endif
2533
2534 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
2535 {
2536 char buf1[40], buf2[40];
2537
2538 sprint_value (buf1, val);
2539 sprint_value (buf2, val & mask);
2540 as_warn (_("%s shortened to %s"), buf1, buf2);
2541 }
2542 return val & mask;
2543 }
2544
2545 enum PREFIX_GROUP
2546 {
2547 PREFIX_EXIST = 0,
2548 PREFIX_LOCK,
2549 PREFIX_REP,
2550 PREFIX_DS,
2551 PREFIX_OTHER
2552 };
2553
2554 /* Returns
2555 a. PREFIX_EXIST if attempting to add a prefix where one from the
2556 same class already exists.
2557 b. PREFIX_LOCK if lock prefix is added.
2558 c. PREFIX_REP if rep/repne prefix is added.
2559 d. PREFIX_DS if ds prefix is added.
2560 e. PREFIX_OTHER if other prefix is added.
2561 */
2562
2563 static enum PREFIX_GROUP
2564 add_prefix (unsigned int prefix)
2565 {
2566 enum PREFIX_GROUP ret = PREFIX_OTHER;
2567 unsigned int q;
2568
2569 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
2570 && flag_code == CODE_64BIT)
2571 {
2572 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
2573 || (i.prefix[REX_PREFIX] & prefix & REX_R)
2574 || (i.prefix[REX_PREFIX] & prefix & REX_X)
2575 || (i.prefix[REX_PREFIX] & prefix & REX_B))
2576 ret = PREFIX_EXIST;
2577 q = REX_PREFIX;
2578 }
2579 else
2580 {
2581 switch (prefix)
2582 {
2583 default:
2584 abort ();
2585
2586 case DS_PREFIX_OPCODE:
2587 ret = PREFIX_DS;
2588 /* Fall through. */
2589 case CS_PREFIX_OPCODE:
2590 case ES_PREFIX_OPCODE:
2591 case FS_PREFIX_OPCODE:
2592 case GS_PREFIX_OPCODE:
2593 case SS_PREFIX_OPCODE:
2594 q = SEG_PREFIX;
2595 break;
2596
2597 case REPNE_PREFIX_OPCODE:
2598 case REPE_PREFIX_OPCODE:
2599 q = REP_PREFIX;
2600 ret = PREFIX_REP;
2601 break;
2602
2603 case LOCK_PREFIX_OPCODE:
2604 q = LOCK_PREFIX;
2605 ret = PREFIX_LOCK;
2606 break;
2607
2608 case FWAIT_OPCODE:
2609 q = WAIT_PREFIX;
2610 break;
2611
2612 case ADDR_PREFIX_OPCODE:
2613 q = ADDR_PREFIX;
2614 break;
2615
2616 case DATA_PREFIX_OPCODE:
2617 q = DATA_PREFIX;
2618 break;
2619 }
2620 if (i.prefix[q] != 0)
2621 ret = PREFIX_EXIST;
2622 }
2623
2624 if (ret)
2625 {
2626 if (!i.prefix[q])
2627 ++i.prefixes;
2628 i.prefix[q] |= prefix;
2629 }
2630 else
2631 as_bad (_("same type of prefix used twice"));
2632
2633 return ret;
2634 }
2635
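/* Switch to 16-, 32- or 64-bit code generation, diagnosing (fatally
   if CHECK is set) modes the selected architecture cannot support. */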
2636 static void
2637 update_code_flag (int value, int check)
2638 {
2639 PRINTF_LIKE ((*as_error));
2640
2641 flag_code = (enum flag_code) value;
2642 if (flag_code == CODE_64BIT)
2643 {
2644 cpu_arch_flags.bitfield.cpu64 = 1;
2645 cpu_arch_flags.bitfield.cpuno64 = 0;
2646 }
2647 else
2648 {
2649 cpu_arch_flags.bitfield.cpu64 = 0;
2650 cpu_arch_flags.bitfield.cpuno64 = 1;
2651 }
2652 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
2653 {
2654 if (check)
2655 as_error = as_fatal;
2656 else
2657 as_error = as_bad;
2658 (*as_error) (_("64bit mode not supported on `%s'."),
2659 cpu_arch_name ? cpu_arch_name : default_arch);
2660 }
2661 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2662 {
2663 if (check)
2664 as_error = as_fatal;
2665 else
2666 as_error = as_bad;
2667 (*as_error) (_("32bit mode not supported on `%s'."),
2668 cpu_arch_name ? cpu_arch_name : default_arch);
2669 }
2670 stackop_size = '\0';
2671 }
2672
2673 static void
2674 set_code_flag (int value)
2675 {
2676 update_code_flag (value, 0);
2677 }
2678
2679 static void
2680 set_16bit_gcc_code_flag (int new_code_flag)
2681 {
2682 flag_code = (enum flag_code) new_code_flag;
2683 if (flag_code != CODE_16BIT)
2684 abort ();
2685 cpu_arch_flags.bitfield.cpu64 = 0;
2686 cpu_arch_flags.bitfield.cpuno64 = 1;
2687 stackop_size = LONG_MNEM_SUFFIX;
2688 }
2689
2690 static void
2691 set_intel_syntax (int syntax_flag)
2692 {
2693 /* Find out if register prefixing is specified. */
2694 int ask_naked_reg = 0;
2695
2696 SKIP_WHITESPACE ();
2697 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2698 {
2699 char *string;
2700 int e = get_symbol_name (&string);
2701
2702 if (strcmp (string, "prefix") == 0)
2703 ask_naked_reg = 1;
2704 else if (strcmp (string, "noprefix") == 0)
2705 ask_naked_reg = -1;
2706 else
2707 as_bad (_("bad argument to syntax directive."));
2708 (void) restore_line_pointer (e);
2709 }
2710 demand_empty_rest_of_line ();
2711
2712 intel_syntax = syntax_flag;
2713
2714 if (ask_naked_reg == 0)
2715 allow_naked_reg = (intel_syntax
2716 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2717 else
2718 allow_naked_reg = (ask_naked_reg < 0);
2719
2720 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2721
2722 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2723 identifier_chars['$'] = intel_syntax ? '$' : 0;
2724 register_prefix = allow_naked_reg ? "" : "%";
2725 }
2726
2727 static void
2728 set_intel_mnemonic (int mnemonic_flag)
2729 {
2730 intel_mnemonic = mnemonic_flag;
2731 }
2732
2733 static void
2734 set_allow_index_reg (int flag)
2735 {
2736 allow_index_reg = flag;
2737 }
2738
2739 static void
2740 set_check (int what)
2741 {
2742 enum check_kind *kind;
2743 const char *str;
2744
2745 if (what)
2746 {
2747 kind = &operand_check;
2748 str = "operand";
2749 }
2750 else
2751 {
2752 kind = &sse_check;
2753 str = "sse";
2754 }
2755
2756 SKIP_WHITESPACE ();
2757
2758 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2759 {
2760 char *string;
2761 int e = get_symbol_name (&string);
2762
2763 if (strcmp (string, "none") == 0)
2764 *kind = check_none;
2765 else if (strcmp (string, "warning") == 0)
2766 *kind = check_warning;
2767 else if (strcmp (string, "error") == 0)
2768 *kind = check_error;
2769 else
2770 as_bad (_("bad argument to %s_check directive."), str);
2771 (void) restore_line_pointer (e);
2772 }
2773 else
2774 as_bad (_("missing argument for %s_check directive"), str);
2775
2776 demand_empty_rest_of_line ();
2777 }
2778
2779 static void
2780 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2781 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2782 {
2783 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2784 static const char *arch;
2785
2786 /* Intel MCU, L1OM and K1OM are supported on ELF only. */
2787 if (!IS_ELF)
2788 return;
2789
2790 if (!arch)
2791 {
2792 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2793 use default_arch. */
2794 arch = cpu_arch_name;
2795 if (!arch)
2796 arch = default_arch;
2797 }
2798
2799 /* If we are targeting Intel MCU, we must enable it. */
2800 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_IAMCU
2801 || new_flag.bitfield.cpuiamcu)
2802 return;
2803
2804 /* If we are targeting Intel L1OM, we must enable it. */
2805 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2806 || new_flag.bitfield.cpul1om)
2807 return;
2808
2809 /* If we are targeting Intel K1OM, we must enable it. */
2810 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2811 || new_flag.bitfield.cpuk1om)
2812 return;
2813
2814 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2815 #endif
2816 }
2817
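/* Handle the .arch directive: a plain name switches the base
   architecture, a '.'-prefixed name enables an ISA extension, and a
   '.no'-prefixed one (matched against cpu_noarch, e.g. ".arch
   .nommx") disables it.  An optional ",nojumps" argument suppresses
   promotion of conditional jumps. */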
2818 static void
2819 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2820 {
2821 SKIP_WHITESPACE ();
2822
2823 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2824 {
2825 char *string;
2826 int e = get_symbol_name (&string);
2827 unsigned int j;
2828 i386_cpu_flags flags;
2829
2830 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2831 {
2832 if (strcmp (string, cpu_arch[j].name) == 0)
2833 {
2834 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2835
2836 if (*string != '.')
2837 {
2838 cpu_arch_name = cpu_arch[j].name;
2839 cpu_sub_arch_name = NULL;
2840 cpu_arch_flags = cpu_arch[j].flags;
2841 if (flag_code == CODE_64BIT)
2842 {
2843 cpu_arch_flags.bitfield.cpu64 = 1;
2844 cpu_arch_flags.bitfield.cpuno64 = 0;
2845 }
2846 else
2847 {
2848 cpu_arch_flags.bitfield.cpu64 = 0;
2849 cpu_arch_flags.bitfield.cpuno64 = 1;
2850 }
2851 cpu_arch_isa = cpu_arch[j].type;
2852 cpu_arch_isa_flags = cpu_arch[j].flags;
2853 if (!cpu_arch_tune_set)
2854 {
2855 cpu_arch_tune = cpu_arch_isa;
2856 cpu_arch_tune_flags = cpu_arch_isa_flags;
2857 }
2858 break;
2859 }
2860
2861 flags = cpu_flags_or (cpu_arch_flags,
2862 cpu_arch[j].flags);
2863
2864 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2865 {
2866 if (cpu_sub_arch_name)
2867 {
2868 char *name = cpu_sub_arch_name;
2869 cpu_sub_arch_name = concat (name,
2870 cpu_arch[j].name,
2871 (const char *) NULL);
2872 free (name);
2873 }
2874 else
2875 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2876 cpu_arch_flags = flags;
2877 cpu_arch_isa_flags = flags;
2878 }
2879 else
2880 cpu_arch_isa_flags
2881 = cpu_flags_or (cpu_arch_isa_flags,
2882 cpu_arch[j].flags);
2883 (void) restore_line_pointer (e);
2884 demand_empty_rest_of_line ();
2885 return;
2886 }
2887 }
2888
2889 if (*string == '.' && j >= ARRAY_SIZE (cpu_arch))
2890 {
2891 /* Disable an ISA extension. */
2892 for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
2893 if (strcmp (string + 1, cpu_noarch [j].name) == 0)
2894 {
2895 flags = cpu_flags_and_not (cpu_arch_flags,
2896 cpu_noarch[j].flags);
2897 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2898 {
2899 if (cpu_sub_arch_name)
2900 {
2901 char *name = cpu_sub_arch_name;
2902 cpu_sub_arch_name = concat (name, string,
2903 (const char *) NULL);
2904 free (name);
2905 }
2906 else
2907 cpu_sub_arch_name = xstrdup (string);
2908 cpu_arch_flags = flags;
2909 cpu_arch_isa_flags = flags;
2910 }
2911 (void) restore_line_pointer (e);
2912 demand_empty_rest_of_line ();
2913 return;
2914 }
2915
2916 j = ARRAY_SIZE (cpu_arch);
2917 }
2918
2919 if (j >= ARRAY_SIZE (cpu_arch))
2920 as_bad (_("no such architecture: `%s'"), string);
2921
2922 *input_line_pointer = e;
2923 }
2924 else
2925 as_bad (_("missing cpu architecture"));
2926
2927 no_cond_jump_promotion = 0;
2928 if (*input_line_pointer == ','
2929 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2930 {
2931 char *string;
2932 char e;
2933
2934 ++input_line_pointer;
2935 e = get_symbol_name (&string);
2936
2937 if (strcmp (string, "nojumps") == 0)
2938 no_cond_jump_promotion = 1;
2939 else if (strcmp (string, "jumps") == 0)
2940 ;
2941 else
2942 as_bad (_("no such architecture modifier: `%s'"), string);
2943
2944 (void) restore_line_pointer (e);
2945 }
2946
2947 demand_empty_rest_of_line ();
2948 }
2949
2950 enum bfd_architecture
2951 i386_arch (void)
2952 {
2953 if (cpu_arch_isa == PROCESSOR_L1OM)
2954 {
2955 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2956 || flag_code != CODE_64BIT)
2957 as_fatal (_("Intel L1OM is 64bit ELF only"));
2958 return bfd_arch_l1om;
2959 }
2960 else if (cpu_arch_isa == PROCESSOR_K1OM)
2961 {
2962 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2963 || flag_code != CODE_64BIT)
2964 as_fatal (_("Intel K1OM is 64bit ELF only"));
2965 return bfd_arch_k1om;
2966 }
2967 else if (cpu_arch_isa == PROCESSOR_IAMCU)
2968 {
2969 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2970 || flag_code == CODE_64BIT)
2971 as_fatal (_("Intel MCU is 32bit ELF only"));
2972 return bfd_arch_iamcu;
2973 }
2974 else
2975 return bfd_arch_i386;
2976 }
2977
2978 unsigned long
2979 i386_mach (void)
2980 {
2981 if (!strncmp (default_arch, "x86_64", 6))
2982 {
2983 if (cpu_arch_isa == PROCESSOR_L1OM)
2984 {
2985 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2986 || default_arch[6] != '\0')
2987 as_fatal (_("Intel L1OM is 64bit ELF only"));
2988 return bfd_mach_l1om;
2989 }
2990 else if (cpu_arch_isa == PROCESSOR_K1OM)
2991 {
2992 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2993 || default_arch[6] != '\0')
2994 as_fatal (_("Intel K1OM is 64bit ELF only"));
2995 return bfd_mach_k1om;
2996 }
2997 else if (default_arch[6] == '\0')
2998 return bfd_mach_x86_64;
2999 else
3000 return bfd_mach_x64_32;
3001 }
3002 else if (!strcmp (default_arch, "i386")
3003 || !strcmp (default_arch, "iamcu"))
3004 {
3005 if (cpu_arch_isa == PROCESSOR_IAMCU)
3006 {
3007 if (OUTPUT_FLAVOR != bfd_target_elf_flavour)
3008 as_fatal (_("Intel MCU is 32bit ELF only"));
3009 return bfd_mach_i386_iamcu;
3010 }
3011 else
3012 return bfd_mach_i386_i386;
3013 }
3014 else
3015 as_fatal (_("unknown architecture"));
3016 }
3017 \f
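/* Called once at assembler start-up: build the opcode and register
   hash tables, set up the lexical classification tables, and pick the
   DWARF return column and CIE data alignment for the target mode. */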
3018 void
3019 md_begin (void)
3020 {
3021 const char *hash_err;
3022
3023 /* Support pseudo prefixes like {disp32}. */
3024 lex_type ['{'] = LEX_BEGIN_NAME;
3025
3026 /* Initialize op_hash hash table. */
3027 op_hash = hash_new ();
3028
3029 {
3030 const insn_template *optab;
3031 templates *core_optab;
3032
3033 /* Setup for loop. */
3034 optab = i386_optab;
3035 core_optab = XNEW (templates);
3036 core_optab->start = optab;
3037
3038 while (1)
3039 {
3040 ++optab;
3041 if (optab->name == NULL
3042 || strcmp (optab->name, (optab - 1)->name) != 0)
3043 {
3044 /* Different name --> finish the current template list, add it
3045 to the hash table, and begin a new one. */
3046 core_optab->end = optab;
3047 hash_err = hash_insert (op_hash,
3048 (optab - 1)->name,
3049 (void *) core_optab);
3050 if (hash_err)
3051 {
3052 as_fatal (_("can't hash %s: %s"),
3053 (optab - 1)->name,
3054 hash_err);
3055 }
3056 if (optab->name == NULL)
3057 break;
3058 core_optab = XNEW (templates);
3059 core_optab->start = optab;
3060 }
3061 }
3062 }
3063
3064 /* Initialize reg_hash hash table. */
3065 reg_hash = hash_new ();
3066 {
3067 const reg_entry *regtab;
3068 unsigned int regtab_size = i386_regtab_size;
3069
3070 for (regtab = i386_regtab; regtab_size--; regtab++)
3071 {
3072 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
3073 if (hash_err)
3074 as_fatal (_("can't hash %s: %s"),
3075 regtab->reg_name,
3076 hash_err);
3077 }
3078 }
3079
3080 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
3081 {
3082 int c;
3083 char *p;
3084
3085 for (c = 0; c < 256; c++)
3086 {
3087 if (ISDIGIT (c))
3088 {
3089 digit_chars[c] = c;
3090 mnemonic_chars[c] = c;
3091 register_chars[c] = c;
3092 operand_chars[c] = c;
3093 }
3094 else if (ISLOWER (c))
3095 {
3096 mnemonic_chars[c] = c;
3097 register_chars[c] = c;
3098 operand_chars[c] = c;
3099 }
3100 else if (ISUPPER (c))
3101 {
3102 mnemonic_chars[c] = TOLOWER (c);
3103 register_chars[c] = mnemonic_chars[c];
3104 operand_chars[c] = c;
3105 }
3106 else if (c == '{' || c == '}')
3107 {
3108 mnemonic_chars[c] = c;
3109 operand_chars[c] = c;
3110 }
3111
3112 if (ISALPHA (c) || ISDIGIT (c))
3113 identifier_chars[c] = c;
3114 else if (c >= 128)
3115 {
3116 identifier_chars[c] = c;
3117 operand_chars[c] = c;
3118 }
3119 }
3120
3121 #ifdef LEX_AT
3122 identifier_chars['@'] = '@';
3123 #endif
3124 #ifdef LEX_QM
3125 identifier_chars['?'] = '?';
3126 operand_chars['?'] = '?';
3127 #endif
3128 digit_chars['-'] = '-';
3129 mnemonic_chars['_'] = '_';
3130 mnemonic_chars['-'] = '-';
3131 mnemonic_chars['.'] = '.';
3132 identifier_chars['_'] = '_';
3133 identifier_chars['.'] = '.';
3134
3135 for (p = operand_special_chars; *p != '\0'; p++)
3136 operand_chars[(unsigned char) *p] = *p;
3137 }
3138
3139 if (flag_code == CODE_64BIT)
3140 {
3141 #if defined (OBJ_COFF) && defined (TE_PE)
3142 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
3143 ? 32 : 16);
3144 #else
3145 x86_dwarf2_return_column = 16;
3146 #endif
3147 x86_cie_data_alignment = -8;
3148 }
3149 else
3150 {
3151 x86_dwarf2_return_column = 8;
3152 x86_cie_data_alignment = -4;
3153 }
3154
3155 /* NB: FUSED_JCC_PADDING frag must have sufficient room so that it
3156 can be turned into BRANCH_PREFIX frag. */
3157 if (align_branch_prefix_size > MAX_FUSED_JCC_PADDING_SIZE)
3158 abort ();
3159 }
3160
3161 void
3162 i386_print_statistics (FILE *file)
3163 {
3164 hash_print_statistics (file, "i386 opcode", op_hash);
3165 hash_print_statistics (file, "i386 register", reg_hash);
3166 }
3167 \f
3168 #ifdef DEBUG386
3169
3170 /* Debugging routines for md_assemble. */
3171 static void pte (insn_template *);
3172 static void pt (i386_operand_type);
3173 static void pe (expressionS *);
3174 static void ps (symbolS *);
3175
3176 static void
3177 pi (const char *line, i386_insn *x)
3178 {
3179 unsigned int j;
3180
3181 fprintf (stdout, "%s: template ", line);
3182 pte (&x->tm);
3183 fprintf (stdout, " address: base %s index %s scale %x\n",
3184 x->base_reg ? x->base_reg->reg_name : "none",
3185 x->index_reg ? x->index_reg->reg_name : "none",
3186 x->log2_scale_factor);
3187 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
3188 x->rm.mode, x->rm.reg, x->rm.regmem);
3189 fprintf (stdout, " sib: base %x index %x scale %x\n",
3190 x->sib.base, x->sib.index, x->sib.scale);
3191 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
3192 (x->rex & REX_W) != 0,
3193 (x->rex & REX_R) != 0,
3194 (x->rex & REX_X) != 0,
3195 (x->rex & REX_B) != 0);
3196 for (j = 0; j < x->operands; j++)
3197 {
3198 fprintf (stdout, " #%d: ", j + 1);
3199 pt (x->types[j]);
3200 fprintf (stdout, "\n");
3201 if (x->types[j].bitfield.class == Reg
3202 || x->types[j].bitfield.class == RegMMX
3203 || x->types[j].bitfield.class == RegSIMD
3204 || x->types[j].bitfield.class == RegMask
3205 || x->types[j].bitfield.class == SReg
3206 || x->types[j].bitfield.class == RegCR
3207 || x->types[j].bitfield.class == RegDR
3208 || x->types[j].bitfield.class == RegTR
3209 || x->types[j].bitfield.class == RegBND)
3210 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
3211 if (operand_type_check (x->types[j], imm))
3212 pe (x->op[j].imms);
3213 if (operand_type_check (x->types[j], disp))
3214 pe (x->op[j].disps);
3215 }
3216 }
3217
3218 static void
3219 pte (insn_template *t)
3220 {
3221 unsigned int j;
3222 fprintf (stdout, " %d operands ", t->operands);
3223 fprintf (stdout, "opcode %x ", t->base_opcode);
3224 if (t->extension_opcode != None)
3225 fprintf (stdout, "ext %x ", t->extension_opcode);
3226 if (t->opcode_modifier.d)
3227 fprintf (stdout, "D");
3228 if (t->opcode_modifier.w)
3229 fprintf (stdout, "W");
3230 fprintf (stdout, "\n");
3231 for (j = 0; j < t->operands; j++)
3232 {
3233 fprintf (stdout, " #%d type ", j + 1);
3234 pt (t->operand_types[j]);
3235 fprintf (stdout, "\n");
3236 }
3237 }
3238
3239 static void
3240 pe (expressionS *e)
3241 {
3242 fprintf (stdout, " operation %d\n", e->X_op);
3243 fprintf (stdout, " add_number %ld (%lx)\n",
3244 (long) e->X_add_number, (long) e->X_add_number);
3245 if (e->X_add_symbol)
3246 {
3247 fprintf (stdout, " add_symbol ");
3248 ps (e->X_add_symbol);
3249 fprintf (stdout, "\n");
3250 }
3251 if (e->X_op_symbol)
3252 {
3253 fprintf (stdout, " op_symbol ");
3254 ps (e->X_op_symbol);
3255 fprintf (stdout, "\n");
3256 }
3257 }
3258
3259 static void
3260 ps (symbolS *s)
3261 {
3262 fprintf (stdout, "%s type %s%s",
3263 S_GET_NAME (s),
3264 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
3265 segment_name (S_GET_SEGMENT (s)));
3266 }
3267
3268 static struct type_name
3269 {
3270 i386_operand_type mask;
3271 const char *name;
3272 }
3273 const type_names[] =
3274 {
3275 { OPERAND_TYPE_REG8, "r8" },
3276 { OPERAND_TYPE_REG16, "r16" },
3277 { OPERAND_TYPE_REG32, "r32" },
3278 { OPERAND_TYPE_REG64, "r64" },
3279 { OPERAND_TYPE_ACC8, "acc8" },
3280 { OPERAND_TYPE_ACC16, "acc16" },
3281 { OPERAND_TYPE_ACC32, "acc32" },
3282 { OPERAND_TYPE_ACC64, "acc64" },
3283 { OPERAND_TYPE_IMM8, "i8" },
3284 { OPERAND_TYPE_IMM8S, "i8s" },
3285 { OPERAND_TYPE_IMM16, "i16" },
3286 { OPERAND_TYPE_IMM32, "i32" },
3287 { OPERAND_TYPE_IMM32S, "i32s" },
3288 { OPERAND_TYPE_IMM64, "i64" },
3289 { OPERAND_TYPE_IMM1, "i1" },
3290 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
3291 { OPERAND_TYPE_DISP8, "d8" },
3292 { OPERAND_TYPE_DISP16, "d16" },
3293 { OPERAND_TYPE_DISP32, "d32" },
3294 { OPERAND_TYPE_DISP32S, "d32s" },
3295 { OPERAND_TYPE_DISP64, "d64" },
3296 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
3297 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
3298 { OPERAND_TYPE_CONTROL, "control reg" },
3299 { OPERAND_TYPE_TEST, "test reg" },
3300 { OPERAND_TYPE_DEBUG, "debug reg" },
3301 { OPERAND_TYPE_FLOATREG, "FReg" },
3302 { OPERAND_TYPE_FLOATACC, "FAcc" },
3303 { OPERAND_TYPE_SREG, "SReg" },
3304 { OPERAND_TYPE_REGMMX, "rMMX" },
3305 { OPERAND_TYPE_REGXMM, "rXMM" },
3306 { OPERAND_TYPE_REGYMM, "rYMM" },
3307 { OPERAND_TYPE_REGZMM, "rZMM" },
3308 { OPERAND_TYPE_REGMASK, "Mask reg" },
3309 };
3310
3311 static void
3312 pt (i386_operand_type t)
3313 {
3314 unsigned int j;
3315 i386_operand_type a;
3316
3317 for (j = 0; j < ARRAY_SIZE (type_names); j++)
3318 {
3319 a = operand_type_and (t, type_names[j].mask);
3320 if (operand_type_equal (&a, &type_names[j].mask))
3321 fprintf (stdout, "%s, ", type_names[j].name);
3322 }
3323 fflush (stdout);
3324 }
3325
3326 #endif /* DEBUG386 */
3327 \f
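/* Translate a fixup's SIZE, PC-relativity and signedness (plus an
   explicit override OTHER, e.g. set by a @got-style operator) into
   the BFD relocation code to emit, diagnosing impossible
   combinations. */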
3328 static bfd_reloc_code_real_type
3329 reloc (unsigned int size,
3330 int pcrel,
3331 int sign,
3332 bfd_reloc_code_real_type other)
3333 {
3334 if (other != NO_RELOC)
3335 {
3336 reloc_howto_type *rel;
3337
3338 if (size == 8)
3339 switch (other)
3340 {
3341 case BFD_RELOC_X86_64_GOT32:
3342 return BFD_RELOC_X86_64_GOT64;
3343 break;
3344 case BFD_RELOC_X86_64_GOTPLT64:
3345 return BFD_RELOC_X86_64_GOTPLT64;
3346 break;
3347 case BFD_RELOC_X86_64_PLTOFF64:
3348 return BFD_RELOC_X86_64_PLTOFF64;
3349 break;
3350 case BFD_RELOC_X86_64_GOTPC32:
3351 other = BFD_RELOC_X86_64_GOTPC64;
3352 break;
3353 case BFD_RELOC_X86_64_GOTPCREL:
3354 other = BFD_RELOC_X86_64_GOTPCREL64;
3355 break;
3356 case BFD_RELOC_X86_64_TPOFF32:
3357 other = BFD_RELOC_X86_64_TPOFF64;
3358 break;
3359 case BFD_RELOC_X86_64_DTPOFF32:
3360 other = BFD_RELOC_X86_64_DTPOFF64;
3361 break;
3362 default:
3363 break;
3364 }
3365
3366 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3367 if (other == BFD_RELOC_SIZE32)
3368 {
3369 if (size == 8)
3370 other = BFD_RELOC_SIZE64;
3371 if (pcrel)
3372 {
3373 as_bad (_("there are no pc-relative size relocations"));
3374 return NO_RELOC;
3375 }
3376 }
3377 #endif
3378
3379 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
3380 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
3381 sign = -1;
3382
3383 rel = bfd_reloc_type_lookup (stdoutput, other);
3384 if (!rel)
3385 as_bad (_("unknown relocation (%u)"), other);
3386 else if (size != bfd_get_reloc_size (rel))
3387 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
3388 bfd_get_reloc_size (rel),
3389 size);
3390 else if (pcrel && !rel->pc_relative)
3391 as_bad (_("non-pc-relative relocation for pc-relative field"));
3392 else if ((rel->complain_on_overflow == complain_overflow_signed
3393 && !sign)
3394 || (rel->complain_on_overflow == complain_overflow_unsigned
3395 && sign > 0))
3396 as_bad (_("relocated field and relocation type differ in signedness"));
3397 else
3398 return other;
3399 return NO_RELOC;
3400 }
3401
3402 if (pcrel)
3403 {
3404 if (!sign)
3405 as_bad (_("there are no unsigned pc-relative relocations"));
3406 switch (size)
3407 {
3408 case 1: return BFD_RELOC_8_PCREL;
3409 case 2: return BFD_RELOC_16_PCREL;
3410 case 4: return BFD_RELOC_32_PCREL;
3411 case 8: return BFD_RELOC_64_PCREL;
3412 }
3413 as_bad (_("cannot do %u byte pc-relative relocation"), size);
3414 }
3415 else
3416 {
3417 if (sign > 0)
3418 switch (size)
3419 {
3420 case 4: return BFD_RELOC_X86_64_32S;
3421 }
3422 else
3423 switch (size)
3424 {
3425 case 1: return BFD_RELOC_8;
3426 case 2: return BFD_RELOC_16;
3427 case 4: return BFD_RELOC_32;
3428 case 8: return BFD_RELOC_64;
3429 }
3430 as_bad (_("cannot do %s %u byte relocation"),
3431 sign > 0 ? "signed" : "unsigned", size);
3432 }
3433
3434 return NO_RELOC;
3435 }
3436
3437 /* Here we decide which fixups can be adjusted to make them relative to
3438 the beginning of the section instead of the symbol. Basically we need
3439 to make sure that the dynamic relocations are done correctly, so in
3440 some cases we force the original symbol to be used. */
3441
3442 int
3443 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
3444 {
3445 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3446 if (!IS_ELF)
3447 return 1;
3448
3449 /* Don't adjust pc-relative references to merge sections in 64-bit
3450 mode. */
3451 if (use_rela_relocations
3452 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
3453 && fixP->fx_pcrel)
3454 return 0;
3455
3456 /* The x86_64 GOTPCREL relocations are represented as 32bit PCrel
3457 ones and changed later by validate_fix. */
3458 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
3459 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
3460 return 0;
3461
3462 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
3463 for size relocations. */
3464 if (fixP->fx_r_type == BFD_RELOC_SIZE32
3465 || fixP->fx_r_type == BFD_RELOC_SIZE64
3466 || fixP->fx_r_type == BFD_RELOC_386_GOTOFF
3467 || fixP->fx_r_type == BFD_RELOC_386_GOT32
3468 || fixP->fx_r_type == BFD_RELOC_386_GOT32X
3469 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
3470 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
3471 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
3472 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
3473 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
3474 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
3475 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
3476 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
3477 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
3478 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
3479 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
3480 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
3481 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCRELX
3482 || fixP->fx_r_type == BFD_RELOC_X86_64_REX_GOTPCRELX
3483 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
3484 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
3485 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
3486 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
3487 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
3488 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
3489 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
3490 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
3491 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
3492 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
3493 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
3494 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
3495 return 0;
3496 #endif
3497 return 1;
3498 }
3499
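/* Classify an x87 mnemonic for Intel syntax memory operand handling:
   0 = not a math op (including fxsave/fxrstor), 1 = generic FPU op,
   2 = integer op (fi...), 3 = control/state op (fldcw, fnsave,
   etc.). */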
3500 static int
3501 intel_float_operand (const char *mnemonic)
3502 {
3503 /* Note that the value returned is meaningful only for opcodes with (memory)
3504 operands, hence the code here is free to improperly handle opcodes that
3505 have no operands (for better performance and smaller code). */
3506
3507 if (mnemonic[0] != 'f')
3508 return 0; /* non-math */
3509
3510 switch (mnemonic[1])
3511 {
3512 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
3513 the fs segment override prefix are not handled here because no
3514 call path can get opcodes without operands to this point. */
3515 case 'i':
3516 return 2 /* integer op */;
3517 case 'l':
3518 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
3519 return 3; /* fldcw/fldenv */
3520 break;
3521 case 'n':
3522 if (mnemonic[2] != 'o' /* fnop */)
3523 return 3; /* non-waiting control op */
3524 break;
3525 case 'r':
3526 if (mnemonic[2] == 's')
3527 return 3; /* frstor/frstpm */
3528 break;
3529 case 's':
3530 if (mnemonic[2] == 'a')
3531 return 3; /* fsave */
3532 if (mnemonic[2] == 't')
3533 {
3534 switch (mnemonic[3])
3535 {
3536 case 'c': /* fstcw */
3537 case 'd': /* fstdw */
3538 case 'e': /* fstenv */
3539 case 's': /* fsts[gw] */
3540 return 3;
3541 }
3542 }
3543 break;
3544 case 'x':
3545 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
3546 return 0; /* fxsave/fxrstor are not really math ops */
3547 break;
3548 }
3549
3550 return 1;
3551 }
3552
3553 /* Build the VEX prefix. */
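/* Encoding reminder: the 2-byte form is C5 followed by R, vvvv, L and
   pp packed into one byte; the 3-byte form is C4, then inverted R/X/B
   plus the opcode map in byte 1, then W, vvvv, L and pp in byte 2.
   R/X/B and vvvv are stored inverted. */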
3554
3555 static void
3556 build_vex_prefix (const insn_template *t)
3557 {
3558 unsigned int register_specifier;
3559 unsigned int implied_prefix;
3560 unsigned int vector_length;
3561 unsigned int w;
3562
3563 /* Check register specifier. */
3564 if (i.vex.register_specifier)
3565 {
3566 register_specifier =
3567 ~register_number (i.vex.register_specifier) & 0xf;
3568 gas_assert ((i.vex.register_specifier->reg_flags & RegVRex) == 0);
3569 }
3570 else
3571 register_specifier = 0xf;
3572
3573 /* Use 2-byte VEX prefix by swapping destination and source operand
3574 if there is more than one register operand. */
3575 if (i.reg_operands > 1
3576 && i.vec_encoding != vex_encoding_vex3
3577 && i.dir_encoding == dir_encoding_default
3578 && i.operands == i.reg_operands
3579 && operand_type_equal (&i.types[0], &i.types[i.operands - 1])
3580 && i.tm.opcode_modifier.vexopcode == VEX0F
3581 && (i.tm.opcode_modifier.load || i.tm.opcode_modifier.d)
3582 && i.rex == REX_B)
3583 {
3584 unsigned int xchg = i.operands - 1;
3585 union i386_op temp_op;
3586 i386_operand_type temp_type;
3587
3588 temp_type = i.types[xchg];
3589 i.types[xchg] = i.types[0];
3590 i.types[0] = temp_type;
3591 temp_op = i.op[xchg];
3592 i.op[xchg] = i.op[0];
3593 i.op[0] = temp_op;
3594
3595 gas_assert (i.rm.mode == 3);
3596
3597 i.rex = REX_R;
3598 xchg = i.rm.regmem;
3599 i.rm.regmem = i.rm.reg;
3600 i.rm.reg = xchg;
3601
3602 if (i.tm.opcode_modifier.d)
3603 i.tm.base_opcode ^= (i.tm.base_opcode & 0xee) != 0x6e
3604 ? Opcode_SIMD_FloatD : Opcode_SIMD_IntD;
3605 else /* Use the next insn. */
3606 i.tm = t[1];
3607 }
3608
3609 /* Use 2-byte VEX prefix by swapping commutative source operands if there
3610 are no memory operands and at least 3 register ones. */
3611 if (i.reg_operands >= 3
3612 && i.vec_encoding != vex_encoding_vex3
3613 && i.reg_operands == i.operands - i.imm_operands
3614 && i.tm.opcode_modifier.vex
3615 && i.tm.opcode_modifier.commutative
3616 && (i.tm.opcode_modifier.sse2avx || optimize > 1)
3617 && i.rex == REX_B
3618 && i.vex.register_specifier
3619 && !(i.vex.register_specifier->reg_flags & RegRex))
3620 {
3621 unsigned int xchg = i.operands - i.reg_operands;
3622 union i386_op temp_op;
3623 i386_operand_type temp_type;
3624
3625 gas_assert (i.tm.opcode_modifier.vexopcode == VEX0F);
3626 gas_assert (!i.tm.opcode_modifier.sae);
3627 gas_assert (operand_type_equal (&i.types[i.operands - 2],
3628 &i.types[i.operands - 3]));
3629 gas_assert (i.rm.mode == 3);
3630
3631 temp_type = i.types[xchg];
3632 i.types[xchg] = i.types[xchg + 1];
3633 i.types[xchg + 1] = temp_type;
3634 temp_op = i.op[xchg];
3635 i.op[xchg] = i.op[xchg + 1];
3636 i.op[xchg + 1] = temp_op;
3637
3638 i.rex = 0;
3639 xchg = i.rm.regmem | 8;
3640 i.rm.regmem = ~register_specifier & 0xf;
3641 gas_assert (!(i.rm.regmem & 8));
3642 i.vex.register_specifier += xchg - i.rm.regmem;
3643 register_specifier = ~xchg & 0xf;
3644 }
3645
3646 if (i.tm.opcode_modifier.vex == VEXScalar)
3647 vector_length = avxscalar;
3648 else if (i.tm.opcode_modifier.vex == VEX256)
3649 vector_length = 1;
3650 else
3651 {
3652 unsigned int op;
3653
3654 /* Determine vector length from the last multi-length vector
3655 operand. */
3656 vector_length = 0;
3657 for (op = t->operands; op--;)
3658 if (t->operand_types[op].bitfield.xmmword
3659 && t->operand_types[op].bitfield.ymmword
3660 && i.types[op].bitfield.ymmword)
3661 {
3662 vector_length = 1;
3663 break;
3664 }
3665 }
3666
3667 switch ((i.tm.base_opcode >> 8) & 0xff)
3668 {
3669 case 0:
3670 implied_prefix = 0;
3671 break;
3672 case DATA_PREFIX_OPCODE:
3673 implied_prefix = 1;
3674 break;
3675 case REPE_PREFIX_OPCODE:
3676 implied_prefix = 2;
3677 break;
3678 case REPNE_PREFIX_OPCODE:
3679 implied_prefix = 3;
3680 break;
3681 default:
3682 abort ();
3683 }
3684
3685 /* Check the REX.W bit and VEXW. */
3686 if (i.tm.opcode_modifier.vexw == VEXWIG)
3687 w = (vexwig == vexw1 || (i.rex & REX_W)) ? 1 : 0;
3688 else if (i.tm.opcode_modifier.vexw)
3689 w = i.tm.opcode_modifier.vexw == VEXW1 ? 1 : 0;
3690 else
3691 w = (flag_code == CODE_64BIT ? i.rex & REX_W : vexwig == vexw1) ? 1 : 0;
3692
3693 /* Use 2-byte VEX prefix if possible. */
3694 if (w == 0
3695 && i.vec_encoding != vex_encoding_vex3
3696 && i.tm.opcode_modifier.vexopcode == VEX0F
3697 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
3698 {
3699 /* 2-byte VEX prefix. */
3700 unsigned int r;
3701
3702 i.vex.length = 2;
3703 i.vex.bytes[0] = 0xc5;
3704
3705 /* Check the REX.R bit. */
3706 r = (i.rex & REX_R) ? 0 : 1;
3707 i.vex.bytes[1] = (r << 7
3708 | register_specifier << 3
3709 | vector_length << 2
3710 | implied_prefix);
3711 }
3712 else
3713 {
3714 /* 3-byte VEX prefix. */
3715 unsigned int m;
3716
3717 i.vex.length = 3;
3718
3719 switch (i.tm.opcode_modifier.vexopcode)
3720 {
3721 case VEX0F:
3722 m = 0x1;
3723 i.vex.bytes[0] = 0xc4;
3724 break;
3725 case VEX0F38:
3726 m = 0x2;
3727 i.vex.bytes[0] = 0xc4;
3728 break;
3729 case VEX0F3A:
3730 m = 0x3;
3731 i.vex.bytes[0] = 0xc4;
3732 break;
3733 case XOP08:
3734 m = 0x8;
3735 i.vex.bytes[0] = 0x8f;
3736 break;
3737 case XOP09:
3738 m = 0x9;
3739 i.vex.bytes[0] = 0x8f;
3740 break;
3741 case XOP0A:
3742 m = 0xa;
3743 i.vex.bytes[0] = 0x8f;
3744 break;
3745 default:
3746 abort ();
3747 }
3748
3749 /* The high 3 bits of the second VEX byte are 1's complement
3750 of RXB bits from REX. */
3751 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
3752
3753 i.vex.bytes[2] = (w << 7
3754 | register_specifier << 3
3755 | vector_length << 2
3756 | implied_prefix);
3757 }
3758 }
3759
3760 static INLINE bfd_boolean
3761 is_evex_encoding (const insn_template *t)
3762 {
3763 return t->opcode_modifier.evex || t->opcode_modifier.disp8memshift
3764 || t->opcode_modifier.broadcast || t->opcode_modifier.masking
3765 || t->opcode_modifier.sae;
3766 }
3767
3768 static INLINE bfd_boolean
3769 is_any_vex_encoding (const insn_template *t)
3770 {
3771 return t->opcode_modifier.vex || t->opcode_modifier.vexopcode
3772 || is_evex_encoding (t);
3773 }
3774
3775 /* Build the EVEX prefix. */
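/* Encoding reminder: EVEX is always 4 bytes: 0x62, then the inverted
   R/X/B and R' bits plus the opcode map, then W, the inverted vvvv
   register specifier, a fixed 1 (U) and the pp legacy-prefix bits,
   and finally z, L'L, b, inverted V' and the aaa mask field. */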
3776
3777 static void
3778 build_evex_prefix (void)
3779 {
3780 unsigned int register_specifier;
3781 unsigned int implied_prefix;
3782 unsigned int m, w;
3783 rex_byte vrex_used = 0;
3784
3785 /* Check register specifier. */
3786 if (i.vex.register_specifier)
3787 {
3788 gas_assert ((i.vrex & REX_X) == 0);
3789
3790 register_specifier = i.vex.register_specifier->reg_num;
3791 if ((i.vex.register_specifier->reg_flags & RegRex))
3792 register_specifier += 8;
3793 /* The upper 16 registers are encoded in the fourth byte of the
3794 EVEX prefix. */
3795 if (!(i.vex.register_specifier->reg_flags & RegVRex))
3796 i.vex.bytes[3] = 0x8;
3797 register_specifier = ~register_specifier & 0xf;
3798 }
3799 else
3800 {
3801 register_specifier = 0xf;
3802
3803 /* Encode upper 16 vector index register in the fourth byte of
3804 the EVEX prefix. */
3805 if (!(i.vrex & REX_X))
3806 i.vex.bytes[3] = 0x8;
3807 else
3808 vrex_used |= REX_X;
3809 }
3810
3811 switch ((i.tm.base_opcode >> 8) & 0xff)
3812 {
3813 case 0:
3814 implied_prefix = 0;
3815 break;
3816 case DATA_PREFIX_OPCODE:
3817 implied_prefix = 1;
3818 break;
3819 case REPE_PREFIX_OPCODE:
3820 implied_prefix = 2;
3821 break;
3822 case REPNE_PREFIX_OPCODE:
3823 implied_prefix = 3;
3824 break;
3825 default:
3826 abort ();
3827 }
3828
3829 /* 4 byte EVEX prefix. */
3830 i.vex.length = 4;
3831 i.vex.bytes[0] = 0x62;
3832
3833 /* mmmm bits. */
3834 switch (i.tm.opcode_modifier.vexopcode)
3835 {
3836 case VEX0F:
3837 m = 1;
3838 break;
3839 case VEX0F38:
3840 m = 2;
3841 break;
3842 case VEX0F3A:
3843 m = 3;
3844 break;
3845 default:
3846 abort ();
3847 break;
3848 }
3849
3850 /* The high 3 bits of the second EVEX byte are 1's complement of RXB
3851 bits from REX. */
3852 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
3853
3854 /* The fifth bit of the second EVEX byte is 1's complement of the
3855 REX_R bit in VREX. */
3856 if (!(i.vrex & REX_R))
3857 i.vex.bytes[1] |= 0x10;
3858 else
3859 vrex_used |= REX_R;
3860
3861 if ((i.reg_operands + i.imm_operands) == i.operands)
3862 {
3863 /* When all operands are registers, the REX_X bit in REX is not
3864 used. We reuse it to encode the upper 16 registers, which is
3865 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3866 as 1's complement. */
3867 if ((i.vrex & REX_B))
3868 {
3869 vrex_used |= REX_B;
3870 i.vex.bytes[1] &= ~0x40;
3871 }
3872 }
3873
3874 /* EVEX instructions shouldn't need the REX prefix. */
3875 i.vrex &= ~vrex_used;
3876 gas_assert (i.vrex == 0);
3877
3878 /* Check the REX.W bit and VEXW. */
3879 if (i.tm.opcode_modifier.vexw == VEXWIG)
3880 w = (evexwig == evexw1 || (i.rex & REX_W)) ? 1 : 0;
3881 else if (i.tm.opcode_modifier.vexw)
3882 w = i.tm.opcode_modifier.vexw == VEXW1 ? 1 : 0;
3883 else
3884 w = (flag_code == CODE_64BIT ? i.rex & REX_W : evexwig == evexw1) ? 1 : 0;
3885
3886 /* Encode the U bit. */
3887 implied_prefix |= 0x4;
3888
3889 /* The third byte of the EVEX prefix. */
3890 i.vex.bytes[2] = (w << 7 | register_specifier << 3 | implied_prefix);
3891
3892 /* The fourth byte of the EVEX prefix. */
3893 /* The zeroing-masking bit. */
3894 if (i.mask && i.mask->zeroing)
3895 i.vex.bytes[3] |= 0x80;
3896
3897 /* Encode the vector length and broadcast bit only when there is no RC. */
3898 if (!i.rounding)
3899 {
3900 /* Encode the vector length. */
3901 unsigned int vec_length;
3902
3903 if (!i.tm.opcode_modifier.evex
3904 || i.tm.opcode_modifier.evex == EVEXDYN)
3905 {
3906 unsigned int op;
3907
3908 /* Determine vector length from the last multi-length vector
3909 operand. */
3910 for (op = i.operands; op--;)
3911 if (i.tm.operand_types[op].bitfield.xmmword
3912 + i.tm.operand_types[op].bitfield.ymmword
3913 + i.tm.operand_types[op].bitfield.zmmword > 1)
3914 {
3915 if (i.types[op].bitfield.zmmword)
3916 {
3917 i.tm.opcode_modifier.evex = EVEX512;
3918 break;
3919 }
3920 else if (i.types[op].bitfield.ymmword)
3921 {
3922 i.tm.opcode_modifier.evex = EVEX256;
3923 break;
3924 }
3925 else if (i.types[op].bitfield.xmmword)
3926 {
3927 i.tm.opcode_modifier.evex = EVEX128;
3928 break;
3929 }
3930 else if (i.broadcast && (int) op == i.broadcast->operand)
3931 {
3932 switch (i.broadcast->bytes)
3933 {
3934 case 64:
3935 i.tm.opcode_modifier.evex = EVEX512;
3936 break;
3937 case 32:
3938 i.tm.opcode_modifier.evex = EVEX256;
3939 break;
3940 case 16:
3941 i.tm.opcode_modifier.evex = EVEX128;
3942 break;
3943 default:
3944 abort ();
3945 }
3946 break;
3947 }
3948 }
3949
3950 if (op >= MAX_OPERANDS)
3951 abort ();
3952 }
3953
3954 switch (i.tm.opcode_modifier.evex)
3955 {
3956 case EVEXLIG: /* LL' is ignored */
3957 vec_length = evexlig << 5;
3958 break;
3959 case EVEX128:
3960 vec_length = 0 << 5;
3961 break;
3962 case EVEX256:
3963 vec_length = 1 << 5;
3964 break;
3965 case EVEX512:
3966 vec_length = 2 << 5;
3967 break;
3968 default:
3969 abort ();
3970 break;
3971 }
3972 i.vex.bytes[3] |= vec_length;
3973 /* Encode the broadcast bit. */
3974 if (i.broadcast)
3975 i.vex.bytes[3] |= 0x10;
3976 }
3977 else
3978 {
3979 if (i.rounding->type != saeonly)
3980 i.vex.bytes[3] |= 0x10 | (i.rounding->type << 5);
3981 else
3982 i.vex.bytes[3] |= 0x10 | (evexrcig << 5);
3983 }
3984
3985 if (i.mask && i.mask->mask)
3986 i.vex.bytes[3] |= i.mask->mask->reg_num;
3987 }
3988
3989 static void
3990 process_immext (void)
3991 {
3992 expressionS *exp;
3993
3994 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3995 which is coded in the same place as an 8-bit immediate field
3996 would be. Here we fake an 8-bit immediate operand from the
3997 opcode suffix stored in tm.extension_opcode.
3998
3999 AVX instructions also use this encoding for some
4000 3-argument instructions. */
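/* An illustrative case: 3DNow! pfadd is 0x0f 0x0f /r with suffix
   byte 0x9e; that suffix is kept in tm.extension_opcode and emitted
   below as if it were an Imm8 operand. */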
4001
4002 gas_assert (i.imm_operands <= 1
4003 && (i.operands <= 2
4004 || (is_any_vex_encoding (&i.tm)
4005 && i.operands <= 4)));
4006
4007 exp = &im_expressions[i.imm_operands++];
4008 i.op[i.operands].imms = exp;
4009 i.types[i.operands] = imm8;
4010 i.operands++;
4011 exp->X_op = O_constant;
4012 exp->X_add_number = i.tm.extension_opcode;
4013 i.tm.extension_opcode = None;
4014 }
4015
4016
4017 static int
4018 check_hle (void)
4019 {
4020 switch (i.tm.opcode_modifier.hleprefixok)
4021 {
4022 default:
4023 abort ();
4024 case HLEPrefixNone:
4025 as_bad (_("invalid instruction `%s' after `%s'"),
4026 i.tm.name, i.hle_prefix);
4027 return 0;
4028 case HLEPrefixLock:
4029 if (i.prefix[LOCK_PREFIX])
4030 return 1;
4031 as_bad (_("missing `lock' with `%s'"), i.hle_prefix);
4032 return 0;
4033 case HLEPrefixAny:
4034 return 1;
4035 case HLEPrefixRelease:
4036 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
4037 {
4038 as_bad (_("instruction `%s' after `xacquire' not allowed"),
4039 i.tm.name);
4040 return 0;
4041 }
4042 if (i.mem_operands == 0 || !(i.flags[i.operands - 1] & Operand_Mem))
4043 {
4044 as_bad (_("memory destination needed for instruction `%s'"
4045 " after `xrelease'"), i.tm.name);
4046 return 0;
4047 }
4048 return 1;
4049 }
4050 }
4051
4052 /* Try the shortest encoding by shortening operand size. */
4053
4054 static void
4055 optimize_encoding (void)
4056 {
4057 unsigned int j;
4058
4059 if (optimize_for_space
4060 && !is_any_vex_encoding (&i.tm)
4061 && i.reg_operands == 1
4062 && i.imm_operands == 1
4063 && !i.types[1].bitfield.byte
4064 && i.op[0].imms->X_op == O_constant
4065 && fits_in_imm7 (i.op[0].imms->X_add_number)
4066 && (i.tm.base_opcode == 0xa8
4067 || (i.tm.base_opcode == 0xf6
4068 && i.tm.extension_opcode == 0x0)))
4069 {
4070 /* Optimize: -Os:
4071 test $imm7, %r64/%r32/%r16 -> test $imm7, %r8
4072 */
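/* For example, test $0x40, %eax assembles to a9 40 00 00 00
   (5 bytes), while the rewritten test $0x40, %al is a8 40
   (2 bytes). */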
4073 unsigned int base_regnum = i.op[1].regs->reg_num;
4074 if (flag_code == CODE_64BIT || base_regnum < 4)
4075 {
4076 i.types[1].bitfield.byte = 1;
4077 /* Ignore the suffix. */
4078 i.suffix = 0;
4079 /* Convert to byte registers. */
4080 if (i.types[1].bitfield.word)
4081 j = 16;
4082 else if (i.types[1].bitfield.dword)
4083 j = 32;
4084 else
4085 j = 48;
4086 if (!(i.op[1].regs->reg_flags & RegRex) && base_regnum < 4)
4087 j += 8;
4088 i.op[1].regs -= j;
4089 }
4090 }
4091 else if (flag_code == CODE_64BIT
4092 && !is_any_vex_encoding (&i.tm)
4093 && ((i.types[1].bitfield.qword
4094 && i.reg_operands == 1
4095 && i.imm_operands == 1
4096 && i.op[0].imms->X_op == O_constant
4097 && ((i.tm.base_opcode == 0xb8
4098 && i.tm.extension_opcode == None
4099 && fits_in_unsigned_long (i.op[0].imms->X_add_number))
4100 || (fits_in_imm31 (i.op[0].imms->X_add_number)
4101 && ((i.tm.base_opcode == 0x24
4102 || i.tm.base_opcode == 0xa8)
4103 || (i.tm.base_opcode == 0x80
4104 && i.tm.extension_opcode == 0x4)
4105 || ((i.tm.base_opcode == 0xf6
4106 || (i.tm.base_opcode | 1) == 0xc7)
4107 && i.tm.extension_opcode == 0x0)))
4108 || (fits_in_imm7 (i.op[0].imms->X_add_number)
4109 && i.tm.base_opcode == 0x83
4110 && i.tm.extension_opcode == 0x4)))
4111 || (i.types[0].bitfield.qword
4112 && ((i.reg_operands == 2
4113 && i.op[0].regs == i.op[1].regs
4114 && (i.tm.base_opcode == 0x30
4115 || i.tm.base_opcode == 0x28))
4116 || (i.reg_operands == 1
4117 && i.operands == 1
4118 && i.tm.base_opcode == 0x30)))))
4119 {
4120 /* Optimize: -O:
4121 andq $imm31, %r64 -> andl $imm31, %r32
4122 andq $imm7, %r64 -> andl $imm7, %r32
4123 testq $imm31, %r64 -> testl $imm31, %r32
4124 xorq %r64, %r64 -> xorl %r32, %r32
4125 subq %r64, %r64 -> subl %r32, %r32
4126 movq $imm31, %r64 -> movl $imm31, %r32
4127 movq $imm32, %r64 -> movl $imm32, %r32
4128 */
4129 i.tm.opcode_modifier.norex64 = 1;
4130 if (i.tm.base_opcode == 0xb8 || (i.tm.base_opcode | 1) == 0xc7)
4131 {
4132 /* Handle
4133 movq $imm31, %r64 -> movl $imm31, %r32
4134 movq $imm32, %r64 -> movl $imm32, %r32
4135 */
4136 i.tm.operand_types[0].bitfield.imm32 = 1;
4137 i.tm.operand_types[0].bitfield.imm32s = 0;
4138 i.tm.operand_types[0].bitfield.imm64 = 0;
4139 i.types[0].bitfield.imm32 = 1;
4140 i.types[0].bitfield.imm32s = 0;
4141 i.types[0].bitfield.imm64 = 0;
4142 i.types[1].bitfield.dword = 1;
4143 i.types[1].bitfield.qword = 0;
4144 if ((i.tm.base_opcode | 1) == 0xc7)
4145 {
4146 /* Handle
4147 movq $imm31, %r64 -> movl $imm31, %r32
4148 */
4149 i.tm.base_opcode = 0xb8;
4150 i.tm.extension_opcode = None;
4151 i.tm.opcode_modifier.w = 0;
4152 i.tm.opcode_modifier.modrm = 0;
4153 }
4154 }
4155 }
4156 else if (optimize > 1
4157 && !optimize_for_space
4158 && !is_any_vex_encoding (&i.tm)
4159 && i.reg_operands == 2
4160 && i.op[0].regs == i.op[1].regs
4161 && ((i.tm.base_opcode & ~(Opcode_D | 1)) == 0x8
4162 || (i.tm.base_opcode & ~(Opcode_D | 1)) == 0x20)
4163 && (flag_code != CODE_64BIT || !i.types[0].bitfield.dword))
4164 {
4165 /* Optimize: -O2:
4166 andb %rN, %rN -> testb %rN, %rN
4167 andw %rN, %rN -> testw %rN, %rN
4168 andq %rN, %rN -> testq %rN, %rN
4169 orb %rN, %rN -> testb %rN, %rN
4170 orw %rN, %rN -> testw %rN, %rN
4171 orq %rN, %rN -> testq %rN, %rN
4172
4173 and outside of 64-bit mode
4174
4175 andl %rN, %rN -> testl %rN, %rN
4176 orl %rN, %rN -> testl %rN, %rN
4177 */
4178 i.tm.base_opcode = 0x84 | (i.tm.base_opcode & 1);
4179 }
4180 else if (i.reg_operands == 3
4181 && i.op[0].regs == i.op[1].regs
4182 && !i.types[2].bitfield.xmmword
4183 && (i.tm.opcode_modifier.vex
4184 || ((!i.mask || i.mask->zeroing)
4185 && !i.rounding
4186 && is_evex_encoding (&i.tm)
4187 && (i.vec_encoding != vex_encoding_evex
4188 || cpu_arch_isa_flags.bitfield.cpuavx512vl
4189 || i.tm.cpu_flags.bitfield.cpuavx512vl
4190 || (i.tm.operand_types[2].bitfield.zmmword
4191 && i.types[2].bitfield.ymmword))))
4192 && ((i.tm.base_opcode == 0x55
4193 || i.tm.base_opcode == 0x6655
4194 || i.tm.base_opcode == 0x66df
4195 || i.tm.base_opcode == 0x57
4196 || i.tm.base_opcode == 0x6657
4197 || i.tm.base_opcode == 0x66ef
4198 || i.tm.base_opcode == 0x66f8
4199 || i.tm.base_opcode == 0x66f9
4200 || i.tm.base_opcode == 0x66fa
4201 || i.tm.base_opcode == 0x66fb
4202 || i.tm.base_opcode == 0x42
4203 || i.tm.base_opcode == 0x6642
4204 || i.tm.base_opcode == 0x47
4205 || i.tm.base_opcode == 0x6647)
4206 && i.tm.extension_opcode == None))
4207 {
4208 /* Optimize: -O1:
4209 VOP, one of vandnps, vandnpd, vxorps, vxorpd, vpsubb, vpsubd,
4210 vpsubq and vpsubw:
4211 EVEX VOP %zmmM, %zmmM, %zmmN
4212 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4213 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4214 EVEX VOP %ymmM, %ymmM, %ymmN
4215 -> VEX VOP %xmmM, %xmmM, %xmmN (M and N < 16)
4216 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4217 VEX VOP %ymmM, %ymmM, %ymmN
4218 -> VEX VOP %xmmM, %xmmM, %xmmN
4219 VOP, one of vpandn and vpxor:
4220 VEX VOP %ymmM, %ymmM, %ymmN
4221 -> VEX VOP %xmmM, %xmmM, %xmmN
4222 VOP, one of vpandnd and vpandnq:
4223 EVEX VOP %zmmM, %zmmM, %zmmN
4224 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4225 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4226 EVEX VOP %ymmM, %ymmM, %ymmN
4227 -> VEX vpandn %xmmM, %xmmM, %xmmN (M and N < 16)
4228 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4229 VOP, one of vpxord and vpxorq:
4230 EVEX VOP %zmmM, %zmmM, %zmmN
4231 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4232 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4233 EVEX VOP %ymmM, %ymmM, %ymmN
4234 -> VEX vpxor %xmmM, %xmmM, %xmmN (M and N < 16)
4235 -> EVEX VOP %xmmM, %xmmM, %xmmN (M || N >= 16) (-O2)
4236 VOP, one of kxord and kxorq:
4237 VEX VOP %kM, %kM, %kN
4238 -> VEX kxorw %kM, %kM, %kN
4239 VOP, one of kandnd and kandnq:
4240 VEX VOP %kM, %kM, %kN
4241 -> VEX kandnw %kM, %kM, %kN
4242 */
4243 if (is_evex_encoding (&i.tm))
4244 {
4245 if (i.vec_encoding != vex_encoding_evex)
4246 {
4247 i.tm.opcode_modifier.vex = VEX128;
4248 i.tm.opcode_modifier.vexw = VEXW0;
4249 i.tm.opcode_modifier.evex = 0;
4250 }
4251 else if (optimize > 1)
4252 i.tm.opcode_modifier.evex = EVEX128;
4253 else
4254 return;
4255 }
4256 else if (i.tm.operand_types[0].bitfield.class == RegMask)
4257 {
4258 i.tm.base_opcode &= 0xff;
4259 i.tm.opcode_modifier.vexw = VEXW0;
4260 }
4261 else
4262 i.tm.opcode_modifier.vex = VEX128;
4263
4264 if (i.tm.opcode_modifier.vex)
4265 for (j = 0; j < 3; j++)
4266 {
4267 i.types[j].bitfield.xmmword = 1;
4268 i.types[j].bitfield.ymmword = 0;
4269 }
4270 }
4271 else if (i.vec_encoding != vex_encoding_evex
4272 && !i.types[0].bitfield.zmmword
4273 && !i.types[1].bitfield.zmmword
4274 && !i.mask
4275 && !i.broadcast
4276 && is_evex_encoding (&i.tm)
4277 && ((i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0x666f
4278 || (i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0xf36f
4279 || (i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0xf26f
4280 || (i.tm.base_opcode & ~4) == 0x66db
4281 || (i.tm.base_opcode & ~4) == 0x66eb)
4282 && i.tm.extension_opcode == None)
4283 {
4284 /* Optimize: -O1:
4285 VOP, one of vmovdqa32, vmovdqa64, vmovdqu8, vmovdqu16,
4286 vmovdqu32 and vmovdqu64:
4287 EVEX VOP %xmmM, %xmmN
4288 -> VEX vmovdqa|vmovdqu %xmmM, %xmmN (M and N < 16)
4289 EVEX VOP %ymmM, %ymmN
4290 -> VEX vmovdqa|vmovdqu %ymmM, %ymmN (M and N < 16)
4291 EVEX VOP %xmmM, mem
4292 -> VEX vmovdqa|vmovdqu %xmmM, mem (M < 16)
4293 EVEX VOP %ymmM, mem
4294 -> VEX vmovdqa|vmovdqu %ymmM, mem (M < 16)
4295 EVEX VOP mem, %xmmN
4296 -> VEX vmovdqa|vmovdqu mem, %xmmN (N < 16)
4297 EVEX VOP mem, %ymmN
4298 -> VEX vmovdqa|vmovdqu mem, %ymmN (N < 16)
4299 VOP, one of vpand, vpandn, vpor, vpxor:
4300 EVEX VOP{d,q} %xmmL, %xmmM, %xmmN
4301 -> VEX VOP %xmmL, %xmmM, %xmmN (L, M, and N < 16)
4302 EVEX VOP{d,q} %ymmL, %ymmM, %ymmN
4303 -> VEX VOP %ymmL, %ymmM, %ymmN (L, M, and N < 16)
4304 EVEX VOP{d,q} mem, %xmmM, %xmmN
4305 -> VEX VOP mem, %xmmM, %xmmN (M and N < 16)
4306 EVEX VOP{d,q} mem, %ymmM, %ymmN
4307 -> VEX VOP mem, %ymmM, %ymmN (M and N < 16)
4308 */
4309 for (j = 0; j < i.operands; j++)
4310 if (operand_type_check (i.types[j], disp)
4311 && i.op[j].disps->X_op == O_constant)
4312 {
4313 /* Since the VEX prefix has 2 or 3 bytes, the EVEX prefix
4314 has 4 bytes, EVEX Disp8 has 1 byte and VEX Disp32 has 4
4315 bytes, we choose EVEX Disp8 over VEX Disp32. */
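/* Illustrative case, assuming fits_in_disp8 honors i.memshift:
   n == 0x80 with memshift 4 fits as EVEX Disp8 (0x80 >> 4 == 8)
   but not as VEX disp8, so the sizes disagree and we keep the
   EVEX encoding by returning below. */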
4316 int evex_disp8, vex_disp8;
4317 unsigned int memshift = i.memshift;
4318 offsetT n = i.op[j].disps->X_add_number;
4319
4320 evex_disp8 = fits_in_disp8 (n);
4321 i.memshift = 0;
4322 vex_disp8 = fits_in_disp8 (n);
4323 if (evex_disp8 != vex_disp8)
4324 {
4325 i.memshift = memshift;
4326 return;
4327 }
4328
4329 i.types[j].bitfield.disp8 = vex_disp8;
4330 break;
4331 }
4332 if ((i.tm.base_opcode & ~Opcode_SIMD_IntD) == 0xf26f)
4333 i.tm.base_opcode ^= 0xf36f ^ 0xf26f;
4334 i.tm.opcode_modifier.vex
4335 = i.types[0].bitfield.ymmword ? VEX256 : VEX128;
4336 i.tm.opcode_modifier.vexw = VEXW0;
4337 /* VPAND, VPOR, and VPXOR are commutative. */
4338 if (i.reg_operands == 3 && i.tm.base_opcode != 0x66df)
4339 i.tm.opcode_modifier.commutative = 1;
4340 i.tm.opcode_modifier.evex = 0;
4341 i.tm.opcode_modifier.masking = 0;
4342 i.tm.opcode_modifier.broadcast = 0;
4343 i.tm.opcode_modifier.disp8memshift = 0;
4344 i.memshift = 0;
4345 if (j < i.operands)
4346 i.types[j].bitfield.disp8
4347 = fits_in_disp8 (i.op[j].disps->X_add_number);
4348 }
4349 }
4350
4351 /* Return non-zero for load instruction. */
4352
4353 static int
4354 load_insn_p (void)
4355 {
4356 unsigned int dest;
4357 int any_vex_p = is_any_vex_encoding (&i.tm);
4358 unsigned int base_opcode = i.tm.base_opcode | 1;
4359
4360 if (!any_vex_p)
4361 {
4362 /* Anysize insns: lea, invlpg, clflush, prefetchnta, prefetcht0,
4363 prefetcht1, prefetcht2, prefetchw, bndmk, bndcl, bndcu, bndcn,
4364 bndstx, bndldx, prefetchwt1, clflushopt, clwb, cldemote. */
4365 if (i.tm.opcode_modifier.anysize)
4366 return 0;
4367
4368 /* pop, popf, popa. */
4369 if (strcmp (i.tm.name, "pop") == 0
4370 || i.tm.base_opcode == 0x9d
4371 || i.tm.base_opcode == 0x61)
4372 return 1;
4373
4374 /* movs, cmps, lods, scas. */
4375 if ((i.tm.base_opcode | 0xb) == 0xaf)
4376 return 1;
4377
4378 /* outs, xlatb. */
4379 if (base_opcode == 0x6f
4380 || i.tm.base_opcode == 0xd7)
4381 return 1;
4382 /* NB: AMD-specific insns with implicit memory operands are
4383 intentionally not covered. */
4384 }
4385
4386 /* No memory operand. */
4387 if (!i.mem_operands)
4388 return 0;
4389
4390 if (any_vex_p)
4391 {
4392 /* vldmxcsr. */
4393 if (i.tm.base_opcode == 0xae
4394 && i.tm.opcode_modifier.vex
4395 && i.tm.opcode_modifier.vexopcode == VEX0F
4396 && i.tm.extension_opcode == 2)
4397 return 1;
4398 }
4399 else
4400 {
4401 /* test, not, neg, mul, imul, div, idiv. */
4402 if ((i.tm.base_opcode == 0xf6 || i.tm.base_opcode == 0xf7)
4403 && i.tm.extension_opcode != 1)
4404 return 1;
4405
4406 /* inc, dec. */
4407 if (base_opcode == 0xff && i.tm.extension_opcode <= 1)
4408 return 1;
4409
4410 /* add, or, adc, sbb, and, sub, xor, cmp. */
4411 if (i.tm.base_opcode >= 0x80 && i.tm.base_opcode <= 0x83)
4412 return 1;
4413
4414 /* bt, bts, btr, btc. */
4415 if (i.tm.base_opcode == 0xfba
4416 && (i.tm.extension_opcode >= 4 && i.tm.extension_opcode <= 7))
4417 return 1;
4418
4419 /* rol, ror, rcl, rcr, shl/sal, shr, sar. */
4420 if ((base_opcode == 0xc1
4421 || (i.tm.base_opcode >= 0xd0 && i.tm.base_opcode <= 0xd3))
4422 && i.tm.extension_opcode != 6)
4423 return 1;
4424
4425 /* cmpxchg8b, cmpxchg16b, xrstors. */
4426 if (i.tm.base_opcode == 0xfc7
4427 && (i.tm.extension_opcode == 1 || i.tm.extension_opcode == 3))
4428 return 1;
4429
4430 /* fxrstor, ldmxcsr, xrstor. */
4431 if (i.tm.base_opcode == 0xfae
4432 && (i.tm.extension_opcode == 1
4433 || i.tm.extension_opcode == 2
4434 || i.tm.extension_opcode == 5))
4435 return 1;
4436
4437 /* lgdt, lidt, lmsw. */
4438 if (i.tm.base_opcode == 0xf01
4439 && (i.tm.extension_opcode == 2
4440 || i.tm.extension_opcode == 3
4441 || i.tm.extension_opcode == 6))
4442 return 1;
4443
4444 /* vmptrld */
4445 if (i.tm.base_opcode == 0xfc7
4446 && i.tm.extension_opcode == 6)
4447 return 1;
4448
4449 /* Check for x87 instructions. */
4450 if (i.tm.base_opcode >= 0xd8 && i.tm.base_opcode <= 0xdf)
4451 {
4452 /* Skip fst, fstp, fstenv, fstcw. */
4453 if (i.tm.base_opcode == 0xd9
4454 && (i.tm.extension_opcode == 2
4455 || i.tm.extension_opcode == 3
4456 || i.tm.extension_opcode == 6
4457 || i.tm.extension_opcode == 7))
4458 return 0;
4459
4460 /* Skip fisttp, fist, fistp, fstp. */
4461 if (i.tm.base_opcode == 0xdb
4462 && (i.tm.extension_opcode == 1
4463 || i.tm.extension_opcode == 2
4464 || i.tm.extension_opcode == 3
4465 || i.tm.extension_opcode == 7))
4466 return 0;
4467
4468 /* Skip fisttp, fst, fstp, fsave, fstsw. */
4469 if (i.tm.base_opcode == 0xdd
4470 && (i.tm.extension_opcode == 1
4471 || i.tm.extension_opcode == 2
4472 || i.tm.extension_opcode == 3
4473 || i.tm.extension_opcode == 6
4474 || i.tm.extension_opcode == 7))
4475 return 0;
4476
4477 /* Skip fisttp, fist, fistp, fbstp, fistp. */
4478 if (i.tm.base_opcode == 0xdf
4479 && (i.tm.extension_opcode == 1
4480 || i.tm.extension_opcode == 2
4481 || i.tm.extension_opcode == 3
4482 || i.tm.extension_opcode == 6
4483 || i.tm.extension_opcode == 7))
4484 return 0;
4485
4486 return 1;
4487 }
4488 }
4489
4490 dest = i.operands - 1;
4491
4492 /* Check fake imm8 operand and 3 source operands. */
4493 if ((i.tm.opcode_modifier.immext
4494 || i.tm.opcode_modifier.vexsources == VEX3SOURCES)
4495 && i.types[dest].bitfield.imm8)
4496 dest--;
4497
4498 /* add, or, adc, sbb, and, sub, xor, cmp, test, xchg, xadd */
4499 if (!any_vex_p
4500 && (base_opcode == 0x1
4501 || base_opcode == 0x9
4502 || base_opcode == 0x11
4503 || base_opcode == 0x19
4504 || base_opcode == 0x21
4505 || base_opcode == 0x29
4506 || base_opcode == 0x31
4507 || base_opcode == 0x39
4508 || (i.tm.base_opcode >= 0x84 && i.tm.base_opcode <= 0x87)
4509 || base_opcode == 0xfc1))
4510 return 1;
4511
4512 /* Check for load instruction. */
4513 return (i.types[dest].bitfield.class != ClassNone
4514 || i.types[dest].bitfield.instance == Accum);
4515 }
4516
4517 /* Output lfence (0x0f 0xae 0xe8) after instruction. */
4518
4519 static void
4520 insert_lfence_after (void)
4521 {
4522 if (lfence_after_load && load_insn_p ())
4523 {
4524 /* There are also two REP string instructions that require
4525 special treatment. Specifically, the compare string (CMPS)
4526 and scan string (SCAS) instructions set EFLAGS in a manner
4527 that depends on the data being compared/scanned. When used
4528 with a REP prefix, the number of iterations may therefore
4529 vary depending on this data. If the data is a program secret
4530 chosen by the adversary using an LVI method,
4531 then this data-dependent behavior may leak some aspect
4532 of the secret. */
4533 if (((i.tm.base_opcode | 0x1) == 0xa7
4534 || (i.tm.base_opcode | 0x1) == 0xaf)
4535 && i.prefix[REP_PREFIX])
4536 {
4537 as_warn (_("`%s` changes flags which would affect control flow behavior"),
4538 i.tm.name);
4539 }
4540 char *p = frag_more (3);
4541 *p++ = 0xf;
4542 *p++ = 0xae;
4543 *p = 0xe8;
4544 }
4545 }
4546
4547 /* Output lfence (0x0f 0xae 0xe8) before instruction. */
4548
4549 static void
4550 insert_lfence_before (void)
4551 {
4552 char *p;
4553
4554 if (is_any_vex_encoding (&i.tm))
4555 return;
4556
4557 if (i.tm.base_opcode == 0xff
4558 && (i.tm.extension_opcode == 2 || i.tm.extension_opcode == 4))
4559 {
4560 /* Insert lfence before indirect branch if needed. */
4561
4562 if (lfence_before_indirect_branch == lfence_branch_none)
4563 return;
4564
4565 if (i.operands != 1)
4566 abort ();
4567
4568 if (i.reg_operands == 1)
4569 {
4570 /* Indirect branch via register. Don't insert lfence with
4571 -mlfence-after-load=yes. */
4572 if (lfence_after_load
4573 || lfence_before_indirect_branch == lfence_branch_memory)
4574 return;
4575 }
4576 else if (i.mem_operands == 1
4577 && lfence_before_indirect_branch != lfence_branch_register)
4578 {
4579 as_warn (_("indirect `%s` with memory operand should be avoided"),
4580 i.tm.name);
4581 return;
4582 }
4583 else
4584 return;
4585
4586 if (last_insn.kind != last_insn_other
4587 && last_insn.seg == now_seg)
4588 {
4589 as_warn_where (last_insn.file, last_insn.line,
4590 _("`%s` skips -mlfence-before-indirect-branch on `%s`"),
4591 last_insn.name, i.tm.name);
4592 return;
4593 }
4594
4595 p = frag_more (3);
4596 *p++ = 0xf;
4597 *p++ = 0xae;
4598 *p = 0xe8;
4599 return;
4600 }
4601
4602 /* Output or/not/shl and lfence before near ret. */
4603 if (lfence_before_ret != lfence_before_ret_none
4604 && (i.tm.base_opcode == 0xc2
4605 || i.tm.base_opcode == 0xc3))
4606 {
4607 if (last_insn.kind != last_insn_other
4608 && last_insn.seg == now_seg)
4609 {
4610 as_warn_where (last_insn.file, last_insn.line,
4611 _("`%s` skips -mlfence-before-ret on `%s`"),
4612 last_insn.name, i.tm.name);
4613 return;
4614 }
4615
4616 /* Near ret ignores the operand size override in 64-bit code. */
4617 char prefix = flag_code == CODE_64BIT
4618 ? 0x48
4619 : i.prefix[DATA_PREFIX] ? 0x66 : 0x0;
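/* E.g. with -mlfence-before-ret=or in 64-bit code, a near ret is
   preceded by 48 83 0c 24 00 (orq $0x0, (%rsp)) followed by
   0f ae e8 (lfence). */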
4620
4621 if (lfence_before_ret == lfence_before_ret_not)
4622 {
4623 /* not: 0xf71424, may add prefix
4624 for operand size override or 64-bit code. */
4625 p = frag_more ((prefix ? 2 : 0) + 6 + 3);
4626 if (prefix)
4627 *p++ = prefix;
4628 *p++ = 0xf7;
4629 *p++ = 0x14;
4630 *p++ = 0x24;
4631 if (prefix)
4632 *p++ = prefix;
4633 *p++ = 0xf7;
4634 *p++ = 0x14;
4635 *p++ = 0x24;
4636 }
4637 else
4638 {
4639 p = frag_more ((prefix ? 1 : 0) + 4 + 3);
4640 if (prefix)
4641 *p++ = prefix;
4642 if (lfence_before_ret == lfence_before_ret_or)
4643 {
4644 /* or: 0x830c2400, may add prefix
4645 for operand size override or 64-bit code. */
4646 *p++ = 0x83;
4647 *p++ = 0x0c;
4648 }
4649 else
4650 {
4651 /* shl: 0xc1242400, may add prefix
4652 for operand size override or 64-bit code. */
4653 *p++ = 0xc1;
4654 *p++ = 0x24;
4655 }
4656
4657 *p++ = 0x24;
4658 *p++ = 0x0;
4659 }
4660
4661 *p++ = 0xf;
4662 *p++ = 0xae;
4663 *p = 0xe8;
4664 }
4665 }
4666
4667 /* This is the guts of the machine-dependent assembler. LINE points to a
4668 machine dependent instruction. This function is supposed to emit
4669 the frags/bytes it assembles to. */
4670
4671 void
4672 md_assemble (char *line)
4673 {
4674 unsigned int j;
4675 char mnemonic[MAX_MNEM_SIZE], mnem_suffix;
4676 const insn_template *t;
4677
4678 /* Initialize globals. */
4679 memset (&i, '\0', sizeof (i));
4680 for (j = 0; j < MAX_OPERANDS; j++)
4681 i.reloc[j] = NO_RELOC;
4682 memset (disp_expressions, '\0', sizeof (disp_expressions));
4683 memset (im_expressions, '\0', sizeof (im_expressions));
4684 save_stack_p = save_stack;
4685
4686 /* First parse an instruction mnemonic & call i386_operand for the operands.
4687 We assume that the scrubber has arranged it so that line[0] is the valid
4688 start of a (possibly prefixed) mnemonic. */
4689
4690 line = parse_insn (line, mnemonic);
4691 if (line == NULL)
4692 return;
4693 mnem_suffix = i.suffix;
4694
4695 line = parse_operands (line, mnemonic);
4696 this_operand = -1;
4697 xfree (i.memop1_string);
4698 i.memop1_string = NULL;
4699 if (line == NULL)
4700 return;
4701
4702 /* Now we've parsed the mnemonic into a set of templates, and have the
4703 operands at hand. */
4704
4705 /* All Intel opcodes have reversed operands except for "bound", "enter",
4706 "monitor*", "mwait*", "tpause", and "umwait". We also don't reverse
4707 intersegment "jmp" and "call" instructions with 2 immediate operands so
4708 that the immediate segment precedes the offset, as it does when in AT&T
4709 mode. */
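/* E.g. Intel "add eax, 1" arrives with operands (eax, 1) and is
   swapped here into the AT&T-internal order of "$1, %eax". */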
4710 if (intel_syntax
4711 && i.operands > 1
4712 && (strcmp (mnemonic, "bound") != 0)
4713 && (strcmp (mnemonic, "invlpga") != 0)
4714 && (strncmp (mnemonic, "monitor", 7) != 0)
4715 && (strncmp (mnemonic, "mwait", 5) != 0)
4716 && (strcmp (mnemonic, "tpause") != 0)
4717 && (strcmp (mnemonic, "umwait") != 0)
4718 && !(operand_type_check (i.types[0], imm)
4719 && operand_type_check (i.types[1], imm)))
4720 swap_operands ();
4721
4722 /* The order of the immediates should be reversed
4723 for 2 immediates extrq and insertq instructions */
4724 if (i.imm_operands == 2
4725 && (strcmp (mnemonic, "extrq") == 0
4726 || strcmp (mnemonic, "insertq") == 0))
4727 swap_2_operands (0, 1);
4728
4729 if (i.imm_operands)
4730 optimize_imm ();
4731
4732 /* Don't optimize displacement for movabs since it only takes 64bit
4733 displacement. */
4734 if (i.disp_operands
4735 && i.disp_encoding != disp_encoding_32bit
4736 && (flag_code != CODE_64BIT
4737 || strcmp (mnemonic, "movabs") != 0))
4738 optimize_disp ();
4739
4740 /* Next, we find a template that matches the given insn,
4741 making sure the overlap of the given operands types is consistent
4742 with the template operand types. */
4743
4744 if (!(t = match_template (mnem_suffix)))
4745 return;
4746
4747 if (sse_check != check_none
4748 && !i.tm.opcode_modifier.noavx
4749 && !i.tm.cpu_flags.bitfield.cpuavx
4750 && !i.tm.cpu_flags.bitfield.cpuavx512f
4751 && (i.tm.cpu_flags.bitfield.cpusse
4752 || i.tm.cpu_flags.bitfield.cpusse2
4753 || i.tm.cpu_flags.bitfield.cpusse3
4754 || i.tm.cpu_flags.bitfield.cpussse3
4755 || i.tm.cpu_flags.bitfield.cpusse4_1
4756 || i.tm.cpu_flags.bitfield.cpusse4_2
4757 || i.tm.cpu_flags.bitfield.cpupclmul
4758 || i.tm.cpu_flags.bitfield.cpuaes
4759 || i.tm.cpu_flags.bitfield.cpusha
4760 || i.tm.cpu_flags.bitfield.cpugfni))
4761 {
4762 (sse_check == check_warning
4763 ? as_warn
4764 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
4765 }
4766
4767 if (i.tm.opcode_modifier.fwait)
4768 if (!add_prefix (FWAIT_OPCODE))
4769 return;
4770
4771 /* Check if REP prefix is OK. */
4772 if (i.rep_prefix && !i.tm.opcode_modifier.repprefixok)
4773 {
4774 as_bad (_("invalid instruction `%s' after `%s'"),
4775 i.tm.name, i.rep_prefix);
4776 return;
4777 }
4778
4779 /* Check for lock without a lockable instruction. Destination operand
4780 must be memory unless it is xchg (0x86). */
4781 if (i.prefix[LOCK_PREFIX]
4782 && (!i.tm.opcode_modifier.islockable
4783 || i.mem_operands == 0
4784 || (i.tm.base_opcode != 0x86
4785 && !(i.flags[i.operands - 1] & Operand_Mem))))
4786 {
4787 as_bad (_("expecting lockable instruction after `lock'"));
4788 return;
4789 }
4790
4791 /* Check for data size prefix on VEX/XOP/EVEX encoded and SIMD insns. */
4792 if (i.prefix[DATA_PREFIX]
4793 && (is_any_vex_encoding (&i.tm)
4794 || i.tm.operand_types[i.imm_operands].bitfield.class >= RegMMX
4795 || i.tm.operand_types[i.imm_operands + 1].bitfield.class >= RegMMX))
4796 {
4797 as_bad (_("data size prefix invalid with `%s'"), i.tm.name);
4798 return;
4799 }
4800
4801 /* Check if HLE prefix is OK. */
4802 if (i.hle_prefix && !check_hle ())
4803 return;
4804
4805 /* Check BND prefix. */
4806 if (i.bnd_prefix && !i.tm.opcode_modifier.bndprefixok)
4807 as_bad (_("expecting valid branch instruction after `bnd'"));
4808
4809 /* Check NOTRACK prefix. */
4810 if (i.notrack_prefix && !i.tm.opcode_modifier.notrackprefixok)
4811 as_bad (_("expecting indirect branch instruction after `notrack'"));
4812
4813 if (i.tm.cpu_flags.bitfield.cpumpx)
4814 {
4815 if (flag_code == CODE_64BIT && i.prefix[ADDR_PREFIX])
4816 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
4817 else if (flag_code != CODE_16BIT
4818 ? i.prefix[ADDR_PREFIX]
4819 : i.mem_operands && !i.prefix[ADDR_PREFIX])
4820 as_bad (_("16-bit address isn't allowed in MPX instructions"));
4821 }
4822
4823 /* Insert BND prefix. */
4824 if (add_bnd_prefix && i.tm.opcode_modifier.bndprefixok)
4825 {
4826 if (!i.prefix[BND_PREFIX])
4827 add_prefix (BND_PREFIX_OPCODE);
4828 else if (i.prefix[BND_PREFIX] != BND_PREFIX_OPCODE)
4829 {
4830 as_warn (_("replacing `rep'/`repe' prefix by `bnd'"));
4831 i.prefix[BND_PREFIX] = BND_PREFIX_OPCODE;
4832 }
4833 }
4834
4835 /* Check string instruction segment overrides. */
4836 if (i.tm.opcode_modifier.isstring >= IS_STRING_ES_OP0)
4837 {
4838 gas_assert (i.mem_operands);
4839 if (!check_string ())
4840 return;
4841 i.disp_operands = 0;
4842 }
4843
4844 if (optimize && !i.no_optimize && i.tm.opcode_modifier.optimize)
4845 optimize_encoding ();
4846
4847 if (!process_suffix ())
4848 return;
4849
4850 /* Update operand types. */
4851 for (j = 0; j < i.operands; j++)
4852 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
4853
4854 /* Make still unresolved immediate matches conform to size of immediate
4855 given in i.suffix. */
4856 if (!finalize_imm ())
4857 return;
4858
4859 if (i.types[0].bitfield.imm1)
4860 i.imm_operands = 0; /* kludge for shift insns. */
4861
4862 /* We only need to check those implicit registers for instructions
4863 with 3 operands or less. */
4864 if (i.operands <= 3)
4865 for (j = 0; j < i.operands; j++)
4866 if (i.types[j].bitfield.instance != InstanceNone
4867 && !i.types[j].bitfield.xmmword)
4868 i.reg_operands--;
4869
4870 /* For insns with operands there are more diddles to do to the opcode. */
4871 if (i.operands)
4872 {
4873 if (!process_operands ())
4874 return;
4875 }
4876 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
4877 {
4878 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
4879 as_warn (_("translating to `%sp'"), i.tm.name);
4880 }
4881
4882 if (is_any_vex_encoding (&i.tm))
4883 {
4884 if (!cpu_arch_flags.bitfield.cpui286)
4885 {
4886 as_bad (_("instruction `%s' isn't supported outside of protected mode."),
4887 i.tm.name);
4888 return;
4889 }
4890
4891 /* Check for explicit REX prefix. */
4892 if (i.prefix[REX_PREFIX] || i.rex_encoding)
4893 {
4894 as_bad (_("REX prefix invalid with `%s'"), i.tm.name);
4895 return;
4896 }
4897
4898 if (i.tm.opcode_modifier.vex)
4899 build_vex_prefix (t);
4900 else
4901 build_evex_prefix ();
4902
4903 /* The individual REX.RXBW bits got consumed. */
4904 i.rex &= REX_OPCODE;
4905 }
4906
4907 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
4908 instructions may define INT_OPCODE as well, so avoid this corner
4909 case for those instructions that use MODRM. */
4910 if (i.tm.base_opcode == INT_OPCODE
4911 && !i.tm.opcode_modifier.modrm
4912 && i.op[0].imms->X_add_number == 3)
4913 {
4914 i.tm.base_opcode = INT3_OPCODE;
4915 i.imm_operands = 0;
4916 }
4917
4918 if ((i.tm.opcode_modifier.jump == JUMP
4919 || i.tm.opcode_modifier.jump == JUMP_BYTE
4920 || i.tm.opcode_modifier.jump == JUMP_DWORD)
4921 && i.op[0].disps->X_op == O_constant)
4922 {
4923 /* Convert "jmp constant" (and "call constant") to a jump (call) to
4924 the absolute address given by the constant. Since ix86 jumps and
4925 calls are pc relative, we need to generate a reloc. */
4926 i.op[0].disps->X_add_symbol = &abs_symbol;
4927 i.op[0].disps->X_op = O_symbol;
4928 }
4929
4930 /* For 8 bit registers we need an empty rex prefix. Also if the
4931 instruction already has a prefix, we need to convert old
4932 registers to new ones. */
4933
4934 if ((i.types[0].bitfield.class == Reg && i.types[0].bitfield.byte
4935 && (i.op[0].regs->reg_flags & RegRex64) != 0)
4936 || (i.types[1].bitfield.class == Reg && i.types[1].bitfield.byte
4937 && (i.op[1].regs->reg_flags & RegRex64) != 0)
4938 || (((i.types[0].bitfield.class == Reg && i.types[0].bitfield.byte)
4939 || (i.types[1].bitfield.class == Reg && i.types[1].bitfield.byte))
4940 && i.rex != 0))
4941 {
4942 int x;
4943
4944 i.rex |= REX_OPCODE;
4945 for (x = 0; x < 2; x++)
4946 {
4947 /* Look for 8 bit operand that uses old registers. */
4948 if (i.types[x].bitfield.class == Reg && i.types[x].bitfield.byte
4949 && (i.op[x].regs->reg_flags & RegRex64) == 0)
4950 {
4951 gas_assert (!(i.op[x].regs->reg_flags & RegRex));
4952 /* In case it is "hi" register, give up. */
4953 if (i.op[x].regs->reg_num > 3)
4954 as_bad (_("can't encode register '%s%s' in an "
4955 "instruction requiring REX prefix."),
4956 register_prefix, i.op[x].regs->reg_name);
4957
4958 /* Otherwise it is equivalent to the extended register.
4959 Since the encoding doesn't change this is merely
4960 cosmetic cleanup for debug output. */
4961
4962 i.op[x].regs = i.op[x].regs + 8;
4963 }
4964 }
4965 }
4966
4967 if (i.rex == 0 && i.rex_encoding)
4968 {
4969 /* Check if we can add a REX_OPCODE byte. Look for 8 bit operand
4970 that uses legacy register. If it is "hi" register, don't add
4971 the REX_OPCODE byte. */
4972 int x;
4973 for (x = 0; x < 2; x++)
4974 if (i.types[x].bitfield.class == Reg
4975 && i.types[x].bitfield.byte
4976 && (i.op[x].regs->reg_flags & RegRex64) == 0
4977 && i.op[x].regs->reg_num > 3)
4978 {
4979 gas_assert (!(i.op[x].regs->reg_flags & RegRex));
4980 i.rex_encoding = FALSE;
4981 break;
4982 }
4983
4984 if (i.rex_encoding)
4985 i.rex = REX_OPCODE;
4986 }
4987
4988 if (i.rex != 0)
4989 add_prefix (REX_OPCODE | i.rex);
4990
4991 insert_lfence_before ();
4992
4993 /* We are ready to output the insn. */
4994 output_insn ();
4995
4996 insert_lfence_after ();
4997
4998 last_insn.seg = now_seg;
4999
5000 if (i.tm.opcode_modifier.isprefix)
5001 {
5002 last_insn.kind = last_insn_prefix;
5003 last_insn.name = i.tm.name;
5004 last_insn.file = as_where (&last_insn.line);
5005 }
5006 else
5007 last_insn.kind = last_insn_other;
5008 }
5009
5010 static char *
5011 parse_insn (char *line, char *mnemonic)
5012 {
5013 char *l = line;
5014 char *token_start = l;
5015 char *mnem_p;
5016 int supported;
5017 const insn_template *t;
5018 char *dot_p = NULL;
5019
5020 while (1)
5021 {
5022 mnem_p = mnemonic;
5023 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
5024 {
5025 if (*mnem_p == '.')
5026 dot_p = mnem_p;
5027 mnem_p++;
5028 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
5029 {
5030 as_bad (_("no such instruction: `%s'"), token_start);
5031 return NULL;
5032 }
5033 l++;
5034 }
5035 if (!is_space_char (*l)
5036 && *l != END_OF_INSN
5037 && (intel_syntax
5038 || (*l != PREFIX_SEPARATOR
5039 && *l != ',')))
5040 {
5041 as_bad (_("invalid character %s in mnemonic"),
5042 output_invalid (*l));
5043 return NULL;
5044 }
5045 if (token_start == l)
5046 {
5047 if (!intel_syntax && *l == PREFIX_SEPARATOR)
5048 as_bad (_("expecting prefix; got nothing"));
5049 else
5050 as_bad (_("expecting mnemonic; got nothing"));
5051 return NULL;
5052 }
5053
5054 /* Look up instruction (or prefix) via hash table. */
5055 current_templates = (const templates *) hash_find (op_hash, mnemonic);
5056
5057 if (*l != END_OF_INSN
5058 && (!is_space_char (*l) || l[1] != END_OF_INSN)
5059 && current_templates
5060 && current_templates->start->opcode_modifier.isprefix)
5061 {
5062 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
5063 {
5064 as_bad ((flag_code != CODE_64BIT
5065 ? _("`%s' is only supported in 64-bit mode")
5066 : _("`%s' is not supported in 64-bit mode")),
5067 current_templates->start->name);
5068 return NULL;
5069 }
5070 /* If we are in 16-bit mode, do not allow addr16 or data16.
5071 Similarly, in 32-bit mode, do not allow addr32 or data32. */
5072 if ((current_templates->start->opcode_modifier.size == SIZE16
5073 || current_templates->start->opcode_modifier.size == SIZE32)
5074 && flag_code != CODE_64BIT
5075 && ((current_templates->start->opcode_modifier.size == SIZE32)
5076 ^ (flag_code == CODE_16BIT)))
5077 {
5078 as_bad (_("redundant %s prefix"),
5079 current_templates->start->name);
5080 return NULL;
5081 }
5082 if (current_templates->start->opcode_length == 0)
5083 {
5084 /* Handle pseudo prefixes. */
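/* E.g. "{vex} vpaddd %xmm1, %xmm2, %xmm3" forces a VEX encoding,
   and "{load} mov %eax, %ebx" selects the load form (opcode 0x8b)
   over the store form (0x89). */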
5085 switch (current_templates->start->base_opcode)
5086 {
5087 case 0x0:
5088 /* {disp8} */
5089 i.disp_encoding = disp_encoding_8bit;
5090 break;
5091 case 0x1:
5092 /* {disp32} */
5093 i.disp_encoding = disp_encoding_32bit;
5094 break;
5095 case 0x2:
5096 /* {load} */
5097 i.dir_encoding = dir_encoding_load;
5098 break;
5099 case 0x3:
5100 /* {store} */
5101 i.dir_encoding = dir_encoding_store;
5102 break;
5103 case 0x4:
5104 /* {vex} */
5105 i.vec_encoding = vex_encoding_vex;
5106 break;
5107 case 0x5:
5108 /* {vex3} */
5109 i.vec_encoding = vex_encoding_vex3;
5110 break;
5111 case 0x6:
5112 /* {evex} */
5113 i.vec_encoding = vex_encoding_evex;
5114 break;
5115 case 0x7:
5116 /* {rex} */
5117 i.rex_encoding = TRUE;
5118 break;
5119 case 0x8:
5120 /* {nooptimize} */
5121 i.no_optimize = TRUE;
5122 break;
5123 default:
5124 abort ();
5125 }
5126 }
5127 else
5128 {
5129 /* Add prefix, checking for repeated prefixes. */
5130 switch (add_prefix (current_templates->start->base_opcode))
5131 {
5132 case PREFIX_EXIST:
5133 return NULL;
5134 case PREFIX_DS:
5135 if (current_templates->start->cpu_flags.bitfield.cpuibt)
5136 i.notrack_prefix = current_templates->start->name;
5137 break;
5138 case PREFIX_REP:
5139 if (current_templates->start->cpu_flags.bitfield.cpuhle)
5140 i.hle_prefix = current_templates->start->name;
5141 else if (current_templates->start->cpu_flags.bitfield.cpumpx)
5142 i.bnd_prefix = current_templates->start->name;
5143 else
5144 i.rep_prefix = current_templates->start->name;
5145 break;
5146 default:
5147 break;
5148 }
5149 }
5150 /* Skip past PREFIX_SEPARATOR and reset token_start. */
5151 token_start = ++l;
5152 }
5153 else
5154 break;
5155 }
5156
5157 if (!current_templates)
5158 {
5159 /* Deprecated functionality (new code should use pseudo-prefixes instead):
5160 Check if we should swap operands or force a 32bit displacement in
5161 the encoding. */
5162 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
5163 i.dir_encoding = dir_encoding_swap;
5164 else if (mnem_p - 3 == dot_p
5165 && dot_p[1] == 'd'
5166 && dot_p[2] == '8')
5167 i.disp_encoding = disp_encoding_8bit;
5168 else if (mnem_p - 4 == dot_p
5169 && dot_p[1] == 'd'
5170 && dot_p[2] == '3'
5171 && dot_p[3] == '2')
5172 i.disp_encoding = disp_encoding_32bit;
5173 else
5174 goto check_suffix;
5175 mnem_p = dot_p;
5176 *dot_p = '\0';
5177 current_templates = (const templates *) hash_find (op_hash, mnemonic);
5178 }
5179
5180 if (!current_templates)
5181 {
5182 check_suffix:
5183 if (mnem_p > mnemonic)
5184 {
5185 /* See if we can get a match by trimming off a suffix. */
5186 switch (mnem_p[-1])
5187 {
5188 case WORD_MNEM_SUFFIX:
5189 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
5190 i.suffix = SHORT_MNEM_SUFFIX;
5191 else
5192 /* Fall through. */
5193 case BYTE_MNEM_SUFFIX:
5194 case QWORD_MNEM_SUFFIX:
5195 i.suffix = mnem_p[-1];
5196 mnem_p[-1] = '\0';
5197 current_templates = (const templates *) hash_find (op_hash,
5198 mnemonic);
5199 break;
5200 case SHORT_MNEM_SUFFIX:
5201 case LONG_MNEM_SUFFIX:
5202 if (!intel_syntax)
5203 {
5204 i.suffix = mnem_p[-1];
5205 mnem_p[-1] = '\0';
5206 current_templates = (const templates *) hash_find (op_hash,
5207 mnemonic);
5208 }
5209 break;
5210
5211 /* Intel Syntax. */
5212 case 'd':
5213 if (intel_syntax)
5214 {
5215 if (intel_float_operand (mnemonic) == 1)
5216 i.suffix = SHORT_MNEM_SUFFIX;
5217 else
5218 i.suffix = LONG_MNEM_SUFFIX;
5219 mnem_p[-1] = '\0';
5220 current_templates = (const templates *) hash_find (op_hash,
5221 mnemonic);
5222 }
5223 break;
5224 }
5225 }
5226
5227 if (!current_templates)
5228 {
5229 as_bad (_("no such instruction: `%s'"), token_start);
5230 return NULL;
5231 }
5232 }
5233
5234 if (current_templates->start->opcode_modifier.jump == JUMP
5235 || current_templates->start->opcode_modifier.jump == JUMP_BYTE)
5236 {
5237 /* Check for a branch hint. We allow ",pt" and ",pn" for
5238 predict taken and predict not taken respectively.
5239 I'm not sure that branch hints actually do anything on loop
5240 and jcxz insns (JumpByte) for current Pentium4 chips. They
5241 may work in the future and it doesn't hurt to accept them
5242 now. */
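/* E.g. "jne,pt target" gets the 0x3e (DS, predict taken) prefix
   and "jne,pn target" the 0x2e (CS, predict not taken) prefix. */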
5243 if (l[0] == ',' && l[1] == 'p')
5244 {
5245 if (l[2] == 't')
5246 {
5247 if (!add_prefix (DS_PREFIX_OPCODE))
5248 return NULL;
5249 l += 3;
5250 }
5251 else if (l[2] == 'n')
5252 {
5253 if (!add_prefix (CS_PREFIX_OPCODE))
5254 return NULL;
5255 l += 3;
5256 }
5257 }
5258 }
5259 /* Any other comma loses. */
5260 if (*l == ',')
5261 {
5262 as_bad (_("invalid character %s in mnemonic"),
5263 output_invalid (*l));
5264 return NULL;
5265 }
5266
5267 /* Check if instruction is supported on specified architecture. */
5268 supported = 0;
5269 for (t = current_templates->start; t < current_templates->end; ++t)
5270 {
5271 supported |= cpu_flags_match (t);
5272 if (supported == CPU_FLAGS_PERFECT_MATCH)
5273 {
5274 if (!cpu_arch_flags.bitfield.cpui386 && (flag_code != CODE_16BIT))
5275 as_warn (_("use .code16 to ensure correct addressing mode"));
5276
5277 return l;
5278 }
5279 }
5280
5281 if (!(supported & CPU_FLAGS_64BIT_MATCH))
5282 as_bad (flag_code == CODE_64BIT
5283 ? _("`%s' is not supported in 64-bit mode")
5284 : _("`%s' is only supported in 64-bit mode"),
5285 current_templates->start->name);
5286 else
5287 as_bad (_("`%s' is not supported on `%s%s'"),
5288 current_templates->start->name,
5289 cpu_arch_name ? cpu_arch_name : default_arch,
5290 cpu_sub_arch_name ? cpu_sub_arch_name : "");
5291
5292 return NULL;
5293 }
5294
5295 static char *
5296 parse_operands (char *l, const char *mnemonic)
5297 {
5298 char *token_start;
5299
5300 /* 1 if operand is pending after ','. */
5301 unsigned int expecting_operand = 0;
5302
5303 /* Non-zero if operand parens not balanced. */
5304 unsigned int paren_not_balanced;
5305
5306 while (*l != END_OF_INSN)
5307 {
5308 /* Skip optional white space before operand. */
5309 if (is_space_char (*l))
5310 ++l;
5311 if (!is_operand_char (*l) && *l != END_OF_INSN && *l != '"')
5312 {
5313 as_bad (_("invalid character %s before operand %d"),
5314 output_invalid (*l),
5315 i.operands + 1);
5316 return NULL;
5317 }
5318 token_start = l; /* After white space. */
5319 paren_not_balanced = 0;
5320 while (paren_not_balanced || *l != ',')
5321 {
5322 if (*l == END_OF_INSN)
5323 {
5324 if (paren_not_balanced)
5325 {
5326 if (!intel_syntax)
5327 as_bad (_("unbalanced parenthesis in operand %d."),
5328 i.operands + 1);
5329 else
5330 as_bad (_("unbalanced brackets in operand %d."),
5331 i.operands + 1);
5332 return NULL;
5333 }
5334 else
5335 break; /* we are done */
5336 }
5337 else if (!is_operand_char (*l) && !is_space_char (*l) && *l != '"')
5338 {
5339 as_bad (_("invalid character %s in operand %d"),
5340 output_invalid (*l),
5341 i.operands + 1);
5342 return NULL;
5343 }
5344 if (!intel_syntax)
5345 {
5346 if (*l == '(')
5347 ++paren_not_balanced;
5348 if (*l == ')')
5349 --paren_not_balanced;
5350 }
5351 else
5352 {
5353 if (*l == '[')
5354 ++paren_not_balanced;
5355 if (*l == ']')
5356 --paren_not_balanced;
5357 }
5358 l++;
5359 }
5360 if (l != token_start)
5361 { /* Yes, we've read in another operand. */
5362 unsigned int operand_ok;
5363 this_operand = i.operands++;
5364 if (i.operands > MAX_OPERANDS)
5365 {
5366 as_bad (_("spurious operands; (%d operands/instruction max)"),
5367 MAX_OPERANDS);
5368 return NULL;
5369 }
5370 i.types[this_operand].bitfield.unspecified = 1;
5371 /* Now parse operand adding info to 'i' as we go along. */
5372 END_STRING_AND_SAVE (l);
5373
5374 if (i.mem_operands > 1)
5375 {
5376 as_bad (_("too many memory references for `%s'"),
5377 mnemonic);
5378 return 0;
5379 }
5380
5381 if (intel_syntax)
5382 operand_ok =
5383 i386_intel_operand (token_start,
5384 intel_float_operand (mnemonic));
5385 else
5386 operand_ok = i386_att_operand (token_start);
5387
5388 RESTORE_END_STRING (l);
5389 if (!operand_ok)
5390 return NULL;
5391 }
5392 else
5393 {
5394 if (expecting_operand)
5395 {
5396 expecting_operand_after_comma:
5397 as_bad (_("expecting operand after ','; got nothing"));
5398 return NULL;
5399 }
5400 if (*l == ',')
5401 {
5402 as_bad (_("expecting operand before ','; got nothing"));
5403 return NULL;
5404 }
5405 }
5406
5407 /* Now *l must be either ',' or END_OF_INSN. */
5408 if (*l == ',')
5409 {
5410 if (*++l == END_OF_INSN)
5411 {
5412 /* Skip the ','; if the line ends here, complain. */
5413 goto expecting_operand_after_comma;
5414 }
5415 expecting_operand = 1;
5416 }
5417 }
5418 return l;
5419 }
5420
5421 static void
5422 swap_2_operands (int xchg1, int xchg2)
5423 {
5424 union i386_op temp_op;
5425 i386_operand_type temp_type;
5426 unsigned int temp_flags;
5427 enum bfd_reloc_code_real temp_reloc;
5428
5429 temp_type = i.types[xchg2];
5430 i.types[xchg2] = i.types[xchg1];
5431 i.types[xchg1] = temp_type;
5432
5433 temp_flags = i.flags[xchg2];
5434 i.flags[xchg2] = i.flags[xchg1];
5435 i.flags[xchg1] = temp_flags;
5436
5437 temp_op = i.op[xchg2];
5438 i.op[xchg2] = i.op[xchg1];
5439 i.op[xchg1] = temp_op;
5440
5441 temp_reloc = i.reloc[xchg2];
5442 i.reloc[xchg2] = i.reloc[xchg1];
5443 i.reloc[xchg1] = temp_reloc;
5444
5445 if (i.mask)
5446 {
5447 if (i.mask->operand == xchg1)
5448 i.mask->operand = xchg2;
5449 else if (i.mask->operand == xchg2)
5450 i.mask->operand = xchg1;
5451 }
5452 if (i.broadcast)
5453 {
5454 if (i.broadcast->operand == xchg1)
5455 i.broadcast->operand = xchg2;
5456 else if (i.broadcast->operand == xchg2)
5457 i.broadcast->operand = xchg1;
5458 }
5459 if (i.rounding)
5460 {
5461 if (i.rounding->operand == xchg1)
5462 i.rounding->operand = xchg2;
5463 else if (i.rounding->operand == xchg2)
5464 i.rounding->operand = xchg1;
5465 }
5466 }
5467
5468 static void
5469 swap_operands (void)
5470 {
5471 switch (i.operands)
5472 {
5473 case 5:
5474 case 4:
5475 swap_2_operands (1, i.operands - 2);
5476 /* Fall through. */
5477 case 3:
5478 case 2:
5479 swap_2_operands (0, i.operands - 1);
5480 break;
5481 default:
5482 abort ();
5483 }
5484
5485 if (i.mem_operands == 2)
5486 {
5487 const seg_entry *temp_seg;
5488 temp_seg = i.seg[0];
5489 i.seg[0] = i.seg[1];
5490 i.seg[1] = temp_seg;
5491 }
5492 }
5493
5494 /* Try to ensure constant immediates are represented in the smallest
5495 opcode possible. */
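/* E.g. "add $1, %eax" can use the 0x83 /0 form with a sign-extended
   Imm8S instead of the 0x81 /0 form with an Imm32, saving three
   bytes. */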
5496 static void
5497 optimize_imm (void)
5498 {
5499 char guess_suffix = 0;
5500 int op;
5501
5502 if (i.suffix)
5503 guess_suffix = i.suffix;
5504 else if (i.reg_operands)
5505 {
5506 /* Figure out a suffix from the last register operand specified.
5507 We can't do this properly yet, i.e. excluding special register
5508 instances, but the following works for instructions with
5509 immediates. In any case, we can't set i.suffix yet. */
5510 for (op = i.operands; --op >= 0;)
5511 if (i.types[op].bitfield.class != Reg)
5512 continue;
5513 else if (i.types[op].bitfield.byte)
5514 {
5515 guess_suffix = BYTE_MNEM_SUFFIX;
5516 break;
5517 }
5518 else if (i.types[op].bitfield.word)
5519 {
5520 guess_suffix = WORD_MNEM_SUFFIX;
5521 break;
5522 }
5523 else if (i.types[op].bitfield.dword)
5524 {
5525 guess_suffix = LONG_MNEM_SUFFIX;
5526 break;
5527 }
5528 else if (i.types[op].bitfield.qword)
5529 {
5530 guess_suffix = QWORD_MNEM_SUFFIX;
5531 break;
5532 }
5533 }
5534 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
5535 guess_suffix = WORD_MNEM_SUFFIX;
5536
5537 for (op = i.operands; --op >= 0;)
5538 if (operand_type_check (i.types[op], imm))
5539 {
5540 switch (i.op[op].imms->X_op)
5541 {
5542 case O_constant:
5543 /* If a suffix is given, this operand may be shortened. */
5544 switch (guess_suffix)
5545 {
5546 case LONG_MNEM_SUFFIX:
5547 i.types[op].bitfield.imm32 = 1;
5548 i.types[op].bitfield.imm64 = 1;
5549 break;
5550 case WORD_MNEM_SUFFIX:
5551 i.types[op].bitfield.imm16 = 1;
5552 i.types[op].bitfield.imm32 = 1;
5553 i.types[op].bitfield.imm32s = 1;
5554 i.types[op].bitfield.imm64 = 1;
5555 break;
5556 case BYTE_MNEM_SUFFIX:
5557 i.types[op].bitfield.imm8 = 1;
5558 i.types[op].bitfield.imm8s = 1;
5559 i.types[op].bitfield.imm16 = 1;
5560 i.types[op].bitfield.imm32 = 1;
5561 i.types[op].bitfield.imm32s = 1;
5562 i.types[op].bitfield.imm64 = 1;
5563 break;
5564 }
5565
5566 /* If this operand is at most 16 bits, convert it
5567 to a signed 16 bit number before trying to see
5568 whether it will fit in an even smaller size.
5569 This allows a 16-bit operand such as $0xffe0 to
5570 be recognised as within Imm8S range. */
5571 if ((i.types[op].bitfield.imm16)
5572 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
5573 {
5574 i.op[op].imms->X_add_number =
5575 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
5576 }
5577 #ifdef BFD64
5578 /* Store 32-bit immediate in 64-bit for 64-bit BFD. */
5579 if ((i.types[op].bitfield.imm32)
5580 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
5581 == 0))
5582 {
5583 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
5584 ^ ((offsetT) 1 << 31))
5585 - ((offsetT) 1 << 31));
5586 }
5587 #endif
5588 i.types[op]
5589 = operand_type_or (i.types[op],
5590 smallest_imm_type (i.op[op].imms->X_add_number));
5591
5592 /* We must avoid matching Imm32 templates when only a 64-bit
5593 immediate is available. */
5594 if (guess_suffix == QWORD_MNEM_SUFFIX)
5595 i.types[op].bitfield.imm32 = 0;
5596 break;
5597
5598 case O_absent:
5599 case O_register:
5600 abort ();
5601
5602 /* Symbols and expressions. */
5603 default:
5604 /* Convert symbolic operand to proper sizes for matching, but don't
5605 prevent matching a set of insns that only supports sizes other
5606 than those matching the insn suffix. */
5607 {
5608 i386_operand_type mask, allowed;
5609 const insn_template *t;
5610
5611 operand_type_set (&mask, 0);
5612 operand_type_set (&allowed, 0);
5613
5614 for (t = current_templates->start;
5615 t < current_templates->end;
5616 ++t)
5617 {
5618 allowed = operand_type_or (allowed, t->operand_types[op]);
5619 allowed = operand_type_and (allowed, anyimm);
5620 }
5621 switch (guess_suffix)
5622 {
5623 case QWORD_MNEM_SUFFIX:
5624 mask.bitfield.imm64 = 1;
5625 mask.bitfield.imm32s = 1;
5626 break;
5627 case LONG_MNEM_SUFFIX:
5628 mask.bitfield.imm32 = 1;
5629 break;
5630 case WORD_MNEM_SUFFIX:
5631 mask.bitfield.imm16 = 1;
5632 break;
5633 case BYTE_MNEM_SUFFIX:
5634 mask.bitfield.imm8 = 1;
5635 break;
5636 default:
5637 break;
5638 }
5639 allowed = operand_type_and (mask, allowed);
5640 if (!operand_type_all_zero (&allowed))
5641 i.types[op] = operand_type_and (i.types[op], mask);
5642 }
5643 break;
5644 }
5645 }
5646 }
5647
5648 /* Try to use the smallest displacement type too. */
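/* E.g. "movl %eax, 0x10(%rbp)" only needs a Disp8, and for
   "movl %eax, 0(%rax)" the zero displacement can be dropped
   entirely, as done for the baseindex case below. */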
5649 static void
5650 optimize_disp (void)
5651 {
5652 int op;
5653
5654 for (op = i.operands; --op >= 0;)
5655 if (operand_type_check (i.types[op], disp))
5656 {
5657 if (i.op[op].disps->X_op == O_constant)
5658 {
5659 offsetT op_disp = i.op[op].disps->X_add_number;
5660
5661 if (i.types[op].bitfield.disp16
5662 && (op_disp & ~(offsetT) 0xffff) == 0)
5663 {
5664 /* If this operand is at most 16 bits, convert
5665 to a signed 16 bit number and don't use 64bit
5666 displacement. */
5667 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
5668 i.types[op].bitfield.disp64 = 0;
5669 }
5670 #ifdef BFD64
5671 /* Optimize 64-bit displacement to 32-bit for 64-bit BFD. */
5672 if (i.types[op].bitfield.disp32
5673 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
5674 {
5675 /* If this operand is at most 32 bits, convert
5676 to a signed 32 bit number and don't use 64bit
5677 displacement. */
5678 op_disp &= (((offsetT) 2 << 31) - 1);
5679 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
5680 i.types[op].bitfield.disp64 = 0;
5681 }
5682 #endif
5683 if (!op_disp && i.types[op].bitfield.baseindex)
5684 {
5685 i.types[op].bitfield.disp8 = 0;
5686 i.types[op].bitfield.disp16 = 0;
5687 i.types[op].bitfield.disp32 = 0;
5688 i.types[op].bitfield.disp32s = 0;
5689 i.types[op].bitfield.disp64 = 0;
5690 i.op[op].disps = 0;
5691 i.disp_operands--;
5692 }
5693 else if (flag_code == CODE_64BIT)
5694 {
5695 if (fits_in_signed_long (op_disp))
5696 {
5697 i.types[op].bitfield.disp64 = 0;
5698 i.types[op].bitfield.disp32s = 1;
5699 }
5700 if (i.prefix[ADDR_PREFIX]
5701 && fits_in_unsigned_long (op_disp))
5702 i.types[op].bitfield.disp32 = 1;
5703 }
5704 if ((i.types[op].bitfield.disp32
5705 || i.types[op].bitfield.disp32s
5706 || i.types[op].bitfield.disp16)
5707 && fits_in_disp8 (op_disp))
5708 i.types[op].bitfield.disp8 = 1;
5709 }
5710 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5711 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
5712 {
5713 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
5714 i.op[op].disps, 0, i.reloc[op]);
5715 i.types[op].bitfield.disp8 = 0;
5716 i.types[op].bitfield.disp16 = 0;
5717 i.types[op].bitfield.disp32 = 0;
5718 i.types[op].bitfield.disp32s = 0;
5719 i.types[op].bitfield.disp64 = 0;
5720 }
5721 else
5722 /* We only support 64bit displacement on constants. */
5723 i.types[op].bitfield.disp64 = 0;
5724 }
5725 }
5726
5727 /* Return 1 if there is a match in broadcast bytes between operand
5728 GIVEN and instruction template T. */
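/* E.g. "vaddps 4(%rax){1to16}, %zmm1, %zmm2" broadcasts dword
   elements, so it matches a template with DWORD_BROADCAST. */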
5729
5730 static INLINE int
5731 match_broadcast_size (const insn_template *t, unsigned int given)
5732 {
5733 return ((t->opcode_modifier.broadcast == BYTE_BROADCAST
5734 && i.types[given].bitfield.byte)
5735 || (t->opcode_modifier.broadcast == WORD_BROADCAST
5736 && i.types[given].bitfield.word)
5737 || (t->opcode_modifier.broadcast == DWORD_BROADCAST
5738 && i.types[given].bitfield.dword)
5739 || (t->opcode_modifier.broadcast == QWORD_BROADCAST
5740 && i.types[given].bitfield.qword));
5741 }
5742
5743 /* Check if operands are valid for the instruction. */
5744
5745 static int
5746 check_VecOperands (const insn_template *t)
5747 {
5748 unsigned int op;
5749 i386_cpu_flags cpu;
5750
5751 /* Templates allowing for ZMMword as well as YMMword and/or XMMword for
5752 any one operand implicitly require AVX512VL support if the actual
5753 operand size is YMMword or XMMword. Since this function runs after
5754 template matching, there's no need to check for YMMword/XMMword in
5755 the template. */
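/* E.g. an EVEX-encoded "vpaddd %xmm1, %xmm2, %xmm3" needs AVX512VL:
   the template allows ZMMword operands, and the XMMword operands
   used here are only encodable with the VL extension. */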
5756 cpu = cpu_flags_and (t->cpu_flags, avx512);
5757 if (!cpu_flags_all_zero (&cpu)
5758 && !t->cpu_flags.bitfield.cpuavx512vl
5759 && !cpu_arch_flags.bitfield.cpuavx512vl)
5760 {
5761 for (op = 0; op < t->operands; ++op)
5762 {
5763 if (t->operand_types[op].bitfield.zmmword
5764 && (i.types[op].bitfield.ymmword
5765 || i.types[op].bitfield.xmmword))
5766 {
5767 i.error = unsupported;
5768 return 1;
5769 }
5770 }
5771 }
5772
5773 /* Without VSIB byte, we can't have a vector register for index. */
5774 if (!t->opcode_modifier.sib
5775 && i.index_reg
5776 && (i.index_reg->reg_type.bitfield.xmmword
5777 || i.index_reg->reg_type.bitfield.ymmword
5778 || i.index_reg->reg_type.bitfield.zmmword))
5779 {
5780 i.error = unsupported_vector_index_register;
5781 return 1;
5782 }
5783
5784 /* Check if default mask is allowed. */
5785 if (t->opcode_modifier.nodefmask
5786 && (!i.mask || i.mask->mask->reg_num == 0))
5787 {
5788 i.error = no_default_mask;
5789 return 1;
5790 }
5791
5792 /* For VSIB byte, we need a vector register for index, and all vector
5793 registers must be distinct. */
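/* E.g. the VEX-encoded gather
   "vgatherdps %ymm2, (%rax,%ymm1,4), %ymm0" requires the mask,
   index, and destination registers to be pairwise distinct; the
   checks below enforce that. */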
5794 if (t->opcode_modifier.sib)
5795 {
5796 if (!i.index_reg
5797 || !((t->opcode_modifier.sib == VECSIB128
5798 && i.index_reg->reg_type.bitfield.xmmword)
5799 || (t->opcode_modifier.sib == VECSIB256
5800 && i.index_reg->reg_type.bitfield.ymmword)
5801 || (t->opcode_modifier.sib == VECSIB512
5802 && i.index_reg->reg_type.bitfield.zmmword)))
5803 {
5804 i.error = invalid_vsib_address;
5805 return 1;
5806 }
5807
5808 gas_assert (i.reg_operands == 2 || i.mask);
5809 if (i.reg_operands == 2 && !i.mask)
5810 {
5811 gas_assert (i.types[0].bitfield.class == RegSIMD);
5812 gas_assert (i.types[0].bitfield.xmmword
5813 || i.types[0].bitfield.ymmword);
5814 gas_assert (i.types[2].bitfield.class == RegSIMD);
5815 gas_assert (i.types[2].bitfield.xmmword
5816 || i.types[2].bitfield.ymmword);
5817 if (operand_check == check_none)
5818 return 0;
5819 if (register_number (i.op[0].regs)
5820 != register_number (i.index_reg)
5821 && register_number (i.op[2].regs)
5822 != register_number (i.index_reg)
5823 && register_number (i.op[0].regs)
5824 != register_number (i.op[2].regs))
5825 return 0;
5826 if (operand_check == check_error)
5827 {
5828 i.error = invalid_vector_register_set;
5829 return 1;
5830 }
5831 as_warn (_("mask, index, and destination registers should be distinct"));
5832 }
5833 else if (i.reg_operands == 1 && i.mask)
5834 {
5835 if (i.types[1].bitfield.class == RegSIMD
5836 && (i.types[1].bitfield.xmmword
5837 || i.types[1].bitfield.ymmword
5838 || i.types[1].bitfield.zmmword)
5839 && (register_number (i.op[1].regs)
5840 == register_number (i.index_reg)))
5841 {
5842 if (operand_check == check_error)
5843 {
5844 i.error = invalid_vector_register_set;
5845 return 1;
5846 }
5847 if (operand_check != check_none)
5848 as_warn (_("index and destination registers should be distinct"));
5849 }
5850 }
5851 }
5852
5853 /* Check if broadcast is supported by the instruction and is applied
5854 to the memory operand. */
5855 if (i.broadcast)
5856 {
5857 i386_operand_type type, overlap;
5858
5859 /* Check if specified broadcast is supported in this instruction,
5860 and its broadcast bytes match the memory operand. */
5861 op = i.broadcast->operand;
5862 if (!t->opcode_modifier.broadcast
5863 || !(i.flags[op] & Operand_Mem)
5864 || (!i.types[op].bitfield.unspecified
5865 && !match_broadcast_size (t, op)))
5866 {
5867 bad_broadcast:
5868 i.error = unsupported_broadcast;
5869 return 1;
5870 }
5871
5872 i.broadcast->bytes = ((1 << (t->opcode_modifier.broadcast - 1))
5873 * i.broadcast->type);
5874 operand_type_set (&type, 0);
5875 switch (i.broadcast->bytes)
5876 {
5877 case 2:
5878 type.bitfield.word = 1;
5879 break;
5880 case 4:
5881 type.bitfield.dword = 1;
5882 break;
5883 case 8:
5884 type.bitfield.qword = 1;
5885 break;
5886 case 16:
5887 type.bitfield.xmmword = 1;
5888 break;
5889 case 32:
5890 type.bitfield.ymmword = 1;
5891 break;
5892 case 64:
5893 type.bitfield.zmmword = 1;
5894 break;
5895 default:
5896 goto bad_broadcast;
5897 }
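/* Rough worked example: for "vaddps (%rax){1to16}, %zmm1, %zmm2" the
element is a dword, so t->opcode_modifier.broadcast - 1 is 2 and
i.broadcast->type is 16, giving bytes = 4 * 16 = 64, i.e. a full
ZMMword of data to be matched against the template just below. */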
5898
5899 overlap = operand_type_and (type, t->operand_types[op]);
5900 if (t->operand_types[op].bitfield.class == RegSIMD
5901 && t->operand_types[op].bitfield.byte
5902 + t->operand_types[op].bitfield.word
5903 + t->operand_types[op].bitfield.dword
5904 + t->operand_types[op].bitfield.qword > 1)
5905 {
5906 overlap.bitfield.xmmword = 0;
5907 overlap.bitfield.ymmword = 0;
5908 overlap.bitfield.zmmword = 0;
5909 }
5910 if (operand_type_all_zero (&overlap))
5911 goto bad_broadcast;
5912
5913 if (t->opcode_modifier.checkregsize)
5914 {
5915 unsigned int j;
5916
5917 type.bitfield.baseindex = 1;
5918 for (j = 0; j < i.operands; ++j)
5919 {
5920 if (j != op
5921 && !operand_type_register_match(i.types[j],
5922 t->operand_types[j],
5923 type,
5924 t->operand_types[op]))
5925 goto bad_broadcast;
5926 }
5927 }
5928 }
5929 /* If broadcast is supported by this instruction, we need to check that
5930 an operand of one-element size isn't specified without broadcast. */
5931 else if (t->opcode_modifier.broadcast && i.mem_operands)
5932 {
5933 /* Find memory operand. */
5934 for (op = 0; op < i.operands; op++)
5935 if (i.flags[op] & Operand_Mem)
5936 break;
5937 gas_assert (op < i.operands);
5938 /* Check size of the memory operand. */
5939 if (match_broadcast_size (t, op))
5940 {
5941 i.error = broadcast_needed;
5942 return 1;
5943 }
5944 }
5945 else
5946 op = MAX_OPERANDS - 1; /* Avoid uninitialized variable warning. */
5947
5948 /* Check if requested masking is supported. */
5949 if (i.mask)
5950 {
5951 switch (t->opcode_modifier.masking)
5952 {
5953 case BOTH_MASKING:
5954 break;
5955 case MERGING_MASKING:
5956 if (i.mask->zeroing)
5957 {
5958 case 0:
5959 i.error = unsupported_masking;
5960 return 1;
5961 }
5962 break;
5963 case DYNAMIC_MASKING:
5964 /* Memory destinations allow only merging masking. */
5965 if (i.mask->zeroing && i.mem_operands)
5966 {
5967 /* Find memory operand. */
5968 for (op = 0; op < i.operands; op++)
5969 if (i.flags[op] & Operand_Mem)
5970 break;
5971 gas_assert (op < i.operands);
5972 if (op == i.operands - 1)
5973 {
5974 i.error = unsupported_masking;
5975 return 1;
5976 }
5977 }
5978 break;
5979 default:
5980 abort ();
5981 }
5982 }
5983
5984 /* Check if masking is applied to dest operand. */
5985 if (i.mask && (i.mask->operand != (int) (i.operands - 1)))
5986 {
5987 i.error = mask_not_on_destination;
5988 return 1;
5989 }
5990
5991 /* Check RC/SAE. */
5992 if (i.rounding)
5993 {
5994 if (!t->opcode_modifier.sae
5995 || (i.rounding->type != saeonly && !t->opcode_modifier.staticrounding))
5996 {
5997 i.error = unsupported_rc_sae;
5998 return 1;
5999 }
6000 /* If the instruction has several immediate operands and one of
6001 them is rounding, the rounding operand should be the last
6002 immediate operand. */
6003 if (i.imm_operands > 1
6004 && i.rounding->operand != (int) (i.imm_operands - 1))
6005 {
6006 i.error = rc_sae_operand_not_last_imm;
6007 return 1;
6008 }
6009 }
6010
6011 /* Check the special Imm4 cases; must be the first operand. */
6012 if (t->cpu_flags.bitfield.cpuxop && t->operands == 5)
6013 {
6014 if (i.op[0].imms->X_op != O_constant
6015 || !fits_in_imm4 (i.op[0].imms->X_add_number))
6016 {
6017 i.error = bad_imm4;
6018 return 1;
6019 }
6020
6021 /* Turn off Imm<N> so that update_imm won't complain. */
6022 operand_type_set (&i.types[0], 0);
6023 }
6024
6025 /* Check vector Disp8 operand. */
6026 if (t->opcode_modifier.disp8memshift
6027 && i.disp_encoding != disp_encoding_32bit)
6028 {
6029 if (i.broadcast)
6030 i.memshift = t->opcode_modifier.broadcast - 1;
6031 else if (t->opcode_modifier.disp8memshift != DISP8_SHIFT_VL)
6032 i.memshift = t->opcode_modifier.disp8memshift;
6033 else
6034 {
6035 const i386_operand_type *type = NULL;
6036
6037 i.memshift = 0;
6038 for (op = 0; op < i.operands; op++)
6039 if (i.flags[op] & Operand_Mem)
6040 {
6041 if (t->opcode_modifier.evex == EVEXLIG)
6042 i.memshift = 2 + (i.suffix == QWORD_MNEM_SUFFIX);
6043 else if (t->operand_types[op].bitfield.xmmword
6044 + t->operand_types[op].bitfield.ymmword
6045 + t->operand_types[op].bitfield.zmmword <= 1)
6046 type = &t->operand_types[op];
6047 else if (!i.types[op].bitfield.unspecified)
6048 type = &i.types[op];
6049 }
6050 else if (i.types[op].bitfield.class == RegSIMD
6051 && t->opcode_modifier.evex != EVEXLIG)
6052 {
6053 if (i.types[op].bitfield.zmmword)
6054 i.memshift = 6;
6055 else if (i.types[op].bitfield.ymmword && i.memshift < 5)
6056 i.memshift = 5;
6057 else if (i.types[op].bitfield.xmmword && i.memshift < 4)
6058 i.memshift = 4;
6059 }
6060
6061 if (type)
6062 {
6063 if (type->bitfield.zmmword)
6064 i.memshift = 6;
6065 else if (type->bitfield.ymmword)
6066 i.memshift = 5;
6067 else if (type->bitfield.xmmword)
6068 i.memshift = 4;
6069 }
6070
6071 /* For the check in fits_in_disp8(). */
6072 if (i.memshift == 0)
6073 i.memshift = -1;
6074 }
6075
6076 for (op = 0; op < i.operands; op++)
6077 if (operand_type_check (i.types[op], disp)
6078 && i.op[op].disps->X_op == O_constant)
6079 {
6080 if (fits_in_disp8 (i.op[op].disps->X_add_number))
6081 {
6082 i.types[op].bitfield.disp8 = 1;
6083 return 0;
6084 }
6085 i.types[op].bitfield.disp8 = 0;
6086 }
6087 }
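/* Roughly, fits_in_disp8 () implements the EVEX disp8*N rule: a
constant displacement D may use the one-byte form when

(D & ((1 << i.memshift) - 1)) == 0

and D >> i.memshift fits in a signed byte. E.g. with a ZMM memory
operand i.memshift is 6, so the 128 in "vmovaps 128(%rax), %zmm0" is
emitted as disp8 = 128 >> 6 = 2. */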
6088
6089 i.memshift = 0;
6090
6091 return 0;
6092 }
6093
6094 /* Check if encoding requirements are met by the instruction. */
6095
6096 static int
6097 VEX_check_encoding (const insn_template *t)
6098 {
6099 if (i.vec_encoding == vex_encoding_error)
6100 {
6101 i.error = unsupported;
6102 return 1;
6103 }
6104
6105 if (i.vec_encoding == vex_encoding_evex)
6106 {
6107 /* This instruction must be encoded with EVEX prefix. */
6108 if (!is_evex_encoding (t))
6109 {
6110 i.error = unsupported;
6111 return 1;
6112 }
6113 return 0;
6114 }
6115
6116 if (!t->opcode_modifier.vex)
6117 {
6118 /* This instruction template doesn't have VEX prefix. */
6119 if (i.vec_encoding != vex_encoding_default)
6120 {
6121 i.error = unsupported;
6122 return 1;
6123 }
6124 return 0;
6125 }
6126
6127 return 0;
6128 }
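/* E.g. the "{evex}" pseudo prefix sets i.vec_encoding to
vex_encoding_evex, so "{evex} vpaddd %xmm1, %xmm2, %xmm3" can only
match an EVEX-encodable template, while "{vex}" on an insn that has no
VEX template fails the check above with "unsupported". */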
6129
6130 static const insn_template *
6131 match_template (char mnem_suffix)
6132 {
6133 /* Points to template once we've found it. */
6134 const insn_template *t;
6135 i386_operand_type overlap0, overlap1, overlap2, overlap3;
6136 i386_operand_type overlap4;
6137 unsigned int found_reverse_match;
6138 i386_opcode_modifier suffix_check;
6139 i386_operand_type operand_types [MAX_OPERANDS];
6140 int addr_prefix_disp;
6141 unsigned int j, size_match, check_register;
6142 enum i386_error specific_error = 0;
6143
6144 #if MAX_OPERANDS != 5
6145 # error "MAX_OPERANDS must be 5."
6146 #endif
6147
6148 found_reverse_match = 0;
6149 addr_prefix_disp = -1;
6150
6151 /* Prepare for mnemonic suffix check. */
6152 memset (&suffix_check, 0, sizeof (suffix_check));
6153 switch (mnem_suffix)
6154 {
6155 case BYTE_MNEM_SUFFIX:
6156 suffix_check.no_bsuf = 1;
6157 break;
6158 case WORD_MNEM_SUFFIX:
6159 suffix_check.no_wsuf = 1;
6160 break;
6161 case SHORT_MNEM_SUFFIX:
6162 suffix_check.no_ssuf = 1;
6163 break;
6164 case LONG_MNEM_SUFFIX:
6165 suffix_check.no_lsuf = 1;
6166 break;
6167 case QWORD_MNEM_SUFFIX:
6168 suffix_check.no_qsuf = 1;
6169 break;
6170 default:
6171 /* NB: In Intel syntax, normally we can check for memory operand
6172 size when there is no mnemonic suffix. But jmp and call have
6173 2 different encodings with Dword memory operand size, one with
6174 No_ldSuf and the other without. i.suffix is set to
6175 LONG_DOUBLE_MNEM_SUFFIX to skip the one with No_ldSuf. */
6176 if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
6177 suffix_check.no_ldsuf = 1;
6178 }
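/* Example: for "movw" the 'w' suffix sets suffix_check.no_wsuf above,
so every template carrying the No_wSuf modifier is skipped by the
suffix check inside the template loop below. */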
6179
6180 /* Must have right number of operands. */
6181 i.error = number_of_operands_mismatch;
6182
6183 for (t = current_templates->start; t < current_templates->end; t++)
6184 {
6185 addr_prefix_disp = -1;
6186 found_reverse_match = 0;
6187
6188 if (i.operands != t->operands)
6189 continue;
6190
6191 /* Check processor support. */
6192 i.error = unsupported;
6193 if (cpu_flags_match (t) != CPU_FLAGS_PERFECT_MATCH)
6194 continue;
6195
6196 /* Check AT&T mnemonic. */
6197 i.error = unsupported_with_intel_mnemonic;
6198 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
6199 continue;
6200
6201 /* Check AT&T/Intel syntax. */
6202 i.error = unsupported_syntax;
6203 if ((intel_syntax && t->opcode_modifier.attsyntax)
6204 || (!intel_syntax && t->opcode_modifier.intelsyntax))
6205 continue;
6206
6207 /* Check Intel64/AMD64 ISA. */
6208 switch (isa64)
6209 {
6210 default:
6211 /* Default: Don't accept Intel64. */
6212 if (t->opcode_modifier.isa64 == INTEL64)
6213 continue;
6214 break;
6215 case amd64:
6216 /* -mamd64: Don't accept Intel64 and Intel64-only instructions. */
6217 if (t->opcode_modifier.isa64 >= INTEL64)
6218 continue;
6219 break;
6220 case intel64:
6221 /* -mintel64: Don't accept AMD64. */
6222 if (t->opcode_modifier.isa64 == AMD64 && flag_code == CODE_64BIT)
6223 continue;
6224 break;
6225 }
6226
6227 /* Check the suffix. */
6228 i.error = invalid_instruction_suffix;
6229 if ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
6230 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
6231 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
6232 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
6233 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
6234 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf))
6235 continue;
6236
6237 size_match = operand_size_match (t);
6238 if (!size_match)
6239 continue;
6240
6241 /* This is intentionally not
6242
6243 if (i.jumpabsolute != (t->opcode_modifier.jump == JUMP_ABSOLUTE))
6244
6245 as the case of a missing * on the operand is accepted (perhaps with
6246 a warning, issued further down). */
6247 if (i.jumpabsolute && t->opcode_modifier.jump != JUMP_ABSOLUTE)
6248 {
6249 i.error = operand_type_mismatch;
6250 continue;
6251 }
6252
6253 for (j = 0; j < MAX_OPERANDS; j++)
6254 operand_types[j] = t->operand_types[j];
6255
6256 /* In general, don't allow
6257 - 64-bit operands outside of 64-bit mode,
6258 - 32-bit operands on pre-386. */
6259 j = i.imm_operands + (t->operands > i.imm_operands + 1);
6260 if (((i.suffix == QWORD_MNEM_SUFFIX
6261 && flag_code != CODE_64BIT
6262 && (t->base_opcode != 0x0fc7
6263 || t->extension_opcode != 1 /* cmpxchg8b */))
6264 || (i.suffix == LONG_MNEM_SUFFIX
6265 && !cpu_arch_flags.bitfield.cpui386))
6266 && (intel_syntax
6267 ? (t->opcode_modifier.mnemonicsize != IGNORESIZE
6268 && !intel_float_operand (t->name))
6269 : intel_float_operand (t->name) != 2)
6270 && (t->operands == i.imm_operands
6271 || (operand_types[i.imm_operands].bitfield.class != RegMMX
6272 && operand_types[i.imm_operands].bitfield.class != RegSIMD
6273 && operand_types[i.imm_operands].bitfield.class != RegMask)
6274 || (operand_types[j].bitfield.class != RegMMX
6275 && operand_types[j].bitfield.class != RegSIMD
6276 && operand_types[j].bitfield.class != RegMask))
6277 && !t->opcode_modifier.sib)
6278 continue;
6279
6280 /* Do not verify operands when there are none. */
6281 if (!t->operands)
6282 {
6283 if (VEX_check_encoding (t))
6284 {
6285 specific_error = i.error;
6286 continue;
6287 }
6288
6289 /* We've found a match; break out of loop. */
6290 break;
6291 }
6292
6293 if (!t->opcode_modifier.jump
6294 || t->opcode_modifier.jump == JUMP_ABSOLUTE)
6295 {
6296 /* There should be only one Disp operand. */
6297 for (j = 0; j < MAX_OPERANDS; j++)
6298 if (operand_type_check (operand_types[j], disp))
6299 break;
6300 if (j < MAX_OPERANDS)
6301 {
6302 bfd_boolean override = (i.prefix[ADDR_PREFIX] != 0);
6303
6304 addr_prefix_disp = j;
6305
6306 /* Address size prefix will turn Disp64/Disp32S/Disp32/Disp16
6307 operand into Disp32/Disp32/Disp16/Disp32 operand. */
6308 switch (flag_code)
6309 {
6310 case CODE_16BIT:
6311 override = !override;
6312 /* Fall through. */
6313 case CODE_32BIT:
6314 if (operand_types[j].bitfield.disp32
6315 && operand_types[j].bitfield.disp16)
6316 {
6317 operand_types[j].bitfield.disp16 = override;
6318 operand_types[j].bitfield.disp32 = !override;
6319 }
6320 operand_types[j].bitfield.disp32s = 0;
6321 operand_types[j].bitfield.disp64 = 0;
6322 break;
6323
6324 case CODE_64BIT:
6325 if (operand_types[j].bitfield.disp32s
6326 || operand_types[j].bitfield.disp64)
6327 {
6328 operand_types[j].bitfield.disp64 &= !override;
6329 operand_types[j].bitfield.disp32s &= !override;
6330 operand_types[j].bitfield.disp32 = override;
6331 }
6332 operand_types[j].bitfield.disp16 = 0;
6333 break;
6334 }
6335 }
6336 }
6337
6338 /* Force 0x8b encoding for "mov foo@GOT, %eax". */
6339 if (i.reloc[0] == BFD_RELOC_386_GOT32 && t->base_opcode == 0xa0)
6340 continue;
6341
6342 /* We check register size if needed. */
6343 if (t->opcode_modifier.checkregsize)
6344 {
6345 check_register = (1 << t->operands) - 1;
6346 if (i.broadcast)
6347 check_register &= ~(1 << i.broadcast->operand);
6348 }
6349 else
6350 check_register = 0;
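/* check_register has bit j set when operand j's register size still
needs verifying, e.g. 0b111 for a 3-operand template; a broadcasted
operand's bit is cleared since a memory operand carries no register
size. The (check_register & N) == N tests below pick operand pairs. */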
6351
6352 overlap0 = operand_type_and (i.types[0], operand_types[0]);
6353 switch (t->operands)
6354 {
6355 case 1:
6356 if (!operand_type_match (overlap0, i.types[0]))
6357 continue;
6358 break;
6359 case 2:
6360 /* xchg %eax, %eax is a special case. It is an alias for nop
6361 only in 32bit mode and we can use opcode 0x90. In 64bit
6362 mode, we can't use 0x90 for xchg %eax, %eax since it should
6363 zero-extend %eax to %rax. */
6364 if (flag_code == CODE_64BIT
6365 && t->base_opcode == 0x90
6366 && i.types[0].bitfield.instance == Accum
6367 && i.types[0].bitfield.dword
6368 && i.types[1].bitfield.instance == Accum
6369 && i.types[1].bitfield.dword)
6370 continue;
6371 /* xrelease mov %eax, <disp> is another special case. It must not
6372 match the accumulator-only encoding of mov. */
6373 if (flag_code != CODE_64BIT
6374 && i.hle_prefix
6375 && t->base_opcode == 0xa0
6376 && i.types[0].bitfield.instance == Accum
6377 && (i.flags[1] & Operand_Mem))
6378 continue;
6379 /* Fall through. */
6380
6381 case 3:
6382 if (!(size_match & MATCH_STRAIGHT))
6383 goto check_reverse;
6384 /* Reverse direction of operands if swapping is possible in the first
6385 place (operands need to be symmetric) and
6386 - the load form is requested, and the template is a store form,
6387 - the store form is requested, and the template is a load form,
6388 - the non-default (swapped) form is requested. */
6389 overlap1 = operand_type_and (operand_types[0], operand_types[1]);
6390 if (t->opcode_modifier.d && i.reg_operands == i.operands
6391 && !operand_type_all_zero (&overlap1))
6392 switch (i.dir_encoding)
6393 {
6394 case dir_encoding_load:
6395 if (operand_type_check (operand_types[i.operands - 1], anymem)
6396 || t->opcode_modifier.regmem)
6397 goto check_reverse;
6398 break;
6399
6400 case dir_encoding_store:
6401 if (!operand_type_check (operand_types[i.operands - 1], anymem)
6402 && !t->opcode_modifier.regmem)
6403 goto check_reverse;
6404 break;
6405
6406 case dir_encoding_swap:
6407 goto check_reverse;
6408
6409 case dir_encoding_default:
6410 break;
6411 }
6412 /* If we want store form, we skip the current load. */
6413 if ((i.dir_encoding == dir_encoding_store
6414 || i.dir_encoding == dir_encoding_swap)
6415 && i.mem_operands == 0
6416 && t->opcode_modifier.load)
6417 continue;
6418 /* Fall through. */
6419 case 4:
6420 case 5:
6421 overlap1 = operand_type_and (i.types[1], operand_types[1]);
6422 if (!operand_type_match (overlap0, i.types[0])
6423 || !operand_type_match (overlap1, i.types[1])
6424 || ((check_register & 3) == 3
6425 && !operand_type_register_match (i.types[0],
6426 operand_types[0],
6427 i.types[1],
6428 operand_types[1])))
6429 {
6430 /* Check if other direction is valid ... */
6431 if (!t->opcode_modifier.d)
6432 continue;
6433
6434 check_reverse:
6435 if (!(size_match & MATCH_REVERSE))
6436 continue;
6437 /* Try reversing direction of operands. */
6438 overlap0 = operand_type_and (i.types[0], operand_types[i.operands - 1]);
6439 overlap1 = operand_type_and (i.types[i.operands - 1], operand_types[0]);
6440 if (!operand_type_match (overlap0, i.types[0])
6441 || !operand_type_match (overlap1, i.types[i.operands - 1])
6442 || (check_register
6443 && !operand_type_register_match (i.types[0],
6444 operand_types[i.operands - 1],
6445 i.types[i.operands - 1],
6446 operand_types[0])))
6447 {
6448 /* Does not match either direction. */
6449 continue;
6450 }
6451 /* found_reverse_match holds which of D or FloatR
6452 we've found. */
6453 if (!t->opcode_modifier.d)
6454 found_reverse_match = 0;
6455 else if (operand_types[0].bitfield.tbyte)
6456 found_reverse_match = Opcode_FloatD;
6457 else if (operand_types[0].bitfield.xmmword
6458 || operand_types[i.operands - 1].bitfield.xmmword
6459 || operand_types[0].bitfield.class == RegMMX
6460 || operand_types[i.operands - 1].bitfield.class == RegMMX
6461 || is_any_vex_encoding(t))
6462 found_reverse_match = (t->base_opcode & 0xee) != 0x6e
6463 ? Opcode_SIMD_FloatD : Opcode_SIMD_IntD;
6464 else
6465 found_reverse_match = Opcode_D;
6466 if (t->opcode_modifier.floatr)
6467 found_reverse_match |= Opcode_FloatR;
6468 }
6469 else
6470 {
6471 /* Found a forward 2 operand match here. */
6472 switch (t->operands)
6473 {
6474 case 5:
6475 overlap4 = operand_type_and (i.types[4],
6476 operand_types[4]);
6477 /* Fall through. */
6478 case 4:
6479 overlap3 = operand_type_and (i.types[3],
6480 operand_types[3]);
6481 /* Fall through. */
6482 case 3:
6483 overlap2 = operand_type_and (i.types[2],
6484 operand_types[2]);
6485 break;
6486 }
6487
6488 switch (t->operands)
6489 {
6490 case 5:
6491 if (!operand_type_match (overlap4, i.types[4])
6492 || !operand_type_register_match (i.types[3],
6493 operand_types[3],
6494 i.types[4],
6495 operand_types[4]))
6496 continue;
6497 /* Fall through. */
6498 case 4:
6499 if (!operand_type_match (overlap3, i.types[3])
6500 || ((check_register & 0xa) == 0xa
6501 && !operand_type_register_match (i.types[1],
6502 operand_types[1],
6503 i.types[3],
6504 operand_types[3]))
6505 || ((check_register & 0xc) == 0xc
6506 && !operand_type_register_match (i.types[2],
6507 operand_types[2],
6508 i.types[3],
6509 operand_types[3])))
6510 continue;
6511 /* Fall through. */
6512 case 3:
6513 /* Here we make use of the fact that there are no
6514 reverse match 3 operand instructions. */
6515 if (!operand_type_match (overlap2, i.types[2])
6516 || ((check_register & 5) == 5
6517 && !operand_type_register_match (i.types[0],
6518 operand_types[0],
6519 i.types[2],
6520 operand_types[2]))
6521 || ((check_register & 6) == 6
6522 && !operand_type_register_match (i.types[1],
6523 operand_types[1],
6524 i.types[2],
6525 operand_types[2])))
6526 continue;
6527 break;
6528 }
6529 }
6530 /* Found either forward/reverse 2, 3 or 4 operand match here:
6531 slip through to break. */
6532 }
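/* Illustration of the reverse match: for GPR moves the store form
"mov %eax, (%rbx)" uses opcode 0x89 and the load form 0x8b;
found_reverse_match is then Opcode_D (0x2), and the XOR applied to
i.tm.base_opcode further down toggles between the two encodings. */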
6533
6534 /* Check if vector operands are valid. */
6535 if (check_VecOperands (t))
6536 {
6537 specific_error = i.error;
6538 continue;
6539 }
6540
6541 /* Check if VEX/EVEX encoding requirements can be satisfied. */
6542 if (VEX_check_encoding (t))
6543 {
6544 specific_error = i.error;
6545 continue;
6546 }
6547
6548 /* We've found a match; break out of loop. */
6549 break;
6550 }
6551
6552 if (t == current_templates->end)
6553 {
6554 /* We found no match. */
6555 const char *err_msg;
6556 switch (specific_error ? specific_error : i.error)
6557 {
6558 default:
6559 abort ();
6560 case operand_size_mismatch:
6561 err_msg = _("operand size mismatch");
6562 break;
6563 case operand_type_mismatch:
6564 err_msg = _("operand type mismatch");
6565 break;
6566 case register_type_mismatch:
6567 err_msg = _("register type mismatch");
6568 break;
6569 case number_of_operands_mismatch:
6570 err_msg = _("number of operands mismatch");
6571 break;
6572 case invalid_instruction_suffix:
6573 err_msg = _("invalid instruction suffix");
6574 break;
6575 case bad_imm4:
6576 err_msg = _("constant doesn't fit in 4 bits");
6577 break;
6578 case unsupported_with_intel_mnemonic:
6579 err_msg = _("unsupported with Intel mnemonic");
6580 break;
6581 case unsupported_syntax:
6582 err_msg = _("unsupported syntax");
6583 break;
6584 case unsupported:
6585 as_bad (_("unsupported instruction `%s'"),
6586 current_templates->start->name);
6587 return NULL;
6588 case invalid_vsib_address:
6589 err_msg = _("invalid VSIB address");
6590 break;
6591 case invalid_vector_register_set:
6592 err_msg = _("mask, index, and destination registers must be distinct");
6593 break;
6594 case unsupported_vector_index_register:
6595 err_msg = _("unsupported vector index register");
6596 break;
6597 case unsupported_broadcast:
6598 err_msg = _("unsupported broadcast");
6599 break;
6600 case broadcast_needed:
6601 err_msg = _("broadcast is needed for operand of such type");
6602 break;
6603 case unsupported_masking:
6604 err_msg = _("unsupported masking");
6605 break;
6606 case mask_not_on_destination:
6607 err_msg = _("mask not on destination operand");
6608 break;
6609 case no_default_mask:
6610 err_msg = _("default mask isn't allowed");
6611 break;
6612 case unsupported_rc_sae:
6613 err_msg = _("unsupported static rounding/sae");
6614 break;
6615 case rc_sae_operand_not_last_imm:
6616 if (intel_syntax)
6617 err_msg = _("RC/SAE operand must precede immediate operands");
6618 else
6619 err_msg = _("RC/SAE operand must follow immediate operands");
6620 break;
6621 case invalid_register_operand:
6622 err_msg = _("invalid register operand");
6623 break;
6624 }
6625 as_bad (_("%s for `%s'"), err_msg,
6626 current_templates->start->name);
6627 return NULL;
6628 }
6629
6630 if (!quiet_warnings)
6631 {
6632 if (!intel_syntax
6633 && (i.jumpabsolute != (t->opcode_modifier.jump == JUMP_ABSOLUTE)))
6634 as_warn (_("indirect %s without `*'"), t->name);
6635
6636 if (t->opcode_modifier.isprefix
6637 && t->opcode_modifier.mnemonicsize == IGNORESIZE)
6638 {
6639 /* Warn them that a data or address size prefix doesn't
6640 affect assembly of the next line of code. */
6641 as_warn (_("stand-alone `%s' prefix"), t->name);
6642 }
6643 }
6644
6645 /* Copy the template we found. */
6646 i.tm = *t;
6647
6648 if (addr_prefix_disp != -1)
6649 i.tm.operand_types[addr_prefix_disp]
6650 = operand_types[addr_prefix_disp];
6651
6652 if (found_reverse_match)
6653 {
6654 /* If we found a reverse match we must alter the opcode direction
6655 bit and clear/flip the regmem modifier one. found_reverse_match
6656 holds bits to change (different for int & float insns). */
6657
6658 i.tm.base_opcode ^= found_reverse_match;
6659
6660 i.tm.operand_types[0] = operand_types[i.operands - 1];
6661 i.tm.operand_types[i.operands - 1] = operand_types[0];
6662
6663 /* Certain SIMD insns have their load forms specified in the opcode
6664 table, and hence we need to _set_ RegMem instead of clearing it.
6665 We need to avoid setting the bit though on insns like KMOVW. */
6666 i.tm.opcode_modifier.regmem
6667 = i.tm.opcode_modifier.modrm && i.tm.opcode_modifier.d
6668 && i.tm.operands > 2U - i.tm.opcode_modifier.sse2avx
6669 && !i.tm.opcode_modifier.regmem;
6670 }
6671
6672 return t;
6673 }
6674
6675 static int
6676 check_string (void)
6677 {
6678 unsigned int es_op = i.tm.opcode_modifier.isstring - IS_STRING_ES_OP0;
6679 unsigned int op = i.tm.operand_types[0].bitfield.baseindex ? es_op : 0;
6680
6681 if (i.seg[op] != NULL && i.seg[op] != &es)
6682 {
6683 as_bad (_("`%s' operand %u must use `%ses' segment"),
6684 i.tm.name,
6685 intel_syntax ? i.tm.operands - es_op : es_op + 1,
6686 register_prefix);
6687 return 0;
6688 }
6689
6690 /* There's only ever one segment override allowed per instruction.
6691 This instruction possibly has a legal segment override on the
6692 second operand, so copy the segment to where non-string
6693 instructions store it, allowing common code. */
6694 i.seg[op] = i.seg[1];
6695
6696 return 1;
6697 }
6698
6699 static int
6700 process_suffix (void)
6701 {
6702 /* If matched instruction specifies an explicit instruction mnemonic
6703 suffix, use it. */
6704 if (i.tm.opcode_modifier.size == SIZE16)
6705 i.suffix = WORD_MNEM_SUFFIX;
6706 else if (i.tm.opcode_modifier.size == SIZE32)
6707 i.suffix = LONG_MNEM_SUFFIX;
6708 else if (i.tm.opcode_modifier.size == SIZE64)
6709 i.suffix = QWORD_MNEM_SUFFIX;
6710 else if (i.reg_operands
6711 && (i.operands > 1 || i.types[0].bitfield.class == Reg)
6712 && !i.tm.opcode_modifier.addrprefixopreg)
6713 {
6714 unsigned int numop = i.operands;
6715
6716 /* movsx/movzx want only their source operand considered here, for the
6717 ambiguity checking below. The suffix will be replaced afterwards
6718 to represent the destination (register). */
6719 if (((i.tm.base_opcode | 8) == 0xfbe && i.tm.opcode_modifier.w)
6720 || (i.tm.base_opcode == 0x63 && i.tm.cpu_flags.bitfield.cpu64))
6721 --i.operands;
6722
6723 /* crc32 needs REX.W set regardless of suffix / source operand size. */
6724 if (i.tm.base_opcode == 0xf20f38f0
6725 && i.tm.operand_types[1].bitfield.qword)
6726 i.rex |= REX_W;
6727
6728 /* If there's no instruction mnemonic suffix we try to invent one
6729 based on GPR operands. */
6730 if (!i.suffix)
6731 {
6732 /* We take i.suffix from the last register operand specified.
6733 The destination register type is more significant than the
6734 source register type. crc32 in SSE4.2 prefers the source
6735 register type. */
6736 unsigned int op = i.tm.base_opcode != 0xf20f38f0 ? i.operands : 1;
6737
6738 while (op--)
6739 if (i.tm.operand_types[op].bitfield.instance == InstanceNone
6740 || i.tm.operand_types[op].bitfield.instance == Accum)
6741 {
6742 if (i.types[op].bitfield.class != Reg)
6743 continue;
6744 if (i.types[op].bitfield.byte)
6745 i.suffix = BYTE_MNEM_SUFFIX;
6746 else if (i.types[op].bitfield.word)
6747 i.suffix = WORD_MNEM_SUFFIX;
6748 else if (i.types[op].bitfield.dword)
6749 i.suffix = LONG_MNEM_SUFFIX;
6750 else if (i.types[op].bitfield.qword)
6751 i.suffix = QWORD_MNEM_SUFFIX;
6752 else
6753 continue;
6754 break;
6755 }
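/* Example of the inference above: "add %eax, (%rbx)" carries no
suffix, so the %eax operand supplies LONG_MNEM_SUFFIX and the insn is
assembled as if written "addl". */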
6756
6757 /* As an exception, movsx/movzx silently default to a byte source
6758 in AT&T mode. */
6759 if ((i.tm.base_opcode | 8) == 0xfbe && i.tm.opcode_modifier.w
6760 && !i.suffix && !intel_syntax)
6761 i.suffix = BYTE_MNEM_SUFFIX;
6762 }
6763 else if (i.suffix == BYTE_MNEM_SUFFIX)
6764 {
6765 if (intel_syntax
6766 && i.tm.opcode_modifier.mnemonicsize == IGNORESIZE
6767 && i.tm.opcode_modifier.no_bsuf)
6768 i.suffix = 0;
6769 else if (!check_byte_reg ())
6770 return 0;
6771 }
6772 else if (i.suffix == LONG_MNEM_SUFFIX)
6773 {
6774 if (intel_syntax
6775 && i.tm.opcode_modifier.mnemonicsize == IGNORESIZE
6776 && i.tm.opcode_modifier.no_lsuf
6777 && !i.tm.opcode_modifier.todword
6778 && !i.tm.opcode_modifier.toqword)
6779 i.suffix = 0;
6780 else if (!check_long_reg ())
6781 return 0;
6782 }
6783 else if (i.suffix == QWORD_MNEM_SUFFIX)
6784 {
6785 if (intel_syntax
6786 && i.tm.opcode_modifier.mnemonicsize == IGNORESIZE
6787 && i.tm.opcode_modifier.no_qsuf
6788 && !i.tm.opcode_modifier.todword
6789 && !i.tm.opcode_modifier.toqword)
6790 i.suffix = 0;
6791 else if (!check_qword_reg ())
6792 return 0;
6793 }
6794 else if (i.suffix == WORD_MNEM_SUFFIX)
6795 {
6796 if (intel_syntax
6797 && i.tm.opcode_modifier.mnemonicsize == IGNORESIZE
6798 && i.tm.opcode_modifier.no_wsuf)
6799 i.suffix = 0;
6800 else if (!check_word_reg ())
6801 return 0;
6802 }
6803 else if (intel_syntax
6804 && i.tm.opcode_modifier.mnemonicsize == IGNORESIZE)
6805 /* Do nothing if the instruction is going to ignore the prefix. */
6806 ;
6807 else
6808 abort ();
6809
6810 /* Undo the movsx/movzx change done above. */
6811 i.operands = numop;
6812 }
6813 else if (i.tm.opcode_modifier.mnemonicsize == DEFAULTSIZE
6814 && !i.suffix)
6815 {
6816 i.suffix = stackop_size;
6817 if (stackop_size == LONG_MNEM_SUFFIX)
6818 {
6819 /* stackop_size is set to LONG_MNEM_SUFFIX for the
6820 .code16gcc directive to support 16-bit mode with
6821 32-bit address. For IRET without a suffix, generate
6822 16-bit IRET (opcode 0xcf) to return from an interrupt
6823 handler. */
6824 if (i.tm.base_opcode == 0xcf)
6825 {
6826 i.suffix = WORD_MNEM_SUFFIX;
6827 as_warn (_("generating 16-bit `iret' for .code16gcc directive"));
6828 }
6829 /* Warn about changed behavior for segment register push/pop. */
6830 else if ((i.tm.base_opcode | 1) == 0x07)
6831 as_warn (_("generating 32-bit `%s', unlike earlier gas versions"),
6832 i.tm.name);
6833 }
6834 }
6835 else if (!i.suffix
6836 && (i.tm.opcode_modifier.jump == JUMP_ABSOLUTE
6837 || i.tm.opcode_modifier.jump == JUMP_BYTE
6838 || i.tm.opcode_modifier.jump == JUMP_INTERSEGMENT
6839 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
6840 && i.tm.extension_opcode <= 3)))
6841 {
6842 switch (flag_code)
6843 {
6844 case CODE_64BIT:
6845 if (!i.tm.opcode_modifier.no_qsuf)
6846 {
6847 if (i.tm.opcode_modifier.jump == JUMP_BYTE
6848 || i.tm.opcode_modifier.no_lsuf)
6849 i.suffix = QWORD_MNEM_SUFFIX;
6850 break;
6851 }
6852 /* Fall through. */
6853 case CODE_32BIT:
6854 if (!i.tm.opcode_modifier.no_lsuf)
6855 i.suffix = LONG_MNEM_SUFFIX;
6856 break;
6857 case CODE_16BIT:
6858 if (!i.tm.opcode_modifier.no_wsuf)
6859 i.suffix = WORD_MNEM_SUFFIX;
6860 break;
6861 }
6862 }
6863
6864 if (!i.suffix
6865 && (i.tm.opcode_modifier.mnemonicsize != DEFAULTSIZE
6866 /* Also cover lret/retf/iret in 64-bit mode. */
6867 || (flag_code == CODE_64BIT
6868 && !i.tm.opcode_modifier.no_lsuf
6869 && !i.tm.opcode_modifier.no_qsuf))
6870 && i.tm.opcode_modifier.mnemonicsize != IGNORESIZE
6871 /* Explicit sizing prefixes are assumed to disambiguate insns. */
6872 && !i.prefix[DATA_PREFIX] && !(i.prefix[REX_PREFIX] & REX_W)
6873 /* Accept FLDENV et al without suffix. */
6874 && (i.tm.opcode_modifier.no_ssuf || i.tm.opcode_modifier.floatmf))
6875 {
6876 unsigned int suffixes, evex = 0;
6877
6878 suffixes = !i.tm.opcode_modifier.no_bsuf;
6879 if (!i.tm.opcode_modifier.no_wsuf)
6880 suffixes |= 1 << 1;
6881 if (!i.tm.opcode_modifier.no_lsuf)
6882 suffixes |= 1 << 2;
6883 if (!i.tm.opcode_modifier.no_ldsuf)
6884 suffixes |= 1 << 3;
6885 if (!i.tm.opcode_modifier.no_ssuf)
6886 suffixes |= 1 << 4;
6887 if (flag_code == CODE_64BIT && !i.tm.opcode_modifier.no_qsuf)
6888 suffixes |= 1 << 5;
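/* suffixes is now a bitmask of the sizes still permitted: bit 0 = 'b',
bit 1 = 'w', bit 2 = 'l', bit 3 = long double, bit 4 = 's', and
bit 5 = 'q' (64-bit mode only); bits 6-8 may be set below for
XMM/YMM/ZMM memory operands. "suffixes & (suffixes - 1)" further down
is nonzero exactly when more than one bit remains set. */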
6889
6890 /* For [XYZ]MMWORD operands inspect operand sizes. While generally
6891 also suitable for AT&T syntax mode, it was requested that this be
6892 restricted to just Intel syntax. */
6893 if (intel_syntax && is_any_vex_encoding (&i.tm) && !i.broadcast)
6894 {
6895 unsigned int op;
6896
6897 for (op = 0; op < i.tm.operands; ++op)
6898 {
6899 if (is_evex_encoding (&i.tm)
6900 && !cpu_arch_flags.bitfield.cpuavx512vl)
6901 {
6902 if (i.tm.operand_types[op].bitfield.ymmword)
6903 i.tm.operand_types[op].bitfield.xmmword = 0;
6904 if (i.tm.operand_types[op].bitfield.zmmword)
6905 i.tm.operand_types[op].bitfield.ymmword = 0;
6906 if (!i.tm.opcode_modifier.evex
6907 || i.tm.opcode_modifier.evex == EVEXDYN)
6908 i.tm.opcode_modifier.evex = EVEX512;
6909 }
6910
6911 if (i.tm.operand_types[op].bitfield.xmmword
6912 + i.tm.operand_types[op].bitfield.ymmword
6913 + i.tm.operand_types[op].bitfield.zmmword < 2)
6914 continue;
6915
6916 /* Any properly sized operand disambiguates the insn. */
6917 if (i.types[op].bitfield.xmmword
6918 || i.types[op].bitfield.ymmword
6919 || i.types[op].bitfield.zmmword)
6920 {
6921 suffixes &= ~(7 << 6);
6922 evex = 0;
6923 break;
6924 }
6925
6926 if ((i.flags[op] & Operand_Mem)
6927 && i.tm.operand_types[op].bitfield.unspecified)
6928 {
6929 if (i.tm.operand_types[op].bitfield.xmmword)
6930 suffixes |= 1 << 6;
6931 if (i.tm.operand_types[op].bitfield.ymmword)
6932 suffixes |= 1 << 7;
6933 if (i.tm.operand_types[op].bitfield.zmmword)
6934 suffixes |= 1 << 8;
6935 if (is_evex_encoding (&i.tm))
6936 evex = EVEX512;
6937 }
6938 }
6939 }
6940
6941 /* Are multiple suffixes / operand sizes allowed? */
6942 if (suffixes & (suffixes - 1))
6943 {
6944 if (intel_syntax
6945 && (i.tm.opcode_modifier.mnemonicsize != DEFAULTSIZE
6946 || operand_check == check_error))
6947 {
6948 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
6949 return 0;
6950 }
6951 if (operand_check == check_error)
6952 {
6953 as_bad (_("no instruction mnemonic suffix given and "
6954 "no register operands; can't size `%s'"), i.tm.name);
6955 return 0;
6956 }
6957 if (operand_check == check_warning)
6958 as_warn (_("%s; using default for `%s'"),
6959 intel_syntax
6960 ? _("ambiguous operand size")
6961 : _("no instruction mnemonic suffix given and "
6962 "no register operands"),
6963 i.tm.name);
6964
6965 if (i.tm.opcode_modifier.floatmf)
6966 i.suffix = SHORT_MNEM_SUFFIX;
6967 else if ((i.tm.base_opcode | 8) == 0xfbe
6968 || (i.tm.base_opcode == 0x63
6969 && i.tm.cpu_flags.bitfield.cpu64))
6970 /* handled below */;
6971 else if (evex)
6972 i.tm.opcode_modifier.evex = evex;
6973 else if (flag_code == CODE_16BIT)
6974 i.suffix = WORD_MNEM_SUFFIX;
6975 else if (!i.tm.opcode_modifier.no_lsuf)
6976 i.suffix = LONG_MNEM_SUFFIX;
6977 else
6978 i.suffix = QWORD_MNEM_SUFFIX;
6979 }
6980 }
6981
6982 if ((i.tm.base_opcode | 8) == 0xfbe
6983 || (i.tm.base_opcode == 0x63 && i.tm.cpu_flags.bitfield.cpu64))
6984 {
6985 /* In Intel syntax, movsx/movzx must have a "suffix" (checked above).
6986 In AT&T syntax, if there is no suffix (warned about above), the default
6987 will be byte extension. */
6988 if (i.tm.opcode_modifier.w && i.suffix && i.suffix != BYTE_MNEM_SUFFIX)
6989 i.tm.base_opcode |= 1;
6990
6991 /* For further processing, the suffix should represent the destination
6992 (register). This is already the case when one was used with
6993 mov[sz][bw]*, but we need to replace it for mov[sz]x, or if there was
6994 no suffix to begin with. */
6995 if (i.tm.opcode_modifier.w || i.tm.base_opcode == 0x63 || !i.suffix)
6996 {
6997 if (i.types[1].bitfield.word)
6998 i.suffix = WORD_MNEM_SUFFIX;
6999 else if (i.types[1].bitfield.qword)
7000 i.suffix = QWORD_MNEM_SUFFIX;
7001 else
7002 i.suffix = LONG_MNEM_SUFFIX;
7003
7004 i.tm.opcode_modifier.w = 0;
7005 }
7006 }
7007
7008 if (!i.tm.opcode_modifier.modrm && i.reg_operands && i.tm.operands < 3)
7009 i.short_form = (i.tm.operand_types[0].bitfield.class == Reg)
7010 != (i.tm.operand_types[1].bitfield.class == Reg);
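/* i.short_form selects the register-in-opcode encodings, e.g.
"push %rbp" becomes 0x50 + 5 = 0x55 with no ModRM byte; the register
number is OR'd into the opcode in process_operands. */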
7011
7012 /* Change the opcode based on the operand size given by i.suffix. */
7013 switch (i.suffix)
7014 {
7015 /* Size floating point instruction. */
7016 case LONG_MNEM_SUFFIX:
7017 if (i.tm.opcode_modifier.floatmf)
7018 {
7019 i.tm.base_opcode ^= 4;
7020 break;
7021 }
7022 /* fall through */
7023 case WORD_MNEM_SUFFIX:
7024 case QWORD_MNEM_SUFFIX:
7025 /* It's not a byte, select word/dword operation. */
7026 if (i.tm.opcode_modifier.w)
7027 {
7028 if (i.short_form)
7029 i.tm.base_opcode |= 8;
7030 else
7031 i.tm.base_opcode |= 1;
7032 }
7033 /* fall through */
7034 case SHORT_MNEM_SUFFIX:
7035 /* Now select between word & dword operations via the operand
7036 size prefix, except for instructions that will ignore this
7037 prefix anyway. */
7038 if (i.suffix != QWORD_MNEM_SUFFIX
7039 && i.tm.opcode_modifier.mnemonicsize != IGNORESIZE
7040 && !i.tm.opcode_modifier.floatmf
7041 && !is_any_vex_encoding (&i.tm)
7042 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
7043 || (flag_code == CODE_64BIT
7044 && i.tm.opcode_modifier.jump == JUMP_BYTE)))
7045 {
7046 unsigned int prefix = DATA_PREFIX_OPCODE;
7047
7048 if (i.tm.opcode_modifier.jump == JUMP_BYTE) /* jcxz, loop */
7049 prefix = ADDR_PREFIX_OPCODE;
7050
7051 if (!add_prefix (prefix))
7052 return 0;
7053 }
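/* E.g. "addw %ax, %bx" assembled in 32-bit mode gets the 0x66
operand-size prefix, while the "jcxz"/"loop" family selects its
counter register width with the 0x67 address-size prefix chosen
above. */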
7054
7055 /* Set mode64 for an operand. */
7056 if (i.suffix == QWORD_MNEM_SUFFIX
7057 && flag_code == CODE_64BIT
7058 && !i.tm.opcode_modifier.norex64
7059 && !i.tm.opcode_modifier.vexw
7060 /* Special case for xchg %rax,%rax. It is NOP and doesn't
7061 need rex64. */
7062 && ! (i.operands == 2
7063 && i.tm.base_opcode == 0x90
7064 && i.tm.extension_opcode == None
7065 && i.types[0].bitfield.instance == Accum
7066 && i.types[0].bitfield.qword
7067 && i.types[1].bitfield.instance == Accum
7068 && i.types[1].bitfield.qword))
7069 i.rex |= REX_W;
7070
7071 break;
7072
7073 case 0:
7074 /* Select word/dword/qword operation with explicit data sizing prefix
7075 when there are no suitable register operands. */
7076 if (i.tm.opcode_modifier.w
7077 && (i.prefix[DATA_PREFIX] || (i.prefix[REX_PREFIX] & REX_W))
7078 && (!i.reg_operands
7079 || (i.reg_operands == 1
7080 /* ShiftCount */
7081 && (i.tm.operand_types[0].bitfield.instance == RegC
7082 /* InOutPortReg */
7083 || i.tm.operand_types[0].bitfield.instance == RegD
7084 || i.tm.operand_types[1].bitfield.instance == RegD
7085 /* CRC32 */
7086 || i.tm.base_opcode == 0xf20f38f0))))
7087 i.tm.base_opcode |= 1;
7088 break;
7089 }
7090
7091 if (i.tm.opcode_modifier.addrprefixopreg)
7092 {
7093 gas_assert (!i.suffix);
7094 gas_assert (i.reg_operands);
7095
7096 if (i.tm.operand_types[0].bitfield.instance == Accum
7097 || i.operands == 1)
7098 {
7099 /* The address size override prefix changes the size of the
7100 first operand. */
7101 if (flag_code == CODE_64BIT
7102 && i.op[0].regs->reg_type.bitfield.word)
7103 {
7104 as_bad (_("16-bit addressing unavailable for `%s'"),
7105 i.tm.name);
7106 return 0;
7107 }
7108
7109 if ((flag_code == CODE_32BIT
7110 ? i.op[0].regs->reg_type.bitfield.word
7111 : i.op[0].regs->reg_type.bitfield.dword)
7112 && !add_prefix (ADDR_PREFIX_OPCODE))
7113 return 0;
7114 }
7115 else
7116 {
7117 /* Check invalid register operand when the address size override
7118 prefix changes the size of register operands. */
7119 unsigned int op;
7120 enum { need_word, need_dword, need_qword } need;
7121
7122 if (flag_code == CODE_32BIT)
7123 need = i.prefix[ADDR_PREFIX] ? need_word : need_dword;
7124 else if (i.prefix[ADDR_PREFIX])
7125 need = need_dword;
7126 else
7127 need = flag_code == CODE_64BIT ? need_qword : need_word;
7128
7129 for (op = 0; op < i.operands; op++)
7130 {
7131 if (i.types[op].bitfield.class != Reg)
7132 continue;
7133
7134 switch (need)
7135 {
7136 case need_word:
7137 if (i.op[op].regs->reg_type.bitfield.word)
7138 continue;
7139 break;
7140 case need_dword:
7141 if (i.op[op].regs->reg_type.bitfield.dword)
7142 continue;
7143 break;
7144 case need_qword:
7145 if (i.op[op].regs->reg_type.bitfield.qword)
7146 continue;
7147 break;
7148 }
7149
7150 as_bad (_("invalid register operand size for `%s'"),
7151 i.tm.name);
7152 return 0;
7153 }
7154 }
7155 }
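/* For the addrprefixopreg case just handled: in 64-bit mode such
register operands default to 64-bit; choosing the 32-bit forms
(e.g. "%eax" instead of "%rax") is expressed through the 0x67
address-size prefix added above, and 16-bit forms are rejected. */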
7156
7157 return 1;
7158 }
7159
7160 static int
7161 check_byte_reg (void)
7162 {
7163 int op;
7164
7165 for (op = i.operands; --op >= 0;)
7166 {
7167 /* Skip non-register operands. */
7168 if (i.types[op].bitfield.class != Reg)
7169 continue;
7170
7171 /* If this is an eight bit register, it's OK. If it's the 16 or
7172 32 bit version of an eight bit register, we will just use the
7173 low portion, and that's OK too. */
7174 if (i.types[op].bitfield.byte)
7175 continue;
7176
7177 /* I/O port address operands are OK too. */
7178 if (i.tm.operand_types[op].bitfield.instance == RegD
7179 && i.tm.operand_types[op].bitfield.word)
7180 continue;
7181
7182 /* crc32 only wants its source operand checked here. */
7183 if (i.tm.base_opcode == 0xf20f38f0 && op)
7184 continue;
7185
7186 /* Any other register is bad. */
7187 as_bad (_("`%s%s' not allowed with `%s%c'"),
7188 register_prefix, i.op[op].regs->reg_name,
7189 i.tm.name, i.suffix);
7190 return 0;
7191 }
7192 return 1;
7193 }
7194
7195 static int
7196 check_long_reg (void)
7197 {
7198 int op;
7199
7200 for (op = i.operands; --op >= 0;)
7201 /* Skip non-register operands. */
7202 if (i.types[op].bitfield.class != Reg)
7203 continue;
7204 /* Reject eight bit registers, except where the template requires
7205 them. (e.g. movzb) */
7206 else if (i.types[op].bitfield.byte
7207 && (i.tm.operand_types[op].bitfield.class == Reg
7208 || i.tm.operand_types[op].bitfield.instance == Accum)
7209 && (i.tm.operand_types[op].bitfield.word
7210 || i.tm.operand_types[op].bitfield.dword))
7211 {
7212 as_bad (_("`%s%s' not allowed with `%s%c'"),
7213 register_prefix,
7214 i.op[op].regs->reg_name,
7215 i.tm.name,
7216 i.suffix);
7217 return 0;
7218 }
7219 /* Error if the e prefix on a general reg is missing. */
7220 else if (i.types[op].bitfield.word
7221 && (i.tm.operand_types[op].bitfield.class == Reg
7222 || i.tm.operand_types[op].bitfield.instance == Accum)
7223 && i.tm.operand_types[op].bitfield.dword)
7224 {
7225 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7226 register_prefix, i.op[op].regs->reg_name,
7227 i.suffix);
7228 return 0;
7229 }
7230 /* Warn if the r prefix on a general reg is present. */
7231 else if (i.types[op].bitfield.qword
7232 && (i.tm.operand_types[op].bitfield.class == Reg
7233 || i.tm.operand_types[op].bitfield.instance == Accum)
7234 && i.tm.operand_types[op].bitfield.dword)
7235 {
7236 if (intel_syntax
7237 && i.tm.opcode_modifier.toqword
7238 && i.types[0].bitfield.class != RegSIMD)
7239 {
7240 /* Convert to QWORD. We want REX byte. */
7241 i.suffix = QWORD_MNEM_SUFFIX;
7242 }
7243 else
7244 {
7245 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7246 register_prefix, i.op[op].regs->reg_name,
7247 i.suffix);
7248 return 0;
7249 }
7250 }
7251 return 1;
7252 }
7253
7254 static int
7255 check_qword_reg (void)
7256 {
7257 int op;
7258
7259 for (op = i.operands; --op >= 0; )
7260 /* Skip non-register operands. */
7261 if (i.types[op].bitfield.class != Reg)
7262 continue;
7263 /* Reject eight bit registers, except where the template requires
7264 them. (e.g. movzb) */
7265 else if (i.types[op].bitfield.byte
7266 && (i.tm.operand_types[op].bitfield.class == Reg
7267 || i.tm.operand_types[op].bitfield.instance == Accum)
7268 && (i.tm.operand_types[op].bitfield.word
7269 || i.tm.operand_types[op].bitfield.dword))
7270 {
7271 as_bad (_("`%s%s' not allowed with `%s%c'"),
7272 register_prefix,
7273 i.op[op].regs->reg_name,
7274 i.tm.name,
7275 i.suffix);
7276 return 0;
7277 }
7278 /* Warn if the r prefix on a general reg is missing. */
7279 else if ((i.types[op].bitfield.word
7280 || i.types[op].bitfield.dword)
7281 && (i.tm.operand_types[op].bitfield.class == Reg
7282 || i.tm.operand_types[op].bitfield.instance == Accum)
7283 && i.tm.operand_types[op].bitfield.qword)
7284 {
7285 /* Prohibit these changes in the 64bit mode, since the
7286 lowering is more complicated. */
7287 if (intel_syntax
7288 && i.tm.opcode_modifier.todword
7289 && i.types[0].bitfield.class != RegSIMD)
7290 {
7291 /* Convert to DWORD. We don't want REX byte. */
7292 i.suffix = LONG_MNEM_SUFFIX;
7293 }
7294 else
7295 {
7296 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7297 register_prefix, i.op[op].regs->reg_name,
7298 i.suffix);
7299 return 0;
7300 }
7301 }
7302 return 1;
7303 }
7304
7305 static int
7306 check_word_reg (void)
7307 {
7308 int op;
7309 for (op = i.operands; --op >= 0;)
7310 /* Skip non-register operands. */
7311 if (i.types[op].bitfield.class != Reg)
7312 continue;
7313 /* Reject eight bit registers, except where the template requires
7314 them. (e.g. movzb) */
7315 else if (i.types[op].bitfield.byte
7316 && (i.tm.operand_types[op].bitfield.class == Reg
7317 || i.tm.operand_types[op].bitfield.instance == Accum)
7318 && (i.tm.operand_types[op].bitfield.word
7319 || i.tm.operand_types[op].bitfield.dword))
7320 {
7321 as_bad (_("`%s%s' not allowed with `%s%c'"),
7322 register_prefix,
7323 i.op[op].regs->reg_name,
7324 i.tm.name,
7325 i.suffix);
7326 return 0;
7327 }
7328 /* Error if the e or r prefix on a general reg is present. */
7329 else if ((i.types[op].bitfield.dword
7330 || i.types[op].bitfield.qword)
7331 && (i.tm.operand_types[op].bitfield.class == Reg
7332 || i.tm.operand_types[op].bitfield.instance == Accum)
7333 && i.tm.operand_types[op].bitfield.word)
7334 {
7335 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
7336 register_prefix, i.op[op].regs->reg_name,
7337 i.suffix);
7338 return 0;
7339 }
7340 return 1;
7341 }
7342
7343 static int
7344 update_imm (unsigned int j)
7345 {
7346 i386_operand_type overlap = i.types[j];
7347 if ((overlap.bitfield.imm8
7348 || overlap.bitfield.imm8s
7349 || overlap.bitfield.imm16
7350 || overlap.bitfield.imm32
7351 || overlap.bitfield.imm32s
7352 || overlap.bitfield.imm64)
7353 && !operand_type_equal (&overlap, &imm8)
7354 && !operand_type_equal (&overlap, &imm8s)
7355 && !operand_type_equal (&overlap, &imm16)
7356 && !operand_type_equal (&overlap, &imm32)
7357 && !operand_type_equal (&overlap, &imm32s)
7358 && !operand_type_equal (&overlap, &imm64))
7359 {
7360 if (i.suffix)
7361 {
7362 i386_operand_type temp;
7363
7364 operand_type_set (&temp, 0);
7365 if (i.suffix == BYTE_MNEM_SUFFIX)
7366 {
7367 temp.bitfield.imm8 = overlap.bitfield.imm8;
7368 temp.bitfield.imm8s = overlap.bitfield.imm8s;
7369 }
7370 else if (i.suffix == WORD_MNEM_SUFFIX)
7371 temp.bitfield.imm16 = overlap.bitfield.imm16;
7372 else if (i.suffix == QWORD_MNEM_SUFFIX)
7373 {
7374 temp.bitfield.imm64 = overlap.bitfield.imm64;
7375 temp.bitfield.imm32s = overlap.bitfield.imm32s;
7376 }
7377 else
7378 temp.bitfield.imm32 = overlap.bitfield.imm32;
7379 overlap = temp;
7380 }
7381 else if (operand_type_equal (&overlap, &imm16_32_32s)
7382 || operand_type_equal (&overlap, &imm16_32)
7383 || operand_type_equal (&overlap, &imm16_32s))
7384 {
7385 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
7386 overlap = imm16;
7387 else
7388 overlap = imm32s;
7389 }
7390 else if (i.prefix[REX_PREFIX] & REX_W)
7391 overlap = operand_type_and (overlap, imm32s);
7392 else if (i.prefix[DATA_PREFIX])
7393 overlap = operand_type_and (overlap,
7394 flag_code != CODE_16BIT ? imm16 : imm32);
7395 if (!operand_type_equal (&overlap, &imm8)
7396 && !operand_type_equal (&overlap, &imm8s)
7397 && !operand_type_equal (&overlap, &imm16)
7398 && !operand_type_equal (&overlap, &imm32)
7399 && !operand_type_equal (&overlap, &imm32s)
7400 && !operand_type_equal (&overlap, &imm64))
7401 {
7402 as_bad (_("no instruction mnemonic suffix given; "
7403 "can't determine immediate size"));
7404 return 0;
7405 }
7406 }
7407 i.types[j] = overlap;
7408
7409 return 1;
7410 }
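/* Example of the narrowing in update_imm: with a 'q' suffix only
Imm32S/Imm64 survive, so "addq $0x11223344, %rax" keeps the
sign-extended 32-bit immediate, while a constant that doesn't fit in
32 signed bits must go through the 64-bit immediate form of "mov". */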
7411
7412 static int
7413 finalize_imm (void)
7414 {
7415 unsigned int j, n;
7416
7417 /* Update the first 2 immediate operands. */
7418 n = i.operands > 2 ? 2 : i.operands;
7419 if (n)
7420 {
7421 for (j = 0; j < n; j++)
7422 if (update_imm (j) == 0)
7423 return 0;
7424
7425 /* The 3rd operand can't be immediate operand. */
7426 gas_assert (operand_type_check (i.types[2], imm) == 0);
7427 }
7428
7429 return 1;
7430 }
7431
7432 static int
7433 process_operands (void)
7434 {
7435 /* Default segment register this instruction will use for memory
7436 accesses. 0 means unknown. This is only for optimizing out
7437 unnecessary segment overrides. */
7438 const seg_entry *default_seg = 0;
7439
7440 if (i.tm.opcode_modifier.sse2avx)
7441 {
7442 /* Legacy encoded insns allow explicit REX prefixes, so these prefixes
7443 need converting. */
7444 i.rex |= i.prefix[REX_PREFIX] & (REX_W | REX_R | REX_X | REX_B);
7445 i.prefix[REX_PREFIX] = 0;
7446 i.rex_encoding = 0;
7447 }
7448 /* ImmExt should be processed after SSE2AVX. */
7449 else if (i.tm.opcode_modifier.immext)
7450 process_immext ();
7451
7452 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
7453 {
7454 unsigned int dupl = i.operands;
7455 unsigned int dest = dupl - 1;
7456 unsigned int j;
7457
7458 /* The destination must be an xmm register. */
7459 gas_assert (i.reg_operands
7460 && MAX_OPERANDS > dupl
7461 && operand_type_equal (&i.types[dest], &regxmm));
7462
7463 if (i.tm.operand_types[0].bitfield.instance == Accum
7464 && i.tm.operand_types[0].bitfield.xmmword)
7465 {
7466 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
7467 {
7468 /* Keep xmm0 for instructions with VEX prefix and 3
7469 sources. */
7470 i.tm.operand_types[0].bitfield.instance = InstanceNone;
7471 i.tm.operand_types[0].bitfield.class = RegSIMD;
7472 goto duplicate;
7473 }
7474 else
7475 {
7476 /* We remove the first xmm0 and keep the number of
7477 operands unchanged, which in fact duplicates the
7478 destination. */
7479 for (j = 1; j < i.operands; j++)
7480 {
7481 i.op[j - 1] = i.op[j];
7482 i.types[j - 1] = i.types[j];
7483 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
7484 i.flags[j - 1] = i.flags[j];
7485 }
7486 }
7487 }
7488 else if (i.tm.opcode_modifier.implicit1stxmm0)
7489 {
7490 gas_assert ((MAX_OPERANDS - 1) > dupl
7491 && (i.tm.opcode_modifier.vexsources
7492 == VEX3SOURCES));
7493
7494 /* Add the implicit xmm0 for instructions with VEX prefix
7495 and 3 sources. */
7496 for (j = i.operands; j > 0; j--)
7497 {
7498 i.op[j] = i.op[j - 1];
7499 i.types[j] = i.types[j - 1];
7500 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
7501 i.flags[j] = i.flags[j - 1];
7502 }
7503 i.op[0].regs
7504 = (const reg_entry *) hash_find (reg_hash, "xmm0");
7505 i.types[0] = regxmm;
7506 i.tm.operand_types[0] = regxmm;
7507
7508 i.operands += 2;
7509 i.reg_operands += 2;
7510 i.tm.operands += 2;
7511
7512 dupl++;
7513 dest++;
7514 i.op[dupl] = i.op[dest];
7515 i.types[dupl] = i.types[dest];
7516 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
7517 i.flags[dupl] = i.flags[dest];
7518 }
7519 else
7520 {
7521 duplicate:
7522 i.operands++;
7523 i.reg_operands++;
7524 i.tm.operands++;
7525
7526 i.op[dupl] = i.op[dest];
7527 i.types[dupl] = i.types[dest];
7528 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
7529 i.flags[dupl] = i.flags[dest];
7530 }
7531
7532 if (i.tm.opcode_modifier.immext)
7533 process_immext ();
7534 }
7535 else if (i.tm.operand_types[0].bitfield.instance == Accum
7536 && i.tm.operand_types[0].bitfield.xmmword)
7537 {
7538 unsigned int j;
7539
7540 for (j = 1; j < i.operands; j++)
7541 {
7542 i.op[j - 1] = i.op[j];
7543 i.types[j - 1] = i.types[j];
7544
7545 /* We need to adjust fields in i.tm since they are used by
7546 build_modrm_byte. */
7547 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
7548
7549 i.flags[j - 1] = i.flags[j];
7550 }
7551
7552 i.operands--;
7553 i.reg_operands--;
7554 i.tm.operands--;
7555 }
7556 else if (i.tm.opcode_modifier.implicitquadgroup)
7557 {
7558 unsigned int regnum, first_reg_in_group, last_reg_in_group;
7559
7560 /* The second operand must be {x,y,z}mmN, where N is a multiple of 4. */
7561 gas_assert (i.operands >= 2 && i.types[1].bitfield.class == RegSIMD);
7562 regnum = register_number (i.op[1].regs);
7563 first_reg_in_group = regnum & ~3;
7564 last_reg_in_group = first_reg_in_group + 3;
7565 if (regnum != first_reg_in_group)
7566 as_warn (_("source register `%s%s' implicitly denotes"
7567 " `%s%.3s%u' to `%s%.3s%u' source group in `%s'"),
7568 register_prefix, i.op[1].regs->reg_name,
7569 register_prefix, i.op[1].regs->reg_name, first_reg_in_group,
7570 register_prefix, i.op[1].regs->reg_name, last_reg_in_group,
7571 i.tm.name);
7572 }
7573 else if (i.tm.opcode_modifier.regkludge)
7574 {
7575 /* The imul $imm, %reg instruction is converted into
7576 imul $imm, %reg, %reg, and the clr %reg instruction
7577 is converted into xor %reg, %reg. */
7578
7579 unsigned int first_reg_op;
7580
7581 if (operand_type_check (i.types[0], reg))
7582 first_reg_op = 0;
7583 else
7584 first_reg_op = 1;
7585 /* Pretend we saw the extra register operand. */
7586 gas_assert (i.reg_operands == 1
7587 && i.op[first_reg_op + 1].regs == 0);
7588 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
7589 i.types[first_reg_op + 1] = i.types[first_reg_op];
7590 i.operands++;
7591 i.reg_operands++;
7592 }
7593
7594 if (i.tm.opcode_modifier.modrm)
7595 {
7596 /* The opcode is completed (modulo i.tm.extension_opcode which
7597 must be put into the modrm byte). Now, we make the modrm and
7598 index base bytes based on all the info we've collected. */
7599
7600 default_seg = build_modrm_byte ();
7601 }
7602 else if (i.types[0].bitfield.class == SReg)
7603 {
7604 if (flag_code != CODE_64BIT
7605 ? i.tm.base_opcode == POP_SEG_SHORT
7606 && i.op[0].regs->reg_num == 1
7607 : (i.tm.base_opcode | 1) == POP_SEG386_SHORT
7608 && i.op[0].regs->reg_num < 4)
7609 {
7610 as_bad (_("you can't `%s %s%s'"),
7611 i.tm.name, register_prefix, i.op[0].regs->reg_name);
7612 return 0;
7613 }
7614 if (i.op[0].regs->reg_num > 3 && i.tm.opcode_length == 1)
7615 {
7616 i.tm.base_opcode ^= POP_SEG_SHORT ^ POP_SEG386_SHORT;
7617 i.tm.opcode_length = 2;
7618 }
7619 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
7620 }
7621 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
7622 {
7623 default_seg = &ds;
7624 }
7625 else if (i.tm.opcode_modifier.isstring)
7626 {
7627 /* For the string instructions that allow a segment override
7628 on one of their operands, the default segment is ds. */
7629 default_seg = &ds;
7630 }
7631 else if (i.short_form)
7632 {
7633 /* The register or float register operand is in operand
7634 0 or 1. */
7635 unsigned int op = i.tm.operand_types[0].bitfield.class != Reg;
7636
7637 /* Register goes in low 3 bits of opcode. */
7638 i.tm.base_opcode |= i.op[op].regs->reg_num;
7639 if ((i.op[op].regs->reg_flags & RegRex) != 0)
7640 i.rex |= REX_B;
7641 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
7642 {
7643 /* Warn about some common errors, but press on regardless.
7644 The first case can be generated by gcc (<= 2.8.1). */
7645 if (i.operands == 2)
7646 {
7647 /* Reversed arguments on faddp, fsubp, etc. */
7648 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
7649 register_prefix, i.op[!intel_syntax].regs->reg_name,
7650 register_prefix, i.op[intel_syntax].regs->reg_name);
7651 }
7652 else
7653 {
7654 /* Extraneous `l' suffix on fp insn. */
7655 as_warn (_("translating to `%s %s%s'"), i.tm.name,
7656 register_prefix, i.op[0].regs->reg_name);
7657 }
7658 }
7659 }
7660
7661 if ((i.seg[0] || i.prefix[SEG_PREFIX])
7662 && i.tm.base_opcode == 0x8d /* lea */
7663 && !is_any_vex_encoding(&i.tm))
7664 {
7665 if (!quiet_warnings)
7666 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
7667 if (optimize)
7668 {
7669 i.seg[0] = NULL;
7670 i.prefix[SEG_PREFIX] = 0;
7671 }
7672 }
7673
7674 /* If a segment was explicitly specified, and the specified segment
7675 is neither the default nor the one already recorded from a prefix,
7676 use an opcode prefix to select it. If we never figured out what
7677 the default segment is, then default_seg will be zero at this
7678 point, and the specified segment prefix will always be used. */
7679 if (i.seg[0]
7680 && i.seg[0] != default_seg
7681 && i.seg[0]->seg_prefix != i.prefix[SEG_PREFIX])
7682 {
7683 if (!add_prefix (i.seg[0]->seg_prefix))
7684 return 0;
7685 }
7686 return 1;
7687 }
7688
7689 static INLINE void set_rex_vrex (const reg_entry *r, unsigned int rex_bit,
7690 bfd_boolean do_sse2avx)
7691 {
7692 if (r->reg_flags & RegRex)
7693 {
7694 if (i.rex & rex_bit)
7695 as_bad (_("same type of prefix used twice"));
7696 i.rex |= rex_bit;
7697 }
7698 else if (do_sse2avx && (i.rex & rex_bit) && i.vex.register_specifier)
7699 {
7700 gas_assert (i.vex.register_specifier == r);
7701 i.vex.register_specifier += 8;
7702 }
7703
7704 if (r->reg_flags & RegVRex)
7705 i.vrex |= rex_bit;
7706 }
7707
7708 static const seg_entry *
7709 build_modrm_byte (void)
7710 {
7711 const seg_entry *default_seg = 0;
7712 unsigned int source, dest;
7713 int vex_3_sources;
7714
7715 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
7716 if (vex_3_sources)
7717 {
7718 unsigned int nds, reg_slot;
7719 expressionS *exp;
7720
7721 dest = i.operands - 1;
7722 nds = dest - 1;
7723
7724 /* There are 2 kinds of instructions:
7725 1. 5 operands: 4 register operands or 3 register operands
7726 plus 1 memory operand plus one Imm4 operand, VexXDS, and
7727 VexW0 or VexW1. The destination must be either XMM, YMM or
7728 ZMM register.
7729 2. 4 operands: 4 register operands or 3 register operands
7730 plus 1 memory operand, with VexXDS. */
7731 gas_assert ((i.reg_operands == 4
7732 || (i.reg_operands == 3 && i.mem_operands == 1))
7733 && i.tm.opcode_modifier.vexvvvv == VEXXDS
7734 && i.tm.opcode_modifier.vexw
7735 && i.tm.operand_types[dest].bitfield.class == RegSIMD);
7736
7737 /* If VexW1 is set, the first non-immediate operand is the source and
7738 the second non-immediate one is encoded in the immediate operand. */
7739 if (i.tm.opcode_modifier.vexw == VEXW1)
7740 {
7741 source = i.imm_operands;
7742 reg_slot = i.imm_operands + 1;
7743 }
7744 else
7745 {
7746 source = i.imm_operands + 1;
7747 reg_slot = i.imm_operands;
7748 }
7749
7750 if (i.imm_operands == 0)
7751 {
7752 /* When there is no immediate operand, generate an 8bit
7753 immediate operand to encode the first operand. */
7754 exp = &im_expressions[i.imm_operands++];
7755 i.op[i.operands].imms = exp;
7756 i.types[i.operands] = imm8;
7757 i.operands++;
7758
7759 gas_assert (i.tm.operand_types[reg_slot].bitfield.class == RegSIMD);
7760 exp->X_op = O_constant;
7761 exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
7762 gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
7763 }
7764 else
7765 {
7766 gas_assert (i.imm_operands == 1);
7767 gas_assert (fits_in_imm4 (i.op[0].imms->X_add_number));
7768 gas_assert (!i.tm.opcode_modifier.immext);
7769
7770 /* Turn on Imm8 again so that output_imm will generate it. */
7771 i.types[0].bitfield.imm8 = 1;
7772
7773 gas_assert (i.tm.operand_types[reg_slot].bitfield.class == RegSIMD);
7774 i.op[0].imms->X_add_number
7775 |= register_number (i.op[reg_slot].regs) << 4;
7776 gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
7777 }
7778
7779 gas_assert (i.tm.operand_types[nds].bitfield.class == RegSIMD);
7780 i.vex.register_specifier = i.op[nds].regs;
7781 }
7782 else
7783 source = dest = 0;
7784
7785 /* i.reg_operands MUST be the number of real register operands;
7786 implicit registers do not count. If there are 3 register
7787 operands, it must be an instruction with VexNDS. For an
7788 instruction with VexNDD, the destination register is encoded
7789 in the VEX prefix. If there are 4 register operands, it must be
7790 an instruction with a VEX prefix and 3 sources. */
7791 if (i.mem_operands == 0
7792 && ((i.reg_operands == 2
7793 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
7794 || (i.reg_operands == 3
7795 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
7796 || (i.reg_operands == 4 && vex_3_sources)))
7797 {
7798 switch (i.operands)
7799 {
7800 case 2:
7801 source = 0;
7802 break;
7803 case 3:
7804 /* When there are 3 operands, one of them may be an immediate,
7805 which may be the first or the last operand. Otherwise,
7806 the first operand must be the shift count register (cl) or it
7807 is an instruction with VexNDS. */
7808 gas_assert (i.imm_operands == 1
7809 || (i.imm_operands == 0
7810 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
7811 || (i.types[0].bitfield.instance == RegC
7812 && i.types[0].bitfield.byte))));
7813 if (operand_type_check (i.types[0], imm)
7814 || (i.types[0].bitfield.instance == RegC
7815 && i.types[0].bitfield.byte))
7816 source = 1;
7817 else
7818 source = 0;
7819 break;
7820 case 4:
7821 /* When there are 4 operands, the first two must be 8bit
7822 immediate operands. The source operand will be the 3rd
7823 one.
7824
7825 For instructions with VexNDS, if the first operand is
7826 an imm8, the source operand is the 2nd one. If the last
7827 operand is imm8, the source operand is the first one. */
7828 gas_assert ((i.imm_operands == 2
7829 && i.types[0].bitfield.imm8
7830 && i.types[1].bitfield.imm8)
7831 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
7832 && i.imm_operands == 1
7833 && (i.types[0].bitfield.imm8
7834 || i.types[i.operands - 1].bitfield.imm8
7835 || i.rounding)));
7836 if (i.imm_operands == 2)
7837 source = 2;
7838 else
7839 {
7840 if (i.types[0].bitfield.imm8)
7841 source = 1;
7842 else
7843 source = 0;
7844 }
7845 break;
7846 case 5:
7847 if (is_evex_encoding (&i.tm))
7848 {
7849 /* For EVEX instructions, when there are 5 operands, the
7850 first one must be an immediate operand. If the second one
7851 is an immediate operand, the source operand is the 3rd
7852 one. If the last one is an immediate operand, the source
7853 operand is the 2nd one. */
7854 gas_assert (i.imm_operands == 2
7855 && i.tm.opcode_modifier.sae
7856 && operand_type_check (i.types[0], imm));
7857 if (operand_type_check (i.types[1], imm))
7858 source = 2;
7859 else if (operand_type_check (i.types[4], imm))
7860 source = 1;
7861 else
7862 abort ();
7863 }
7864 break;
7865 default:
7866 abort ();
7867 }
7868
7869 if (!vex_3_sources)
7870 {
7871 dest = source + 1;
7872
7873 /* The RC/SAE operand could be between DEST and SRC. That happens
7874 when one operand is a GPR and the other one is an XMM/YMM/ZMM
7875 register. */
7876 if (i.rounding && i.rounding->operand == (int) dest)
7877 dest++;
7878
7879 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
7880 {
7881 /* For instructions with VexNDS, the register-only source
7882 operand must be a 32/64bit integer, XMM, YMM, ZMM, or mask
7883 register. It is encoded in VEX prefix. */
7884
7885 i386_operand_type op;
7886 unsigned int vvvv;
7887
7888 /* Check register-only source operand when two source
7889 operands are swapped. */
7890 if (!i.tm.operand_types[source].bitfield.baseindex
7891 && i.tm.operand_types[dest].bitfield.baseindex)
7892 {
7893 vvvv = source;
7894 source = dest;
7895 }
7896 else
7897 vvvv = dest;
7898
7899 op = i.tm.operand_types[vvvv];
7900 if ((dest + 1) >= i.operands
7901 || ((op.bitfield.class != Reg
7902 || (!op.bitfield.dword && !op.bitfield.qword))
7903 && op.bitfield.class != RegSIMD
7904 && !operand_type_equal (&op, &regmask)))
7905 abort ();
7906 i.vex.register_specifier = i.op[vvvv].regs;
7907 dest++;
7908 }
7909 }
7910
7911 i.rm.mode = 3;
7912 /* One of the register operands will be encoded in the i.rm.reg
7913 field, the other in the combined i.rm.mode and i.rm.regmem
7914 fields. If no form of this instruction supports a memory
7915 destination operand, then we assume the source operand may
7916 sometimes be a memory operand and so we need to store the
7917 destination in the i.rm.reg field. */
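/* Illustrative example (annotation, not part of the original source):
   for "add %eax, %ebx" (opcode 0x01, ADD r/m32, r32) the destination
   %ebx may be a memory operand in other forms, so it goes in regmem
   and the source %eax in reg, giving ModRM 0xC3 (mode 3, reg 0,
   regmem 3): the bytes are 01 c3.  */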
7918 if (!i.tm.opcode_modifier.regmem
7919 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
7920 {
7921 i.rm.reg = i.op[dest].regs->reg_num;
7922 i.rm.regmem = i.op[source].regs->reg_num;
7923 if (i.op[dest].regs->reg_type.bitfield.class == RegMMX
7924 || i.op[source].regs->reg_type.bitfield.class == RegMMX)
7925 i.has_regmmx = TRUE;
7926 else if (i.op[dest].regs->reg_type.bitfield.class == RegSIMD
7927 || i.op[source].regs->reg_type.bitfield.class == RegSIMD)
7928 {
7929 if (i.types[dest].bitfield.zmmword
7930 || i.types[source].bitfield.zmmword)
7931 i.has_regzmm = TRUE;
7932 else if (i.types[dest].bitfield.ymmword
7933 || i.types[source].bitfield.ymmword)
7934 i.has_regymm = TRUE;
7935 else
7936 i.has_regxmm = TRUE;
7937 }
7938 set_rex_vrex (i.op[dest].regs, REX_R, i.tm.opcode_modifier.sse2avx);
7939 set_rex_vrex (i.op[source].regs, REX_B, FALSE);
7940 }
7941 else
7942 {
7943 i.rm.reg = i.op[source].regs->reg_num;
7944 i.rm.regmem = i.op[dest].regs->reg_num;
7945 set_rex_vrex (i.op[dest].regs, REX_B, i.tm.opcode_modifier.sse2avx);
7946 set_rex_vrex (i.op[source].regs, REX_R, FALSE);
7947 }
7948 if (flag_code != CODE_64BIT && (i.rex & REX_R))
7949 {
7950 if (i.types[!i.tm.opcode_modifier.regmem].bitfield.class != RegCR)
7951 abort ();
7952 i.rex &= ~REX_R;
7953 add_prefix (LOCK_PREFIX_OPCODE);
7954 }
7955 }
7956 else
7957 { /* If it's not 2 reg operands... */
7958 unsigned int mem;
7959
7960 if (i.mem_operands)
7961 {
7962 unsigned int fake_zero_displacement = 0;
7963 unsigned int op;
7964
7965 for (op = 0; op < i.operands; op++)
7966 if (i.flags[op] & Operand_Mem)
7967 break;
7968 gas_assert (op < i.operands);
7969
7970 if (i.tm.opcode_modifier.sib)
7971 {
7972 if (i.index_reg->reg_num == RegIZ)
7973 abort ();
7974
7975 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
7976 if (!i.base_reg)
7977 {
7978 i.sib.base = NO_BASE_REGISTER;
7979 i.sib.scale = i.log2_scale_factor;
7980 i.types[op].bitfield.disp8 = 0;
7981 i.types[op].bitfield.disp16 = 0;
7982 i.types[op].bitfield.disp64 = 0;
7983 if (flag_code != CODE_64BIT || i.prefix[ADDR_PREFIX])
7984 {
7985 /* Must be 32 bit */
7986 i.types[op].bitfield.disp32 = 1;
7987 i.types[op].bitfield.disp32s = 0;
7988 }
7989 else
7990 {
7991 i.types[op].bitfield.disp32 = 0;
7992 i.types[op].bitfield.disp32s = 1;
7993 }
7994 }
7995 i.sib.index = i.index_reg->reg_num;
7996 set_rex_vrex (i.index_reg, REX_X, FALSE);
7997 }
7998
7999 default_seg = &ds;
8000
8001 if (i.base_reg == 0)
8002 {
8003 i.rm.mode = 0;
8004 if (!i.disp_operands)
8005 fake_zero_displacement = 1;
8006 if (i.index_reg == 0)
8007 {
8008 i386_operand_type newdisp;
8009
8010 gas_assert (!i.tm.opcode_modifier.sib);
8011 /* Operand is just <disp> */
8012 if (flag_code == CODE_64BIT)
8013 {
8014 /* 64-bit mode replaces 32-bit absolute addressing
8015 with RIP-relative addressing, so absolute
8016 addressing has to be encoded by one of the
8017 redundant SIB forms. */
8018 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
8019 i.sib.base = NO_BASE_REGISTER;
8020 i.sib.index = NO_INDEX_REGISTER;
8021 newdisp = (!i.prefix[ADDR_PREFIX] ? disp32s : disp32);
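/* Illustrative example (annotation, not part of the original source):
   in 64-bit mode "movl %eax, 0x1234" is encoded through a redundant
   SIB form as 89 04 25 34 12 00 00: ModRM 0x04 escapes to a SIB
   byte, and SIB 0x25 selects no base and no index plus a disp32.  */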
8022 }
8023 else if ((flag_code == CODE_16BIT)
8024 ^ (i.prefix[ADDR_PREFIX] != 0))
8025 {
8026 i.rm.regmem = NO_BASE_REGISTER_16;
8027 newdisp = disp16;
8028 }
8029 else
8030 {
8031 i.rm.regmem = NO_BASE_REGISTER;
8032 newdisp = disp32;
8033 }
8034 i.types[op] = operand_type_and_not (i.types[op], anydisp);
8035 i.types[op] = operand_type_or (i.types[op], newdisp);
8036 }
8037 else if (!i.tm.opcode_modifier.sib)
8038 {
8039 /* !i.base_reg && i.index_reg */
8040 if (i.index_reg->reg_num == RegIZ)
8041 i.sib.index = NO_INDEX_REGISTER;
8042 else
8043 i.sib.index = i.index_reg->reg_num;
8044 i.sib.base = NO_BASE_REGISTER;
8045 i.sib.scale = i.log2_scale_factor;
8046 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
8047 i.types[op].bitfield.disp8 = 0;
8048 i.types[op].bitfield.disp16 = 0;
8049 i.types[op].bitfield.disp64 = 0;
8050 if (flag_code != CODE_64BIT || i.prefix[ADDR_PREFIX])
8051 {
8052 /* Must be 32 bit */
8053 i.types[op].bitfield.disp32 = 1;
8054 i.types[op].bitfield.disp32s = 0;
8055 }
8056 else
8057 {
8058 i.types[op].bitfield.disp32 = 0;
8059 i.types[op].bitfield.disp32s = 1;
8060 }
8061 if ((i.index_reg->reg_flags & RegRex) != 0)
8062 i.rex |= REX_X;
8063 }
8064 }
8065 /* RIP addressing for 64bit mode. */
8066 else if (i.base_reg->reg_num == RegIP)
8067 {
8068 gas_assert (!i.tm.opcode_modifier.sib);
8069 i.rm.regmem = NO_BASE_REGISTER;
8070 i.types[op].bitfield.disp8 = 0;
8071 i.types[op].bitfield.disp16 = 0;
8072 i.types[op].bitfield.disp32 = 0;
8073 i.types[op].bitfield.disp32s = 1;
8074 i.types[op].bitfield.disp64 = 0;
8075 i.flags[op] |= Operand_PCrel;
8076 if (! i.disp_operands)
8077 fake_zero_displacement = 1;
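/* Illustrative example (annotation, not part of the original source):
   "mov foo(%rip), %eax" encodes as 8b 05 followed by a 32-bit
   PC-relative displacement; ModRM 0x05 is mode 0, regmem 5.  */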
8078 }
8079 else if (i.base_reg->reg_type.bitfield.word)
8080 {
8081 gas_assert (!i.tm.opcode_modifier.sib);
8082 switch (i.base_reg->reg_num)
8083 {
8084 case 3: /* (%bx) */
8085 if (i.index_reg == 0)
8086 i.rm.regmem = 7;
8087 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
8088 i.rm.regmem = i.index_reg->reg_num - 6;
8089 break;
8090 case 5: /* (%bp) */
8091 default_seg = &ss;
8092 if (i.index_reg == 0)
8093 {
8094 i.rm.regmem = 6;
8095 if (operand_type_check (i.types[op], disp) == 0)
8096 {
8097 /* fake (%bp) into 0(%bp) */
8098 i.types[op].bitfield.disp8 = 1;
8099 fake_zero_displacement = 1;
8100 }
8101 }
8102 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
8103 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
8104 break;
8105 default: /* (%si) -> 4 or (%di) -> 5 */
8106 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
8107 }
8108 i.rm.mode = mode_from_disp_size (i.types[op]);
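/* Illustrative examples (annotation, not part of the original source)
   of the 16-bit table above: "mov (%bx,%si), %ax" -> 8b 00
   (regmem 0), while "mov (%bp), %ax" -> 8b 46 00, because plain
   (%bp) has to be faked into 0(%bp) with a zero disp8.  */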
8109 }
8110 else /* i.base_reg and 32/64 bit mode */
8111 {
8112 if (flag_code == CODE_64BIT
8113 && operand_type_check (i.types[op], disp))
8114 {
8115 i.types[op].bitfield.disp16 = 0;
8116 i.types[op].bitfield.disp64 = 0;
8117 if (i.prefix[ADDR_PREFIX] == 0)
8118 {
8119 i.types[op].bitfield.disp32 = 0;
8120 i.types[op].bitfield.disp32s = 1;
8121 }
8122 else
8123 {
8124 i.types[op].bitfield.disp32 = 1;
8125 i.types[op].bitfield.disp32s = 0;
8126 }
8127 }
8128
8129 if (!i.tm.opcode_modifier.sib)
8130 i.rm.regmem = i.base_reg->reg_num;
8131 if ((i.base_reg->reg_flags & RegRex) != 0)
8132 i.rex |= REX_B;
8133 i.sib.base = i.base_reg->reg_num;
8134 /* x86-64 ignores REX prefix bit here to avoid decoder
8135 complications. */
8136 if (!(i.base_reg->reg_flags & RegRex)
8137 && (i.base_reg->reg_num == EBP_REG_NUM
8138 || i.base_reg->reg_num == ESP_REG_NUM))
8139 default_seg = &ss;
8140 if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
8141 {
8142 fake_zero_displacement = 1;
8143 i.types[op].bitfield.disp8 = 1;
8144 }
8145 i.sib.scale = i.log2_scale_factor;
8146 if (i.index_reg == 0)
8147 {
8148 gas_assert (!i.tm.opcode_modifier.sib);
8149 /* <disp>(%esp) becomes two byte modrm with no index
8150 register. We've already stored the code for esp
8151 in i.rm.regmem, i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
8152 Any base register besides %esp will not use the
8153 extra modrm byte. */
8154 i.sib.index = NO_INDEX_REGISTER;
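/* Illustrative example (annotation, not part of the original source):
   "mov (%esp), %eax" -> 8b 04 24: ModRM 0x04 escapes to a SIB byte,
   and SIB 0x24 selects base %esp with no index.  */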
8155 }
8156 else if (!i.tm.opcode_modifier.sib)
8157 {
8158 if (i.index_reg->reg_num == RegIZ)
8159 i.sib.index = NO_INDEX_REGISTER;
8160 else
8161 i.sib.index = i.index_reg->reg_num;
8162 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
8163 if ((i.index_reg->reg_flags & RegRex) != 0)
8164 i.rex |= REX_X;
8165 }
8166
8167 if (i.disp_operands
8168 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
8169 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
8170 i.rm.mode = 0;
8171 else
8172 {
8173 if (!fake_zero_displacement
8174 && !i.disp_operands
8175 && i.disp_encoding)
8176 {
8177 fake_zero_displacement = 1;
8178 if (i.disp_encoding == disp_encoding_8bit)
8179 i.types[op].bitfield.disp8 = 1;
8180 else
8181 i.types[op].bitfield.disp32 = 1;
8182 }
8183 i.rm.mode = mode_from_disp_size (i.types[op]);
8184 }
8185 }
8186
8187 if (fake_zero_displacement)
8188 {
8189 /* Fakes a zero displacement assuming that i.types[op]
8190 holds the correct displacement size. */
8191 expressionS *exp;
8192
8193 gas_assert (i.op[op].disps == 0);
8194 exp = &disp_expressions[i.disp_operands++];
8195 i.op[op].disps = exp;
8196 exp->X_op = O_constant;
8197 exp->X_add_number = 0;
8198 exp->X_add_symbol = (symbolS *) 0;
8199 exp->X_op_symbol = (symbolS *) 0;
8200 }
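/* Illustrative example (annotation, not part of the original source):
   %ebp and %r13 (low three bits also 101) have no mod=0 form, so
   "mov (%r13), %eax" becomes 41 8b 45 00, carrying exactly the kind
   of fabricated one-byte zero displacement built above.  */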
8201
8202 mem = op;
8203 }
8204 else
8205 mem = ~0;
8206
8207 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
8208 {
8209 if (operand_type_check (i.types[0], imm))
8210 i.vex.register_specifier = NULL;
8211 else
8212 {
8213 /* VEX.vvvv encodes one of the sources when the first
8214 operand is not an immediate. */
8215 if (i.tm.opcode_modifier.vexw == VEXW0)
8216 i.vex.register_specifier = i.op[0].regs;
8217 else
8218 i.vex.register_specifier = i.op[1].regs;
8219 }
8220
8221 /* Destination is an XMM register encoded in the ModRM.reg
8222 and VEX.R bit. */
8223 i.rm.reg = i.op[2].regs->reg_num;
8224 if ((i.op[2].regs->reg_flags & RegRex) != 0)
8225 i.rex |= REX_R;
8226
8227 /* ModRM.rm and VEX.B encode the other source. */
8228 if (!i.mem_operands)
8229 {
8230 i.rm.mode = 3;
8231
8232 if (i.tm.opcode_modifier.vexw == VEXW0)
8233 i.rm.regmem = i.op[1].regs->reg_num;
8234 else
8235 i.rm.regmem = i.op[0].regs->reg_num;
8236
8237 if ((i.op[1].regs->reg_flags & RegRex) != 0)
8238 i.rex |= REX_B;
8239 }
8240 }
8241 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
8242 {
8243 i.vex.register_specifier = i.op[2].regs;
8244 if (!i.mem_operands)
8245 {
8246 i.rm.mode = 3;
8247 i.rm.regmem = i.op[1].regs->reg_num;
8248 if ((i.op[1].regs->reg_flags & RegRex) != 0)
8249 i.rex |= REX_B;
8250 }
8251 }
8252 /* Fill in i.rm.reg or i.rm.regmem field with register operand
8253 (if any) based on i.tm.extension_opcode. Again, we must be
8254 careful to make sure that segment/control/debug/test/MMX
8255 registers are coded into the i.rm.reg field. */
8256 else if (i.reg_operands)
8257 {
8258 unsigned int op;
8259 unsigned int vex_reg = ~0;
8260
8261 for (op = 0; op < i.operands; op++)
8262 {
8263 if (i.types[op].bitfield.class == Reg
8264 || i.types[op].bitfield.class == RegBND
8265 || i.types[op].bitfield.class == RegMask
8266 || i.types[op].bitfield.class == SReg
8267 || i.types[op].bitfield.class == RegCR
8268 || i.types[op].bitfield.class == RegDR
8269 || i.types[op].bitfield.class == RegTR)
8270 break;
8271 if (i.types[op].bitfield.class == RegSIMD)
8272 {
8273 if (i.types[op].bitfield.zmmword)
8274 i.has_regzmm = TRUE;
8275 else if (i.types[op].bitfield.ymmword)
8276 i.has_regymm = TRUE;
8277 else
8278 i.has_regxmm = TRUE;
8279 break;
8280 }
8281 if (i.types[op].bitfield.class == RegMMX)
8282 {
8283 i.has_regmmx = TRUE;
8284 break;
8285 }
8286 }
8287
8288 if (vex_3_sources)
8289 op = dest;
8290 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
8291 {
8292 /* For instructions with VexNDS, the register-only
8293 source operand is encoded in VEX prefix. */
8294 gas_assert (mem != (unsigned int) ~0);
8295
8296 if (op > mem)
8297 {
8298 vex_reg = op++;
8299 gas_assert (op < i.operands);
8300 }
8301 else
8302 {
8303 /* Check register-only source operand when two source
8304 operands are swapped. */
8305 if (!i.tm.operand_types[op].bitfield.baseindex
8306 && i.tm.operand_types[op + 1].bitfield.baseindex)
8307 {
8308 vex_reg = op;
8309 op += 2;
8310 gas_assert (mem == (vex_reg + 1)
8311 && op < i.operands);
8312 }
8313 else
8314 {
8315 vex_reg = op + 1;
8316 gas_assert (vex_reg < i.operands);
8317 }
8318 }
8319 }
8320 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
8321 {
8322 /* For instructions with VexNDD, the register destination
8323 is encoded in VEX prefix. */
8324 if (i.mem_operands == 0)
8325 {
8326 /* There is no memory operand. */
8327 gas_assert ((op + 2) == i.operands);
8328 vex_reg = op + 1;
8329 }
8330 else
8331 {
8332 /* There are only 2 non-immediate operands. */
8333 gas_assert (op < i.imm_operands + 2
8334 && i.operands == i.imm_operands + 2);
8335 vex_reg = i.imm_operands + 1;
8336 }
8337 }
8338 else
8339 gas_assert (op < i.operands);
8340
8341 if (vex_reg != (unsigned int) ~0)
8342 {
8343 i386_operand_type *type = &i.tm.operand_types[vex_reg];
8344
8345 if ((type->bitfield.class != Reg
8346 || (!type->bitfield.dword && !type->bitfield.qword))
8347 && type->bitfield.class != RegSIMD
8348 && !operand_type_equal (type, &regmask))
8349 abort ();
8350
8351 i.vex.register_specifier = i.op[vex_reg].regs;
8352 }
8353
8354 /* Don't set OP operand twice. */
8355 if (vex_reg != op)
8356 {
8357 /* If there is an extension opcode to put here, the
8358 register number must be put into the regmem field. */
8359 if (i.tm.extension_opcode != None)
8360 {
8361 i.rm.regmem = i.op[op].regs->reg_num;
8362 set_rex_vrex (i.op[op].regs, REX_B,
8363 i.tm.opcode_modifier.sse2avx);
8364 }
8365 else
8366 {
8367 i.rm.reg = i.op[op].regs->reg_num;
8368 set_rex_vrex (i.op[op].regs, REX_R,
8369 i.tm.opcode_modifier.sse2avx);
8370 }
8371 }
8372
8373 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
8374 must set it to 3 to indicate this is a register operand
8375 in the regmem field. */
8376 if (!i.mem_operands)
8377 i.rm.mode = 3;
8378 }
8379
8380 /* Fill in i.rm.reg field with extension opcode (if any). */
8381 if (i.tm.extension_opcode != None)
8382 i.rm.reg = i.tm.extension_opcode;
8383 }
8384 return default_seg;
8385 }
8386
8387 static unsigned int
8388 flip_code16 (unsigned int code16)
8389 {
8390 gas_assert (i.tm.operands == 1);
8391
8392 return !(i.prefix[REX_PREFIX] & REX_W)
8393 && (code16 ? i.tm.operand_types[0].bitfield.disp32
8394 || i.tm.operand_types[0].bitfield.disp32s
8395 : i.tm.operand_types[0].bitfield.disp16)
8396 ? CODE16 : 0;
8397 }
8398
8399 static void
8400 output_branch (void)
8401 {
8402 char *p;
8403 int size;
8404 int code16;
8405 int prefix;
8406 relax_substateT subtype;
8407 symbolS *sym;
8408 offsetT off;
8409
8410 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
8411 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
8412
8413 prefix = 0;
8414 if (i.prefix[DATA_PREFIX] != 0)
8415 {
8416 prefix = 1;
8417 i.prefixes -= 1;
8418 code16 ^= flip_code16(code16);
8419 }
8420 /* Pentium4 branch hints. */
8421 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
8422 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
8423 {
8424 prefix++;
8425 i.prefixes--;
8426 }
8427 if (i.prefix[REX_PREFIX] != 0)
8428 {
8429 prefix++;
8430 i.prefixes--;
8431 }
8432
8433 /* BND prefixed jump. */
8434 if (i.prefix[BND_PREFIX] != 0)
8435 {
8436 prefix++;
8437 i.prefixes--;
8438 }
8439
8440 if (i.prefixes != 0)
8441 as_warn (_("skipping prefixes on `%s'"), i.tm.name);
8442
8443 /* It's always a symbol; end the frag & set up for relax.
8444 Make sure there is enough room in this frag for the largest
8445 instruction we may generate in md_convert_frag. This is 2
8446 bytes for the opcode and room for the prefix and largest
8447 displacement. */
8448 frag_grow (prefix + 2 + 4);
8449 /* Prefix and 1 opcode byte go in fr_fix. */
8450 p = frag_more (prefix + 1);
8451 if (i.prefix[DATA_PREFIX] != 0)
8452 *p++ = DATA_PREFIX_OPCODE;
8453 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
8454 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
8455 *p++ = i.prefix[SEG_PREFIX];
8456 if (i.prefix[BND_PREFIX] != 0)
8457 *p++ = BND_PREFIX_OPCODE;
8458 if (i.prefix[REX_PREFIX] != 0)
8459 *p++ = i.prefix[REX_PREFIX];
8460 *p = i.tm.base_opcode;
8461
8462 if ((unsigned char) *p == JUMP_PC_RELATIVE)
8463 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
8464 else if (cpu_arch_flags.bitfield.cpui386)
8465 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
8466 else
8467 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
8468 subtype |= code16;
8469
8470 sym = i.op[0].disps->X_add_symbol;
8471 off = i.op[0].disps->X_add_number;
8472
8473 if (i.op[0].disps->X_op != O_constant
8474 && i.op[0].disps->X_op != O_symbol)
8475 {
8476 /* Handle complex expressions. */
8477 sym = make_expr_symbol (i.op[0].disps);
8478 off = 0;
8479 }
8480
8481 /* 1 possible extra opcode + 4 byte displacement go in var part.
8482 Pass reloc in fr_var. */
8483 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
8484 }
8485
8486 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8487 /* Return TRUE iff PLT32 relocation should be used for branching to
8488 symbol S. */
8489
8490 static bfd_boolean
8491 need_plt32_p (symbolS *s)
8492 {
8493 /* PLT32 relocation is ELF only. */
8494 if (!IS_ELF)
8495 return FALSE;
8496
8497 #ifdef TE_SOLARIS
8498 /* Don't emit a PLT32 relocation on Solaris: neither the native
8499 linker nor krtld supports it. */
8500 return FALSE;
8501 #endif
8502
8503 /* Since there is no need to prepare for PLT branch on x86-64, we
8504 can generate R_X86_64_PLT32, instead of R_X86_64_PC32, which can
8505 be used as a marker for 32-bit PC-relative branches. */
8506 if (!object_64bit)
8507 return FALSE;
8508
8509 /* Weak or undefined symbols need a PLT32 relocation. */
8510 if (S_IS_WEAK (s) || !S_IS_DEFINED (s))
8511 return TRUE;
8512
8513 /* A non-global symbol doesn't need a PLT32 relocation. */
8514 if (! S_IS_EXTERNAL (s))
8515 return FALSE;
8516
8517 /* Other global symbols need a PLT32 relocation. NB: Symbols with
8518 non-default visibility are treated as normal global symbols
8519 so that PLT32 relocation can be used as a marker for 32-bit
8520 PC-relative branches. It is useful for linker relaxation. */
8521 return TRUE;
8522 }
8523 #endif
8524
8525 static void
8526 output_jump (void)
8527 {
8528 char *p;
8529 int size;
8530 fixS *fixP;
8531 bfd_reloc_code_real_type jump_reloc = i.reloc[0];
8532
8533 if (i.tm.opcode_modifier.jump == JUMP_BYTE)
8534 {
8535 /* This is a loop or jecxz type instruction. */
8536 size = 1;
8537 if (i.prefix[ADDR_PREFIX] != 0)
8538 {
8539 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
8540 i.prefixes -= 1;
8541 }
8542 /* Pentium4 branch hints. */
8543 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
8544 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
8545 {
8546 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
8547 i.prefixes--;
8548 }
8549 }
8550 else
8551 {
8552 int code16;
8553
8554 code16 = 0;
8555 if (flag_code == CODE_16BIT)
8556 code16 = CODE16;
8557
8558 if (i.prefix[DATA_PREFIX] != 0)
8559 {
8560 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
8561 i.prefixes -= 1;
8562 code16 ^= flip_code16(code16);
8563 }
8564
8565 size = 4;
8566 if (code16)
8567 size = 2;
8568 }
8569
8570 /* BND prefixed jump. */
8571 if (i.prefix[BND_PREFIX] != 0)
8572 {
8573 FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
8574 i.prefixes -= 1;
8575 }
8576
8577 if (i.prefix[REX_PREFIX] != 0)
8578 {
8579 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
8580 i.prefixes -= 1;
8581 }
8582
8583 if (i.prefixes != 0)
8584 as_warn (_("skipping prefixes on `%s'"), i.tm.name);
8585
8586 p = frag_more (i.tm.opcode_length + size);
8587 switch (i.tm.opcode_length)
8588 {
8589 case 2:
8590 *p++ = i.tm.base_opcode >> 8;
8591 /* Fall through. */
8592 case 1:
8593 *p++ = i.tm.base_opcode;
8594 break;
8595 default:
8596 abort ();
8597 }
8598
8599 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8600 if (size == 4
8601 && jump_reloc == NO_RELOC
8602 && need_plt32_p (i.op[0].disps->X_add_symbol))
8603 jump_reloc = BFD_RELOC_X86_64_PLT32;
8604 #endif
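/* Illustrative example (annotation, not part of the original source):
   on x86-64 ELF, "call foo" against a global or undefined foo emits
   opcode e8 with its 32-bit field covered by an R_X86_64_PLT32
   relocation instead of R_X86_64_PC32, per need_plt32_p above.  */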
8605
8606 jump_reloc = reloc (size, 1, 1, jump_reloc);
8607
8608 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
8609 i.op[0].disps, 1, jump_reloc);
8610
8611 /* All jumps handled here are signed, but don't use a signed limit
8612 check for 32 and 16 bit jumps as we want to allow wrap around at
8613 4G and 64k respectively. */
8614 if (size == 1)
8615 fixP->fx_signed = 1;
8616 }
8617
8618 static void
8619 output_interseg_jump (void)
8620 {
8621 char *p;
8622 int size;
8623 int prefix;
8624 int code16;
8625
8626 code16 = 0;
8627 if (flag_code == CODE_16BIT)
8628 code16 = CODE16;
8629
8630 prefix = 0;
8631 if (i.prefix[DATA_PREFIX] != 0)
8632 {
8633 prefix = 1;
8634 i.prefixes -= 1;
8635 code16 ^= CODE16;
8636 }
8637
8638 gas_assert (!i.prefix[REX_PREFIX]);
8639
8640 size = 4;
8641 if (code16)
8642 size = 2;
8643
8644 if (i.prefixes != 0)
8645 as_warn (_("skipping prefixes on `%s'"), i.tm.name);
8646
8647 /* 1 opcode; 2 segment; offset */
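/* Illustrative example (annotation, not part of the original source):
   in 32-bit code "ljmp $0x8, $0x1000" is laid out exactly this way:
   ea 00 10 00 00 08 00, i.e. opcode, 4-byte offset, 2-byte segment.  */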
8648 p = frag_more (prefix + 1 + 2 + size);
8649
8650 if (i.prefix[DATA_PREFIX] != 0)
8651 *p++ = DATA_PREFIX_OPCODE;
8652
8653 if (i.prefix[REX_PREFIX] != 0)
8654 *p++ = i.prefix[REX_PREFIX];
8655
8656 *p++ = i.tm.base_opcode;
8657 if (i.op[1].imms->X_op == O_constant)
8658 {
8659 offsetT n = i.op[1].imms->X_add_number;
8660
8661 if (size == 2
8662 && !fits_in_unsigned_word (n)
8663 && !fits_in_signed_word (n))
8664 {
8665 as_bad (_("16-bit jump out of range"));
8666 return;
8667 }
8668 md_number_to_chars (p, n, size);
8669 }
8670 else
8671 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
8672 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
8673 if (i.op[0].imms->X_op != O_constant)
8674 as_bad (_("can't handle non absolute segment in `%s'"),
8675 i.tm.name);
8676 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
8677 }
8678
8679 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8680 void
8681 x86_cleanup (void)
8682 {
8683 char *p;
8684 asection *seg = now_seg;
8685 subsegT subseg = now_subseg;
8686 asection *sec;
8687 unsigned int alignment, align_size_1;
8688 unsigned int isa_1_descsz, feature_2_descsz, descsz;
8689 unsigned int isa_1_descsz_raw, feature_2_descsz_raw;
8690 unsigned int padding;
8691
8692 if (!IS_ELF || !x86_used_note)
8693 return;
8694
8695 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_X86;
8696
8697 /* The .note.gnu.property section layout:
8698
8699 Field Length Contents
8700 ---- ---- ----
8701 n_namsz 4 4
8702 n_descsz 4 The note descriptor size
8703 n_type 4 NT_GNU_PROPERTY_TYPE_0
8704 n_name 4 "GNU"
8705 n_desc n_descsz The program property array
8706 .... .... ....
8707 */
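/* Worked example (annotation, derived from the layout above): for
   ELFCLASS64 each 12-byte program property below is padded to 16
   bytes, so with the two properties emitted here descsz is 32 and
   the whole note occupies 4 + 4 + 4 + 4 + 32 = 48 bytes.  */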
8708
8709 /* Create the .note.gnu.property section. */
8710 sec = subseg_new (NOTE_GNU_PROPERTY_SECTION_NAME, 0);
8711 bfd_set_section_flags (sec,
8712 (SEC_ALLOC
8713 | SEC_LOAD
8714 | SEC_DATA
8715 | SEC_HAS_CONTENTS
8716 | SEC_READONLY));
8717
8718 if (get_elf_backend_data (stdoutput)->s->elfclass == ELFCLASS64)
8719 {
8720 align_size_1 = 7;
8721 alignment = 3;
8722 }
8723 else
8724 {
8725 align_size_1 = 3;
8726 alignment = 2;
8727 }
8728
8729 bfd_set_section_alignment (sec, alignment);
8730 elf_section_type (sec) = SHT_NOTE;
8731
8732 /* GNU_PROPERTY_X86_ISA_1_USED: 4-byte type + 4-byte data size
8733 + 4-byte data */
8734 isa_1_descsz_raw = 4 + 4 + 4;
8735 /* Align GNU_PROPERTY_X86_ISA_1_USED. */
8736 isa_1_descsz = (isa_1_descsz_raw + align_size_1) & ~align_size_1;
8737
8738 feature_2_descsz_raw = isa_1_descsz;
8739 /* GNU_PROPERTY_X86_FEATURE_2_USED: 4-byte type + 4-byte data size
8740 + 4-byte data */
8741 feature_2_descsz_raw += 4 + 4 + 4;
8742 /* Align GNU_PROPERTY_X86_FEATURE_2_USED. */
8743 feature_2_descsz = ((feature_2_descsz_raw + align_size_1)
8744 & ~align_size_1);
8745
8746 descsz = feature_2_descsz;
8747 /* Section size: 4 (n_namsz) + 4 (n_descsz) + 4 (n_type) + 4 (n_name) + descsz. */
8748 p = frag_more (4 + 4 + 4 + 4 + descsz);
8749
8750 /* Write n_namsz. */
8751 md_number_to_chars (p, (valueT) 4, 4);
8752
8753 /* Write n_descsz. */
8754 md_number_to_chars (p + 4, (valueT) descsz, 4);
8755
8756 /* Write n_type. */
8757 md_number_to_chars (p + 4 * 2, (valueT) NT_GNU_PROPERTY_TYPE_0, 4);
8758
8759 /* Write n_name. */
8760 memcpy (p + 4 * 3, "GNU", 4);
8761
8762 /* Write 4-byte type. */
8763 md_number_to_chars (p + 4 * 4,
8764 (valueT) GNU_PROPERTY_X86_ISA_1_USED, 4);
8765
8766 /* Write 4-byte data size. */
8767 md_number_to_chars (p + 4 * 5, (valueT) 4, 4);
8768
8769 /* Write 4-byte data. */
8770 md_number_to_chars (p + 4 * 6, (valueT) x86_isa_1_used, 4);
8771
8772 /* Zero out the padding. */
8773 padding = isa_1_descsz - isa_1_descsz_raw;
8774 if (padding)
8775 memset (p + 4 * 7, 0, padding);
8776
8777 /* Write 4-byte type. */
8778 md_number_to_chars (p + isa_1_descsz + 4 * 4,
8779 (valueT) GNU_PROPERTY_X86_FEATURE_2_USED, 4);
8780
8781 /* Write 4-byte data size. */
8782 md_number_to_chars (p + isa_1_descsz + 4 * 5, (valueT) 4, 4);
8783
8784 /* Write 4-byte data. */
8785 md_number_to_chars (p + isa_1_descsz + 4 * 6,
8786 (valueT) x86_feature_2_used, 4);
8787
8788 /* Zero out the padding. */
8789 padding = feature_2_descsz - feature_2_descsz_raw;
8790 if (padding)
8791 memset (p + isa_1_descsz + 4 * 7, 0, padding);
8792
8793 /* We probably can't restore the current segment, for there likely
8794 isn't one yet... */
8795 if (seg && subseg)
8796 subseg_set (seg, subseg);
8797 }
8798 #endif
8799
8800 static unsigned int
8801 encoding_length (const fragS *start_frag, offsetT start_off,
8802 const char *frag_now_ptr)
8803 {
8804 unsigned int len = 0;
8805
8806 if (start_frag != frag_now)
8807 {
8808 const fragS *fr = start_frag;
8809
8810 do {
8811 len += fr->fr_fix;
8812 fr = fr->fr_next;
8813 } while (fr && fr != frag_now);
8814 }
8815
8816 return len - start_off + (frag_now_ptr - frag_now->fr_literal);
8817 }
8818
8819 /* Return 1 for test, and, cmp, add, sub, inc and dec which may
8820 be macro-fused with conditional jumps.
8821 NB: If TEST/AND/CMP/ADD/SUB/INC/DEC uses a RIP-relative address,
8822 or is in one of the following formats:
8823
8824 cmp m, imm
8825 add m, imm
8826 sub m, imm
8827 test m, imm
8828 and m, imm
8829 inc m
8830 dec m
8831
8832 it cannot be fused. */
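/* Illustrative examples (annotation, not part of the original source):
   "cmp %rsi, %rdi" followed by "jne target" may macro-fuse, whereas
   "cmpq $1, (%rdi)" followed by "jne target" is a cmp m, imm form and
   cannot.  */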
8833
8834 static int
8835 maybe_fused_with_jcc_p (enum mf_cmp_kind* mf_cmp_p)
8836 {
8837 /* No RIP address. */
8838 if (i.base_reg && i.base_reg->reg_num == RegIP)
8839 return 0;
8840
8841 /* No VEX/EVEX encoding. */
8842 if (is_any_vex_encoding (&i.tm))
8843 return 0;
8844
8845 /* add, sub without add/sub m, imm. */
8846 if (i.tm.base_opcode <= 5
8847 || (i.tm.base_opcode >= 0x28 && i.tm.base_opcode <= 0x2d)
8848 || ((i.tm.base_opcode | 3) == 0x83
8849 && (i.tm.extension_opcode == 0x5
8850 || i.tm.extension_opcode == 0x0)))
8851 {
8852 *mf_cmp_p = mf_cmp_alu_cmp;
8853 return !(i.mem_operands && i.imm_operands);
8854 }
8855
8856 /* and without and m, imm. */
8857 if ((i.tm.base_opcode >= 0x20 && i.tm.base_opcode <= 0x25)
8858 || ((i.tm.base_opcode | 3) == 0x83
8859 && i.tm.extension_opcode == 0x4))
8860 {
8861 *mf_cmp_p = mf_cmp_test_and;
8862 return !(i.mem_operands && i.imm_operands);
8863 }
8864
8865 /* test without test m, imm. */
8866 if ((i.tm.base_opcode | 1) == 0x85
8867 || (i.tm.base_opcode | 1) == 0xa9
8868 || ((i.tm.base_opcode | 1) == 0xf7
8869 && i.tm.extension_opcode == 0))
8870 {
8871 *mf_cmp_p = mf_cmp_test_and;
8872 return !(i.mem_operands && i.imm_operands);
8873 }
8874
8875 /* cmp without cmp m, imm. */
8876 if ((i.tm.base_opcode >= 0x38 && i.tm.base_opcode <= 0x3d)
8877 || ((i.tm.base_opcode | 3) == 0x83
8878 && (i.tm.extension_opcode == 0x7)))
8879 {
8880 *mf_cmp_p = mf_cmp_alu_cmp;
8881 return !(i.mem_operands && i.imm_operands);
8882 }
8883
8884 /* inc, dec without inc/dec m. */
8885 if ((i.tm.cpu_flags.bitfield.cpuno64
8886 && (i.tm.base_opcode | 0xf) == 0x4f)
8887 || ((i.tm.base_opcode | 1) == 0xff
8888 && i.tm.extension_opcode <= 0x1))
8889 {
8890 *mf_cmp_p = mf_cmp_incdec;
8891 return !i.mem_operands;
8892 }
8893
8894 return 0;
8895 }
8896
8897 /* Return 1 if a FUSED_JCC_PADDING frag should be generated. */
8898
8899 static int
8900 add_fused_jcc_padding_frag_p (enum mf_cmp_kind* mf_cmp_p)
8901 {
8902 /* NB: Doesn't work with COND_JUMP86 without i386. */
8903 if (!align_branch_power
8904 || now_seg == absolute_section
8905 || !cpu_arch_flags.bitfield.cpui386
8906 || !(align_branch & align_branch_fused_bit))
8907 return 0;
8908
8909 if (maybe_fused_with_jcc_p (mf_cmp_p))
8910 {
8911 if (last_insn.kind == last_insn_other
8912 || last_insn.seg != now_seg)
8913 return 1;
8914 if (flag_debug)
8915 as_warn_where (last_insn.file, last_insn.line,
8916 _("`%s` skips -malign-branch-boundary on `%s`"),
8917 last_insn.name, i.tm.name);
8918 }
8919
8920 return 0;
8921 }
8922
8923 /* Return 1 if a BRANCH_PREFIX frag should be generated. */
8924
8925 static int
8926 add_branch_prefix_frag_p (void)
8927 {
8928 /* NB: Doesn't work with COND_JUMP86 without i386. Don't add a prefix
8929 to PadLock instructions since they include prefixes in the opcode. */
8930 if (!align_branch_power
8931 || !align_branch_prefix_size
8932 || now_seg == absolute_section
8933 || i.tm.cpu_flags.bitfield.cpupadlock
8934 || !cpu_arch_flags.bitfield.cpui386)
8935 return 0;
8936
8937 /* Don't add a prefix if the insn is itself a prefix, or if there is
8938 no operand, in which case a segment prefix may have special meaning. */
8939 if (!i.operands || i.tm.opcode_modifier.isprefix)
8940 return 0;
8941
8942 if (last_insn.kind == last_insn_other
8943 || last_insn.seg != now_seg)
8944 return 1;
8945
8946 if (flag_debug)
8947 as_warn_where (last_insn.file, last_insn.line,
8948 _("`%s` skips -malign-branch-boundary on `%s`"),
8949 last_insn.name, i.tm.name);
8950
8951 return 0;
8952 }
8953
8954 /* Return 1 if a BRANCH_PADDING frag should be generated. */
8955
8956 static int
8957 add_branch_padding_frag_p (enum align_branch_kind *branch_p,
8958 enum mf_jcc_kind *mf_jcc_p)
8959 {
8960 int add_padding;
8961
8962 /* NB: Doesn't work with COND_JUMP86 without i386. */
8963 if (!align_branch_power
8964 || now_seg == absolute_section
8965 || !cpu_arch_flags.bitfield.cpui386)
8966 return 0;
8967
8968 add_padding = 0;
8969
8970 /* Check for jcc and direct jmp. */
8971 if (i.tm.opcode_modifier.jump == JUMP)
8972 {
8973 if (i.tm.base_opcode == JUMP_PC_RELATIVE)
8974 {
8975 *branch_p = align_branch_jmp;
8976 add_padding = align_branch & align_branch_jmp_bit;
8977 }
8978 else
8979 {
8980 /* Because J<cc> and JN<cc> share the same group in the macro-fusible
8981 table, ignore the lowest bit. */
8982 *mf_jcc_p = (i.tm.base_opcode & 0x0e) >> 1;
8983 *branch_p = align_branch_jcc;
8984 if ((align_branch & align_branch_jcc_bit))
8985 add_padding = 1;
8986 }
8987 }
8988 else if (is_any_vex_encoding (&i.tm))
8989 return 0;
8990 else if ((i.tm.base_opcode | 1) == 0xc3)
8991 {
8992 /* Near ret. */
8993 *branch_p = align_branch_ret;
8994 if ((align_branch & align_branch_ret_bit))
8995 add_padding = 1;
8996 }
8997 else
8998 {
8999 /* Check for indirect jmp, direct and indirect calls. */
9000 if (i.tm.base_opcode == 0xe8)
9001 {
9002 /* Direct call. */
9003 *branch_p = align_branch_call;
9004 if ((align_branch & align_branch_call_bit))
9005 add_padding = 1;
9006 }
9007 else if (i.tm.base_opcode == 0xff
9008 && (i.tm.extension_opcode == 2
9009 || i.tm.extension_opcode == 4))
9010 {
9011 /* Indirect call and jmp. */
9012 *branch_p = align_branch_indirect;
9013 if ((align_branch & align_branch_indirect_bit))
9014 add_padding = 1;
9015 }
9016
9017 if (add_padding
9018 && i.disp_operands
9019 && tls_get_addr
9020 && (i.op[0].disps->X_op == O_symbol
9021 || (i.op[0].disps->X_op == O_subtract
9022 && i.op[0].disps->X_op_symbol == GOT_symbol)))
9023 {
9024 symbolS *s = i.op[0].disps->X_add_symbol;
9025 /* No padding for calls to a global or undefined tls_get_addr. */
9026 if ((S_IS_EXTERNAL (s) || !S_IS_DEFINED (s))
9027 && strcmp (S_GET_NAME (s), tls_get_addr) == 0)
9028 return 0;
9029 }
9030 }
9031
9032 if (add_padding
9033 && last_insn.kind != last_insn_other
9034 && last_insn.seg == now_seg)
9035 {
9036 if (flag_debug)
9037 as_warn_where (last_insn.file, last_insn.line,
9038 _("`%s` skips -malign-branch-boundary on `%s`"),
9039 last_insn.name, i.tm.name);
9040 return 0;
9041 }
9042
9043 return add_padding;
9044 }
9045
9046 static void
9047 output_insn (void)
9048 {
9049 fragS *insn_start_frag;
9050 offsetT insn_start_off;
9051 fragS *fragP = NULL;
9052 enum align_branch_kind branch = align_branch_none;
9053 /* The initializer is arbitrary, just to avoid an uninitialized-variable
9054 error. It's actually either assigned in add_branch_padding_frag_p
9055 or never used. */
9056 enum mf_jcc_kind mf_jcc = mf_jcc_jo;
9057
9058 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9059 if (IS_ELF && x86_used_note)
9060 {
9061 if (i.tm.cpu_flags.bitfield.cpucmov)
9062 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_CMOV;
9063 if (i.tm.cpu_flags.bitfield.cpusse)
9064 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE;
9065 if (i.tm.cpu_flags.bitfield.cpusse2)
9066 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE2;
9067 if (i.tm.cpu_flags.bitfield.cpusse3)
9068 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE3;
9069 if (i.tm.cpu_flags.bitfield.cpussse3)
9070 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSSE3;
9071 if (i.tm.cpu_flags.bitfield.cpusse4_1)
9072 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE4_1;
9073 if (i.tm.cpu_flags.bitfield.cpusse4_2)
9074 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_SSE4_2;
9075 if (i.tm.cpu_flags.bitfield.cpuavx)
9076 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX;
9077 if (i.tm.cpu_flags.bitfield.cpuavx2)
9078 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX2;
9079 if (i.tm.cpu_flags.bitfield.cpufma)
9080 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_FMA;
9081 if (i.tm.cpu_flags.bitfield.cpuavx512f)
9082 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512F;
9083 if (i.tm.cpu_flags.bitfield.cpuavx512cd)
9084 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512CD;
9085 if (i.tm.cpu_flags.bitfield.cpuavx512er)
9086 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512ER;
9087 if (i.tm.cpu_flags.bitfield.cpuavx512pf)
9088 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512PF;
9089 if (i.tm.cpu_flags.bitfield.cpuavx512vl)
9090 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512VL;
9091 if (i.tm.cpu_flags.bitfield.cpuavx512dq)
9092 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512DQ;
9093 if (i.tm.cpu_flags.bitfield.cpuavx512bw)
9094 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512BW;
9095 if (i.tm.cpu_flags.bitfield.cpuavx512_4fmaps)
9096 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_4FMAPS;
9097 if (i.tm.cpu_flags.bitfield.cpuavx512_4vnniw)
9098 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_4VNNIW;
9099 if (i.tm.cpu_flags.bitfield.cpuavx512_bitalg)
9100 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_BITALG;
9101 if (i.tm.cpu_flags.bitfield.cpuavx512ifma)
9102 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_IFMA;
9103 if (i.tm.cpu_flags.bitfield.cpuavx512vbmi)
9104 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI;
9105 if (i.tm.cpu_flags.bitfield.cpuavx512_vbmi2)
9106 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_VBMI2;
9107 if (i.tm.cpu_flags.bitfield.cpuavx512_vnni)
9108 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_VNNI;
9109 if (i.tm.cpu_flags.bitfield.cpuavx512_bf16)
9110 x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_AVX512_BF16;
9111
9112 if (i.tm.cpu_flags.bitfield.cpu8087
9113 || i.tm.cpu_flags.bitfield.cpu287
9114 || i.tm.cpu_flags.bitfield.cpu387
9115 || i.tm.cpu_flags.bitfield.cpu687
9116 || i.tm.cpu_flags.bitfield.cpufisttp)
9117 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_X87;
9118 if (i.has_regmmx
9119 || i.tm.base_opcode == 0xf77 /* emms */
9120 || i.tm.base_opcode == 0xf0e /* femms */
9121 || i.tm.base_opcode == 0xf2a /* cvtpi2ps */
9122 || i.tm.base_opcode == 0x660f2a /* cvtpi2pd */)
9123 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_MMX;
9124 if (i.has_regxmm)
9125 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XMM;
9126 if (i.has_regymm)
9127 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_YMM;
9128 if (i.has_regzmm)
9129 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_ZMM;
9130 if (i.tm.cpu_flags.bitfield.cpufxsr)
9131 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_FXSR;
9132 if (i.tm.cpu_flags.bitfield.cpuxsave)
9133 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XSAVE;
9134 if (i.tm.cpu_flags.bitfield.cpuxsaveopt)
9135 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT;
9136 if (i.tm.cpu_flags.bitfield.cpuxsavec)
9137 x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XSAVEC;
9138 }
9139 #endif
9140
9141 /* Tie dwarf2 debug info to the address at the start of the insn.
9142 We can't do this after the insn has been output as the current
9143 frag may have been closed off, e.g. by frag_var. */
9144 dwarf2_emit_insn (0);
9145
9146 insn_start_frag = frag_now;
9147 insn_start_off = frag_now_fix ();
9148
9149 if (add_branch_padding_frag_p (&branch, &mf_jcc))
9150 {
9151 char *p;
9152 /* Branch can be 8 bytes. Leave some room for prefixes. */
9153 unsigned int max_branch_padding_size = 14;
9154
9155 /* Align section to boundary. */
9156 record_alignment (now_seg, align_branch_power);
9157
9158 /* Make room for padding. */
9159 frag_grow (max_branch_padding_size);
9160
9161 /* Start of the padding. */
9162 p = frag_more (0);
9163
9164 fragP = frag_now;
9165
9166 frag_var (rs_machine_dependent, max_branch_padding_size, 0,
9167 ENCODE_RELAX_STATE (BRANCH_PADDING, 0),
9168 NULL, 0, p);
9169
9170 fragP->tc_frag_data.mf_type = mf_jcc;
9171 fragP->tc_frag_data.branch_type = branch;
9172 fragP->tc_frag_data.max_bytes = max_branch_padding_size;
9173 }
9174
9175 /* Output jumps. */
9176 if (i.tm.opcode_modifier.jump == JUMP)
9177 output_branch ();
9178 else if (i.tm.opcode_modifier.jump == JUMP_BYTE
9179 || i.tm.opcode_modifier.jump == JUMP_DWORD)
9180 output_jump ();
9181 else if (i.tm.opcode_modifier.jump == JUMP_INTERSEGMENT)
9182 output_interseg_jump ();
9183 else
9184 {
9185 /* Output normal instructions here. */
9186 char *p;
9187 unsigned char *q;
9188 unsigned int j;
9189 unsigned int prefix;
9190 enum mf_cmp_kind mf_cmp;
9191
9192 if (avoid_fence
9193 && (i.tm.base_opcode == 0xfaee8
9194 || i.tm.base_opcode == 0xfaef0
9195 || i.tm.base_opcode == 0xfaef8))
9196 {
9197 /* Encode lfence, mfence, and sfence as
9198 f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
9199 offsetT val = 0x240483f0ULL;
9200 p = frag_more (5);
9201 md_number_to_chars (p, val, 5);
9202 return;
9203 }
9204
9205 /* Some processors fail on the LOCK prefix. This option makes the
9206 assembler ignore the LOCK prefix and serves as a workaround. */
9207 if (omit_lock_prefix)
9208 {
9209 if (i.tm.base_opcode == LOCK_PREFIX_OPCODE)
9210 return;
9211 i.prefix[LOCK_PREFIX] = 0;
9212 }
9213
9214 if (branch)
9215 /* Skip if this is a branch. */
9216 ;
9217 else if (add_fused_jcc_padding_frag_p (&mf_cmp))
9218 {
9219 /* Make room for padding. */
9220 frag_grow (MAX_FUSED_JCC_PADDING_SIZE);
9221 p = frag_more (0);
9222
9223 fragP = frag_now;
9224
9225 frag_var (rs_machine_dependent, MAX_FUSED_JCC_PADDING_SIZE, 0,
9226 ENCODE_RELAX_STATE (FUSED_JCC_PADDING, 0),
9227 NULL, 0, p);
9228
9229 fragP->tc_frag_data.mf_type = mf_cmp;
9230 fragP->tc_frag_data.branch_type = align_branch_fused;
9231 fragP->tc_frag_data.max_bytes = MAX_FUSED_JCC_PADDING_SIZE;
9232 }
9233 else if (add_branch_prefix_frag_p ())
9234 {
9235 unsigned int max_prefix_size = align_branch_prefix_size;
9236
9237 /* Make room for padding. */
9238 frag_grow (max_prefix_size);
9239 p = frag_more (0);
9240
9241 fragP = frag_now;
9242
9243 frag_var (rs_machine_dependent, max_prefix_size, 0,
9244 ENCODE_RELAX_STATE (BRANCH_PREFIX, 0),
9245 NULL, 0, p);
9246
9247 fragP->tc_frag_data.max_bytes = max_prefix_size;
9248 }
9249
9250 /* Since the VEX/EVEX prefix contains the implicit prefix, we
9251 don't need the explicit prefix. */
9252 if (!i.tm.opcode_modifier.vex && !i.tm.opcode_modifier.evex)
9253 {
9254 switch (i.tm.opcode_length)
9255 {
9256 case 3:
9257 if (i.tm.base_opcode & 0xff000000)
9258 {
9259 prefix = (i.tm.base_opcode >> 24) & 0xff;
9260 if (!i.tm.cpu_flags.bitfield.cpupadlock
9261 || prefix != REPE_PREFIX_OPCODE
9262 || (i.prefix[REP_PREFIX] != REPE_PREFIX_OPCODE))
9263 add_prefix (prefix);
9264 }
9265 break;
9266 case 2:
9267 if ((i.tm.base_opcode & 0xff0000) != 0)
9268 {
9269 prefix = (i.tm.base_opcode >> 16) & 0xff;
9270 add_prefix (prefix);
9271 }
9272 break;
9273 case 1:
9274 break;
9275 case 0:
9276 /* Check for pseudo prefixes. */
9277 as_bad_where (insn_start_frag->fr_file,
9278 insn_start_frag->fr_line,
9279 _("pseudo prefix without instruction"));
9280 return;
9281 default:
9282 abort ();
9283 }
9284
9285 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9286 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
9287 R_X86_64_GOTTPOFF relocation so that linker can safely
9288 perform IE->LE optimization. A dummy REX_OPCODE prefix
9289 is also needed for lea with R_X86_64_GOTPC32_TLSDESC
9290 relocation for GDesc -> IE/LE optimization. */
9291 if (x86_elf_abi == X86_64_X32_ABI
9292 && i.operands == 2
9293 && (i.reloc[0] == BFD_RELOC_X86_64_GOTTPOFF
9294 || i.reloc[0] == BFD_RELOC_X86_64_GOTPC32_TLSDESC)
9295 && i.prefix[REX_PREFIX] == 0)
9296 add_prefix (REX_OPCODE);
9297 #endif
9298
9299 /* The prefix bytes. */
9300 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
9301 if (*q)
9302 FRAG_APPEND_1_CHAR (*q);
9303 }
9304 else
9305 {
9306 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
9307 if (*q)
9308 switch (j)
9309 {
9310 case SEG_PREFIX:
9311 case ADDR_PREFIX:
9312 FRAG_APPEND_1_CHAR (*q);
9313 break;
9314 default:
9315 /* There should be no other prefixes for instructions
9316 with a VEX prefix. */
9317 abort ();
9318 }
9319
9320 /* For EVEX instructions i.vrex should become 0 after
9321 build_evex_prefix. For VEX instructions the upper 16 registers
9322 aren't available, so VREX should be 0. */
9323 if (i.vrex)
9324 abort ();
9325 /* Now the VEX prefix. */
9326 p = frag_more (i.vex.length);
9327 for (j = 0; j < i.vex.length; j++)
9328 p[j] = i.vex.bytes[j];
9329 }
9330
9331 /* Now the opcode; be careful about word order here! */
9332 if (i.tm.opcode_length == 1)
9333 {
9334 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
9335 }
9336 else
9337 {
9338 switch (i.tm.opcode_length)
9339 {
9340 case 4:
9341 p = frag_more (4);
9342 *p++ = (i.tm.base_opcode >> 24) & 0xff;
9343 *p++ = (i.tm.base_opcode >> 16) & 0xff;
9344 break;
9345 case 3:
9346 p = frag_more (3);
9347 *p++ = (i.tm.base_opcode >> 16) & 0xff;
9348 break;
9349 case 2:
9350 p = frag_more (2);
9351 break;
9352 default:
9353 abort ();
9354 break;
9355 }
9356
9357 /* Put out high byte first: can't use md_number_to_chars! */
9358 *p++ = (i.tm.base_opcode >> 8) & 0xff;
9359 *p = i.tm.base_opcode & 0xff;
9360 }
9361
9362 /* Now the modrm byte and sib byte (if present). */
9363 if (i.tm.opcode_modifier.modrm)
9364 {
9365 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
9366 | i.rm.reg << 3
9367 | i.rm.mode << 6));
9368 /* If i.rm.regmem == ESP (4)
9369 && i.rm.mode != (Register mode)
9370 && not 16 bit
9371 ==> need second modrm byte. */
9372 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
9373 && i.rm.mode != 3
9374 && !(i.base_reg && i.base_reg->reg_type.bitfield.word))
9375 FRAG_APPEND_1_CHAR ((i.sib.base << 0
9376 | i.sib.index << 3
9377 | i.sib.scale << 6));
9378 }
9379
9380 if (i.disp_operands)
9381 output_disp (insn_start_frag, insn_start_off);
9382
9383 if (i.imm_operands)
9384 output_imm (insn_start_frag, insn_start_off);
9385
9386 /*
9387 * frag_now_fix () returning plain abs_section_offset when we're in the
9388 * absolute section, and abs_section_offset not getting updated as data
9389 * gets added to the frag breaks the logic below.
9390 */
9391 if (now_seg != absolute_section)
9392 {
9393 j = encoding_length (insn_start_frag, insn_start_off, frag_more (0));
9394 if (j > 15)
9395 as_warn (_("instruction length of %u bytes exceeds the limit of 15"),
9396 j);
9397 else if (fragP)
9398 {
9399 /* NB: Don't add prefix with GOTPC relocation since
9400 output_disp() above depends on the fixed encoding
9401 length. Can't add prefix with TLS relocation since
9402 it breaks TLS linker optimization. */
9403 unsigned int max = i.has_gotpc_tls_reloc ? 0 : 15 - j;
9404 /* Prefix count on the current instruction. */
9405 unsigned int count = i.vex.length;
9406 unsigned int k;
9407 for (k = 0; k < ARRAY_SIZE (i.prefix); k++)
9408 /* REX byte is encoded in VEX/EVEX prefix. */
9409 if (i.prefix[k] && (k != REX_PREFIX || !i.vex.length))
9410 count++;
9411
9412 /* Count prefixes for extended opcode maps. */
9413 if (!i.vex.length)
9414 switch (i.tm.opcode_length)
9415 {
9416 case 3:
9417 if (((i.tm.base_opcode >> 16) & 0xff) == 0xf)
9418 {
9419 count++;
9420 switch ((i.tm.base_opcode >> 8) & 0xff)
9421 {
9422 case 0x38:
9423 case 0x3a:
9424 count++;
9425 break;
9426 default:
9427 break;
9428 }
9429 }
9430 break;
9431 case 2:
9432 if (((i.tm.base_opcode >> 8) & 0xff) == 0xf)
9433 count++;
9434 break;
9435 case 1:
9436 break;
9437 default:
9438 abort ();
9439 }
9440
9441 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype)
9442 == BRANCH_PREFIX)
9443 {
9444 /* Set the maximum prefix size in BRANCH_PREFIX
9445 frag. */
9446 if (fragP->tc_frag_data.max_bytes > max)
9447 fragP->tc_frag_data.max_bytes = max;
9448 if (fragP->tc_frag_data.max_bytes > count)
9449 fragP->tc_frag_data.max_bytes -= count;
9450 else
9451 fragP->tc_frag_data.max_bytes = 0;
9452 }
9453 else
9454 {
9455 /* Remember the maximum prefix size in FUSED_JCC_PADDING
9456 frag. */
9457 unsigned int max_prefix_size;
9458 if (align_branch_prefix_size > max)
9459 max_prefix_size = max;
9460 else
9461 max_prefix_size = align_branch_prefix_size;
9462 if (max_prefix_size > count)
9463 fragP->tc_frag_data.max_prefix_length
9464 = max_prefix_size - count;
9465 }
9466
9467 /* Use existing segment prefix if possible. Use CS
9468 segment prefix in 64-bit mode. In 32-bit mode, use SS
9469 segment prefix with ESP/EBP base register and use DS
9470 segment prefix without ESP/EBP base register. */
9471 if (i.prefix[SEG_PREFIX])
9472 fragP->tc_frag_data.default_prefix = i.prefix[SEG_PREFIX];
9473 else if (flag_code == CODE_64BIT)
9474 fragP->tc_frag_data.default_prefix = CS_PREFIX_OPCODE;
9475 else if (i.base_reg
9476 && (i.base_reg->reg_num == 4
9477 || i.base_reg->reg_num == 5))
9478 fragP->tc_frag_data.default_prefix = SS_PREFIX_OPCODE;
9479 else
9480 fragP->tc_frag_data.default_prefix = DS_PREFIX_OPCODE;
9481 }
9482 }
9483 }
9484
9485 /* NB: Don't work with COND_JUMP86 without i386. */
9486 if (align_branch_power
9487 && now_seg != absolute_section
9488 && cpu_arch_flags.bitfield.cpui386)
9489 {
9490 /* Terminate each frag so that we can add prefix and check for
9491 fused jcc. */
9492 frag_wane (frag_now);
9493 frag_new (0);
9494 }
9495
9496 #ifdef DEBUG386
9497 if (flag_debug)
9498 {
9499 pi ("" /*line*/, &i);
9500 }
9501 #endif /* DEBUG386 */
9502 }
9503
9504 /* Return the size of the displacement operand N. */
9505
9506 static int
9507 disp_size (unsigned int n)
9508 {
9509 int size = 4;
9510
9511 if (i.types[n].bitfield.disp64)
9512 size = 8;
9513 else if (i.types[n].bitfield.disp8)
9514 size = 1;
9515 else if (i.types[n].bitfield.disp16)
9516 size = 2;
9517 return size;
9518 }
9519
9520 /* Return the size of the immediate operand N. */
9521
9522 static int
9523 imm_size (unsigned int n)
9524 {
9525 int size = 4;
9526 if (i.types[n].bitfield.imm64)
9527 size = 8;
9528 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
9529 size = 1;
9530 else if (i.types[n].bitfield.imm16)
9531 size = 2;
9532 return size;
9533 }
9534
9535 static void
9536 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
9537 {
9538 char *p;
9539 unsigned int n;
9540
9541 for (n = 0; n < i.operands; n++)
9542 {
9543 if (operand_type_check (i.types[n], disp))
9544 {
9545 if (i.op[n].disps->X_op == O_constant)
9546 {
9547 int size = disp_size (n);
9548 offsetT val = i.op[n].disps->X_add_number;
9549
9550 val = offset_in_range (val >> (size == 1 ? i.memshift : 0),
9551 size);
9552 p = frag_more (size);
9553 md_number_to_chars (p, val, size);
9554 }
9555 else
9556 {
9557 enum bfd_reloc_code_real reloc_type;
9558 int size = disp_size (n);
9559 int sign = i.types[n].bitfield.disp32s;
9560 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
9561 fixS *fixP;
9562
9563 /* We can't have 8 bit displacement here. */
9564 gas_assert (!i.types[n].bitfield.disp8);
9565
9566 /* The PC relative address is computed relative
9567 to the instruction boundary, so if immediate
9568 fields follow, we need to adjust the value. */
9569 if (pcrel && i.imm_operands)
9570 {
9571 unsigned int n1;
9572 int sz = 0;
9573
9574 for (n1 = 0; n1 < i.operands; n1++)
9575 if (operand_type_check (i.types[n1], imm))
9576 {
9577 /* Only one immediate is allowed for PC
9578 relative address. */
9579 gas_assert (sz == 0);
9580 sz = imm_size (n1);
9581 i.op[n].disps->X_add_number -= sz;
9582 }
9583 /* We should find the immediate. */
9584 gas_assert (sz != 0);
9585 }
9586
9587 p = frag_more (size);
9588 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
9589 if (GOT_symbol
9590 && GOT_symbol == i.op[n].disps->X_add_symbol
9591 && (((reloc_type == BFD_RELOC_32
9592 || reloc_type == BFD_RELOC_X86_64_32S
9593 || (reloc_type == BFD_RELOC_64
9594 && object_64bit))
9595 && (i.op[n].disps->X_op == O_symbol
9596 || (i.op[n].disps->X_op == O_add
9597 && ((symbol_get_value_expression
9598 (i.op[n].disps->X_op_symbol)->X_op)
9599 == O_subtract))))
9600 || reloc_type == BFD_RELOC_32_PCREL))
9601 {
9602 if (!object_64bit)
9603 {
9604 reloc_type = BFD_RELOC_386_GOTPC;
9605 i.has_gotpc_tls_reloc = TRUE;
9606 i.op[n].imms->X_add_number +=
9607 encoding_length (insn_start_frag, insn_start_off, p);
9608 }
9609 else if (reloc_type == BFD_RELOC_64)
9610 reloc_type = BFD_RELOC_X86_64_GOTPC64;
9611 else
9612 /* Don't do the adjustment for x86-64, as there
9613 the pcrel addressing is relative to the _next_
9614 insn, and that is taken care of in other code. */
9615 reloc_type = BFD_RELOC_X86_64_GOTPC32;
9616 }
9617 else if (align_branch_power)
9618 {
9619 switch (reloc_type)
9620 {
9621 case BFD_RELOC_386_TLS_GD:
9622 case BFD_RELOC_386_TLS_LDM:
9623 case BFD_RELOC_386_TLS_IE:
9624 case BFD_RELOC_386_TLS_IE_32:
9625 case BFD_RELOC_386_TLS_GOTIE:
9626 case BFD_RELOC_386_TLS_GOTDESC:
9627 case BFD_RELOC_386_TLS_DESC_CALL:
9628 case BFD_RELOC_X86_64_TLSGD:
9629 case BFD_RELOC_X86_64_TLSLD:
9630 case BFD_RELOC_X86_64_GOTTPOFF:
9631 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9632 case BFD_RELOC_X86_64_TLSDESC_CALL:
9633 i.has_gotpc_tls_reloc = TRUE;
9634 default:
9635 break;
9636 }
9637 }
9638 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal,
9639 size, i.op[n].disps, pcrel,
9640 reloc_type);
9641 /* Check for "call/jmp *mem", "mov mem, %reg",
9642 "test %reg, mem" and "binop mem, %reg" where binop
9643 is one of adc, add, and, cmp, or, sbb, sub, xor
9644 instructions without data prefix. Always generate
9645 R_386_GOT32X for "sym*GOT" operand in 32-bit mode. */
9646 if (i.prefix[DATA_PREFIX] == 0
9647 && (generate_relax_relocations
9648 || (!object_64bit
9649 && i.rm.mode == 0
9650 && i.rm.regmem == 5))
9651 && (i.rm.mode == 2
9652 || (i.rm.mode == 0 && i.rm.regmem == 5))
9653 && !is_any_vex_encoding(&i.tm)
9654 && ((i.operands == 1
9655 && i.tm.base_opcode == 0xff
9656 && (i.rm.reg == 2 || i.rm.reg == 4))
9657 || (i.operands == 2
9658 && (i.tm.base_opcode == 0x8b
9659 || i.tm.base_opcode == 0x85
9660 || (i.tm.base_opcode & ~0x38) == 0x03))))
9661 {
9662 if (object_64bit)
9663 {
9664 fixP->fx_tcbit = i.rex != 0;
9665 if (i.base_reg
9666 && (i.base_reg->reg_num == RegIP))
9667 fixP->fx_tcbit2 = 1;
9668 }
9669 else
9670 fixP->fx_tcbit2 = 1;
9671 }
9672 }
9673 }
9674 }
9675 }
9676
9677 static void
9678 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
9679 {
9680 char *p;
9681 unsigned int n;
9682
9683 for (n = 0; n < i.operands; n++)
9684 {
9685 /* Skip the SAE/RC Imm operand in EVEX; it is already handled. */
9686 if (i.rounding && (int) n == i.rounding->operand)
9687 continue;
9688
9689 if (operand_type_check (i.types[n], imm))
9690 {
9691 if (i.op[n].imms->X_op == O_constant)
9692 {
9693 int size = imm_size (n);
9694 offsetT val;
9695
9696 val = offset_in_range (i.op[n].imms->X_add_number,
9697 size);
9698 p = frag_more (size);
9699 md_number_to_chars (p, val, size);
9700 }
9701 else
9702 {
9703 /* Not absolute_section.
9704 Need a 32-bit fixup (don't support 8bit
9705 non-absolute imms). Try to support other
9706 sizes ... */
9707 enum bfd_reloc_code_real reloc_type;
9708 int size = imm_size (n);
9709 int sign;
9710
9711 if (i.types[n].bitfield.imm32s
9712 && (i.suffix == QWORD_MNEM_SUFFIX
9713 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
9714 sign = 1;
9715 else
9716 sign = 0;
9717
9718 p = frag_more (size);
9719 reloc_type = reloc (size, 0, sign, i.reloc[n]);
9720
9721 /* This is tough to explain. We end up with this one if we
9722 * have operands that look like
9723 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
9724 * obtain the absolute address of the GOT, and it is strongly
9725 * preferable from a performance point of view to avoid using
9726 * a runtime relocation for this. The actual sequence of
9727 * instructions often looks something like:
9728 *
9729 * call .L66
9730 * .L66:
9731 * popl %ebx
9732 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
9733 *
9734 * The call and pop essentially return the absolute address
9735 * of the label .L66 and store it in %ebx. The linker itself
9736 * will ultimately change the first operand of the addl so
9737 * that %ebx points to the GOT, but to keep things simple, the
9738 * .o file must have this operand set so that it generates not
9739 * the absolute address of .L66, but the absolute address of
9740 * itself. This allows the linker itself to simply treat a GOTPC
9741 * relocation as asking for a pcrel offset to the GOT to be
9742 * added in, and the addend of the relocation is stored in the
9743 * operand field for the instruction itself.
9744 *
9745 * Our job here is to fix the operand so that it would add
9746 * the correct offset so that %ebx would point to itself. The
9747 * thing that is tricky is that .-.L66 will point to the
9748 * beginning of the instruction, so we need to further modify
9749 * the operand so that it will point to itself. There are
9750 * other cases where you have something like:
9751 *
9752 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
9753 *
9754 * and here no correction would be required. Internally in
9755 * the assembler we treat operands of this form as not being
9756 * pcrel since the '.' is explicitly mentioned, and I wonder
9757 * whether it would simplify matters to do it this way. Who
9758 * knows. In earlier versions of the PIC patches, the
9759 * pcrel_adjust field was used to store the correction, but
9760 * since the expression is not pcrel, I felt it would be
9761 * confusing to do it this way. */
9762
9763 if ((reloc_type == BFD_RELOC_32
9764 || reloc_type == BFD_RELOC_X86_64_32S
9765 || reloc_type == BFD_RELOC_64)
9766 && GOT_symbol
9767 && GOT_symbol == i.op[n].imms->X_add_symbol
9768 && (i.op[n].imms->X_op == O_symbol
9769 || (i.op[n].imms->X_op == O_add
9770 && ((symbol_get_value_expression
9771 (i.op[n].imms->X_op_symbol)->X_op)
9772 == O_subtract))))
9773 {
9774 if (!object_64bit)
9775 reloc_type = BFD_RELOC_386_GOTPC;
9776 else if (size == 4)
9777 reloc_type = BFD_RELOC_X86_64_GOTPC32;
9778 else if (size == 8)
9779 reloc_type = BFD_RELOC_X86_64_GOTPC64;
9780 i.has_gotpc_tls_reloc = TRUE;
9781 i.op[n].imms->X_add_number +=
9782 encoding_length (insn_start_frag, insn_start_off, p);
9783 }
9784 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
9785 i.op[n].imms, 0, reloc_type);
9786 }
9787 }
9788 }
9789 }
9790 \f
9791 /* x86_cons_fix_new is called via the expression parsing code when a
9792 reloc is needed. We use this hook to get the correct .got reloc. */
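/* For example (an illustrative sketch, with `foo' a placeholder
   symbol), a data directive such as `.long foo@GOT' ends up here:
   the word32 relocation recorded by lex_got is adjusted to LEN
   bytes via reloc(). */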
9793 static int cons_sign = -1;
9794
9795 void
9796 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
9797 expressionS *exp, bfd_reloc_code_real_type r)
9798 {
9799 r = reloc (len, 0, cons_sign, r);
9800
9801 #ifdef TE_PE
9802 if (exp->X_op == O_secrel)
9803 {
9804 exp->X_op = O_symbol;
9805 r = BFD_RELOC_32_SECREL;
9806 }
9807 #endif
9808
9809 fix_new_exp (frag, off, len, exp, 0, r);
9810 }
9811
9812 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
9813 purpose of the `.dc.a' internal pseudo-op. */
9814
9815 int
9816 x86_address_bytes (void)
9817 {
9818 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
9819 return 4;
9820 return stdoutput->arch_info->bits_per_address / 8;
9821 }
9822
9823 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
9824 || defined (LEX_AT)
9825 # define lex_got(reloc, adjust, types) NULL
9826 #else
9827 /* Parse operands of the form
9828 <symbol>@GOTOFF+<nnn>
9829 and similar .plt or .got references.
9830
9831 If we find one, set up the correct relocation in RELOC and copy the
9832 input string, minus the `@GOTOFF' into a malloc'd buffer for
9833 parsing by the calling routine. Return this buffer, and if ADJUST
9834 is non-null set it to the length of the string we removed from the
9835 input line. Otherwise return NULL. */
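/* As an illustrative sketch (with `foo' a placeholder symbol),
   parsing the operand

   foo@GOTOFF+4

   in 32-bit mode sets *REL to BFD_RELOC_386_GOTOFF and returns a
   buffer containing "foo +4" for the caller to re-parse. */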
9836 static char *
9837 lex_got (enum bfd_reloc_code_real *rel,
9838 int *adjust,
9839 i386_operand_type *types)
9840 {
9841 /* Some of the relocations depend on the size of the field to be
9842 relocated. But in our callers i386_immediate and i386_displacement
9843 we don't yet know the operand size (this will be set by insn
9844 matching). Hence we record the word32 relocation here,
9845 and adjust the reloc according to the real size in reloc(). */
9846 static const struct {
9847 const char *str;
9848 int len;
9849 const enum bfd_reloc_code_real rel[2];
9850 const i386_operand_type types64;
9851 } gotrel[] = {
9852 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9853 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32,
9854 BFD_RELOC_SIZE32 },
9855 OPERAND_TYPE_IMM32_64 },
9856 #endif
9857 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
9858 BFD_RELOC_X86_64_PLTOFF64 },
9859 OPERAND_TYPE_IMM64 },
9860 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
9861 BFD_RELOC_X86_64_PLT32 },
9862 OPERAND_TYPE_IMM32_32S_DISP32 },
9863 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
9864 BFD_RELOC_X86_64_GOTPLT64 },
9865 OPERAND_TYPE_IMM64_DISP64 },
9866 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
9867 BFD_RELOC_X86_64_GOTOFF64 },
9868 OPERAND_TYPE_IMM64_DISP64 },
9869 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
9870 BFD_RELOC_X86_64_GOTPCREL },
9871 OPERAND_TYPE_IMM32_32S_DISP32 },
9872 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
9873 BFD_RELOC_X86_64_TLSGD },
9874 OPERAND_TYPE_IMM32_32S_DISP32 },
9875 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
9876 _dummy_first_bfd_reloc_code_real },
9877 OPERAND_TYPE_NONE },
9878 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
9879 BFD_RELOC_X86_64_TLSLD },
9880 OPERAND_TYPE_IMM32_32S_DISP32 },
9881 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
9882 BFD_RELOC_X86_64_GOTTPOFF },
9883 OPERAND_TYPE_IMM32_32S_DISP32 },
9884 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
9885 BFD_RELOC_X86_64_TPOFF32 },
9886 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
9887 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
9888 _dummy_first_bfd_reloc_code_real },
9889 OPERAND_TYPE_NONE },
9890 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
9891 BFD_RELOC_X86_64_DTPOFF32 },
9892 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
9893 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
9894 _dummy_first_bfd_reloc_code_real },
9895 OPERAND_TYPE_NONE },
9896 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
9897 _dummy_first_bfd_reloc_code_real },
9898 OPERAND_TYPE_NONE },
9899 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
9900 BFD_RELOC_X86_64_GOT32 },
9901 OPERAND_TYPE_IMM32_32S_64_DISP32 },
9902 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
9903 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
9904 OPERAND_TYPE_IMM32_32S_DISP32 },
9905 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
9906 BFD_RELOC_X86_64_TLSDESC_CALL },
9907 OPERAND_TYPE_IMM32_32S_DISP32 },
9908 };
9909 char *cp;
9910 unsigned int j;
9911
9912 #if defined (OBJ_MAYBE_ELF)
9913 if (!IS_ELF)
9914 return NULL;
9915 #endif
9916
9917 for (cp = input_line_pointer; *cp != '@'; cp++)
9918 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
9919 return NULL;
9920
9921 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
9922 {
9923 int len = gotrel[j].len;
9924 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
9925 {
9926 if (gotrel[j].rel[object_64bit] != 0)
9927 {
9928 int first, second;
9929 char *tmpbuf, *past_reloc;
9930
9931 *rel = gotrel[j].rel[object_64bit];
9932
9933 if (types)
9934 {
9935 if (flag_code != CODE_64BIT)
9936 {
9937 types->bitfield.imm32 = 1;
9938 types->bitfield.disp32 = 1;
9939 }
9940 else
9941 *types = gotrel[j].types64;
9942 }
9943
9944 if (j != 0 && GOT_symbol == NULL)
9945 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
9946
9947 /* The length of the first part of our input line. */
9948 first = cp - input_line_pointer;
9949
9950 /* The second part goes from after the reloc token until
9951 (and including) an end_of_line char or comma. */
9952 past_reloc = cp + 1 + len;
9953 cp = past_reloc;
9954 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
9955 ++cp;
9956 second = cp + 1 - past_reloc;
9957
9958 /* Allocate and copy string. The trailing NUL shouldn't
9959 be necessary, but be safe. */
9960 tmpbuf = XNEWVEC (char, first + second + 2);
9961 memcpy (tmpbuf, input_line_pointer, first);
9962 if (second != 0 && *past_reloc != ' ')
9963 /* Replace the relocation token with ' ', so that
9964 errors like foo@GOTOFF1 will be detected. */
9965 tmpbuf[first++] = ' ';
9966 else
9967 /* Increment length by 1 if the relocation token is
9968 removed. */
9969 len++;
9970 if (adjust)
9971 *adjust = len;
9972 memcpy (tmpbuf + first, past_reloc, second);
9973 tmpbuf[first + second] = '\0';
9974 return tmpbuf;
9975 }
9976
9977 as_bad (_("@%s reloc is not supported with %d-bit output format"),
9978 gotrel[j].str, 1 << (5 + object_64bit));
9979 return NULL;
9980 }
9981 }
9982
9983 /* Might be a symbol version string. Don't as_bad here. */
9984 return NULL;
9985 }
9986 #endif
9987
9988 #ifdef TE_PE
9989 #ifdef lex_got
9990 #undef lex_got
9991 #endif
9992 /* Parse operands of the form
9993 <symbol>@SECREL32+<nnn>
9994
9995 If we find one, set up the correct relocation in RELOC and copy the
9996 input string, minus the `@SECREL32' into a malloc'd buffer for
9997 parsing by the calling routine. Return this buffer, and if ADJUST
9998 is non-null set it to the length of the string we removed from the
9999 input line. Otherwise return NULL.
10000
10001 This function is copied from the ELF version above, adjusted for PE targets. */
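/* Illustrative sketch (with `sym' a placeholder symbol): parsing
   sym@SECREL32 sets *REL to BFD_RELOC_32_SECREL and hands back
   "sym " for re-parsing. */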
10002
10003 static char *
10004 lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
10005 int *adjust ATTRIBUTE_UNUSED,
10006 i386_operand_type *types)
10007 {
10008 static const struct
10009 {
10010 const char *str;
10011 int len;
10012 const enum bfd_reloc_code_real rel[2];
10013 const i386_operand_type types64;
10014 }
10015 gotrel[] =
10016 {
10017 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
10018 BFD_RELOC_32_SECREL },
10019 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
10020 };
10021
10022 char *cp;
10023 unsigned j;
10024
10025 for (cp = input_line_pointer; *cp != '@'; cp++)
10026 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
10027 return NULL;
10028
10029 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
10030 {
10031 int len = gotrel[j].len;
10032
10033 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
10034 {
10035 if (gotrel[j].rel[object_64bit] != 0)
10036 {
10037 int first, second;
10038 char *tmpbuf, *past_reloc;
10039
10040 *rel = gotrel[j].rel[object_64bit];
10041 if (adjust)
10042 *adjust = len;
10043
10044 if (types)
10045 {
10046 if (flag_code != CODE_64BIT)
10047 {
10048 types->bitfield.imm32 = 1;
10049 types->bitfield.disp32 = 1;
10050 }
10051 else
10052 *types = gotrel[j].types64;
10053 }
10054
10055 /* The length of the first part of our input line. */
10056 first = cp - input_line_pointer;
10057
10058 /* The second part goes from after the reloc token until
10059 (and including) an end_of_line char or comma. */
10060 past_reloc = cp + 1 + len;
10061 cp = past_reloc;
10062 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
10063 ++cp;
10064 second = cp + 1 - past_reloc;
10065
10066 /* Allocate and copy string. The trailing NUL shouldn't
10067 be necessary, but be safe. */
10068 tmpbuf = XNEWVEC (char, first + second + 2);
10069 memcpy (tmpbuf, input_line_pointer, first);
10070 if (second != 0 && *past_reloc != ' ')
10071 /* Replace the relocation token with ' ', so that
10072 errors like foo@SECREL321 will be detected. */
10073 tmpbuf[first++] = ' ';
10074 memcpy (tmpbuf + first, past_reloc, second);
10075 tmpbuf[first + second] = '\0';
10076 return tmpbuf;
10077 }
10078
10079 as_bad (_("@%s reloc is not supported with %d-bit output format"),
10080 gotrel[j].str, 1 << (5 + object_64bit));
10081 return NULL;
10082 }
10083 }
10084
10085 /* Might be a symbol version string. Don't as_bad here. */
10086 return NULL;
10087 }
10088
10089 #endif /* TE_PE */
10090
10091 bfd_reloc_code_real_type
10092 x86_cons (expressionS *exp, int size)
10093 {
10094 bfd_reloc_code_real_type got_reloc = NO_RELOC;
10095
10096 intel_syntax = -intel_syntax;
10097
10098 exp->X_md = 0;
10099 if (size == 4 || (object_64bit && size == 8))
10100 {
10101 /* Handle @GOTOFF and the like in an expression. */
10102 char *save;
10103 char *gotfree_input_line;
10104 int adjust = 0;
10105
10106 save = input_line_pointer;
10107 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
10108 if (gotfree_input_line)
10109 input_line_pointer = gotfree_input_line;
10110
10111 expression (exp);
10112
10113 if (gotfree_input_line)
10114 {
10115 /* expression () has merrily parsed up to the end of line,
10116 or a comma - in the wrong buffer. Transfer how far
10117 input_line_pointer has moved to the right buffer. */
10118 input_line_pointer = (save
10119 + (input_line_pointer - gotfree_input_line)
10120 + adjust);
10121 free (gotfree_input_line);
10122 if (exp->X_op == O_constant
10123 || exp->X_op == O_absent
10124 || exp->X_op == O_illegal
10125 || exp->X_op == O_register
10126 || exp->X_op == O_big)
10127 {
10128 char c = *input_line_pointer;
10129 *input_line_pointer = 0;
10130 as_bad (_("missing or invalid expression `%s'"), save);
10131 *input_line_pointer = c;
10132 }
10133 else if ((got_reloc == BFD_RELOC_386_PLT32
10134 || got_reloc == BFD_RELOC_X86_64_PLT32)
10135 && exp->X_op != O_symbol)
10136 {
10137 char c = *input_line_pointer;
10138 *input_line_pointer = 0;
10139 as_bad (_("invalid PLT expression `%s'"), save);
10140 *input_line_pointer = c;
10141 }
10142 }
10143 }
10144 else
10145 expression (exp);
10146
10147 intel_syntax = -intel_syntax;
10148
10149 if (intel_syntax)
10150 i386_intel_simplify (exp);
10151
10152 return got_reloc;
10153 }
10154
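/* Emit SIZE bytes of data while requesting signed relocations in
   64-bit mode; used for signed data pseudo-ops (e.g. `.slong',
   assuming the usual md_pseudo_table wiring). */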
10155 static void
10156 signed_cons (int size)
10157 {
10158 if (flag_code == CODE_64BIT)
10159 cons_sign = 1;
10160 cons (size);
10161 cons_sign = -1;
10162 }
10163
10164 #ifdef TE_PE
10165 static void
10166 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
10167 {
10168 expressionS exp;
10169
10170 do
10171 {
10172 expression (&exp);
10173 if (exp.X_op == O_symbol)
10174 exp.X_op = O_secrel;
10175
10176 emit_expr (&exp, 4);
10177 }
10178 while (*input_line_pointer++ == ',');
10179
10180 input_line_pointer--;
10181 demand_empty_rest_of_line ();
10182 }
10183 #endif
10184
10185 /* Handle Vector operations. */
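/* These are the EVEX decorations that may trail an operand, e.g.
   (illustrative operands):

   vaddps (%rax){1to16}, %zmm1, %zmm2    broadcast
   vmovaps %zmm0, %zmm1{%k2}{z}          write mask plus zeroing */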
10186
10187 static char *
10188 check_VecOperations (char *op_string, char *op_end)
10189 {
10190 const reg_entry *mask;
10191 const char *saved;
10192 char *end_op;
10193
10194 while (*op_string
10195 && (op_end == NULL || op_string < op_end))
10196 {
10197 saved = op_string;
10198 if (*op_string == '{')
10199 {
10200 op_string++;
10201
10202 /* Check broadcasts. */
10203 if (strncmp (op_string, "1to", 3) == 0)
10204 {
10205 int bcst_type;
10206
10207 if (i.broadcast)
10208 goto duplicated_vec_op;
10209
10210 op_string += 3;
10211 if (*op_string == '8')
10212 bcst_type = 8;
10213 else if (*op_string == '4')
10214 bcst_type = 4;
10215 else if (*op_string == '2')
10216 bcst_type = 2;
10217 else if (*op_string == '1'
10218 && *(op_string+1) == '6')
10219 {
10220 bcst_type = 16;
10221 op_string++;
10222 }
10223 else
10224 {
10225 as_bad (_("Unsupported broadcast: `%s'"), saved);
10226 return NULL;
10227 }
10228 op_string++;
10229
10230 broadcast_op.type = bcst_type;
10231 broadcast_op.operand = this_operand;
10232 broadcast_op.bytes = 0;
10233 i.broadcast = &broadcast_op;
10234 }
10235 /* Check masking operation. */
10236 else if ((mask = parse_register (op_string, &end_op)) != NULL)
10237 {
10238 if (mask == &bad_reg)
10239 return NULL;
10240
10241 /* k0 can't be used for write mask. */
10242 if (mask->reg_type.bitfield.class != RegMask || !mask->reg_num)
10243 {
10244 as_bad (_("`%s%s' can't be used for write mask"),
10245 register_prefix, mask->reg_name);
10246 return NULL;
10247 }
10248
10249 if (!i.mask)
10250 {
10251 mask_op.mask = mask;
10252 mask_op.zeroing = 0;
10253 mask_op.operand = this_operand;
10254 i.mask = &mask_op;
10255 }
10256 else
10257 {
10258 if (i.mask->mask)
10259 goto duplicated_vec_op;
10260
10261 i.mask->mask = mask;
10262
10263 /* Only "{z}" is allowed here. No need to check
10264 zeroing mask explicitly. */
10265 if (i.mask->operand != this_operand)
10266 {
10267 as_bad (_("invalid write mask `%s'"), saved);
10268 return NULL;
10269 }
10270 }
10271
10272 op_string = end_op;
10273 }
10274 /* Check zeroing-flag for masking operation. */
10275 else if (*op_string == 'z')
10276 {
10277 if (!i.mask)
10278 {
10279 mask_op.mask = NULL;
10280 mask_op.zeroing = 1;
10281 mask_op.operand = this_operand;
10282 i.mask = &mask_op;
10283 }
10284 else
10285 {
10286 if (i.mask->zeroing)
10287 {
10288 duplicated_vec_op:
10289 as_bad (_("duplicated `%s'"), saved);
10290 return NULL;
10291 }
10292
10293 i.mask->zeroing = 1;
10294
10295 /* Only "{%k}" is allowed here. No need to check mask
10296 register explicitly. */
10297 if (i.mask->operand != this_operand)
10298 {
10299 as_bad (_("invalid zeroing-masking `%s'"),
10300 saved);
10301 return NULL;
10302 }
10303 }
10304
10305 op_string++;
10306 }
10307 else
10308 goto unknown_vec_op;
10309
10310 if (*op_string != '}')
10311 {
10312 as_bad (_("missing `}' in `%s'"), saved);
10313 return NULL;
10314 }
10315 op_string++;
10316
10317 /* Strip whitespace since the addition of pseudo prefixes
10318 changed how the scrubber treats '{'. */
10319 if (is_space_char (*op_string))
10320 ++op_string;
10321
10322 continue;
10323 }
10324 unknown_vec_op:
10325 /* We don't know this one. */
10326 as_bad (_("unknown vector operation: `%s'"), saved);
10327 return NULL;
10328 }
10329
10330 if (i.mask && i.mask->zeroing && !i.mask->mask)
10331 {
10332 as_bad (_("zeroing-masking only allowed with write mask"));
10333 return NULL;
10334 }
10335
10336 return op_string;
10337 }
10338
10339 static int
10340 i386_immediate (char *imm_start)
10341 {
10342 char *save_input_line_pointer;
10343 char *gotfree_input_line;
10344 segT exp_seg = 0;
10345 expressionS *exp;
10346 i386_operand_type types;
10347
10348 operand_type_set (&types, ~0);
10349
10350 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
10351 {
10352 as_bad (_("at most %d immediate operands are allowed"),
10353 MAX_IMMEDIATE_OPERANDS);
10354 return 0;
10355 }
10356
10357 exp = &im_expressions[i.imm_operands++];
10358 i.op[this_operand].imms = exp;
10359
10360 if (is_space_char (*imm_start))
10361 ++imm_start;
10362
10363 save_input_line_pointer = input_line_pointer;
10364 input_line_pointer = imm_start;
10365
10366 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
10367 if (gotfree_input_line)
10368 input_line_pointer = gotfree_input_line;
10369
10370 exp_seg = expression (exp);
10371
10372 SKIP_WHITESPACE ();
10373
10374 /* Handle vector operations. */
10375 if (*input_line_pointer == '{')
10376 {
10377 input_line_pointer = check_VecOperations (input_line_pointer,
10378 NULL);
10379 if (input_line_pointer == NULL)
10380 return 0;
10381 }
10382
10383 if (*input_line_pointer)
10384 as_bad (_("junk `%s' after expression"), input_line_pointer);
10385
10386 input_line_pointer = save_input_line_pointer;
10387 if (gotfree_input_line)
10388 {
10389 free (gotfree_input_line);
10390
10391 if (exp->X_op == O_constant || exp->X_op == O_register)
10392 exp->X_op = O_illegal;
10393 }
10394
10395 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
10396 }
10397
10398 static int
10399 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
10400 i386_operand_type types, const char *imm_start)
10401 {
10402 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
10403 {
10404 if (imm_start)
10405 as_bad (_("missing or invalid immediate expression `%s'"),
10406 imm_start);
10407 return 0;
10408 }
10409 else if (exp->X_op == O_constant)
10410 {
10411 /* Size it properly later. */
10412 i.types[this_operand].bitfield.imm64 = 1;
10413 /* If not 64bit, sign-extend val. */
10414 if (flag_code != CODE_64BIT
10415 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
10416 exp->X_add_number
10417 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
10418 }
10419 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10420 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
10421 && exp_seg != absolute_section
10422 && exp_seg != text_section
10423 && exp_seg != data_section
10424 && exp_seg != bss_section
10425 && exp_seg != undefined_section
10426 && !bfd_is_com_section (exp_seg))
10427 {
10428 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
10429 return 0;
10430 }
10431 #endif
10432 else if (!intel_syntax && exp_seg == reg_section)
10433 {
10434 if (imm_start)
10435 as_bad (_("illegal immediate register operand %s"), imm_start);
10436 return 0;
10437 }
10438 else
10439 {
10440 /* This is an address. The size of the address will be
10441 determined later, depending on destination register,
10442 suffix, or the default for the section. */
10443 i.types[this_operand].bitfield.imm8 = 1;
10444 i.types[this_operand].bitfield.imm16 = 1;
10445 i.types[this_operand].bitfield.imm32 = 1;
10446 i.types[this_operand].bitfield.imm32s = 1;
10447 i.types[this_operand].bitfield.imm64 = 1;
10448 i.types[this_operand] = operand_type_and (i.types[this_operand],
10449 types);
10450 }
10451
10452 return 1;
10453 }
10454
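/* Parse the scale factor in a base/index memory operand, e.g. the
   `4' in 8(%ebx,%esi,4), storing log2 of it (here 2) in
   i.log2_scale_factor. Return a pointer past the scale, or NULL
   on error. */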
10455 static char *
10456 i386_scale (char *scale)
10457 {
10458 offsetT val;
10459 char *save = input_line_pointer;
10460
10461 input_line_pointer = scale;
10462 val = get_absolute_expression ();
10463
10464 switch (val)
10465 {
10466 case 1:
10467 i.log2_scale_factor = 0;
10468 break;
10469 case 2:
10470 i.log2_scale_factor = 1;
10471 break;
10472 case 4:
10473 i.log2_scale_factor = 2;
10474 break;
10475 case 8:
10476 i.log2_scale_factor = 3;
10477 break;
10478 default:
10479 {
10480 char sep = *input_line_pointer;
10481
10482 *input_line_pointer = '\0';
10483 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
10484 scale);
10485 *input_line_pointer = sep;
10486 input_line_pointer = save;
10487 return NULL;
10488 }
10489 }
10490 if (i.log2_scale_factor != 0 && i.index_reg == 0)
10491 {
10492 as_warn (_("scale factor of %d without an index register"),
10493 1 << i.log2_scale_factor);
10494 i.log2_scale_factor = 0;
10495 }
10496 scale = input_line_pointer;
10497 input_line_pointer = save;
10498 return scale;
10499 }
10500
10501 static int
10502 i386_displacement (char *disp_start, char *disp_end)
10503 {
10504 expressionS *exp;
10505 segT exp_seg = 0;
10506 char *save_input_line_pointer;
10507 char *gotfree_input_line;
10508 int override;
10509 i386_operand_type bigdisp, types = anydisp;
10510 int ret;
10511
10512 if (i.disp_operands == MAX_MEMORY_OPERANDS)
10513 {
10514 as_bad (_("at most %d displacement operands are allowed"),
10515 MAX_MEMORY_OPERANDS);
10516 return 0;
10517 }
10518
10519 operand_type_set (&bigdisp, 0);
10520 if (i.jumpabsolute
10521 || i.types[this_operand].bitfield.baseindex
10522 || (current_templates->start->opcode_modifier.jump != JUMP
10523 && current_templates->start->opcode_modifier.jump != JUMP_DWORD))
10524 {
10525 i386_addressing_mode ();
10526 override = (i.prefix[ADDR_PREFIX] != 0);
10527 if (flag_code == CODE_64BIT)
10528 {
10529 if (!override)
10530 {
10531 bigdisp.bitfield.disp32s = 1;
10532 bigdisp.bitfield.disp64 = 1;
10533 }
10534 else
10535 bigdisp.bitfield.disp32 = 1;
10536 }
10537 else if ((flag_code == CODE_16BIT) ^ override)
10538 bigdisp.bitfield.disp16 = 1;
10539 else
10540 bigdisp.bitfield.disp32 = 1;
10541 }
10542 else
10543 {
10544 /* For PC-relative branches, the width of the displacement may be
10545 dependent upon data size, but is never dependent upon address size.
10546 Also make sure to not unintentionally match against a non-PC-relative
10547 branch template. */
10548 static templates aux_templates;
10549 const insn_template *t = current_templates->start;
10550 bfd_boolean has_intel64 = FALSE;
10551
10552 aux_templates.start = t;
10553 while (++t < current_templates->end)
10554 {
10555 if (t->opcode_modifier.jump
10556 != current_templates->start->opcode_modifier.jump)
10557 break;
10558 if ((t->opcode_modifier.isa64 >= INTEL64))
10559 has_intel64 = TRUE;
10560 }
10561 if (t < current_templates->end)
10562 {
10563 aux_templates.end = t;
10564 current_templates = &aux_templates;
10565 }
10566
10567 override = (i.prefix[DATA_PREFIX] != 0);
10568 if (flag_code == CODE_64BIT)
10569 {
10570 if ((override || i.suffix == WORD_MNEM_SUFFIX)
10571 && (!intel64 || !has_intel64))
10572 bigdisp.bitfield.disp16 = 1;
10573 else
10574 bigdisp.bitfield.disp32s = 1;
10575 }
10576 else
10577 {
10578 if (!override)
10579 override = (i.suffix == (flag_code != CODE_16BIT
10580 ? WORD_MNEM_SUFFIX
10581 : LONG_MNEM_SUFFIX));
10582 bigdisp.bitfield.disp32 = 1;
10583 if ((flag_code == CODE_16BIT) ^ override)
10584 {
10585 bigdisp.bitfield.disp32 = 0;
10586 bigdisp.bitfield.disp16 = 1;
10587 }
10588 }
10589 }
10590 i.types[this_operand] = operand_type_or (i.types[this_operand],
10591 bigdisp);
10592
10593 exp = &disp_expressions[i.disp_operands];
10594 i.op[this_operand].disps = exp;
10595 i.disp_operands++;
10596 save_input_line_pointer = input_line_pointer;
10597 input_line_pointer = disp_start;
10598 END_STRING_AND_SAVE (disp_end);
10599
10600 #ifndef GCC_ASM_O_HACK
10601 #define GCC_ASM_O_HACK 0
10602 #endif
10603 #if GCC_ASM_O_HACK
10604 END_STRING_AND_SAVE (disp_end + 1);
10605 if (i.types[this_operand].bitfield.baseindex
10606 && displacement_string_end[-1] == '+')
10607 {
10608 /* This hack is to avoid a warning when using the "o"
10609 constraint within gcc asm statements.
10610 For instance:
10611
10612 #define _set_tssldt_desc(n,addr,limit,type) \
10613 __asm__ __volatile__ ( \
10614 "movw %w2,%0\n\t" \
10615 "movw %w1,2+%0\n\t" \
10616 "rorl $16,%1\n\t" \
10617 "movb %b1,4+%0\n\t" \
10618 "movb %4,5+%0\n\t" \
10619 "movb $0,6+%0\n\t" \
10620 "movb %h1,7+%0\n\t" \
10621 "rorl $16,%1" \
10622 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
10623
10624 This works great except that the output assembler ends
10625 up looking a bit weird if it turns out that there is
10626 no offset. You end up producing code that looks like:
10627
10628 #APP
10629 movw $235,(%eax)
10630 movw %dx,2+(%eax)
10631 rorl $16,%edx
10632 movb %dl,4+(%eax)
10633 movb $137,5+(%eax)
10634 movb $0,6+(%eax)
10635 movb %dh,7+(%eax)
10636 rorl $16,%edx
10637 #NO_APP
10638
10639 So here we provide the missing zero. */
10640
10641 *displacement_string_end = '0';
10642 }
10643 #endif
10644 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
10645 if (gotfree_input_line)
10646 input_line_pointer = gotfree_input_line;
10647
10648 exp_seg = expression (exp);
10649
10650 SKIP_WHITESPACE ();
10651 if (*input_line_pointer)
10652 as_bad (_("junk `%s' after expression"), input_line_pointer);
10653 #if GCC_ASM_O_HACK
10654 RESTORE_END_STRING (disp_end + 1);
10655 #endif
10656 input_line_pointer = save_input_line_pointer;
10657 if (gotfree_input_line)
10658 {
10659 free (gotfree_input_line);
10660
10661 if (exp->X_op == O_constant || exp->X_op == O_register)
10662 exp->X_op = O_illegal;
10663 }
10664
10665 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
10666
10667 RESTORE_END_STRING (disp_end);
10668
10669 return ret;
10670 }
10671
10672 static int
10673 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
10674 i386_operand_type types, const char *disp_start)
10675 {
10676 i386_operand_type bigdisp;
10677 int ret = 1;
10678
10679 /* We do this to make sure that the section symbol is in
10680 the symbol table. We will ultimately change the relocation
10681 to be relative to the beginning of the section. */
10682 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
10683 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
10684 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
10685 {
10686 if (exp->X_op != O_symbol)
10687 goto inv_disp;
10688
10689 if (S_IS_LOCAL (exp->X_add_symbol)
10690 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
10691 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
10692 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
10693 exp->X_op = O_subtract;
10694 exp->X_op_symbol = GOT_symbol;
10695 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
10696 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
10697 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
10698 i.reloc[this_operand] = BFD_RELOC_64;
10699 else
10700 i.reloc[this_operand] = BFD_RELOC_32;
10701 }
10702
10703 else if (exp->X_op == O_absent
10704 || exp->X_op == O_illegal
10705 || exp->X_op == O_big)
10706 {
10707 inv_disp:
10708 as_bad (_("missing or invalid displacement expression `%s'"),
10709 disp_start);
10710 ret = 0;
10711 }
10712
10713 else if (flag_code == CODE_64BIT
10714 && !i.prefix[ADDR_PREFIX]
10715 && exp->X_op == O_constant)
10716 {
10717 /* Since the displacement is sign-extended to 64bit, don't allow
10718 disp32 and turn off disp32s if they are out of range. */
10719 i.types[this_operand].bitfield.disp32 = 0;
10720 if (!fits_in_signed_long (exp->X_add_number))
10721 {
10722 i.types[this_operand].bitfield.disp32s = 0;
10723 if (i.types[this_operand].bitfield.baseindex)
10724 {
10725 as_bad (_("0x%lx out range of signed 32bit displacement"),
10726 (long) exp->X_add_number);
10727 ret = 0;
10728 }
10729 }
10730 }
10731
10732 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10733 else if (exp->X_op != O_constant
10734 && OUTPUT_FLAVOR == bfd_target_aout_flavour
10735 && exp_seg != absolute_section
10736 && exp_seg != text_section
10737 && exp_seg != data_section
10738 && exp_seg != bss_section
10739 && exp_seg != undefined_section
10740 && !bfd_is_com_section (exp_seg))
10741 {
10742 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
10743 ret = 0;
10744 }
10745 #endif
10746
10747 if (current_templates->start->opcode_modifier.jump == JUMP_BYTE
10748 /* Constants get taken care of by optimize_disp(). */
10749 && exp->X_op != O_constant)
10750 i.types[this_operand].bitfield.disp8 = 1;
10751
10752 /* Check if this is a displacement only operand. */
10753 bigdisp = i.types[this_operand];
10754 bigdisp.bitfield.disp8 = 0;
10755 bigdisp.bitfield.disp16 = 0;
10756 bigdisp.bitfield.disp32 = 0;
10757 bigdisp.bitfield.disp32s = 0;
10758 bigdisp.bitfield.disp64 = 0;
10759 if (operand_type_all_zero (&bigdisp))
10760 i.types[this_operand] = operand_type_and (i.types[this_operand],
10761 types);
10762
10763 return ret;
10764 }
10765
10766 /* Return the active addressing mode, taking address override and
10767 registers forming the address into consideration. Update the
10768 address override prefix if necessary. */
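/* For instance (illustrative), `mov (%esi), %ax' assembled in 16-bit
   mode uses a 32-bit base register, so CODE_32BIT is returned and a
   0x67 address-size prefix is added when INFER_ADDR_PREFIX is set. */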
10769
10770 static enum flag_code
10771 i386_addressing_mode (void)
10772 {
10773 enum flag_code addr_mode;
10774
10775 if (i.prefix[ADDR_PREFIX])
10776 addr_mode = flag_code == CODE_32BIT ? CODE_16BIT : CODE_32BIT;
10777 else if (flag_code == CODE_16BIT
10778 && current_templates->start->cpu_flags.bitfield.cpumpx
10779 /* Avoid replacing the "16-bit addressing not allowed" diagnostic
10780 from md_assemble() by "is not a valid base/index expression"
10781 when there is a base and/or index. */
10782 && !i.types[this_operand].bitfield.baseindex)
10783 {
10784 /* MPX insn memory operands with neither base nor index must be forced
10785 to use 32-bit addressing in 16-bit mode. */
10786 addr_mode = CODE_32BIT;
10787 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
10788 ++i.prefixes;
10789 gas_assert (!i.types[this_operand].bitfield.disp16);
10790 gas_assert (!i.types[this_operand].bitfield.disp32);
10791 }
10792 else
10793 {
10794 addr_mode = flag_code;
10795
10796 #if INFER_ADDR_PREFIX
10797 if (i.mem_operands == 0)
10798 {
10799 /* Infer address prefix from the first memory operand. */
10800 const reg_entry *addr_reg = i.base_reg;
10801
10802 if (addr_reg == NULL)
10803 addr_reg = i.index_reg;
10804
10805 if (addr_reg)
10806 {
10807 if (addr_reg->reg_type.bitfield.dword)
10808 addr_mode = CODE_32BIT;
10809 else if (flag_code != CODE_64BIT
10810 && addr_reg->reg_type.bitfield.word)
10811 addr_mode = CODE_16BIT;
10812
10813 if (addr_mode != flag_code)
10814 {
10815 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
10816 i.prefixes += 1;
10817 /* Change the size of any displacement too. At most one
10818 of Disp16 or Disp32 is set.
10819 FIXME. There doesn't seem to be any real need for
10820 separate Disp16 and Disp32 flags. The same goes for
10821 Imm16 and Imm32. Removing them would probably clean
10822 up the code quite a lot. */
10823 if (flag_code != CODE_64BIT
10824 && (i.types[this_operand].bitfield.disp16
10825 || i.types[this_operand].bitfield.disp32))
10826 i.types[this_operand]
10827 = operand_type_xor (i.types[this_operand], disp16_32);
10828 }
10829 }
10830 }
10831 #endif
10832 }
10833
10834 return addr_mode;
10835 }
10836
10837 /* Make sure the memory operand we've been dealt is valid.
10838 Return 1 on success, 0 on a failure. */
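/* For example (illustrative), `movl (%eax,%bx), %ecx' mixes 32-bit
   and 16-bit address registers and is rejected here as not being a
   valid base/index expression. */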
10839
10840 static int
10841 i386_index_check (const char *operand_string)
10842 {
10843 const char *kind = "base/index";
10844 enum flag_code addr_mode = i386_addressing_mode ();
10845
10846 if (current_templates->start->opcode_modifier.isstring
10847 && !current_templates->start->cpu_flags.bitfield.cpupadlock
10848 && (current_templates->end[-1].opcode_modifier.isstring
10849 || i.mem_operands))
10850 {
10851 /* Memory operands of string insns are special in that they only allow
10852 a single register (rDI, rSI, or rBX) as their memory address. */
10853 const reg_entry *expected_reg;
10854 static const char *di_si[][2] =
10855 {
10856 { "esi", "edi" },
10857 { "si", "di" },
10858 { "rsi", "rdi" }
10859 };
10860 static const char *bx[] = { "ebx", "bx", "rbx" };
10861
10862 kind = "string address";
10863
10864 if (current_templates->start->opcode_modifier.repprefixok)
10865 {
10866 int es_op = current_templates->end[-1].opcode_modifier.isstring
10867 - IS_STRING_ES_OP0;
10868 int op = 0;
10869
10870 if (!current_templates->end[-1].operand_types[0].bitfield.baseindex
10871 || ((!i.mem_operands != !intel_syntax)
10872 && current_templates->end[-1].operand_types[1]
10873 .bitfield.baseindex))
10874 op = 1;
10875 expected_reg = hash_find (reg_hash, di_si[addr_mode][op == es_op]);
10876 }
10877 else
10878 expected_reg = hash_find (reg_hash, bx[addr_mode]);
10879
10880 if (i.base_reg != expected_reg
10881 || i.index_reg
10882 || operand_type_check (i.types[this_operand], disp))
10883 {
10884 /* The second memory operand must have the same size as
10885 the first one. */
10886 if (i.mem_operands
10887 && i.base_reg
10888 && !((addr_mode == CODE_64BIT
10889 && i.base_reg->reg_type.bitfield.qword)
10890 || (addr_mode == CODE_32BIT
10891 ? i.base_reg->reg_type.bitfield.dword
10892 : i.base_reg->reg_type.bitfield.word)))
10893 goto bad_address;
10894
10895 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
10896 operand_string,
10897 intel_syntax ? '[' : '(',
10898 register_prefix,
10899 expected_reg->reg_name,
10900 intel_syntax ? ']' : ')');
10901 return 1;
10902 }
10903 else
10904 return 1;
10905
10906 bad_address:
10907 as_bad (_("`%s' is not a valid %s expression"),
10908 operand_string, kind);
10909 return 0;
10910 }
10911 else
10912 {
10913 if (addr_mode != CODE_16BIT)
10914 {
10915 /* 32-bit/64-bit checks. */
10916 if ((i.base_reg
10917 && ((addr_mode == CODE_64BIT
10918 ? !i.base_reg->reg_type.bitfield.qword
10919 : !i.base_reg->reg_type.bitfield.dword)
10920 || (i.index_reg && i.base_reg->reg_num == RegIP)
10921 || i.base_reg->reg_num == RegIZ))
10922 || (i.index_reg
10923 && !i.index_reg->reg_type.bitfield.xmmword
10924 && !i.index_reg->reg_type.bitfield.ymmword
10925 && !i.index_reg->reg_type.bitfield.zmmword
10926 && ((addr_mode == CODE_64BIT
10927 ? !i.index_reg->reg_type.bitfield.qword
10928 : !i.index_reg->reg_type.bitfield.dword)
10929 || !i.index_reg->reg_type.bitfield.baseindex)))
10930 goto bad_address;
10931
10932 /* bndmk, bndldx, and bndstx have special restrictions. */
10933 if (current_templates->start->base_opcode == 0xf30f1b
10934 || (current_templates->start->base_opcode & ~1) == 0x0f1a)
10935 {
10936 /* They cannot use RIP-relative addressing. */
10937 if (i.base_reg && i.base_reg->reg_num == RegIP)
10938 {
10939 as_bad (_("`%s' cannot be used here"), operand_string);
10940 return 0;
10941 }
10942
10943 /* bndldx and bndstx ignore their scale factor. */
10944 if (current_templates->start->base_opcode != 0xf30f1b
10945 && i.log2_scale_factor)
10946 as_warn (_("register scaling is being ignored here"));
10947 }
10948 }
10949 else
10950 {
10951 /* 16-bit checks. */
10952 if ((i.base_reg
10953 && (!i.base_reg->reg_type.bitfield.word
10954 || !i.base_reg->reg_type.bitfield.baseindex))
10955 || (i.index_reg
10956 && (!i.index_reg->reg_type.bitfield.word
10957 || !i.index_reg->reg_type.bitfield.baseindex
10958 || !(i.base_reg
10959 && i.base_reg->reg_num < 6
10960 && i.index_reg->reg_num >= 6
10961 && i.log2_scale_factor == 0))))
10962 goto bad_address;
10963 }
10964 }
10965 return 1;
10966 }
10967
10968 /* Handle vector immediates. */
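/* These are the EVEX rounding-control / suppress-all-exceptions
   pseudo-operands {rn-sae}, {rd-sae}, {ru-sae}, {rz-sae} and {sae},
   e.g. (illustrative): vaddps {rz-sae}, %zmm2, %zmm1, %zmm0. */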
10969
10970 static int
10971 RC_SAE_immediate (const char *imm_start)
10972 {
10973 unsigned int match_found, j;
10974 const char *pstr = imm_start;
10975 expressionS *exp;
10976
10977 if (*pstr != '{')
10978 return 0;
10979
10980 pstr++;
10981 match_found = 0;
10982 for (j = 0; j < ARRAY_SIZE (RC_NamesTable); j++)
10983 {
10984 if (!strncmp (pstr, RC_NamesTable[j].name, RC_NamesTable[j].len))
10985 {
10986 if (!i.rounding)
10987 {
10988 rc_op.type = RC_NamesTable[j].type;
10989 rc_op.operand = this_operand;
10990 i.rounding = &rc_op;
10991 }
10992 else
10993 {
10994 as_bad (_("duplicated `%s'"), imm_start);
10995 return 0;
10996 }
10997 pstr += RC_NamesTable[j].len;
10998 match_found = 1;
10999 break;
11000 }
11001 }
11002 if (!match_found)
11003 return 0;
11004
11005 if (*pstr++ != '}')
11006 {
11007 as_bad (_("Missing '}': '%s'"), imm_start);
11008 return 0;
11009 }
11010 /* RC/SAE immediate string should contain nothing more. */
11011 if (*pstr != 0)
11012 {
11013 as_bad (_("Junk after '}': '%s'"), imm_start);
11014 return 0;
11015 }
11016
11017 exp = &im_expressions[i.imm_operands++];
11018 i.op[this_operand].imms = exp;
11019
11020 exp->X_op = O_constant;
11021 exp->X_add_number = 0;
11022 exp->X_add_symbol = (symbolS *) 0;
11023 exp->X_op_symbol = (symbolS *) 0;
11024
11025 i.types[this_operand].bitfield.imm8 = 1;
11026 return 1;
11027 }
11028
11029 /* Only string instructions can have a second memory operand, so
11030 reduce current_templates to just those if it contains any. */
11031 static int
11032 maybe_adjust_templates (void)
11033 {
11034 const insn_template *t;
11035
11036 gas_assert (i.mem_operands == 1);
11037
11038 for (t = current_templates->start; t < current_templates->end; ++t)
11039 if (t->opcode_modifier.isstring)
11040 break;
11041
11042 if (t < current_templates->end)
11043 {
11044 static templates aux_templates;
11045 bfd_boolean recheck;
11046
11047 aux_templates.start = t;
11048 for (; t < current_templates->end; ++t)
11049 if (!t->opcode_modifier.isstring)
11050 break;
11051 aux_templates.end = t;
11052
11053 /* Determine whether to re-check the first memory operand. */
11054 recheck = (aux_templates.start != current_templates->start
11055 || t != current_templates->end);
11056
11057 current_templates = &aux_templates;
11058
11059 if (recheck)
11060 {
11061 i.mem_operands = 0;
11062 if (i.memop1_string != NULL
11063 && i386_index_check (i.memop1_string) == 0)
11064 return 0;
11065 i.mem_operands = 1;
11066 }
11067 }
11068
11069 return 1;
11070 }
11071
11072 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
11073 on error. */
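/* Operand forms handled here include, e.g. (illustrative):
   register %eax, immediate $4, absolute target *%rax, and
   memory references such as -8(%ebp,%esi,4) or %gs:16. */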
11074
11075 static int
11076 i386_att_operand (char *operand_string)
11077 {
11078 const reg_entry *r;
11079 char *end_op;
11080 char *op_string = operand_string;
11081
11082 if (is_space_char (*op_string))
11083 ++op_string;
11084
11085 /* We check for an absolute prefix (differentiating,
11086 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'). */
11087 if (*op_string == ABSOLUTE_PREFIX)
11088 {
11089 ++op_string;
11090 if (is_space_char (*op_string))
11091 ++op_string;
11092 i.jumpabsolute = TRUE;
11093 }
11094
11095 /* Check if operand is a register. */
11096 if ((r = parse_register (op_string, &end_op)) != NULL)
11097 {
11098 i386_operand_type temp;
11099
11100 if (r == &bad_reg)
11101 return 0;
11102
11103 /* Check for a segment override by searching for ':' after a
11104 segment register. */
11105 op_string = end_op;
11106 if (is_space_char (*op_string))
11107 ++op_string;
11108 if (*op_string == ':' && r->reg_type.bitfield.class == SReg)
11109 {
11110 switch (r->reg_num)
11111 {
11112 case 0:
11113 i.seg[i.mem_operands] = &es;
11114 break;
11115 case 1:
11116 i.seg[i.mem_operands] = &cs;
11117 break;
11118 case 2:
11119 i.seg[i.mem_operands] = &ss;
11120 break;
11121 case 3:
11122 i.seg[i.mem_operands] = &ds;
11123 break;
11124 case 4:
11125 i.seg[i.mem_operands] = &fs;
11126 break;
11127 case 5:
11128 i.seg[i.mem_operands] = &gs;
11129 break;
11130 }
11131
11132 /* Skip the ':' and whitespace. */
11133 ++op_string;
11134 if (is_space_char (*op_string))
11135 ++op_string;
11136
11137 if (!is_digit_char (*op_string)
11138 && !is_identifier_char (*op_string)
11139 && *op_string != '('
11140 && *op_string != ABSOLUTE_PREFIX)
11141 {
11142 as_bad (_("bad memory operand `%s'"), op_string);
11143 return 0;
11144 }
11145 /* Handle case of %es:*foo. */
11146 if (*op_string == ABSOLUTE_PREFIX)
11147 {
11148 ++op_string;
11149 if (is_space_char (*op_string))
11150 ++op_string;
11151 i.jumpabsolute = TRUE;
11152 }
11153 goto do_memory_reference;
11154 }
11155
11156 /* Handle vector operations. */
11157 if (*op_string == '{')
11158 {
11159 op_string = check_VecOperations (op_string, NULL);
11160 if (op_string == NULL)
11161 return 0;
11162 }
11163
11164 if (*op_string)
11165 {
11166 as_bad (_("junk `%s' after register"), op_string);
11167 return 0;
11168 }
11169 temp = r->reg_type;
11170 temp.bitfield.baseindex = 0;
11171 i.types[this_operand] = operand_type_or (i.types[this_operand],
11172 temp);
11173 i.types[this_operand].bitfield.unspecified = 0;
11174 i.op[this_operand].regs = r;
11175 i.reg_operands++;
11176 }
11177 else if (*op_string == REGISTER_PREFIX)
11178 {
11179 as_bad (_("bad register name `%s'"), op_string);
11180 return 0;
11181 }
11182 else if (*op_string == IMMEDIATE_PREFIX)
11183 {
11184 ++op_string;
11185 if (i.jumpabsolute)
11186 {
11187 as_bad (_("immediate operand illegal with absolute jump"));
11188 return 0;
11189 }
11190 if (!i386_immediate (op_string))
11191 return 0;
11192 }
11193 else if (RC_SAE_immediate (operand_string))
11194 {
11195 /* If it is a RC or SAE immediate, do nothing. */
11196 ;
11197 }
11198 else if (is_digit_char (*op_string)
11199 || is_identifier_char (*op_string)
11200 || *op_string == '"'
11201 || *op_string == '(')
11202 {
11203 /* This is a memory reference of some sort. */
11204 char *base_string;
11205
11206 /* Start and end of displacement string expression (if found). */
11207 char *displacement_string_start;
11208 char *displacement_string_end;
11209 char *vop_start;
11210
11211 do_memory_reference:
11212 if (i.mem_operands == 1 && !maybe_adjust_templates ())
11213 return 0;
11214 if ((i.mem_operands == 1
11215 && !current_templates->start->opcode_modifier.isstring)
11216 || i.mem_operands == 2)
11217 {
11218 as_bad (_("too many memory references for `%s'"),
11219 current_templates->start->name);
11220 return 0;
11221 }
11222
11223 /* Check for base index form. We detect the base index form by
11224 looking for an ')' at the end of the operand, searching
11225 for the '(' matching it, and finding a REGISTER_PREFIX or ','
11226 after the '('. */
11227 base_string = op_string + strlen (op_string);
11228
11229 /* Handle vector operations. */
11230 vop_start = strchr (op_string, '{');
11231 if (vop_start && vop_start < base_string)
11232 {
11233 if (check_VecOperations (vop_start, base_string) == NULL)
11234 return 0;
11235 base_string = vop_start;
11236 }
11237
11238 --base_string;
11239 if (is_space_char (*base_string))
11240 --base_string;
11241
11242 /* If we only have a displacement, set up for it to be parsed later. */
11243 displacement_string_start = op_string;
11244 displacement_string_end = base_string + 1;
11245
11246 if (*base_string == ')')
11247 {
11248 char *temp_string;
11249 unsigned int parens_balanced = 1;
11250 /* We've already checked that the numbers of left & right ()'s
11251 are equal, so this loop will not be infinite. */
11252 do
11253 {
11254 base_string--;
11255 if (*base_string == ')')
11256 parens_balanced++;
11257 if (*base_string == '(')
11258 parens_balanced--;
11259 }
11260 while (parens_balanced);
11261
11262 temp_string = base_string;
11263
11264 /* Skip past '(' and whitespace. */
11265 ++base_string;
11266 if (is_space_char (*base_string))
11267 ++base_string;
11268
11269 if (*base_string == ','
11270 || ((i.base_reg = parse_register (base_string, &end_op))
11271 != NULL))
11272 {
11273 displacement_string_end = temp_string;
11274
11275 i.types[this_operand].bitfield.baseindex = 1;
11276
11277 if (i.base_reg)
11278 {
11279 if (i.base_reg == &bad_reg)
11280 return 0;
11281 base_string = end_op;
11282 if (is_space_char (*base_string))
11283 ++base_string;
11284 }
11285
11286 /* There may be an index reg or scale factor here. */
11287 if (*base_string == ',')
11288 {
11289 ++base_string;
11290 if (is_space_char (*base_string))
11291 ++base_string;
11292
11293 if ((i.index_reg = parse_register (base_string, &end_op))
11294 != NULL)
11295 {
11296 if (i.index_reg == &bad_reg)
11297 return 0;
11298 base_string = end_op;
11299 if (is_space_char (*base_string))
11300 ++base_string;
11301 if (*base_string == ',')
11302 {
11303 ++base_string;
11304 if (is_space_char (*base_string))
11305 ++base_string;
11306 }
11307 else if (*base_string != ')')
11308 {
11309 as_bad (_("expecting `,' or `)' "
11310 "after index register in `%s'"),
11311 operand_string);
11312 return 0;
11313 }
11314 }
11315 else if (*base_string == REGISTER_PREFIX)
11316 {
11317 end_op = strchr (base_string, ',');
11318 if (end_op)
11319 *end_op = '\0';
11320 as_bad (_("bad register name `%s'"), base_string);
11321 return 0;
11322 }
11323
11324 /* Check for scale factor. */
11325 if (*base_string != ')')
11326 {
11327 char *end_scale = i386_scale (base_string);
11328
11329 if (!end_scale)
11330 return 0;
11331
11332 base_string = end_scale;
11333 if (is_space_char (*base_string))
11334 ++base_string;
11335 if (*base_string != ')')
11336 {
11337 as_bad (_("expecting `)' "
11338 "after scale factor in `%s'"),
11339 operand_string);
11340 return 0;
11341 }
11342 }
11343 else if (!i.index_reg)
11344 {
11345 as_bad (_("expecting index register or scale factor "
11346 "after `,'; got '%c'"),
11347 *base_string);
11348 return 0;
11349 }
11350 }
11351 else if (*base_string != ')')
11352 {
11353 as_bad (_("expecting `,' or `)' "
11354 "after base register in `%s'"),
11355 operand_string);
11356 return 0;
11357 }
11358 }
11359 else if (*base_string == REGISTER_PREFIX)
11360 {
11361 end_op = strchr (base_string, ',');
11362 if (end_op)
11363 *end_op = '\0';
11364 as_bad (_("bad register name `%s'"), base_string);
11365 return 0;
11366 }
11367 }
11368
11369 /* If there's an expression beginning the operand, parse it,
11370 assuming displacement_string_start and
11371 displacement_string_end are meaningful. */
11372 if (displacement_string_start != displacement_string_end)
11373 {
11374 if (!i386_displacement (displacement_string_start,
11375 displacement_string_end))
11376 return 0;
11377 }
11378
11379 /* Special case for (%dx) while doing an input/output op. */
11380 if (i.base_reg
11381 && i.base_reg->reg_type.bitfield.instance == RegD
11382 && i.base_reg->reg_type.bitfield.word
11383 && i.index_reg == 0
11384 && i.log2_scale_factor == 0
11385 && i.seg[i.mem_operands] == 0
11386 && !operand_type_check (i.types[this_operand], disp))
11387 {
11388 i.types[this_operand] = i.base_reg->reg_type;
11389 return 1;
11390 }
11391
11392 if (i386_index_check (operand_string) == 0)
11393 return 0;
11394 i.flags[this_operand] |= Operand_Mem;
11395 if (i.mem_operands == 0)
11396 i.memop1_string = xstrdup (operand_string);
11397 i.mem_operands++;
11398 }
11399 else
11400 {
11401 /* It's not a memory operand; argh! */
11402 as_bad (_("invalid char %s beginning operand %d `%s'"),
11403 output_invalid (*op_string),
11404 this_operand + 1,
11405 op_string);
11406 return 0;
11407 }
11408 return 1; /* Normal return. */
11409 }
11410 \f
11411 /* Calculate the maximum variable size (i.e., excluding fr_fix)
11412 that an rs_machine_dependent frag may reach. */
11413
11414 unsigned int
11415 i386_frag_max_var (fragS *frag)
11416 {
11417 /* The only relaxable frags are for jumps.
11418 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
11419 gas_assert (frag->fr_type == rs_machine_dependent);
11420 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
11421 }
11422
11423 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
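/* Return non-zero if branch target FR_SYMBOL is known to resolve
   within the unit being assembled, i.e. cannot be preempted at run
   time; FR_VAR is the branch's relocation, if any. */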
11424 static int
11425 elf_symbol_resolved_in_segment_p (symbolS *fr_symbol, offsetT fr_var)
11426 {
11427 /* STT_GNU_IFUNC symbol must go through PLT. */
11428 if ((symbol_get_bfdsym (fr_symbol)->flags
11429 & BSF_GNU_INDIRECT_FUNCTION) != 0)
11430 return 0;
11431
11432 if (!S_IS_EXTERNAL (fr_symbol))
11433 /* Symbol may be weak or local. */
11434 return !S_IS_WEAK (fr_symbol);
11435
11436 /* Global symbols with non-default visibility can't be preempted. */
11437 if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol)) != STV_DEFAULT)
11438 return 1;
11439
11440 if (fr_var != NO_RELOC)
11441 switch ((enum bfd_reloc_code_real) fr_var)
11442 {
11443 case BFD_RELOC_386_PLT32:
11444 case BFD_RELOC_X86_64_PLT32:
11445 /* Symbol with PLT relocation may be preempted. */
11446 return 0;
11447 default:
11448 abort ();
11449 }
11450
11451 /* Global symbols with default visibility in a shared library may be
11452 preempted by another definition. */
11453 return !shared;
11454 }
11455 #endif
11456
11457 /* Table 3-2. Macro-Fusible Instructions in Haswell Microarchitecture
11458 Note: this also applies to Skylake and Cascade Lake.
11459 ---------------------------------------------------------------------
11460 | JCC | ADD/SUB/CMP | INC/DEC | TEST/AND |
11461 | ------ | ----------- | ------- | -------- |
11462 | Jo | N | N | Y |
11463 | Jno | N | N | Y |
11464 | Jc/Jb | Y | N | Y |
11465 | Jae/Jnb | Y | N | Y |
11466 | Je/Jz | Y | Y | Y |
11467 | Jne/Jnz | Y | Y | Y |
11468 | Jna/Jbe | Y | N | Y |
11469 | Ja/Jnbe | Y | N | Y |
11470 | Js | N | N | Y |
11471 | Jns | N | N | Y |
11472 | Jp/Jpe | N | N | Y |
11473 | Jnp/Jpo | N | N | Y |
11474 | Jl/Jnge | Y | Y | Y |
11475 | Jge/Jnl | Y | Y | Y |
11476 | Jle/Jng | Y | Y | Y |
11477 | Jg/Jnle | Y | Y | Y |
11478 --------------------------------------------------------------------- */
11479 static int
11480 i386_macro_fusible_p (enum mf_cmp_kind mf_cmp, enum mf_jcc_kind mf_jcc)
11481 {
11482 if (mf_cmp == mf_cmp_alu_cmp)
11483 return ((mf_jcc >= mf_jcc_jc && mf_jcc <= mf_jcc_jna)
11484 || mf_jcc == mf_jcc_jl || mf_jcc == mf_jcc_jle);
11485 if (mf_cmp == mf_cmp_incdec)
11486 return (mf_jcc == mf_jcc_je || mf_jcc == mf_jcc_jl
11487 || mf_jcc == mf_jcc_jle);
11488 if (mf_cmp == mf_cmp_test_and)
11489 return 1;
11490 return 0;
11491 }
11492
11493 /* Return the next non-empty frag. */
11494
11495 static fragS *
11496 i386_next_non_empty_frag (fragS *fragP)
11497 {
11498 /* There may be a frag with a ".fill 0" when there is no room in
11499 the current frag for frag_grow in output_insn. */
11500 for (fragP = fragP->fr_next;
11501 (fragP != NULL
11502 && fragP->fr_type == rs_fill
11503 && fragP->fr_fix == 0);
11504 fragP = fragP->fr_next)
11505 ;
11506 return fragP;
11507 }
11508
11509 /* Return the next jcc frag after BRANCH_PADDING. */
11510
11511 static fragS *
11512 i386_next_fusible_jcc_frag (fragS *maybe_cmp_fragP, fragS *pad_fragP)
11513 {
11514 fragS *branch_fragP;
11515 if (!pad_fragP)
11516 return NULL;
11517
11518 if (pad_fragP->fr_type == rs_machine_dependent
11519 && (TYPE_FROM_RELAX_STATE (pad_fragP->fr_subtype)
11520 == BRANCH_PADDING))
11521 {
11522 branch_fragP = i386_next_non_empty_frag (pad_fragP);
11523 if (branch_fragP->fr_type != rs_machine_dependent)
11524 return NULL;
11525 if (TYPE_FROM_RELAX_STATE (branch_fragP->fr_subtype) == COND_JUMP
11526 && i386_macro_fusible_p (maybe_cmp_fragP->tc_frag_data.mf_type,
11527 pad_fragP->tc_frag_data.mf_type))
11528 return branch_fragP;
11529 }
11530
11531 return NULL;
11532 }
11533
11534 /* Classify BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags. */
11535
11536 static void
11537 i386_classify_machine_dependent_frag (fragS *fragP)
11538 {
11539 fragS *cmp_fragP;
11540 fragS *pad_fragP;
11541 fragS *branch_fragP;
11542 fragS *next_fragP;
11543 unsigned int max_prefix_length;
11544
11545 if (fragP->tc_frag_data.classified)
11546 return;
11547
11548 /* First scan for BRANCH_PADDING and FUSED_JCC_PADDING. Convert
11549 FUSED_JCC_PADDING and merge BRANCH_PADDING. */
11550 for (next_fragP = fragP;
11551 next_fragP != NULL;
11552 next_fragP = next_fragP->fr_next)
11553 {
11554 next_fragP->tc_frag_data.classified = 1;
11555 if (next_fragP->fr_type == rs_machine_dependent)
11556 switch (TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype))
11557 {
11558 case BRANCH_PADDING:
11559 /* The BRANCH_PADDING frag must be followed by a branch
11560 frag. */
11561 branch_fragP = i386_next_non_empty_frag (next_fragP);
11562 next_fragP->tc_frag_data.u.branch_fragP = branch_fragP;
11563 break;
11564 case FUSED_JCC_PADDING:
11565 /* Check if this is a fused jcc:
11566 FUSED_JCC_PADDING
11567 CMP like instruction
11568 BRANCH_PADDING
11569 COND_JUMP
11570 */
11571 cmp_fragP = i386_next_non_empty_frag (next_fragP);
11572 pad_fragP = i386_next_non_empty_frag (cmp_fragP);
11573 branch_fragP = i386_next_fusible_jcc_frag (next_fragP, pad_fragP);
11574 if (branch_fragP)
11575 {
11576 /* The BRANCH_PADDING frag is merged with the
11577 FUSED_JCC_PADDING frag. */
11578 next_fragP->tc_frag_data.u.branch_fragP = branch_fragP;
11579 /* CMP like instruction size. */
11580 next_fragP->tc_frag_data.cmp_size = cmp_fragP->fr_fix;
11581 frag_wane (pad_fragP);
11582 /* Skip to branch_fragP. */
11583 next_fragP = branch_fragP;
11584 }
11585 else if (next_fragP->tc_frag_data.max_prefix_length)
11586 {
11587 /* Turn FUSED_JCC_PADDING into BRANCH_PREFIX if it isn't
11588 a fused jcc. */
11589 next_fragP->fr_subtype
11590 = ENCODE_RELAX_STATE (BRANCH_PREFIX, 0);
11591 next_fragP->tc_frag_data.max_bytes
11592 = next_fragP->tc_frag_data.max_prefix_length;
11593 /* This will be updated in the BRANCH_PREFIX scan. */
11594 next_fragP->tc_frag_data.max_prefix_length = 0;
11595 }
11596 else
11597 frag_wane (next_fragP);
11598 break;
11599 }
11600 }
11601
11602 /* Stop if there is no BRANCH_PREFIX. */
11603 if (!align_branch_prefix_size)
11604 return;
11605
11606 /* Scan for BRANCH_PREFIX. */
11607 for (; fragP != NULL; fragP = fragP->fr_next)
11608 {
11609 if (fragP->fr_type != rs_machine_dependent
11610 || (TYPE_FROM_RELAX_STATE (fragP->fr_subtype)
11611 != BRANCH_PREFIX))
11612 continue;
11613
11614 /* Count all BRANCH_PREFIX frags before BRANCH_PADDING and
11615 FUSED_JCC_PADDING. */
11616 max_prefix_length = 0;
11617 for (next_fragP = fragP;
11618 next_fragP != NULL;
11619 next_fragP = next_fragP->fr_next)
11620 {
11621 if (next_fragP->fr_type == rs_fill)
11622 /* Skip rs_fill frags. */
11623 continue;
11624 else if (next_fragP->fr_type != rs_machine_dependent)
11625 /* Stop for all other frags. */
11626 break;
11627
11628 /* rs_machine_dependent frags. */
11629 if (TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype)
11630 == BRANCH_PREFIX)
11631 {
11632 /* Count BRANCH_PREFIX frags. */
11633 if (max_prefix_length >= MAX_FUSED_JCC_PADDING_SIZE)
11634 {
11635 max_prefix_length = MAX_FUSED_JCC_PADDING_SIZE;
11636 frag_wane (next_fragP);
11637 }
11638 else
11639 max_prefix_length
11640 += next_fragP->tc_frag_data.max_bytes;
11641 }
11642 else if ((TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype)
11643 == BRANCH_PADDING)
11644 || (TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype)
11645 == FUSED_JCC_PADDING))
11646 {
11647 /* Stop at BRANCH_PADDING and FUSED_JCC_PADDING. */
11648 fragP->tc_frag_data.u.padding_fragP = next_fragP;
11649 break;
11650 }
11651 else
11652 /* Stop for other rs_machine_dependent frags. */
11653 break;
11654 }
11655
11656 fragP->tc_frag_data.max_prefix_length = max_prefix_length;
11657
11658 /* Skip to the next frag. */
11659 fragP = next_fragP;
11660 }
11661 }
11662
11663 /* Compute padding size for
11664
11665 FUSED_JCC_PADDING
11666 CMP like instruction
11667 BRANCH_PADDING
11668 COND_JUMP/UNCOND_JUMP
11669
11670 or
11671
11672 BRANCH_PADDING
11673 COND_JUMP/UNCOND_JUMP
11674 */
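/* Worked example, assuming align_branch_power == 5 (a 32-byte
   boundary): if the padding frag ends at offset 30 within the
   32-byte window and the branch needs 4 bytes, offset + size is
   34 >= 32, so padding_size = 32 - 30 = 2 bytes are inserted to
   push the branch past the boundary. */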
11675
11676 static int
11677 i386_branch_padding_size (fragS *fragP, offsetT address)
11678 {
11679 unsigned int offset, size, padding_size;
11680 fragS *branch_fragP = fragP->tc_frag_data.u.branch_fragP;
11681
11682 /* The start address of the BRANCH_PADDING or FUSED_JCC_PADDING frag. */
11683 if (!address)
11684 address = fragP->fr_address;
11685 address += fragP->fr_fix;
11686
11687 /* CMP-like instruction size. */
11688 size = fragP->tc_frag_data.cmp_size;
11689
11690 /* The base size of the branch frag. */
11691 size += branch_fragP->fr_fix;
11692
11693 /* Add opcode and displacement bytes for the rs_machine_dependent
11694 branch frag. */
11695 if (branch_fragP->fr_type == rs_machine_dependent)
11696 size += md_relax_table[branch_fragP->fr_subtype].rlx_length;
11697
11698 /* Check if branch is within boundary and doesn't end at the last
11699 byte. */
11700 offset = address & ((1U << align_branch_power) - 1);
11701 if ((offset + size) >= (1U << align_branch_power))
11702 /* Padding needed to avoid crossing boundary. */
11703 padding_size = (1U << align_branch_power) - offset;
11704 else
11705 /* No padding needed. */
11706 padding_size = 0;
11707
11708 /* The return value may be saved in tc_frag_data.length, which is
11709 an unsigned byte. */
11710 if (!fits_in_unsigned_byte (padding_size))
11711 abort ();
11712
11713 return padding_size;
11714 }
11715
11716 /* i386_generic_table_relax_frag()
11717
11718 Handle BRANCH_PADDING, BRANCH_PREFIX and FUSED_JCC_PADDING frags to
11719 grow/shrink padding to align branch frags. Hand others to
11720 relax_frag(). */
11721
11722 long
11723 i386_generic_table_relax_frag (segT segment, fragS *fragP, long stretch)
11724 {
11725 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PADDING
11726 || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == FUSED_JCC_PADDING)
11727 {
11728 long padding_size = i386_branch_padding_size (fragP, 0);
11729 long grow = padding_size - fragP->tc_frag_data.length;
11730
11731 /* When the BRANCH_PREFIX frag is used, the computed address
11732 must match the actual address and there should be no padding. */
11733 if (fragP->tc_frag_data.padding_address
11734 && (fragP->tc_frag_data.padding_address != fragP->fr_address
11735 || padding_size))
11736 abort ();
11737
11738 /* Update the padding size. */
11739 if (grow)
11740 fragP->tc_frag_data.length = padding_size;
11741
11742 return grow;
11743 }
11744 else if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PREFIX)
11745 {
11746 fragS *padding_fragP, *next_fragP;
11747 long padding_size, left_size, last_size;
11748
11749 padding_fragP = fragP->tc_frag_data.u.padding_fragP;
11750 if (!padding_fragP)
11751 /* Use the padding set by the leading BRANCH_PREFIX frag. */
11752 return (fragP->tc_frag_data.length
11753 - fragP->tc_frag_data.last_length);
11754
11755 /* Compute the relative address of the padding frag the very
11756 first time, when the BRANCH_PREFIX frag sizes are zero. */
11757 if (!fragP->tc_frag_data.padding_address)
11758 fragP->tc_frag_data.padding_address
11759 = padding_fragP->fr_address - (fragP->fr_address - stretch);
11760
11761 /* First update the last length from the previous iteration. */
11762 left_size = fragP->tc_frag_data.prefix_length;
11763 for (next_fragP = fragP;
11764 next_fragP != padding_fragP;
11765 next_fragP = next_fragP->fr_next)
11766 if (next_fragP->fr_type == rs_machine_dependent
11767 && (TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype)
11768 == BRANCH_PREFIX))
11769 {
11770 if (left_size)
11771 {
11772 int max = next_fragP->tc_frag_data.max_bytes;
11773 if (max)
11774 {
11775 int size;
11776 if (max > left_size)
11777 size = left_size;
11778 else
11779 size = max;
11780 left_size -= size;
11781 next_fragP->tc_frag_data.last_length = size;
11782 }
11783 }
11784 else
11785 next_fragP->tc_frag_data.last_length = 0;
11786 }
11787
11788 /* Check the padding size for the padding frag. */
11789 padding_size = i386_branch_padding_size
11790 (padding_fragP, (fragP->fr_address
11791 + fragP->tc_frag_data.padding_address));
11792
11793 last_size = fragP->tc_frag_data.prefix_length;
11794 /* Check if there is a change from the last iteration. */
11795 if (padding_size == last_size)
11796 {
11797 /* Update the expected address of the padding frag. */
11798 padding_fragP->tc_frag_data.padding_address
11799 = (fragP->fr_address + padding_size
11800 + fragP->tc_frag_data.padding_address);
11801 return 0;
11802 }
11803
11804 if (padding_size > fragP->tc_frag_data.max_prefix_length)
11805 {
11806 /* No padding if there is insufficient room. Clear the
11807 expected address of the padding frag. */
11808 padding_fragP->tc_frag_data.padding_address = 0;
11809 padding_size = 0;
11810 }
11811 else
11812 /* Store the expected address of the padding frag. */
11813 padding_fragP->tc_frag_data.padding_address
11814 = (fragP->fr_address + padding_size
11815 + fragP->tc_frag_data.padding_address);
11816
11817 fragP->tc_frag_data.prefix_length = padding_size;
11818
11819 /* Update the length for the current iteration. */
11820 left_size = padding_size;
11821 for (next_fragP = fragP;
11822 next_fragP != padding_fragP;
11823 next_fragP = next_fragP->fr_next)
11824 if (next_fragP->fr_type == rs_machine_dependent
11825 && (TYPE_FROM_RELAX_STATE (next_fragP->fr_subtype)
11826 == BRANCH_PREFIX))
11827 {
11828 if (left_size)
11829 {
11830 int max = next_fragP->tc_frag_data.max_bytes;
11831 if (max)
11832 {
11833 int size;
11834 if (max > left_size)
11835 size = left_size;
11836 else
11837 size = max;
11838 left_size -= size;
11839 next_fragP->tc_frag_data.length = size;
11840 }
11841 }
11842 else
11843 next_fragP->tc_frag_data.length = 0;
11844 }
11845
11846 return (fragP->tc_frag_data.length
11847 - fragP->tc_frag_data.last_length);
11848 }
11849 return relax_frag (segment, fragP, stretch);
11850 }
11851
11852 /* md_estimate_size_before_relax()
11853
11854 Called just before relax() for rs_machine_dependent frags. The x86
11855 assembler uses these frags to handle variable size jump
11856 instructions.
11857
11858 Any symbol that is now undefined will not become defined.
11859 Return the correct fr_subtype in the frag.
11860 Return the initial "guess for variable size of frag" to caller.
11861 The guess is actually the growth beyond the fixed part. Whatever
11862 we do to grow the fixed or variable part contributes to our
11863 returned value. */
11864
11865 int
11866 md_estimate_size_before_relax (fragS *fragP, segT segment)
11867 {
11868 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PADDING
11869 || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PREFIX
11870 || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == FUSED_JCC_PADDING)
11871 {
11872 i386_classify_machine_dependent_frag (fragP);
11873 return fragP->tc_frag_data.length;
11874 }
11875
11876 /* We've already got fragP->fr_subtype right; all we have to do is
11877 check for un-relaxable symbols. On an ELF system, we can't relax
11878 an externally visible symbol, because it may be overridden by a
11879 shared library. */
11880 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
11881 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11882 || (IS_ELF
11883 && !elf_symbol_resolved_in_segment_p (fragP->fr_symbol,
11884 fragP->fr_var))
11885 #endif
11886 #if defined (OBJ_COFF) && defined (TE_PE)
11887 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
11888 && S_IS_WEAK (fragP->fr_symbol))
11889 #endif
11890 )
11891 {
11892 /* Symbol is undefined in this segment, or we need to keep a
11893 reloc so that weak symbols can be overridden. */
11894 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
11895 enum bfd_reloc_code_real reloc_type;
11896 unsigned char *opcode;
11897 int old_fr_fix;
11898
11899 if (fragP->fr_var != NO_RELOC)
11900 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
11901 else if (size == 2)
11902 reloc_type = BFD_RELOC_16_PCREL;
11903 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
11904 else if (need_plt32_p (fragP->fr_symbol))
11905 reloc_type = BFD_RELOC_X86_64_PLT32;
11906 #endif
11907 else
11908 reloc_type = BFD_RELOC_32_PCREL;
11909
11910 old_fr_fix = fragP->fr_fix;
11911 opcode = (unsigned char *) fragP->fr_opcode;
11912
11913 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
11914 {
11915 case UNCOND_JUMP:
11916 /* Make jmp (0xeb) a (d)word displacement jump. */
11917 opcode[0] = 0xe9;
11918 fragP->fr_fix += size;
11919 fix_new (fragP, old_fr_fix, size,
11920 fragP->fr_symbol,
11921 fragP->fr_offset, 1,
11922 reloc_type);
11923 break;
11924
11925 case COND_JUMP86:
11926 if (size == 2
11927 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
11928 {
11929 /* Negate the condition, and branch past an
11930 unconditional jump. */
11931 opcode[0] ^= 1;
11932 opcode[1] = 3;
11933 /* Insert an unconditional jump. */
11934 opcode[2] = 0xe9;
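/* The result is "j!cc .+5; jmp16 target": the inverted 2-byte
   Jcc skips the 3-byte 0xe9 jump when the original condition
   does not hold. */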
11935 /* We added two extra opcode bytes, and have a two byte
11936 offset. */
11937 fragP->fr_fix += 2 + 2;
11938 fix_new (fragP, old_fr_fix + 2, 2,
11939 fragP->fr_symbol,
11940 fragP->fr_offset, 1,
11941 reloc_type);
11942 break;
11943 }
11944 /* Fall through. */
11945
11946 case COND_JUMP:
11947 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
11948 {
11949 fixS *fixP;
11950
11951 fragP->fr_fix += 1;
11952 fixP = fix_new (fragP, old_fr_fix, 1,
11953 fragP->fr_symbol,
11954 fragP->fr_offset, 1,
11955 BFD_RELOC_8_PCREL);
11956 fixP->fx_signed = 1;
11957 break;
11958 }
11959
11960 /* This changes the byte-displacement jump 0x7N
11961 to the (d)word-displacement jump 0x0f,0x8N. */
11962 opcode[1] = opcode[0] + 0x10;
11963 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
11964 /* We've added an opcode byte. */
11965 fragP->fr_fix += 1 + size;
11966 fix_new (fragP, old_fr_fix + 1, size,
11967 fragP->fr_symbol,
11968 fragP->fr_offset, 1,
11969 reloc_type);
11970 break;
11971
11972 default:
11973 BAD_CASE (fragP->fr_subtype);
11974 break;
11975 }
11976 frag_wane (fragP);
11977 return fragP->fr_fix - old_fr_fix;
11978 }
11979
11980 /* Guess size depending on current relax state. Initially the relax
11981 state will correspond to a short jump and we return 1, because
11982 the variable part of the frag (the branch offset) is one byte
11983 long. However, we can relax a section more than once and in that
11984 case we must either set fr_subtype back to the unrelaxed state,
11985 or return the value for the appropriate branch. */
11986 return md_relax_table[fragP->fr_subtype].rlx_length;
11987 }
11988
11989 /* Called after relax() is finished.
11990
11991 In: Address of frag.
11992 fr_type == rs_machine_dependent.
11993 fr_subtype is what the address relaxed to.
11994
11995 Out: Any fixSs and constants are set up.
11996 Caller will turn frag into a ".space 0". */
11997
11998 void
11999 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
12000 fragS *fragP)
12001 {
12002 unsigned char *opcode;
12003 unsigned char *where_to_put_displacement = NULL;
12004 offsetT target_address;
12005 offsetT opcode_address;
12006 unsigned int extension = 0;
12007 offsetT displacement_from_opcode_start;
12008
12009 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PADDING
12010 || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == FUSED_JCC_PADDING
12011 || TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PREFIX)
12012 {
12013 /* Generate nop padding. */
12014 unsigned int size = fragP->tc_frag_data.length;
12015 if (size)
12016 {
12017 if (size > fragP->tc_frag_data.max_bytes)
12018 abort ();
12019
12020 if (flag_debug)
12021 {
12022 const char *msg;
12023 const char *branch = "branch";
12024 const char *prefix = "";
12025 fragS *padding_fragP;
12026 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype)
12027 == BRANCH_PREFIX)
12028 {
12029 padding_fragP = fragP->tc_frag_data.u.padding_fragP;
12030 switch (fragP->tc_frag_data.default_prefix)
12031 {
12032 default:
12033 abort ();
12034 break;
12035 case CS_PREFIX_OPCODE:
12036 prefix = " cs";
12037 break;
12038 case DS_PREFIX_OPCODE:
12039 prefix = " ds";
12040 break;
12041 case ES_PREFIX_OPCODE:
12042 prefix = " es";
12043 break;
12044 case FS_PREFIX_OPCODE:
12045 prefix = " fs";
12046 break;
12047 case GS_PREFIX_OPCODE:
12048 prefix = " gs";
12049 break;
12050 case SS_PREFIX_OPCODE:
12051 prefix = " ss";
12052 break;
12053 }
12054 if (padding_fragP)
12055 msg = _("%s:%u: add %d%s at 0x%llx to align "
12056 "%s within %d-byte boundary\n");
12057 else
12058 msg = _("%s:%u: add additional %d%s at 0x%llx to "
12059 "align %s within %d-byte boundary\n");
12060 }
12061 else
12062 {
12063 padding_fragP = fragP;
12064 msg = _("%s:%u: add %d%s-byte nop at 0x%llx to align "
12065 "%s within %d-byte boundary\n");
12066 }
12067
12068 if (padding_fragP)
12069 switch (padding_fragP->tc_frag_data.branch_type)
12070 {
12071 case align_branch_jcc:
12072 branch = "jcc";
12073 break;
12074 case align_branch_fused:
12075 branch = "fused jcc";
12076 break;
12077 case align_branch_jmp:
12078 branch = "jmp";
12079 break;
12080 case align_branch_call:
12081 branch = "call";
12082 break;
12083 case align_branch_indirect:
12084 branch = "indirect branch";
12085 break;
12086 case align_branch_ret:
12087 branch = "ret";
12088 break;
12089 default:
12090 break;
12091 }
12092
12093 fprintf (stdout, msg,
12094 fragP->fr_file, fragP->fr_line, size, prefix,
12095 (long long) fragP->fr_address, branch,
12096 1 << align_branch_power);
12097 }
12098 if (TYPE_FROM_RELAX_STATE (fragP->fr_subtype) == BRANCH_PREFIX)
12099 memset (fragP->fr_opcode,
12100 fragP->tc_frag_data.default_prefix, size);
12101 else
12102 i386_generate_nops (fragP, (char *) fragP->fr_opcode,
12103 size, 0);
12104 fragP->fr_fix += size;
12105 }
12106 return;
12107 }
12108
12109 opcode = (unsigned char *) fragP->fr_opcode;
12110
12111 /* Address we want to reach in file space. */
12112 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
12113
12114 /* Address opcode resides at in file space. */
12115 opcode_address = fragP->fr_address + fragP->fr_fix;
12116
12117 /* Displacement from opcode start to fill into instruction. */
12118 displacement_from_opcode_start = target_address - opcode_address;
12119
12120 if ((fragP->fr_subtype & BIG) == 0)
12121 {
12122 /* Don't have to change opcode. */
12123 extension = 1; /* 1 opcode + 1 displacement */
12124 where_to_put_displacement = &opcode[1];
12125 }
12126 else
12127 {
12128 if (no_cond_jump_promotion
12129 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
12130 as_warn_where (fragP->fr_file, fragP->fr_line,
12131 _("long jump required"));
12132
12133 switch (fragP->fr_subtype)
12134 {
12135 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
12136 extension = 4; /* 1 opcode + 4 displacement */
12137 opcode[0] = 0xe9;
12138 where_to_put_displacement = &opcode[1];
12139 break;
12140
12141 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
12142 extension = 2; /* 1 opcode + 2 displacement */
12143 opcode[0] = 0xe9;
12144 where_to_put_displacement = &opcode[1];
12145 break;
12146
12147 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
12148 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
12149 extension = 5; /* 2 opcode + 4 displacement */
12150 opcode[1] = opcode[0] + 0x10;
12151 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
12152 where_to_put_displacement = &opcode[2];
12153 break;
12154
12155 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
12156 extension = 3; /* 2 opcode + 2 displacement */
12157 opcode[1] = opcode[0] + 0x10;
12158 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
12159 where_to_put_displacement = &opcode[2];
12160 break;
12161
12162 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
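/* Same inverted-Jcc-plus-jmp16 rewrite as performed in
   md_estimate_size_before_relax above. */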
12163 extension = 4;
12164 opcode[0] ^= 1;
12165 opcode[1] = 3;
12166 opcode[2] = 0xe9;
12167 where_to_put_displacement = &opcode[3];
12168 break;
12169
12170 default:
12171 BAD_CASE (fragP->fr_subtype);
12172 break;
12173 }
12174 }
12175
12176 /* If size is less than four we are sure that the operand fits,
12177 but if it's 4, then the displacement could be larger
12178 than +/- 2GB. */
12179 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
12180 && object_64bit
12181 && ((addressT) (displacement_from_opcode_start - extension
12182 + ((addressT) 1 << 31))
12183 > (((addressT) 2 << 31) - 1)))
12184 {
12185 as_bad_where (fragP->fr_file, fragP->fr_line,
12186 _("jump target out of range"));
12187 /* Make us emit 0. */
12188 displacement_from_opcode_start = extension;
12189 }
12190 /* Now put displacement after opcode. */
12191 md_number_to_chars ((char *) where_to_put_displacement,
12192 (valueT) (displacement_from_opcode_start - extension),
12193 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
12194 fragP->fr_fix += extension;
12195 }
12196 \f
12197 /* Apply a fixup (fixP) to segment data, once it has been determined
12198 by our caller that we have all the info we need to fix it up.
12199
12200 Parameter valP is the pointer to the value of the bits.
12201
12202 On the 386, immediates, displacements, and data pointers are all in
12203 the same (little-endian) format, so we don't need to care about which
12204 we are handling. */
12205
12206 void
12207 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
12208 {
12209 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
12210 valueT value = *valP;
12211
12212 #if !defined (TE_Mach)
12213 if (fixP->fx_pcrel)
12214 {
12215 switch (fixP->fx_r_type)
12216 {
12217 default:
12218 break;
12219
12220 case BFD_RELOC_64:
12221 fixP->fx_r_type = BFD_RELOC_64_PCREL;
12222 break;
12223 case BFD_RELOC_32:
12224 case BFD_RELOC_X86_64_32S:
12225 fixP->fx_r_type = BFD_RELOC_32_PCREL;
12226 break;
12227 case BFD_RELOC_16:
12228 fixP->fx_r_type = BFD_RELOC_16_PCREL;
12229 break;
12230 case BFD_RELOC_8:
12231 fixP->fx_r_type = BFD_RELOC_8_PCREL;
12232 break;
12233 }
12234 }
12235
12236 if (fixP->fx_addsy != NULL
12237 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
12238 || fixP->fx_r_type == BFD_RELOC_64_PCREL
12239 || fixP->fx_r_type == BFD_RELOC_16_PCREL
12240 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
12241 && !use_rela_relocations)
12242 {
12243 /* This is a hack. There should be a better way to handle this.
12244 This covers for the fact that bfd_install_relocation will
12245 subtract the current location (for partial_inplace, PC relative
12246 relocations); see more below. */
12247 #ifndef OBJ_AOUT
12248 if (IS_ELF
12249 #ifdef TE_PE
12250 || OUTPUT_FLAVOR == bfd_target_coff_flavour
12251 #endif
12252 )
12253 value += fixP->fx_where + fixP->fx_frag->fr_address;
12254 #endif
12255 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12256 if (IS_ELF)
12257 {
12258 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
12259
12260 if ((sym_seg == seg
12261 || (symbol_section_p (fixP->fx_addsy)
12262 && sym_seg != absolute_section))
12263 && !generic_force_reloc (fixP))
12264 {
12265 /* Yes, we add the values in twice. This is because
12266 bfd_install_relocation subtracts them out again. I think
12267 bfd_install_relocation is broken, but I don't dare change
12268 it. FIXME. */
12269 value += fixP->fx_where + fixP->fx_frag->fr_address;
12270 }
12271 }
12272 #endif
12273 #if defined (OBJ_COFF) && defined (TE_PE)
12274 /* For some reason, the PE format does not store a
12275 section address offset for a PC relative symbol. */
12276 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
12277 || S_IS_WEAK (fixP->fx_addsy))
12278 value += md_pcrel_from (fixP);
12279 #endif
12280 }
12281 #if defined (OBJ_COFF) && defined (TE_PE)
12282 if (fixP->fx_addsy != NULL
12283 && S_IS_WEAK (fixP->fx_addsy)
12284 /* PR 16858: Do not modify weak function references. */
12285 && ! fixP->fx_pcrel)
12286 {
12287 #if !defined (TE_PEP)
12288 /* For x86 PE, weak function symbols are neither PC-relative
12289 nor do they set S_IS_FUNCTION. So the only reliable way
12290 to detect them is to check the flags of their containing
12291 section. */
12292 if (S_GET_SEGMENT (fixP->fx_addsy) != NULL
12293 && S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_CODE)
12294 ;
12295 else
12296 #endif
12297 value -= S_GET_VALUE (fixP->fx_addsy);
12298 }
12299 #endif
12300
12301 /* Fix a few things - the dynamic linker expects certain values here,
12302 and we must not disappoint it. */
12303 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12304 if (IS_ELF && fixP->fx_addsy)
12305 switch (fixP->fx_r_type)
12306 {
12307 case BFD_RELOC_386_PLT32:
12308 case BFD_RELOC_X86_64_PLT32:
12309 /* Make the jump instruction point to the address of the operand.
12310 At runtime we merely add the offset to the actual PLT entry.
12311 NB: Subtract the offset size only for jump instructions. */
12312 if (fixP->fx_pcrel)
12313 value = -4;
12314 break;
12315
12316 case BFD_RELOC_386_TLS_GD:
12317 case BFD_RELOC_386_TLS_LDM:
12318 case BFD_RELOC_386_TLS_IE_32:
12319 case BFD_RELOC_386_TLS_IE:
12320 case BFD_RELOC_386_TLS_GOTIE:
12321 case BFD_RELOC_386_TLS_GOTDESC:
12322 case BFD_RELOC_X86_64_TLSGD:
12323 case BFD_RELOC_X86_64_TLSLD:
12324 case BFD_RELOC_X86_64_GOTTPOFF:
12325 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
12326 value = 0; /* Fully resolved at runtime. No addend. */
12327 /* Fallthrough */
12328 case BFD_RELOC_386_TLS_LE:
12329 case BFD_RELOC_386_TLS_LDO_32:
12330 case BFD_RELOC_386_TLS_LE_32:
12331 case BFD_RELOC_X86_64_DTPOFF32:
12332 case BFD_RELOC_X86_64_DTPOFF64:
12333 case BFD_RELOC_X86_64_TPOFF32:
12334 case BFD_RELOC_X86_64_TPOFF64:
12335 S_SET_THREAD_LOCAL (fixP->fx_addsy);
12336 break;
12337
12338 case BFD_RELOC_386_TLS_DESC_CALL:
12339 case BFD_RELOC_X86_64_TLSDESC_CALL:
12340 value = 0; /* Fully resolved at runtime. No addend. */
12341 S_SET_THREAD_LOCAL (fixP->fx_addsy);
12342 fixP->fx_done = 0;
12343 return;
12344
12345 case BFD_RELOC_VTABLE_INHERIT:
12346 case BFD_RELOC_VTABLE_ENTRY:
12347 fixP->fx_done = 0;
12348 return;
12349
12350 default:
12351 break;
12352 }
12353 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
12354 *valP = value;
12355 #endif /* !defined (TE_Mach) */
12356
12357 /* Are we finished with this relocation now? */
12358 if (fixP->fx_addsy == NULL)
12359 fixP->fx_done = 1;
12360 #if defined (OBJ_COFF) && defined (TE_PE)
12361 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
12362 {
12363 fixP->fx_done = 0;
12364 /* Remember value for tc_gen_reloc. */
12365 fixP->fx_addnumber = value;
12366 /* Clear out the frag for now. */
12367 value = 0;
12368 }
12369 #endif
12370 else if (use_rela_relocations)
12371 {
12372 fixP->fx_no_overflow = 1;
12373 /* Remember value for tc_gen_reloc. */
12374 fixP->fx_addnumber = value;
12375 value = 0;
12376 }
12377
12378 md_number_to_chars (p, value, fixP->fx_size);
12379 }
12380 \f
12381 const char *
12382 md_atof (int type, char *litP, int *sizeP)
12383 {
12384 /* The 386 is little-endian, so the LITTLENUMs are output in
12385 little-endian order (hence the FALSE argument). */
12386 return ieee_md_atof (type, litP, sizeP, FALSE);
12387 }
12388 \f
12389 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
12390
12391 static char *
12392 output_invalid (int c)
12393 {
12394 if (ISPRINT (c))
12395 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
12396 "'%c'", c);
12397 else
12398 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
12399 "(0x%x)", (unsigned char) c);
12400 return output_invalid_buf;
12401 }
12402
12403 /* Verify that @r can be used in the current context. */
12404
12405 static bfd_boolean check_register (const reg_entry *r)
12406 {
12407 if (allow_pseudo_reg)
12408 return TRUE;
12409
12410 if (operand_type_all_zero (&r->reg_type))
12411 return FALSE;
12412
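/* 32-bit registers, %fs/%gs (segment registers beyond the first
   four), and control/debug registers all require at least an
   i386. */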
12413 if ((r->reg_type.bitfield.dword
12414 || (r->reg_type.bitfield.class == SReg && r->reg_num > 3)
12415 || r->reg_type.bitfield.class == RegCR
12416 || r->reg_type.bitfield.class == RegDR)
12417 && !cpu_arch_flags.bitfield.cpui386)
12418 return FALSE;
12419
12420 if (r->reg_type.bitfield.class == RegTR
12421 && (flag_code == CODE_64BIT
12422 || !cpu_arch_flags.bitfield.cpui386
12423 || cpu_arch_isa_flags.bitfield.cpui586
12424 || cpu_arch_isa_flags.bitfield.cpui686))
12425 return FALSE;
12426
12427 if (r->reg_type.bitfield.class == RegMMX && !cpu_arch_flags.bitfield.cpummx)
12428 return FALSE;
12429
12430 if (!cpu_arch_flags.bitfield.cpuavx512f)
12431 {
12432 if (r->reg_type.bitfield.zmmword
12433 || r->reg_type.bitfield.class == RegMask)
12434 return FALSE;
12435
12436 if (!cpu_arch_flags.bitfield.cpuavx)
12437 {
12438 if (r->reg_type.bitfield.ymmword)
12439 return FALSE;
12440
12441 if (!cpu_arch_flags.bitfield.cpusse && r->reg_type.bitfield.xmmword)
12442 return FALSE;
12443 }
12444 }
12445
12446 if (r->reg_type.bitfield.class == RegBND && !cpu_arch_flags.bitfield.cpumpx)
12447 return FALSE;
12448
12449 /* Don't allow the fake index register unless allow_index_reg is non-zero. */
12450 if (!allow_index_reg && r->reg_num == RegIZ)
12451 return FALSE;
12452
12453 /* Upper 16 vector registers are only available with VREX in 64bit
12454 mode, and require EVEX encoding. */
12455 if (r->reg_flags & RegVRex)
12456 {
12457 if (!cpu_arch_flags.bitfield.cpuavx512f
12458 || flag_code != CODE_64BIT)
12459 return FALSE;
12460
12461 if (i.vec_encoding == vex_encoding_default)
12462 i.vec_encoding = vex_encoding_evex;
12463 else if (i.vec_encoding != vex_encoding_evex)
12464 i.vec_encoding = vex_encoding_error;
12465 }
12466
12467 if (((r->reg_flags & (RegRex64 | RegRex)) || r->reg_type.bitfield.qword)
12468 && (!cpu_arch_flags.bitfield.cpulm || r->reg_type.bitfield.class != RegCR)
12469 && flag_code != CODE_64BIT)
12470 return FALSE;
12471
12472 if (r->reg_type.bitfield.class == SReg && r->reg_num == RegFlat
12473 && !intel_syntax)
12474 return FALSE;
12475
12476 return TRUE;
12477 }
12478
12479 /* REG_STRING starts *before* REGISTER_PREFIX. */
12480
12481 static const reg_entry *
12482 parse_real_register (char *reg_string, char **end_op)
12483 {
12484 char *s = reg_string;
12485 char *p;
12486 char reg_name_given[MAX_REG_NAME_SIZE + 1];
12487 const reg_entry *r;
12488
12489 /* Skip possible REGISTER_PREFIX and possible whitespace. */
12490 if (*s == REGISTER_PREFIX)
12491 ++s;
12492
12493 if (is_space_char (*s))
12494 ++s;
12495
12496 p = reg_name_given;
12497 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
12498 {
12499 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
12500 return (const reg_entry *) NULL;
12501 s++;
12502 }
12503
12504 /* For naked regs, make sure that we are not dealing with an identifier.
12505 This prevents confusing an identifier like `eax_var' with register
12506 `eax'. */
12507 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
12508 return (const reg_entry *) NULL;
12509
12510 *end_op = s;
12511
12512 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
12513
12514 /* Handle floating point regs, allowing spaces in the (i) part. */
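/* E.g. "%st(3)" (or, with the spaces allowed below, "%st ( 3 )")
   resolves to the table entry for st(0) plus 3. */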
12515 if (r == i386_regtab /* %st is first entry of table */)
12516 {
12517 if (!cpu_arch_flags.bitfield.cpu8087
12518 && !cpu_arch_flags.bitfield.cpu287
12519 && !cpu_arch_flags.bitfield.cpu387
12520 && !allow_pseudo_reg)
12521 return (const reg_entry *) NULL;
12522
12523 if (is_space_char (*s))
12524 ++s;
12525 if (*s == '(')
12526 {
12527 ++s;
12528 if (is_space_char (*s))
12529 ++s;
12530 if (*s >= '0' && *s <= '7')
12531 {
12532 int fpr = *s - '0';
12533 ++s;
12534 if (is_space_char (*s))
12535 ++s;
12536 if (*s == ')')
12537 {
12538 *end_op = s + 1;
12539 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
12540 know (r);
12541 return r + fpr;
12542 }
12543 }
12544 /* We have "%st(" then garbage. */
12545 return (const reg_entry *) NULL;
12546 }
12547 }
12548
12549 return r && check_register (r) ? r : NULL;
12550 }
12551
12552 /* REG_STRING starts *before* REGISTER_PREFIX. */
12553
12554 static const reg_entry *
12555 parse_register (char *reg_string, char **end_op)
12556 {
12557 const reg_entry *r;
12558
12559 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
12560 r = parse_real_register (reg_string, end_op);
12561 else
12562 r = NULL;
12563 if (!r)
12564 {
12565 char *save = input_line_pointer;
12566 char c;
12567 symbolS *symbolP;
12568
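/* Not a literal register: check for a symbol equated to a
   register, i.e. one whose value expression lives in reg_section
   (such as an alias created with ".set" from a register). */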
12569 input_line_pointer = reg_string;
12570 c = get_symbol_name (&reg_string);
12571 symbolP = symbol_find (reg_string);
12572 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
12573 {
12574 const expressionS *e = symbol_get_value_expression (symbolP);
12575
12576 know (e->X_op == O_register);
12577 know (e->X_add_number >= 0
12578 && (valueT) e->X_add_number < i386_regtab_size);
12579 r = i386_regtab + e->X_add_number;
12580 if (!check_register (r))
12581 {
12582 as_bad (_("register '%s%s' cannot be used here"),
12583 register_prefix, r->reg_name);
12584 r = &bad_reg;
12585 }
12586 *end_op = input_line_pointer;
12587 }
12588 *input_line_pointer = c;
12589 input_line_pointer = save;
12590 }
12591 return r;
12592 }
12593
12594 int
12595 i386_parse_name (char *name, expressionS *e, char *nextcharP)
12596 {
12597 const reg_entry *r;
12598 char *end = input_line_pointer;
12599
12600 *end = *nextcharP;
12601 r = parse_register (name, &input_line_pointer);
12602 if (r && end <= input_line_pointer)
12603 {
12604 *nextcharP = *input_line_pointer;
12605 *input_line_pointer = 0;
12606 if (r != &bad_reg)
12607 {
12608 e->X_op = O_register;
12609 e->X_add_number = r - i386_regtab;
12610 }
12611 else
12612 e->X_op = O_illegal;
12613 return 1;
12614 }
12615 input_line_pointer = end;
12616 *end = 0;
12617 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
12618 }
12619
12620 void
12621 md_operand (expressionS *e)
12622 {
12623 char *end;
12624 const reg_entry *r;
12625
12626 switch (*input_line_pointer)
12627 {
12628 case REGISTER_PREFIX:
12629 r = parse_real_register (input_line_pointer, &end);
12630 if (r)
12631 {
12632 e->X_op = O_register;
12633 e->X_add_number = r - i386_regtab;
12634 input_line_pointer = end;
12635 }
12636 break;
12637
12638 case '[':
12639 gas_assert (intel_syntax);
12640 end = input_line_pointer++;
12641 expression (e);
12642 if (*input_line_pointer == ']')
12643 {
12644 ++input_line_pointer;
12645 e->X_op_symbol = make_expr_symbol (e);
12646 e->X_add_symbol = NULL;
12647 e->X_add_number = 0;
12648 e->X_op = O_index;
12649 }
12650 else
12651 {
12652 e->X_op = O_absent;
12653 input_line_pointer = end;
12654 }
12655 break;
12656 }
12657 }
12658
12659 \f
12660 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12661 const char *md_shortopts = "kVQ:sqnO::";
12662 #else
12663 const char *md_shortopts = "qnO::";
12664 #endif
12665
12666 #define OPTION_32 (OPTION_MD_BASE + 0)
12667 #define OPTION_64 (OPTION_MD_BASE + 1)
12668 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
12669 #define OPTION_MARCH (OPTION_MD_BASE + 3)
12670 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
12671 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
12672 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
12673 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
12674 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
12675 #define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 9)
12676 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
12677 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
12678 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
12679 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
12680 #define OPTION_X32 (OPTION_MD_BASE + 14)
12681 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
12682 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
12683 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
12684 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
12685 #define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
12686 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
12687 #define OPTION_MSHARED (OPTION_MD_BASE + 21)
12688 #define OPTION_MAMD64 (OPTION_MD_BASE + 22)
12689 #define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
12690 #define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
12691 #define OPTION_X86_USED_NOTE (OPTION_MD_BASE + 25)
12692 #define OPTION_MVEXWIG (OPTION_MD_BASE + 26)
12693 #define OPTION_MALIGN_BRANCH_BOUNDARY (OPTION_MD_BASE + 27)
12694 #define OPTION_MALIGN_BRANCH_PREFIX_SIZE (OPTION_MD_BASE + 28)
12695 #define OPTION_MALIGN_BRANCH (OPTION_MD_BASE + 29)
12696 #define OPTION_MBRANCHES_WITH_32B_BOUNDARIES (OPTION_MD_BASE + 30)
12697 #define OPTION_MLFENCE_AFTER_LOAD (OPTION_MD_BASE + 31)
12698 #define OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH (OPTION_MD_BASE + 32)
12699 #define OPTION_MLFENCE_BEFORE_RET (OPTION_MD_BASE + 33)
12700
12701 struct option md_longopts[] =
12702 {
12703 {"32", no_argument, NULL, OPTION_32},
12704 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
12705 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
12706 {"64", no_argument, NULL, OPTION_64},
12707 #endif
12708 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12709 {"x32", no_argument, NULL, OPTION_X32},
12710 {"mshared", no_argument, NULL, OPTION_MSHARED},
12711 {"mx86-used-note", required_argument, NULL, OPTION_X86_USED_NOTE},
12712 #endif
12713 {"divide", no_argument, NULL, OPTION_DIVIDE},
12714 {"march", required_argument, NULL, OPTION_MARCH},
12715 {"mtune", required_argument, NULL, OPTION_MTUNE},
12716 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
12717 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
12718 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
12719 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
12720 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
12721 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
12722 {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
12723 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
12724 {"mvexwig", required_argument, NULL, OPTION_MVEXWIG},
12725 {"madd-bnd-prefix", no_argument, NULL, OPTION_MADD_BND_PREFIX},
12726 {"mevexlig", required_argument, NULL, OPTION_MEVEXLIG},
12727 {"mevexwig", required_argument, NULL, OPTION_MEVEXWIG},
12728 # if defined (TE_PE) || defined (TE_PEP)
12729 {"mbig-obj", no_argument, NULL, OPTION_MBIG_OBJ},
12730 #endif
12731 {"momit-lock-prefix", required_argument, NULL, OPTION_MOMIT_LOCK_PREFIX},
12732 {"mfence-as-lock-add", required_argument, NULL, OPTION_MFENCE_AS_LOCK_ADD},
12733 {"mrelax-relocations", required_argument, NULL, OPTION_MRELAX_RELOCATIONS},
12734 {"mevexrcig", required_argument, NULL, OPTION_MEVEXRCIG},
12735 {"malign-branch-boundary", required_argument, NULL, OPTION_MALIGN_BRANCH_BOUNDARY},
12736 {"malign-branch-prefix-size", required_argument, NULL, OPTION_MALIGN_BRANCH_PREFIX_SIZE},
12737 {"malign-branch", required_argument, NULL, OPTION_MALIGN_BRANCH},
12738 {"mbranches-within-32B-boundaries", no_argument, NULL, OPTION_MBRANCHES_WITH_32B_BOUNDARIES},
12739 {"mlfence-after-load", required_argument, NULL, OPTION_MLFENCE_AFTER_LOAD},
12740 {"mlfence-before-indirect-branch", required_argument, NULL,
12741 OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH},
12742 {"mlfence-before-ret", required_argument, NULL, OPTION_MLFENCE_BEFORE_RET},
12743 {"mamd64", no_argument, NULL, OPTION_MAMD64},
12744 {"mintel64", no_argument, NULL, OPTION_MINTEL64},
12745 {NULL, no_argument, NULL, 0}
12746 };
12747 size_t md_longopts_size = sizeof (md_longopts);
12748
12749 int
12750 md_parse_option (int c, const char *arg)
12751 {
12752 unsigned int j;
12753 char *arch, *next, *saved, *type;
12754
12755 switch (c)
12756 {
12757 case 'n':
12758 optimize_align_code = 0;
12759 break;
12760
12761 case 'q':
12762 quiet_warnings = 1;
12763 break;
12764
12765 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12766 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
12767 should be emitted or not. FIXME: Not implemented. */
12768 case 'Q':
12769 if ((arg[0] != 'y' && arg[0] != 'n') || arg[1])
12770 return 0;
12771 break;
12772
12773 /* -V: SVR4 argument to print version ID. */
12774 case 'V':
12775 print_version_id ();
12776 break;
12777
12778 /* -k: Ignore for FreeBSD compatibility. */
12779 case 'k':
12780 break;
12781
12782 case 's':
12783 /* -s: On i386 Solaris, this tells the native assembler to use
12784 .stab instead of .stab.excl. We always use .stab anyhow. */
12785 break;
12786
12787 case OPTION_MSHARED:
12788 shared = 1;
12789 break;
12790
12791 case OPTION_X86_USED_NOTE:
12792 if (strcasecmp (arg, "yes") == 0)
12793 x86_used_note = 1;
12794 else if (strcasecmp (arg, "no") == 0)
12795 x86_used_note = 0;
12796 else
12797 as_fatal (_("invalid -mx86-used-note= option: `%s'"), arg);
12798 break;
12799
12801 #endif
12802 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
12803 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
12804 case OPTION_64:
12805 {
12806 const char **list, **l;
12807
12808 list = bfd_target_list ();
12809 for (l = list; *l != NULL; l++)
12810 if (CONST_STRNEQ (*l, "elf64-x86-64")
12811 || strcmp (*l, "coff-x86-64") == 0
12812 || strcmp (*l, "pe-x86-64") == 0
12813 || strcmp (*l, "pei-x86-64") == 0
12814 || strcmp (*l, "mach-o-x86-64") == 0)
12815 {
12816 default_arch = "x86_64";
12817 break;
12818 }
12819 if (*l == NULL)
12820 as_fatal (_("no compiled in support for x86_64"));
12821 free (list);
12822 }
12823 break;
12824 #endif
12825
12826 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
12827 case OPTION_X32:
12828 if (IS_ELF)
12829 {
12830 const char **list, **l;
12831
12832 list = bfd_target_list ();
12833 for (l = list; *l != NULL; l++)
12834 if (CONST_STRNEQ (*l, "elf32-x86-64"))
12835 {
12836 default_arch = "x86_64:32";
12837 break;
12838 }
12839 if (*l == NULL)
12840 as_fatal (_("no compiled in support for 32bit x86_64"));
12841 free (list);
12842 }
12843 else
12844 as_fatal (_("32bit x86_64 is only supported for ELF"));
12845 break;
12846 #endif
12847
12848 case OPTION_32:
12849 default_arch = "i386";
12850 break;
12851
12852 case OPTION_DIVIDE:
12853 #ifdef SVR4_COMMENT_CHARS
12854 {
12855 char *n, *t;
12856 const char *s;
12857
12858 n = XNEWVEC (char, strlen (i386_comment_chars) + 1);
12859 t = n;
12860 for (s = i386_comment_chars; *s != '\0'; s++)
12861 if (*s != '/')
12862 *t++ = *s;
12863 *t = '\0';
12864 i386_comment_chars = n;
12865 }
12866 #endif
12867 break;
12868
12869 case OPTION_MARCH:
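/* ARG has the form CPU[+EXTENSION...][+noEXTENSION...],
   e.g. "-march=core2+avx" (an illustrative combination). */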
12870 saved = xstrdup (arg);
12871 arch = saved;
12872 /* Allow -march=+nosse. */
12873 if (*arch == '+')
12874 arch++;
12875 do
12876 {
12877 if (*arch == '.')
12878 as_fatal (_("invalid -march= option: `%s'"), arg);
12879 next = strchr (arch, '+');
12880 if (next)
12881 *next++ = '\0';
12882 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
12883 {
12884 if (strcmp (arch, cpu_arch [j].name) == 0)
12885 {
12886 /* Processor. */
12887 if (! cpu_arch[j].flags.bitfield.cpui386)
12888 continue;
12889
12890 cpu_arch_name = cpu_arch[j].name;
12891 cpu_sub_arch_name = NULL;
12892 cpu_arch_flags = cpu_arch[j].flags;
12893 cpu_arch_isa = cpu_arch[j].type;
12894 cpu_arch_isa_flags = cpu_arch[j].flags;
12895 if (!cpu_arch_tune_set)
12896 {
12897 cpu_arch_tune = cpu_arch_isa;
12898 cpu_arch_tune_flags = cpu_arch_isa_flags;
12899 }
12900 break;
12901 }
12902 else if (*cpu_arch [j].name == '.'
12903 && strcmp (arch, cpu_arch [j].name + 1) == 0)
12904 {
12905 /* ISA extension. */
12906 i386_cpu_flags flags;
12907
12908 flags = cpu_flags_or (cpu_arch_flags,
12909 cpu_arch[j].flags);
12910
12911 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
12912 {
12913 if (cpu_sub_arch_name)
12914 {
12915 char *name = cpu_sub_arch_name;
12916 cpu_sub_arch_name = concat (name,
12917 cpu_arch[j].name,
12918 (const char *) NULL);
12919 free (name);
12920 }
12921 else
12922 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
12923 cpu_arch_flags = flags;
12924 cpu_arch_isa_flags = flags;
12925 }
12926 else
12927 cpu_arch_isa_flags
12928 = cpu_flags_or (cpu_arch_isa_flags,
12929 cpu_arch[j].flags);
12930 break;
12931 }
12932 }
12933
12934 if (j >= ARRAY_SIZE (cpu_arch))
12935 {
12936 /* Disable an ISA extension. */
12937 for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
12938 if (strcmp (arch, cpu_noarch [j].name) == 0)
12939 {
12940 i386_cpu_flags flags;
12941
12942 flags = cpu_flags_and_not (cpu_arch_flags,
12943 cpu_noarch[j].flags);
12944 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
12945 {
12946 if (cpu_sub_arch_name)
12947 {
12948 char *name = cpu_sub_arch_name;
12949 cpu_sub_arch_name = concat (name, arch,
12950 (const char *) NULL);
12951 free (name);
12952 }
12953 else
12954 cpu_sub_arch_name = xstrdup (arch);
12955 cpu_arch_flags = flags;
12956 cpu_arch_isa_flags = flags;
12957 }
12958 break;
12959 }
12960
12961 if (j >= ARRAY_SIZE (cpu_noarch))
12962 j = ARRAY_SIZE (cpu_arch);
12963 }
12964
12965 if (j >= ARRAY_SIZE (cpu_arch))
12966 as_fatal (_("invalid -march= option: `%s'"), arg);
12967
12968 arch = next;
12969 }
12970 while (next != NULL);
12971 free (saved);
12972 break;
12973
12974 case OPTION_MTUNE:
12975 if (*arg == '.')
12976 as_fatal (_("invalid -mtune= option: `%s'"), arg);
12977 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
12978 {
12979 if (strcmp (arg, cpu_arch [j].name) == 0)
12980 {
12981 cpu_arch_tune_set = 1;
12982 cpu_arch_tune = cpu_arch [j].type;
12983 cpu_arch_tune_flags = cpu_arch[j].flags;
12984 break;
12985 }
12986 }
12987 if (j >= ARRAY_SIZE (cpu_arch))
12988 as_fatal (_("invalid -mtune= option: `%s'"), arg);
12989 break;
12990
12991 case OPTION_MMNEMONIC:
12992 if (strcasecmp (arg, "att") == 0)
12993 intel_mnemonic = 0;
12994 else if (strcasecmp (arg, "intel") == 0)
12995 intel_mnemonic = 1;
12996 else
12997 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
12998 break;
12999
13000 case OPTION_MSYNTAX:
13001 if (strcasecmp (arg, "att") == 0)
13002 intel_syntax = 0;
13003 else if (strcasecmp (arg, "intel") == 0)
13004 intel_syntax = 1;
13005 else
13006 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
13007 break;
13008
13009 case OPTION_MINDEX_REG:
13010 allow_index_reg = 1;
13011 break;
13012
13013 case OPTION_MNAKED_REG:
13014 allow_naked_reg = 1;
13015 break;
13016
13017 case OPTION_MSSE2AVX:
13018 sse2avx = 1;
13019 break;
13020
13021 case OPTION_MSSE_CHECK:
13022 if (strcasecmp (arg, "error") == 0)
13023 sse_check = check_error;
13024 else if (strcasecmp (arg, "warning") == 0)
13025 sse_check = check_warning;
13026 else if (strcasecmp (arg, "none") == 0)
13027 sse_check = check_none;
13028 else
13029 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
13030 break;
13031
13032 case OPTION_MOPERAND_CHECK:
13033 if (strcasecmp (arg, "error") == 0)
13034 operand_check = check_error;
13035 else if (strcasecmp (arg, "warning") == 0)
13036 operand_check = check_warning;
13037 else if (strcasecmp (arg, "none") == 0)
13038 operand_check = check_none;
13039 else
13040 as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
13041 break;
13042
13043 case OPTION_MAVXSCALAR:
13044 if (strcasecmp (arg, "128") == 0)
13045 avxscalar = vex128;
13046 else if (strcasecmp (arg, "256") == 0)
13047 avxscalar = vex256;
13048 else
13049 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
13050 break;
13051
13052 case OPTION_MVEXWIG:
13053 if (strcmp (arg, "0") == 0)
13054 vexwig = vexw0;
13055 else if (strcmp (arg, "1") == 0)
13056 vexwig = vexw1;
13057 else
13058 as_fatal (_("invalid -mvexwig= option: `%s'"), arg);
13059 break;
13060
13061 case OPTION_MADD_BND_PREFIX:
13062 add_bnd_prefix = 1;
13063 break;
13064
13065 case OPTION_MEVEXLIG:
13066 if (strcmp (arg, "128") == 0)
13067 evexlig = evexl128;
13068 else if (strcmp (arg, "256") == 0)
13069 evexlig = evexl256;
13070 else if (strcmp (arg, "512") == 0)
13071 evexlig = evexl512;
13072 else
13073 as_fatal (_("invalid -mevexlig= option: `%s'"), arg);
13074 break;
13075
13076 case OPTION_MEVEXRCIG:
13077 if (strcmp (arg, "rne") == 0)
13078 evexrcig = rne;
13079 else if (strcmp (arg, "rd") == 0)
13080 evexrcig = rd;
13081 else if (strcmp (arg, "ru") == 0)
13082 evexrcig = ru;
13083 else if (strcmp (arg, "rz") == 0)
13084 evexrcig = rz;
13085 else
13086 as_fatal (_("invalid -mevexrcig= option: `%s'"), arg);
13087 break;
13088
13089 case OPTION_MEVEXWIG:
13090 if (strcmp (arg, "0") == 0)
13091 evexwig = evexw0;
13092 else if (strcmp (arg, "1") == 0)
13093 evexwig = evexw1;
13094 else
13095 as_fatal (_("invalid -mevexwig= option: `%s'"), arg);
13096 break;
13097
13098 # if defined (TE_PE) || defined (TE_PEP)
13099 case OPTION_MBIG_OBJ:
13100 use_big_obj = 1;
13101 break;
13102 #endif
13103
13104 case OPTION_MOMIT_LOCK_PREFIX:
13105 if (strcasecmp (arg, "yes") == 0)
13106 omit_lock_prefix = 1;
13107 else if (strcasecmp (arg, "no") == 0)
13108 omit_lock_prefix = 0;
13109 else
13110 as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg);
13111 break;
13112
13113 case OPTION_MFENCE_AS_LOCK_ADD:
13114 if (strcasecmp (arg, "yes") == 0)
13115 avoid_fence = 1;
13116 else if (strcasecmp (arg, "no") == 0)
13117 avoid_fence = 0;
13118 else
13119 as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg);
13120 break;
13121
13122 case OPTION_MLFENCE_AFTER_LOAD:
13123 if (strcasecmp (arg, "yes") == 0)
13124 lfence_after_load = 1;
13125 else if (strcasecmp (arg, "no") == 0)
13126 lfence_after_load = 0;
13127 else
13128 as_fatal (_("invalid -mlfence-after-load= option: `%s'"), arg);
13129 break;
13130
13131 case OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH:
13132 if (strcasecmp (arg, "all") == 0)
13133 {
13134 lfence_before_indirect_branch = lfence_branch_all;
13135 if (lfence_before_ret == lfence_before_ret_none)
13136 lfence_before_ret = lfence_before_ret_shl;
13137 }
13138 else if (strcasecmp (arg, "memory") == 0)
13139 lfence_before_indirect_branch = lfence_branch_memory;
13140 else if (strcasecmp (arg, "register") == 0)
13141 lfence_before_indirect_branch = lfence_branch_register;
13142 else if (strcasecmp (arg, "none") == 0)
13143 lfence_before_indirect_branch = lfence_branch_none;
13144 else
13145 as_fatal (_("invalid -mlfence-before-indirect-branch= option: `%s'"),
13146 arg);
13147 break;
13148
13149 case OPTION_MLFENCE_BEFORE_RET:
13150 if (strcasecmp (arg, "or") == 0)
13151 lfence_before_ret = lfence_before_ret_or;
13152 else if (strcasecmp (arg, "not") == 0)
13153 lfence_before_ret = lfence_before_ret_not;
13154 else if (strcasecmp (arg, "shl") == 0 || strcasecmp (arg, "yes") == 0)
13155 lfence_before_ret = lfence_before_ret_shl;
13156 else if (strcasecmp (arg, "none") == 0)
13157 lfence_before_ret = lfence_before_ret_none;
13158 else
13159 as_fatal (_("invalid -mlfence-before-ret= option: `%s'"),
13160 arg);
13161 break;
13162
13163 case OPTION_MRELAX_RELOCATIONS:
13164 if (strcasecmp (arg, "yes") == 0)
13165 generate_relax_relocations = 1;
13166 else if (strcasecmp (arg, "no") == 0)
13167 generate_relax_relocations = 0;
13168 else
13169 as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg);
13170 break;
13171
13172 case OPTION_MALIGN_BRANCH_BOUNDARY:
13173 {
13174 char *end;
13175 long int align = strtoul (arg, &end, 0);
13176 if (*end == '\0')
13177 {
13178 if (align == 0)
13179 {
13180 align_branch_power = 0;
13181 break;
13182 }
13183 else if (align >= 16)
13184 {
13185 int align_power;
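/* Shift out trailing zero bits; ALIGN was a power of two
   exactly when a single 1 bit remains. */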
13186 for (align_power = 0;
13187 (align & 1) == 0;
13188 align >>= 1, align_power++)
13189 continue;
13190 /* Limit alignment power to 31. */
13191 if (align == 1 && align_power < 32)
13192 {
13193 align_branch_power = align_power;
13194 break;
13195 }
13196 }
13197 }
13198 as_fatal (_("invalid -malign-branch-boundary= value: %s"), arg);
13199 }
13200 break;
13201
13202 case OPTION_MALIGN_BRANCH_PREFIX_SIZE:
13203 {
13204 char *end;
13205 int align = strtoul (arg, &end, 0);
13206 /* Some processors only support 5 prefixes. */
13207 if (*end == '\0' && align >= 0 && align < 6)
13208 {
13209 align_branch_prefix_size = align;
13210 break;
13211 }
13212 as_fatal (_("invalid -malign-branch-prefix-size= value: %s"),
13213 arg);
13214 }
13215 break;
13216
13217 case OPTION_MALIGN_BRANCH:
13218 align_branch = 0;
13219 saved = xstrdup (arg);
13220 type = saved;
13221 do
13222 {
13223 next = strchr (type, '+');
13224 if (next)
13225 *next++ = '\0';
13226 if (strcasecmp (type, "jcc") == 0)
13227 align_branch |= align_branch_jcc_bit;
13228 else if (strcasecmp (type, "fused") == 0)
13229 align_branch |= align_branch_fused_bit;
13230 else if (strcasecmp (type, "jmp") == 0)
13231 align_branch |= align_branch_jmp_bit;
13232 else if (strcasecmp (type, "call") == 0)
13233 align_branch |= align_branch_call_bit;
13234 else if (strcasecmp (type, "ret") == 0)
13235 align_branch |= align_branch_ret_bit;
13236 else if (strcasecmp (type, "indirect") == 0)
13237 align_branch |= align_branch_indirect_bit;
13238 else
13239 as_fatal (_("invalid -malign-branch= option: `%s'"), arg);
13240 type = next;
13241 }
13242 while (next != NULL);
13243 free (saved);
13244 break;
13245
13246 case OPTION_MBRANCHES_WITH_32B_BOUNDARIES:
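/* Shorthand for -malign-branch-boundary=32
   -malign-branch-prefix-size=5 -malign-branch=jcc+fused+jmp. */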
13247 align_branch_power = 5;
13248 align_branch_prefix_size = 5;
13249 align_branch = (align_branch_jcc_bit
13250 | align_branch_fused_bit
13251 | align_branch_jmp_bit);
13252 break;
13253
13254 case OPTION_MAMD64:
13255 isa64 = amd64;
13256 break;
13257
13258 case OPTION_MINTEL64:
13259 isa64 = intel64;
13260 break;
13261
13262 case 'O':
13263 if (arg == NULL)
13264 {
13265 optimize = 1;
13266 /* Turn off -Os. */
13267 optimize_for_space = 0;
13268 }
13269 else if (*arg == 's')
13270 {
13271 optimize_for_space = 1;
13272 /* Turn on all encoding optimizations. */
13273 optimize = INT_MAX;
13274 }
13275 else
13276 {
13277 optimize = atoi (arg);
13278 /* Turn off -Os. */
13279 optimize_for_space = 0;
13280 }
13281 break;
13282
13283 default:
13284 return 0;
13285 }
13286 return 1;
13287 }
13288
13289 #define MESSAGE_TEMPLATE \
13290 " "
13291
13292 static char *
13293 output_message (FILE *stream, char *p, char *message, char *start,
13294 int *left_p, const char *name, int len)
13295 {
13296 int size = sizeof (MESSAGE_TEMPLATE);
13297 int left = *left_p;
13298
13299 /* Reserve 2 spaces for ", " or ",\0" */
13300 left -= len + 2;
13301
13302 /* Check if there is any room. */
13303 if (left >= 0)
13304 {
13305 if (p != start)
13306 {
13307 *p++ = ',';
13308 *p++ = ' ';
13309 }
13310 p = mempcpy (p, name, len);
13311 }
13312 else
13313 {
13314 /* Output the current message now and start a new one. */
13315 *p++ = ',';
13316 *p = '\0';
13317 fprintf (stream, "%s\n", message);
13318 p = start;
13319 left = size - (start - message) - len - 2;
13320
13321 gas_assert (left >= 0);
13322
13323 p = mempcpy (p, name, len);
13324 }
13325
13326 *left_p = left;
13327 return p;
13328 }
13329
13330 static void
13331 show_arch (FILE *stream, int ext, int check)
13332 {
13333 static char message[] = MESSAGE_TEMPLATE;
13334 char *start = message + 27;
13335 char *p;
13336 int size = sizeof (MESSAGE_TEMPLATE);
13337 int left;
13338 const char *name;
13339 int len;
13340 unsigned int j;
13341
13342 p = start;
13343 left = size - (start - message);
13344 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
13345 {
13346 /* Should it be skipped? */
13347 if (cpu_arch [j].skip)
13348 continue;
13349
13350 name = cpu_arch [j].name;
13351 len = cpu_arch [j].len;
13352 if (*name == '.')
13353 {
13354 /* It is an extension. Skip if we aren't asked to show it. */
13355 if (ext)
13356 {
13357 name++;
13358 len--;
13359 }
13360 else
13361 continue;
13362 }
13363 else if (ext)
13364 {
13365 /* It is a processor. Skip if we only show extensions. */
13366 continue;
13367 }
13368 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
13369 {
13370 /* It is an impossible processor - skip. */
13371 continue;
13372 }
13373
13374 p = output_message (stream, p, message, start, &left, name, len);
13375 }
13376
13377 /* Display disabled extensions. */
13378 if (ext)
13379 for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
13380 {
13381 name = cpu_noarch [j].name;
13382 len = cpu_noarch [j].len;
13383 p = output_message (stream, p, message, start, &left, name,
13384 len);
13385 }
13386
13387 *p = '\0';
13388 fprintf (stream, "%s\n", message);
13389 }
13390
13391 void
13392 md_show_usage (FILE *stream)
13393 {
13394 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13395 fprintf (stream, _("\
13396 -Qy, -Qn ignored\n\
13397 -V print assembler version number\n\
13398 -k ignored\n"));
13399 #endif
13400 fprintf (stream, _("\
13401 -n Do not optimize code alignment\n\
13402 -q quieten some warnings\n"));
13403 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13404 fprintf (stream, _("\
13405 -s ignored\n"));
13406 #endif
13407 #if defined BFD64 && (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13408 || defined (TE_PE) || defined (TE_PEP))
13409 fprintf (stream, _("\
13410 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
13411 #endif
13412 #ifdef SVR4_COMMENT_CHARS
13413 fprintf (stream, _("\
13414 --divide do not treat `/' as a comment character\n"));
13415 #else
13416 fprintf (stream, _("\
13417 --divide ignored\n"));
13418 #endif
13419 fprintf (stream, _("\
13420 -march=CPU[,+EXTENSION...]\n\
13421 generate code for CPU and EXTENSION, CPU is one of:\n"));
13422 show_arch (stream, 0, 1);
13423 fprintf (stream, _("\
13424 EXTENSION is combination of:\n"));
13425 show_arch (stream, 1, 0);
13426 fprintf (stream, _("\
13427 -mtune=CPU optimize for CPU, CPU is one of:\n"));
13428 show_arch (stream, 0, 0);
13429 fprintf (stream, _("\
13430 -msse2avx encode SSE instructions with VEX prefix\n"));
13431 fprintf (stream, _("\
13432 -msse-check=[none|error|warning] (default: warning)\n\
13433 check SSE instructions\n"));
13434 fprintf (stream, _("\
13435 -moperand-check=[none|error|warning] (default: warning)\n\
13436 check operand combinations for validity\n"));
13437 fprintf (stream, _("\
13438 -mavxscalar=[128|256] (default: 128)\n\
13439 encode scalar AVX instructions with specific vector\n\
13440 length\n"));
13441 fprintf (stream, _("\
13442 -mvexwig=[0|1] (default: 0)\n\
13443 encode VEX instructions with specific VEX.W value\n\
13444 for VEX.W bit ignored instructions\n"));
13445 fprintf (stream, _("\
13446 -mevexlig=[128|256|512] (default: 128)\n\
13447 encode scalar EVEX instructions with specific vector\n\
13448 length\n"));
13449 fprintf (stream, _("\
13450 -mevexwig=[0|1] (default: 0)\n\
13451 encode EVEX instructions with specific EVEX.W value\n\
13452 for EVEX.W bit ignored instructions\n"));
13453 fprintf (stream, _("\
13454 -mevexrcig=[rne|rd|ru|rz] (default: rne)\n\
13455 encode EVEX instructions with specific EVEX.RC value\n\
13456 for SAE-only ignored instructions\n"));
13457 fprintf (stream, _("\
13458 -mmnemonic=[att|intel] "));
13459 if (SYSV386_COMPAT)
13460 fprintf (stream, _("(default: att)\n"));
13461 else
13462 fprintf (stream, _("(default: intel)\n"));
13463 fprintf (stream, _("\
13464 use AT&T/Intel mnemonic\n"));
13465 fprintf (stream, _("\
13466 -msyntax=[att|intel] (default: att)\n\
13467 use AT&T/Intel syntax\n"));
13468 fprintf (stream, _("\
13469 -mindex-reg support pseudo index registers\n"));
13470 fprintf (stream, _("\
13471 -mnaked-reg don't require `%%' prefix for registers\n"));
13472 fprintf (stream, _("\
13473 -madd-bnd-prefix add BND prefix for all valid branches\n"));
13474 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13475 fprintf (stream, _("\
13476 -mshared disable branch optimization for shared code\n"));
13477 fprintf (stream, _("\
13478 -mx86-used-note=[no|yes] "));
13479 if (DEFAULT_X86_USED_NOTE)
13480 fprintf (stream, _("(default: yes)\n"));
13481 else
13482 fprintf (stream, _("(default: no)\n"));
13483 fprintf (stream, _("\
13484 generate x86 used ISA and feature properties\n"));
13485 #endif
13486 #if defined (TE_PE) || defined (TE_PEP)
13487 fprintf (stream, _("\
13488 -mbig-obj generate big object files\n"));
13489 #endif
13490 fprintf (stream, _("\
13491 -momit-lock-prefix=[no|yes] (default: no)\n\
13492 strip all lock prefixes\n"));
13493 fprintf (stream, _("\
13494 -mfence-as-lock-add=[no|yes] (default: no)\n\
13495 encode lfence, mfence and sfence as\n\
13496 lock addl $0x0, (%%{re}sp)\n"));
13497 fprintf (stream, _("\
13498 -mrelax-relocations=[no|yes] "));
13499 if (DEFAULT_GENERATE_X86_RELAX_RELOCATIONS)
13500 fprintf (stream, _("(default: yes)\n"));
13501 else
13502 fprintf (stream, _("(default: no)\n"));
13503 fprintf (stream, _("\
13504 generate relax relocations\n"));
13505 fprintf (stream, _("\
13506 -malign-branch-boundary=NUM (default: 0)\n\
13507 align branches within a NUM-byte boundary\n"));
13508 fprintf (stream, _("\
13509 -malign-branch=TYPE[+TYPE...] (default: jcc+fused+jmp)\n\
13510 TYPE is a combination of jcc, fused, jmp, call, ret,\n\
13511 indirect\n\
13512 specify types of branches to align\n"));
13513 fprintf (stream, _("\
13514 -malign-branch-prefix-size=NUM (default: 5)\n\
13515 align branches with NUM prefixes per instruction\n"));
13516 fprintf (stream, _("\
13517 -mbranches-within-32B-boundaries\n\
13518 align branches within a 32-byte boundary\n"));
13519 fprintf (stream, _("\
13520 -mlfence-after-load=[no|yes] (default: no)\n\
13521 generate lfence after load\n"));
13522 fprintf (stream, _("\
13523 -mlfence-before-indirect-branch=[none|all|register|memory] (default: none)\n\
13524 generate lfence before indirect near branch\n"));
13525 fprintf (stream, _("\
13526 -mlfence-before-ret=[none|or|not|shl|yes] (default: none)\n\
13527 generate lfence before ret\n"));
13528 fprintf (stream, _("\
13529 -mamd64 accept only AMD64 ISA [default]\n"));
13530 fprintf (stream, _("\
13531 -mintel64 accept only Intel64 ISA\n"));
13532 }
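
/* An illustrative invocation exercising several of the options listed
   above (file names and the CPU/extension choices are examples only):

     as --64 -march=generic64,+avx2 -mtune=corei7 \
        -malign-branch-boundary=32 -o foo.o foo.s  */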
13533
13534 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
13535 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
13536 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
13537
13538 /* Pick the target format to use. */
13539
13540 const char *
13541 i386_target_format (void)
13542 {
13543 if (!strncmp (default_arch, "x86_64", 6))
13544 {
13545 update_code_flag (CODE_64BIT, 1);
13546 if (default_arch[6] == '\0')
13547 x86_elf_abi = X86_64_ABI;
13548 else
13549 x86_elf_abi = X86_64_X32_ABI;
13550 }
13551 else if (!strcmp (default_arch, "i386"))
13552 update_code_flag (CODE_32BIT, 1);
13553 else if (!strcmp (default_arch, "iamcu"))
13554 {
13555 update_code_flag (CODE_32BIT, 1);
13556 if (cpu_arch_isa == PROCESSOR_UNKNOWN)
13557 {
13558 static const i386_cpu_flags iamcu_flags = CPU_IAMCU_FLAGS;
13559 cpu_arch_name = "iamcu";
13560 cpu_sub_arch_name = NULL;
13561 cpu_arch_flags = iamcu_flags;
13562 cpu_arch_isa = PROCESSOR_IAMCU;
13563 cpu_arch_isa_flags = iamcu_flags;
13564 if (!cpu_arch_tune_set)
13565 {
13566 cpu_arch_tune = cpu_arch_isa;
13567 cpu_arch_tune_flags = cpu_arch_isa_flags;
13568 }
13569 }
13570 else if (cpu_arch_isa != PROCESSOR_IAMCU)
13571 as_fatal (_("Intel MCU doesn't support `%s' architecture"),
13572 cpu_arch_name);
13573 }
13574 else
13575 as_fatal (_("unknown architecture"));
13576
13577 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
13578 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
13579 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
13580 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
13581
13582 switch (OUTPUT_FLAVOR)
13583 {
13584 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
13585 case bfd_target_aout_flavour:
13586 return AOUT_TARGET_FORMAT;
13587 #endif
13588 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
13589 # if defined (TE_PE) || defined (TE_PEP)
13590 case bfd_target_coff_flavour:
13591 if (flag_code == CODE_64BIT)
13592 return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
13593 else
13594 return use_big_obj ? "pe-bigobj-i386" : "pe-i386";
13595 # elif defined (TE_GO32)
13596 case bfd_target_coff_flavour:
13597 return "coff-go32";
13598 # else
13599 case bfd_target_coff_flavour:
13600 return "coff-i386";
13601 # endif
13602 #endif
13603 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
13604 case bfd_target_elf_flavour:
13605 {
13606 const char *format;
13607
13608 switch (x86_elf_abi)
13609 {
13610 default:
13611 format = ELF_TARGET_FORMAT;
13612 #ifndef TE_SOLARIS
13613 tls_get_addr = "___tls_get_addr";
13614 #endif
13615 break;
13616 case X86_64_ABI:
13617 use_rela_relocations = 1;
13618 object_64bit = 1;
13619 #ifndef TE_SOLARIS
13620 tls_get_addr = "__tls_get_addr";
13621 #endif
13622 format = ELF_TARGET_FORMAT64;
13623 break;
13624 case X86_64_X32_ABI:
13625 use_rela_relocations = 1;
13626 object_64bit = 1;
13627 #ifndef TE_SOLARIS
13628 tls_get_addr = "__tls_get_addr";
13629 #endif
13630 disallow_64bit_reloc = 1;
13631 format = ELF_TARGET_FORMAT32;
13632 break;
13633 }
13634 if (cpu_arch_isa == PROCESSOR_L1OM)
13635 {
13636 if (x86_elf_abi != X86_64_ABI)
13637 as_fatal (_("Intel L1OM is 64bit only"));
13638 return ELF_TARGET_L1OM_FORMAT;
13639 }
13640 else if (cpu_arch_isa == PROCESSOR_K1OM)
13641 {
13642 if (x86_elf_abi != X86_64_ABI)
13643 as_fatal (_("Intel K1OM is 64bit only"));
13644 return ELF_TARGET_K1OM_FORMAT;
13645 }
13646 else if (cpu_arch_isa == PROCESSOR_IAMCU)
13647 {
13648 if (x86_elf_abi != I386_ABI)
13649 as_fatal (_("Intel MCU is 32bit only"));
13650 return ELF_TARGET_IAMCU_FORMAT;
13651 }
13652 else
13653 return format;
13654 }
13655 #endif
13656 #if defined (OBJ_MACH_O)
13657 case bfd_target_mach_o_flavour:
13658 if (flag_code == CODE_64BIT)
13659 {
13660 use_rela_relocations = 1;
13661 object_64bit = 1;
13662 return "mach-o-x86-64";
13663 }
13664 else
13665 return "mach-o-i386";
13666 #endif
13667 default:
13668 abort ();
13669 return NULL;
13670 }
13671 }
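
/* For instance, a gas configured for x86_64-linux-gnu and run with --x32
   has default_arch "x86_64:32"; the prefix check above then selects
   X86_64_X32_ABI, so the ELF case returns ELF_TARGET_FORMAT32
   ("elf32-x86-64" on Linux) with 64-bit relocations disallowed.  */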
13672
13673 #endif /* OBJ_MAYBE_ more than one */
13674 \f
13675 symbolS *
13676 md_undefined_symbol (char *name)
13677 {
13678 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
13679 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
13680 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
13681 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
13682 {
13683 if (!GOT_symbol)
13684 {
13685 if (symbol_find (name))
13686 as_bad (_("GOT already in symbol table"));
13687 GOT_symbol = symbol_new (name, undefined_section,
13688 (valueT) 0, &zero_address_frag);
13689 }
13690 return GOT_symbol;
13691 }
13692 return 0;
13693 }
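
/* This is how _GLOBAL_OFFSET_TABLE_ springs into existence on first use.
   For example, classic 32-bit PIC prologue code such as

     call __x86.get_pc_thunk.bx
     addl $_GLOBAL_OFFSET_TABLE_, %ebx

   reaches this function with the undefined name, and the interned
   GOT_symbol is then shared by every later reference.  */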
13694
13695 /* Round up a section size to the appropriate boundary. */
13696
13697 valueT
13698 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
13699 {
13700 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
13701 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
13702 {
13703 /* For a.out, force the section size to be aligned. If we don't do
13704 this, BFD will align it for us, but it will not write out the
13705 final bytes of the section. This may be a bug in BFD, but it is
13706 easier to fix it here since that is how the other a.out targets
13707 work. */
13708 int align;
13709
13710 align = bfd_section_alignment (segment);
13711 size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
13712 }
13713 #endif
13714
13715 return size;
13716 }
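
/* A worked example of the rounding above: with a section alignment of
   2**4 (align == 4) and size == 0x2c, (0x2c + 15) & ~15 yields 0x30,
   i.e. four bytes of padding are accounted for in the a.out size.  */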
13717
13718 /* On the i386, PC-relative offsets are relative to the start of the
13719 next instruction. That is, the address of the offset, plus its
13720 size, since the offset is always the last part of the insn. */
13721
13722 long
13723 md_pcrel_from (fixS *fixP)
13724 {
13725 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
13726 }
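
/* Example: for "call foo" assembled at 0x100 (opcode 0xe8, then a 4-byte
   displacement at 0x101), fx_frag->fr_address + fx_where is 0x101 and
   fx_size is 4, so the PC-relative base is 0x105, the address of the
   next instruction.  */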
13727
13728 #ifndef I386COFF
13729
13730 static void
13731 s_bss (int ignore ATTRIBUTE_UNUSED)
13732 {
13733 int temp;
13734
13735 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13736 if (IS_ELF)
13737 obj_elf_section_change_hook ();
13738 #endif
13739 temp = get_absolute_expression ();
13740 subseg_set (bss_section, (subsegT) temp);
13741 demand_empty_rest_of_line ();
13742 }
13743
13744 #endif
13745
13746 /* Remember constant directives emitted into code sections, since they
13747 interfere with the -mlfence-* transformations. */
13747
13748 void
13749 i386_cons_align (int ignore ATTRIBUTE_UNUSED)
13750 {
13751 if (last_insn.kind != last_insn_directive
13752 && (bfd_section_flags (now_seg) & SEC_CODE))
13753 {
13754 last_insn.seg = now_seg;
13755 last_insn.kind = last_insn_directive;
13756 last_insn.name = "constant directive";
13757 last_insn.file = as_where (&last_insn.line);
13758 if (lfence_before_ret != lfence_before_ret_none)
13759 {
13760 if (lfence_before_indirect_branch != lfence_branch_none)
13761 as_warn (_("constant directive skips -mlfence-before-ret "
13762 "and -mlfence-before-indirect-branch"));
13763 else
13764 as_warn (_("constant directive skips -mlfence-before-ret"));
13765 }
13766 else if (lfence_before_indirect_branch != lfence_branch_none)
13767 as_warn (_("constant directive skips -mlfence-before-indirect-branch"));
13768 }
13769 }
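
/* A "constant directive" here means data emitted into a code section,
   e.g.

     .text
     jmp 1f
     .long 0xdeadbeef
   1: ret

   Such bytes defeat the instruction tracking that the -mlfence-*
   transformations rely on, hence the warnings above.  */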
13770
13771 void
13772 i386_validate_fix (fixS *fixp)
13773 {
13774 if (fixp->fx_subsy)
13775 {
13776 if (fixp->fx_subsy == GOT_symbol)
13777 {
13778 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
13779 {
13780 if (!object_64bit)
13781 abort ();
13782 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13783 if (fixp->fx_tcbit2)
13784 fixp->fx_r_type = (fixp->fx_tcbit
13785 ? BFD_RELOC_X86_64_REX_GOTPCRELX
13786 : BFD_RELOC_X86_64_GOTPCRELX);
13787 else
13788 #endif
13789 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
13790 }
13791 else
13792 {
13793 if (!object_64bit)
13794 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
13795 else
13796 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
13797 }
13798 fixp->fx_subsy = 0;
13799 }
13800 }
13801 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13802 else if (!object_64bit)
13803 {
13804 if (fixp->fx_r_type == BFD_RELOC_386_GOT32
13805 && fixp->fx_tcbit2)
13806 fixp->fx_r_type = BFD_RELOC_386_GOT32X;
13807 }
13808 #endif
13809 }
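
/* The fx_tcbit/fx_tcbit2 upgrades above are what later let the linker
   relax GOT loads.  For example, on x86-64

     movq foo@GOTPCREL(%rip), %rax

   gets BFD_RELOC_X86_64_REX_GOTPCRELX (the insn carries a REX prefix),
   which the linker may rewrite as

     leaq foo(%rip), %rax

   when foo turns out to be defined locally.  */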
13810
13811 arelent *
13812 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
13813 {
13814 arelent *rel;
13815 bfd_reloc_code_real_type code;
13816
13817 switch (fixp->fx_r_type)
13818 {
13819 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
13820 case BFD_RELOC_SIZE32:
13821 case BFD_RELOC_SIZE64:
13822 if (S_IS_DEFINED (fixp->fx_addsy)
13823 && !S_IS_EXTERNAL (fixp->fx_addsy))
13824 {
13825 /* Resolve size relocation against local symbol to size of
13826 the symbol plus addend. */
13827 valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
13828 if (fixp->fx_r_type == BFD_RELOC_SIZE32
13829 && !fits_in_unsigned_long (value))
13830 as_bad_where (fixp->fx_file, fixp->fx_line,
13831 _("symbol size computation overflow"));
13832 fixp->fx_addsy = NULL;
13833 fixp->fx_subsy = NULL;
13834 md_apply_fix (fixp, (valueT *) &value, NULL);
13835 return NULL;
13836 }
13837 #endif
13838 /* Fall through. */
13839
13840 case BFD_RELOC_X86_64_PLT32:
13841 case BFD_RELOC_X86_64_GOT32:
13842 case BFD_RELOC_X86_64_GOTPCREL:
13843 case BFD_RELOC_X86_64_GOTPCRELX:
13844 case BFD_RELOC_X86_64_REX_GOTPCRELX:
13845 case BFD_RELOC_386_PLT32:
13846 case BFD_RELOC_386_GOT32:
13847 case BFD_RELOC_386_GOT32X:
13848 case BFD_RELOC_386_GOTOFF:
13849 case BFD_RELOC_386_GOTPC:
13850 case BFD_RELOC_386_TLS_GD:
13851 case BFD_RELOC_386_TLS_LDM:
13852 case BFD_RELOC_386_TLS_LDO_32:
13853 case BFD_RELOC_386_TLS_IE_32:
13854 case BFD_RELOC_386_TLS_IE:
13855 case BFD_RELOC_386_TLS_GOTIE:
13856 case BFD_RELOC_386_TLS_LE_32:
13857 case BFD_RELOC_386_TLS_LE:
13858 case BFD_RELOC_386_TLS_GOTDESC:
13859 case BFD_RELOC_386_TLS_DESC_CALL:
13860 case BFD_RELOC_X86_64_TLSGD:
13861 case BFD_RELOC_X86_64_TLSLD:
13862 case BFD_RELOC_X86_64_DTPOFF32:
13863 case BFD_RELOC_X86_64_DTPOFF64:
13864 case BFD_RELOC_X86_64_GOTTPOFF:
13865 case BFD_RELOC_X86_64_TPOFF32:
13866 case BFD_RELOC_X86_64_TPOFF64:
13867 case BFD_RELOC_X86_64_GOTOFF64:
13868 case BFD_RELOC_X86_64_GOTPC32:
13869 case BFD_RELOC_X86_64_GOT64:
13870 case BFD_RELOC_X86_64_GOTPCREL64:
13871 case BFD_RELOC_X86_64_GOTPC64:
13872 case BFD_RELOC_X86_64_GOTPLT64:
13873 case BFD_RELOC_X86_64_PLTOFF64:
13874 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
13875 case BFD_RELOC_X86_64_TLSDESC_CALL:
13876 case BFD_RELOC_RVA:
13877 case BFD_RELOC_VTABLE_ENTRY:
13878 case BFD_RELOC_VTABLE_INHERIT:
13879 #ifdef TE_PE
13880 case BFD_RELOC_32_SECREL:
13881 #endif
13882 code = fixp->fx_r_type;
13883 break;
13884 case BFD_RELOC_X86_64_32S:
13885 if (!fixp->fx_pcrel)
13886 {
13887 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
13888 code = fixp->fx_r_type;
13889 break;
13890 }
13891 /* Fall through. */
13892 default:
13893 if (fixp->fx_pcrel)
13894 {
13895 switch (fixp->fx_size)
13896 {
13897 default:
13898 as_bad_where (fixp->fx_file, fixp->fx_line,
13899 _("can not do %d byte pc-relative relocation"),
13900 fixp->fx_size);
13901 code = BFD_RELOC_32_PCREL;
13902 break;
13903 case 1: code = BFD_RELOC_8_PCREL; break;
13904 case 2: code = BFD_RELOC_16_PCREL; break;
13905 case 4: code = BFD_RELOC_32_PCREL; break;
13906 #ifdef BFD64
13907 case 8: code = BFD_RELOC_64_PCREL; break;
13908 #endif
13909 }
13910 }
13911 else
13912 {
13913 switch (fixp->fx_size)
13914 {
13915 default:
13916 as_bad_where (fixp->fx_file, fixp->fx_line,
13917 _("can not do %d byte relocation"),
13918 fixp->fx_size);
13919 code = BFD_RELOC_32;
13920 break;
13921 case 1: code = BFD_RELOC_8; break;
13922 case 2: code = BFD_RELOC_16; break;
13923 case 4: code = BFD_RELOC_32; break;
13924 #ifdef BFD64
13925 case 8: code = BFD_RELOC_64; break;
13926 #endif
13927 }
13928 }
13929 break;
13930 }
13931
13932 if ((code == BFD_RELOC_32
13933 || code == BFD_RELOC_32_PCREL
13934 || code == BFD_RELOC_X86_64_32S)
13935 && GOT_symbol
13936 && fixp->fx_addsy == GOT_symbol)
13937 {
13938 if (!object_64bit)
13939 code = BFD_RELOC_386_GOTPC;
13940 else
13941 code = BFD_RELOC_X86_64_GOTPC32;
13942 }
13943 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
13944 && GOT_symbol
13945 && fixp->fx_addsy == GOT_symbol)
13946 {
13947 code = BFD_RELOC_X86_64_GOTPC64;
13948 }
13949
13950 rel = XNEW (arelent);
13951 rel->sym_ptr_ptr = XNEW (asymbol *);
13952 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
13953
13954 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
13955
13956 if (!use_rela_relocations)
13957 {
13958 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
13959 vtable entry to be used in the relocation's section offset. */
13960 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
13961 rel->address = fixp->fx_offset;
13962 #if defined (OBJ_COFF) && defined (TE_PE)
13963 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
13964 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
13965 else
13966 #endif
13967 rel->addend = 0;
13968 }
13969 /* Use rela relocations in 64bit mode. */
13970 else
13971 {
13972 if (disallow_64bit_reloc)
13973 switch (code)
13974 {
13975 case BFD_RELOC_X86_64_DTPOFF64:
13976 case BFD_RELOC_X86_64_TPOFF64:
13977 case BFD_RELOC_64_PCREL:
13978 case BFD_RELOC_X86_64_GOTOFF64:
13979 case BFD_RELOC_X86_64_GOT64:
13980 case BFD_RELOC_X86_64_GOTPCREL64:
13981 case BFD_RELOC_X86_64_GOTPC64:
13982 case BFD_RELOC_X86_64_GOTPLT64:
13983 case BFD_RELOC_X86_64_PLTOFF64:
13984 as_bad_where (fixp->fx_file, fixp->fx_line,
13985 _("cannot represent relocation type %s in x32 mode"),
13986 bfd_get_reloc_code_name (code));
13987 break;
13988 default:
13989 break;
13990 }
13991
13992 if (!fixp->fx_pcrel)
13993 rel->addend = fixp->fx_offset;
13994 else
13995 switch (code)
13996 {
13997 case BFD_RELOC_X86_64_PLT32:
13998 case BFD_RELOC_X86_64_GOT32:
13999 case BFD_RELOC_X86_64_GOTPCREL:
14000 case BFD_RELOC_X86_64_GOTPCRELX:
14001 case BFD_RELOC_X86_64_REX_GOTPCRELX:
14002 case BFD_RELOC_X86_64_TLSGD:
14003 case BFD_RELOC_X86_64_TLSLD:
14004 case BFD_RELOC_X86_64_GOTTPOFF:
14005 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
14006 case BFD_RELOC_X86_64_TLSDESC_CALL:
14007 rel->addend = fixp->fx_offset - fixp->fx_size;
14008 break;
14009 default:
14010 rel->addend = (section->vma
14011 - fixp->fx_size
14012 + fixp->fx_addnumber
14013 + md_pcrel_from (fixp));
14014 break;
14015 }
14016 }
14017
14018 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
14019 if (rel->howto == NULL)
14020 {
14021 as_bad_where (fixp->fx_file, fixp->fx_line,
14022 _("cannot represent relocation type %s"),
14023 bfd_get_reloc_code_name (code));
14024 /* Set howto to a garbage value so that we can keep going. */
14025 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
14026 gas_assert (rel->howto != NULL);
14027 }
14028
14029 return rel;
14030 }
14031
14032 #include "tc-i386-intel.c"
14033
14034 void
14035 tc_x86_parse_to_dw2regnum (expressionS *exp)
14036 {
14037 int saved_naked_reg;
14038 char saved_register_dot;
14039
14040 saved_naked_reg = allow_naked_reg;
14041 allow_naked_reg = 1;
14042 saved_register_dot = register_chars['.'];
14043 register_chars['.'] = '.';
14044 allow_pseudo_reg = 1;
14045 expression_and_evaluate (exp);
14046 allow_pseudo_reg = 0;
14047 register_chars['.'] = saved_register_dot;
14048 allow_naked_reg = saved_naked_reg;
14049
14050 if (exp->X_op == O_register && exp->X_add_number >= 0)
14051 {
14052 if ((addressT) exp->X_add_number < i386_regtab_size)
14053 {
14054 exp->X_op = O_constant;
14055 exp->X_add_number = i386_regtab[exp->X_add_number]
14056 .dw2_regnum[flag_code >> 1];
14057 }
14058 else
14059 exp->X_op = O_illegal;
14060 }
14061 }
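
/* This parser backs the register operands of CFI directives, e.g.

     .cfi_startproc
     pushq %rbp
     .cfi_offset %rbp, -16

   The flag_code >> 1 index folds CODE_32BIT and CODE_16BIT onto the
   32-bit DWARF register numbering and CODE_64BIT onto the 64-bit one.  */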
14062
14063 void
14064 tc_x86_frame_initial_instructions (void)
14065 {
14066 static unsigned int sp_regno[2];
14067
14068 if (!sp_regno[flag_code >> 1])
14069 {
14070 char *saved_input = input_line_pointer;
14071 char sp[][4] = {"esp", "rsp"};
14072 expressionS exp;
14073
14074 input_line_pointer = sp[flag_code >> 1];
14075 tc_x86_parse_to_dw2regnum (&exp);
14076 gas_assert (exp.X_op == O_constant);
14077 sp_regno[flag_code >> 1] = exp.X_add_number;
14078 input_line_pointer = saved_input;
14079 }
14080
14081 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
14082 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
14083 }
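
/* With the usual x86_cie_data_alignment of -4 (32-bit) or -8 (64-bit),
   the initial state recorded here is CFA = %esp + 4 resp. %rsp + 8, with
   the return address saved at CFA - 4 resp. CFA - 8: exactly the stack
   layout at function entry, immediately after the call.  */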
14084
14085 int
14086 x86_dwarf2_addr_size (void)
14087 {
14088 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
14089 if (x86_elf_abi == X86_64_X32_ABI)
14090 return 4;
14091 #endif
14092 return bfd_arch_bits_per_address (stdoutput) / 8;
14093 }
14094
14095 int
14096 i386_elf_section_type (const char *str, size_t len)
14097 {
14098 if (flag_code == CODE_64BIT
14099 && len == sizeof ("unwind") - 1
14100 && strncmp (str, "unwind", 6) == 0)
14101 return SHT_X86_64_UNWIND;
14102
14103 return -1;
14104 }
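
/* This hook lets 64-bit code spell the special unwind section type, e.g.

     .section .eh_frame, "a", @unwind

   which yields SHT_X86_64_UNWIND, as some targets (notably Solaris)
   expect for unwind tables.  */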
14105
14106 #ifdef TE_SOLARIS
14107 void
14108 i386_solaris_fix_up_eh_frame (segT sec)
14109 {
14110 if (flag_code == CODE_64BIT)
14111 elf_section_type (sec) = SHT_X86_64_UNWIND;
14112 }
14113 #endif
14114
14115 #ifdef TE_PE
14116 void
14117 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
14118 {
14119 expressionS exp;
14120
14121 exp.X_op = O_secrel;
14122 exp.X_add_symbol = symbol;
14123 exp.X_add_number = 0;
14124 emit_expr (&exp, size);
14125 }
14126 #endif
14127
14128 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
14129 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
14130
14131 bfd_vma
14132 x86_64_section_letter (int letter, const char **ptr_msg)
14133 {
14134 if (flag_code == CODE_64BIT)
14135 {
14136 if (letter == 'l')
14137 return SHF_X86_64_LARGE;
14138
14139 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
14140 }
14141 else
14142 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
14143 return -1;
14144 }
14145
14146 bfd_vma
14147 x86_64_section_word (char *str, size_t len)
14148 {
14149 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
14150 return SHF_X86_64_LARGE;
14151
14152 return -1;
14153 }
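
/* Both spellings request SHF_X86_64_LARGE.  For example

     .section .lrodata, "al"

   uses the section letter to mark data that the linker may place beyond
   2GiB, out of reach of 32-bit (small code model) relocations.  */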
14154
14155 static void
14156 handle_large_common (int small ATTRIBUTE_UNUSED)
14157 {
14158 if (flag_code != CODE_64BIT)
14159 {
14160 s_comm_internal (0, elf_common_parse);
14161 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
14162 }
14163 else
14164 {
14165 static segT lbss_section;
14166 asection *saved_com_section_ptr = elf_com_section_ptr;
14167 asection *saved_bss_section = bss_section;
14168
14169 if (lbss_section == NULL)
14170 {
14171 flagword applicable;
14172 segT seg = now_seg;
14173 subsegT subseg = now_subseg;
14174
14175 /* The .lbss section is for local .largecomm symbols. */
14176 lbss_section = subseg_new (".lbss", 0);
14177 applicable = bfd_applicable_section_flags (stdoutput);
14178 bfd_set_section_flags (lbss_section, applicable & SEC_ALLOC);
14179 seg_info (lbss_section)->bss = 1;
14180
14181 subseg_set (seg, subseg);
14182 }
14183
14184 elf_com_section_ptr = &_bfd_elf_large_com_section;
14185 bss_section = lbss_section;
14186
14187 s_comm_internal (0, elf_common_parse);
14188
14189 elf_com_section_ptr = saved_com_section_ptr;
14190 bss_section = saved_bss_section;
14191 }
14192 }
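
/* An illustrative use of the directive handled above (64-bit only):

     .largecomm big_buffer, 0x100000000, 32

   which defines a common symbol just as .comm would, but against the
   large common section, so the symbol lands in .lbss rather than .bss.  */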
14193 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */