unsupported_with_intel_mnemonic,
unsupported_syntax,
unsupported,
+ invalid_sib_address,
invalid_vsib_address,
invalid_vector_register_set,
+ invalid_tmm_register_set,
unsupported_vector_index_register,
unsupported_broadcast,
broadcast_needed,
/* The operand to a branch insn indicates an absolute branch. */
bfd_boolean jumpabsolute;
- /* Has MMX register operands. */
- bfd_boolean has_regmmx;
-
- /* Has XMM register operands. */
- bfd_boolean has_regxmm;
-
- /* Has YMM register operands. */
- bfd_boolean has_regymm;
-
- /* Has ZMM register operands. */
- bfd_boolean has_regzmm;
+ /* Extended states. */
+ enum
+ {
+ /* Use MMX state. */
+ xstate_mmx = 1 << 0,
+ /* Use XMM state. */
+ xstate_xmm = 1 << 1,
+ /* Use YMM state. */
+ xstate_ymm = 1 << 2 | xstate_xmm,
+ /* Use ZMM state. */
+ xstate_zmm = 1 << 3 | xstate_ymm,
+ /* Use TMM state. */
+ xstate_tmm = 1 << 4
+ } xstate;
/* Has GOTPC or TLS relocation. */
bfd_boolean has_gotpc_tls_reloc;
dir_encoding_swap
} dir_encoding;
- /* Prefer 8bit or 32bit displacement in encoding. */
+ /* Prefer 8bit, 16bit, 32bit displacement in encoding. */
enum
{
disp_encoding_default = 0,
disp_encoding_8bit,
+ disp_encoding_16bit,
disp_encoding_32bit
} disp_encoding;
#endif
;
-#if (defined (TE_I386AIX) \
- || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
- && !defined (TE_GNU) \
- && !defined (TE_LINUX) \
- && !defined (TE_NACL) \
- && !defined (TE_FreeBSD) \
- && !defined (TE_DragonFly) \
- && !defined (TE_NetBSD)))
+#if ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
+ && !defined (TE_GNU) \
+ && !defined (TE_LINUX) \
+ && !defined (TE_FreeBSD) \
+ && !defined (TE_DragonFly) \
+ && !defined (TE_NetBSD))
/* This array holds the chars that always start a comment. If the
pre-processor is disabled, these aren't very useful. The option
--divide will remove '/' from this list. */
CPU_WAITPKG_FLAGS, 0 },
{ STRING_COMMA_LEN (".cldemote"), PROCESSOR_UNKNOWN,
CPU_CLDEMOTE_FLAGS, 0 },
+ { STRING_COMMA_LEN (".amx_int8"), PROCESSOR_UNKNOWN,
+ CPU_AMX_INT8_FLAGS, 0 },
+ { STRING_COMMA_LEN (".amx_bf16"), PROCESSOR_UNKNOWN,
+ CPU_AMX_BF16_FLAGS, 0 },
+ { STRING_COMMA_LEN (".amx_tile"), PROCESSOR_UNKNOWN,
+ CPU_AMX_TILE_FLAGS, 0 },
{ STRING_COMMA_LEN (".movdiri"), PROCESSOR_UNKNOWN,
CPU_MOVDIRI_FLAGS, 0 },
{ STRING_COMMA_LEN (".movdir64b"), PROCESSOR_UNKNOWN,
CPU_SEV_ES_FLAGS, 0 },
{ STRING_COMMA_LEN (".tsxldtrk"), PROCESSOR_UNKNOWN,
CPU_TSXLDTRK_FLAGS, 0 },
+ { STRING_COMMA_LEN (".kl"), PROCESSOR_UNKNOWN,
+ CPU_KL_FLAGS, 0 },
+ { STRING_COMMA_LEN (".widekl"), PROCESSOR_UNKNOWN,
+ CPU_WIDEKL_FLAGS, 0 },
};
static const noarch_entry cpu_noarch[] =
{ STRING_COMMA_LEN ("noavx512_bitalg"), CPU_ANY_AVX512_BITALG_FLAGS },
{ STRING_COMMA_LEN ("noibt"), CPU_ANY_IBT_FLAGS },
{ STRING_COMMA_LEN ("noshstk"), CPU_ANY_SHSTK_FLAGS },
+ { STRING_COMMA_LEN ("noamx_int8"), CPU_ANY_AMX_INT8_FLAGS },
+ { STRING_COMMA_LEN ("noamx_bf16"), CPU_ANY_AMX_BF16_FLAGS },
+ { STRING_COMMA_LEN ("noamx_tile"), CPU_ANY_AMX_TILE_FLAGS },
{ STRING_COMMA_LEN ("nomovdiri"), CPU_ANY_MOVDIRI_FLAGS },
{ STRING_COMMA_LEN ("nomovdir64b"), CPU_ANY_MOVDIR64B_FLAGS },
{ STRING_COMMA_LEN ("noavx512_bf16"), CPU_ANY_AVX512_BF16_FLAGS },
- { STRING_COMMA_LEN ("noavx512_vp2intersect"), CPU_ANY_SHSTK_FLAGS },
+ { STRING_COMMA_LEN ("noavx512_vp2intersect"),
+ CPU_ANY_AVX512_VP2INTERSECT_FLAGS },
{ STRING_COMMA_LEN ("noenqcmd"), CPU_ANY_ENQCMD_FLAGS },
{ STRING_COMMA_LEN ("noserialize"), CPU_ANY_SERIALIZE_FLAGS },
{ STRING_COMMA_LEN ("notsxldtrk"), CPU_ANY_TSXLDTRK_FLAGS },
+ { STRING_COMMA_LEN ("nokl"), CPU_ANY_KL_FLAGS },
+ { STRING_COMMA_LEN ("nowidekl"), CPU_ANY_WIDEKL_FLAGS },
};
#ifdef I386COFF
extern char *input_line_pointer;
/* Hash table for instruction mnemonic lookup. */
-static struct hash_control *op_hash;
+static htab_t op_hash;
/* Hash table for register lookup. */
-static struct hash_control *reg_hash;
+static htab_t reg_hash;
\f
/* Various efficient no-op patterns for aligning code labels.
Note: Don't try to assemble the instructions in the comments.
{
/* We need to check a few extra flags with AVX. */
if (cpu.bitfield.cpuavx
- && (!t->opcode_modifier.sse2avx || sse2avx)
+ && (!t->opcode_modifier.sse2avx
+ || (sse2avx && !i.prefix[DATA_PREFIX]))
&& (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
&& (!x.bitfield.cpugfni || cpu.bitfield.cpugfni)
&& (!x.bitfield.cpupclmul || cpu.bitfield.cpupclmul))
|| (i.types[given].bitfield.ymmword
&& !t->operand_types[wanted].bitfield.ymmword)
|| (i.types[given].bitfield.zmmword
- && !t->operand_types[wanted].bitfield.zmmword));
+ && !t->operand_types[wanted].bitfield.zmmword)
+ || (i.types[given].bitfield.tmmword
+ && !t->operand_types[wanted].bitfield.tmmword));
}
/* Return 1 if there is no conflict in any size between operand GIVEN
temp.bitfield.xmmword = 0;
temp.bitfield.ymmword = 0;
temp.bitfield.zmmword = 0;
+ temp.bitfield.tmmword = 0;
if (operand_type_all_zero (&temp))
goto mismatch;
default: abort ();
}
-#ifdef BFD64
- /* If BFD64, sign extend val for 32bit address mode. */
- if (flag_code != CODE_64BIT
- || i.prefix[ADDR_PREFIX])
- if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
- val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
-#endif
-
if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
{
char buf1[40], buf2[40];
void
md_begin (void)
{
- const char *hash_err;
-
/* Support pseudo prefixes like {disp32}. */
lex_type ['{'] = LEX_BEGIN_NAME;
/* Initialize op_hash hash table. */
- op_hash = hash_new ();
+ op_hash = str_htab_create ();
{
const insn_template *optab;
/* different name --> ship out current template list;
add to hash table; & begin anew. */
core_optab->end = optab;
- hash_err = hash_insert (op_hash,
- (optab - 1)->name,
- (void *) core_optab);
- if (hash_err)
- {
- as_fatal (_("can't hash %s: %s"),
- (optab - 1)->name,
- hash_err);
- }
+ if (str_hash_insert (op_hash, (optab - 1)->name, core_optab, 0))
+ as_fatal (_("duplicate %s"), (optab - 1)->name);
+
if (optab->name == NULL)
break;
core_optab = XNEW (templates);
}
/* Initialize reg_hash hash table. */
- reg_hash = hash_new ();
+ reg_hash = str_htab_create ();
{
const reg_entry *regtab;
unsigned int regtab_size = i386_regtab_size;
for (regtab = i386_regtab; regtab_size--; regtab++)
- {
- hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
- if (hash_err)
- as_fatal (_("can't hash %s: %s"),
- regtab->reg_name,
- hash_err);
- }
+ if (str_hash_insert (reg_hash, regtab->reg_name, regtab, 0) != NULL)
+ as_fatal (_("duplicate %s"), regtab->reg_name);
}
/* Fill in lexical tables: mnemonic_chars, operand_chars. */
mnemonic_chars[c] = c;
operand_chars[c] = c;
}
+#ifdef SVR4_COMMENT_CHARS
+ else if (c == '\\' && strchr (i386_comment_chars, '/'))
+ operand_chars[c] = c;
+#endif
if (ISALPHA (c) || ISDIGIT (c))
identifier_chars[c] = c;
void
i386_print_statistics (FILE *file)
{
- hash_print_statistics (file, "i386 opcode", op_hash);
- hash_print_statistics (file, "i386 register", reg_hash);
+ htab_print_statistics (file, "i386 opcode", op_hash);
+ htab_print_statistics (file, "i386 register", reg_hash);
}
\f
#ifdef DEBUG386
{ OPERAND_TYPE_REGXMM, "rXMM" },
{ OPERAND_TYPE_REGYMM, "rYMM" },
{ OPERAND_TYPE_REGZMM, "rZMM" },
+ { OPERAND_TYPE_REGTMM, "rTMM" },
{ OPERAND_TYPE_REGMASK, "Mask reg" },
};
}
}
- switch ((i.tm.base_opcode >> 8) & 0xff)
+ switch ((i.tm.base_opcode >> (i.tm.opcode_length << 3)) & 0xff)
{
case 0:
implied_prefix = 0;
/* Determine vector length from the last multi-length vector
operand. */
- vec_length = 0;
for (op = i.operands; op--;)
if (i.tm.operand_types[op].bitfield.xmmword
+ i.tm.operand_types[op].bitfield.ymmword
|| i.tm.cpu_flags.bitfield.cpussse3
|| i.tm.cpu_flags.bitfield.cpusse4_1
|| i.tm.cpu_flags.bitfield.cpusse4_2
- || i.tm.cpu_flags.bitfield.cpusse4a
|| i.tm.cpu_flags.bitfield.cpupclmul
|| i.tm.cpu_flags.bitfield.cpuaes
|| i.tm.cpu_flags.bitfield.cpusha
return;
}
- /* Check for data size prefix on VEX/XOP/EVEX encoded insns. */
- if (i.prefix[DATA_PREFIX] && is_any_vex_encoding (&i.tm))
+ /* Check for data size prefix on VEX/XOP/EVEX encoded and SIMD insns. */
+ if (i.prefix[DATA_PREFIX]
+ && (is_any_vex_encoding (&i.tm)
+ || i.tm.operand_types[i.imm_operands].bitfield.class >= RegMMX
+ || i.tm.operand_types[i.imm_operands + 1].bitfield.class >= RegMMX))
{
as_bad (_("data size prefix invalid with `%s'"), i.tm.name);
return;
if (!process_suffix ())
return;
- /* Update operand types. */
+ /* Update operand types and check extended states. */
for (j = 0; j < i.operands; j++)
- i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
+ {
+ i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
+ switch (i.tm.operand_types[j].bitfield.class)
+ {
+ default:
+ break;
+ case RegMMX:
+ i.xstate |= xstate_mmx;
+ break;
+ case RegMask:
+ i.xstate |= xstate_zmm;
+ break;
+ case RegSIMD:
+ if (i.tm.operand_types[j].bitfield.tmmword)
+ i.xstate |= xstate_tmm;
+ else if (i.tm.operand_types[j].bitfield.zmmword)
+ i.xstate |= xstate_zmm;
+ else if (i.tm.operand_types[j].bitfield.ymmword)
+ i.xstate |= xstate_ymm;
+ else if (i.tm.operand_types[j].bitfield.xmmword)
+ i.xstate |= xstate_xmm;
+ break;
+ }
+ }
/* Make still unresolved immediate matches conform to size of immediate
given in i.suffix. */
&& !i.types[j].bitfield.xmmword)
i.reg_operands--;
- /* ImmExt should be processed after SSE2AVX. */
- if (!i.tm.opcode_modifier.sse2avx
- && i.tm.opcode_modifier.immext)
- process_immext ();
-
/* For insns with operands there are more diddles to do to the opcode. */
if (i.operands)
{
return;
}
+ /* Check for explicit REX prefix. */
+ if (i.prefix[REX_PREFIX] || i.rex_encoding)
+ {
+ as_bad (_("REX prefix invalid with `%s'"), i.tm.name);
+ return;
+ }
+
if (i.tm.opcode_modifier.vex)
build_vex_prefix (t);
else
build_evex_prefix ();
+
+ /* The individual REX.RXBW bits got consumed. */
+ i.rex &= REX_OPCODE;
}
/* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
}
/* Look up instruction (or prefix) via hash table. */
- current_templates = (const templates *) hash_find (op_hash, mnemonic);
+ current_templates = (const templates *) str_hash_find (op_hash, mnemonic);
if (*l != END_OF_INSN
&& (!is_space_char (*l) || l[1] != END_OF_INSN)
/* Handle pseudo prefixes. */
switch (current_templates->start->base_opcode)
{
- case 0x0:
+ case Prefix_Disp8:
/* {disp8} */
i.disp_encoding = disp_encoding_8bit;
break;
- case 0x1:
+ case Prefix_Disp16:
+ /* {disp16} */
+ i.disp_encoding = disp_encoding_16bit;
+ break;
+ case Prefix_Disp32:
/* {disp32} */
i.disp_encoding = disp_encoding_32bit;
break;
- case 0x2:
+ case Prefix_Load:
/* {load} */
i.dir_encoding = dir_encoding_load;
break;
- case 0x3:
+ case Prefix_Store:
/* {store} */
i.dir_encoding = dir_encoding_store;
break;
- case 0x4:
+ case Prefix_VEX:
/* {vex} */
i.vec_encoding = vex_encoding_vex;
break;
- case 0x5:
+ case Prefix_VEX3:
/* {vex3} */
i.vec_encoding = vex_encoding_vex3;
break;
- case 0x6:
+ case Prefix_EVEX:
/* {evex} */
i.vec_encoding = vex_encoding_evex;
break;
- case 0x7:
+ case Prefix_REX:
/* {rex} */
i.rex_encoding = TRUE;
break;
- case 0x8:
+ case Prefix_NoOptimize:
/* {nooptimize} */
i.no_optimize = TRUE;
break;
goto check_suffix;
mnem_p = dot_p;
*dot_p = '\0';
- current_templates = (const templates *) hash_find (op_hash, mnemonic);
+ current_templates = (const templates *) str_hash_find (op_hash, mnemonic);
}
if (!current_templates)
case QWORD_MNEM_SUFFIX:
i.suffix = mnem_p[-1];
mnem_p[-1] = '\0';
- current_templates = (const templates *) hash_find (op_hash,
- mnemonic);
+ current_templates
+ = (const templates *) str_hash_find (op_hash, mnemonic);
break;
case SHORT_MNEM_SUFFIX:
case LONG_MNEM_SUFFIX:
{
i.suffix = mnem_p[-1];
mnem_p[-1] = '\0';
- current_templates = (const templates *) hash_find (op_hash,
- mnemonic);
+ current_templates
+ = (const templates *) str_hash_find (op_hash, mnemonic);
}
break;
else
i.suffix = LONG_MNEM_SUFFIX;
mnem_p[-1] = '\0';
- current_templates = (const templates *) hash_find (op_hash,
- mnemonic);
+ current_templates
+ = (const templates *) str_hash_find (op_hash, mnemonic);
}
break;
}
}
/* Without VSIB byte, we can't have a vector register for index. */
- if (!t->opcode_modifier.vecsib
+ if (!t->opcode_modifier.sib
&& i.index_reg
&& (i.index_reg->reg_type.bitfield.xmmword
|| i.index_reg->reg_type.bitfield.ymmword
/* For VSIB byte, we need a vector register for index, and all vector
registers must be distinct. */
- if (t->opcode_modifier.vecsib)
+ if (t->opcode_modifier.sib && t->opcode_modifier.sib != SIBMEM)
{
if (!i.index_reg
- || !((t->opcode_modifier.vecsib == VecSIB128
+ || !((t->opcode_modifier.sib == VECSIB128
&& i.index_reg->reg_type.bitfield.xmmword)
- || (t->opcode_modifier.vecsib == VecSIB256
+ || (t->opcode_modifier.sib == VECSIB256
&& i.index_reg->reg_type.bitfield.ymmword)
- || (t->opcode_modifier.vecsib == VecSIB512
+ || (t->opcode_modifier.sib == VECSIB512
&& i.index_reg->reg_type.bitfield.zmmword)))
{
i.error = invalid_vsib_address;
}
}
+ /* For AMX instructions with three tmmword operands, all tmmword
+ operands must be distinct.  */
+ if (t->operand_types[0].bitfield.tmmword
+ && i.reg_operands == 3)
+ {
+ if (register_number (i.op[0].regs)
+ == register_number (i.op[1].regs)
+ || register_number (i.op[0].regs)
+ == register_number (i.op[2].regs)
+ || register_number (i.op[1].regs)
+ == register_number (i.op[2].regs))
+ {
+ i.error = invalid_tmm_register_set;
+ return 1;
+ }
+ }
+
/* Check if broadcast is supported by the instruction and is applied
to the memory operand. */
if (i.broadcast)
|| (operand_types[j].bitfield.class != RegMMX
&& operand_types[j].bitfield.class != RegSIMD
&& operand_types[j].bitfield.class != RegMask))
- && !t->opcode_modifier.vecsib)
+ && !t->opcode_modifier.sib)
continue;
/* Do not verify operands when there are none. */
as_bad (_("unsupported instruction `%s'"),
current_templates->start->name);
return NULL;
+ case invalid_sib_address:
+ err_msg = _("invalid SIB address");
+ break;
case invalid_vsib_address:
err_msg = _("invalid VSIB address");
break;
case invalid_vector_register_set:
err_msg = _("mask, index, and destination registers must be distinct");
break;
+ case invalid_tmm_register_set:
+ err_msg = _("all tmm registers must be distinct");
+ break;
case unsupported_vector_index_register:
err_msg = _("unsupported vector index register");
break;
&& !i.tm.opcode_modifier.no_lsuf
&& !i.tm.opcode_modifier.no_qsuf))
&& i.tm.opcode_modifier.mnemonicsize != IGNORESIZE
+ /* Explicit sizing prefixes are assumed to disambiguate insns. */
+ && !i.prefix[DATA_PREFIX] && !(i.prefix[REX_PREFIX] & REX_W)
/* Accept FLDENV et al without suffix. */
&& (i.tm.opcode_modifier.no_ssuf || i.tm.opcode_modifier.floatmf))
{
i.rex |= REX_W;
break;
+
+ case 0:
+ /* Select word/dword/qword operation with explicit data sizing prefix
+ when there are no suitable register operands. */
+ if (i.tm.opcode_modifier.w
+ && (i.prefix[DATA_PREFIX] || (i.prefix[REX_PREFIX] & REX_W))
+ && (!i.reg_operands
+ || (i.reg_operands == 1
+ /* ShiftCount */
+ && (i.tm.operand_types[0].bitfield.instance == RegC
+ /* InOutPortReg */
+ || i.tm.operand_types[0].bitfield.instance == RegD
+ || i.tm.operand_types[1].bitfield.instance == RegD
+ /* CRC32 */
+ || i.tm.base_opcode == 0xf20f38f0))))
+ i.tm.base_opcode |= 1;
+ break;
}
if (i.tm.opcode_modifier.addrprefixopreg)
else
overlap = imm32s;
}
+ else if (i.prefix[REX_PREFIX] & REX_W)
+ overlap = operand_type_and (overlap, imm32s);
+ else if (i.prefix[DATA_PREFIX])
+ overlap = operand_type_and (overlap,
+ flag_code != CODE_16BIT ? imm16 : imm32);
if (!operand_type_equal (&overlap, &imm8)
&& !operand_type_equal (&overlap, &imm8s)
&& !operand_type_equal (&overlap, &imm16)
unnecessary segment overrides. */
const seg_entry *default_seg = 0;
+ if (i.tm.opcode_modifier.sse2avx)
+ {
+ /* Legacy encoded insns allow explicit REX prefixes, so these prefixes
+ need converting. */
+ i.rex |= i.prefix[REX_PREFIX] & (REX_W | REX_R | REX_X | REX_B);
+ i.prefix[REX_PREFIX] = 0;
+ i.rex_encoding = 0;
+ }
+ /* ImmExt should be processed after SSE2AVX. */
+ else if (i.tm.opcode_modifier.immext)
+ process_immext ();
+
if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
{
unsigned int dupl = i.operands;
i.flags[j] = i.flags[j - 1];
}
i.op[0].regs
- = (const reg_entry *) hash_find (reg_hash, "xmm0");
+ = (const reg_entry *) str_hash_find (reg_hash, "xmm0");
i.types[0] = regxmm;
i.tm.operand_types[0] = regxmm;
return 1;
}
+/* Fold the REX/VREX bits implied by register R into the insn state.
+   REX_BIT is the bit (REX_R, REX_X or REX_B) corresponding to the
+   operand position.  A REX-extended register sets REX_BIT in i.rex,
+   diagnosing a duplicate explicit prefix.  Otherwise, when converting
+   for SSE2AVX (DO_SSE2AVX), an explicit REX bit already recorded is
+   re-applied to the VEX register specifier instead.  A VREX (EVEX
+   upper-bank) register additionally sets REX_BIT in i.vrex.  */
+static INLINE void set_rex_vrex (const reg_entry *r, unsigned int rex_bit,
+ bfd_boolean do_sse2avx)
+{
+ if (r->reg_flags & RegRex)
+ {
+ if (i.rex & rex_bit)
+ as_bad (_("same type of prefix used twice"));
+ i.rex |= rex_bit;
+ }
+ else if (do_sse2avx && (i.rex & rex_bit) && i.vex.register_specifier)
+ {
+ gas_assert (i.vex.register_specifier == r);
+ /* Shift the specifier into the upper register bank.  */
+ i.vex.register_specifier += 8;
+ }
+
+ if (r->reg_flags & RegVRex)
+ i.vrex |= rex_bit;
+}
+
static const seg_entry *
build_modrm_byte (void)
{
i386_operand_type op;
unsigned int vvvv;
- /* Check register-only source operand when two source
- operands are swapped. */
- if (!i.tm.operand_types[source].bitfield.baseindex
- && i.tm.operand_types[dest].bitfield.baseindex)
+ /* Swap two source operands if needed. */
+ if (i.tm.opcode_modifier.swapsources)
{
vvvv = source;
source = dest;
{
i.rm.reg = i.op[dest].regs->reg_num;
i.rm.regmem = i.op[source].regs->reg_num;
- if (i.op[dest].regs->reg_type.bitfield.class == RegMMX
- || i.op[source].regs->reg_type.bitfield.class == RegMMX)
- i.has_regmmx = TRUE;
- else if (i.op[dest].regs->reg_type.bitfield.class == RegSIMD
- || i.op[source].regs->reg_type.bitfield.class == RegSIMD)
- {
- if (i.types[dest].bitfield.zmmword
- || i.types[source].bitfield.zmmword)
- i.has_regzmm = TRUE;
- else if (i.types[dest].bitfield.ymmword
- || i.types[source].bitfield.ymmword)
- i.has_regymm = TRUE;
- else
- i.has_regxmm = TRUE;
- }
- if ((i.op[dest].regs->reg_flags & RegRex) != 0)
- i.rex |= REX_R;
- if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
- i.vrex |= REX_R;
- if ((i.op[source].regs->reg_flags & RegRex) != 0)
- i.rex |= REX_B;
- if ((i.op[source].regs->reg_flags & RegVRex) != 0)
- i.vrex |= REX_B;
+ set_rex_vrex (i.op[dest].regs, REX_R, i.tm.opcode_modifier.sse2avx);
+ set_rex_vrex (i.op[source].regs, REX_B, FALSE);
}
else
{
i.rm.reg = i.op[source].regs->reg_num;
i.rm.regmem = i.op[dest].regs->reg_num;
- if ((i.op[dest].regs->reg_flags & RegRex) != 0)
- i.rex |= REX_B;
- if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
- i.vrex |= REX_B;
- if ((i.op[source].regs->reg_flags & RegRex) != 0)
- i.rex |= REX_R;
- if ((i.op[source].regs->reg_flags & RegVRex) != 0)
- i.vrex |= REX_R;
+ set_rex_vrex (i.op[dest].regs, REX_B, i.tm.opcode_modifier.sse2avx);
+ set_rex_vrex (i.op[source].regs, REX_R, FALSE);
}
if (flag_code != CODE_64BIT && (i.rex & REX_R))
{
break;
gas_assert (op < i.operands);
- if (i.tm.opcode_modifier.vecsib)
+ if (i.tm.opcode_modifier.sib)
{
- if (i.index_reg->reg_num == RegIZ)
+ /* The index register of VSIB shouldn't be RegIZ. */
+ if (i.tm.opcode_modifier.sib != SIBMEM
+ && i.index_reg->reg_num == RegIZ)
abort ();
i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
i.types[op].bitfield.disp32s = 1;
}
}
- i.sib.index = i.index_reg->reg_num;
- if ((i.index_reg->reg_flags & RegRex) != 0)
- i.rex |= REX_X;
- if ((i.index_reg->reg_flags & RegVRex) != 0)
- i.vrex |= REX_X;
+
+ /* A mandatory SIB byte always has an index register, so the
+ logic here is unchanged.  A non-mandatory SIB without an
+ index register is allowed and will be handled later.  */
+ if (i.index_reg)
+ {
+ if (i.index_reg->reg_num == RegIZ)
+ i.sib.index = NO_INDEX_REGISTER;
+ else
+ i.sib.index = i.index_reg->reg_num;
+ set_rex_vrex (i.index_reg, REX_X, FALSE);
+ }
}
default_seg = &ds;
{
i386_operand_type newdisp;
- gas_assert (!i.tm.opcode_modifier.vecsib);
+ /* Both check for VSIB and mandatory non-vector SIB. */
+ gas_assert (!i.tm.opcode_modifier.sib
+ || i.tm.opcode_modifier.sib == SIBMEM);
/* Operand is just <disp> */
if (flag_code == CODE_64BIT)
{
i.types[op] = operand_type_and_not (i.types[op], anydisp);
i.types[op] = operand_type_or (i.types[op], newdisp);
}
- else if (!i.tm.opcode_modifier.vecsib)
+ else if (!i.tm.opcode_modifier.sib)
{
/* !i.base_reg && i.index_reg */
if (i.index_reg->reg_num == RegIZ)
/* RIP addressing for 64bit mode. */
else if (i.base_reg->reg_num == RegIP)
{
- gas_assert (!i.tm.opcode_modifier.vecsib);
+ gas_assert (!i.tm.opcode_modifier.sib);
i.rm.regmem = NO_BASE_REGISTER;
i.types[op].bitfield.disp8 = 0;
i.types[op].bitfield.disp16 = 0;
}
else if (i.base_reg->reg_type.bitfield.word)
{
- gas_assert (!i.tm.opcode_modifier.vecsib);
+ gas_assert (!i.tm.opcode_modifier.sib);
switch (i.base_reg->reg_num)
{
case 3: /* (%bx) */
if (operand_type_check (i.types[op], disp) == 0)
{
/* fake (%bp) into 0(%bp) */
- i.types[op].bitfield.disp8 = 1;
+ if (i.disp_encoding == disp_encoding_16bit)
+ i.types[op].bitfield.disp16 = 1;
+ else
+ i.types[op].bitfield.disp8 = 1;
fake_zero_displacement = 1;
}
}
default: /* (%si) -> 4 or (%di) -> 5 */
i.rm.regmem = i.base_reg->reg_num - 6 + 4;
}
+ if (!fake_zero_displacement
+ && !i.disp_operands
+ && i.disp_encoding)
+ {
+ fake_zero_displacement = 1;
+ if (i.disp_encoding == disp_encoding_8bit)
+ i.types[op].bitfield.disp8 = 1;
+ else
+ i.types[op].bitfield.disp16 = 1;
+ }
i.rm.mode = mode_from_disp_size (i.types[op]);
}
else /* i.base_reg and 32/64 bit mode */
}
}
- if (!i.tm.opcode_modifier.vecsib)
+ if (!i.tm.opcode_modifier.sib)
i.rm.regmem = i.base_reg->reg_num;
if ((i.base_reg->reg_flags & RegRex) != 0)
i.rex |= REX_B;
if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
{
fake_zero_displacement = 1;
- i.types[op].bitfield.disp8 = 1;
+ if (i.disp_encoding == disp_encoding_32bit)
+ i.types[op].bitfield.disp32 = 1;
+ else
+ i.types[op].bitfield.disp8 = 1;
}
i.sib.scale = i.log2_scale_factor;
if (i.index_reg == 0)
{
- gas_assert (!i.tm.opcode_modifier.vecsib);
+ /* Only check for VSIB. */
+ gas_assert (i.tm.opcode_modifier.sib != VECSIB128
+ && i.tm.opcode_modifier.sib != VECSIB256
+ && i.tm.opcode_modifier.sib != VECSIB512);
+
/* <disp>(%esp) becomes two byte modrm with no index
register. We've already stored the code for esp
in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
extra modrm byte. */
i.sib.index = NO_INDEX_REGISTER;
}
- else if (!i.tm.opcode_modifier.vecsib)
+ else if (!i.tm.opcode_modifier.sib)
{
if (i.index_reg->reg_num == RegIZ)
i.sib.index = NO_INDEX_REGISTER;
unsigned int vex_reg = ~0;
for (op = 0; op < i.operands; op++)
- {
- if (i.types[op].bitfield.class == Reg
- || i.types[op].bitfield.class == RegBND
- || i.types[op].bitfield.class == RegMask
- || i.types[op].bitfield.class == SReg
- || i.types[op].bitfield.class == RegCR
- || i.types[op].bitfield.class == RegDR
- || i.types[op].bitfield.class == RegTR)
- break;
- if (i.types[op].bitfield.class == RegSIMD)
- {
- if (i.types[op].bitfield.zmmword)
- i.has_regzmm = TRUE;
- else if (i.types[op].bitfield.ymmword)
- i.has_regymm = TRUE;
- else
- i.has_regxmm = TRUE;
- break;
- }
- if (i.types[op].bitfield.class == RegMMX)
- {
- i.has_regmmx = TRUE;
- break;
- }
- }
+ if (i.types[op].bitfield.class == Reg
+ || i.types[op].bitfield.class == RegBND
+ || i.types[op].bitfield.class == RegMask
+ || i.types[op].bitfield.class == SReg
+ || i.types[op].bitfield.class == RegCR
+ || i.types[op].bitfield.class == RegDR
+ || i.types[op].bitfield.class == RegTR
+ || i.types[op].bitfield.class == RegSIMD
+ || i.types[op].bitfield.class == RegMMX)
+ break;
if (vex_3_sources)
op = dest;
if (i.tm.extension_opcode != None)
{
i.rm.regmem = i.op[op].regs->reg_num;
- if ((i.op[op].regs->reg_flags & RegRex) != 0)
- i.rex |= REX_B;
- if ((i.op[op].regs->reg_flags & RegVRex) != 0)
- i.vrex |= REX_B;
+ set_rex_vrex (i.op[op].regs, REX_B,
+ i.tm.opcode_modifier.sse2avx);
}
else
{
i.rm.reg = i.op[op].regs->reg_num;
- if ((i.op[op].regs->reg_flags & RegRex) != 0)
- i.rex |= REX_R;
- if ((i.op[op].regs->reg_flags & RegVRex) != 0)
- i.vrex |= REX_R;
+ set_rex_vrex (i.op[op].regs, REX_R,
+ i.tm.opcode_modifier.sse2avx);
}
}
return default_seg;
}
+/* Emit one opcode byte: append BYTE to the current frag, or, when
+   assembling into the absolute section (where no output bytes may be
+   produced), just advance the section offset instead.  */
+static INLINE void
+frag_opcode_byte (unsigned char byte)
+{
+ if (now_seg != absolute_section)
+ FRAG_APPEND_1_CHAR (byte);
+ else
+ ++abs_section_offset;
+}
+
static unsigned int
flip_code16 (unsigned int code16)
{
symbolS *sym;
offsetT off;
+ if (now_seg == absolute_section)
+ {
+ as_bad (_("relaxable branches not supported in absolute section"));
+ return;
+ }
+
code16 = flag_code == CODE_16BIT ? CODE16 : 0;
size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
size = 1;
if (i.prefix[ADDR_PREFIX] != 0)
{
- FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
+ frag_opcode_byte (ADDR_PREFIX_OPCODE);
i.prefixes -= 1;
}
/* Pentium4 branch hints. */
if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
|| i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
{
- FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
+ frag_opcode_byte (i.prefix[SEG_PREFIX]);
i.prefixes--;
}
}
if (i.prefix[DATA_PREFIX] != 0)
{
- FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
+ frag_opcode_byte (DATA_PREFIX_OPCODE);
i.prefixes -= 1;
code16 ^= flip_code16(code16);
}
/* BND prefixed jump. */
if (i.prefix[BND_PREFIX] != 0)
{
- FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
+ frag_opcode_byte (i.prefix[BND_PREFIX]);
i.prefixes -= 1;
}
if (i.prefix[REX_PREFIX] != 0)
{
- FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
+ frag_opcode_byte (i.prefix[REX_PREFIX]);
i.prefixes -= 1;
}
if (i.prefixes != 0)
as_warn (_("skipping prefixes on `%s'"), i.tm.name);
+ if (now_seg == absolute_section)
+ {
+ abs_section_offset += i.tm.opcode_length + size;
+ return;
+ }
+
p = frag_more (i.tm.opcode_length + size);
switch (i.tm.opcode_length)
{
if (i.prefixes != 0)
as_warn (_("skipping prefixes on `%s'"), i.tm.name);
+ if (now_seg == absolute_section)
+ {
+ abs_section_offset += prefix + 1 + 2 + size;
+ return;
+ }
+
/* 1 opcode; 2 segment; offset */
p = frag_more (prefix + 1 + 2 + size);
enum mf_jcc_kind mf_jcc = mf_jcc_jo;
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
- if (IS_ELF && x86_used_note)
+ if (IS_ELF && x86_used_note && now_seg != absolute_section)
{
if (i.tm.cpu_flags.bitfield.cpucmov)
x86_isa_1_used |= GNU_PROPERTY_X86_ISA_1_CMOV;
|| i.tm.cpu_flags.bitfield.cpu687
|| i.tm.cpu_flags.bitfield.cpufisttp)
x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_X87;
- if (i.has_regmmx
+ if ((i.xstate & xstate_mmx)
|| i.tm.base_opcode == 0xf77 /* emms */
- || i.tm.base_opcode == 0xf0e /* femms */
- || i.tm.base_opcode == 0xf2a /* cvtpi2ps */
- || i.tm.base_opcode == 0x660f2a /* cvtpi2pd */)
+ || i.tm.base_opcode == 0xf0e /* femms */)
x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_MMX;
- if (i.has_regxmm)
+ if ((i.xstate & xstate_xmm)
+ || i.tm.cpu_flags.bitfield.cpuwidekl
+ || i.tm.cpu_flags.bitfield.cpukl)
x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XMM;
- if (i.has_regymm)
+ if ((i.xstate & xstate_ymm) == xstate_ymm)
x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_YMM;
- if (i.has_regzmm)
+ if ((i.xstate & xstate_zmm) == xstate_zmm)
x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_ZMM;
if (i.tm.cpu_flags.bitfield.cpufxsr)
x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_FXSR;
x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XSAVEOPT;
if (i.tm.cpu_flags.bitfield.cpuxsavec)
x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_XSAVEC;
+
+ if ((i.xstate & xstate_tmm) == xstate_tmm
+ || i.tm.cpu_flags.bitfield.cpuamx_tile)
+ x86_feature_2_used |= GNU_PROPERTY_X86_FEATURE_2_TMM;
}
#endif
&& (i.tm.base_opcode == 0xfaee8
|| i.tm.base_opcode == 0xfaef0
|| i.tm.base_opcode == 0xfaef8))
- {
- /* Encode lfence, mfence, and sfence as
- f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
- offsetT val = 0x240483f0ULL;
- p = frag_more (5);
- md_number_to_chars (p, val, 5);
- return;
- }
+ {
+ /* Encode lfence, mfence, and sfence as
+ f0 83 04 24 00 lock addl $0x0, (%{re}sp). */
+ if (now_seg != absolute_section)
+ {
+ offsetT val = 0x240483f0ULL;
+
+ p = frag_more (5);
+ md_number_to_chars (p, val, 5);
+ }
+ else
+ abs_section_offset += 5;
+ return;
+ }
/* Some processors fail on LOCK prefix. This options makes
assembler ignore LOCK prefix and serves as a workaround. */
/* The prefix bytes. */
for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
if (*q)
- FRAG_APPEND_1_CHAR (*q);
+ frag_opcode_byte (*q);
}
else
{
if (*q)
switch (j)
{
- case REX_PREFIX:
- /* REX byte is encoded in VEX prefix. */
- break;
case SEG_PREFIX:
case ADDR_PREFIX:
- FRAG_APPEND_1_CHAR (*q);
+ frag_opcode_byte (*q);
break;
default:
/* There should be no other prefixes for instructions
if (i.vrex)
abort ();
/* Now the VEX prefix. */
- p = frag_more (i.vex.length);
- for (j = 0; j < i.vex.length; j++)
- p[j] = i.vex.bytes[j];
+ if (now_seg != absolute_section)
+ {
+ p = frag_more (i.vex.length);
+ for (j = 0; j < i.vex.length; j++)
+ p[j] = i.vex.bytes[j];
+ }
+ else
+ abs_section_offset += i.vex.length;
}
/* Now the opcode; be careful about word order here! */
- if (i.tm.opcode_length == 1)
+ if (now_seg == absolute_section)
+ abs_section_offset += i.tm.opcode_length;
+ else if (i.tm.opcode_length == 1)
{
FRAG_APPEND_1_CHAR (i.tm.base_opcode);
}
/* Now the modrm byte and sib byte (if present). */
if (i.tm.opcode_modifier.modrm)
{
- FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
- | i.rm.reg << 3
- | i.rm.mode << 6));
+ frag_opcode_byte ((i.rm.regmem << 0)
+ | (i.rm.reg << 3)
+ | (i.rm.mode << 6));
/* If i.rm.regmem == ESP (4)
&& i.rm.mode != (Register mode)
&& not 16 bit
if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
&& i.rm.mode != 3
&& !(i.base_reg && i.base_reg->reg_type.bitfield.word))
- FRAG_APPEND_1_CHAR ((i.sib.base << 0
- | i.sib.index << 3
- | i.sib.scale << 6));
+ frag_opcode_byte ((i.sib.base << 0)
+ | (i.sib.index << 3)
+ | (i.sib.scale << 6));
}
if (i.disp_operands)
{
if (operand_type_check (i.types[n], disp))
{
- if (i.op[n].disps->X_op == O_constant)
+ int size = disp_size (n);
+
+ if (now_seg == absolute_section)
+ abs_section_offset += size;
+ else if (i.op[n].disps->X_op == O_constant)
{
- int size = disp_size (n);
offsetT val = i.op[n].disps->X_add_number;
val = offset_in_range (val >> (size == 1 ? i.memshift : 0),
else
{
enum bfd_reloc_code_real reloc_type;
- int size = disp_size (n);
int sign = i.types[n].bitfield.disp32s;
int pcrel = (i.flags[n] & Operand_PCrel) != 0;
fixS *fixP;
if (operand_type_check (i.types[n], imm))
{
- if (i.op[n].imms->X_op == O_constant)
+ int size = imm_size (n);
+
+ if (now_seg == absolute_section)
+ abs_section_offset += size;
+ else if (i.op[n].imms->X_op == O_constant)
{
- int size = imm_size (n);
offsetT val;
val = offset_in_range (i.op[n].imms->X_add_number,
non-absolute imms). Try to support other
sizes ... */
enum bfd_reloc_code_real reloc_type;
- int size = imm_size (n);
int sign;
if (i.types[n].bitfield.imm32s
&& current_templates->end[-1].operand_types[1]
.bitfield.baseindex))
op = 1;
- expected_reg = hash_find (reg_hash, di_si[addr_mode][op == es_op]);
+ expected_reg
+ = (const reg_entry *) str_hash_find (reg_hash,
+ di_si[addr_mode][op == es_op]);
}
else
- expected_reg = hash_find (reg_hash, bx[addr_mode]);
+ expected_reg
+ = (const reg_entry *)str_hash_find (reg_hash, bx[addr_mode]);
if (i.base_reg != expected_reg
|| i.index_reg
if (addr_mode != CODE_16BIT)
{
/* 32-bit/64-bit checks. */
+ if (i.disp_encoding == disp_encoding_16bit)
+ {
+ bad_disp:
+ as_bad (_("invalid `%s' prefix"),
+ addr_mode == CODE_16BIT ? "{disp32}" : "{disp16}");
+ return 0;
+ }
+
if ((i.base_reg
&& ((addr_mode == CODE_64BIT
? !i.base_reg->reg_type.bitfield.qword
|| !i.index_reg->reg_type.bitfield.baseindex)))
goto bad_address;
- /* bndmk, bndldx, and bndstx have special restrictions. */
+ /* bndmk, bndldx, bndstx and mandatory non-vector SIB have special restrictions. */
if (current_templates->start->base_opcode == 0xf30f1b
- || (current_templates->start->base_opcode & ~1) == 0x0f1a)
+ || (current_templates->start->base_opcode & ~1) == 0x0f1a
+ || current_templates->start->opcode_modifier.sib == SIBMEM)
{
/* They cannot use RIP-relative addressing. */
if (i.base_reg && i.base_reg->reg_num == RegIP)
}
/* bndldx and bndstx ignore their scale factor. */
- if (current_templates->start->base_opcode != 0xf30f1b
+ if ((current_templates->start->base_opcode & ~1) == 0x0f1a
&& i.log2_scale_factor)
as_warn (_("register scaling is being ignored here"));
}
else
{
/* 16-bit checks. */
+ if (i.disp_encoding == disp_encoding_32bit)
+ goto bad_disp;
+
if ((i.base_reg
&& (!i.base_reg->reg_type.bitfield.word
|| !i.base_reg->reg_type.bitfield.baseindex))
}
}
+ if (r->reg_type.bitfield.tmmword
+ && (!cpu_arch_flags.bitfield.cpuamx_tile
+ || flag_code != CODE_64BIT))
+ return FALSE;
+
if (r->reg_type.bitfield.class == RegBND && !cpu_arch_flags.bitfield.cpumpx)
return FALSE;
*end_op = s;
- r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
+ r = (const reg_entry *) str_hash_find (reg_hash, reg_name_given);
/* Handle floating point regs, allowing spaces in the (i) part. */
if (r == i386_regtab /* %st is first entry of table */)
if (*s == ')')
{
*end_op = s + 1;
- r = (const reg_entry *) hash_find (reg_hash, "st(0)");
+ r = (const reg_entry *) str_hash_find (reg_hash, "st(0)");
know (r);
return r + fpr;
}
if (symbol_find (name))
as_bad (_("GOT already in symbol table"));
GOT_symbol = symbol_new (name, undefined_section,
- (valueT) 0, &zero_address_frag);
+ &zero_address_frag, 0);
};
return GOT_symbol;
}
}
}
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
- else if (!object_64bit)
+ else
{
- if (fixp->fx_r_type == BFD_RELOC_386_GOT32
- && fixp->fx_tcbit2)
- fixp->fx_r_type = BFD_RELOC_386_GOT32X;
+      /* NB: Commit 292676c1 resolved PLT32 reloc against local symbol
+ to section. Since PLT32 relocation must be against symbols,
+ turn such PLT32 relocation into PC32 relocation. */
+ if (fixp->fx_addsy
+ && (fixp->fx_r_type == BFD_RELOC_386_PLT32
+ || fixp->fx_r_type == BFD_RELOC_X86_64_PLT32)
+ && symbol_section_p (fixp->fx_addsy))
+ fixp->fx_r_type = BFD_RELOC_32_PCREL;
+ if (!object_64bit)
+ {
+ if (fixp->fx_r_type == BFD_RELOC_386_GOT32
+ && fixp->fx_tcbit2)
+ fixp->fx_r_type = BFD_RELOC_386_GOT32X;
+ }
}
#endif
}