static bool optimize_disp (const insn_template *t);
static const insn_template *match_template (char);
static int check_string (void);
-static int process_suffix (void);
+static int process_suffix (const insn_template *);
static int check_byte_reg (void);
static int check_long_reg (void);
static int check_qword_reg (void);
if (use_unaligned_vector_move)
encode_with_unaligned_vector_move ();
- if (!process_suffix ())
+ if (!process_suffix (t))
return;
/* Check if IP-relative addressing requirements can be satisfied. */
if (operand_type_check (i.types[op], disp)
&& i.op[op].disps->X_op == O_constant)
{
+ /* Make sure to leave i.types[op].bitfield.disp8 alone upon
+ secondary invocations of match_template(). */
if (fits_in_disp8 (i.op[op].disps->X_add_number))
{
- i.types[op].bitfield.disp8 = 1;
+ if (!i.tm.mnem_off)
+ i.types[op].bitfield.disp8 = 1;
return 0;
}
- i.types[op].bitfield.disp8 = 0;
+ if (!i.tm.mnem_off)
+ i.types[op].bitfield.disp8 = 0;
}
}
return NULL;
}
+ /* Don't emit diagnostics or install the template when one was already
+ installed, i.e. when called from process_suffix(). */
+ if (i.tm.mnem_off)
+ return t;
+
if (!quiet_warnings)
{
if (!intel_syntax
}
static int
-process_suffix (void)
+process_suffix (const insn_template *t)
{
bool is_movx = false;
if (flag_code == CODE_64BIT && !i.tm.opcode_modifier.no_qsuf)
suffixes |= 1 << 5;
+ /* Operand size may be ambiguous only across multiple templates. Avoid
+ the extra effort though if we already know that multiple suffixes /
+ operand sizes are allowed. Also limit this to non-SIMD operand sizes
+ (i.e. ones expressible via suffixes) for now.
+ There's one special case though that needs excluding: Insns taking
+ Disp<N> operands also match templates permitting BaseIndex. JMP in
+ particular would thus wrongly trigger the check further down. Cover
+ JUMP_DWORD insns here as well, just in case. */
+ if (i.tm.opcode_modifier.jump != JUMP
+ && i.tm.opcode_modifier.jump != JUMP_DWORD)
+ while (!(suffixes & (suffixes - 1)))
+ {
+ /* Sadly check_VecOperands(), running ahead of install_template(),
+ may update i.memshift. Save and restore the value here. */
+ unsigned int memshift = i.memshift;
+
+ current_templates.start = t + 1;
+ t = match_template (0);
+ i.memshift = memshift;
+ if (t == NULL)
+ break;
+ if (!t->opcode_modifier.no_bsuf)
+ suffixes |= 1 << 0;
+ if (!t->opcode_modifier.no_wsuf)
+ suffixes |= 1 << 1;
+ if (!t->opcode_modifier.no_lsuf)
+ suffixes |= 1 << 2;
+ if (!t->opcode_modifier.no_ssuf)
+ suffixes |= 1 << 4;
+ if (flag_code == CODE_64BIT && !t->opcode_modifier.no_qsuf)
+ suffixes |= 1 << 5;
+ }
+
/* For [XYZ]MMWORD operands inspect operand sizes. While generally
also suitable for AT&T syntax mode, it was requested that this be
restricted to just Intel syntax. */