/* tc-arm.c -- Assemble for the ARM
- Copyright (C) 1994-2020 Free Software Foundation, Inc.
+ Copyright (C) 1994-2021 Free Software Foundation, Inc.
Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
Modified by David Taylor (dtaylor@armltd.co.uk)
Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
{
const char * error;
unsigned long instruction;
- int size;
- int size_req;
- int cond;
+ unsigned int size;
+ unsigned int size_req;
+ unsigned int cond;
/* "uncond_value" is set to the value in place of the conditional field in
- unconditional versions of the instruction, or -1 if nothing is
+ unconditional versions of the instruction, or -1u if nothing is
appropriate. */
- int uncond_value;
+ unsigned int uncond_value;
struct neon_type vectype;
/* This does not indicate an actual NEON instruction, only that
the mnemonic accepts neon-style type suffixes. */
#define BAD_EL_TYPE _("bad element type for instruction")
#define MVE_BAD_QREG _("MVE vector register Q[0..7] expected")
-static struct hash_control * arm_ops_hsh;
-static struct hash_control * arm_cond_hsh;
-static struct hash_control * arm_vcond_hsh;
-static struct hash_control * arm_shift_hsh;
-static struct hash_control * arm_psr_hsh;
-static struct hash_control * arm_v7m_psr_hsh;
-static struct hash_control * arm_reg_hsh;
-static struct hash_control * arm_reloc_hsh;
-static struct hash_control * arm_barrier_opt_hsh;
+static htab_t arm_ops_hsh;
+static htab_t arm_cond_hsh;
+static htab_t arm_vcond_hsh;
+static htab_t arm_shift_hsh;
+static htab_t arm_psr_hsh;
+static htab_t arm_v7m_psr_hsh;
+static htab_t arm_reg_hsh;
+static htab_t arm_reloc_hsh;
+static htab_t arm_barrier_opt_hsh;
/* Stuff needed to resolve the label ambiguity
As:
p++;
while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
- reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
+ reg = (struct reg_entry *) str_hash_find_n (arm_reg_hsh, start, p - start);
if (!reg)
return NULL;
do
{
- int setmask = 1, addregs = 1;
+ unsigned int setmask = 1, addregs = 1;
const char vpr_str[] = "vpr";
- int vpr_str_len = strlen (vpr_str);
+ size_t vpr_str_len = strlen (vpr_str);
new_base = arm_typed_reg_parse (&str, regtype, ®type, NULL);
return -1;
if ((r = (struct reloc_entry *)
- hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
+ str_hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
return -1;
*str = q + 1;
struct reg_entry *new_reg;
const char *name;
- if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
+ if ((new_reg = (struct reg_entry *) str_hash_find (arm_reg_hsh, str)) != 0)
{
if (new_reg->builtin)
as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
new_reg->builtin = FALSE;
new_reg->neon = NULL;
- if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
- abort ();
+ str_hash_insert (arm_reg_hsh, name, new_reg, 0);
return new_reg;
}
if (*oldname == '\0')
return FALSE;
- old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
+ old = (struct reg_entry *) str_hash_find (arm_reg_hsh, oldname);
if (!old)
{
as_warn (_("unknown register '%s' -- .req ignored"), oldname);
as_bad (_("invalid syntax for .unreq directive"));
else
{
- struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
- name);
+ struct reg_entry *reg
+ = (struct reg_entry *) str_hash_find (arm_reg_hsh, name);
if (!reg)
as_bad (_("unknown register alias '%s'"), name);
char * p;
char * nbuf;
- hash_delete (arm_reg_hsh, name, FALSE);
+ str_hash_delete (arm_reg_hsh, name);
free ((char *) reg->name);
free (reg->neon);
free (reg);
nbuf = strdup (name);
for (p = nbuf; *p; p++)
*p = TOUPPER (*p);
- reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
+ reg = (struct reg_entry *) str_hash_find (arm_reg_hsh, nbuf);
if (reg)
{
- hash_delete (arm_reg_hsh, nbuf, FALSE);
+ str_hash_delete (arm_reg_hsh, nbuf);
free ((char *) reg->name);
free (reg->neon);
free (reg);
for (p = nbuf; *p; p++)
*p = TOLOWER (*p);
- reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
+ reg = (struct reg_entry *) str_hash_find (arm_reg_hsh, nbuf);
if (reg)
{
- hash_delete (arm_reg_hsh, nbuf, FALSE);
+ str_hash_delete (arm_reg_hsh, nbuf);
free ((char *) reg->name);
free (reg->neon);
free (reg);
abort ();
}
- symbolP = symbol_new (symname, now_seg, value, frag);
+ symbolP = symbol_new (symname, now_seg, frag, value);
symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
switch (state)
memset (dummy_frag, 0, sizeof (fragS));
dummy_frag->fr_type = rs_fill;
dummy_frag->line = listing_tail;
- symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
+ symbolP = symbol_new (name, undefined_section, dummy_frag, 0);
dummy_frag->fr_symbol = symbolP;
}
else
#endif
- symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
+ symbolP = symbol_new (name, undefined_section, &zero_address_frag, 0);
#ifdef OBJ_COFF
/* "set" symbols are local unless otherwise specified. */
if (pool->symbol == NULL)
{
pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
- (valueT) 0, &zero_address_frag);
+ &zero_address_frag, 0);
pool->id = latest_pool_num ++;
}
inst.operands[i].imm = 0;
for (j = 0; j < parts; j++, idx++)
- inst.operands[i].imm |= generic_bignum[idx]
- << (LITTLENUM_NUMBER_OF_BITS * j);
+ inst.operands[i].imm |= ((unsigned) generic_bignum[idx]
+ << (LITTLENUM_NUMBER_OF_BITS * j));
inst.operands[i].reg = 0;
for (j = 0; j < parts; j++, idx++)
- inst.operands[i].reg |= generic_bignum[idx]
- << (LITTLENUM_NUMBER_OF_BITS * j);
+ inst.operands[i].reg |= ((unsigned) generic_bignum[idx]
+ << (LITTLENUM_NUMBER_OF_BITS * j));
inst.operands[i].regisimm = 1;
}
else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
return FAIL;
}
- shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
- p - *str);
+ shift_name
+ = (const struct asm_shift_name *) str_hash_find_n (arm_shift_hsh, *str,
+ p - *str);
if (shift_name == NULL)
{
if (skip_past_char (&p, '[') == FAIL)
{
- if (skip_past_char (&p, '=') == FAIL)
+ if (group_type == GROUP_MVE
+ && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
+ {
+ /* [r0-r15] expected as argument but receiving r0-r15 without
+ [] brackets. */
+ inst.error = BAD_SYNTAX;
+ return PARSE_OPERAND_FAIL;
+ }
+ else if (skip_past_char (&p, '=') == FAIL)
{
/* Bare address - translate to PC-relative offset. */
inst.relocs[0].pc_rel = 1;
|| strncasecmp (start, "psr", 3) == 0)
p = start + strcspn (start, "rR") + 1;
- psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
- p - start);
+ psr = (const struct asm_psr *) str_hash_find_n (arm_v7m_psr_hsh, start,
+ p - start);
if (!psr)
return FAIL;
}
else
{
- psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
- p - start);
+ psr = (const struct asm_psr *) str_hash_find_n (arm_psr_hsh, start,
+ p - start);
if (!psr)
goto error;
n++;
}
- c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
+ c = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, cond, n);
if (!c)
{
inst.error = _("condition required");
while (ISALPHA (*q))
q++;
- o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
- q - p);
+ o = (const struct asm_barrier_opt *) str_hash_find_n (arm_barrier_opt_hsh, p,
+ q - p);
if (!o)
return FAIL;
for (i = 1; i <= 24; i++)
{
a = val >> i;
- if ((val & ~(0xff << i)) == 0)
+ if ((val & ~(0xffU << i)) == 0)
return ((val >> i) & 0x7f) | ((32 - i) << 7);
}
to single precision without loss of accuracy. */
static bfd_boolean
-is_double_a_single (bfd_int64_t v)
+is_double_a_single (bfd_uint64_t v)
{
- int exp = (int)((v >> 52) & 0x7FF);
- bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
+ int exp = (v >> 52) & 0x7FF;
+ bfd_uint64_t mantissa = v & 0xFFFFFFFFFFFFFULL;
- return (exp == 0 || exp == 0x7FF
- || (exp >= 1023 - 126 && exp <= 1023 + 127))
- && (mantissa & 0x1FFFFFFFl) == 0;
+ return ((exp == 0 || exp == 0x7FF
+ || (exp >= 1023 - 126 && exp <= 1023 + 127))
+ && (mantissa & 0x1FFFFFFFL) == 0);
}
/* Returns a double precision value casted to single precision
(ignoring the least significant bits in exponent and mantissa). */
static int
-double_to_single (bfd_int64_t v)
+double_to_single (bfd_uint64_t v)
{
- int sign = (int) ((v >> 63) & 1l);
- int exp = (int) ((v >> 52) & 0x7FF);
- bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
+ unsigned int sign = (v >> 63) & 1;
+ int exp = (v >> 52) & 0x7FF;
+ bfd_uint64_t mantissa = v & 0xFFFFFFFFFFFFFULL;
if (exp == 0x7FF)
exp = 0xFF;
|| inst.relocs[0].exp.X_op == O_big)
{
#if defined BFD_HOST_64_BIT
- bfd_int64_t v;
+ bfd_uint64_t v;
#else
- offsetT v;
+ valueT v;
#endif
if (inst.relocs[0].exp.X_op == O_big)
{
l = generic_bignum;
#if defined BFD_HOST_64_BIT
- v =
- ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
- << LITTLENUM_NUMBER_OF_BITS)
- | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
- << LITTLENUM_NUMBER_OF_BITS)
- | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
- << LITTLENUM_NUMBER_OF_BITS)
- | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
+ v = l[3] & LITTLENUM_MASK;
+ v <<= LITTLENUM_NUMBER_OF_BITS;
+ v |= l[2] & LITTLENUM_MASK;
+ v <<= LITTLENUM_NUMBER_OF_BITS;
+ v |= l[1] & LITTLENUM_MASK;
+ v <<= LITTLENUM_NUMBER_OF_BITS;
+ v |= l[0] & LITTLENUM_MASK;
#else
- v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
- | (l[0] & LITTLENUM_MASK);
+ v = l[1] & LITTLENUM_MASK;
+ v <<= LITTLENUM_NUMBER_OF_BITS;
+ v |= l[0] & LITTLENUM_MASK;
#endif
}
else
/* Check if on thumb2 it can be done with a mov.w, mvn or
movw instruction. */
unsigned int newimm;
- bfd_boolean isNegated;
+ bfd_boolean isNegated = FALSE;
newimm = encode_thumb32_immediate (v);
- if (newimm != (unsigned int) FAIL)
- isNegated = FALSE;
- else
+ if (newimm == (unsigned int) FAIL)
{
newimm = encode_thumb32_immediate (~v);
- if (newimm != (unsigned int) FAIL)
- isNegated = TRUE;
+ isNegated = TRUE;
}
/* The number can be loaded with a mov.w or mvn
{
const struct asm_opcode *opcode;
- opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
+ opcode = (const struct asm_opcode *) str_hash_find (arm_ops_hsh, opname);
if (!opcode)
abort ();
first_error (_(BAD_COND));
return FAIL;
}
- if (inst.uncond_value != -1)
+ if (inst.uncond_value != -1u)
inst.instruction |= inst.uncond_value << 28;
}
return;
}
+ if ((rs == NS_FD || rs == NS_QQI) && mode == neon_cvt_mode_n
+ && ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
+ {
+ /* We are dealing with vcvt with the 'ne' condition. */
+ inst.cond = 0x1;
+ inst.instruction = N_MNEM_vcvt;
+ do_neon_cvt_1 (neon_cvt_mode_z);
+ return;
+ }
+
/* VFP rather than Neon conversions. */
if (flavour >= neon_cvt_flavour_first_fp)
{
NEON_CHECK_CC | NEON_CHECK_ARCH))
return;
}
- else if (mode == neon_cvt_mode_n)
- {
- /* We are dealing with vcvt with the 'ne' condition. */
- inst.cond = 0x1;
- inst.instruction = N_MNEM_vcvt;
- do_neon_cvt_1 (neon_cvt_mode_z);
- return;
- }
/* fall through. */
case NS_DDI:
{
else
rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
+ if (rs == NS_NULL)
+ return;
+
NEON_ENCODE (INTEGER, inst);
inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
inst.instruction |= HI1 (inst.operands[0].reg) << 22;
do_neon_swp (void)
{
enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
+ if (rs == NS_NULL)
+ return;
neon_two_same (neon_quad (rs), 1, -1);
}
&& mark_feature_used (&armv8m_fp))
&& !mark_feature_used (&mve_ext),
_("vcx instructions with S or D registers require either MVE"
- " or Armv8-M floating point etension."));
+ " or Armv8-M floating point extension."));
}
static void
*str = end;
/* Look for unaffixed or special-case affixed mnemonic. */
- opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
- end - base);
+ opcode = (const struct asm_opcode *) str_hash_find_n (arm_ops_hsh, base,
+ end - base);
+ cond = NULL;
if (opcode)
{
/* step U */
if (warn_on_deprecated && unified_syntax)
as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
affix = base + (opcode->tag - OT_odd_infix_0);
- cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
+ cond = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, affix, 2);
gas_assert (cond);
inst.cond = cond->value;
if (end - base < 2)
return NULL;
affix = end - 1;
- cond = (const struct asm_cond *) hash_find_n (arm_vcond_hsh, affix, 1);
- opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
- affix - base);
+ cond = (const struct asm_cond *) str_hash_find_n (arm_vcond_hsh, affix, 1);
+ opcode = (const struct asm_opcode *) str_hash_find_n (arm_ops_hsh, base,
+ affix - base);
/* If this opcode can not be vector predicated then don't accept it with a
vector predication code. */
if (opcode && !opcode->mayBeVecPred)
/* Look for suffixed mnemonic. */
affix = end - 2;
- cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
- opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
- affix - base);
+ cond = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, affix, 2);
+ opcode = (const struct asm_opcode *) str_hash_find_n (arm_ops_hsh, base,
+ affix - base);
}
if (opcode && cond)
/* Look for infixed mnemonic in the usual position. */
affix = base + 3;
- cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
+ cond = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, affix, 2);
if (!cond)
return NULL;
memcpy (save, affix, 2);
memmove (affix, affix + 2, (end - affix) - 2);
- opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
- (end - base) - 2);
+ opcode = (const struct asm_opcode *) str_hash_find_n (arm_ops_hsh, base,
+ (end - base) - 2);
memmove (affix + 2, affix, (end - affix) - 2);
memcpy (affix, save, 2);
case MANUAL_PRED_BLOCK:
{
- int cond, is_last;
+ unsigned int cond;
+ int is_last;
if (now_pred.type == SCALAR_PRED)
{
/* Check conditional suffixes. */
/* The value which unconditional instructions should have in place of the
condition field. */
- inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
+ inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1u;
if (thumb_mode)
{
#define ARM_VARIANT & fpu_vfp_ext_v1
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6t2
- mnCEF(vmla, _vmla, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), neon_mac_maybe_scalar),
- mnCEF(vmul, _vmul, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), neon_mul),
mcCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
#undef ARM_VARIANT
#define ARM_VARIANT & fpu_vfp_ext_v1xd
+ mnCEF(vmla, _vmla, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), neon_mac_maybe_scalar),
+ mnCEF(vmul, _vmul, 3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), neon_mul),
MNCE(vmov, 0, 1, (VMOV), neon_mov),
mcCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
mcCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
for (p = nbuf; *p; p++)
*p = TOLOWER (*p);
- if (hash_find (arm_ops_hsh, nbuf) != NULL)
+ if (str_hash_find (arm_ops_hsh, nbuf) != NULL)
{
- static struct hash_control * already_warned = NULL;
+ static htab_t already_warned = NULL;
if (already_warned == NULL)
- already_warned = hash_new ();
+ already_warned = str_htab_create ();
/* Only warn about the symbol once. To keep the code
- simple we let hash_insert do the lookup for us. */
- if (hash_insert (already_warned, nbuf, NULL) == NULL)
- as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
+	     simple we look it up with str_hash_find before inserting.  */
+ if (str_hash_find (already_warned, nbuf) == NULL)
+ {
+ as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
+ str_hash_insert (already_warned, nbuf, NULL, 0);
+ }
}
else
free (nbuf);
as_bad (_("GOT already in the symbol table"));
GOT_symbol = symbol_new (name, undefined_section,
- (valueT) 0, & zero_address_frag);
+ &zero_address_frag, 0);
}
return GOT_symbol;
/* Like negate_data_op, but for Thumb-2. */
static unsigned int
-thumb32_negate_data_op (offsetT *instruction, unsigned int value)
+thumb32_negate_data_op (valueT *instruction, unsigned int value)
{
- int op, new_inst;
- int rd;
+ unsigned int op, new_inst;
+ unsigned int rd;
unsigned int negated, inverted;
negated = encode_thumb32_immediate (-value);
valueT * valP,
segT seg)
{
- offsetT value = * valP;
- offsetT newval;
+ valueT value = * valP;
+ valueT newval;
unsigned int newimm;
unsigned long temp;
int sign;
temp = md_chars_to_number (buf, INSN_SIZE);
/* If the offset is negative, we should use encoding A2 for ADR. */
- if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
+ if ((temp & 0xfff0000) == 0x28f0000 && (offsetT) value < 0)
newimm = negate_data_op (&temp, value);
else
{
&& ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
&& ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
&& !((temp >> SBIT_SHIFT) & 0x1)
- && value >= 0 && value <= 0xffff)
+ && value <= 0xffff)
{
/* Clear bits[23:20] to change encoding from A1 to A2. */
temp &= 0xff0fffff;
/* Fall through. */
case BFD_RELOC_ARM_LITERAL:
- sign = value > 0;
+ sign = (offsetT) value > 0;
- if (value < 0)
+ if ((offsetT) value < 0)
value = - value;
if (validate_offset_imm (value, 0) == FAIL)
case BFD_RELOC_ARM_OFFSET_IMM8:
case BFD_RELOC_ARM_HWLITERAL:
- sign = value > 0;
+ sign = (offsetT) value > 0;
- if (value < 0)
+ if ((offsetT) value < 0)
value = - value;
if (validate_offset_imm (value, 1) == FAIL)
break;
case BFD_RELOC_ARM_T32_OFFSET_U8:
- if (value < 0 || value > 1020 || value % 4 != 0)
+ if (value > 1020 || value % 4 != 0)
as_bad_where (fixP->fx_file, fixP->fx_line,
_("bad immediate value for offset (%ld)"), (long) value);
value /= 4;
if ((newval & 0xf0000000) == 0xe0000000)
{
/* Doubleword load/store: 8-bit offset, scaled by 4. */
- if (value >= 0)
+ if ((offsetT) value >= 0)
newval |= (1 << 23);
else
value = -value;
else if ((newval & 0x000f0000) == 0x000f0000)
{
/* PC-relative, 12-bit offset. */
- if (value >= 0)
+ if ((offsetT) value >= 0)
newval |= (1 << 23);
else
value = -value;
else if ((newval & 0x00000100) == 0x00000100)
{
/* Writeback: 8-bit, +/- offset. */
- if (value >= 0)
+ if ((offsetT) value >= 0)
newval |= (1 << 9);
else
value = -value;
else if ((newval & 0x00000f00) == 0x00000e00)
{
/* T-instruction: positive 8-bit offset. */
- if (value < 0 || value > 0xff)
+ if (value > 0xff)
{
as_bad_where (fixP->fx_file, fixP->fx_line,
_("offset out of range"));
else
{
/* Positive 12-bit or negative 8-bit offset. */
- int limit;
- if (value >= 0)
+ unsigned int limit;
+ if ((offsetT) value >= 0)
{
newval |= (1 << 23);
limit = 0xfff;
case BFD_RELOC_ARM_SHIFT_IMM:
newval = md_chars_to_number (buf, INSN_SIZE);
- if (((unsigned long) value) > 32
+ if (value > 32
|| (value == 32
&& (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
{
if ((newval & 0x00100000) == 0)
{
/* 12 bit immediate for addw/subw. */
- if (value < 0)
+ if ((offsetT) value < 0)
{
value = -value;
newval ^= 0x00a00000;
&& (((newval >> 16) & 0xf) == 0xf)
&& ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
&& !((newval >> T2_SBIT_SHIFT) & 0x1)
- && value >= 0 && value <= 0xffff)
+ && value <= 0xffff)
{
/* Toggle bit[25] to change encoding from T2 to T3. */
newval ^= 1 << 25;
break;
case BFD_RELOC_ARM_SMC:
- if (((unsigned long) value) > 0xf)
+ if (value > 0xf)
as_bad_where (fixP->fx_file, fixP->fx_line,
_("invalid smc expression"));
break;
case BFD_RELOC_ARM_HVC:
- if (((unsigned long) value) > 0xffff)
+ if (value > 0xffff)
as_bad_where (fixP->fx_file, fixP->fx_line,
_("invalid hvc expression"));
newval = md_chars_to_number (buf, INSN_SIZE);
case BFD_RELOC_ARM_SWI:
if (fixP->tc_fix_data != 0)
{
- if (((unsigned long) value) > 0xff)
+ if (value > 0xff)
as_bad_where (fixP->fx_file, fixP->fx_line,
_("invalid swi expression"));
newval = md_chars_to_number (buf, THUMB_SIZE);
}
else
{
- if (((unsigned long) value) > 0x00ffffff)
+ if (value > 0x00ffffff)
as_bad_where (fixP->fx_file, fixP->fx_line,
_("invalid swi expression"));
newval = md_chars_to_number (buf, INSN_SIZE);
break;
case BFD_RELOC_ARM_MULTI:
- if (((unsigned long) value) > 0xffff)
+ if (value > 0xffff)
as_bad_where (fixP->fx_file, fixP->fx_line,
_("invalid expression in load/store multiple"));
newval = value | md_chars_to_number (buf, INSN_SIZE);
if (value & temp)
as_bad_where (fixP->fx_file, fixP->fx_line,
_("misaligned branch destination"));
- if ((value & (offsetT)0xfe000000) != (offsetT)0
- && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
+ if ((value & 0xfe000000) != 0
+ && (value & 0xfe000000) != 0xfe000000)
as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
if (fixP->fx_done || !seg->use_rela_p)
FIXME: It may be better to remove the instruction completely and
perform relaxation. */
- if (value == -2)
+ if ((offsetT) value == -2)
{
newval = md_chars_to_number (buf, THUMB_SIZE);
newval = 0xbf00; /* NOP encoding T1 */
if ((newval & 0x0f200f00) == 0x0d000900)
{
/* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
- has permitted values that are multiples of 2, in the range 0
+ has permitted values that are multiples of 2, in the range -510
to 510. */
- if (value < -510 || value > 510 || (value & 1))
+ if (value + 510 > 510 + 510 || (value & 1))
as_bad_where (fixP->fx_file, fixP->fx_line,
_("co-processor offset out of range"));
}
else if ((newval & 0xfe001f80) == 0xec000f80)
{
- if (value < -511 || value > 512 || (value & 3))
+ if (value + 511 > 512 + 511 || (value & 3))
as_bad_where (fixP->fx_file, fixP->fx_line,
_("co-processor offset out of range"));
}
- else if (value < -1023 || value > 1023 || (value & 3))
+ else if (value + 1023 > 1023 + 1023 || (value & 3))
as_bad_where (fixP->fx_file, fixP->fx_line,
_("co-processor offset out of range"));
cp_off_common:
- sign = value > 0;
- if (value < 0)
+ sign = (offsetT) value > 0;
+ if ((offsetT) value < 0)
value = -value;
if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
|| fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
case BFD_RELOC_ARM_CP_OFF_IMM_S2:
case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
- if (value < -255 || value > 255)
+ if (value + 255 > 255 + 255)
as_bad_where (fixP->fx_file, fixP->fx_line,
_("co-processor offset out of range"));
value *= 4;
_("invalid Hi register with immediate"));
/* If value is negative, choose the opposite instruction. */
- if (value < 0)
+ if ((offsetT) value < 0)
{
value = -value;
subtract = !subtract;
- if (value < 0)
+ if ((offsetT) value < 0)
as_bad_where (fixP->fx_file, fixP->fx_line,
_("immediate value out of range"));
}
case BFD_RELOC_ARM_THUMB_IMM:
newval = md_chars_to_number (buf, THUMB_SIZE);
- if (value < 0 || value > 255)
+ if (value > 255)
as_bad_where (fixP->fx_file, fixP->fx_line,
_("invalid immediate: %ld is out of range"),
(long) value);
/* 5bit shift value (0..32). LSL cannot take 32. */
newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
temp = newval & 0xf800;
- if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
+ if (value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
as_bad_where (fixP->fx_file, fixP->fx_line,
_("invalid shift value: %ld"), (long) value);
/* Shifts of zero must be encoded as LSL. */
/* REL format relocations are limited to a 16-bit addend. */
if (!fixP->fx_done)
{
- if (value < -0x8000 || value > 0x7fff)
+ if (value + 0x8000 > 0x7fff + 0x8000)
as_bad_where (fixP->fx_file, fixP->fx_line,
_("offset out of range"));
}
bfd_vma encoded_addend = value;
/* Check that addend can be encoded in instruction. */
- if (!seg->use_rela_p && (value < 0 || value > 255))
+ if (!seg->use_rela_p && value > 255)
as_bad_where (fixP->fx_file, fixP->fx_line,
_("the offset 0x%08lX is not representable"),
(unsigned long) encoded_addend);
{
bfd_vma insn;
bfd_vma encoded_addend;
- bfd_vma addend_abs = llabs (value);
+ bfd_vma addend_abs = llabs ((offsetT) value);
/* Check that the absolute value of the addend can be
expressed as an 8-bit constant plus a rotation. */
/* If the addend is positive, use an ADD instruction.
Otherwise use a SUB. Take care not to destroy the S bit. */
insn &= 0xff1fffff;
- if (value < 0)
+ if ((offsetT) value < 0)
insn |= 1 << 22;
else
insn |= 1 << 23;
if (!seg->use_rela_p)
{
bfd_vma insn;
- bfd_vma addend_abs = llabs (value);
+ bfd_vma addend_abs = llabs ((offsetT) value);
/* Check that the absolute value of the addend can be
encoded in 12 bits. */
/* If the addend is negative, clear bit 23 of the instruction.
Otherwise set it. */
- if (value < 0)
+ if ((offsetT) value < 0)
insn &= ~(1 << 23);
else
insn |= 1 << 23;
if (!seg->use_rela_p)
{
bfd_vma insn;
- bfd_vma addend_abs = llabs (value);
+ bfd_vma addend_abs = llabs ((offsetT) value);
/* Check that the absolute value of the addend can be
encoded in 8 bits. */
/* If the addend is negative, clear bit 23 of the instruction.
Otherwise set it. */
- if (value < 0)
+ if ((offsetT) value < 0)
insn &= ~(1 << 23);
else
insn |= 1 << 23;
if (!seg->use_rela_p)
{
bfd_vma insn;
- bfd_vma addend_abs = llabs (value);
+ bfd_vma addend_abs = llabs ((offsetT) value);
/* Check that the absolute value of the addend is a multiple of
four and, when divided by four, fits in 8 bits. */
/* If the addend is negative, clear bit 23 of the instruction.
Otherwise set it. */
- if (value < 0)
+ if ((offsetT) value < 0)
insn &= ~(1 << 23);
else
insn |= 1 << 23;
{
fixP->fx_done = 0;
}
- if ((value & ~0x7f) && ((value & ~0x3f) != ~0x3f))
+ if ((value & ~0x7f) && ((value & ~0x3f) != (valueT) ~0x3f))
as_bad_where (fixP->fx_file, fixP->fx_line,
_("branch out of range"));
unsigned mach;
unsigned int i;
- if ( (arm_ops_hsh = hash_new ()) == NULL
- || (arm_cond_hsh = hash_new ()) == NULL
- || (arm_vcond_hsh = hash_new ()) == NULL
- || (arm_shift_hsh = hash_new ()) == NULL
- || (arm_psr_hsh = hash_new ()) == NULL
- || (arm_v7m_psr_hsh = hash_new ()) == NULL
- || (arm_reg_hsh = hash_new ()) == NULL
- || (arm_reloc_hsh = hash_new ()) == NULL
- || (arm_barrier_opt_hsh = hash_new ()) == NULL)
- as_fatal (_("virtual memory exhausted"));
+ arm_ops_hsh = str_htab_create ();
+ arm_cond_hsh = str_htab_create ();
+ arm_vcond_hsh = str_htab_create ();
+ arm_shift_hsh = str_htab_create ();
+ arm_psr_hsh = str_htab_create ();
+ arm_v7m_psr_hsh = str_htab_create ();
+ arm_reg_hsh = str_htab_create ();
+ arm_reloc_hsh = str_htab_create ();
+ arm_barrier_opt_hsh = str_htab_create ();
for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
- hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
+ if (str_hash_find (arm_ops_hsh, insns[i].template_name) == NULL)
+ str_hash_insert (arm_ops_hsh, insns[i].template_name, insns + i, 0);
for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
- hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
+ str_hash_insert (arm_cond_hsh, conds[i].template_name, conds + i, 0);
for (i = 0; i < sizeof (vconds) / sizeof (struct asm_cond); i++)
- hash_insert (arm_vcond_hsh, vconds[i].template_name, (void *) (vconds + i));
+ str_hash_insert (arm_vcond_hsh, vconds[i].template_name, vconds + i, 0);
for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
- hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
+ str_hash_insert (arm_shift_hsh, shift_names[i].name, shift_names + i, 0);
for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
- hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
+ str_hash_insert (arm_psr_hsh, psrs[i].template_name, psrs + i, 0);
for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
- hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
- (void *) (v7m_psrs + i));
+ str_hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
+ v7m_psrs + i, 0);
for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
- hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
+ str_hash_insert (arm_reg_hsh, reg_names[i].name, reg_names + i, 0);
for (i = 0;
i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
i++)
- hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
- (void *) (barrier_opt_names + i));
+ str_hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
+ barrier_opt_names + i, 0);
#ifdef OBJ_ELF
for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
{
/* This makes encode_branch() use the EABI versions of this relocation. */
entry->reloc = BFD_RELOC_UNUSED;
- hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
+ str_hash_insert (arm_reloc_hsh, entry->name, entry, 0);
}
#endif
ARM_CPU_OPT ("cortex-a77", "Cortex-A77", ARM_ARCH_V8_2A,
ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
+ ARM_CPU_OPT ("cortex-a78", "Cortex-A78", ARM_ARCH_V8_2A,
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST | ARM_EXT2_SB),
+ FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
+ ARM_CPU_OPT ("cortex-a78ae", "Cortex-A78AE", ARM_ARCH_V8_2A,
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST | ARM_EXT2_SB),
+ FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
+ ARM_CPU_OPT ("cortex-a78c", "Cortex-A78C", ARM_ARCH_V8_2A,
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST | ARM_EXT2_SB),
+ FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
ARM_CPU_OPT ("ares", "Ares", ARM_ARCH_V8_2A,
ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM,
ARM_ARCH_NONE,
FPU_NONE),
+ ARM_CPU_OPT ("cortex-x1", "Cortex-X1", ARM_ARCH_V8_2A,
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST | ARM_EXT2_SB),
+ FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A,
ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
ARM_CPU_OPT ("neoverse-n1", "Neoverse N1", ARM_ARCH_V8_2A,
ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
+ ARM_CPU_OPT ("neoverse-n2", "Neoverse N2", ARM_ARCH_V8_5A,
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
+ | ARM_EXT2_BF16
+ | ARM_EXT2_I8MM),
+ FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4),
+ ARM_CPU_OPT ("neoverse-v1", "Neoverse V1", ARM_ARCH_V8_4A,
+ ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
+ | ARM_EXT2_BF16
+ | ARM_EXT2_I8MM),
+ FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4),
/* ??? XSCALE is really an architecture. */
ARM_CPU_OPT ("xscale", NULL, ARM_ARCH_XSCALE,
ARM_ARCH_NONE,