/* Perform range checks on positive and negative overflows by checking if the
-VALUE given fits within the range of an BITS sized immediate. */
+VALUE given fits within the range of a BITS-sized immediate. */
-static bfd_boolean out_of_range_p (offsetT value, offsetT bits)
+static bool out_of_range_p (offsetT value, offsetT bits)
{
gas_assert (bits < (offsetT)(sizeof (value) * 8));
return (value & ~((1 << bits)-1))
static arm_feature_set thumb_arch_used;
/* Flags stored in private area of BFD structure. */
-static int uses_apcs_26 = FALSE;
-static int atpcs = FALSE;
-static int support_interwork = FALSE;
-static int uses_apcs_float = FALSE;
-static int pic_code = FALSE;
-static int fix_v4bx = FALSE;
+static int uses_apcs_26 = false;
+static int atpcs = false;
+static int support_interwork = false;
+static int uses_apcs_float = false;
+static int pic_code = false;
+static int fix_v4bx = false;
/* Warn on using deprecated features. */
-static int warn_on_deprecated = TRUE;
-static int warn_on_restrict_it = FALSE;
+static int warn_on_deprecated = true;
+static int warn_on_restrict_it = false;
/* Understand CodeComposer Studio assembly syntax. */
-bfd_boolean codecomposer_syntax = FALSE;
+bool codecomposer_syntax = false;
/* Variables that we set while parsing command-line options. Once all
options have been read we re-process these values to set the real
extern FLONUM_TYPE generic_floating_point_number;
/* Return if no cpu was selected on command-line. */
-static bfd_boolean
+static bool
no_cpu_selected (void)
{
return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
-bfd_boolean
+bool
arm_is_eabi (void)
{
return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
preceding IT instructions. Unlike ARM mode, you cannot use a
conditional affix except in the scope of an IT instruction. */
-static bfd_boolean unified_syntax = FALSE;
+static bool unified_syntax = false;
/* An immediate operand can start with #, and ld*, st*, pld operands
can contain [ and ]. We need to tell APP not to elide whitespace
<insn> */
symbolS * last_label_seen;
-static int label_is_thumb_function_name = FALSE;
+static int label_is_thumb_function_name = false;
/* Literal pool structure. Held on a per-section
and per-sub-section basis. */
-/* Return TRUE if anything in the expression is a bignum. */
+/* Return true if anything in the expression is a bignum. */
-static bfd_boolean
+static bool
walk_no_bignums (symbolS * sp)
{
if (symbol_get_value_expression (sp)->X_op == O_big)
- return TRUE;
+ return true;
if (symbol_get_value_expression (sp)->X_add_symbol)
{
&& walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
}
- return FALSE;
+ return false;
}
-static bfd_boolean in_my_get_expression = FALSE;
+static bool in_my_get_expression = false;
/* Third argument to my_get_expression. */
#define GE_NO_PREFIX 0
save_in = input_line_pointer;
input_line_pointer = *str;
- in_my_get_expression = TRUE;
+ in_my_get_expression = true;
expression (ep);
- in_my_get_expression = FALSE;
+ in_my_get_expression = false;
if (ep->X_op == O_illegal || ep->X_op == O_absent)
{
-/* If the given feature available in the selected CPU, mark it as used.
-Returns TRUE iff feature is available. */
+/* If the given feature is available in the selected CPU, mark it as used.
+Returns true iff feature is available. */
-static bfd_boolean
+static bool
mark_feature_used (const arm_feature_set *feature)
{
&& ARM_CPU_IS_ANY (cpu_variant))
{
first_error (BAD_MVE_AUTO);
- return FALSE;
+ return false;
}
/* Ensure the option is valid on the current architecture. */
if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
- return FALSE;
+ return false;
/* Add the appropriate architecture feature for the barrier option used.
*/
record_feature_use (feature);
- return TRUE;
+ return true;
}
/* Parse either a register or a scalar, with an optional type. Return the
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype,
- bfd_boolean *partial_match)
+ bool *partial_match)
{
char *str = *ccp;
int base_reg;
int warned = 0;
unsigned long mask = 0;
int i;
- bfd_boolean vpr_seen = FALSE;
- bfd_boolean expect_vpr =
+ bool vpr_seen = false;
+ bool expect_vpr =
(etype == REGLIST_VFP_S_VPR) || (etype == REGLIST_VFP_D_VPR);
if (skip_past_char (&str, '{') == FAIL)
}
base_reg = max_regs;
- *partial_match = FALSE;
+ *partial_match = false;
do
{
&& !ISALPHA (*(str + vpr_str_len))
&& !vpr_seen)
{
- vpr_seen = TRUE;
+ vpr_seen = true;
str += vpr_str_len;
if (count == 0)
base_reg = 0; /* Canonicalize VPR only on d0 with 0 regs. */
return FAIL;
}
- *partial_match = TRUE;
+ *partial_match = true;
if (vpr_seen)
continue;
/* True if two alias types are the same. */
-static bfd_boolean
+static bool
neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
{
if (!a && !b)
- return TRUE;
+ return true;
if (!a || !b)
- return FALSE;
+ return false;
if (a->defined != b->defined)
- return FALSE;
+ return false;
if ((a->defined & NTA_HASTYPE) != 0
&& (a->eltype.type != b->eltype.type
|| a->eltype.size != b->eltype.size))
- return FALSE;
+ return false;
if ((a->defined & NTA_HASINDEX) != 0
&& (a->index != b->index))
- return FALSE;
+ return false;
- return TRUE;
+ return true;
}
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
new_reg->name = name;
new_reg->number = number;
new_reg->type = type;
- new_reg->builtin = FALSE;
+ new_reg->builtin = false;
new_reg->neon = NULL;
str_hash_insert (arm_reg_hsh, name, new_reg, 0);
If we find one, or if it looks sufficiently like one that we want to
-handle any error here, return TRUE. Otherwise return FALSE. */
+handle any error here, return true. Otherwise return false. */
-static bfd_boolean
+static bool
create_register_alias (char * newname, char *p)
{
struct reg_entry *old;
collapsed to single spaces. */
oldname = p;
if (strncmp (oldname, " .req ", 6) != 0)
- return FALSE;
+ return false;
oldname += 6;
if (*oldname == '\0')
- return FALSE;
+ return false;
old = (struct reg_entry *) str_hash_find (arm_reg_hsh, oldname);
if (!old)
{
as_warn (_("unknown register '%s' -- .req ignored"), oldname);
- return TRUE;
+ return true;
}
/* If TC_CASE_SENSITIVE is defined, then newname already points to
if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
{
free (nbuf);
- return TRUE;
+ return true;
}
}
}
free (nbuf);
- return TRUE;
+ return true;
}
/* Create a Neon typed/indexed register alias using directives, e.g.:
specified directly, e.g.:
vadd d0.s32, d1.s32, d2.s32 */
-static bfd_boolean
+static bool
create_neon_reg_alias (char *newname, char *p)
{
enum arm_reg_type basetype;
else if (strncmp (p, " .qn ", 5) == 0)
basetype = REG_TYPE_NQ;
else
- return FALSE;
+ return false;
p += 5;
if (*p == '\0')
- return FALSE;
+ return false;
basereg = arm_reg_parse_multi (&p);
if (basereg && basereg->type != basetype)
{
as_bad (_("bad type for register"));
- return FALSE;
+ return false;
}
if (basereg == NULL)
if (exp.X_op != O_constant)
{
as_bad (_("expression must be constant"));
- return FALSE;
+ return false;
}
basereg = &mybasereg;
basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
if (typeinfo.defined & NTA_HASTYPE)
{
as_bad (_("can't redefine the type of a register alias"));
- return FALSE;
+ return false;
}
typeinfo.defined |= NTA_HASTYPE;
if (ntype.elems != 1)
{
as_bad (_("you must specify a single type only"));
- return FALSE;
+ return false;
}
typeinfo.eltype = ntype.el[0];
}
if (typeinfo.defined & NTA_HASINDEX)
{
as_bad (_("can't redefine the index of a scalar alias"));
- return FALSE;
+ return false;
}
my_get_expression (&exp, &p, GE_NO_PREFIX);
if (exp.X_op != O_constant)
{
as_bad (_("scalar index must be constant"));
- return FALSE;
+ return false;
}
typeinfo.defined |= NTA_HASINDEX;
if (skip_past_char (&p, ']') == FAIL)
{
as_bad (_("expecting ]"));
- return FALSE;
+ return false;
}
}
typeinfo.defined != 0 ? &typeinfo : NULL);
free (namebuf);
- return TRUE;
+ return true;
}
/* Should never be called, as .req goes between the alias and the
/* The following label is the name/address of the start of a Thumb function.
We need to know this for the interworking support. */
- label_is_thumb_function_name = TRUE;
+ label_is_thumb_function_name = true;
}
/* Perform a .set directive, but also mark the alias as
delim = get_symbol_name (& name);
if (!strcasecmp (name, "unified"))
- unified_syntax = TRUE;
+ unified_syntax = true;
else if (!strcasecmp (name, "divided"))
- unified_syntax = FALSE;
+ unified_syntax = false;
else
{
as_bad (_("unrecognized syntax mode \"%s\""), name);
#define LIT_ENTRY_SIZE_MASK 0xFF
literal_pool * pool;
unsigned int entry, pool_size = 0;
- bfd_boolean padding_slot_p = FALSE;
+ bool padding_slot_p = false;
unsigned imm1 = 0;
unsigned imm2 = 0;
return SUCCESS;
}
-bfd_boolean
+bool
tc_start_label_without_colon (void)
{
- bfd_boolean ret = TRUE;
+ bool ret = true;
if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
{
if (*label == '.')
{
as_bad (_("Invalid label '%s'"), label);
- ret = FALSE;
+ ret = false;
}
asmfunc_debug (label);
return 0;
}
-static bfd_boolean
+static bool
emit_insn (expressionS *exp, int nbytes)
{
int size = 0;
valueT op;
int num_vfpv3_regs = 0;
int num_regs_below_16;
- bfd_boolean partial_match;
+ bool partial_match;
count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D,
&partial_match);
int count;
unsigned int reg;
valueT op;
- bfd_boolean partial_match;
+ bool partial_match;
count = parse_vfp_reg_list (&input_line_pointer, ®, REGLIST_VFP_D,
&partial_match);
{
char *peek;
struct reg_entry *reg;
- bfd_boolean had_brace = FALSE;
+ bool had_brace = false;
if (!unwind.proc_start)
as_bad (MISSING_FNSTART);
if (*peek == '{')
{
- had_brace = TRUE;
+ had_brace = true;
peek++;
}
static int
parse_immediate (char **str, int *val, int min, int max,
- bfd_boolean prefix_opt)
+ bool prefix_opt)
{
expressionS exp;
static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
- bfd_boolean allow_symbol_p)
+ bool allow_symbol_p)
{
expressionS exp;
expressionS *exp_p = in_exp ? in_exp : &exp;
/* Detect the presence of a floating point or integer zero constant,
i.e. #0.0 or #0. */
-static bfd_boolean
+static bool
parse_ifimm_zero (char **in)
{
int error_code;
{
/* In unified syntax, all prefixes are optional. */
if (!unified_syntax)
- return FALSE;
+ return false;
}
else
++*in;
if (strncmp (*in, "0x", 2) == 0)
{
int val;
- if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
- return FALSE;
- return TRUE;
+ if (parse_immediate (in, &val, 0, 0, true) == FAIL)
+ return false;
+ return true;
}
error_code = atof_generic (in, ".", EXP_CHARS,
&& generic_floating_point_number.sign == '+'
&& (generic_floating_point_number.low
> generic_floating_point_number.leader))
- return TRUE;
+ return true;
- return FALSE;
+ return false;
}
/* Parse an 8-bit "quarter-precision" floating point number of the form:
return PARSE_OPERAND_FAIL;
}
else if (parse_big_immediate (&p, i, &inst.relocs[0].exp,
- /*allow_symbol_p=*/TRUE))
+ /*allow_symbol_p=*/true))
return PARSE_OPERAND_FAIL;
*str = p;
{
/* [Rn], {expr} - unindexed, with option */
if (parse_immediate (&p, &inst.operands[i].imm,
- 0, 255, TRUE) == FAIL)
+ 0, 255, true) == FAIL)
return PARSE_OPERAND_FAIL;
if (skip_past_char (&p, '}') == FAIL)
/* Parse a PSR flag operand. The value returned is FAIL on syntax error,
or a bitmask suitable to be or-ed into the ARM msr instruction. */
static int
-parse_psr (char **str, bfd_boolean lhs)
+parse_psr (char **str, bool lhs)
{
char *p;
unsigned long psr_field;
const struct asm_psr *psr;
char *start;
- bfd_boolean is_apsr = FALSE;
- bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);
+ bool is_apsr = false;
+ bool m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);
/* PR gas/12698: If the user has specified -march=all then m_profile will
-be TRUE, but we want to ignore it in this case as we are building for any
+be true, but we want to ignore it in this case as we are building for any
CPU type, including non-m variants. */
if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
- m_profile = FALSE;
+ m_profile = false;
/* CPSR's and SPSR's can now be lowercase. This is just a convenience
feature for ease of use and backwards compatibility. */
{
/* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
and ARMv7-R architecture CPUs. */
- is_apsr = TRUE;
+ is_apsr = true;
psr_field = 0;
}
else if (m_profile)
if (psr->field <= 3)
{
psr_field = psr->field;
- is_apsr = TRUE;
+ is_apsr = true;
goto check_suffix;
}
return FAIL;
}
- if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
+ if (parse_immediate (&s, &rot, 0, 24, false) == FAIL)
return FAIL;
switch (rot)
Case 10: VMOV.F32 <Sd>, #<imm>
Case 11: VMOV.F64 <Dd>, #<imm> */
inst.operands[i].immisfloat = 1;
- else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
+ else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/false)
== SUCCESS)
/* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
structure. Returns SUCCESS or FAIL depending on whether the
specified grammar matched. */
static int
-parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
+parse_operands (char *str, const unsigned int *pattern, bool thumb)
{
unsigned const int *upat = pattern;
char *backtrack_pos = 0;
enum arm_reg_type rtype;
parse_operand_result result;
unsigned int op_parse_code;
- bfd_boolean partial_match;
+ bool partial_match;
#define po_char_or_fail(chr) \
do \
po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
break;
try_imm0:
- po_imm_or_fail (0, 0, TRUE);
+ po_imm_or_fail (0, 0, true);
}
break;
try_immbig:
/* There's a possibility of getting a 64-bit immediate here, so
we need special handling. */
- if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
+ if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/false)
== FAIL)
{
inst.error = _("immediate value is out of range");
po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
break;
try_shimm:
- po_imm_or_fail (0, 63, TRUE);
+ po_imm_or_fail (0, 63, true);
}
break;
break;
/* Immediates */
- case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
- case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
- case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
- case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
- case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
- case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
- case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
- case OP_I48_I64: po_imm1_or_imm2_or_fail (48, 64, FALSE); break;
- case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
- case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
- case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
- case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
- case OP_I127: po_imm_or_fail ( 0, 127, FALSE); break;
- case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
- case OP_I511: po_imm_or_fail ( 0, 511, FALSE); break;
- case OP_I4095: po_imm_or_fail ( 0, 4095, FALSE); break;
- case OP_I8191: po_imm_or_fail ( 0, 8191, FALSE); break;
- case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
+ case OP_I7: po_imm_or_fail ( 0, 7, false); break;
+ case OP_I15: po_imm_or_fail ( 0, 15, false); break;
+ case OP_I16: po_imm_or_fail ( 1, 16, false); break;
+ case OP_I16z: po_imm_or_fail ( 0, 16, false); break;
+ case OP_I31: po_imm_or_fail ( 0, 31, false); break;
+ case OP_I32: po_imm_or_fail ( 1, 32, false); break;
+ case OP_I32z: po_imm_or_fail ( 0, 32, false); break;
+ case OP_I48_I64: po_imm1_or_imm2_or_fail (48, 64, false); break;
+ case OP_I63s: po_imm_or_fail (-64, 63, false); break;
+ case OP_I63: po_imm_or_fail ( 0, 63, false); break;
+ case OP_I64: po_imm_or_fail ( 1, 64, false); break;
+ case OP_I64z: po_imm_or_fail ( 0, 64, false); break;
+ case OP_I127: po_imm_or_fail ( 0, 127, false); break;
+ case OP_I255: po_imm_or_fail ( 0, 255, false); break;
+ case OP_I511: po_imm_or_fail ( 0, 511, false); break;
+ case OP_I4095: po_imm_or_fail ( 0, 4095, false); break;
+ case OP_I8191: po_imm_or_fail ( 0, 8191, false); break;
+ case OP_I4b: po_imm_or_fail ( 1, 4, true); break;
case OP_oI7b:
- case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
- case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
+ case OP_I7b: po_imm_or_fail ( 0, 7, true); break;
+ case OP_I15b: po_imm_or_fail ( 0, 15, true); break;
case OP_oI31b:
- case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
- case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
- case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
- case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
+ case OP_I31b: po_imm_or_fail ( 0, 31, true); break;
+ case OP_oI32b: po_imm_or_fail ( 1, 32, true); break;
+ case OP_oI32z: po_imm_or_fail ( 0, 32, true); break;
+ case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, true); break;
/* Immediate variants */
case OP_oI255c:
po_char_or_fail ('{');
- po_imm_or_fail (0, 255, TRUE);
+ po_imm_or_fail (0, 255, true);
po_char_or_fail ('}');
break;
s[-1] = '\0';
inst.operands[i].writeback = 1;
}
- po_imm_or_fail (0, 31, TRUE);
+ po_imm_or_fail (0, 31, true);
if (str == s - 1)
str = s;
}
/* Register or immediate. */
case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
- I0: po_imm_or_fail (0, 0, FALSE); break;
+ I0: po_imm_or_fail (0, 0, false); break;
case OP_RRnpcsp_I32: po_reg_or_goto (REG_TYPE_RN, I32); break;
- I32: po_imm_or_fail (1, 32, FALSE); break;
+ I32: po_imm_or_fail (1, 32, false); break;
case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
IF:
break;
case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
- I32z: po_imm_or_fail (0, 32, FALSE); break;
+ I32z: po_imm_or_fail (0, 32, false); break;
/* Two kinds of register. */
case OP_RIWR_RIWC:
case OP_oBARRIER_I15:
po_barrier_or_imm (str); break;
immediate:
- if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
+ if (parse_immediate (&str, &val, 0, 15, true) == FAIL)
goto failure;
break;
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
static void
-encode_arm_addr_mode_common (int i, bfd_boolean is_t)
+encode_arm_addr_mode_common (int i, bool is_t)
{
/* PR 14260:
Generate an error if the operand is not a register. */
reject forms that cannot be used with a T instruction (i.e. not
post-indexed). */
static void
-encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
+encode_arm_addr_mode_2 (int i, bool is_t)
{
- const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
+ const bool is_pc = (inst.operands[i].reg == REG_PC);
encode_arm_addr_mode_common (i, is_t);
{
if (is_pc && !inst.relocs[0].pc_rel)
{
- const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
+ const bool is_load = ((inst.instruction & LOAD_BIT) != 0);
-/* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
+/* If is_t is true, it's called from do_ldstt. ldrt/strt
cannot use PC in addressing.
forms that cannot be used with a T instruction (i.e. not
post-indexed). */
static void
-encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
+encode_arm_addr_mode_3 (int i, bool is_t)
{
if (inst.operands[i].immisreg && inst.operands[i].shifted)
{
-/* Returns TRUE if double precision value V may be cast
+/* Returns true if double precision value V may be cast
to single precision without loss of accuracy. */
-static bfd_boolean
+static bool
is_double_a_single (bfd_uint64_t v)
{
int exp = (v >> 52) & 0x7FF;
/* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
Determine whether it can be performed with a move instruction; if
it can, convert inst.instruction to that move instruction and
- return TRUE; if it can't, convert inst.instruction to a literal-pool
+ return true; if it can't, convert inst.instruction to a literal-pool
-load and return FALSE. If this is not a valid thing to do in the
-current context, set inst.error and return TRUE.
+load and return false. If this is not a valid thing to do in the
+current context, set inst.error and return true.
inst.operands[i] describes the destination register. */
-static bfd_boolean
-move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
+static bool
+move_or_literal_pool (int i, enum lit_type t, bool mode_3)
{
unsigned long tbit;
- bfd_boolean thumb_p = (t == CONST_THUMB);
- bfd_boolean arm_p = (t == CONST_ARM);
+ bool thumb_p = (t == CONST_THUMB);
+ bool arm_p = (t == CONST_ARM);
if (thumb_p)
tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
if ((inst.instruction & tbit) == 0)
{
inst.error = _("invalid pseudo operation");
- return TRUE;
+ return true;
}
if (inst.relocs[0].exp.X_op != O_constant
&& inst.relocs[0].exp.X_op != O_big)
{
inst.error = _("constant expression expected");
- return TRUE;
+ return true;
}
if (inst.relocs[0].exp.X_op == O_constant
/* Check if on thumb2 it can be done with a mov.w, mvn or
movw instruction. */
unsigned int newimm;
- bfd_boolean isNegated = FALSE;
+ bool isNegated = false;
newimm = encode_thumb32_immediate (v);
if (newimm == (unsigned int) FAIL)
{
newimm = encode_thumb32_immediate (~v);
- isNegated = TRUE;
+ isNegated = true;
}
/* The number can be loaded with a mov.w or mvn
inst.instruction |= (newimm & 0x800) << 15;
inst.instruction |= (newimm & 0x700) << 4;
inst.instruction |= (newimm & 0x0ff);
- return TRUE;
+ return true;
}
/* The number can be loaded with a movw instruction. */
else if ((v & ~0xFFFF) == 0
instruction size check, as otherwise GAS will reject
the use of this T32 instruction. */
inst.size_req = 0;
- return TRUE;
+ return true;
}
}
}
inst.instruction &= LITERAL_MASK;
inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
inst.instruction |= value & 0xfff;
- return TRUE;
+ return true;
}
value = encode_arm_immediate (~ v);
inst.instruction &= LITERAL_MASK;
inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
inst.instruction |= value & 0xfff;
- return TRUE;
+ return true;
}
}
else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
: inst.relocs[0].exp.X_unsigned
? 0
: ((bfd_int64_t)((int) immlo)) >> 32;
- int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
+ int cmode = neon_cmode_for_move_imm (immlo, immhi, false, &immbits,
&op, 64, NT_invtype);
if (cmode == FAIL)
{
neon_invert_size (&immlo, &immhi, 64);
op = !op;
- cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
+ cmode = neon_cmode_for_move_imm (immlo, immhi, false, &immbits,
&op, 64, NT_invtype);
}
else
inst.instruction |= (0xFU << 28) | (0x1 << 25);
neon_write_immbits (immbits);
- return TRUE;
+ return true;
}
}
}
inst.operands[1].imm =
neon_qfloat_bits (v);
do_vfp_nsyn_opcode ("fconsts");
- return TRUE;
+ return true;
}
/* If our host does not support a 64-bit type then we cannot perform
inst.operands[1].imm =
neon_qfloat_bits (double_to_single (v));
do_vfp_nsyn_opcode ("fconstd");
- return TRUE;
+ return true;
}
}
#endif
if (add_to_lit_pool ((!inst.operands[i].isvec
|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
- return TRUE;
+ return true;
inst.operands[1].reg = REG_PC;
inst.operands[1].isreg = 1;
: (mode_3
? BFD_RELOC_ARM_HWLITERAL
: BFD_RELOC_ARM_LITERAL));
- return FALSE;
+ return false;
}
/* inst.operands[i] was set up by parse_address. Encode it into an
inst.error = _("invalid co-processor operand");
return FAIL;
}
- if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
+ if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/false))
return SUCCESS;
}
inst.instruction |= inst.operands[1].reg << 16;
}
-static bfd_boolean
+static bool
check_obsolete (const arm_feature_set *feature, const char *msg)
{
if (ARM_CPU_IS_ANY (cpu_variant))
{
as_tsktsk ("%s", msg);
- return TRUE;
+ return true;
}
else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
{
as_bad ("%s", msg);
- return TRUE;
+ return true;
}
- return FALSE;
+ return false;
}
static void
do_rd_cpaddr (void)
{
inst.instruction |= inst.operands[0].reg << 12;
- encode_arm_cp_address (1, TRUE, TRUE, 0);
+ encode_arm_cp_address (1, true, true, 0);
}
/* ARM instructions, in alphabetical order by function name (except
static void
do_bx (void)
{
- bfd_boolean want_reloc;
+ bool want_reloc;
if (inst.operands[0].reg == REG_PC)
as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
if (!ARM_FEATURE_ZERO (selected_object_arch)
&& !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
- want_reloc = TRUE;
+ want_reloc = true;
#ifdef OBJ_ELF
if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
- want_reloc = FALSE;
+ want_reloc = false;
if (want_reloc)
inst.relocs[0].type = BFD_RELOC_ARM_V4BX;
static void
do_ldmstm (void)
{
- encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
+ encode_ldmstm (/*from_push_pop_mnem=*/false);
}
/* ARMv5TE load-consecutive (argument parse)
as_warn (_("index register overlaps transfer register"));
}
inst.instruction |= inst.operands[0].reg << 12;
- encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
+ encode_arm_addr_mode_3 (2, /*is_t=*/false);
}
static void
{
inst.instruction |= inst.operands[0].reg << 12;
if (!inst.operands[1].isreg)
- if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
+ if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/false))
return;
- encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
+ encode_arm_addr_mode_2 (1, /*is_t=*/false);
check_ldr_r15_aligned ();
}
inst.operands[1].writeback = 1;
}
inst.instruction |= inst.operands[0].reg << 12;
- encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
+ encode_arm_addr_mode_2 (1, /*is_t=*/true);
}
/* Halfword and signed-byte load/store operations. */
constraint (inst.operands[0].reg == REG_PC, BAD_PC);
inst.instruction |= inst.operands[0].reg << 12;
if (!inst.operands[1].isreg)
- if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
+ if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/true))
return;
- encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
+ encode_arm_addr_mode_3 (1, /*is_t=*/false);
}
static void
inst.operands[1].writeback = 1;
}
inst.instruction |= inst.operands[0].reg << 12;
- encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
+ encode_arm_addr_mode_3 (1, /*is_t=*/true);
}
/* Co-processor register load/store.
{
inst.instruction |= inst.operands[0].reg << 8;
inst.instruction |= inst.operands[1].reg << 12;
- encode_arm_cp_address (2, TRUE, TRUE, 0);
+ encode_arm_cp_address (2, true, true, 0);
}
static void
do_mov16 (void)
{
bfd_vma imm;
- bfd_boolean top;
+ bool top;
top = (inst.instruction & 0x00400000) != 0;
constraint (top && inst.relocs[0].type == BFD_RELOC_ARM_MOVW,
_("writeback used in preload instruction"));
constraint (!inst.operands[0].preind,
_("unindexed addressing used in preload instruction"));
- encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
+ encode_arm_addr_mode_2 (0, /*is_t=*/false);
}
/* ARMv7: PLI <addr_mode> */
_("writeback used in preload instruction"));
constraint (!inst.operands[0].preind,
_("unindexed addressing used in preload instruction"));
- encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
+ encode_arm_addr_mode_2 (0, /*is_t=*/false);
inst.instruction &= ~PRE_INDEX;
}
inst.operands[0].isreg = 1;
inst.operands[0].writeback = 1;
inst.operands[0].reg = REG_SP;
- encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
+ encode_ldmstm (/*from_push_pop_mnem=*/true);
}
/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
do_vfp_sp_ldst (void)
{
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
- encode_arm_cp_address (1, FALSE, TRUE, 0);
+ encode_arm_cp_address (1, false, true, 0);
}
static void
do_vfp_dp_ldst (void)
{
encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
- encode_arm_cp_address (1, FALSE, TRUE, 0);
+ encode_arm_cp_address (1, false, true, 0);
}
}
}
- encode_arm_cp_address (2, TRUE, TRUE, 0);
+ encode_arm_cp_address (2, true, true, 0);
}
\f
/* iWMMXt instructions: strictly in alphabetical order. */
reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
else
reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
- encode_arm_cp_address (1, TRUE, FALSE, reloc);
+ encode_arm_cp_address (1, true, false, reloc);
}
static void
}
inst.instruction |= inst.operands[0].reg << 12;
- encode_arm_cp_address (1, TRUE, TRUE, 0);
+ encode_arm_cp_address (1, true, true, 0);
}
static void
inst.instruction |= inst.operands[1].imm;
}
else
- encode_arm_cp_address (1, TRUE, FALSE, 0);
+ encode_arm_cp_address (1, true, false, 0);
}
static void
reject PC in Rn. */
static void
-encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
+encode_thumb32_addr_mode (int i, bool is_t, bool is_d)
{
- const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
+ const bool is_pc = (inst.operands[i].reg == REG_PC);
constraint (!inst.operands[i].isreg,
_("Instruction does not support =N addresses"));
if (unified_syntax)
{
- bfd_boolean flags;
- bfd_boolean narrow;
+ bool flags;
+ bool narrow;
int opcode;
flags = (inst.instruction == T_MNEM_adds
if (!inst.operands[2].shifted && inst.size_req != 4)
{
if (Rd > 7 || Rs > 7 || Rn > 7)
- narrow = FALSE;
+ narrow = false;
if (narrow)
{
}
else
{
- bfd_boolean narrow;
+ bool narrow;
/* See if we can do this with a 16-bit instruction. */
if (THUMB_SETS_FLAGS (inst.instruction))
narrow = in_pred_block ();
if (Rd > 7 || Rn > 7 || Rs > 7)
- narrow = FALSE;
+ narrow = false;
if (inst.operands[2].shifted)
- narrow = FALSE;
+ narrow = false;
if (inst.size_req == 4)
- narrow = FALSE;
+ narrow = false;
if (narrow
&& Rd == Rs)
}
else
{
- bfd_boolean narrow;
+ bool narrow;
/* See if we can do this with a 16-bit instruction. */
if (THUMB_SETS_FLAGS (inst.instruction))
narrow = in_pred_block ();
if (Rd > 7 || Rn > 7 || Rs > 7)
- narrow = FALSE;
+ narrow = false;
if (inst.operands[2].shifted)
- narrow = FALSE;
+ narrow = false;
if (inst.size_req == 4)
- narrow = FALSE;
+ narrow = false;
if (narrow)
{
set_pred_insn_type (IT_INSN);
now_pred.mask = (inst.instruction & 0xf) | 0x10;
now_pred.cc = cond;
- now_pred.warn_deprecated = FALSE;
+ now_pred.warn_deprecated = false;
now_pred.type = SCALAR_PRED;
/* If the condition is a negative condition, invert the mask. */
/* Helper function used for both push/pop and ldm/stm. */
static void
-encode_thumb2_multi (bfd_boolean do_io, int base, unsigned mask,
- bfd_boolean writeback)
+encode_thumb2_multi (bool do_io, int base, unsigned mask,
+ bool writeback)
{
- bfd_boolean load, store;
+ bool load, store;
gas_assert (base != -1 || !do_io);
load = do_io && ((inst.instruction & (1 << 20)) != 0);
if (unified_syntax)
{
- bfd_boolean narrow;
+ bool narrow;
unsigned mask;
- narrow = FALSE;
+ narrow = false;
/* See if we can use a 16-bit instruction. */
if (inst.instruction < 0xffff /* not ldmdb/stmdb */
&& inst.size_req != 4
inst.instruction = THUMB_OP16 (inst.instruction);
inst.instruction |= inst.operands[0].reg << 8;
inst.instruction |= inst.operands[1].imm;
- narrow = TRUE;
+ narrow = true;
}
else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
{
inst.instruction = THUMB_OP16 (opcode);
inst.instruction |= inst.operands[0].reg << 3;
inst.instruction |= (ffs (inst.operands[1].imm)-1);
- narrow = TRUE;
+ narrow = true;
}
}
else if (inst.operands[0] .reg == REG_SP)
THUMB_OP16 (inst.instruction == T_MNEM_stmia
? T_MNEM_push : T_MNEM_pop);
inst.instruction |= inst.operands[1].imm;
- narrow = TRUE;
+ narrow = true;
}
else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
{
THUMB_OP16 (inst.instruction == T_MNEM_stmia
? T_MNEM_str_sp : T_MNEM_ldr_sp);
inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
- narrow = TRUE;
+ narrow = true;
}
}
}
if (inst.instruction < 0xffff)
inst.instruction = THUMB_OP32 (inst.instruction);
- encode_thumb2_multi (TRUE /* do_io */, inst.operands[0].reg,
+ encode_thumb2_multi (true /* do_io */, inst.operands[0].reg,
inst.operands[1].imm,
inst.operands[0].writeback);
}
{
if (opcode <= 0xffff)
inst.instruction = THUMB_OP32 (opcode);
- if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
+ if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/false))
return;
}
if (inst.operands[1].isreg
inst.instruction = THUMB_OP32 (opcode);
inst.instruction |= inst.operands[0].reg << 12;
- encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
+ encode_thumb32_addr_mode (1, /*is_t=*/false, /*is_d=*/false);
check_ldr_r15_aligned ();
return;
}
inst.instruction = THUMB_OP16 (inst.instruction);
if (!inst.operands[1].isreg)
- if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
+ if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/false))
return;
constraint (!inst.operands[1].preind
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg << 8;
- encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
+ encode_thumb32_addr_mode (2, /*is_t=*/false, /*is_d=*/true);
}
static void
do_t_ldstt (void)
{
inst.instruction |= inst.operands[0].reg << 12;
- encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
+ encode_thumb32_addr_mode (1, /*is_t=*/true, /*is_d=*/false);
}
static void
int r0off = (inst.instruction == T_MNEM_mov
|| inst.instruction == T_MNEM_movs) ? 8 : 16;
unsigned long opcode;
- bfd_boolean narrow;
- bfd_boolean low_regs;
+ bool narrow;
+ bool low_regs;
low_regs = (Rn <= 7 && Rm <= 7);
opcode = inst.instruction;
narrow = opcode != T_MNEM_movs || low_regs;
if (inst.size_req == 4
|| inst.operands[1].shifted)
- narrow = FALSE;
+ narrow = false;
/* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
if (opcode == T_MNEM_movs && inst.operands[1].isreg
|| inst.instruction == T_MNEM_movs))
{
/* Register shifts are encoded as separate shift instructions. */
- bfd_boolean flags = (inst.instruction == T_MNEM_movs);
+ bool flags = (inst.instruction == T_MNEM_movs);
if (in_pred_block ())
narrow = !flags;
narrow = flags;
if (inst.size_req == 4)
- narrow = FALSE;
+ narrow = false;
if (!low_regs || inst.operands[1].imm > 7)
- narrow = FALSE;
+ narrow = false;
if (Rn != Rm)
- narrow = FALSE;
+ narrow = false;
switch (inst.operands[1].shift_kind)
{
case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
- default: narrow = FALSE; break;
+ default: narrow = false; break;
}
}
{
unsigned Rd;
bfd_vma imm;
- bfd_boolean top;
+ bool top;
top = (inst.instruction & 0x00800000) != 0;
if (inst.relocs[0].type == BFD_RELOC_ARM_MOVW)
{
int r0off = (inst.instruction == T_MNEM_mvn
|| inst.instruction == T_MNEM_mvns) ? 8 : 16;
- bfd_boolean narrow;
+ bool narrow;
if (inst.size_req == 4
|| inst.instruction > 0xffff
|| inst.operands[1].shifted
|| Rn > 7 || Rm > 7)
- narrow = FALSE;
+ narrow = false;
else if (inst.instruction == T_MNEM_cmn
|| inst.instruction == T_MNEM_tst)
- narrow = TRUE;
+ narrow = true;
else if (THUMB_SETS_FLAGS (inst.instruction))
narrow = !in_pred_block ();
else
/* PR gas/12698: The constraint is only applied for m_profile.
If the user has specified -march=all, we want to ignore it as
we are building for any CPU type, including non-m variants. */
- bfd_boolean m_profile =
+ bool m_profile =
!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
constraint ((flags != 0) && m_profile, _("selected processor does "
"not support requested special purpose register"));
/* PR gas/12698: The constraint is only applied for m_profile.
If the user has specified -march=all, we want to ignore it as
we are building for any CPU type, including non-m variants. */
- bfd_boolean m_profile =
+ bool m_profile =
!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
&& (bits & ~(PSR_s | PSR_f)) != 0)
static void
do_t_mul (void)
{
- bfd_boolean narrow;
+ bool narrow;
unsigned Rd, Rn, Rm;
if (!inst.operands[2].present)
&& Rd != Rm)
|| Rn > 7
|| Rm > 7)
- narrow = FALSE;
+ narrow = false;
else if (inst.instruction == T_MNEM_muls)
narrow = !in_pred_block ();
else
constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
constraint (Rn > 7 || Rm > 7,
BAD_HIREG);
- narrow = TRUE;
+ narrow = true;
}
if (narrow)
{
if (unified_syntax)
{
- bfd_boolean narrow;
+ bool narrow;
if (THUMB_SETS_FLAGS (inst.instruction))
narrow = !in_pred_block ();
else
narrow = in_pred_block ();
if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
- narrow = FALSE;
+ narrow = false;
if (inst.size_req == 4)
- narrow = FALSE;
+ narrow = false;
if (!narrow)
{
if (inst.operands[0].immisreg)
reject_bad_reg (inst.operands[0].imm);
- encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
+ encode_thumb32_addr_mode (0, /*is_t=*/false, /*is_d=*/false);
}
static void
else if (unified_syntax)
{
inst.instruction = THUMB_OP32 (inst.instruction);
- encode_thumb2_multi (TRUE /* do_io */, 13, mask, TRUE);
+ encode_thumb2_multi (true /* do_io */, 13, mask, true);
}
else
{
do_t_clrm (void)
{
if (unified_syntax)
- encode_thumb2_multi (FALSE /* do_io */, -1, inst.operands[0].imm, FALSE);
+ encode_thumb2_multi (false /* do_io */, -1, inst.operands[0].imm, false);
else
{
inst.error = _("invalid register list to push/pop instruction");
inst.instruction |= Rs << 16;
if (!inst.operands[2].isreg)
{
- bfd_boolean narrow;
+ bool narrow;
if ((inst.instruction & 0x00100000) != 0)
narrow = !in_pred_block ();
narrow = in_pred_block ();
if (Rd > 7 || Rs > 7)
- narrow = FALSE;
+ narrow = false;
if (inst.size_req == 4 || !unified_syntax)
- narrow = FALSE;
+ narrow = false;
if (inst.relocs[0].exp.X_op != O_constant
|| inst.relocs[0].exp.X_add_number != 0)
- narrow = FALSE;
+ narrow = false;
/* Turn rsb #0 into 16-bit neg. We should probably do this via
relaxation, but it doesn't seem worth the hassle. */
if (unified_syntax)
{
- bfd_boolean narrow;
+ bool narrow;
int shift_kind;
switch (inst.instruction)
else
narrow = in_pred_block ();
if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
- narrow = FALSE;
+ narrow = false;
if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
- narrow = FALSE;
+ narrow = false;
if (inst.operands[2].isreg
&& (inst.operands[1].reg != inst.operands[0].reg
|| inst.operands[2].reg > 7))
- narrow = FALSE;
+ narrow = false;
if (inst.size_req == 4)
- narrow = FALSE;
+ narrow = false;
reject_bad_reg (inst.operands[0].reg);
reject_bad_reg (inst.operands[1].reg);
inst.instruction = THUMB_OP32 (inst.instruction);
if (inst.operands[0].hasreloc == 0)
{
- if (v8_1_branch_value_check (inst.operands[0].imm, 5, FALSE) == FAIL)
+ if (v8_1_branch_value_check (inst.operands[0].imm, 5, false) == FAIL)
as_bad (BAD_BRANCH_OFF);
inst.instruction |= ((inst.operands[0].imm & 0x1f) >> 1) << 23;
if (inst.operands[1].hasreloc == 0)
{
int val = inst.operands[1].imm;
- if (v8_1_branch_value_check (inst.operands[1].imm, 17, TRUE) == FAIL)
+ if (v8_1_branch_value_check (inst.operands[1].imm, 17, true) == FAIL)
as_bad (BAD_BRANCH_OFF);
int immA = (val & 0x0001f000) >> 12;
if (inst.operands[1].hasreloc == 0)
{
int val = inst.operands[1].imm;
- if (v8_1_branch_value_check (inst.operands[1].imm, 19, TRUE) == FAIL)
+ if (v8_1_branch_value_check (inst.operands[1].imm, 19, true) == FAIL)
as_bad (BAD_BRANCH_OFF);
int immA = (val & 0x0007f000) >> 12;
int value = inst.relocs[0].exp.X_add_number;
value = (is_le) ? -value : value;
- if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
+ if (v8_1_branch_value_check (value, 12, false) == FAIL)
as_bad (BAD_BRANCH_OFF);
int imml, immh;
now_pred.cc = 0;
now_pred.mask = ((inst.instruction & 0x00400000) >> 19)
| ((inst.instruction & 0xe000) >> 13);
- now_pred.warn_deprecated = FALSE;
+ now_pred.warn_deprecated = false;
now_pred.type = VECTOR_PRED;
inst.is_neon = 1;
}
if (!inst.operands[0].present)
inst.instruction |= 1 << 21;
- v8_1_loop_reloc (TRUE);
+ v8_1_loop_reloc (true);
break;
case T_MNEM_wls:
case T_MNEM_wlstp:
- v8_1_loop_reloc (FALSE);
+ v8_1_loop_reloc (false);
/* fall through. */
case T_MNEM_dlstp:
case T_MNEM_dls:
instruction.  CHECK contains the set of bits to pass to
vfp_or_neon_is_neon for the NEON specific checks. */
-static bfd_boolean
+static bool
check_simd_pred_availability (int fp, unsigned check)
{
if (inst.cond > COND_ALWAYS)
if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
{
inst.error = BAD_FPU;
- return FALSE;
+ return false;
}
inst.pred_insn_type = INSIDE_VPT_INSN;
}
if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
else if (vfp_or_neon_is_neon (check) == FAIL)
- return FALSE;
+ return false;
}
else
{
if (!ARM_CPU_HAS_FEATURE (cpu_variant, fp ? mve_fp_ext : mve_ext)
&& vfp_or_neon_is_neon (check) == FAIL)
- return FALSE;
+ return false;
if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
}
-return TRUE;
+return true;
}
/* Neon instruction encoders, in approximate order of appearance. */
static void
do_neon_dyadic_i_su (void)
{
- if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
+ if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
return;
enum neon_shape rs;
static void
do_neon_dyadic_i64_su (void)
{
- if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
+ if (!check_simd_pred_availability (false, NEON_CHECK_CC | NEON_CHECK_ARCH))
return;
enum neon_shape rs;
struct neon_type_el et;
static void
do_neon_shl (void)
{
- if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
+ if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
return;
if (!inst.operands[2].isreg)
constraint (imm < 0 || (unsigned)imm >= et.size,
_("immediate out of range for shift"));
NEON_ENCODE (IMMED, inst);
- neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
+ neon_imm_shift (false, 0, neon_quad (rs), et, imm);
}
else
{
static void
do_neon_qshl (void)
{
- if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
+ if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
return;
if (!inst.operands[2].isreg)
constraint (imm < 0 || (unsigned)imm >= et.size,
_("immediate out of range for shift"));
NEON_ENCODE (IMMED, inst);
- neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
+ neon_imm_shift (true, et.type == NT_unsigned, neon_quad (rs), et, imm);
}
else
{
static void
do_neon_rshl (void)
{
- if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
+ if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
return;
enum neon_shape rs;
{
enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
if (rs == NS_QQQ
- && !check_simd_pred_availability (FALSE,
+ && !check_simd_pred_availability (false,
NEON_CHECK_ARCH | NEON_CHECK_CC))
return;
else if (rs != NS_QQQ
/* Because neon_select_shape makes the second operand a copy of the first
if the second operand is not present. */
if (rs == NS_QQI
- && !check_simd_pred_availability (FALSE,
+ && !check_simd_pred_availability (false,
NEON_CHECK_ARCH | NEON_CHECK_CC))
return;
else if (rs != NS_QQI
static void
do_neon_cmp (void)
{
- neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
+ neon_compare (N_SUF_32, N_S_32 | N_F_16_32, false);
}
static void
do_neon_cmp_inv (void)
{
- neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
+ neon_compare (N_SUF_32, N_S_32 | N_F_16_32, true);
}
static void
do_neon_ceq (void)
{
- neon_compare (N_IF_32, N_IF_32, FALSE);
+ neon_compare (N_IF_32, N_IF_32, false);
}
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
return;
- if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
+ if (!check_simd_pred_availability (false, NEON_CHECK_CC | NEON_CHECK_ARCH))
return;
if (inst.operands[2].isscalar)
&& try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
return;
- if (!check_simd_pred_availability (TRUE, NEON_CHECK_CC | NEON_CHECK_ARCH))
+ if (!check_simd_pred_availability (true, NEON_CHECK_CC | NEON_CHECK_ARCH))
return;
if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
return;
- if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
+ if (!check_simd_pred_availability (false, NEON_CHECK_CC | NEON_CHECK_ARCH))
return;
if (inst.operands[2].isscalar)
static void
do_neon_qdmulh (void)
{
- if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
+ if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
return;
if (inst.operands[2].isscalar)
static void
do_neon_qrdmlah (void)
{
- if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
+ if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
return;
if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
{
static void
do_neon_sli (void)
{
- if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
+ if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
return;
enum neon_shape rs;
int imm = inst.operands[2].imm;
constraint (imm < 0 || (unsigned)imm >= et.size,
_("immediate out of range for insert"));
- neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
+ neon_imm_shift (false, 0, neon_quad (rs), et, imm);
}
static void
do_neon_sri (void)
{
- if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
+ if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
return;
enum neon_shape rs;
int imm = inst.operands[2].imm;
constraint (imm < 1 || (unsigned)imm > et.size,
_("immediate out of range for insert"));
- neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
+ neon_imm_shift (false, 0, neon_quad (rs), et, et.size - imm);
}
static void
do_neon_qshlu_imm (void)
{
- if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
+ if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
return;
enum neon_shape rs;
Unsigned types have OP set to 1. */
inst.instruction |= (et.type == NT_unsigned) << 8;
/* The rest of the bits are the same as other immediate shifts. */
- neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
+ neon_imm_shift (false, 0, neon_quad (rs), et, imm);
}
static void
constraint (imm < 1 || (unsigned)imm > et.size,
_("immediate out of range"));
- neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
+ neon_imm_shift (true, et.type == NT_unsigned, 0, et, et.size - imm);
}
static void
/* FIXME: The manual is kind of unclear about what value U should have in
VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
must be 1. */
- neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
+ neon_imm_shift (true, 1, 0, et, et.size - imm);
}
static void
constraint (imm < 1 || (unsigned)imm > et.size,
_("immediate out of range for narrowing operation"));
- neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
+ neon_imm_shift (false, 0, 0, et, et.size - imm);
}
static void
et = neon_check_type (2, NS_QDI,
N_EQK | N_DBL, N_SU_32 | N_KEY);
NEON_ENCODE (IMMED, inst);
- neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
+ neon_imm_shift (true, et.type == NT_unsigned, 0, et, imm);
}
}
inst.instruction |= op << 7;
inst.instruction |= rm << 16;
inst.instruction |= 0xf0000000;
- inst.is_neon = TRUE;
+ inst.is_neon = true;
}
static void
|| flavour == neon_cvt_flavour_s32_f32
|| flavour == neon_cvt_flavour_u32_f32))
{
- if (!check_simd_pred_availability (TRUE,
+ if (!check_simd_pred_availability (true,
NEON_CHECK_CC | NEON_CHECK_ARCH))
return;
}
|| flavour == neon_cvt_flavour_s32_f32
|| flavour == neon_cvt_flavour_u32_f32))
{
- if (!check_simd_pred_availability (TRUE,
+ if (!check_simd_pred_availability (true,
NEON_CHECK_CC | NEON_CHECK_ARCH8))
return;
}
|| flavour == neon_cvt_flavour_s32_f32
|| flavour == neon_cvt_flavour_u32_f32))
{
- if (!check_simd_pred_availability (TRUE,
+ if (!check_simd_pred_availability (true,
NEON_CHECK_CC | NEON_CHECK_ARCH))
return;
}
{
NEON_ENCODE (FLOAT, inst);
- if (!check_simd_pred_availability (TRUE,
+ if (!check_simd_pred_availability (true,
NEON_CHECK_CC | NEON_CHECK_ARCH8))
return;
}
static void
-do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
+do_neon_cvttb_2 (bool t, bool to, bool is_double)
{
if (is_double)
mark_feature_used (&fpu_vfp_ext_armv8);
}
static void
-do_neon_cvttb_1 (bfd_boolean t)
+do_neon_cvttb_1 (bool t)
{
enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
NS_DF, NS_DH, NS_QQ, NS_QQI, NS_NULL);
else if (rs == NS_QQ || rs == NS_QQI)
{
int single_to_half = 0;
- if (!check_simd_pred_availability (TRUE, NEON_CHECK_ARCH))
+ if (!check_simd_pred_availability (true, NEON_CHECK_ARCH))
return;
enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
{
inst.error = NULL;
- do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
+ do_neon_cvttb_2 (t, /*to=*/true, /*is_double=*/false);
}
else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
{
inst.error = NULL;
- do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
+ do_neon_cvttb_2 (t, /*to=*/false, /*is_double=*/false);
}
else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
{
_(BAD_FPU));
inst.error = NULL;
- do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
+ do_neon_cvttb_2 (t, /*to=*/true, /*is_double=*/true);
}
else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
{
_(BAD_FPU));
inst.error = NULL;
- do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
+ do_neon_cvttb_2 (t, /*to=*/false, /*is_double=*/true);
}
else if (neon_check_type (2, rs, N_BF16 | N_VFP, N_F32).type != NT_invtype)
{
inst.error = NULL;
inst.instruction |= (1 << 8);
inst.instruction &= ~(1 << 9);
- do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
+ do_neon_cvttb_2 (t, /*to=*/true, /*is_double=*/false);
}
else
return;
static void
do_neon_cvtb (void)
{
- do_neon_cvttb_1 (FALSE);
+ do_neon_cvttb_1 (false);
}
static void
do_neon_cvtt (void)
{
- do_neon_cvttb_1 (TRUE);
+ do_neon_cvttb_1 (true);
}
static void
static void
do_neon_mvn (void)
{
- if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
+ if (!check_simd_pred_availability (false, NEON_CHECK_CC | NEON_CHECK_ARCH))
return;
if (inst.operands[1].isreg)
static void
do_neon_rev (void)
{
- if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
+ if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
return;
enum neon_shape rs;
N_8 | N_16 | N_32 | N_KEY, N_EQK);
if (rs == NS_QR)
{
- if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH))
+ if (!check_simd_pred_availability (false, NEON_CHECK_ARCH))
return;
}
else
case NS_QQ: /* case 0/1. */
{
- if (!check_simd_pred_availability (FALSE,
+ if (!check_simd_pred_availability (false,
NEON_CHECK_CC | NEON_CHECK_ARCH))
return;
/* The architecture manual I have doesn't explicitly state which
/* fall through. */
case NS_QI: /* case 2/3. */
- if (!check_simd_pred_availability (FALSE,
+ if (!check_simd_pred_availability (false,
NEON_CHECK_CC | NEON_CHECK_ARCH))
return;
inst.instruction = 0x0800010;
static void
do_neon_rshift_round_imm (void)
{
- if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
+ if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
return;
enum neon_shape rs;
constraint (imm < 1 || (unsigned)imm > et.size,
_("immediate out of range for shift"));
- neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
+ neon_imm_shift (true, et.type == NT_unsigned, neon_quad (rs), et,
et.size - imm);
}
static void
do_neon_sat_abs_neg (void)
{
- if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
+ if (!check_simd_pred_availability (false, NEON_CHECK_CC | NEON_CHECK_ARCH))
return;
enum neon_shape rs;
static void
do_neon_cls (void)
{
- if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
+ if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
return;
enum neon_shape rs;
static void
do_neon_clz (void)
{
- if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
+ if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
return;
enum neon_shape rs;
do_t_vldr_vstr_sysreg (void)
{
int fp_vldr_bitno = 20, sysreg_vldr_bitno = 20;
- bfd_boolean is_vldr = ((inst.instruction & (1 << fp_vldr_bitno)) != 0);
+ bool is_vldr = ((inst.instruction & (1 << fp_vldr_bitno)) != 0);
/* Use of PC is UNPREDICTABLE. */
if (inst.operands[1].reg == REG_PC)
inst.instruction = 0xec000f80;
if (is_vldr)
inst.instruction |= 1 << sysreg_vldr_bitno;
- encode_arm_cp_address (1, TRUE, FALSE, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM);
+ encode_arm_cp_address (1, true, false, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM);
inst.instruction |= (inst.operands[0].imm & 0x7) << 13;
inst.instruction |= (inst.operands[0].imm & 0x8) << 19;
}
static void
do_vldr_vstr (void)
{
- bfd_boolean sysreg_op = !inst.operands[0].isreg;
+ bool sysreg_op = !inst.operands[0].isreg;
/* VLDR/VSTR (System Register). */
if (sysreg_op)
if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
return;
- if (!check_simd_pred_availability (TRUE, NEON_CHECK_CC | NEON_CHECK_ARCH8))
+ if (!check_simd_pred_availability (true, NEON_CHECK_CC | NEON_CHECK_ARCH8))
return;
neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
if (et.type == NT_invtype)
return;
- if (!check_simd_pred_availability (TRUE,
+ if (!check_simd_pred_availability (true,
NEON_CHECK_CC | NEON_CHECK_ARCH8))
return;
_("immediate out of range"));
rot /= 90;
- if (!check_simd_pred_availability (TRUE,
+ if (!check_simd_pred_availability (true,
NEON_CHECK_ARCH8 | NEON_CHECK_CC))
return;
check_cde_operand (size_t index, int is_dual)
{
unsigned Rx = inst.operands[index].reg;
- bfd_boolean isvec = inst.operands[index].isvec;
+ bool isvec = inst.operands[index].isvec;
if (is_dual == 0 && thumb_mode)
constraint (
!((Rx <= 14 && Rx != 13) || (Rx == REG_PC && isvec)),
_("Register must be an even register between r0-r10."));
}
-static bfd_boolean
+static bool
cde_coproc_enabled (unsigned coproc)
{
switch (coproc)
case 5: return mark_feature_used (&arm_ext_cde5);
case 6: return mark_feature_used (&arm_ext_cde6);
case 7: return mark_feature_used (&arm_ext_cde7);
- default: return FALSE;
+ default: return false;
}
}
#undef cde_coproc_pos
static void
-cxn_handle_predication (bfd_boolean is_accum)
+cxn_handle_predication (bool is_accum)
{
if (is_accum && conditional_insn ())
set_pred_insn_type (INSIDE_IT_INSN);
}
static void
-do_custom_instruction_1 (int is_dual, bfd_boolean is_accum)
+do_custom_instruction_1 (int is_dual, bool is_accum)
{
constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));
}
static void
-do_custom_instruction_2 (int is_dual, bfd_boolean is_accum)
+do_custom_instruction_2 (int is_dual, bool is_accum)
{
constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));
}
static void
-do_custom_instruction_3 (int is_dual, bfd_boolean is_accum)
+do_custom_instruction_3 (int is_dual, bool is_accum)
{
constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));
now_pred.block_length = 1;
mapping_state (MAP_THUMB);
now_pred.insn = output_it_inst (cond, now_pred.mask, NULL);
- now_pred.warn_deprecated = FALSE;
- now_pred.insn_cond = TRUE;
+ now_pred.warn_deprecated = false;
+ now_pred.insn_cond = true;
}
/* Close an automatic IT block.
handle_pred_state (void)
{
now_pred.state_handled = 1;
- now_pred.insn_cond = FALSE;
+ now_pred.insn_cond = false;
switch (now_pred.state)
{
}
else
{
- now_pred.insn_cond = TRUE;
+ now_pred.insn_cond = true;
now_pred_add_mask (inst.cond);
}
/* Fallthrough. */
case NEUTRAL_IT_INSN:
now_pred.block_length++;
- now_pred.insn_cond = TRUE;
+ now_pred.insn_cond = true;
if (now_pred.block_length > 4)
force_automatic_it_block_close ();
now_pred.mask &= 0x1f;
is_last = now_pred.mask == 0x10;
}
- now_pred.insn_cond = TRUE;
+ now_pred.insn_cond = true;
switch (inst.pred_insn_type)
{
{
as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
"performance deprecated in ARMv8-A and ARMv8-R"));
- now_pred.warn_deprecated = TRUE;
+ now_pred.warn_deprecated = true;
}
else
{
"instructions of the following class are "
"performance deprecated in ARMv8-A and "
"ARMv8-R: %s"), p->description);
- now_pred.warn_deprecated = TRUE;
+ now_pred.warn_deprecated = true;
break;
}
as_tsktsk (_("IT blocks containing more than one conditional "
"instruction are performance deprecated in ARMv8-A and "
"ARMv8-R"));
- now_pred.warn_deprecated = TRUE;
+ now_pred.warn_deprecated = true;
}
}
t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
here, hence the "known" in the function name. */
-static bfd_boolean
+static bool
known_t32_only_insn (const struct asm_opcode *opcode)
{
/* Original Thumb-1 wide instruction. */
|| opcode->tencode == do_t_branch23
|| ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
|| ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
- return TRUE;
+ return true;
/* Wide-only instruction added to ARMv8-M Baseline. */
if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
|| ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
|| ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
|| ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
- return TRUE;
+ return true;
- return FALSE;
+ return false;
}
/* Whether wide instruction variant can be used if available for a valid OPCODE
in ARCH. */
-static bfd_boolean
+static bool
t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
{
if (known_t32_only_insn (opcode))
- return TRUE;
+ return true;
/* Instruction with narrow and wide encoding added to ARMv8-M. Availability
of variant T3 of B.W is checked in do_t_branch. */
if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
&& opcode->tencode == do_t_branch)
- return TRUE;
+ return true;
/* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
&& opcode->tencode == do_t_mov_cmp
/* Make sure CMP instruction is not affected. */
&& opcode->aencode == do_mov)
- return TRUE;
+ return true;
/* Wide instruction variants of all instructions with narrow *and* wide
variants become available with ARMv6t2. Other opcodes are either
narrow-only or wide-only and are thus available if OPCODE is valid. */
if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
- return TRUE;
+ return true;
/* OPCODE with narrow only instruction variant or wide variant not
available. */
- return FALSE;
+ return false;
}
void
inst.instruction = opcode->tvalue;
- if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
+ if (!parse_operands (p, opcode->operands, /*thumb=*/true))
{
/* Prepare the pred_insn_type for those encodings that don't set
it. */
}
else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
{
- bfd_boolean is_bx;
+ bool is_bx;
/* bx is allowed on v5 cores, and sometimes on v4 cores. */
is_bx = (opcode->aencode == do_bx);
else
inst.instruction |= inst.cond << 28;
inst.size = INSN_SIZE;
- if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
+ if (!parse_operands (p, opcode->operands, /*thumb=*/false))
{
it_fsm_pre_encode ();
opcode->aencode ();
THUMB_SET_FUNC (sym, 1);
- label_is_thumb_function_name = FALSE;
+ label_is_thumb_function_name = false;
}
dwarf2_emit_label (sym);
}
-bfd_boolean
+bool
arm_data_in_code (void)
{
if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
*input_line_pointer = '/';
input_line_pointer += 5;
*input_line_pointer = 0;
- return TRUE;
+ return true;
}
- return FALSE;
+ return false;
}
char *
should appear in both upper and lowercase variants. Some registers
also have mixed-case names. */
-#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
+#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
/* Return TRUE iff the definition of symbol S could be pre-empted
(overridden) at link or load time. */
-static bfd_boolean
+static bool
symbol_preemptible (symbolS *s)
{
/* Weak symbols can always be pre-empted. */
if (S_IS_WEAK (s))
- return TRUE;
+ return true;
/* Non-global symbols cannot be pre-empted. */
if (! S_IS_EXTERNAL (s))
- return FALSE;
+ return false;
#ifdef OBJ_ELF
/* In ELF, a global symbol can be marked protected, or private. In that
case it can't be pre-empted (other definitions in the same link unit
would violate the ODR). */
if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
- return FALSE;
+ return false;
#endif
/* Other global symbols might be pre-empted. */
- return TRUE;
+ return true;
}
/* Return the size of a relaxable branch instruction. BITS is the
void
arm_init_frag (fragS * fragP, int max_chars)
{
- bfd_boolean frag_thumb_mode;
+ bool frag_thumb_mode;
/* If the current ARM vs THUMB mode has not already
been recorded into this frag then do so now. */
case BFD_RELOC_THUMB_PCREL_BRANCH23:
if (fixP->fx_addsy
&& (S_GET_SEGMENT (fixP->fx_addsy) == seg)
- && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
+ && !S_FORCE_RELOC (fixP->fx_addsy, true)
&& ARM_IS_FUNC (fixP->fx_addsy)
&& ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
base = fixP->fx_where + fixP->fx_frag->fr_address;
case BFD_RELOC_THUMB_PCREL_BLX:
if (fixP->fx_addsy
&& (S_GET_SEGMENT (fixP->fx_addsy) == seg)
- && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
+ && !S_FORCE_RELOC (fixP->fx_addsy, true)
&& THUMB_IS_FUNC (fixP->fx_addsy)
&& ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
base = fixP->fx_where + fixP->fx_frag->fr_address;
case BFD_RELOC_ARM_PCREL_BLX:
if (fixP->fx_addsy
&& (S_GET_SEGMENT (fixP->fx_addsy) == seg)
- && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
+ && !S_FORCE_RELOC (fixP->fx_addsy, true)
&& ARM_IS_FUNC (fixP->fx_addsy)
&& ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
base = fixP->fx_where + fixP->fx_frag->fr_address;
case BFD_RELOC_ARM_PCREL_CALL:
if (fixP->fx_addsy
&& (S_GET_SEGMENT (fixP->fx_addsy) == seg)
- && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
+ && !S_FORCE_RELOC (fixP->fx_addsy, true)
&& THUMB_IS_FUNC (fixP->fx_addsy)
&& ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
base = fixP->fx_where + fixP->fx_frag->fr_address;
}
}
-static bfd_boolean flag_warn_syms = TRUE;
+static bool flag_warn_syms = true;
-bfd_boolean
+bool
arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
{
/* PR 18347 - Warn if the user attempts to create a symbol with the same
free (nbuf);
}
- return FALSE;
+ return false;
}
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
a constant. Prevent this and force a relocation when the first symbols
is a thumb function. */
-bfd_boolean
+bool
arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
{
if (op == O_subtract
l->X_op = O_subtract;
l->X_op_symbol = r->X_add_symbol;
l->X_add_number -= r->X_add_number;
- return TRUE;
+ return true;
}
/* Process as normal. */
- return FALSE;
+ return false;
}
/* Encode Thumb2 unconditional branches and calls. The encoding
if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
&& fixP->fx_addsy
- && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
+ && !S_FORCE_RELOC (fixP->fx_addsy, true)
&& (S_GET_SEGMENT (fixP->fx_addsy) == seg)
&& THUMB_IS_FUNC (fixP->fx_addsy))
/* Flip the bl to blx. This is a simple flip
case BFD_RELOC_ARM_PCREL_JUMP:
if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
&& fixP->fx_addsy
- && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
+ && !S_FORCE_RELOC (fixP->fx_addsy, true)
&& (S_GET_SEGMENT (fixP->fx_addsy) == seg)
&& THUMB_IS_FUNC (fixP->fx_addsy))
{
temp = 1;
if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
&& fixP->fx_addsy
- && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
+ && !S_FORCE_RELOC (fixP->fx_addsy, true)
&& (S_GET_SEGMENT (fixP->fx_addsy) == seg)
&& ARM_IS_FUNC (fixP->fx_addsy))
{
case BFD_RELOC_THUMB_PCREL_BRANCH20:
if (fixP->fx_addsy
&& (S_GET_SEGMENT (fixP->fx_addsy) == seg)
- && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
+ && !S_FORCE_RELOC (fixP->fx_addsy, true)
&& ARM_IS_FUNC (fixP->fx_addsy)
&& ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
{
about it. */
if (fixP->fx_addsy
- && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
+ && !S_FORCE_RELOC (fixP->fx_addsy, true)
&& (S_GET_SEGMENT (fixP->fx_addsy) == seg)
&& THUMB_IS_FUNC (fixP->fx_addsy))
{
is converted to a blx. */
if (fixP->fx_addsy
&& (S_GET_SEGMENT (fixP->fx_addsy) == seg)
- && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
+ && !S_FORCE_RELOC (fixP->fx_addsy, true)
&& ARM_IS_FUNC (fixP->fx_addsy)
&& ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
{
gas_assert (!fixP->fx_done);
{
bfd_vma insn;
- bfd_boolean is_mov;
+ bool is_mov;
bfd_vma encoded_addend = value;
/* Check that addend can be encoded in instruction. */
case BFD_RELOC_THUMB_PCREL_BRANCH5:
if (fixP->fx_addsy
&& (S_GET_SEGMENT (fixP->fx_addsy) == seg)
- && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
+ && !S_FORCE_RELOC (fixP->fx_addsy, true)
&& ARM_IS_FUNC (fixP->fx_addsy)
&& ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
{
/* Force a relocation for a branch 5 bits wide. */
fixP->fx_done = 0;
}
- if (v8_1_branch_value_check (value, 5, FALSE) == FAIL)
+ if (v8_1_branch_value_check (value, 5, false) == FAIL)
as_bad_where (fixP->fx_file, fixP->fx_line,
BAD_BRANCH_OFF);
case BFD_RELOC_THUMB_PCREL_BFCSEL:
if (fixP->fx_addsy
&& (S_GET_SEGMENT (fixP->fx_addsy) == seg)
- && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
+ && !S_FORCE_RELOC (fixP->fx_addsy, true)
&& ARM_IS_FUNC (fixP->fx_addsy)
&& ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
{
case BFD_RELOC_ARM_THUMB_BF17:
if (fixP->fx_addsy
&& (S_GET_SEGMENT (fixP->fx_addsy) == seg)
- && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
+ && !S_FORCE_RELOC (fixP->fx_addsy, true)
&& ARM_IS_FUNC (fixP->fx_addsy)
&& ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
{
fixP->fx_done = 0;
}
- if (v8_1_branch_value_check (value, 17, TRUE) == FAIL)
+ if (v8_1_branch_value_check (value, 17, true) == FAIL)
as_bad_where (fixP->fx_file, fixP->fx_line,
BAD_BRANCH_OFF);
case BFD_RELOC_ARM_THUMB_BF19:
if (fixP->fx_addsy
&& (S_GET_SEGMENT (fixP->fx_addsy) == seg)
- && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
+ && !S_FORCE_RELOC (fixP->fx_addsy, true)
&& ARM_IS_FUNC (fixP->fx_addsy)
&& ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
{
fixP->fx_done = 0;
}
- if (v8_1_branch_value_check (value, 19, TRUE) == FAIL)
+ if (v8_1_branch_value_check (value, 19, true) == FAIL)
as_bad_where (fixP->fx_file, fixP->fx_line,
BAD_BRANCH_OFF);
case BFD_RELOC_ARM_THUMB_BF13:
if (fixP->fx_addsy
&& (S_GET_SEGMENT (fixP->fx_addsy) == seg)
- && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
+ && !S_FORCE_RELOC (fixP->fx_addsy, true)
&& ARM_IS_FUNC (fixP->fx_addsy)
&& ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
{
fixP->fx_done = 0;
}
- if (v8_1_branch_value_check (value, 13, TRUE) == FAIL)
+ if (v8_1_branch_value_check (value, 13, true) == FAIL)
as_bad_where (fixP->fx_file, fixP->fx_line,
BAD_BRANCH_OFF);
case BFD_RELOC_ARM_THUMB_LOOP12:
if (fixP->fx_addsy
&& (S_GET_SEGMENT (fixP->fx_addsy) == seg)
- && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
+ && !S_FORCE_RELOC (fixP->fx_addsy, true)
&& ARM_IS_FUNC (fixP->fx_addsy)
&& ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
{
|| ((insn & 0xffffffff) == 0xf01fc001))
value = -value;
- if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
+ if (v8_1_branch_value_check (value, 12, false) == FAIL)
as_bad_where (fixP->fx_file, fixP->fx_line,
BAD_BRANCH_OFF);
if (fixP->fx_done || !seg->use_rela_p)
addresses also ought to have their bottom bit set (assuming that
they reside in Thumb code), but at the moment they will not. */
-bfd_boolean
+bool
arm_fix_adjustable (fixS * fixP)
{
if (fixP->fx_addsy == NULL)
/* Preserve relocations against symbols with function type. */
if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
- return FALSE;
+ return false;
if (THUMB_IS_FUNC (fixP->fx_addsy)
&& fixP->fx_subsy == NULL)
- return FALSE;
+ return false;
/* We need the symbol name for the VTABLE entries. */
if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
|| fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
- return FALSE;
+ return false;
/* Don't allow symbols to be discarded on GOT related relocs. */
if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
|| fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
|| fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
|| fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
- return FALSE;
+ return false;
/* Similarly for group relocations. */
if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
&& fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
|| fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
- return FALSE;
+ return false;
/* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
|| fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
|| fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
|| fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
- return FALSE;
+ return false;
/* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
offsets, so keep these symbols. */
if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
&& fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
- return FALSE;
+ return false;
- return TRUE;
+ return true;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
" in ARMv8-A and ARMv8-R"), &warn_on_restrict_it, 1, NULL},
{"mno-warn-restrict-it", NULL, &warn_on_restrict_it, 0, NULL},
- {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
- {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
+ {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), true, NULL},
+ {"mno-warn-syms", N_("disable warnings about symbols that match instructions"), (int *) (& flag_warn_syms), false, NULL},
{NULL, NULL, NULL, 0, NULL}
};
struct arm_long_option_table
{
- const char * option; /* Substring to match. */
- const char * help; /* Help information. */
- bfd_boolean (*func) (const char *subopt); /* Function to decode sub-option. */
- const char * deprecated; /* If non-null, print this message. */
+ const char *option; /* Substring to match. */
+ const char *help; /* Help information. */
+ bool (*func) (const char *subopt); /* Function to decode sub-option. */
+ const char *deprecated; /* If non-null, print this message. */
};
-static bfd_boolean
+static bool
arm_parse_extension (const char *str, const arm_feature_set *opt_set,
arm_feature_set *ext_set,
const struct arm_ext_table *ext_table)
if (*str != '+')
{
as_bad (_("invalid architectural extension"));
- return FALSE;
+ return false;
}
str++;
{
as_bad (_("must specify extensions to add before specifying "
"those to remove"));
- return FALSE;
+ return false;
}
}
if (len == 0)
{
as_bad (_("missing architectural extension"));
- return FALSE;
+ return false;
}
gas_assert (adding_value != -1);
if (ext_table != NULL)
{
const struct arm_ext_table * ext_opt = ext_table;
- bfd_boolean found = FALSE;
+ bool found = false;
for (; ext_opt->name != NULL; ext_opt++)
if (ext_opt->name_len == len
&& strncmp (ext_opt->name, str, len) == 0)
continue;
ARM_CLEAR_FEATURE (*ext_set, *ext_set, ext_opt->clear);
}
- found = TRUE;
+ found = true;
break;
}
if (found)
if (i == nb_allowed_archs)
{
as_bad (_("extension does not apply to the base architecture"));
- return FALSE;
+ return false;
}
/* Add or remove the extension. */
as_bad (_("architectural extensions must be specified in "
"alphabetical order"));
- return FALSE;
+ return false;
}
else
{
str = ext;
};
- return TRUE;
+ return true;
}
-static bfd_boolean
+static bool
arm_parse_fp16_opt (const char *str)
{
if (strcasecmp (str, "ieee") == 0)
else
{
as_bad (_("unrecognised float16 format \"%s\""), str);
- return FALSE;
+ return false;
}
- return TRUE;
+ return true;
}
-static bfd_boolean
+static bool
arm_parse_cpu (const char *str)
{
const struct arm_cpu_option_table *opt;
if (len == 0)
{
as_bad (_("missing cpu name `%s'"), str);
- return FALSE;
+ return false;
}
for (opt = arm_cpus; opt->name != NULL; opt++)
if (ext != NULL)
return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt, NULL);
- return TRUE;
+ return true;
}
as_bad (_("unknown cpu `%s'"), str);
- return FALSE;
+ return false;
}
-static bfd_boolean
+static bool
arm_parse_arch (const char *str)
{
const struct arm_arch_option_table *opt;
if (len == 0)
{
as_bad (_("missing architecture name `%s'"), str);
- return FALSE;
+ return false;
}
for (opt = arm_archs; opt->name != NULL; opt++)
return arm_parse_extension (ext, march_cpu_opt, march_ext_opt,
opt->ext_table);
- return TRUE;
+ return true;
}
as_bad (_("unknown architecture `%s'\n"), str);
- return FALSE;
+ return false;
}
-static bfd_boolean
+static bool
arm_parse_fpu (const char * str)
{
const struct arm_option_fpu_value_table * opt;
if (streq (opt->name, str))
{
mfpu_opt = &opt->value;
- return TRUE;
+ return true;
}
as_bad (_("unknown floating point format `%s'\n"), str);
- return FALSE;
+ return false;
}
-static bfd_boolean
+static bool
arm_parse_float_abi (const char * str)
{
const struct arm_option_value_table * opt;
if (streq (opt->name, str))
{
mfloat_abi_opt = opt->value;
- return TRUE;
+ return true;
}
as_bad (_("unknown floating point abi `%s'\n"), str);
- return FALSE;
+ return false;
}
#ifdef OBJ_ELF
-static bfd_boolean
+static bool
arm_parse_eabi (const char * str)
{
const struct arm_option_value_table *opt;
if (streq (opt->name, str))
{
meabi_flags = opt->value;
- return TRUE;
+ return true;
}
as_bad (_("unknown EABI `%s'\n"), str);
- return FALSE;
+ return false;
}
#endif
-static bfd_boolean
+static bool
arm_parse_it_mode (const char * str)
{
- bfd_boolean ret = TRUE;
+ bool ret = true;
if (streq ("arm", str))
implicit_it_mode = IMPLICIT_IT_MODE_ARM;
{
as_bad (_("unknown implicit IT mode `%s', should be "\
"arm, thumb, always, or never."), str);
- ret = FALSE;
+ ret = false;
}
return ret;
}
-static bfd_boolean
+static bool
arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
{
- codecomposer_syntax = TRUE;
+ codecomposer_syntax = true;
arm_comment_chars[0] = ';';
arm_line_separator_chars[0] = 0;
- return TRUE;
+ return true;
}
struct arm_long_option_table arm_long_opts[] =
#endif
case OPTION_FIX_V4BX:
- fix_v4bx = TRUE;
+ fix_v4bx = true;
break;
#ifdef OBJ_ELF
case OPTION_FDPIC:
- arm_fdpic = TRUE;
+ arm_fdpic = true;
break;
#endif /* OBJ_ELF */
/* Return whether features in the *NEEDED feature set are available via
extensions for the architecture whose feature set is *ARCH_FSET. */
-static bfd_boolean
+static bool
have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
const arm_feature_set *needed)
{
will always be generated for it, so applying the symbol value now
will result in a double offset being stored in the relocation. */
&& (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
- && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
+ && !S_FORCE_RELOC (fixP->fx_addsy, true))
{
switch (fixP->fx_r_type)
{