}
/* Try to parse a base or offset register. ACCEPT_SP says whether {W}SP
- should be considered valid and ACCEPT_RZ says whether zero registers
+ should be considered valid, ACCEPT_RZ says whether zero registers
+ should be considered valid, and ACCEPT_SVE says whether SVE registers
should be considered valid.
Return the register number on success, setting *QUALIFIER to the
register qualifier and *ISREGZERO to whether the register is a zero
register.  Return PARSE_FAIL otherwise.

Note that this function does not issue any diagnostics. */
static int
-aarch64_reg_parse_32_64 (char **ccp, bfd_boolean accept_sp,
- bfd_boolean accept_rz,
- aarch64_opnd_qualifier_t *qualifier,
- bfd_boolean *isregzero)
+aarch64_addr_reg_parse (char **ccp, bfd_boolean accept_sp,
+ bfd_boolean accept_rz, bfd_boolean accept_sve,
+ aarch64_opnd_qualifier_t *qualifier,
+ bfd_boolean *isregzero)
{
char *str = *ccp;
const reg_entry *reg = parse_reg (&str);
if (reg == NULL)
return PARSE_FAIL;
- if (! aarch64_check_reg_type (reg, REG_TYPE_R_Z_SP))
- return PARSE_FAIL;
-
switch (reg->type)
{
case REG_TYPE_SP_32:
: AARCH64_OPND_QLF_X);
*isregzero = TRUE;
break;
+ case REG_TYPE_ZN:
+ if (!accept_sve || str[0] != '.')
+ return PARSE_FAIL;
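+      /* An element size suffix is required here; e.g. "z3.d" yields
+         register number 3 with qualifier AARCH64_OPND_QLF_S_D.  */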
+ switch (TOLOWER (str[1]))
+ {
+ case 's':
+ *qualifier = AARCH64_OPND_QLF_S_S;
+ break;
+ case 'd':
+ *qualifier = AARCH64_OPND_QLF_S_D;
+ break;
+ default:
+ return PARSE_FAIL;
+ }
+ str += 2;
+ *isregzero = FALSE;
+ break;
default:
return PARSE_FAIL;
}
return reg->number;
}
+/* Try to parse a scalar base or offset register. ACCEPT_SP says whether
+ {W}SP should be considered valid and ACCEPT_RZ says whether zero
+ registers should be considered valid.
+
+ Return the register number on success, setting *QUALIFIER to the
+ register qualifier and *ISREGZERO to whether the register is a zero
+ register. Return PARSE_FAIL otherwise.
+
+ Note that this function does not issue any diagnostics. */
+
+static int
+aarch64_reg_parse_32_64 (char **ccp, bfd_boolean accept_sp,
+ bfd_boolean accept_rz,
+ aarch64_opnd_qualifier_t *qualifier,
+ bfd_boolean *isregzero)
+{
+ return aarch64_addr_reg_parse (ccp, accept_sp, accept_rz, FALSE,
+ qualifier, isregzero);
+}
+
/* Parse the qualifier of a vector register or vector element of type
REG_TYPE. Fill in *PARSED_TYPE and return TRUE if the parsing
succeeds; otherwise return FALSE.
The A64 instruction set has the following addressing modes:
Offset
- [base] // in SIMD ld/st structure
- [base{,#0}] // in ld/st exclusive
+ [base] // in SIMD ld/st structure
+ [base{,#0}] // in ld/st exclusive
[base{,#imm}]
[base,Xm{,LSL #imm}]
[base,Xm,SXTX {#imm}]
[base,#imm]!
Post-indexed
[base],#imm
- [base],Xm // in SIMD ld/st structure
+ [base],Xm // in SIMD ld/st structure
PC-relative (literal)
label
- =immediate
+ SVE:
+ [base,Zm.D{,LSL #imm}]
+ [base,Zm.S,(S|U)XTW {#imm}]
+ [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
+ [Zn.S,#imm]
+ [Zn.D,#imm]
+ [Zn.S,Zm.S{,LSL #imm}] // }
+ [Zn.D,Zm.D{,LSL #imm}] // } in ADR
+ [Zn.D,Zm.D,(S|U)XTW {#imm}] // }
(As a convenience, the notation "=immediate" is permitted in conjunction
with the pc-relative literal load instructions to automatically place an
.pcrel=1; .preind=1; .postind=0; .writeback=0
The shift/extension information, if any, will be stored in .shifter.
+ The base and offset qualifiers will be stored in *BASE_QUALIFIER and
+ *OFFSET_QUALIFIER respectively, with NIL being used if there's no
+ corresponding register.
RELOC says whether relocation operators should be accepted
and ACCEPT_REG_POST_INDEX says whether post-indexed register
addressing should be accepted.
+   Likewise ACCEPT_SVE says whether the SVE addressing modes should be
+   accepted.  We use context-dependent parsing for this because, for
+   compatibility, base AArch64 code must still be able to use symbolic
+   constants with names like z0 or z0.s.
+
In all other cases, it is the caller's responsibility to check whether
the addressing mode is supported by the instruction. It is also the
caller's responsibility to set inst.reloc.type. */
static bfd_boolean
-parse_address_main (char **str, aarch64_opnd_info *operand, bfd_boolean reloc,
- bfd_boolean accept_reg_post_index)
+parse_address_main (char **str, aarch64_opnd_info *operand,
+ aarch64_opnd_qualifier_t *base_qualifier,
+ aarch64_opnd_qualifier_t *offset_qualifier,
+ bfd_boolean reloc, bfd_boolean accept_reg_post_index,
+ bfd_boolean accept_sve)
{
char *p = *str;
int reg;
- aarch64_opnd_qualifier_t base_qualifier;
- aarch64_opnd_qualifier_t offset_qualifier;
bfd_boolean isregzero;
expressionS *exp = &inst.reloc.exp;
+ *base_qualifier = AARCH64_OPND_QLF_NIL;
+ *offset_qualifier = AARCH64_OPND_QLF_NIL;
if (! skip_past_char (&p, '['))
{
/* =immediate or label. */
/* [ */
/* Accept SP and reject ZR */
- reg = aarch64_reg_parse_32_64 (&p, TRUE, FALSE, &base_qualifier, &isregzero);
- if (reg == PARSE_FAIL || base_qualifier == AARCH64_OPND_QLF_W)
+ reg = aarch64_addr_reg_parse (&p, TRUE, FALSE, accept_sve, base_qualifier,
+ &isregzero);
+ if (reg == PARSE_FAIL)
+ {
+ set_syntax_error (_("base register expected"));
+ return FALSE;
+ }
+ else if (*base_qualifier == AARCH64_OPND_QLF_W)
{
set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
return FALSE;
operand->addr.preind = 1;
/* Reject SP and accept ZR */
- reg = aarch64_reg_parse_32_64 (&p, FALSE, TRUE, &offset_qualifier,
- &isregzero);
+ reg = aarch64_addr_reg_parse (&p, FALSE, TRUE, accept_sve,
+ offset_qualifier, &isregzero);
if (reg != PARSE_FAIL)
{
/* [Xn,Rm */
|| operand->shifter.kind == AARCH64_MOD_LSL
|| operand->shifter.kind == AARCH64_MOD_SXTX)
{
- if (offset_qualifier == AARCH64_OPND_QLF_W)
+ if (*offset_qualifier == AARCH64_OPND_QLF_W)
{
set_syntax_error (_("invalid use of 32-bit register offset"));
return FALSE;
}
+ if (aarch64_get_qualifier_esize (*base_qualifier)
+ != aarch64_get_qualifier_esize (*offset_qualifier))
+ {
+ set_syntax_error (_("offset has different size from base"));
+ return FALSE;
+ }
}
- else if (offset_qualifier == AARCH64_OPND_QLF_X)
+ else if (*offset_qualifier == AARCH64_OPND_QLF_X)
{
set_syntax_error (_("invalid use of 64-bit register offset"));
return FALSE;
inst.reloc.type = entry->ldst_type;
inst.reloc.pc_rel = entry->pc_rel;
}
- else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
+ else
{
- set_syntax_error (_("invalid expression in the address"));
- return FALSE;
+ if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
+ {
+ set_syntax_error (_("invalid expression in the address"));
+ return FALSE;
+ }
+ /* [Xn,<expr> */
+ if (accept_sve && exp->X_op != O_constant)
+ {
+ set_syntax_error (_("constant offset required"));
+ return FALSE;
+ }
}
- /* [Xn,<expr> */
}
}
if (accept_reg_post_index
&& (reg = aarch64_reg_parse_32_64 (&p, FALSE, FALSE,
- &offset_qualifier,
+ offset_qualifier,
&isregzero)) != PARSE_FAIL)
{
/* [Xn],Xm */
- if (offset_qualifier == AARCH64_OPND_QLF_W)
+ if (*offset_qualifier == AARCH64_OPND_QLF_W)
{
set_syntax_error (_("invalid 32-bit register offset"));
return FALSE;
return TRUE;
}
-/* Parse an address that cannot contain relocation operators.
+/* Parse a base AArch64 address, i.e. one that cannot contain SVE base
+ registers or SVE offset registers. Do not allow relocation operators.
Look for and parse "[Xn], (Xm|#m)" as post-indexed addressing
if ACCEPT_REG_POST_INDEX is true.
parse_address (char **str, aarch64_opnd_info *operand,
bfd_boolean accept_reg_post_index)
{
- return parse_address_main (str, operand, FALSE, accept_reg_post_index);
+ aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
+ return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
+ FALSE, accept_reg_post_index, FALSE);
}
-/* Parse an address that can contain relocation operators. Do not
- accept post-indexed addressing.
+/* Parse a base AArch64 address, i.e. one that cannot contain SVE base
+ registers or SVE offset registers. Allow relocation operators but
+ disallow post-indexed addressing.
Return TRUE on success. */
static bfd_boolean
parse_address_reloc (char **str, aarch64_opnd_info *operand)
{
- return parse_address_main (str, operand, TRUE, FALSE);
+ aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
+ return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
+ TRUE, FALSE, FALSE);
+}
+
+/* Parse an address in which SVE vector registers are allowed.
+ The arguments have the same meaning as for parse_address_main.
+ Return TRUE on success. */
+static bfd_boolean
+parse_sve_address (char **str, aarch64_opnd_info *operand,
+ aarch64_opnd_qualifier_t *base_qualifier,
+ aarch64_opnd_qualifier_t *offset_qualifier)
+{
+ return parse_address_main (str, operand, base_qualifier, offset_qualifier,
+ FALSE, FALSE, TRUE);
}
/* Parse an operand for a MOVZ, MOVN or MOVK instruction.
int comma_skipped_p = 0;
aarch64_reg_type rtype;
struct vector_type_el vectype;
- aarch64_opnd_qualifier_t qualifier;
+ aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
aarch64_opnd_info *info = &inst.base.operands[i];
aarch64_reg_type reg_type;
case AARCH64_OPND_ADDR_REGOFF:
/* [<Xn|SP>, <R><m>{, <extend> {<amount>}}] */
po_misc_or_fail (parse_address (&str, info, FALSE));
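+      /* Shared tail for all register-offset addresses, including the
+         SVE forms below that branch here.  */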
+ regoff_addr:
if (info->addr.pcrel || !info->addr.offset.is_reg
|| !info->addr.preind || info->addr.postind
|| info->addr.writeback)
/* No qualifier. */
break;
+ case AARCH64_OPND_SVE_ADDR_RI_U6:
+ case AARCH64_OPND_SVE_ADDR_RI_U6x2:
+ case AARCH64_OPND_SVE_ADDR_RI_U6x4:
+ case AARCH64_OPND_SVE_ADDR_RI_U6x8:
+ /* [X<n>{, #imm}]
+ but recognizing SVE registers. */
+ po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
+ &offset_qualifier));
+ if (base_qualifier != AARCH64_OPND_QLF_X)
+ {
+ set_syntax_error (_("invalid addressing mode"));
+ goto failure;
+ }
+ sve_regimm:
+ if (info->addr.pcrel || info->addr.offset.is_reg
+ || !info->addr.preind || info->addr.writeback)
+ {
+ set_syntax_error (_("invalid addressing mode"));
+ goto failure;
+ }
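+      /* parse_sve_address only accepts constant offsets, so the
+         expression must already have been folded to O_constant.  */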
+ gas_assert (inst.reloc.exp.X_op == O_constant);
+ info->addr.offset.imm = inst.reloc.exp.X_add_number;
+ break;
+
+ case AARCH64_OPND_SVE_ADDR_RR:
+ case AARCH64_OPND_SVE_ADDR_RR_LSL1:
+ case AARCH64_OPND_SVE_ADDR_RR_LSL2:
+ case AARCH64_OPND_SVE_ADDR_RR_LSL3:
+ case AARCH64_OPND_SVE_ADDR_RX:
+ case AARCH64_OPND_SVE_ADDR_RX_LSL1:
+ case AARCH64_OPND_SVE_ADDR_RX_LSL2:
+ case AARCH64_OPND_SVE_ADDR_RX_LSL3:
+ /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
+ but recognizing SVE registers. */
+ po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
+ &offset_qualifier));
+ if (base_qualifier != AARCH64_OPND_QLF_X
+ || offset_qualifier != AARCH64_OPND_QLF_X)
+ {
+ set_syntax_error (_("invalid addressing mode"));
+ goto failure;
+ }
+ goto regoff_addr;
+
+ case AARCH64_OPND_SVE_ADDR_RZ:
+ case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
+ case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
+ case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
+ /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
+ [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}] */
+ po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
+ &offset_qualifier));
+ if (base_qualifier != AARCH64_OPND_QLF_X
+ || (offset_qualifier != AARCH64_OPND_QLF_S_S
+ && offset_qualifier != AARCH64_OPND_QLF_S_D))
+ {
+ set_syntax_error (_("invalid addressing mode"));
+ goto failure;
+ }
+ info->qualifier = offset_qualifier;
+ goto regoff_addr;
+
+ case AARCH64_OPND_SVE_ADDR_ZI_U5:
+ case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
+ case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
+ case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
+ /* [Z<n>.<T>{, #imm}] */
+ po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
+ &offset_qualifier));
+ if (base_qualifier != AARCH64_OPND_QLF_S_S
+ && base_qualifier != AARCH64_OPND_QLF_S_D)
+ {
+ set_syntax_error (_("invalid addressing mode"));
+ goto failure;
+ }
+ info->qualifier = base_qualifier;
+ goto sve_regimm;
+
+ case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
+ case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
+ case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
+ /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
+ [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
+
+ We don't reject:
+
+ [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
+
+ here since we get better error messages by leaving it to
+ the qualifier checking routines. */
+ po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
+ &offset_qualifier));
+ if ((base_qualifier != AARCH64_OPND_QLF_S_S
+ && base_qualifier != AARCH64_OPND_QLF_S_D)
+ || offset_qualifier != base_qualifier)
+ {
+ set_syntax_error (_("invalid addressing mode"));
+ goto failure;
+ }
+ info->qualifier = base_qualifier;
+ goto regoff_addr;
+
case AARCH64_OPND_SYSREG:
if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
== PARSE_FAIL)
AARCH64_OPND_PRFOP, /* Prefetch operation. */
AARCH64_OPND_BARRIER_PSB, /* Barrier operand for PSB. */
+ AARCH64_OPND_SVE_ADDR_RI_U6, /* SVE [<Xn|SP>, #<uimm6>]. */
+ AARCH64_OPND_SVE_ADDR_RI_U6x2, /* SVE [<Xn|SP>, #<uimm6>*2]. */
+ AARCH64_OPND_SVE_ADDR_RI_U6x4, /* SVE [<Xn|SP>, #<uimm6>*4]. */
+ AARCH64_OPND_SVE_ADDR_RI_U6x8, /* SVE [<Xn|SP>, #<uimm6>*8]. */
+ AARCH64_OPND_SVE_ADDR_RR, /* SVE [<Xn|SP>, <Xm|XZR>]. */
+ AARCH64_OPND_SVE_ADDR_RR_LSL1, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #1]. */
+ AARCH64_OPND_SVE_ADDR_RR_LSL2, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #2]. */
+ AARCH64_OPND_SVE_ADDR_RR_LSL3, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #3]. */
+ AARCH64_OPND_SVE_ADDR_RX, /* SVE [<Xn|SP>, <Xm>]. */
+ AARCH64_OPND_SVE_ADDR_RX_LSL1, /* SVE [<Xn|SP>, <Xm>, LSL #1]. */
+ AARCH64_OPND_SVE_ADDR_RX_LSL2, /* SVE [<Xn|SP>, <Xm>, LSL #2]. */
+ AARCH64_OPND_SVE_ADDR_RX_LSL3, /* SVE [<Xn|SP>, <Xm>, LSL #3]. */
+ AARCH64_OPND_SVE_ADDR_RZ, /* SVE [<Xn|SP>, Zm.D]. */
+ AARCH64_OPND_SVE_ADDR_RZ_LSL1, /* SVE [<Xn|SP>, Zm.D, LSL #1]. */
+ AARCH64_OPND_SVE_ADDR_RZ_LSL2, /* SVE [<Xn|SP>, Zm.D, LSL #2]. */
+ AARCH64_OPND_SVE_ADDR_RZ_LSL3, /* SVE [<Xn|SP>, Zm.D, LSL #3]. */
+ AARCH64_OPND_SVE_ADDR_RZ_XTW_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
+ Bit 14 controls S/U choice. */
+ AARCH64_OPND_SVE_ADDR_RZ_XTW_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
+ Bit 22 controls S/U choice. */
+ AARCH64_OPND_SVE_ADDR_RZ_XTW1_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
+ Bit 14 controls S/U choice. */
+ AARCH64_OPND_SVE_ADDR_RZ_XTW1_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
+ Bit 22 controls S/U choice. */
+ AARCH64_OPND_SVE_ADDR_RZ_XTW2_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
+ Bit 14 controls S/U choice. */
+ AARCH64_OPND_SVE_ADDR_RZ_XTW2_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
+ Bit 22 controls S/U choice. */
+ AARCH64_OPND_SVE_ADDR_RZ_XTW3_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
+ Bit 14 controls S/U choice. */
+ AARCH64_OPND_SVE_ADDR_RZ_XTW3_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
+ Bit 22 controls S/U choice. */
+ AARCH64_OPND_SVE_ADDR_ZI_U5, /* SVE [Zn.<T>, #<uimm5>]. */
+ AARCH64_OPND_SVE_ADDR_ZI_U5x2, /* SVE [Zn.<T>, #<uimm5>*2]. */
+ AARCH64_OPND_SVE_ADDR_ZI_U5x4, /* SVE [Zn.<T>, #<uimm5>*4]. */
+ AARCH64_OPND_SVE_ADDR_ZI_U5x8, /* SVE [Zn.<T>, #<uimm5>*8]. */
+  AARCH64_OPND_SVE_ADDR_ZZ_LSL,     /* SVE [Zn.<T>, Zm.<T>, LSL #<msz>]. */
+  AARCH64_OPND_SVE_ADDR_ZZ_SXTW,    /* SVE [Zn.<T>, Zm.<T>, SXTW #<msz>]. */
+  AARCH64_OPND_SVE_ADDR_ZZ_UXTW,    /* SVE [Zn.<T>, Zm.<T>, UXTW #<msz>]. */
AARCH64_OPND_SVE_PATTERN, /* SVE vector pattern enumeration. */
AARCH64_OPND_SVE_PATTERN_SCALED, /* Likewise, with additional MUL factor. */
AARCH64_OPND_SVE_PRFOP, /* SVE prefetch operation. */
case 27:
case 35:
case 36:
- case 92:
- case 93:
- case 94:
- case 95:
- case 96:
- case 97:
- case 98:
- case 99:
- case 100:
- case 101:
- case 102:
- case 103:
- case 104:
- case 105:
- case 108:
+ case 123:
+ case 124:
+ case 125:
+ case 126:
+ case 127:
+ case 128:
+ case 129:
+ case 130:
+ case 131:
+ case 132:
+ case 133:
+ case 134:
+ case 135:
+ case 136:
+ case 139:
return aarch64_ins_regno (self, info, code, inst);
case 12:
return aarch64_ins_reg_extended (self, info, code, inst);
case 68:
case 69:
case 70:
- case 89:
- case 91:
+ case 120:
+ case 122:
return aarch64_ins_imm (self, info, code, inst);
case 38:
case 39:
return aarch64_ins_prfop (self, info, code, inst);
case 88:
return aarch64_ins_hint (self, info, code, inst);
+ case 89:
case 90:
- return aarch64_ins_sve_scale (self, info, code, inst);
+ case 91:
+ case 92:
+ return aarch64_ins_sve_addr_ri_u6 (self, info, code, inst);
+ case 93:
+ case 94:
+ case 95:
+ case 96:
+ case 97:
+ case 98:
+ case 99:
+ case 100:
+ case 101:
+ case 102:
+ case 103:
+ case 104:
+ return aarch64_ins_sve_addr_rr_lsl (self, info, code, inst);
+ case 105:
case 106:
- return aarch64_ins_sve_index (self, info, code, inst);
case 107:
+ case 108:
case 109:
+ case 110:
+ case 111:
+ case 112:
+ return aarch64_ins_sve_addr_rz_xtw (self, info, code, inst);
+ case 113:
+ case 114:
+ case 115:
+ case 116:
+ return aarch64_ins_sve_addr_zi_u5 (self, info, code, inst);
+ case 117:
+ return aarch64_ins_sve_addr_zz_lsl (self, info, code, inst);
+ case 118:
+ return aarch64_ins_sve_addr_zz_sxtw (self, info, code, inst);
+ case 119:
+ return aarch64_ins_sve_addr_zz_uxtw (self, info, code, inst);
+ case 121:
+ return aarch64_ins_sve_scale (self, info, code, inst);
+ case 137:
+ return aarch64_ins_sve_index (self, info, code, inst);
+ case 138:
+ case 140:
return aarch64_ins_sve_reglist (self, info, code, inst);
default: assert (0); abort ();
}
return NULL;
}
+/* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
+ is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
+ value. fields[0] specifies the base register field. */
+const char *
+aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
+ const aarch64_opnd_info *info, aarch64_insn *code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ int factor = 1 << get_operand_specific_data (self);
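+  /* The offset has already been checked to be a multiple of FACTOR,
+     so e.g. #24 with a factor of 8 encodes as an imm6 value of 3.  */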
+ insert_field (self->fields[0], code, info->addr.base_regno, 0);
+ insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
+ return NULL;
+}
+
+/* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
+ is SELF's operand-dependent value. fields[0] specifies the base
+ register field and fields[1] specifies the offset register field. */
+const char *
+aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
+ const aarch64_opnd_info *info, aarch64_insn *code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ insert_field (self->fields[0], code, info->addr.base_regno, 0);
+ insert_field (self->fields[1], code, info->addr.offset.regno, 0);
+ return NULL;
+}
+
+/* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
+ <shift> is SELF's operand-dependent value. fields[0] specifies the
+ base register field, fields[1] specifies the offset register field and
+ fields[2] is a single-bit field that selects SXTW over UXTW. */
+const char *
+aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
+ const aarch64_opnd_info *info, aarch64_insn *code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ insert_field (self->fields[0], code, info->addr.base_regno, 0);
+ insert_field (self->fields[1], code, info->addr.offset.regno, 0);
+ if (info->shifter.kind == AARCH64_MOD_UXTW)
+ insert_field (self->fields[2], code, 0, 0);
+ else
+ insert_field (self->fields[2], code, 1, 0);
+ return NULL;
+}
+
+/* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
+ 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
+ fields[0] specifies the base register field. */
+const char *
+aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
+ const aarch64_opnd_info *info, aarch64_insn *code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ int factor = 1 << get_operand_specific_data (self);
+ insert_field (self->fields[0], code, info->addr.base_regno, 0);
+ insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
+ return NULL;
+}
+
+/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
+ where <modifier> is fixed by the instruction and where <msz> is a
+ 2-bit unsigned number. fields[0] specifies the base register field
+ and fields[1] specifies the offset register field. */
+static const char *
+aarch64_ins_sve_addr_zz (const aarch64_operand *self,
+ const aarch64_opnd_info *info, aarch64_insn *code)
+{
+ insert_field (self->fields[0], code, info->addr.base_regno, 0);
+ insert_field (self->fields[1], code, info->addr.offset.regno, 0);
+ insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
+ return NULL;
+}
+
+/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
+ <msz> is a 2-bit unsigned number. fields[0] specifies the base register
+ field and fields[1] specifies the offset register field. */
+const char *
+aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
+ const aarch64_opnd_info *info, aarch64_insn *code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+  return aarch64_ins_sve_addr_zz (self, info, code);
+}
+
+/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
+ <msz> is a 2-bit unsigned number. fields[0] specifies the base register
+ field and fields[1] specifies the offset register field. */
+const char *
+aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
+ const aarch64_opnd_info *info,
+ aarch64_insn *code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+  return aarch64_ins_sve_addr_zz (self, info, code);
+}
+
+/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
+ <msz> is a 2-bit unsigned number. fields[0] specifies the base register
+ field and fields[1] specifies the offset register field. */
+const char *
+aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
+ const aarch64_opnd_info *info,
+ aarch64_insn *code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+  return aarch64_ins_sve_addr_zz (self, info, code);
+}
+
/* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
array specifies which field to use for Zn. MM is encoded in the
concatenation of imm5 and SVE_tszh, with imm5 being the less
AARCH64_DECL_OPD_INSERTER (ins_prfop);
AARCH64_DECL_OPD_INSERTER (ins_reg_extended);
AARCH64_DECL_OPD_INSERTER (ins_reg_shifted);
+AARCH64_DECL_OPD_INSERTER (ins_sve_addr_ri_u6);
+AARCH64_DECL_OPD_INSERTER (ins_sve_addr_rr_lsl);
+AARCH64_DECL_OPD_INSERTER (ins_sve_addr_rz_xtw);
+AARCH64_DECL_OPD_INSERTER (ins_sve_addr_zi_u5);
+AARCH64_DECL_OPD_INSERTER (ins_sve_addr_zz_lsl);
+AARCH64_DECL_OPD_INSERTER (ins_sve_addr_zz_sxtw);
+AARCH64_DECL_OPD_INSERTER (ins_sve_addr_zz_uxtw);
AARCH64_DECL_OPD_INSERTER (ins_sve_index);
AARCH64_DECL_OPD_INSERTER (ins_sve_reglist);
AARCH64_DECL_OPD_INSERTER (ins_sve_scale);
case 27:
case 35:
case 36:
- case 92:
- case 93:
- case 94:
- case 95:
- case 96:
- case 97:
- case 98:
- case 99:
- case 100:
- case 101:
- case 102:
- case 103:
- case 104:
- case 105:
- case 108:
+ case 123:
+ case 124:
+ case 125:
+ case 126:
+ case 127:
+ case 128:
+ case 129:
+ case 130:
+ case 131:
+ case 132:
+ case 133:
+ case 134:
+ case 135:
+ case 136:
+ case 139:
return aarch64_ext_regno (self, info, code, inst);
case 8:
return aarch64_ext_regrt_sysins (self, info, code, inst);
case 68:
case 69:
case 70:
- case 89:
- case 91:
+ case 120:
+ case 122:
return aarch64_ext_imm (self, info, code, inst);
case 38:
case 39:
return aarch64_ext_prfop (self, info, code, inst);
case 88:
return aarch64_ext_hint (self, info, code, inst);
+ case 89:
case 90:
- return aarch64_ext_sve_scale (self, info, code, inst);
+ case 91:
+ case 92:
+ return aarch64_ext_sve_addr_ri_u6 (self, info, code, inst);
+ case 93:
+ case 94:
+ case 95:
+ case 96:
+ case 97:
+ case 98:
+ case 99:
+ case 100:
+ case 101:
+ case 102:
+ case 103:
+ case 104:
+ return aarch64_ext_sve_addr_rr_lsl (self, info, code, inst);
+ case 105:
case 106:
- return aarch64_ext_sve_index (self, info, code, inst);
case 107:
+ case 108:
case 109:
+ case 110:
+ case 111:
+ case 112:
+ return aarch64_ext_sve_addr_rz_xtw (self, info, code, inst);
+ case 113:
+ case 114:
+ case 115:
+ case 116:
+ return aarch64_ext_sve_addr_zi_u5 (self, info, code, inst);
+ case 117:
+ return aarch64_ext_sve_addr_zz_lsl (self, info, code, inst);
+ case 118:
+ return aarch64_ext_sve_addr_zz_sxtw (self, info, code, inst);
+ case 119:
+ return aarch64_ext_sve_addr_zz_uxtw (self, info, code, inst);
+ case 121:
+ return aarch64_ext_sve_scale (self, info, code, inst);
+ case 137:
+ return aarch64_ext_sve_index (self, info, code, inst);
+ case 138:
+ case 140:
return aarch64_ext_sve_reglist (self, info, code, inst);
default: assert (0); abort ();
}
return 1;
}
+/* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
+ is given by the OFFSET parameter and where <shift> is SELF's operand-
+ dependent value. fields[0] specifies the base register field <base>. */
+static int
+aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
+ aarch64_opnd_info *info, aarch64_insn code,
+ int64_t offset)
+{
+ info->addr.base_regno = extract_field (self->fields[0], code, 0);
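+  /* Scale the encoded value back up to a byte offset; e.g. an encoded
+     value of 3 with an operand-specific shift of 3 gives #24.  */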
+ info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
+ info->addr.offset.is_reg = FALSE;
+ info->addr.writeback = FALSE;
+ info->addr.preind = TRUE;
+ info->shifter.operator_present = FALSE;
+ info->shifter.amount_present = FALSE;
+ return 1;
+}
+
+/* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
+ is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
+ value. fields[0] specifies the base register field. */
+int
+aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
+ aarch64_opnd_info *info, aarch64_insn code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ int offset = extract_field (FLD_SVE_imm6, code, 0);
+ return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
+}
+
+/* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
+ is SELF's operand-dependent value. fields[0] specifies the base
+ register field and fields[1] specifies the offset register field. */
+int
+aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
+ aarch64_opnd_info *info, aarch64_insn code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ int index;
+
+ index = extract_field (self->fields[1], code, 0);
+ if (index == 31 && (self->flags & OPD_F_NO_ZR) != 0)
+ return 0;
+
+ info->addr.base_regno = extract_field (self->fields[0], code, 0);
+ info->addr.offset.regno = index;
+ info->addr.offset.is_reg = TRUE;
+ info->addr.writeback = FALSE;
+ info->addr.preind = TRUE;
+ info->shifter.kind = AARCH64_MOD_LSL;
+ info->shifter.amount = get_operand_specific_data (self);
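+  /* An implicit LSL #0 is not printed.  */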
+ info->shifter.operator_present = (info->shifter.amount != 0);
+ info->shifter.amount_present = (info->shifter.amount != 0);
+ return 1;
+}
+
+/* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
+ <shift> is SELF's operand-dependent value. fields[0] specifies the
+ base register field, fields[1] specifies the offset register field and
+ fields[2] is a single-bit field that selects SXTW over UXTW. */
+int
+aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
+ aarch64_opnd_info *info, aarch64_insn code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ info->addr.base_regno = extract_field (self->fields[0], code, 0);
+ info->addr.offset.regno = extract_field (self->fields[1], code, 0);
+ info->addr.offset.is_reg = TRUE;
+ info->addr.writeback = FALSE;
+ info->addr.preind = TRUE;
+ if (extract_field (self->fields[2], code, 0))
+ info->shifter.kind = AARCH64_MOD_SXTW;
+ else
+ info->shifter.kind = AARCH64_MOD_UXTW;
+ info->shifter.amount = get_operand_specific_data (self);
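+  /* The extend operator is always printed; the amount only if nonzero.  */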
+ info->shifter.operator_present = TRUE;
+ info->shifter.amount_present = (info->shifter.amount != 0);
+ return 1;
+}
+
+/* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
+ 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
+ fields[0] specifies the base register field. */
+int
+aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
+ aarch64_opnd_info *info, aarch64_insn code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ int offset = extract_field (FLD_imm5, code, 0);
+ return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
+}
+
+/* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
+ where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
+ number. fields[0] specifies the base register field and fields[1]
+ specifies the offset register field. */
+static int
+aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
+ aarch64_insn code, enum aarch64_modifier_kind kind)
+{
+ info->addr.base_regno = extract_field (self->fields[0], code, 0);
+ info->addr.offset.regno = extract_field (self->fields[1], code, 0);
+ info->addr.offset.is_reg = TRUE;
+ info->addr.writeback = FALSE;
+ info->addr.preind = TRUE;
+ info->shifter.kind = kind;
+ info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
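+  /* LSL #0 is implicit, but SXTW and UXTW are always printed.  */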
+ info->shifter.operator_present = (kind != AARCH64_MOD_LSL
+ || info->shifter.amount != 0);
+ info->shifter.amount_present = (info->shifter.amount != 0);
+ return 1;
+}
+
+/* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
+ <msz> is a 2-bit unsigned number. fields[0] specifies the base register
+ field and fields[1] specifies the offset register field. */
+int
+aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
+ aarch64_opnd_info *info, aarch64_insn code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
+}
+
+/* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
+ <msz> is a 2-bit unsigned number. fields[0] specifies the base register
+ field and fields[1] specifies the offset register field. */
+int
+aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
+ aarch64_opnd_info *info, aarch64_insn code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
+}
+
+/* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
+ <msz> is a 2-bit unsigned number. fields[0] specifies the base register
+ field and fields[1] specifies the offset register field. */
+int
+aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
+ aarch64_opnd_info *info, aarch64_insn code,
+ const aarch64_inst *inst ATTRIBUTE_UNUSED)
+{
+ return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
+}
+
/* Decode Zn[MM], where MM has a 7-bit triangular encoding. The fields
array specifies which field to use for Zn. MM is encoded in the
concatenation of imm5 and SVE_tszh, with imm5 being the less
AARCH64_DECL_OPD_EXTRACTOR (ext_prfop);
AARCH64_DECL_OPD_EXTRACTOR (ext_reg_extended);
AARCH64_DECL_OPD_EXTRACTOR (ext_reg_shifted);
+AARCH64_DECL_OPD_EXTRACTOR (ext_sve_addr_ri_u6);
+AARCH64_DECL_OPD_EXTRACTOR (ext_sve_addr_rr_lsl);
+AARCH64_DECL_OPD_EXTRACTOR (ext_sve_addr_rz_xtw);
+AARCH64_DECL_OPD_EXTRACTOR (ext_sve_addr_zi_u5);
+AARCH64_DECL_OPD_EXTRACTOR (ext_sve_addr_zz_lsl);
+AARCH64_DECL_OPD_EXTRACTOR (ext_sve_addr_zz_sxtw);
+AARCH64_DECL_OPD_EXTRACTOR (ext_sve_addr_zz_uxtw);
AARCH64_DECL_OPD_EXTRACTOR (ext_sve_index);
AARCH64_DECL_OPD_EXTRACTOR (ext_sve_reglist);
AARCH64_DECL_OPD_EXTRACTOR (ext_sve_scale);
{AARCH64_OPND_CLASS_SYSTEM, "BARRIER_ISB", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "the ISB option name SY or an optional 4-bit unsigned immediate"},
{AARCH64_OPND_CLASS_SYSTEM, "PRFOP", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "a prefetch operation specifier"},
{AARCH64_OPND_CLASS_SYSTEM, "BARRIER_PSB", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {}, "the PSB option name CSYNC"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_U6", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 6-bit unsigned offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_U6x2", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 6-bit unsigned offset, multiplied by 2"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_U6x4", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 6-bit unsigned offset, multiplied by 4"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RI_U6x8", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn}, "an address with a 6-bit unsigned offset, multiplied by 8"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RR", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_Rm}, "an address with a scalar register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RR_LSL1", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_Rm}, "an address with a scalar register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RR_LSL2", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_Rm}, "an address with a scalar register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RR_LSL3", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_Rm}, "an address with a scalar register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RX", (0 << OPD_F_OD_LSB) | OPD_F_NO_ZR | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_Rm}, "an address with a scalar register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RX_LSL1", (1 << OPD_F_OD_LSB) | OPD_F_NO_ZR | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_Rm}, "an address with a scalar register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RX_LSL2", (2 << OPD_F_OD_LSB) | OPD_F_NO_ZR | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_Rm}, "an address with a scalar register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RX_LSL3", (3 << OPD_F_OD_LSB) | OPD_F_NO_ZR | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_Rm}, "an address with a scalar register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_SVE_Zm_16}, "an address with a vector register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_LSL1", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_SVE_Zm_16}, "an address with a vector register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_LSL2", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_SVE_Zm_16}, "an address with a vector register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_LSL3", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_SVE_Zm_16}, "an address with a vector register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_XTW_14", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_SVE_Zm_16,FLD_SVE_xs_14}, "an address with a vector register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_XTW_22", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_SVE_Zm_16,FLD_SVE_xs_22}, "an address with a vector register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_XTW1_14", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_SVE_Zm_16,FLD_SVE_xs_14}, "an address with a vector register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_XTW1_22", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_SVE_Zm_16,FLD_SVE_xs_22}, "an address with a vector register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_XTW2_14", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_SVE_Zm_16,FLD_SVE_xs_14}, "an address with a vector register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_XTW2_22", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_SVE_Zm_16,FLD_SVE_xs_22}, "an address with a vector register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_XTW3_14", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_SVE_Zm_16,FLD_SVE_xs_14}, "an address with a vector register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_RZ_XTW3_22", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_Rn,FLD_SVE_Zm_16,FLD_SVE_xs_22}, "an address with a vector register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_ZI_U5", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zn}, "an address with a 5-bit unsigned offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_ZI_U5x2", 1 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zn}, "an address with a 5-bit unsigned offset, multiplied by 2"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_ZI_U5x4", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zn}, "an address with a 5-bit unsigned offset, multiplied by 4"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_ZI_U5x8", 3 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zn}, "an address with a 5-bit unsigned offset, multiplied by 8"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_ZZ_LSL", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zn,FLD_SVE_Zm_16}, "an address with a vector register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_ZZ_SXTW", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zn,FLD_SVE_Zm_16}, "an address with a vector register offset"},
+ {AARCH64_OPND_CLASS_ADDRESS, "SVE_ADDR_ZZ_UXTW", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zn,FLD_SVE_Zm_16}, "an address with a vector register offset"},
{AARCH64_OPND_CLASS_IMMEDIATE, "SVE_PATTERN", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_pattern}, "an enumeration value such as POW2"},
{AARCH64_OPND_CLASS_IMMEDIATE, "SVE_PATTERN_SCALED", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_pattern}, "an enumeration value such as POW2"},
{AARCH64_OPND_CLASS_IMMEDIATE, "SVE_PRFOP", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_prfop}, "an enumeration value such as PLDL1KEEP"},
{ 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
{ 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
{ 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
+ { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
+ { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
{ 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
{ 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
{ 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
+ { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
+ { 22, 1 } /* SVE_xs_22: UXTW/SXTW select (bit 22). */
};
enum aarch64_operand_class
const aarch64_opcode *opcode,
aarch64_operand_error *mismatch_detail)
{
- unsigned num;
+ unsigned num, modifiers;
unsigned char size;
- int64_t imm;
+ int64_t imm, min_value, max_value;
const aarch64_opnd_info *opnd = opnds + idx;
aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
}
break;
+ case AARCH64_OPND_SVE_ADDR_RI_U6:
+ case AARCH64_OPND_SVE_ADDR_RI_U6x2:
+ case AARCH64_OPND_SVE_ADDR_RI_U6x4:
+ case AARCH64_OPND_SVE_ADDR_RI_U6x8:
+ min_value = 0;
+ max_value = 63;
+ sve_imm_offset:
+ assert (!opnd->addr.offset.is_reg);
+ assert (opnd->addr.preind);
+ num = 1 << get_operand_specific_data (&aarch64_operands[type]);
+ min_value *= num;
+ max_value *= num;
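+      /* E.g. a "multiplied by 8" U6 operand accepts multiples of 8
+         from #0 up to #504.  */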
+ if (opnd->shifter.operator_present
+ || opnd->shifter.amount_present)
+ {
+ set_other_error (mismatch_detail, idx,
+ _("invalid addressing mode"));
+ return 0;
+ }
+ if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
+ {
+ set_offset_out_of_range_error (mismatch_detail, idx,
+ min_value, max_value);
+ return 0;
+ }
+ if (!value_aligned_p (opnd->addr.offset.imm, num))
+ {
+ set_unaligned_error (mismatch_detail, idx, num);
+ return 0;
+ }
+ break;
+
+ case AARCH64_OPND_SVE_ADDR_RR:
+ case AARCH64_OPND_SVE_ADDR_RR_LSL1:
+ case AARCH64_OPND_SVE_ADDR_RR_LSL2:
+ case AARCH64_OPND_SVE_ADDR_RR_LSL3:
+ case AARCH64_OPND_SVE_ADDR_RX:
+ case AARCH64_OPND_SVE_ADDR_RX_LSL1:
+ case AARCH64_OPND_SVE_ADDR_RX_LSL2:
+ case AARCH64_OPND_SVE_ADDR_RX_LSL3:
+ case AARCH64_OPND_SVE_ADDR_RZ:
+ case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
+ case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
+ case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
+ modifiers = 1 << AARCH64_MOD_LSL;
+ sve_rr_operand:
+ assert (opnd->addr.offset.is_reg);
+ assert (opnd->addr.preind);
+ if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
+ && opnd->addr.offset.regno == 31)
+ {
+ set_other_error (mismatch_detail, idx,
+ _("index register xzr is not allowed"));
+ return 0;
+ }
+ if (((1 << opnd->shifter.kind) & modifiers) == 0
+ || (opnd->shifter.amount
+ != get_operand_specific_data (&aarch64_operands[type])))
+ {
+ set_other_error (mismatch_detail, idx,
+ _("invalid addressing mode"));
+ return 0;
+ }
+ break;
+
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
+ modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
+ goto sve_rr_operand;
+
+ case AARCH64_OPND_SVE_ADDR_ZI_U5:
+ case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
+ case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
+ case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
+ min_value = 0;
+ max_value = 31;
+ goto sve_imm_offset;
+
+ case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
+ modifiers = 1 << AARCH64_MOD_LSL;
+ sve_zz_operand:
+ assert (opnd->addr.offset.is_reg);
+ assert (opnd->addr.preind);
+ if (((1 << opnd->shifter.kind) & modifiers) == 0
+ || opnd->shifter.amount < 0
+ || opnd->shifter.amount > 3)
+ {
+ set_other_error (mismatch_detail, idx,
+ _("invalid addressing mode"));
+ return 0;
+ }
+ break;
+
+ case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
+ modifiers = (1 << AARCH64_MOD_SXTW);
+ goto sve_zz_operand;
+
+ case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
+ modifiers = 1 << AARCH64_MOD_UXTW;
+ goto sve_zz_operand;
+
default:
break;
}
#undef R64
#undef R32
};
+
+/* Names of the SVE vector registers, first with .S suffixes,
+ then with .D suffixes. */
+
+static const char *sve_reg[2][32] = {
+#define ZS(X) "z" #X ".s"
+#define ZD(X) "z" #X ".d"
+ BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
+#undef ZD
+#undef ZS
+};
#undef BANK
/* Return the integer register name.
}
}
+/* Get the name of SVE vector register REGNO, using QUALIFIER to decide
+   whether the suffix should be .S or .D.  The register may be either
+   the base or the offset of an address.  */
+
+static inline const char *
+get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
+{
+ assert (qualifier == AARCH64_OPND_QLF_S_S
+ || qualifier == AARCH64_OPND_QLF_S_D);
+ return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
+}
+
/* Types for expanding an encoded 8-bit value to a floating-point value. */
typedef union
break;
case AARCH64_OPND_ADDR_REGOFF:
+ case AARCH64_OPND_SVE_ADDR_RR:
+ case AARCH64_OPND_SVE_ADDR_RR_LSL1:
+ case AARCH64_OPND_SVE_ADDR_RR_LSL2:
+ case AARCH64_OPND_SVE_ADDR_RR_LSL3:
+ case AARCH64_OPND_SVE_ADDR_RX:
+ case AARCH64_OPND_SVE_ADDR_RX_LSL1:
+ case AARCH64_OPND_SVE_ADDR_RX_LSL2:
+ case AARCH64_OPND_SVE_ADDR_RX_LSL3:
print_register_offset_address
(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
get_offset_int_reg_name (opnd));
break;
+ case AARCH64_OPND_SVE_ADDR_RZ:
+ case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
+ case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
+ case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
+ case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
+ print_register_offset_address
+ (buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
+ get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
+ break;
+
case AARCH64_OPND_ADDR_SIMM7:
case AARCH64_OPND_ADDR_SIMM9:
case AARCH64_OPND_ADDR_SIMM9_2:
+ case AARCH64_OPND_SVE_ADDR_RI_U6:
+ case AARCH64_OPND_SVE_ADDR_RI_U6x2:
+ case AARCH64_OPND_SVE_ADDR_RI_U6x4:
+ case AARCH64_OPND_SVE_ADDR_RI_U6x8:
print_immediate_offset_address
(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
break;
+ case AARCH64_OPND_SVE_ADDR_ZI_U5:
+ case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
+ case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
+ case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
+ print_immediate_offset_address
+ (buf, size, opnd,
+ get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
+ break;
+
+ case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
+ case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
+ case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
+ print_register_offset_address
+ (buf, size, opnd,
+ get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
+ get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
+ break;
+
case AARCH64_OPND_ADDR_UIMM12:
name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
if (opnd->addr.offset.imm)
FLD_SVE_Zn,
FLD_SVE_Zt,
FLD_SVE_imm4,
+ FLD_SVE_imm6,
+ FLD_SVE_msz,
FLD_SVE_pattern,
FLD_SVE_prfop,
FLD_SVE_tszh,
+ FLD_SVE_xs_14,
+ FLD_SVE_xs_22,
};
/* Field description. */
value by 2 to get the value
of an immediate operand. */
#define OPD_F_MAYBE_SP 0x00000010 /* May potentially be SP. */
+#define OPD_F_OD_MASK 0x00000060 /* Operand-dependent data. */
+#define OPD_F_OD_LSB 5
+#define OPD_F_NO_ZR 0x00000080 /* ZR index not allowed. */
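+/* For the SVE address operands, the operand-dependent field holds the
+   scaling shift; e.g. 3 << OPD_F_OD_LSB marks an offset that is
+   multiplied by 8.  */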
static inline bfd_boolean
operand_has_inserter (const aarch64_operand *operand)
return (operand->flags & OPD_F_MAYBE_SP) ? TRUE : FALSE;
}
+/* Return the value of the operand-specific data field (OPD_F_OD_MASK). */
+static inline unsigned int
+get_operand_specific_data (const aarch64_operand *operand)
+{
+ return (operand->flags & OPD_F_OD_MASK) >> OPD_F_OD_LSB;
+}
+
/* Return the total width of the operand *OPERAND. */
static inline unsigned
get_operand_fields_width (const aarch64_operand *operand)
"a prefetch operation specifier") \
Y (SYSTEM, hint, "BARRIER_PSB", 0, F (), \
"the PSB option name CSYNC") \
+ Y(ADDRESS, sve_addr_ri_u6, "SVE_ADDR_RI_U6", 0 << OPD_F_OD_LSB, \
+ F(FLD_Rn), "an address with a 6-bit unsigned offset") \
+ Y(ADDRESS, sve_addr_ri_u6, "SVE_ADDR_RI_U6x2", 1 << OPD_F_OD_LSB, \
+ F(FLD_Rn), \
+ "an address with a 6-bit unsigned offset, multiplied by 2") \
+ Y(ADDRESS, sve_addr_ri_u6, "SVE_ADDR_RI_U6x4", 2 << OPD_F_OD_LSB, \
+ F(FLD_Rn), \
+ "an address with a 6-bit unsigned offset, multiplied by 4") \
+ Y(ADDRESS, sve_addr_ri_u6, "SVE_ADDR_RI_U6x8", 3 << OPD_F_OD_LSB, \
+ F(FLD_Rn), \
+ "an address with a 6-bit unsigned offset, multiplied by 8") \
+ Y(ADDRESS, sve_addr_rr_lsl, "SVE_ADDR_RR", 0 << OPD_F_OD_LSB, \
+ F(FLD_Rn,FLD_Rm), "an address with a scalar register offset") \
+ Y(ADDRESS, sve_addr_rr_lsl, "SVE_ADDR_RR_LSL1", 1 << OPD_F_OD_LSB, \
+ F(FLD_Rn,FLD_Rm), "an address with a scalar register offset") \
+ Y(ADDRESS, sve_addr_rr_lsl, "SVE_ADDR_RR_LSL2", 2 << OPD_F_OD_LSB, \
+ F(FLD_Rn,FLD_Rm), "an address with a scalar register offset") \
+ Y(ADDRESS, sve_addr_rr_lsl, "SVE_ADDR_RR_LSL3", 3 << OPD_F_OD_LSB, \
+ F(FLD_Rn,FLD_Rm), "an address with a scalar register offset") \
+ Y(ADDRESS, sve_addr_rr_lsl, "SVE_ADDR_RX", \
+ (0 << OPD_F_OD_LSB) | OPD_F_NO_ZR, F(FLD_Rn,FLD_Rm), \
+ "an address with a scalar register offset") \
+ Y(ADDRESS, sve_addr_rr_lsl, "SVE_ADDR_RX_LSL1", \
+ (1 << OPD_F_OD_LSB) | OPD_F_NO_ZR, F(FLD_Rn,FLD_Rm), \
+ "an address with a scalar register offset") \
+ Y(ADDRESS, sve_addr_rr_lsl, "SVE_ADDR_RX_LSL2", \
+ (2 << OPD_F_OD_LSB) | OPD_F_NO_ZR, F(FLD_Rn,FLD_Rm), \
+ "an address with a scalar register offset") \
+ Y(ADDRESS, sve_addr_rr_lsl, "SVE_ADDR_RX_LSL3", \
+ (3 << OPD_F_OD_LSB) | OPD_F_NO_ZR, F(FLD_Rn,FLD_Rm), \
+ "an address with a scalar register offset") \
+ Y(ADDRESS, sve_addr_rr_lsl, "SVE_ADDR_RZ", 0 << OPD_F_OD_LSB, \
+ F(FLD_Rn,FLD_SVE_Zm_16), \
+ "an address with a vector register offset") \
+ Y(ADDRESS, sve_addr_rr_lsl, "SVE_ADDR_RZ_LSL1", 1 << OPD_F_OD_LSB, \
+ F(FLD_Rn,FLD_SVE_Zm_16), \
+ "an address with a vector register offset") \
+ Y(ADDRESS, sve_addr_rr_lsl, "SVE_ADDR_RZ_LSL2", 2 << OPD_F_OD_LSB, \
+ F(FLD_Rn,FLD_SVE_Zm_16), \
+ "an address with a vector register offset") \
+ Y(ADDRESS, sve_addr_rr_lsl, "SVE_ADDR_RZ_LSL3", 3 << OPD_F_OD_LSB, \
+ F(FLD_Rn,FLD_SVE_Zm_16), \
+ "an address with a vector register offset") \
+ Y(ADDRESS, sve_addr_rz_xtw, "SVE_ADDR_RZ_XTW_14", \
+ 0 << OPD_F_OD_LSB, F(FLD_Rn,FLD_SVE_Zm_16,FLD_SVE_xs_14), \
+ "an address with a vector register offset") \
+ Y(ADDRESS, sve_addr_rz_xtw, "SVE_ADDR_RZ_XTW_22", \
+ 0 << OPD_F_OD_LSB, F(FLD_Rn,FLD_SVE_Zm_16,FLD_SVE_xs_22), \
+ "an address with a vector register offset") \
+ Y(ADDRESS, sve_addr_rz_xtw, "SVE_ADDR_RZ_XTW1_14", \
+ 1 << OPD_F_OD_LSB, F(FLD_Rn,FLD_SVE_Zm_16,FLD_SVE_xs_14), \
+ "an address with a vector register offset") \
+ Y(ADDRESS, sve_addr_rz_xtw, "SVE_ADDR_RZ_XTW1_22", \
+ 1 << OPD_F_OD_LSB, F(FLD_Rn,FLD_SVE_Zm_16,FLD_SVE_xs_22), \
+ "an address with a vector register offset") \
+ Y(ADDRESS, sve_addr_rz_xtw, "SVE_ADDR_RZ_XTW2_14", \
+ 2 << OPD_F_OD_LSB, F(FLD_Rn,FLD_SVE_Zm_16,FLD_SVE_xs_14), \
+ "an address with a vector register offset") \
+ Y(ADDRESS, sve_addr_rz_xtw, "SVE_ADDR_RZ_XTW2_22", \
+ 2 << OPD_F_OD_LSB, F(FLD_Rn,FLD_SVE_Zm_16,FLD_SVE_xs_22), \
+ "an address with a vector register offset") \
+ Y(ADDRESS, sve_addr_rz_xtw, "SVE_ADDR_RZ_XTW3_14", \
+ 3 << OPD_F_OD_LSB, F(FLD_Rn,FLD_SVE_Zm_16,FLD_SVE_xs_14), \
+ "an address with a vector register offset") \
+ Y(ADDRESS, sve_addr_rz_xtw, "SVE_ADDR_RZ_XTW3_22", \
+ 3 << OPD_F_OD_LSB, F(FLD_Rn,FLD_SVE_Zm_16,FLD_SVE_xs_22), \
+ "an address with a vector register offset") \
+ Y(ADDRESS, sve_addr_zi_u5, "SVE_ADDR_ZI_U5", 0 << OPD_F_OD_LSB, \
+ F(FLD_SVE_Zn), "an address with a 5-bit unsigned offset") \
+ Y(ADDRESS, sve_addr_zi_u5, "SVE_ADDR_ZI_U5x2", 1 << OPD_F_OD_LSB, \
+ F(FLD_SVE_Zn), \
+ "an address with a 5-bit unsigned offset, multiplied by 2") \
+ Y(ADDRESS, sve_addr_zi_u5, "SVE_ADDR_ZI_U5x4", 2 << OPD_F_OD_LSB, \
+ F(FLD_SVE_Zn), \
+ "an address with a 5-bit unsigned offset, multiplied by 4") \
+ Y(ADDRESS, sve_addr_zi_u5, "SVE_ADDR_ZI_U5x8", 3 << OPD_F_OD_LSB, \
+ F(FLD_SVE_Zn), \
+ "an address with a 5-bit unsigned offset, multiplied by 8") \
+ Y(ADDRESS, sve_addr_zz_lsl, "SVE_ADDR_ZZ_LSL", 0, \
+ F(FLD_SVE_Zn,FLD_SVE_Zm_16), \
+ "an address with a vector register offset") \
+ Y(ADDRESS, sve_addr_zz_sxtw, "SVE_ADDR_ZZ_SXTW", 0, \
+ F(FLD_SVE_Zn,FLD_SVE_Zm_16), \
+ "an address with a vector register offset") \
+ Y(ADDRESS, sve_addr_zz_uxtw, "SVE_ADDR_ZZ_UXTW", 0, \
+ F(FLD_SVE_Zn,FLD_SVE_Zm_16), \
+ "an address with a vector register offset") \
Y(IMMEDIATE, imm, "SVE_PATTERN", 0, F(FLD_SVE_pattern), \
"an enumeration value such as POW2") \
Y(IMMEDIATE, sve_scale, "SVE_PATTERN_SCALED", 0, \