case AARCH64_OPND_SVE_ZtxN:
case AARCH64_OPND_SME_Zdnx2:
case AARCH64_OPND_SME_Zdnx4:
- case AARCH64_OPND_SME_Zdnx4_STRIDED:
case AARCH64_OPND_SME_Zmx2:
case AARCH64_OPND_SME_Zmx4:
case AARCH64_OPND_SME_Znx2:
AARCH64_OPND_SVE_ZtxN, /* SVE vector register list in Zt. */
AARCH64_OPND_SME_Zdnx2, /* SVE vector register list from [4:1]*2. */
AARCH64_OPND_SME_Zdnx4, /* SVE vector register list from [4:2]*4. */
- AARCH64_OPND_SME_Zdnx4_STRIDED, /* SVE vector register list from [4:2]*4. */
AARCH64_OPND_SME_Zm, /* SVE vector register in 4-bit Zm. */
AARCH64_OPND_SME_Zmx2, /* SVE vector register list from [20:17]*2. */
AARCH64_OPND_SME_Zmx4, /* SVE vector register list from [20:18]*4. */
case 230:
case 241:
case 245:
- case 250:
+ case 249:
+ case 257:
case 258:
case 259:
- case 260:
+ case 266:
case 267:
case 268:
case 269:
- case 270:
- case 304:
- case 308:
+ case 303:
+ case 307:
return aarch64_ins_regno (self, info, code, inst, errors);
case 6:
case 119:
case 120:
- case 314:
- case 317:
+ case 313:
+ case 316:
return aarch64_ins_none (self, info, code, inst, errors);
case 17:
return aarch64_ins_reg_extended (self, info, code, inst, errors);
case 37:
case 38:
case 39:
- case 319:
+ case 318:
return aarch64_ins_reglane (self, info, code, inst, errors);
case 40:
case 41:
case 231:
case 232:
case 235:
+ case 270:
case 271:
- case 272:
+ case 286:
case 287:
case 288:
case 289:
case 300:
case 301:
case 302:
- case 303:
+ case 304:
case 305:
case 306:
- case 307:
+ case 308:
case 309:
case 310:
- case 311:
return aarch64_ins_simple_index (self, info, code, inst, errors);
case 43:
return aarch64_ins_reglist (self, info, code, inst, errors);
case 210:
case 211:
case 212:
- case 273:
+ case 272:
+ case 311:
case 312:
- case 313:
+ case 314:
case 315:
- case 316:
- case 318:
+ case 317:
+ case 322:
case 323:
- case 324:
return aarch64_ins_imm (self, info, code, inst, errors);
case 52:
case 53:
case 201:
case 202:
case 203:
- case 286:
+ case 285:
return aarch64_ins_sve_shrimm (self, info, code, inst, errors);
case 217:
case 218:
return aarch64_ins_sve_index (self, info, code, inst, errors);
case 244:
case 246:
- case 266:
+ case 265:
return aarch64_ins_sve_reglist (self, info, code, inst, errors);
case 247:
case 248:
+ case 250:
case 251:
case 252:
case 253:
case 254:
- case 255:
- case 265:
+ case 264:
return aarch64_ins_sve_aligned_reglist (self, info, code, inst, errors);
- case 249:
+ case 255:
case 256:
- case 257:
return aarch64_ins_sve_strided_reglist (self, info, code, inst, errors);
+ case 260:
+ case 262:
+ case 273:
+ return aarch64_ins_sme_za_hv_tiles (self, info, code, inst, errors);
case 261:
case 263:
- case 274:
- return aarch64_ins_sme_za_hv_tiles (self, info, code, inst, errors);
- case 262:
- case 264:
return aarch64_ins_sme_za_hv_tiles_range (self, info, code, inst, errors);
+ case 274:
case 275:
case 276:
case 277:
case 278:
case 279:
case 280:
- case 281:
return aarch64_ins_sme_za_array (self, info, code, inst, errors);
- case 282:
+ case 281:
return aarch64_ins_sme_addr_ri_u4xvl (self, info, code, inst, errors);
- case 283:
+ case 282:
return aarch64_ins_sme_sm_za (self, info, code, inst, errors);
- case 284:
+ case 283:
return aarch64_ins_sme_pred_reg_with_index (self, info, code, inst, errors);
- case 285:
+ case 284:
return aarch64_ins_plain_shrimm (self, info, code, inst, errors);
+ case 319:
case 320:
case 321:
- case 322:
return aarch64_ins_x0_to_x30 (self, info, code, inst, errors);
+ case 324:
case 325:
case 326:
case 327:
- case 328:
return aarch64_ins_rcpc3_addr_opt_offset (self, info, code, inst, errors);
- case 329:
+ case 328:
return aarch64_ins_rcpc3_addr_offset (self, info, code, inst, errors);
default: assert (0); abort ();
}
case 230:
case 241:
case 245:
- case 250:
+ case 249:
+ case 257:
case 258:
case 259:
- case 260:
+ case 266:
case 267:
case 268:
case 269:
- case 270:
- case 304:
- case 308:
+ case 303:
+ case 307:
return aarch64_ext_regno (self, info, code, inst, errors);
case 6:
case 119:
case 120:
- case 314:
- case 317:
+ case 313:
+ case 316:
return aarch64_ext_none (self, info, code, inst, errors);
case 11:
return aarch64_ext_regrt_sysins (self, info, code, inst, errors);
case 37:
case 38:
case 39:
- case 319:
+ case 318:
return aarch64_ext_reglane (self, info, code, inst, errors);
case 40:
case 41:
case 231:
case 232:
case 235:
+ case 270:
case 271:
- case 272:
+ case 286:
case 287:
case 288:
case 289:
case 300:
case 301:
case 302:
- case 303:
+ case 304:
case 305:
case 306:
- case 307:
+ case 308:
case 309:
case 310:
- case 311:
return aarch64_ext_simple_index (self, info, code, inst, errors);
case 43:
return aarch64_ext_reglist (self, info, code, inst, errors);
case 210:
case 211:
case 212:
- case 273:
+ case 272:
+ case 311:
case 312:
- case 313:
+ case 314:
case 315:
- case 316:
- case 318:
+ case 317:
+ case 322:
case 323:
- case 324:
return aarch64_ext_imm (self, info, code, inst, errors);
case 52:
case 53:
case 201:
case 202:
case 203:
- case 286:
+ case 285:
return aarch64_ext_sve_shrimm (self, info, code, inst, errors);
case 217:
case 218:
return aarch64_ext_sve_index (self, info, code, inst, errors);
case 244:
case 246:
- case 266:
+ case 265:
return aarch64_ext_sve_reglist (self, info, code, inst, errors);
case 247:
case 248:
+ case 250:
case 251:
case 252:
case 253:
case 254:
- case 255:
- case 265:
+ case 264:
return aarch64_ext_sve_aligned_reglist (self, info, code, inst, errors);
- case 249:
+ case 255:
case 256:
- case 257:
return aarch64_ext_sve_strided_reglist (self, info, code, inst, errors);
+ case 260:
+ case 262:
+ case 273:
+ return aarch64_ext_sme_za_hv_tiles (self, info, code, inst, errors);
case 261:
case 263:
- case 274:
- return aarch64_ext_sme_za_hv_tiles (self, info, code, inst, errors);
- case 262:
- case 264:
return aarch64_ext_sme_za_hv_tiles_range (self, info, code, inst, errors);
+ case 274:
case 275:
case 276:
case 277:
case 278:
case 279:
case 280:
- case 281:
return aarch64_ext_sme_za_array (self, info, code, inst, errors);
- case 282:
+ case 281:
return aarch64_ext_sme_addr_ri_u4xvl (self, info, code, inst, errors);
- case 283:
+ case 282:
return aarch64_ext_sme_sm_za (self, info, code, inst, errors);
- case 284:
+ case 283:
return aarch64_ext_sme_pred_reg_with_index (self, info, code, inst, errors);
- case 285:
+ case 284:
return aarch64_ext_plain_shrimm (self, info, code, inst, errors);
+ case 319:
case 320:
case 321:
- case 322:
return aarch64_ext_x0_to_x30 (self, info, code, inst, errors);
+ case 324:
case 325:
case 326:
case 327:
- case 328:
return aarch64_ext_rcpc3_addr_opt_offset (self, info, code, inst, errors);
- case 329:
+ case 328:
return aarch64_ext_rcpc3_addr_offset (self, info, code, inst, errors);
default: assert (0); abort ();
}
{AARCH64_OPND_CLASS_SVE_REGLIST, "SVE_ZtxN", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SVE_Zt}, "a list of SVE vector registers"},
{AARCH64_OPND_CLASS_SVE_REGLIST, "SME_Zdnx2", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SME_Zdn2}, "a list of SVE vector registers"},
{AARCH64_OPND_CLASS_SVE_REGLIST, "SME_Zdnx4", 4 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SME_Zdn4}, "a list of SVE vector registers"},
- {AARCH64_OPND_CLASS_SVE_REGLIST, "SME_Zdnx4_STRIDED", 4 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SME_ZdnT, FLD_SME_Zdn2_0}, "a list of SVE vector registers"},
{AARCH64_OPND_CLASS_SVE_REG, "SME_Zm", OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SME_Zm}, "an SVE vector register"},
{AARCH64_OPND_CLASS_SVE_REGLIST, "SME_Zmx2", 2 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SME_Zm2}, "a list of SVE vector registers"},
{AARCH64_OPND_CLASS_SVE_REGLIST, "SME_Zmx4", 4 << OPD_F_OD_LSB | OPD_F_HAS_INSERTER | OPD_F_HAS_EXTRACTOR, {FLD_SME_Zm4}, "a list of SVE vector registers"},
{ 0, 1 }, /* SME_ZAda_1b: tile ZA0-ZA1. */
{ 0, 2 }, /* SME_ZAda_2b: tile ZA0-ZA3. */
{ 0, 3 }, /* SME_ZAda_3b: tile ZA0-ZA7. */
- { 4, 1 }, /* SME_ZdnT: upper bit of Zt, bit [4]. */
{ 1, 4 }, /* SME_Zdn2: Z0-Z31, multiple of 2, bits [4:1]. */
- { 0, 2 }, /* SME_Zdn2_0: lower 2 bits of Zt, bits [1:0]. */
{ 2, 3 }, /* SME_Zdn4: Z0-Z31, multiple of 4, bits [4:2]. */
{ 16, 4 }, /* SME_Zm: Z0-Z15, bits [19:16]. */
{ 17, 4 }, /* SME_Zm2: Z0-Z31, multiple of 2, bits [20:17]. */
}
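Each { lsb, width } entry above gives the bit position and size of an operand field in the 32-bit instruction word, and comments such as "multiple of 2, bits [4:1]" mean the field stores the register number divided by the list stride. A minimal sketch of that scheme, using hypothetical helpers rather than the generated binutils inserters/extractors:

#include <stdint.h>

/* Hypothetical field descriptor mirroring the { lsb, width } table above.  */
struct field { unsigned lsb, width; };

/* Place a register number into its field; e.g. SME_Zdn2 ("multiple of 2,
   bits [4:1]") would store regno / 2 in the 4-bit field at lsb 1.  */
static uint32_t
put_regno (uint32_t insn, struct field f, unsigned regno, unsigned stride)
{
  uint32_t mask = (1u << f.width) - 1;
  return (insn & ~(mask << f.lsb)) | (((regno / stride) & mask) << f.lsb);
}

/* Recover the register number on the disassembly side.  */
static unsigned
get_regno (uint32_t insn, struct field f, unsigned stride)
{
  return ((insn >> f.lsb) & ((1u << f.width) - 1)) * stride;
}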
break;
- case AARCH64_OPND_SME_Zdnx4_STRIDED:
case AARCH64_OPND_SME_Ztx2_STRIDED:
case AARCH64_OPND_SME_Ztx4_STRIDED:
/* 2-register lists have a stride of 8 and 4-register lists
case AARCH64_OPND_SVE_ZtxN:
case AARCH64_OPND_SME_Zdnx2:
case AARCH64_OPND_SME_Zdnx4:
- case AARCH64_OPND_SME_Zdnx4_STRIDED:
case AARCH64_OPND_SME_Zmx2:
case AARCH64_OPND_SME_Zmx4:
case AARCH64_OPND_SME_Znx2:
FLD_SME_ZAda_1b,
FLD_SME_ZAda_2b,
FLD_SME_ZAda_3b,
- FLD_SME_ZdnT,
FLD_SME_Zdn2,
- FLD_SME_Zdn2_0,
FLD_SME_Zdn4,
FLD_SME_Zm,
FLD_SME_Zm2,
{ \
QLF2(S_S,NIL), \
}
-/* e.g. movt ZT0{[<offs>, MUL VL]}, <Zt> */
-/* The second operand doesn't have a qualifier and
- is checked separetely during encoding. */
-#define OP_SVE_SU_Q \
-{ \
- QLF2(S_Q,NIL), \
-}
#define OP_SVE_SUS \
{ \
QLF3(S_S,NIL,S_S), \
QLF3(S_S,NIL,W), \
QLF3(S_D,NIL,X), \
}
-/* e.g. luti4 { <Zd1>.B-<Zd4>.B }, ZT0, { <Zn1>-<Zn2> } */
-/* The second and third operands don't have qualifiers and
- are checked separetely during encoding. */
-#define OP_SVE_VUU_B \
-{ \
- QLF3(S_B,NIL,NIL), \
-}
#define OP_SVE_VUU_BH \
{ \
QLF3(S_B,NIL,NIL), \
LUT_SVE2_INSN ("luti4", 0x4520bc00, 0xff20fc00, OP3 (SVE_Zd, SVE_ZnxN, SVE_Zm2_22_INDEX), OP_SVE_HHU, F_OD(1), 0),
/* SME2 lutv2. */
- LUTv2_SME2_INSN ("luti4", 0xc08b0000, 0xffffcc23, sme_size_12_b, OP3 (SME_Zdnx4, SME_ZT0, SME_Znx2_BIT_INDEX), OP_SVE_VUU_B, F_STRICT | 0),
- LUTv2_SME2p1_INSN ("luti4", 0xc09b0000, 0xffffcc2c, sme_size_12_b, OP3 (SME_Zdnx4_STRIDED, SME_ZT0, SME_Znx2_BIT_INDEX), OP_SVE_VUU_B, F_STRICT | 0),
+ LUTv2_SME2_INSN ("luti4", 0xc08b0000, 0xffffcc23, sme_size_12_b, OP3 (SME_Zdnx4, SME_ZT0, SME_Znx2_BIT_INDEX), OP_SVE_BUU, F_STRICT | 0),
+ LUTv2_SME2p1_INSN ("luti4", 0xc09b0000, 0xffffcc2c, sme_size_12_b, OP3 (SME_Ztx4_STRIDED, SME_ZT0, SME_Znx2_BIT_INDEX), OP_SVE_BUU, F_STRICT | 0),
LUTv2_SME2_INSN ("movt", 0xc04f03e0, 0xffffcfe0, sme_misc, OP2 (SME_ZT0_INDEX2_12, SVE_Zt), {}, 0),
/* SME FP16 ZA-targeting addition instructions. */
SME_F16F16_F8F16_INSNC("fadd", 0xc1a41c00, 0xffff9c38, sme_misc, OP2 (SME_ZA_array_off3_0, SME_Znx2), OP_SVE_HH, F_OD (2), 0),
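Each instruction entry above pairs a fixed opcode value with a mask of the bits that are fixed for that encoding; conceptually, a candidate word matches the entry when its masked bits equal the opcode, and the remaining bits hold the operand fields listed in OP2/OP3. A minimal sketch of that test, not the actual binutils matcher:

#include <stdint.h>
#include <stdbool.h>

/* A word matches an encoding when every bit covered by the mask equals
   the corresponding fixed opcode bit.  */
static bool
matches_encoding (uint32_t word, uint32_t opcode, uint32_t mask)
{
  return (word & mask) == opcode;
}

/* For example, testing a word against the SME2 luti4 entry above would be
   matches_encoding (word, 0xc08b0000, 0xffffcc23).  */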
F(FLD_SME_Zdn2), "a list of SVE vector registers") \
Y(SVE_REGLIST, sve_aligned_reglist, "SME_Zdnx4", 4 << OPD_F_OD_LSB, \
F(FLD_SME_Zdn4), "a list of SVE vector registers") \
- Y(SVE_REGLIST, sve_strided_reglist, "SME_Zdnx4_STRIDED", \
- 4 << OPD_F_OD_LSB, F(FLD_SME_ZdnT, FLD_SME_Zdn2_0), \
- "a list of SVE vector registers") \
Y(SVE_REG, regno, "SME_Zm", 0, F(FLD_SME_Zm), \
"an SVE vector register") \
Y(SVE_REGLIST, sve_aligned_reglist, "SME_Zmx2", 2 << OPD_F_OD_LSB, \