;; After all the combinations and propagations of ROTATE have been
;; attempted, split any remaining vector rotates into SHL + USRA sequences.
+;; Don't match this after reload, as the various possible sequences for this
+;; require temporary registers.
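+;; As a sketch (register allocation aside), a rotate left by N on W-bit
+;; elements becomes:
+;;   shl   tmp, src, #N
+;;   usra  tmp, src, #(W - N)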
(define_insn_and_split "*aarch64_simd_rotate_imm<mode>"
[(set (match_operand:VDQ_I 0 "register_operand" "=&w")
(rotate:VDQ_I (match_operand:VDQ_I 1 "register_operand" "w")
(match_operand:VDQ_I 2 "aarch64_simd_lshift_imm")))]
- "TARGET_SIMD"
+ "TARGET_SIMD && can_create_pseudo_p ()"
"#"
"&& 1"
[(set (match_dup 3)
	(ashift:VDQ_I (match_dup 1) (match_dup 2)))
   (set (match_dup 0)
	(plus:VDQ_I (lshiftrt:VDQ_I (match_dup 1) (match_dup 4))
		    (match_dup 3)))]
  {
if (aarch64_emit_opt_vec_rotate (operands[0], operands[1], operands[2]))
DONE;
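+    /* Temporary holding the SHL result; the USRA step then accumulates
+       the right-shifted input into it.  */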
- operands[3] = reload_completed ? operands[0] : gen_reg_rtx (<MODE>mode);
+ operands[3] = gen_reg_rtx (<MODE>mode);
rtx shft_amnt = unwrap_const_vec_duplicate (operands[2]);
int bitwidth = GET_MODE_UNIT_SIZE (<MODE>mode) * BITS_PER_UNIT;
operands[4]
bool
aarch64_emit_opt_vec_rotate (rtx dst, rtx reg, rtx amnt_vec)
{
  rtx amnt = unwrap_const_vec_duplicate (amnt_vec);
  gcc_assert (CONST_INT_P (amnt));
  HOST_WIDE_INT rotamnt = UINTVAL (amnt);
  machine_mode mode = GET_MODE (reg);
+  /* Don't end up here after reload.  */
+  gcc_assert (can_create_pseudo_p ());
/* Rotates by half the element width map down to REV* instructions and should
always be preferred when possible. */
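+  /* For example, rotating each 64-bit element by 32 is a REV64 of the
+     32-bit lanes, and rotating each 16-bit element by 8 is a REV16.  */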
  if (rotamnt == GET_MODE_UNIT_BITSIZE (mode) / 2
      && expand_rotate_as_vec_perm (mode, dst, reg, amnt))
    return true;
/* 64 and 128-bit vector modes can use the XAR instruction
when available. */
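+  /* XAR XORs its two vector sources and rotates each element of the
+     result, so XOR-ing with an all-zeroes vector expresses a plain
+     rotate of the input.  */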
- else if (can_create_pseudo_p ()
- && ((TARGET_SHA3 && mode == V2DImode)
- || (TARGET_SVE2
- && (known_eq (GET_MODE_SIZE (mode), 8)
- || known_eq (GET_MODE_SIZE (mode), 16)))))
+ else if ((TARGET_SHA3 && mode == V2DImode)
+ || (TARGET_SVE2
+ && (known_eq (GET_MODE_SIZE (mode), 8)
+ || known_eq (GET_MODE_SIZE (mode), 16))))
{
rtx zeroes = aarch64_gen_shareable_zero (mode);
rtx xar_op
--- /dev/null
+/* { dg-additional-options "-march=armv8.2-a+sha3" { target aarch64*-*-* } } */
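+/* The rotate-and-XOR in the loop below is expected to be vectorized via
+   the Advanced SIMD XAR path when +sha3 is enabled, exercising the
+   splitting of vector rotates that must now happen before reload.  */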
+
+unsigned long *a;
+int i;
+void f() {
+ for (i = 0; i < 80; i++)
+ a[i] = (a[i] >> 8 | a[i] << 64 - 8) ^ a[i];
+}