char *aarch64_output_simd_mov_imm (rtx, unsigned);
char *aarch64_output_simd_orr_imm (rtx, unsigned);
char *aarch64_output_simd_and_imm (rtx, unsigned);
+char *aarch64_output_simd_xor_imm (rtx, unsigned);
char *aarch64_output_sve_mov_immediate (rtx);
char *aarch64_output_sve_ptrues (rtx);
bool aarch64_simd_valid_and_imm (rtx);
bool aarch64_simd_valid_mov_imm (rtx);
bool aarch64_simd_valid_orr_imm (rtx);
+bool aarch64_simd_valid_xor_imm (rtx);
bool aarch64_valid_sysreg_name_p (const char *);
const char *aarch64_retrieve_sysreg (const char *, bool, bool);
rtx aarch64_check_zero_based_sve_index_immediate (rtx);
[(set_attr "type" "neon_logic<q>")]
)
+;; For EOR (vector, register) and SVE EOR (vector, immediate)
(define_insn "xor<mode>3<vczle><vczbe>"
- [(set (match_operand:VDQ_I 0 "register_operand" "=w")
- (xor:VDQ_I (match_operand:VDQ_I 1 "register_operand" "w")
- (match_operand:VDQ_I 2 "register_operand" "w")))]
+ [(set (match_operand:VDQ_I 0 "register_operand")
+ (xor:VDQ_I (match_operand:VDQ_I 1 "register_operand")
+ (match_operand:VDQ_I 2 "aarch64_reg_or_xor_imm")))]
"TARGET_SIMD"
- "eor\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>"
+ {@ [ cons: =0 , 1 , 2 ]
+ [ w , w , w ] eor\t%0.<Vbtype>, %1.<Vbtype>, %2.<Vbtype>
+ [ w , 0 , Do ] << aarch64_output_simd_xor_imm (operands[2], <bitsize>);
+ }
[(set_attr "type" "neon_logic<q>")]
)
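To illustrate (a sketch, not part of the patch; the function names are ours): the rewritten pattern keeps the register-register Advanced SIMD EOR and adds an immediate alternative that, for SVE-encodable constants, is emitted by aarch64_output_simd_xor_imm as an SVE EOR on the shared vector register file. Assuming a compiler built with this patch and SVE enabled:

#include <arm_neon.h>

/* Both operands in registers: first alternative, Advanced SIMD
   "eor v0.16b, v0.16b, v1.16b".  */
uint32x4_t xor_reg (uint32x4_t a, uint32x4_t b)
{
  return veorq_u32 (a, b);
}

/* Constant operand that is a valid bitmask immediate (3 is a
   contiguous run of two set bits): second alternative, SVE
   "eor z0.s, z0.s, #3".  */
uint32x4_t xor_imm (uint32x4_t a)
{
  return veorq_u32 (a, vdupq_n_u32 (3));
}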
enum simd_immediate_check {
AARCH64_CHECK_MOV,
AARCH64_CHECK_ORR,
- AARCH64_CHECK_AND
+ AARCH64_CHECK_AND,
+ AARCH64_CHECK_XOR
};
/* Information about a legitimate vector immediate operand. */
return aarch64_simd_valid_imm (op, NULL, AARCH64_CHECK_AND);
}
+/* Return true if OP is a valid SIMD xor immediate for SVE. */
+bool
+aarch64_simd_valid_xor_imm (rtx op)
+{
+ return aarch64_simd_valid_imm (op, NULL, AARCH64_CHECK_XOR);
+}
+
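For context, "valid" here means the constant is representable as an SVE logical (bitmask) immediate, the same encoding class the existing ORR and AND checks use: each element must be a rotation of a contiguous run of set bits. Below is a rough standalone sketch of that rule, simplified to full-width 32-bit elements (the real check in aarch64_simd_valid_imm also handles smaller repeating sub-elements and other element sizes), not GCC's implementation:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative simplification: treat a 32-bit value as encodable if
   some rotation of it is a contiguous run of ones; all-zeros and
   all-ones are never valid logical immediates.  */
static bool
bitmask_imm32_p (uint32_t x)
{
  if (x == 0 || x == UINT32_MAX)
    return false;
  for (int r = 0; r < 32; r++)
    {
      uint32_t v = (x >> r) | (x << ((32 - r) & 31));
      /* v is a run of ones starting at bit 0 iff v + 1 is a power
	 of two, i.e. v & (v + 1) == 0.  */
      if ((v & (v + 1)) == 0)
	return true;
    }
  return false;
}

Under this rule, 3 and -3 pass (two ones; 31 ones with one hole), while a scattered value such as 0x12345679 fails and has to be loaded into a register.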
/* Check whether X is a VEC_SERIES-like constant that starts at 0 and
has a step in the range of INDEX. Return the index expression if so,
otherwise return null. */
}
else
{
- /* AARCH64_CHECK_ORR or AARCH64_CHECK_AND. */
+ /* AARCH64_CHECK_ORR, AARCH64_CHECK_AND or AARCH64_CHECK_XOR. */
mnemonic = "orr";
if (which == AARCH64_CHECK_AND)
mnemonic = info.insn == simd_immediate_info::MVN ? "bic" : "and";
+ else if (which == AARCH64_CHECK_XOR)
+ mnemonic = "eor";
if (info.insn == simd_immediate_info::SVE_MOV)
{
return aarch64_output_simd_imm (const_vector, width, AARCH64_CHECK_AND);
}
+/* Returns the string with the EOR instruction for the SIMD immediate
+ CONST_VECTOR of WIDTH bits. */
+char*
+aarch64_output_simd_xor_imm (rtx const_vector, unsigned width)
+{
+ return aarch64_output_simd_imm (const_vector, width, AARCH64_CHECK_XOR);
+}
+
/* Returns the string with the MOV instruction for the SIMD immediate
CONST_VECTOR of WIDTH bits. */
char*
(and (match_code "const_vector")
(match_test "aarch64_simd_valid_and_imm (op)"))))
+(define_predicate "aarch64_reg_or_xor_imm"
+ (ior (match_operand 0 "register_operand")
+ (and (match_code "const_vector")
+ (match_test "aarch64_simd_valid_xor_imm (op)"))))
+
(define_predicate "aarch64_fp_compare_operand"
(ior (match_operand 0 "register_operand")
(and (match_code "const_double")
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#include <arm_neon.h>
+
+/*
+** t1:
+**	and	z[0-9]+\.s, z[0-9]+\.s, #?3
+**	ret
+*/
+uint32x2_t t1 (uint32x2_t a)
+{
+ return vand_u32 (a, vdup_n_u32 (3));
+}
+
+/*
+** t2:
+**	orr	z[0-9]+\.s, z[0-9]+\.s, #?-3
+**	ret
+*/
+uint32x2_t t2 (uint32x2_t a)
+{
+ return vorr_u32 (a, vdup_n_u32 (~2));
+}
+
+/*
+** t3:
+**	eor	z[0-9]+\.s, z[0-9]+\.s, #?3
+**	ret
+*/
+uint32x2_t t3 (uint32x2_t a)
+{
+ return veor_u32 (a, vdup_n_u32 (3));
+}
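A hypothetical extra case, not part of the patch's test file (t4 is our name): a constant with scattered bits is rejected by aarch64_simd_valid_xor_imm, so the XOR keeps the register alternative, with the constant materialised into a vector register first.

/* 0x12345679 is not a bitmask immediate, so no SVE EOR (immediate) is
   possible; expect a constant load followed by a register-form EOR.  */
uint32x2_t t4 (uint32x2_t a)
{
  return veor_u32 (a, vdup_n_u32 (0x12345679));
}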