This adds an implementation of the conditional branch optab for AArch32.
For example:
void f1 ()
{
for (int i = 0; i < N; i++)
{
b[i] += a[i];
if (a[i] > 0)
break;
}
}
For 128-bit vectors we generate:
vcgt.s32 q8, q9, #0
vpmax.u32 d7, d16, d17
vpmax.u32 d7, d7, d7
vmov r3, s14 @ int
cmp r3, #0
and for 64-bit vectors we can omit one of the vpmax instructions, as we still
need the remaining one to compress the result down to 32 bits.
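For 64-bit vectors one would therefore expect a sequence along the lines of
the following (an illustrative sketch only; the exact register allocation
will differ):
vcgt.s32 d16, d17, #0
vpmax.u32 d7, d16, d16
vmov r3, s14 @ int
cmp r3, #0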
gcc/ChangeLog:
* config/arm/neon.md (cbranch<mode>4): New.
gcc/testsuite/ChangeLog:
* gcc.dg/vect/vect-early-break_2.c: Skip Arm.
* gcc.dg/vect/vect-early-break_7.c: Likewise.
* gcc.dg/vect/vect-early-break_75.c: Likewise.
* gcc.dg/vect/vect-early-break_77.c: Likewise.
* gcc.dg/vect/vect-early-break_82.c: Likewise.
* gcc.dg/vect/vect-early-break_88.c: Likewise.
* lib/target-supports.exp (add_options_for_vect_early_break,
check_effective_target_vect_early_break_hw,
check_effective_target_vect_early_break): Support AArch32.
* gcc.target/arm/vect-early-break-cbranch.c: New test.
[(set_attr "type" "neon_store1_one_lane<q>,neon_to_gp<q>")]
)
+;; Patterns comparing two vectors and branching conditionally.
+;; Advanced SIMD lacks a vector != comparison, but this is quite a common
+;; operation.  To avoid paying the penalty for inverting == we can map our
+;; "any" comparisons to "all", i.e. any(~x) => all(x).
+;;
+;; However, unlike the AArch64 version, we can't optimize this further: the
+;; chain is too long for combine because these are unspecs, so it doesn't
+;; fold the operation into something simpler.
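+;;
+;; The "any lane set" test itself is performed by reducing the comparison
+;; mask (or, for a non-zero right-hand operand, the XOR of the two inputs)
+;; with vpmax.u32 until a single 32-bit lane remains, which is then compared
+;; against zero on the general-purpose register side.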
+(define_expand "cbranch<mode>4"
+ [(set (pc) (if_then_else
+ (match_operator 0 "expandable_comparison_operator"
+ [(match_operand:VDQI 1 "register_operand")
+ (match_operand:VDQI 2 "reg_or_zero_operand")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ "TARGET_NEON"
+{
+ rtx mask = operands[1];
+
+ /* If comparing against a non-zero vector, XOR the operands first so that
+ the test below reduces to a != 0 comparison of the result.  */
+ if (operands[2] != CONST0_RTX (<MODE>mode))
+ {
+ mask = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_xor<mode>3 (mask, operands[1], operands[2]));
+ }
+
+ /* For 128-bit vectors we need an additional reduction.  */
+ if (known_eq (128, GET_MODE_BITSIZE (<MODE>mode)))
+ {
+ /* Always reduce using a V4SI. */
+ rtx op1 = lowpart_subreg (V4SImode, mask, <MODE>mode);
+ mask = gen_reg_rtx (V2SImode);
+ rtx low = gen_reg_rtx (V2SImode);
+ rtx high = gen_reg_rtx (V2SImode);
+ emit_insn (gen_neon_vget_lowv4si (low, op1));
+ emit_insn (gen_neon_vget_highv4si (high, op1));
+ emit_insn (gen_neon_vpumaxv2si (mask, low, high));
+ }
+
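+ /* Pairwise-reduce the remaining 64 bits with an unsigned max and compare
+ the low 32-bit lane against zero on the scalar side.  */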
+ rtx op1 = lowpart_subreg (V2SImode, mask, GET_MODE (mask));
+ emit_insn (gen_neon_vpumaxv2si (op1, op1, op1));
+
+ rtx val = gen_reg_rtx (SImode);
+ emit_move_insn (val, gen_lowpart (SImode, mask));
+ emit_jump_insn (gen_cbranch_cc (operands[0], val, const0_rtx, operands[3]));
+ DONE;
+})
+
;; This pattern is renamed from "vec_extract<mode><V_elem_l>" to
;; "neon_vec_extract<mode><V_elem_l>" and this pattern is called
;; by define_expand in vec-common.md file.
/* { dg-additional-options "-Ofast" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
#include <complex.h>
/* { dg-additional-options "-Ofast" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
#include <complex.h>
/* { dg-require-effective-target vect_int } */
/* { dg-additional-options "-O3" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "x86_64-*-* i?86-*-*" } } } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "x86_64-*-* i?86-*-* arm*-*-*" } } } } */
#include <limits.h>
#include <assert.h>
/* { dg-require-effective-target vect_int } */
/* { dg-additional-options "-O3" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
#include "tree-vect.h"
/* { dg-additional-options "-Ofast" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
#include <complex.h>
/* { dg-require-effective-target vect_int } */
/* { dg-additional-options "-Ofast --param vect-partial-vector-usage=2" } */
-/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" } } */
+/* { dg-final { scan-tree-dump "LOOP VECTORIZED" "vect" { target { ! "arm*-*-*" } } } } */
#include "tree-vect.h"
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target vect_early_break } */
+/* { dg-require-effective-target arm_neon_ok } */
+/* { dg-require-effective-target arm32 } */
+/* { dg-options "-O3 -march=armv8-a+simd -mfpu=auto -mfloat-abi=hard -fno-schedule-insns -fno-reorder-blocks -fno-schedule-insns2" } */
+/* { dg-final { check-function-bodies "**" "" "" } } */
+
+#define N 640
+int a[N] = {0};
+int b[N] = {0};
+
+/*
+** f1:
+** ...
+** vcgt.s32 q[0-9]+, q[0-9]+, #0
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f1 ()
+{
+ for (int i = 0; i < N; i++)
+ {
+ b[i] += a[i];
+ if (a[i] > 0)
+ break;
+ }
+}
+
+/*
+** f2:
+** ...
+** vcge.s32 q[0-9]+, q[0-9]+, #0
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f2 ()
+{
+ for (int i = 0; i < N; i++)
+ {
+ b[i] += a[i];
+ if (a[i] >= 0)
+ break;
+ }
+}
+
+/*
+** f3:
+** ...
+** vceq.i32 q[0-9]+, q[0-9]+, #0
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f3 ()
+{
+ for (int i = 0; i < N; i++)
+ {
+ b[i] += a[i];
+ if (a[i] == 0)
+ break;
+ }
+}
+
+/*
+** f4:
+** ...
+** vceq.i32 q[0-9]+, q[0-9]+, #0
+** vmvn q[0-9]+, q[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f4 ()
+{
+ for (int i = 0; i < N; i++)
+ {
+ b[i] += a[i];
+ if (a[i] != 0)
+ break;
+ }
+}
+
+/*
+** f5:
+** ...
+** vclt.s32 q[0-9]+, q[0-9]+, #0
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f5 ()
+{
+ for (int i = 0; i < N; i++)
+ {
+ b[i] += a[i];
+ if (a[i] < 0)
+ break;
+ }
+}
+
+/*
+** f6:
+** ...
+** vcle.s32 q[0-9]+, q[0-9]+, #0
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vpmax.u32 d[0-9]+, d[0-9]+, d[0-9]+
+** vmov r[0-9]+, s[0-9]+ @ int
+** cmp r[0-9]+, #0
+** bne \.L[0-9]+
+** ...
+*/
+void f6 ()
+{
+ for (int i = 0; i < N; i++)
+ {
+ b[i] += a[i];
+ if (a[i] <= 0)
+ break;
+ }
+}
+
return [check_cached_effective_target_indexed vect_early_break {
expr {
[istarget aarch64*-*-*]
+ || [check_effective_target_arm_v8_neon_ok]
|| [check_effective_target_sse4]
}}]
}
return [check_cached_effective_target_indexed vect_early_break_hw {
expr {
[istarget aarch64*-*-*]
+ || [check_effective_target_arm_v8_neon_hw]
|| [check_sse4_hw_available]
}}]
}
return "$flags"
}
+ if { [check_effective_target_arm_v8_neon_ok] } {
+ global et_arm_v8_neon_flags
+ return "$flags $et_arm_v8_neon_flags -march=armv8-a"
+ }
+
if { [check_effective_target_sse4] } {
return "$flags -msse4.1"
}