}
[(set_attr "type" "vecperm")])
+/* The cbranch_optab doesn't allow FAIL, so this expander is restricted to
+   targets where unaligned VSX accesses are efficient; on older CPUs the
+   cost of unaligned vector load/store is too high.  */
+(define_expand "cbranchv16qi4"
+  [(use (match_operator 0 "equality_operator"
+         [(match_operand:V16QI 1 "reg_or_mem_operand")
+          (match_operand:V16QI 2 "reg_or_mem_operand")]))
+   (use (match_operand 3))]
+  "VECTOR_MEM_VSX_P (V16QImode)
+   && TARGET_EFFICIENT_UNALIGNED_VSX"
+{
+  /* For P8 LE, skip the doubleword swap on loads, as the byte order
+     doesn't matter for an equality compare and both operands are loaded
+     with the same (swapped) element order.  Operands that are altivec
+     indexed or indirect are excluded: they are loaded by the aligned
+     altivec load instruction, which needs no swap.  */
+  if (!TARGET_P9_VECTOR
+      && !BYTES_BIG_ENDIAN
+      && MEM_P (operands[1])
+      && !altivec_indexed_or_indirect_operand (operands[1], V16QImode)
+      && MEM_P (operands[2])
+      && !altivec_indexed_or_indirect_operand (operands[2], V16QImode))
+    {
+      rtx reg_op1 = gen_reg_rtx (V16QImode);
+      rtx reg_op2 = gen_reg_rtx (V16QImode);
+      rs6000_emit_le_vsx_permute (reg_op1, operands[1], V16QImode);
+      rs6000_emit_le_vsx_permute (reg_op2, operands[2], V16QImode);
+      operands[1] = reg_op1;
+      operands[2] = reg_op2;
+    }
+  else
+    {
+      operands[1] = force_reg (V16QImode, operands[1]);
+      operands[2] = force_reg (V16QImode, operands[2]);
+    }
+
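+  /* Rebuild operands[0] as a full comparison of the prepared operands and
+     let rs6000_emit_cbranch expand the compare and branch.  */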
+  rtx_code code = GET_CODE (operands[0]);
+  operands[0] = gen_rtx_fmt_ee (code, V16QImode, operands[1], operands[2]);
+  rs6000_emit_cbranch (V16QImode, operands);
+  DONE;
+})
+
;; Compare vectors producing a vector result and a predicate, setting CR6 to
;; indicate a combined status
(define_insn "altivec_vcmpequ<VI_char>_p"
else
emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
}
+  else if (mode == V16QImode)
+    {
+      gcc_assert (code == EQ || code == NE);
+
+      rtx result_vector = gen_reg_rtx (V16QImode);
+      rtx cc_bit = gen_reg_rtx (SImode);
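+      /* vcmpequb. compares all 16 bytes and records in CR6 whether every
+         element matched; cr6_test_for_lt copies that all-equal bit (CR6.LT)
+         into CC_BIT, which is then compared against 1 so the resulting CC
+         is EQ exactly when the two vectors are identical.  */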
+      emit_insn (gen_altivec_vcmpequb_p (result_vector, op0, op1));
+      emit_insn (gen_cr6_test_for_lt (cc_bit));
+      emit_insn (gen_rtx_SET (compare_result,
+                              gen_rtx_COMPARE (comp_mode, cc_bit,
+                                               const1_rtx)));
+    }
else
emit_insn (gen_rtx_SET (compare_result,
gen_rtx_COMPARE (comp_mode, op0, op1)));
in one reasonably fast instruction. */
#define MOVE_MAX (! TARGET_POWERPC64 ? 4 : 8)
#define MAX_MOVE_MAX 8
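+/* Largest number of bytes the by-pieces infrastructure may access at once.
+   Allow full 16-byte vector accesses when unaligned VSX is efficient;
+   COMPARE_MAX_PIECES defaults to MOVE_MAX_PIECES, so this also enables the
+   16-byte by-pieces equality compare.  Stores stay at GPR width.  */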
+#define MOVE_MAX_PIECES (TARGET_EFFICIENT_UNALIGNED_VSX \
+                         ? 16 : (TARGET_POWERPC64 ? 8 : 4))
+#define STORE_MAX_PIECES (TARGET_POWERPC64 ? 8 : 4)
/* Nonzero if access to memory by bytes is no faster than for words.
Also nonzero if doing byte operations (specifically shifts) in registers
/* { dg-do run { target { aarch64*-*-* alpha*-*-* arm*-*-* hppa*-*-* powerpc*-*-* s390*-*-* } } } */
/* { dg-options "-O2 -fdump-tree-esra --param sra-max-scalarization-size-Ospeed=32" } */
/* { dg-additional-options "-mcpu=ev4" { target alpha*-*-* } } */
+/* { dg-additional-options "-mno-vsx" { target { powerpc*-*-* && ilp32 } } } */
extern void abort (void);
/* { dg-do run { target { aarch64*-*-* alpha*-*-* arm*-*-* hppa*-*-* powerpc*-*-* s390*-*-* } } } */
/* { dg-options "-O2 -fdump-tree-esra --param sra-max-scalarization-size-Ospeed=32" } */
/* { dg-additional-options "-mcpu=ev4" { target alpha*-*-* } } */
+/* { dg-additional-options "-mno-vsx" { target { powerpc*-*-* && ilp32 } } } */
extern void abort (void);
struct foo { long x; };
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
+/* { dg-options "-mdejagnu-cpu=power8 -mvsx -O2" } */
+
+/* Ensure vector mode is used for 16-byte by pieces equality compare. */
+
+int compare1 (const char* s1, const char* s2)
+{
+ return __builtin_memcmp (s1, s2, 16) == 0;
+}
+
+int compare2 (const char* s1)
+{
+ return __builtin_memcmp (s1, "0123456789012345", 16) == 0;
+}
+
+/* { dg-final { scan-assembler-times {\mvcmpequb\.} 2 } } */
+/* { dg-final { scan-assembler-not {\mcmpd\M} } } */