return arg0;
}
+/* Fold a call to IFN_VEC_SHL_INSERT (ARG0, ARG1), returning a value
+   of type TYPE, or NULL_TREE if nothing was folded.  */
+
+static tree
+fold_const_vec_shl_insert (tree, tree arg0, tree arg1)
+{
+ if (TREE_CODE (arg0) != VECTOR_CST)
+ return NULL_TREE;
+
+ /* vec_shl_insert (dup (CST), CST) -> dup (CST): shifting a uniform
+    vector left one element and inserting the same value it is filled
+    with leaves the vector unchanged.  */
+ if (tree elem = uniform_vector_p (arg0))
+ {
+ if (operand_equal_p (elem, arg1))
+ return arg0;
+ }
+
+ return NULL_TREE;
+}
+
/* Try to evaluate:
*RESULT = FN (*ARG0, *ARG1)
case CFN_FOLD_LEFT_PLUS:
return fold_const_fold_left (type, arg0, arg1, PLUS_EXPR);
+ case CFN_VEC_SHL_INSERT:
+ return fold_const_vec_shl_insert (type, arg0, arg1);
+
case CFN_UBSAN_CHECK_ADD:
case CFN_ADD_OVERFLOW:
subcode = PLUS_EXPR;
&& direct_internal_fn_supported_p (IFN_AVG_CEIL, type, OPTIMIZE_FOR_BOTH))
(IFN_AVG_CEIL @0 @2)))
#endif
+
+/* vec_shl_insert (dup (A), A) -> dup (A): inserting A into the low
+   element of a vector uniformly filled with A gives back the same
+   uniform vector, for constant and variable A alike.  */
+(simplify
+ (IFN_VEC_SHL_INSERT (vec_duplicate@1 @0) @0)
+ @1)
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O -fdump-tree-optimized" } */
+/* PR target/116075 */
+
+#include <arm_sve.h>
+
+/* Inserting constant 0 into a vector of constant 0s is a no-op.  */
+svint8_t f(void)
+{
+ svint8_t tt;
+ tt = svdup_s8 (0);
+ tt = svinsr (tt, 0);
+ return tt;
+}
+
+/* Inserting variable T into a vector uniformly filled with T is a
+   no-op too.  */
+svint8_t f1(int8_t t)
+{
+ svint8_t tt;
+ tt = svdup_s8 (t);
+ tt = svinsr (tt, t);
+ return tt;
+}
+
+/* In both functions the inserted scalar equals the vector's uniform
+   value, so the .VEC_SHL_INSERT calls should have been folded away.  */
+
+/* { dg-final { scan-tree-dump-not ".VEC_SHL_INSERT " "optimized" } } */
+
+
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O -fdump-tree-optimized" } */
+/* PR target/116075 */
+
+#include <arm_sve.h>
+
+/* Inserting variable T into a vector of 0s: T is not known to be 0,
+   so the insert must stay.  */
+svint8_t f(int8_t t)
+{
+ svint8_t tt;
+ tt = svdup_s8 (0);
+ tt = svinsr (tt, t);
+ return tt;
+}
+
+/* Inserting constant 0 into a vector uniformly filled with variable T:
+   again not foldable at compile time.  */
+svint8_t f1(int8_t t)
+{
+ svint8_t tt;
+ tt = svdup_s8 (t);
+ tt = svinsr (tt, 0);
+ return tt;
+}
+
+/* In both functions the inserted scalar may differ from the vector's
+   uniform value, so both .VEC_SHL_INSERT calls must remain.  */
+
+/* { dg-final { scan-tree-dump-times ".VEC_SHL_INSERT " 2 "optimized" } } */
+
+