--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O1 -fdump-tree-forwprop1-details -fdump-tree-esra-details -fexceptions" } */
+
+/* PR tree-optimization/122247 */
+
+struct s1
+{
+ int t[1024]; /* 1024 ints: large enough that SRA should not scalarize the whole aggregate.  */
+};
+
+struct s1 f(void); /* NOTE(review): declared but never used in this test — presumably left over from reduction.  */
+
+void g(int a, int b, int ); /* Consumes the three elements read after the block scope ends.  */
+void p(struct s1); /* Takes the aggregate by value, forcing a copy at the call.  */
+void h(struct s1 outer)
+{
+ {
+ struct s1 inner = outer; /* Full-aggregate copy; the dump scans in this file match this stmt text literally.  */
+ p(inner); /* Forwprop should rewrite this to p(outer), making the copy above dead.  */
+ }
+ g(outer.t[0], outer.t[1], outer.t[2]); /* Only three scalar reads of outer remain afterwards.  */
+}
+/* Forwprop should be able to copy-propagate `inner = outer` into the call to p,
+   and then remove the now-dead copy.  */
+
+/* { dg-final { scan-tree-dump-times "after previous" 1 "forwprop1" } } */
+/* { dg-final { scan-tree-dump-times "Removing dead store stmt inner = outer" 1 "forwprop1" } } */
+
+/* The extra copy that was introduced by inlining is removed, so SRA should not
+   decide to scalarize either inner or outer, even for the 3 elements accessed afterwards.  */
+/* { dg-final { scan-tree-dump-times "Disqualifying inner" 1 "esra" } } */
+/* { dg-final { scan-tree-dump-times "Disqualifying outer" 1 "esra" } } */
+
/* Only handle clobbers of a full decl. */
if (!DECL_P (lhs))
return;
+ clobber_kind kind = (clobber_kind)CLOBBER_KIND (gimple_assign_rhs1 (stmt));
ao_ref_init (&read, lhs);
tree vuse = gimple_vuse (stmt);
unsigned limit = full_walk ? param_sccvn_max_alias_queries_per_access : 4;
basic_block ubb = gimple_bb (use_stmt);
if (stmt == use_stmt)
continue;
+ /* If the use is the same kind of clobber for lhs,
+ then it can be safely skipped; this happens with eh
+ and sometimes jump threading. */
+ if (gimple_clobber_p (use_stmt, kind)
+ && lhs == gimple_assign_lhs (use_stmt))
+ continue;
/* The use needs to be dominating the clobber. */
if ((ubb != bb && !dominated_by_p (CDI_DOMINATORS, bb, ubb))
|| ref_maybe_used_by_stmt_p (use_stmt, &read, false))