/* Vectorizer Specific Loop Manipulations
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2012
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>
   and Ira Rosen <irar@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "ggc.h"
#include "tree.h"
#include "basic-block.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "cfgloop.h"
#include "diagnostic-core.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "langhooks.h"

/*************************************************************************
  Simple Loop Peeling Utilities

  Utilities to support loop peeling for vectorization purposes.
 *************************************************************************/

/* Renames the use *OP_P.  */

static void
rename_use_op (use_operand_p op_p)
{
  tree new_name;

  if (TREE_CODE (USE_FROM_PTR (op_p)) != SSA_NAME)
    return;

  new_name = get_current_def (USE_FROM_PTR (op_p));

  /* Something defined outside of the loop.  */
  if (!new_name)
    return;

  /* An ordinary ssa name defined in the loop.  */

  SET_USE (op_p, new_name);
}

/* Renames the variables in basic block BB.  */

void
rename_variables_in_bb (basic_block bb)
{
  gimple_stmt_iterator gsi;
  gimple stmt;
  use_operand_p use_p;
  ssa_op_iter iter;
  edge e;
  edge_iterator ei;
  struct loop *loop = bb->loop_father;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      stmt = gsi_stmt (gsi);
      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
        rename_use_op (use_p);
    }

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      if (!flow_bb_inside_loop_p (loop, e->dest))
        continue;
      for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
        rename_use_op (PHI_ARG_DEF_PTR_FROM_EDGE (gsi_stmt (gsi), e));
    }
}

/* Renames variables in the newly generated LOOP.  */

void
rename_variables_in_loop (struct loop *loop)
{
  unsigned i;
  basic_block *bbs;

  bbs = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    rename_variables_in_bb (bbs[i]);

  free (bbs);
}

typedef struct
{
  tree from, to;
  basic_block bb;
} adjust_info;

DEF_VEC_O(adjust_info);
DEF_VEC_ALLOC_O_STACK(adjust_info);
#define VEC_adjust_info_stack_alloc(alloc) VEC_stack_alloc (adjust_info, alloc)

/* A stack of values to be adjusted in debug stmts.  We have to
   process them LIFO, so that the closest substitution applies.  If we
   processed them FIFO, without the stack, we might substitute uses
   with a PHI DEF that would soon become non-dominant, and when we got
   to the suitable one, it wouldn't have anything to substitute any
   more.  */
static VEC(adjust_info, stack) *adjust_vec;

/* Adjust any debug stmts that referenced AI->from values to use the
   loop-closed AI->to, if the references are dominated by AI->bb and
   not by the definition of AI->from.  */

static void
adjust_debug_stmts_now (adjust_info *ai)
{
  basic_block bbphi = ai->bb;
  tree orig_def = ai->from;
  tree new_def = ai->to;
  imm_use_iterator imm_iter;
  gimple stmt;
  basic_block bbdef = gimple_bb (SSA_NAME_DEF_STMT (orig_def));

  gcc_assert (dom_info_available_p (CDI_DOMINATORS));

  /* Adjust any debug stmts that held onto non-loop-closed
     references.  */
  FOR_EACH_IMM_USE_STMT (stmt, imm_iter, orig_def)
    {
      use_operand_p use_p;
      basic_block bbuse;

      if (!is_gimple_debug (stmt))
        continue;

      gcc_assert (gimple_debug_bind_p (stmt));

      bbuse = gimple_bb (stmt);

      if ((bbuse == bbphi
           || dominated_by_p (CDI_DOMINATORS, bbuse, bbphi))
          && !(bbuse == bbdef
               || dominated_by_p (CDI_DOMINATORS, bbuse, bbdef)))
        {
          if (new_def)
            FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
              SET_USE (use_p, new_def);
          else
            {
              gimple_debug_bind_reset_value (stmt);
              update_stmt (stmt);
            }
        }
    }
}

/* Adjust debug stmts as scheduled before.  */

static void
adjust_vec_debug_stmts (void)
{
  if (!MAY_HAVE_DEBUG_STMTS)
    return;

  gcc_assert (adjust_vec);

  while (!VEC_empty (adjust_info, adjust_vec))
    {
      adjust_debug_stmts_now (VEC_last (adjust_info, adjust_vec));
      VEC_pop (adjust_info, adjust_vec);
    }

  VEC_free (adjust_info, stack, adjust_vec);
}

/* Adjust any debug stmts that referenced FROM values to use the
   loop-closed TO, if the references are dominated by BB and not by
   the definition of FROM.  If adjust_vec is non-NULL, adjustments
   will be postponed until adjust_vec_debug_stmts is called.  */

static void
adjust_debug_stmts (tree from, tree to, basic_block bb)
{
  adjust_info ai;

  if (MAY_HAVE_DEBUG_STMTS && TREE_CODE (from) == SSA_NAME
      && SSA_NAME_VAR (from) != gimple_vop (cfun))
    {
      ai.from = from;
      ai.to = to;
      ai.bb = bb;

      if (adjust_vec)
        VEC_safe_push (adjust_info, stack, adjust_vec, &ai);
      else
        adjust_debug_stmts_now (&ai);
    }
}

/* Change E's phi arg in UPDATE_PHI to NEW_DEF, and record information
   to adjust any debug stmts that referenced the old phi arg,
   presumably non-loop-closed references left over from other
   transformations.  */

static void
adjust_phi_and_debug_stmts (gimple update_phi, edge e, tree new_def)
{
  tree orig_def = PHI_ARG_DEF_FROM_EDGE (update_phi, e);

  SET_PHI_ARG_DEF (update_phi, e->dest_idx, new_def);

  if (MAY_HAVE_DEBUG_STMTS)
    adjust_debug_stmts (orig_def, PHI_RESULT (update_phi),
                        gimple_bb (update_phi));
}

/* Update the PHI nodes of NEW_LOOP.

   NEW_LOOP is a duplicate of ORIG_LOOP.
   AFTER indicates whether NEW_LOOP executes before or after ORIG_LOOP:
   AFTER is true if NEW_LOOP executes after ORIG_LOOP, and false if it
   executes before it.  */

static void
slpeel_update_phis_for_duplicate_loop (struct loop *orig_loop,
                                       struct loop *new_loop, bool after)
{
  tree new_ssa_name;
  gimple phi_new, phi_orig;
  tree def;
  edge orig_loop_latch = loop_latch_edge (orig_loop);
  edge orig_entry_e = loop_preheader_edge (orig_loop);
  edge new_loop_exit_e = single_exit (new_loop);
  edge new_loop_entry_e = loop_preheader_edge (new_loop);
  edge entry_arg_e = (after ? orig_loop_latch : orig_entry_e);
  gimple_stmt_iterator gsi_new, gsi_orig;

  /*
     step 1. For each loop-header-phi:
             Add the first phi argument for the phi in NEW_LOOP
             (the one associated with the entry of NEW_LOOP)

     step 2. For each loop-header-phi:
             Add the second phi argument for the phi in NEW_LOOP
             (the one associated with the latch of NEW_LOOP)

     step 3. Update the phis in the successor block of NEW_LOOP.

        case 1: NEW_LOOP was placed before ORIG_LOOP:
                The successor block of NEW_LOOP is the header of ORIG_LOOP.
                Updating the phis in the successor block can therefore be done
                along with the scanning of the loop header phis, because the
                header blocks of ORIG_LOOP and NEW_LOOP have exactly the same
                phi nodes, organized in the same order.

        case 2: NEW_LOOP was placed after ORIG_LOOP:
                The successor block of NEW_LOOP is the original exit block of
                ORIG_LOOP - the phis to be updated are the loop-closed-ssa phis.
                We postpone updating these phis to a later stage (when
                loop guards are added).
   */


  /* Scan the phis in the headers of the old and new loops
     (they are organized in exactly the same order).  */

  for (gsi_new = gsi_start_phis (new_loop->header),
       gsi_orig = gsi_start_phis (orig_loop->header);
       !gsi_end_p (gsi_new) && !gsi_end_p (gsi_orig);
       gsi_next (&gsi_new), gsi_next (&gsi_orig))
    {
      source_location locus;
      phi_new = gsi_stmt (gsi_new);
      phi_orig = gsi_stmt (gsi_orig);

      /* step 1.  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, entry_arg_e);
      locus = gimple_phi_arg_location_from_edge (phi_orig, entry_arg_e);
      add_phi_arg (phi_new, def, new_loop_entry_e, locus);

      /* step 2.  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_loop_latch);
      locus = gimple_phi_arg_location_from_edge (phi_orig, orig_loop_latch);
      if (TREE_CODE (def) != SSA_NAME)
        continue;

      new_ssa_name = get_current_def (def);
      if (!new_ssa_name)
        {
          /* This only happens if there are no definitions
             inside the loop.  Use the phi_result in this case.  */
          new_ssa_name = PHI_RESULT (phi_new);
        }

      /* An ordinary ssa name defined in the loop.  */
      add_phi_arg (phi_new, new_ssa_name, loop_latch_edge (new_loop), locus);

      /* Drop any debug references outside the loop, if they would
         become ill-formed SSA.  */
      adjust_debug_stmts (def, NULL, single_exit (orig_loop)->dest);

      /* step 3 (case 1).  */
      if (!after)
        {
          gcc_assert (new_loop_exit_e == orig_entry_e);
          adjust_phi_and_debug_stmts (phi_orig, new_loop_exit_e, new_ssa_name);
        }
    }
}
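
/* Illustrative example (not part of the original sources): for a header phi
   x_1 = PHI <x_0 (preheader), x_2 (latch)> in ORIG_LOOP, step 1 above copies
   the argument coming in through ENTRY_ARG_E (x_2 when NEW_LOOP follows
   ORIG_LOOP, x_0 otherwise) onto NEW_LOOP's preheader edge, and step 2 wires
   get_current_def (x_2) - the name the latch value was renamed to in the
   duplicated copy - onto NEW_LOOP's latch edge.  */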

/* Update PHI nodes for a guard of the LOOP.

   Input:
   - LOOP, GUARD_EDGE: LOOP is a loop for which we added guard code that
        controls whether LOOP is to be executed.  GUARD_EDGE is the edge that
        originates from the guard-bb, skips LOOP and reaches the (unique) exit
        bb of LOOP.  This loop-exit-bb is an empty bb with one successor.
        We denote this bb NEW_MERGE_BB because before the guard code was added
        it had a single predecessor (the LOOP header), and now it became a merge
        point of two paths - the path that ends with the LOOP exit-edge, and
        the path that ends with GUARD_EDGE.
   - NEW_EXIT_BB: New basic block that is added by this function between LOOP
        and NEW_MERGE_BB.  It is used to place loop-closed-ssa-form exit-phis.

   ===> The CFG before the guard-code was added:
        LOOP_header_bb:
          loop_body
          if (exit_loop) goto update_bb
          else           goto LOOP_header_bb
        update_bb:

   ==> The CFG after the guard-code was added:
        guard_bb:
          if (LOOP_guard_condition) goto new_merge_bb
          else                      goto LOOP_header_bb
        LOOP_header_bb:
          loop_body
          if (exit_loop_condition) goto new_merge_bb
          else                     goto LOOP_header_bb
        new_merge_bb:
          goto update_bb
        update_bb:

   ==> The CFG after this function:
        guard_bb:
          if (LOOP_guard_condition) goto new_merge_bb
          else                      goto LOOP_header_bb
        LOOP_header_bb:
          loop_body
          if (exit_loop_condition) goto new_exit_bb
          else                     goto LOOP_header_bb
        new_exit_bb:
        new_merge_bb:
          goto update_bb
        update_bb:

   This function:
   1. creates and updates the relevant phi nodes to account for the new
      incoming edge (GUARD_EDGE) into NEW_MERGE_BB.  This involves:
      1.1. Create phi nodes at NEW_MERGE_BB.
      1.2. Update the phi nodes at the successor of NEW_MERGE_BB (denoted
           UPDATE_BB).  UPDATE_BB was the exit-bb of LOOP before NEW_MERGE_BB.
   2. preserves loop-closed-ssa-form by creating the required phi nodes
      at the exit of LOOP (i.e., in NEW_EXIT_BB).

   There are two flavors to this function:

   slpeel_update_phi_nodes_for_guard1:
     Here the guard controls whether we enter or skip LOOP, where LOOP is a
     prolog_loop (loop1 below), and the new phis created in NEW_MERGE_BB are
     for variables that have phis in the loop header.

   slpeel_update_phi_nodes_for_guard2:
     Here the guard controls whether we enter or skip LOOP, where LOOP is an
     epilog_loop (loop2 below), and the new phis created in NEW_MERGE_BB are
     for variables that have phis in the loop exit.

   I.e., the overall structure is:

        loop1_preheader_bb:
                guard1 (goto loop1/merge1_bb)
        loop1
        loop1_exit_bb:
                guard2 (goto merge1_bb/merge2_bb)
        merge1_bb
        loop2
        loop2_exit_bb
        merge2_bb
        next_bb

   slpeel_update_phi_nodes_for_guard1 takes care of creating phis in
   loop1_exit_bb and merge1_bb.  These are entry phis (phis for the vars
   that have phis in loop1->header).

   slpeel_update_phi_nodes_for_guard2 takes care of creating phis in
   loop2_exit_bb and merge2_bb.  These are exit phis (phis for the vars
   that have phis in next_bb).  It also adds some of these phis to
   loop1_exit_bb.

   slpeel_update_phi_nodes_for_guard1 is always called before
   slpeel_update_phi_nodes_for_guard2.  They are both needed in order
   to create correct data-flow and loop-closed-ssa-form.

   Generally slpeel_update_phi_nodes_for_guard1 creates phis for variables
   that change between iterations of a loop (and therefore have a phi-node
   at the loop entry), whereas slpeel_update_phi_nodes_for_guard2 creates
   phis for variables that are used out of the loop (and therefore have
   loop-closed exit phis).  Some variables may be both updated between
   iterations and used after the loop.  This is why in loop1_exit_bb we
   may need both entry_phis (created by slpeel_update_phi_nodes_for_guard1)
   and exit phis (created by slpeel_update_phi_nodes_for_guard2).

   - IS_NEW_LOOP: if IS_NEW_LOOP is true, then LOOP is a newly created copy of
     an original loop.  I.e., we have:

           orig_loop
           guard_bb (goto LOOP/new_merge)
           new_loop <-- LOOP
           new_exit
           new_merge
           next_bb

     If IS_NEW_LOOP is false, then LOOP is an original loop, in which case we
     have:

           new_loop
           guard_bb (goto LOOP/new_merge)
           orig_loop <-- LOOP
           new_exit
           new_merge
           next_bb

     The SSA names defined in the original loop have a current
     reaching definition that records the corresponding new
     ssa-name used in the new duplicated loop copy.
  */
/* Function slpeel_update_phi_nodes_for_guard1

   Input:
   - GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.
   - DEFS - a bitmap of ssa names to mark new names for which we recorded
            information.

   In the context of the overall structure, we have:

        loop1_preheader_bb:
                guard1 (goto loop1/merge1_bb)
LOOP->  loop1
        loop1_exit_bb:
                guard2 (goto merge1_bb/merge2_bb)
        merge1_bb
        loop2
        loop2_exit_bb
        merge2_bb
        next_bb

   For each name updated between loop iterations (i.e. - for each name that
   has an entry (loop-header) phi in LOOP) we create a new phi in:
   1. merge1_bb (to account for the edge from guard1)
   2. loop1_exit_bb (an exit-phi to keep LOOP in loop-closed form)
*/

static void
slpeel_update_phi_nodes_for_guard1 (edge guard_edge, struct loop *loop,
                                    bool is_new_loop, basic_block *new_exit_bb)
{
  gimple orig_phi, new_phi;
  gimple update_phi, update_phi2;
  tree guard_arg, loop_arg;
  basic_block new_merge_bb = guard_edge->dest;
  edge e = EDGE_SUCC (new_merge_bb, 0);
  basic_block update_bb = e->dest;
  basic_block orig_bb = loop->header;
  edge new_exit_e;
  tree current_new_name;
  gimple_stmt_iterator gsi_orig, gsi_update;

  /* Create new bb between loop and new_merge_bb.  */
  *new_exit_bb = split_edge (single_exit (loop));

  new_exit_e = EDGE_SUCC (*new_exit_bb, 0);

  for (gsi_orig = gsi_start_phis (orig_bb),
       gsi_update = gsi_start_phis (update_bb);
       !gsi_end_p (gsi_orig) && !gsi_end_p (gsi_update);
       gsi_next (&gsi_orig), gsi_next (&gsi_update))
    {
      source_location loop_locus, guard_locus;
      orig_phi = gsi_stmt (gsi_orig);
      update_phi = gsi_stmt (gsi_update);

      /** 1. Handle new-merge-point phis  **/

      /* 1.1. Generate new phi node in NEW_MERGE_BB:  */
      new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
                                 new_merge_bb);

      /* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the exit-edge
            of LOOP.  Set the two phi args in NEW_PHI for these edges:  */
      loop_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, EDGE_SUCC (loop->latch, 0));
      loop_locus = gimple_phi_arg_location_from_edge (orig_phi,
                                                      EDGE_SUCC (loop->latch,
                                                                 0));
      guard_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, loop_preheader_edge (loop));
      guard_locus
        = gimple_phi_arg_location_from_edge (orig_phi,
                                             loop_preheader_edge (loop));

      add_phi_arg (new_phi, loop_arg, new_exit_e, loop_locus);
      add_phi_arg (new_phi, guard_arg, guard_edge, guard_locus);

      /* 1.3. Update phi in successor block.  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == loop_arg
                  || PHI_ARG_DEF_FROM_EDGE (update_phi, e) == guard_arg);
      adjust_phi_and_debug_stmts (update_phi, e, PHI_RESULT (new_phi));
      update_phi2 = new_phi;


      /** 2. Handle loop-closed-ssa-form phis  **/

      if (!is_gimple_reg (PHI_RESULT (orig_phi)))
        continue;

      /* 2.1. Generate new phi node in NEW_EXIT_BB:  */
      new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
                                 *new_exit_bb);

      /* 2.2. NEW_EXIT_BB has one incoming edge: the exit-edge of the loop.  */
      add_phi_arg (new_phi, loop_arg, single_exit (loop), loop_locus);

      /* 2.3. Update phi in successor of NEW_EXIT_BB:  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, new_exit_e) == loop_arg);
      adjust_phi_and_debug_stmts (update_phi2, new_exit_e,
                                  PHI_RESULT (new_phi));

      /* 2.4. Record the newly created name with set_current_def.
         We want to find a name such that
                name = get_current_def (orig_loop_name)
         and to set its current definition as follows:
                set_current_def (name, new_phi_name)

         If LOOP is a new loop then loop_arg is already the name we're
         looking for.  If LOOP is the original loop, then loop_arg is
         the orig_loop_name and the relevant name is recorded in its
         current reaching definition.  */
      if (is_new_loop)
        current_new_name = loop_arg;
      else
        {
          current_new_name = get_current_def (loop_arg);
          /* current_def is not available only if the variable does not
             change inside the loop, in which case we also don't care
             about recording a current_def for it because we won't be
             trying to create loop-exit-phis for it.  */
          if (!current_new_name)
            continue;
        }
      gcc_assert (get_current_def (current_new_name) == NULL_TREE);

      set_current_def (current_new_name, PHI_RESULT (new_phi));
    }
}

/* Function slpeel_update_phi_nodes_for_guard2

   Input:
   - GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.

   In the context of the overall structure, we have:

        loop1_preheader_bb:
                guard1 (goto loop1/merge1_bb)
        loop1
        loop1_exit_bb:
                guard2 (goto merge1_bb/merge2_bb)
        merge1_bb
LOOP->  loop2
        loop2_exit_bb
        merge2_bb
        next_bb

   For each name used outside the loop (i.e. - for each name that has an exit
   phi in next_bb) we create a new phi in:
   1. merge2_bb (to account for the edge from guard_bb)
   2. loop2_exit_bb (an exit-phi to keep LOOP in loop-closed form)
   3. guard2 bb (an exit phi to keep the preceding loop in loop-closed form),
      if needed (if it wasn't handled by slpeel_update_phi_nodes_for_guard1).
*/

static void
slpeel_update_phi_nodes_for_guard2 (edge guard_edge, struct loop *loop,
                                    bool is_new_loop, basic_block *new_exit_bb)
{
  gimple orig_phi, new_phi;
  gimple update_phi, update_phi2;
  tree guard_arg, loop_arg;
  basic_block new_merge_bb = guard_edge->dest;
  edge e = EDGE_SUCC (new_merge_bb, 0);
  basic_block update_bb = e->dest;
  edge new_exit_e;
  tree orig_def, orig_def_new_name;
  tree new_name, new_name2;
  tree arg;
  gimple_stmt_iterator gsi;

  /* Create new bb between loop and new_merge_bb.  */
  *new_exit_bb = split_edge (single_exit (loop));

  new_exit_e = EDGE_SUCC (*new_exit_bb, 0);

  for (gsi = gsi_start_phis (update_bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      update_phi = gsi_stmt (gsi);
      orig_phi = update_phi;
      orig_def = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
      /* This loop-closed-phi actually doesn't represent a use
         out of the loop - the phi arg is a constant.  */
      if (TREE_CODE (orig_def) != SSA_NAME)
        continue;
      orig_def_new_name = get_current_def (orig_def);
      arg = NULL_TREE;

      /** 1. Handle new-merge-point phis  **/

      /* 1.1. Generate new phi node in NEW_MERGE_BB:  */
      new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
                                 new_merge_bb);

      /* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the exit-edge
            of LOOP.  Set the two PHI args in NEW_PHI for these edges:  */
      new_name = orig_def;
      new_name2 = NULL_TREE;
      if (orig_def_new_name)
        {
          new_name = orig_def_new_name;
          /* Some variables have both loop-entry-phis and loop-exit-phis.
             Such variables were given yet newer names by phis placed in
             guard_bb by slpeel_update_phi_nodes_for_guard1.  I.e:
             new_name2 = get_current_def (get_current_def (orig_name)).  */
          new_name2 = get_current_def (new_name);
        }

      if (is_new_loop)
        {
          guard_arg = orig_def;
          loop_arg = new_name;
        }
      else
        {
          guard_arg = new_name;
          loop_arg = orig_def;
        }
      if (new_name2)
        guard_arg = new_name2;

      add_phi_arg (new_phi, loop_arg, new_exit_e, UNKNOWN_LOCATION);
      add_phi_arg (new_phi, guard_arg, guard_edge, UNKNOWN_LOCATION);

      /* 1.3. Update phi in successor block.  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == orig_def);
      adjust_phi_and_debug_stmts (update_phi, e, PHI_RESULT (new_phi));
      update_phi2 = new_phi;


      /** 2. Handle loop-closed-ssa-form phis  **/

      /* 2.1. Generate new phi node in NEW_EXIT_BB:  */
      new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
                                 *new_exit_bb);

      /* 2.2. NEW_EXIT_BB has one incoming edge: the exit-edge of the loop.  */
      add_phi_arg (new_phi, loop_arg, single_exit (loop), UNKNOWN_LOCATION);

      /* 2.3. Update phi in successor of NEW_EXIT_BB:  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, new_exit_e) == loop_arg);
      adjust_phi_and_debug_stmts (update_phi2, new_exit_e,
                                  PHI_RESULT (new_phi));


      /** 3. Handle loop-closed-ssa-form phis for first loop  **/

      /* 3.1. Find the relevant names that need an exit-phi in
         GUARD_BB, i.e. names for which
         slpeel_update_phi_nodes_for_guard1 had not already created a
         phi node.  This is the case for names that are used outside
         the loop (and therefore need an exit phi) but are not updated
         across loop iterations (and therefore don't have a
         loop-header-phi).

         slpeel_update_phi_nodes_for_guard1 is responsible for
         creating loop-exit phis in GUARD_BB for names that have a
         loop-header-phi.  When such a phi is created we also record
         the new name in its current definition.  If this new name
         exists, then guard_arg was set to this new name (see 1.2
         above).  Therefore, if guard_arg is not this new name, this
         is an indication that an exit-phi in GUARD_BB was not yet
         created, so we take care of it here.  */
      if (guard_arg == new_name2)
        continue;
      arg = guard_arg;

      /* 3.2. Generate new phi node in GUARD_BB:  */
      new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
                                 guard_edge->src);

      /* 3.3. GUARD_BB has one incoming edge:  */
      gcc_assert (EDGE_COUNT (guard_edge->src->preds) == 1);
      add_phi_arg (new_phi, arg, EDGE_PRED (guard_edge->src, 0),
                   UNKNOWN_LOCATION);

      /* 3.4. Update phi in successor of GUARD_BB:  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, guard_edge)
                  == guard_arg);
      adjust_phi_and_debug_stmts (update_phi2, guard_edge,
                                  PHI_RESULT (new_phi));
    }
}

/* Make the LOOP iterate NITERS times.  This is done by adding a new IV
   that starts at zero, increases by one and its limit is NITERS.

   Assumption: the exit-condition of LOOP is the last stmt in the loop.  */
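
/* Illustrative sketch (not part of the original code) of the exit test this
   function creates, assuming the exit edge is the true edge of the loop's
   condition: a new IV is created with init 0 and step 1, and the old exit
   condition is replaced by

       indx_after_incr = indx_before_incr + 1;
       if (indx_after_incr >= NITERS) goto exit; else goto loop;

   If the exit edge is the false edge instead, the comparison code is
   LT_EXPR, so the loop is left on the false arm of the branch.  */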

void
slpeel_make_loop_iterate_ntimes (struct loop *loop, tree niters)
{
  tree indx_before_incr, indx_after_incr;
  gimple cond_stmt;
  gimple orig_cond;
  edge exit_edge = single_exit (loop);
  gimple_stmt_iterator loop_cond_gsi;
  gimple_stmt_iterator incr_gsi;
  bool insert_after;
  tree init = build_int_cst (TREE_TYPE (niters), 0);
  tree step = build_int_cst (TREE_TYPE (niters), 1);
  LOC loop_loc;
  enum tree_code code;

  orig_cond = get_loop_exit_condition (loop);
  gcc_assert (orig_cond);
  loop_cond_gsi = gsi_for_stmt (orig_cond);

  standard_iv_increment_position (loop, &incr_gsi, &insert_after);
  create_iv (init, step, NULL_TREE, loop,
             &incr_gsi, insert_after, &indx_before_incr, &indx_after_incr);

  indx_after_incr = force_gimple_operand_gsi (&loop_cond_gsi, indx_after_incr,
                                              true, NULL_TREE, true,
                                              GSI_SAME_STMT);
  niters = force_gimple_operand_gsi (&loop_cond_gsi, niters, true, NULL_TREE,
                                     true, GSI_SAME_STMT);

  code = (exit_edge->flags & EDGE_TRUE_VALUE) ? GE_EXPR : LT_EXPR;
  cond_stmt = gimple_build_cond (code, indx_after_incr, niters, NULL_TREE,
                                 NULL_TREE);

  gsi_insert_before (&loop_cond_gsi, cond_stmt, GSI_SAME_STMT);

  /* Remove old loop exit test:  */
  gsi_remove (&loop_cond_gsi, true);

  loop_loc = find_loop_location (loop);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (loop_loc != UNKNOWN_LOC)
        fprintf (dump_file, "\nloop at %s:%d: ",
                 LOC_FILE (loop_loc), LOC_LINE (loop_loc));
      print_gimple_stmt (dump_file, cond_stmt, 0, TDF_SLIM);
    }

  loop->nb_iterations = niters;
}

/* Given LOOP this function generates a new copy of it and puts it
   on E which is either the entry or exit of LOOP.  */
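
/* Usage note (illustrative, not from the original sources): passing
   loop_preheader_edge (loop) as E places the copy before LOOP, while passing
   single_exit (loop) places the copy after it; any other edge makes this
   function return NULL.  */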

struct loop *
slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, edge e)
{
  struct loop *new_loop;
  basic_block *new_bbs, *bbs;
  bool at_exit;
  bool was_imm_dom;
  basic_block exit_dest;
  gimple phi;
  tree phi_arg;
  edge exit, new_exit;
  gimple_stmt_iterator gsi;

  at_exit = (e == single_exit (loop));
  if (!at_exit && e != loop_preheader_edge (loop))
    return NULL;

  bbs = get_loop_body (loop);

  /* Check whether duplication is possible.  */
  if (!can_copy_bbs_p (bbs, loop->num_nodes))
    {
      free (bbs);
      return NULL;
    }

  /* Generate new loop structure.  */
  new_loop = duplicate_loop (loop, loop_outer (loop));
  if (!new_loop)
    {
      free (bbs);
      return NULL;
    }

  exit_dest = single_exit (loop)->dest;
  was_imm_dom = (get_immediate_dominator (CDI_DOMINATORS,
                                          exit_dest) == loop->header ?
                 true : false);

  new_bbs = XNEWVEC (basic_block, loop->num_nodes);

  exit = single_exit (loop);
  copy_bbs (bbs, loop->num_nodes, new_bbs,
            &exit, 1, &new_exit, NULL,
            e->src);

  /* Duplicating phi args at exit bbs as coming
     also from exit of duplicated loop.  */
  for (gsi = gsi_start_phis (exit_dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      phi = gsi_stmt (gsi);
      phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, single_exit (loop));
      if (phi_arg)
        {
          edge new_loop_exit_edge;
          source_location locus;

          locus = gimple_phi_arg_location_from_edge (phi, single_exit (loop));
          if (EDGE_SUCC (new_loop->header, 0)->dest == new_loop->latch)
            new_loop_exit_edge = EDGE_SUCC (new_loop->header, 1);
          else
            new_loop_exit_edge = EDGE_SUCC (new_loop->header, 0);

          add_phi_arg (phi, phi_arg, new_loop_exit_edge, locus);
        }
    }

  if (at_exit) /* Add the loop copy at exit.  */
    {
      redirect_edge_and_branch_force (e, new_loop->header);
      PENDING_STMT (e) = NULL;
      set_immediate_dominator (CDI_DOMINATORS, new_loop->header, e->src);
      if (was_imm_dom)
        set_immediate_dominator (CDI_DOMINATORS, exit_dest, new_loop->header);
    }
  else /* Add the copy at entry.  */
    {
      edge new_exit_e;
      edge entry_e = loop_preheader_edge (loop);
      basic_block preheader = entry_e->src;

      if (!flow_bb_inside_loop_p (new_loop,
                                  EDGE_SUCC (new_loop->header, 0)->dest))
        new_exit_e = EDGE_SUCC (new_loop->header, 0);
      else
        new_exit_e = EDGE_SUCC (new_loop->header, 1);

      redirect_edge_and_branch_force (new_exit_e, loop->header);
      PENDING_STMT (new_exit_e) = NULL;
      set_immediate_dominator (CDI_DOMINATORS, loop->header,
                               new_exit_e->src);

      /* We have to add phi args to the loop->header here as coming
         from new_exit_e edge.  */
      for (gsi = gsi_start_phis (loop->header);
           !gsi_end_p (gsi);
           gsi_next (&gsi))
        {
          phi = gsi_stmt (gsi);
          phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, entry_e);
          if (phi_arg)
            add_phi_arg (phi, phi_arg, new_exit_e,
                         gimple_phi_arg_location_from_edge (phi, entry_e));
        }

      redirect_edge_and_branch_force (entry_e, new_loop->header);
      PENDING_STMT (entry_e) = NULL;
      set_immediate_dominator (CDI_DOMINATORS, new_loop->header, preheader);
    }

  free (new_bbs);
  free (bbs);

  return new_loop;
}

/* Given the condition statement COND, put it as the last statement
   of GUARD_BB; EXIT_BB is the basic block to skip the loop;
   Assumes that this is the single exit of the guarded loop.
   Returns the skip edge, inserts new stmts on the COND_EXPR_STMT_LIST.  */
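
/* Illustrative sketch (not part of the original code) of the resulting
   control flow:

       GUARD_BB:
         <COND_EXPR_STMT_LIST and any stmts needed to gimplify COND>
         if (COND) goto EXIT_BB;        <-- new skip edge, returned below
         else continue into the loop    <-- old successor edge, now marked
                                            EDGE_FALSE_VALUE
 */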

static edge
slpeel_add_loop_guard (basic_block guard_bb, tree cond,
                       gimple_seq cond_expr_stmt_list,
                       basic_block exit_bb, basic_block dom_bb)
{
  gimple_stmt_iterator gsi;
  edge new_e, enter_e;
  gimple cond_stmt;
  gimple_seq gimplify_stmt_list = NULL;

  enter_e = EDGE_SUCC (guard_bb, 0);
  enter_e->flags &= ~EDGE_FALLTHRU;
  enter_e->flags |= EDGE_FALSE_VALUE;
  gsi = gsi_last_bb (guard_bb);

  cond = force_gimple_operand_1 (cond, &gimplify_stmt_list, is_gimple_condexpr,
                                 NULL_TREE);
  if (gimplify_stmt_list)
    gimple_seq_add_seq (&cond_expr_stmt_list, gimplify_stmt_list);
  cond_stmt = gimple_build_cond_from_tree (cond, NULL_TREE, NULL_TREE);
  if (cond_expr_stmt_list)
    gsi_insert_seq_after (&gsi, cond_expr_stmt_list, GSI_NEW_STMT);

  gsi = gsi_last_bb (guard_bb);
  gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);

  /* Add new edge to connect guard block to the merge/loop-exit block.  */
  new_e = make_edge (guard_bb, exit_bb, EDGE_TRUE_VALUE);
  set_immediate_dominator (CDI_DOMINATORS, exit_bb, dom_bb);
  return new_e;
}

/* This function verifies that the following restrictions apply to LOOP:
   (1) it is innermost
   (2) it consists of exactly 2 basic blocks - header, and an empty latch.
   (3) it is single entry, single exit
   (4) its exit condition is the last stmt in the header
   (5) E is the entry/exit edge of LOOP.
 */
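
/* Illustrative example (not from the original sources): a countable
   single-statement loop such as

       for (i = 0; i < n; i++)
         a[i] = b[i];

   typically satisfies these restrictions once it is in the canonical form
   the vectorizer works on: a two-block loop (header plus an empty latch)
   with a single entry, a single exit, and the exit condition as the last
   statement of the header.  */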

bool
slpeel_can_duplicate_loop_p (const struct loop *loop, const_edge e)
{
  edge exit_e = single_exit (loop);
  edge entry_e = loop_preheader_edge (loop);
  gimple orig_cond = get_loop_exit_condition (loop);
  gimple_stmt_iterator loop_exit_gsi = gsi_last_bb (exit_e->src);

  if (need_ssa_update_p (cfun))
    return false;

  if (loop->inner
      /* All loops have an outer scope; the only case loop->outer is NULL is
         for the function itself.  */
      || !loop_outer (loop)
      || loop->num_nodes != 2
      || !empty_block_p (loop->latch)
      || !single_exit (loop)
      /* Verify that new loop exit condition can be trivially modified.  */
      || (!orig_cond || orig_cond != gsi_stmt (loop_exit_gsi))
      || (e != exit_e && e != entry_e))
    return false;

  return true;
}

#ifdef ENABLE_CHECKING
static void
slpeel_verify_cfg_after_peeling (struct loop *first_loop,
                                 struct loop *second_loop)
{
  basic_block loop1_exit_bb = single_exit (first_loop)->dest;
  basic_block loop2_entry_bb = loop_preheader_edge (second_loop)->src;
  basic_block loop1_entry_bb = loop_preheader_edge (first_loop)->src;

  /* A guard that controls whether the second_loop is to be executed or
     skipped is placed in first_loop->exit.  first_loop->exit therefore has
     two successors - one is the preheader of second_loop, and the other is
     a bb after second_loop.  */
  gcc_assert (EDGE_COUNT (loop1_exit_bb->succs) == 2);

  /* 1. Verify that one of the successors of first_loop->exit is the preheader
        of second_loop.  */

  /* The preheader of new_loop is expected to have two predecessors:
     first_loop->exit and the block that precedes first_loop.  */

  gcc_assert (EDGE_COUNT (loop2_entry_bb->preds) == 2
              && ((EDGE_PRED (loop2_entry_bb, 0)->src == loop1_exit_bb
                   && EDGE_PRED (loop2_entry_bb, 1)->src == loop1_entry_bb)
                  || (EDGE_PRED (loop2_entry_bb, 1)->src == loop1_exit_bb
                      && EDGE_PRED (loop2_entry_bb, 0)->src == loop1_entry_bb)));

  /* Verify that the other successor of first_loop->exit is after the
     second_loop.  */
  /* TODO */
}
#endif

/* If the run-time cost model check determines that vectorization is
   not profitable and hence a scalar loop should be generated, then set
   FIRST_NITERS to the number of prologue-peeled iterations.  This will
   allow all the iterations to be executed in the prologue-peeled scalar
   loop.  */
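
/* Illustrative sketch (not part of the original code) of the CFG built by
   this function, using its local names:

       cond_bb:
         if (scalar_loop_iters <= th) goto then_bb;
         else goto bb_before_first_loop;
       then_bb:
         prologue_after_cost_adjust_name
           = <scalar_loop_iters forced to a gimple value>;
       bb_before_first_loop:
         *first_niters = PHI <prologue_after_cost_adjust_name (then_bb),
                              original *first_niters (cond_bb)>;
 */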

static void
set_prologue_iterations (basic_block bb_before_first_loop,
                         tree *first_niters,
                         struct loop *loop,
                         unsigned int th)
{
  edge e;
  basic_block cond_bb, then_bb;
  tree var, prologue_after_cost_adjust_name;
  gimple_stmt_iterator gsi;
  gimple newphi;
  edge e_true, e_false, e_fallthru;
  gimple cond_stmt;
  gimple_seq stmts = NULL;
  tree cost_pre_condition = NULL_TREE;
  tree scalar_loop_iters =
    unshare_expr (LOOP_VINFO_NITERS_UNCHANGED (loop_vec_info_for_loop (loop)));

  e = single_pred_edge (bb_before_first_loop);
  cond_bb = split_edge (e);

  e = single_pred_edge (bb_before_first_loop);
  then_bb = split_edge (e);
  set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);

  e_false = make_single_succ_edge (cond_bb, bb_before_first_loop,
                                   EDGE_FALSE_VALUE);
  set_immediate_dominator (CDI_DOMINATORS, bb_before_first_loop, cond_bb);

  e_true = EDGE_PRED (then_bb, 0);
  e_true->flags &= ~EDGE_FALLTHRU;
  e_true->flags |= EDGE_TRUE_VALUE;

  e_fallthru = EDGE_SUCC (then_bb, 0);

  gsi = gsi_last_bb (cond_bb);
  cost_pre_condition =
    fold_build2 (LE_EXPR, boolean_type_node, scalar_loop_iters,
                 build_int_cst (TREE_TYPE (scalar_loop_iters), th));
  cost_pre_condition =
    force_gimple_operand_gsi_1 (&gsi, cost_pre_condition, is_gimple_condexpr,
                                NULL_TREE, false, GSI_CONTINUE_LINKING);
  cond_stmt = gimple_build_cond_from_tree (cost_pre_condition,
                                           NULL_TREE, NULL_TREE);
  gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);

  var = create_tmp_var (TREE_TYPE (scalar_loop_iters),
                        "prologue_after_cost_adjust");
  add_referenced_var (var);
  prologue_after_cost_adjust_name =
    force_gimple_operand (scalar_loop_iters, &stmts, false, var);

  gsi = gsi_last_bb (then_bb);
  if (stmts)
    gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);

  newphi = create_phi_node (var, bb_before_first_loop);
  add_phi_arg (newphi, prologue_after_cost_adjust_name, e_fallthru,
               UNKNOWN_LOCATION);
  add_phi_arg (newphi, *first_niters, e_false, UNKNOWN_LOCATION);

  *first_niters = PHI_RESULT (newphi);
}

/* Function slpeel_tree_peel_loop_to_edge.

   Peel the first (last) iterations of LOOP into a new prolog (epilog) loop
   that is placed on the entry (exit) edge E of LOOP.  After this
   transformation we have two loops one after the other - first-loop iterates
   FIRST_NITERS times, and second-loop iterates the remainder
   NITERS - FIRST_NITERS times.
   If the cost model indicates that it is profitable to emit a scalar
   loop instead of the vector one, then the prolog (epilog) loop will iterate
   for the entire unchanged scalar iterations of the loop.

   Input:
   - LOOP: the loop to be peeled.
   - E: the exit or entry edge of LOOP.
        If it is the entry edge, we peel the first iterations of LOOP.  In this
        case first-loop is the newly created loop, and second-loop is LOOP.
        If it is the exit edge, we peel the last iterations of LOOP.  In this
        case, first-loop is LOOP, and second-loop is the newly created loop.
   - NITERS: the number of iterations that LOOP iterates.
   - FIRST_NITERS: the number of iterations that the first-loop should iterate.
   - UPDATE_FIRST_LOOP_COUNT: specifies whether this function is responsible
        for updating the loop bound of the first-loop to FIRST_NITERS.  If it
        is false, the caller of this function may want to take care of this
        (this can be useful if we don't want new stmts added to first-loop).
   - TH: cost model profitability threshold of iterations for vectorization.
   - CHECK_PROFITABILITY: specifies whether the cost model check has not
        occurred during versioning and hence needs to occur during prologue
        generation, or whether the cost model check has not occurred during
        prologue generation and hence needs to occur during epilogue
        generation.

   Output:
   The function returns a pointer to the new loop-copy, or NULL if it failed
   to perform the transformation.

   The function generates two if-then-else guards: one before the first loop,
   and the other before the second loop:
   The first guard is:
     if (FIRST_NITERS == 0) then skip the first loop,
     and go directly to the second loop.
   The second guard is:
     if (FIRST_NITERS == NITERS) then skip the second loop.

   If the optional COND_EXPR and COND_EXPR_STMT_LIST arguments are given
   then the generated condition is combined with COND_EXPR and the
   statements in COND_EXPR_STMT_LIST are emitted together with it.

   FORNOW only simple loops are supported (see slpeel_can_duplicate_loop_p).
   FORNOW the resulting code will not be in loop-closed-ssa form.
*/
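
/* Hypothetical usage sketch (names and argument values are illustrative
   only, not taken from the callers): prologue peeling could look like

     new_loop
       = slpeel_tree_peel_loop_to_edge (loop, loop_preheader_edge (loop),
                                        &prolog_niters, ni_name,
                                        update_first_loop_count,
                                        th, check_profitability,
                                        NULL_TREE, NULL);

   i.e. passing the entry edge peels the first PROLOG_NITERS iterations,
   whereas passing single_exit (loop) instead would peel the last ones.  */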

static struct loop*
slpeel_tree_peel_loop_to_edge (struct loop *loop,
                               edge e, tree *first_niters,
                               tree niters, bool update_first_loop_count,
                               unsigned int th, bool check_profitability,
                               tree cond_expr, gimple_seq cond_expr_stmt_list)
{
  struct loop *new_loop = NULL, *first_loop, *second_loop;
  edge skip_e;
  tree pre_condition = NULL_TREE;
  basic_block bb_before_second_loop, bb_after_second_loop;
  basic_block bb_before_first_loop;
  basic_block bb_between_loops;
  basic_block new_exit_bb;
  gimple_stmt_iterator gsi;
  edge exit_e = single_exit (loop);
  LOC loop_loc;
  tree cost_pre_condition = NULL_TREE;

  if (!slpeel_can_duplicate_loop_p (loop, e))
    return NULL;

  /* If the loop has a virtual PHI, but exit bb doesn't, create a virtual PHI
     in the exit bb and rename all the uses after the loop.  This simplifies
     the *guard[12] routines, which assume loop closed SSA form for all PHIs
     (but normally loop closed SSA form doesn't require virtual PHIs to be
     in the same form).  Doing this early simplifies the checking what
     uses should be renamed.  */
  for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
    if (!is_gimple_reg (gimple_phi_result (gsi_stmt (gsi))))
      {
        gimple phi = gsi_stmt (gsi);
        for (gsi = gsi_start_phis (exit_e->dest);
             !gsi_end_p (gsi); gsi_next (&gsi))
          if (!is_gimple_reg (gimple_phi_result (gsi_stmt (gsi))))
            break;
        if (gsi_end_p (gsi))
          {
            gimple new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (phi)),
                                              exit_e->dest);
            tree vop = PHI_ARG_DEF_FROM_EDGE (phi, EDGE_SUCC (loop->latch, 0));
            imm_use_iterator imm_iter;
            gimple stmt;
            tree new_vop = make_ssa_name (SSA_NAME_VAR (PHI_RESULT (phi)),
                                          new_phi);
            use_operand_p use_p;

            add_phi_arg (new_phi, vop, exit_e, UNKNOWN_LOCATION);
            gimple_phi_set_result (new_phi, new_vop);
            FOR_EACH_IMM_USE_STMT (stmt, imm_iter, vop)
              if (stmt != new_phi && gimple_bb (stmt) != loop->header)
                FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
                  SET_USE (use_p, new_vop);
          }
        break;
      }

  /* 1. Generate a copy of LOOP and put it on E (E is the entry/exit of LOOP).
        Resulting CFG would be:

        first_loop:
        do {
        } while ...

        second_loop:
        do {
        } while ...

        orig_exit_bb:
   */

  if (!(new_loop = slpeel_tree_duplicate_loop_to_edge_cfg (loop, e)))
    {
      loop_loc = find_loop_location (loop);
      if (dump_file && (dump_flags & TDF_DETAILS))
        {
          if (loop_loc != UNKNOWN_LOC)
            fprintf (dump_file, "\n%s:%d: note: ",
                     LOC_FILE (loop_loc), LOC_LINE (loop_loc));
          fprintf (dump_file, "tree_duplicate_loop_to_edge_cfg failed.\n");
        }
      return NULL;
    }

  if (MAY_HAVE_DEBUG_STMTS)
    {
      gcc_assert (!adjust_vec);
      adjust_vec = VEC_alloc (adjust_info, stack, 32);
    }

  if (e == exit_e)
    {
      /* NEW_LOOP was placed after LOOP.  */
      first_loop = loop;
      second_loop = new_loop;
    }
  else
    {
      /* NEW_LOOP was placed before LOOP.  */
      first_loop = new_loop;
      second_loop = loop;
    }

  slpeel_update_phis_for_duplicate_loop (loop, new_loop, e == exit_e);
  rename_variables_in_loop (new_loop);


  /* 2.  Add the guard code in one of the following ways:

     2.a Add the guard that controls whether the first loop is executed.
         This occurs when this function is invoked for prologue or epilogue
         generation and when the cost model check can be done at compile time.

         Resulting CFG would be:

         bb_before_first_loop:
         if (FIRST_NITERS == 0) GOTO bb_before_second_loop
                                GOTO first-loop

         first_loop:
         do {
         } while ...

         bb_before_second_loop:

         second_loop:
         do {
         } while ...

         orig_exit_bb:

     2.b Add the cost model check that allows the prologue
         to iterate for the entire unchanged scalar
         iterations of the loop in the event that the cost
         model indicates that the scalar loop is more
         profitable than the vector one.  This occurs when
         this function is invoked for prologue generation
         and the cost model check needs to be done at run
         time.

         Resulting CFG after prologue peeling would be:

         if (scalar_loop_iterations <= th)
           FIRST_NITERS = scalar_loop_iterations

         bb_before_first_loop:
         if (FIRST_NITERS == 0) GOTO bb_before_second_loop
                                GOTO first-loop

         first_loop:
         do {
         } while ...

         bb_before_second_loop:

         second_loop:
         do {
         } while ...

         orig_exit_bb:

     2.c Add the cost model check that allows the epilogue
         to iterate for the entire unchanged scalar
         iterations of the loop in the event that the cost
         model indicates that the scalar loop is more
         profitable than the vector one.  This occurs when
         this function is invoked for epilogue generation
         and the cost model check needs to be done at run
         time.  This check is combined with any pre-existing
         check in COND_EXPR to avoid versioning.

         Resulting CFG after prologue peeling would be:

         bb_before_first_loop:
         if ((scalar_loop_iterations <= th)
             ||
             FIRST_NITERS == 0) GOTO bb_before_second_loop
                                GOTO first-loop

         first_loop:
         do {
         } while ...

         bb_before_second_loop:

         second_loop:
         do {
         } while ...

         orig_exit_bb:
   */

  bb_before_first_loop = split_edge (loop_preheader_edge (first_loop));
  bb_before_second_loop = split_edge (single_exit (first_loop));

  /* Epilogue peeling.  */
  if (!update_first_loop_count)
    {
      pre_condition =
        fold_build2 (LE_EXPR, boolean_type_node, *first_niters,
                     build_int_cst (TREE_TYPE (*first_niters), 0));
      if (check_profitability)
        {
          tree scalar_loop_iters
            = unshare_expr (LOOP_VINFO_NITERS_UNCHANGED
                              (loop_vec_info_for_loop (loop)));
          cost_pre_condition =
            fold_build2 (LE_EXPR, boolean_type_node, scalar_loop_iters,
                         build_int_cst (TREE_TYPE (scalar_loop_iters), th));

          pre_condition = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                                       cost_pre_condition, pre_condition);
        }
      if (cond_expr)
        {
          pre_condition =
            fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
                         pre_condition,
                         fold_build1 (TRUTH_NOT_EXPR, boolean_type_node,
                                      cond_expr));
        }
    }

  /* Prologue peeling.  */
  else
    {
      if (check_profitability)
        set_prologue_iterations (bb_before_first_loop, first_niters,
                                 loop, th);

      pre_condition =
        fold_build2 (LE_EXPR, boolean_type_node, *first_niters,
                     build_int_cst (TREE_TYPE (*first_niters), 0));
    }

  skip_e = slpeel_add_loop_guard (bb_before_first_loop, pre_condition,
                                  cond_expr_stmt_list,
                                  bb_before_second_loop, bb_before_first_loop);
  slpeel_update_phi_nodes_for_guard1 (skip_e, first_loop,
                                      first_loop == new_loop,
                                      &new_exit_bb);


  /* 3. Add the guard that controls whether the second loop is executed.
        Resulting CFG would be:

        bb_before_first_loop:
        if (FIRST_NITERS == 0) GOTO bb_before_second_loop (skip first loop)
                               GOTO first-loop

        first_loop:
        do {
        } while ...

        bb_between_loops:
        if (FIRST_NITERS == NITERS) GOTO bb_after_second_loop (skip second loop)
                                    GOTO bb_before_second_loop

        bb_before_second_loop:

        second_loop:
        do {
        } while ...

        bb_after_second_loop:

        orig_exit_bb:
   */

  bb_between_loops = new_exit_bb;
  bb_after_second_loop = split_edge (single_exit (second_loop));

  pre_condition =
    fold_build2 (EQ_EXPR, boolean_type_node, *first_niters, niters);
  skip_e = slpeel_add_loop_guard (bb_between_loops, pre_condition, NULL,
                                  bb_after_second_loop, bb_before_first_loop);
  slpeel_update_phi_nodes_for_guard2 (skip_e, second_loop,
                                      second_loop == new_loop, &new_exit_bb);

  /* 4. Make first-loop iterate FIRST_NITERS times, if requested.  */
  if (update_first_loop_count)
    slpeel_make_loop_iterate_ntimes (first_loop, *first_niters);

  delete_update_ssa ();

  adjust_vec_debug_stmts ();

  return new_loop;
}
/* Function find_loop_location.

   Extract the location of the loop in the source code.
   If the loop is not well formed for vectorization, an estimated
   location is calculated.
   Return the loop location on success and UNKNOWN_LOC otherwise.  */

LOC
find_loop_location (struct loop *loop)
{
  gimple stmt = NULL;
  basic_block bb;
  gimple_stmt_iterator si;

  if (!loop)
    return UNKNOWN_LOC;

  stmt = get_loop_exit_condition (loop);

  if (stmt && gimple_location (stmt) != UNKNOWN_LOC)
    return gimple_location (stmt);

  /* If we got here the loop is probably not "well formed",
     try to estimate the loop location.  */

  if (!loop->header)
    return UNKNOWN_LOC;

  bb = loop->header;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      stmt = gsi_stmt (si);
      if (gimple_location (stmt) != UNKNOWN_LOC)
        return gimple_location (stmt);
    }

  return UNKNOWN_LOC;
}

/* This function builds ni_name = number of iterations loop executes
   on the loop preheader.  If SEQ is given the stmt is instead emitted
   there.  */

static tree
vect_build_loop_niters (loop_vec_info loop_vinfo, gimple_seq seq)
{
  tree ni_name, var;
  gimple_seq stmts = NULL;
  edge pe;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree ni = unshare_expr (LOOP_VINFO_NITERS (loop_vinfo));

  var = create_tmp_var (TREE_TYPE (ni), "niters");
  add_referenced_var (var);
  ni_name = force_gimple_operand (ni, &stmts, false, var);

  pe = loop_preheader_edge (loop);
  if (stmts)
    {
      if (seq)
        gimple_seq_add_seq (&seq, stmts);
      else
        {
          basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
          gcc_assert (!new_bb);
        }
    }

  return ni_name;
}

/* This function generates the following statements:

   ni_name = number of iterations loop executes
   ratio = ni_name / vf
   ratio_mult_vf_name = ratio * vf

   and places them at the loop preheader edge or in COND_EXPR_STMT_LIST
   if that is non-NULL.  */
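
/* Worked example (illustration only): with ni_name = 10 and vf = 4
   (log_vf = 2), the code below computes ratio = 10 >> 2 = 2 and
   ratio_mult_vf_name = 2 << 2 = 8, leaving 10 - 8 = 2 iterations for the
   epilogue loop.  */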

static void
vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
                                 tree *ni_name_ptr,
                                 tree *ratio_mult_vf_name_ptr,
                                 tree *ratio_name_ptr,
                                 gimple_seq cond_expr_stmt_list)
{
  edge pe;
  basic_block new_bb;
  gimple_seq stmts;
  tree ni_name, ni_minus_gap_name;
  tree var;
  tree ratio_name;
  tree ratio_mult_vf_name;
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree ni = LOOP_VINFO_NITERS (loop_vinfo);
  int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  tree log_vf;

  pe = loop_preheader_edge (loop);

  /* Generate temporary variable that contains
     number of iterations loop executes.  */

  ni_name = vect_build_loop_niters (loop_vinfo, cond_expr_stmt_list);
  log_vf = build_int_cst (TREE_TYPE (ni), exact_log2 (vf));

  /* If epilogue loop is required because of data accesses with gaps, we
     subtract one iteration from the total number of iterations here for
     correct calculation of RATIO.  */
  if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
    {
      ni_minus_gap_name = fold_build2 (MINUS_EXPR, TREE_TYPE (ni_name),
                                       ni_name,
                                       build_one_cst (TREE_TYPE (ni_name)));
      if (!is_gimple_val (ni_minus_gap_name))
        {
          var = create_tmp_var (TREE_TYPE (ni), "ni_gap");
          add_referenced_var (var);

          stmts = NULL;
          ni_minus_gap_name = force_gimple_operand (ni_minus_gap_name, &stmts,
                                                    true, var);
          if (cond_expr_stmt_list)
            gimple_seq_add_seq (&cond_expr_stmt_list, stmts);
          else
            {
              pe = loop_preheader_edge (loop);
              new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
              gcc_assert (!new_bb);
            }
        }
    }
  else
    ni_minus_gap_name = ni_name;

  /* Create: ratio = ni >> log2(vf)  */

  ratio_name = fold_build2 (RSHIFT_EXPR, TREE_TYPE (ni_minus_gap_name),
                            ni_minus_gap_name, log_vf);
  if (!is_gimple_val (ratio_name))
    {
      var = create_tmp_var (TREE_TYPE (ni), "bnd");
      add_referenced_var (var);

      stmts = NULL;
      ratio_name = force_gimple_operand (ratio_name, &stmts, true, var);
      if (cond_expr_stmt_list)
        gimple_seq_add_seq (&cond_expr_stmt_list, stmts);
      else
        {
          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
          gcc_assert (!new_bb);
        }
    }

  /* Create: ratio_mult_vf = ratio << log2 (vf).  */

  ratio_mult_vf_name = fold_build2 (LSHIFT_EXPR, TREE_TYPE (ratio_name),
                                    ratio_name, log_vf);
  if (!is_gimple_val (ratio_mult_vf_name))
    {
      var = create_tmp_var (TREE_TYPE (ni), "ratio_mult_vf");
      add_referenced_var (var);

      stmts = NULL;
      ratio_mult_vf_name = force_gimple_operand (ratio_mult_vf_name, &stmts,
                                                 true, var);
      if (cond_expr_stmt_list)
        gimple_seq_add_seq (&cond_expr_stmt_list, stmts);
      else
        {
          pe = loop_preheader_edge (loop);
          new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
          gcc_assert (!new_bb);
        }
    }

  *ni_name_ptr = ni_name;
  *ratio_mult_vf_name_ptr = ratio_mult_vf_name;
  *ratio_name_ptr = ratio_name;

  return;
}
/* Function vect_can_advance_ivs_p

   In case the number of iterations that LOOP iterates is unknown at compile
   time, an epilog loop will be generated, and the loop induction variables
   (IVs) will be "advanced" to the value they are supposed to take just before
   the epilog loop.  Here we check that the access function of the loop IVs
   and the expression that represents the loop bound are simple enough.
   These restrictions will be relaxed in the future.  */
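
/* Illustrative example (not from the original sources): an IV such as
   i = i + 1 has the scalar evolution {init, +, 1}_loop; its evolution part
   (the step) is the constant 1 and passes the checks below.  An IV whose
   step itself changes from one iteration to the next has a chrec as its
   evolution part, e.g. {0, +, {1, +, 1}_loop}_loop, and is rejected by the
   tree_is_chrec test.  */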
b8698a0f 1639bool
ebfd146a
IR
1640vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
1641{
1642 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1643 basic_block bb = loop->header;
1644 gimple phi;
1645 gimple_stmt_iterator gsi;
1646
1647 /* Analyze phi functions of the loop header. */
1648
1649 if (vect_print_dump_info (REPORT_DETAILS))
1650 fprintf (vect_dump, "vect_can_advance_ivs_p:");
1651
1652 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1653 {
1654 tree access_fn = NULL;
1655 tree evolution_part;
1656
1657 phi = gsi_stmt (gsi);
1658 if (vect_print_dump_info (REPORT_DETAILS))
1659 {
1660 fprintf (vect_dump, "Analyze phi: ");
1661 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
1662 }
1663
1664 /* Skip virtual phi's. The data dependences that are associated with
1665 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
1666
1667 if (!is_gimple_reg (SSA_NAME_VAR (PHI_RESULT (phi))))
1668 {
1669 if (vect_print_dump_info (REPORT_DETAILS))
1670 fprintf (vect_dump, "virtual phi. skip.");
1671 continue;
1672 }
1673
1674 /* Skip reduction phis. */
1675
1676 if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (phi)) == vect_reduction_def)
1677 {
1678 if (vect_print_dump_info (REPORT_DETAILS))
1679 fprintf (vect_dump, "reduc phi. skip.");
1680 continue;
1681 }
1682
1683 /* Analyze the evolution function. */
1684
1685 access_fn = instantiate_parameters
1686 (loop, analyze_scalar_evolution (loop, PHI_RESULT (phi)));
1687
1688 if (!access_fn)
1689 {
1690 if (vect_print_dump_info (REPORT_DETAILS))
1691 fprintf (vect_dump, "No Access function.");
1692 return false;
1693 }
1694
1695 if (vect_print_dump_info (REPORT_DETAILS))
1696 {
1697 fprintf (vect_dump, "Access function of PHI: ");
1698 print_generic_expr (vect_dump, access_fn, TDF_SLIM);
1699 }
1700
1701 evolution_part = evolution_part_in_loop_num (access_fn, loop->num);
1702
1703 if (evolution_part == NULL_TREE)
1704 {
1705 if (vect_print_dump_info (REPORT_DETAILS))
1706 fprintf (vect_dump, "No evolution.");
1707 return false;
1708 }
1709
1710 /* FORNOW: We do not transform initial conditions of IVs
1711 whose evolution function is a polynomial of degree >= 2. */
1712
1713 if (tree_is_chrec (evolution_part))
1714 return false;
1715 }
1716
1717 return true;
1718}
1719
1720
1721/* Function vect_update_ivs_after_vectorizer.
1722
1723 "Advance" the induction variables of LOOP to the value they should take
1724 after the execution of LOOP. This is currently necessary because the
1725 vectorizer does not handle induction variables that are used after the
1726 loop. Such a situation occurs when the last iterations of LOOP are
1727 peeled, because:
1728 1. We introduced new uses after LOOP for IVs that were not originally used
1729 after LOOP: the IVs of LOOP are now used by an epilog loop.
1730 2. LOOP is going to be vectorized; this means that it will iterate N/VF
1731 times, whereas the loop IVs should be bumped N times.
1732
1733 Input:
1734 - LOOP - a loop that is going to be vectorized. The last few iterations
1735 of LOOP were peeled.
1736 - NITERS - the number of iterations that LOOP executes (before it is
1737 vectorized), i.e., the number of times the ivs should be bumped.
1738 - UPDATE_E - a successor edge of LOOP->exit that is on the (only) path
1739 coming out from LOOP on which there are uses of the LOOP ivs
1740 (this is the path from LOOP->exit to epilog_loop->preheader).
1741
1742 The new definitions of the ivs are placed in LOOP->exit.
1743 The phi args associated with the edge UPDATE_E in the bb
1744 UPDATE_E->dest are updated accordingly.
1745
1746 Assumption 1: Like the rest of the vectorizer, this function assumes
1747 a single loop exit that has a single predecessor.
1748
1749 Assumption 2: The phi nodes in the LOOP header and in update_bb are
1750 organized in the same order.
1751
1752 Assumption 3: The access function of the ivs is simple enough (see
1753 vect_can_advance_ivs_p). This assumption will be relaxed in the future.
1754
1755 Assumption 4: Exactly one of the successors of LOOP exit-bb is on a path
1756 coming out of LOOP on which the ivs of LOOP are used (this is the path
1757 that leads to the epilog loop; other paths skip the epilog loop). This
1758 path starts with the edge UPDATE_E, and its destination (denoted update_bb)
1759 needs to have its phis updated.
1760 */
1761
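/* As an illustration (numbers are hypothetical): an IV with initial value 0
   and step 1 that must be advanced by NITERS == 100 gets a new definition
   ni = 0 + 100 * 1 inserted at the loop-exit block, and the phi argument on
   UPDATE_E in update_bb is redirected to that new name.  */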
1762static void
1763vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
1764 edge update_e)
1765{
1766 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1767 basic_block exit_bb = single_exit (loop)->dest;
1768 gimple phi, phi1;
1769 gimple_stmt_iterator gsi, gsi1;
1770 basic_block update_bb = update_e->dest;
1771
1772 /* gcc_assert (vect_can_advance_ivs_p (loop_vinfo)); */
1773
1774 /* Make sure there exists a single-predecessor exit bb: */
1775 gcc_assert (single_pred_p (exit_bb));
1776
1777 for (gsi = gsi_start_phis (loop->header), gsi1 = gsi_start_phis (update_bb);
1778 !gsi_end_p (gsi) && !gsi_end_p (gsi1);
1779 gsi_next (&gsi), gsi_next (&gsi1))
1780 {
1781 tree init_expr;
1782 tree step_expr, off;
1783 tree type;
1784 tree var, ni, ni_name;
1785 gimple_stmt_iterator last_gsi;
1786 stmt_vec_info stmt_info;
1787
1788 phi = gsi_stmt (gsi);
1789 phi1 = gsi_stmt (gsi1);
1790 if (vect_print_dump_info (REPORT_DETAILS))
1791 {
1792 fprintf (vect_dump, "vect_update_ivs_after_vectorizer: phi: ");
1793 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
1794 }
1795
1796 /* Skip virtual phi's. */
1797 if (!is_gimple_reg (SSA_NAME_VAR (PHI_RESULT (phi))))
1798 {
1799 if (vect_print_dump_info (REPORT_DETAILS))
1800 fprintf (vect_dump, "virtual phi. skip.");
1801 continue;
1802 }
1803
1804 /* Skip reduction phis. */
1805 stmt_info = vinfo_for_stmt (phi);
1806 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
1807 {
1808 if (vect_print_dump_info (REPORT_DETAILS))
1809 fprintf (vect_dump, "reduc phi. skip.");
1810 continue;
1811 }
1812
1813 type = TREE_TYPE (gimple_phi_result (phi));
1814 step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
1815 step_expr = unshare_expr (step_expr);
1816
1817 /* FORNOW: We do not support IVs whose evolution function is a polynomial
1818 of degree >= 2 or exponential. */
1819 gcc_assert (!tree_is_chrec (step_expr));
1820
1821 init_expr = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
1822
1823 off = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
1824 fold_convert (TREE_TYPE (step_expr), niters),
1825 step_expr);
1826 if (POINTER_TYPE_P (type))
1827 ni = fold_build_pointer_plus (init_expr, off);
1828 else
1829 ni = fold_build2 (PLUS_EXPR, type,
1830 init_expr, fold_convert (type, off));
1831
1832 var = create_tmp_var (type, "tmp");
1833 add_referenced_var (var);
1834
1835 last_gsi = gsi_last_bb (exit_bb);
1836 ni_name = force_gimple_operand_gsi (&last_gsi, ni, false, var,
1837 true, GSI_SAME_STMT);
1838
1839 /* Fix phi expressions in the successor bb. */
1840 adjust_phi_and_debug_stmts (phi1, update_e, ni_name);
1841 }
1842}
1843
1844/* Function vect_do_peeling_for_loop_bound
1845
1846 Peel the last iterations of the loop represented by LOOP_VINFO.
1847 The peeled iterations form a new epilog loop. Given that the loop now
1848 iterates NITERS times, the new epilog loop iterates
1849 NITERS % VECTORIZATION_FACTOR times.
1850
1851 The original loop will later be made to iterate
1852 NITERS / VECTORIZATION_FACTOR times (this value is placed into RATIO).
1853
1854 COND_EXPR and COND_EXPR_STMT_LIST are combined with a new generated
1855 test. */
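/* As a concrete illustration (values are hypothetical): with NITERS == 1003
   and a vectorization factor of 8, the vectorized loop runs RATIO == 125
   iterations and the generated epilog loop runs 1003 % 8 == 3 iterations;
   a guard placed at the loop exit decides at run time whether the epilog
   loop is entered at all.  */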
1856
1857void
1858vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
1859 unsigned int th, bool check_profitability)
1860{
1861 tree ni_name, ratio_mult_vf_name;
1862 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1863 struct loop *new_loop;
1864 edge update_e;
1865 basic_block preheader;
1866 int loop_num;
1867 int max_iter;
1868 tree cond_expr = NULL_TREE;
1869 gimple_seq cond_expr_stmt_list = NULL;
1870
1871 if (vect_print_dump_info (REPORT_DETAILS))
1872 fprintf (vect_dump, "=== vect_do_peeling_for_loop_bound ===");
1873
1874 initialize_original_copy_tables ();
1875
1876 /* Generate the following variables on the preheader of original loop:
1877
1878 ni_name = number of iterations the original loop executes
1879 ratio = ni_name / vf
1880 ratio_mult_vf_name = ratio * vf */
1881 vect_generate_tmps_on_preheader (loop_vinfo, &ni_name,
1882 &ratio_mult_vf_name, ratio,
1883 cond_expr_stmt_list);
1884
1885 loop_num = loop->num;
1886
1887 new_loop = slpeel_tree_peel_loop_to_edge (loop, single_exit (loop),
1888 &ratio_mult_vf_name, ni_name, false,
1889 th, check_profitability,
1890 cond_expr, cond_expr_stmt_list);
1891 gcc_assert (new_loop);
1892 gcc_assert (loop_num == loop->num);
1893#ifdef ENABLE_CHECKING
1894 slpeel_verify_cfg_after_peeling (loop, new_loop);
1895#endif
1896
1897 /* A guard that controls whether the new_loop is to be executed or skipped
1898 is placed in LOOP->exit. LOOP->exit therefore has two successors - one
1899 is the preheader of NEW_LOOP, where the IVs from LOOP are used. The other
1900 is a bb after NEW_LOOP, where these IVs are not used. Find the edge that
1901 is on the path where the LOOP IVs are used and need to be updated. */
1902
1903 preheader = loop_preheader_edge (new_loop)->src;
1904 if (EDGE_PRED (preheader, 0)->src == single_exit (loop)->dest)
1905 update_e = EDGE_PRED (preheader, 0);
1906 else
1907 update_e = EDGE_PRED (preheader, 1);
1908
1909 /* Update IVs of original loop as if they were advanced
1910 by ratio_mult_vf_name steps. */
1911 vect_update_ivs_after_vectorizer (loop_vinfo, ratio_mult_vf_name, update_e);
1912
1913 max_iter = LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1;
1914 if (check_profitability)
1915 max_iter = MAX (max_iter, (int) th);
1916 record_niter_bound (new_loop, shwi_to_double_int (max_iter), false, true);
1917 if (dump_file && (dump_flags & TDF_DETAILS))
1918 fprintf (dump_file, "Setting upper bound of nb iterations for epilogue "
1919 "loop to %d\n", max_iter);
1920
1921 /* After peeling we have to reset scalar evolution analyzer. */
1922 scev_reset ();
1923
1924 free_original_copy_tables ();
1925}
1926
1927
1928/* Function vect_gen_niters_for_prolog_loop
1929
1930 Set the number of iterations for the loop represented by LOOP_VINFO
1931 to the minimum between LOOP_NITERS (the original iteration count of the loop)
1932 and the misalignment of DR - the data reference recorded in
1933 LOOP_VINFO_UNALIGNED_DR (LOOP_VINFO). As a result, after the execution of
1934 this loop, the data reference DR will refer to an aligned location.
1935
1936 The following computation is generated:
1937
1938 If the misalignment of DR is known at compile time:
1939 addr_mis = int mis = DR_MISALIGNMENT (dr);
1940 Else, compute address misalignment in bytes:
1941 addr_mis = addr & (vectype_size - 1)
1942
1943 prolog_niters = min (LOOP_NITERS, ((VF - addr_mis/elem_size)&(VF-1))/step)
1944
1945 (elem_size = element type size; an element is the scalar element whose type
1946 is the inner type of the vectype)
1947
1948 When the step of the data-ref in the loop is not 1 (as in interleaved data
1949 and SLP), the number of iterations of the prolog must be divided by the step
1950 (which is equal to the size of the interleaved group).
1951
1952 The above formulas assume that VF == number of elements in the vector. This
1953 may not hold when there are multiple-types in the loop.
1954 In this case, for some data-references in the loop the VF does not represent
1955 the number of elements that fit in the vector. Therefore, instead of VF we
1956 use TYPE_VECTOR_SUBPARTS. */
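/* A worked example under assumed values: for a 16-byte vector type with
   4 elements of 4 bytes each, a data reference whose start address has
   addr_mis == 8 bytes gives elem_misalign == 8 >> 2 == 2, so the prolog
   needs (4 - 2) & 3 == 2 scalar iterations before DR becomes aligned.  */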
1957
1958static tree
1959vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters)
1960{
1961 struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
1962 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1963 tree var;
1964 gimple_seq stmts;
1965 tree iters, iters_name;
1966 edge pe;
1967 basic_block new_bb;
1968 gimple dr_stmt = DR_STMT (dr);
1969 stmt_vec_info stmt_info = vinfo_for_stmt (dr_stmt);
1970 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1971 int vectype_align = TYPE_ALIGN (vectype) / BITS_PER_UNIT;
1972 tree niters_type = TREE_TYPE (loop_niters);
1973 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
1974
1975 pe = loop_preheader_edge (loop);
1976
1977 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
1978 {
1979 int npeel = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
1980
1981 if (vect_print_dump_info (REPORT_DETAILS))
1982 fprintf (vect_dump, "known peeling = %d.", npeel);
1983
1984 iters = build_int_cst (niters_type, npeel);
1985 }
1986 else
1987 {
1988 gimple_seq new_stmts = NULL;
1989 bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
1990 tree offset = negative
1991 ? size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1) : NULL_TREE;
1992 tree start_addr = vect_create_addr_base_for_vector_ref (dr_stmt,
1993 &new_stmts, offset, loop);
1994 tree type = unsigned_type_for (TREE_TYPE (start_addr));
1995 tree vectype_size_minus_1 = build_int_cst (type, vectype_align - 1);
1996 tree elem_size_log =
1997 build_int_cst (type, exact_log2 (vectype_align/nelements));
1998 tree nelements_minus_1 = build_int_cst (type, nelements - 1);
1999 tree nelements_tree = build_int_cst (type, nelements);
2000 tree byte_misalign;
2001 tree elem_misalign;
2002
2003 new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmts);
2004 gcc_assert (!new_bb);
2005
2006 /* Create: byte_misalign = addr & (vectype_size - 1) */
2007 byte_misalign =
2008 fold_build2 (BIT_AND_EXPR, type, fold_convert (type, start_addr),
2009 vectype_size_minus_1);
2010
2011 /* Create: elem_misalign = byte_misalign / element_size */
2012 elem_misalign =
2013 fold_build2 (RSHIFT_EXPR, type, byte_misalign, elem_size_log);
2014
2015 /* Create: (niters_type) (nelements - elem_misalign)&(nelements - 1) */
2016 if (negative)
2017 iters = fold_build2 (MINUS_EXPR, type, elem_misalign, nelements_tree);
2018 else
2019 iters = fold_build2 (MINUS_EXPR, type, nelements_tree, elem_misalign);
2020 iters = fold_build2 (BIT_AND_EXPR, type, iters, nelements_minus_1);
2021 iters = fold_convert (niters_type, iters);
2022 }
2023
2024 /* Create: prolog_loop_niters = min (iters, loop_niters) */
2025 /* If the loop bound is known at compile time we already verified that it is
2026 greater than vf; since the misalignment ('iters') is at most vf, there's
2027 no need to generate the MIN_EXPR in this case. */
2028 if (TREE_CODE (loop_niters) != INTEGER_CST)
2029 iters = fold_build2 (MIN_EXPR, niters_type, iters, loop_niters);
2030
2031 if (vect_print_dump_info (REPORT_DETAILS))
2032 {
2033 fprintf (vect_dump, "niters for prolog loop: ");
2034 print_generic_expr (vect_dump, iters, TDF_SLIM);
2035 }
2036
2037 var = create_tmp_var (niters_type, "prolog_loop_niters");
2038 add_referenced_var (var);
2039 stmts = NULL;
2040 iters_name = force_gimple_operand (iters, &stmts, false, var);
2041
2042 /* Insert stmt on loop preheader edge. */
2043 if (stmts)
2044 {
2045 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2046 gcc_assert (!new_bb);
2047 }
2048
2049 return iters_name;
2050}
2051
2052
2053/* Function vect_update_init_of_dr
2054
2055 NITERS iterations were peeled from LOOP. DR represents a data reference
2056 in LOOP. This function updates the information recorded in DR to
2057 account for the fact that the first NITERS iterations had already been
2058 executed. Specifically, it updates the OFFSET field of DR. */
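/* E.g. (hypothetical values): for a data reference with DR_STEP == 4 and
   NITERS == 2 peeled iterations, the code below adds 2 * 4 == 8 to
   DR_OFFSET, so subsequent address computations start past the elements
   already handled by the peeled iterations.  */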
2059
2060static void
2061vect_update_init_of_dr (struct data_reference *dr, tree niters)
2062{
2063 tree offset = DR_OFFSET (dr);
2064
2065 niters = fold_build2 (MULT_EXPR, sizetype,
2066 fold_convert (sizetype, niters),
2067 fold_convert (sizetype, DR_STEP (dr)));
2068 offset = fold_build2 (PLUS_EXPR, sizetype,
2069 fold_convert (sizetype, offset), niters);
2070 DR_OFFSET (dr) = offset;
2071}
2072
2073
2074/* Function vect_update_inits_of_drs
2075
2076 NITERS iterations were peeled from the loop represented by LOOP_VINFO.
2077 This function updates the information recorded for the data references in
2078 the loop to account for the fact that the first NITERS iterations had
2079 already been executed. Specifically, it updates the initial_condition of
2080 the access_function of all the data_references in the loop. */
2081
2082static void
2083vect_update_inits_of_drs (loop_vec_info loop_vinfo, tree niters)
2084{
2085 unsigned int i;
2086 VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2087 struct data_reference *dr;
2088
2089 if (vect_print_dump_info (REPORT_DETAILS))
2090 fprintf (vect_dump, "=== vect_update_inits_of_dr ===");
2091
2092 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
2093 vect_update_init_of_dr (dr, niters);
2094}
2095
2096
2097/* Function vect_do_peeling_for_alignment
2098
2099 Peel the first 'niters' iterations of the loop represented by LOOP_VINFO.
2100 'niters' is set to the misalignment of one of the data references in the
2101 loop, thereby forcing it to refer to an aligned location at the beginning
2102 of the execution of this loop. The data reference for which we are
2103 peeling is recorded in LOOP_VINFO_UNALIGNED_DR. */
2104
2105void
2106vect_do_peeling_for_alignment (loop_vec_info loop_vinfo,
2107 unsigned int th, bool check_profitability)
2108{
2109 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2110 tree niters_of_prolog_loop, ni_name;
2111 tree n_iters;
2112 tree wide_prolog_niters;
2113 struct loop *new_loop;
2114 int max_iter;
2115
2116 if (vect_print_dump_info (REPORT_DETAILS))
2117 fprintf (vect_dump, "=== vect_do_peeling_for_alignment ===");
2118
2119 initialize_original_copy_tables ();
2120
2121 ni_name = vect_build_loop_niters (loop_vinfo, NULL);
2122 niters_of_prolog_loop = vect_gen_niters_for_prolog_loop (loop_vinfo,
2123 ni_name);
2124
2125 /* Peel the prolog loop and iterate it niters_of_prolog_loop. */
2126 new_loop =
2127 slpeel_tree_peel_loop_to_edge (loop, loop_preheader_edge (loop),
2128 &niters_of_prolog_loop, ni_name, true,
2129 th, check_profitability, NULL_TREE, NULL);
2130
2131 gcc_assert (new_loop);
2132#ifdef ENABLE_CHECKING
2133 slpeel_verify_cfg_after_peeling (new_loop, loop);
2134#endif
2135 max_iter = LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1;
2136 if (check_profitability)
2137 max_iter = MAX (max_iter, (int) th);
2138 record_niter_bound (new_loop, shwi_to_double_int (max_iter), false, true);
2139 if (dump_file && (dump_flags & TDF_DETAILS))
2140 fprintf (dump_file, "Setting upper bound of nb iterations for prologue "
2141 "loop to %d\n", max_iter);
2142
2143 /* Update number of times loop executes. */
2144 n_iters = LOOP_VINFO_NITERS (loop_vinfo);
2145 LOOP_VINFO_NITERS (loop_vinfo) = fold_build2 (MINUS_EXPR,
2146 TREE_TYPE (n_iters), n_iters, niters_of_prolog_loop);
2147
2148 if (types_compatible_p (sizetype, TREE_TYPE (niters_of_prolog_loop)))
2149 wide_prolog_niters = niters_of_prolog_loop;
2150 else
2151 {
2152 gimple_seq seq = NULL;
2153 edge pe = loop_preheader_edge (loop);
2154 tree wide_iters = fold_convert (sizetype, niters_of_prolog_loop);
2155 tree var = create_tmp_var (sizetype, "prolog_loop_adjusted_niters");
2156 add_referenced_var (var);
2157 wide_prolog_niters = force_gimple_operand (wide_iters, &seq, false,
2158 var);
2159 if (seq)
2160 {
2161 /* Insert stmt on loop preheader edge. */
2162 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
2163 gcc_assert (!new_bb);
2164 }
2165 }
2166
2167 /* Update the init conditions of the access functions of all data refs. */
2168 vect_update_inits_of_drs (loop_vinfo, wide_prolog_niters);
2169
2170 /* After peeling we have to reset scalar evolution analyzer. */
2171 scev_reset ();
2172
2173 free_original_copy_tables ();
2174}
2175
2176
2177/* Function vect_create_cond_for_align_checks.
2178
2179 Create a conditional expression that represents the alignment checks for
2180 all of data references (array element references) whose alignment must be
2181 checked at runtime.
2182
2183 Input:
2184 COND_EXPR - input conditional expression. New conditions will be chained
2185 with logical AND operation.
2186 LOOP_VINFO - two fields of the loop information are used.
2187 LOOP_VINFO_PTR_MASK is the mask used to check the alignment.
2188 LOOP_VINFO_MAY_MISALIGN_STMTS contains the refs to be checked.
2189
2190 Output:
2191 COND_EXPR_STMT_LIST - statements needed to construct the conditional
2192 expression.
2193 The returned value is the conditional expression to be used in the if
2194 statement that controls which version of the loop gets executed at runtime.
2195
2196 The algorithm makes two assumptions:
2197 1) The number of bytes "n" in a vector is a power of 2.
2199 2) An address "a" is aligned if a%n is zero, and this
2199 test can be done as a&(n-1) == 0. For example, for 16
2200 byte vectors the test is a&0xf == 0. */
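/* For instance, with two data references and 16-byte vectors the generated
   condition is roughly  ((addr_1 | addr_2) & 0xf) == 0:  the addresses are
   ORed together below, masked with LOOP_VINFO_PTR_MASK and compared against
   zero, so a single test covers all recorded references.  */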
2201
2202static void
2203vect_create_cond_for_align_checks (loop_vec_info loop_vinfo,
2204 tree *cond_expr,
2205 gimple_seq *cond_expr_stmt_list)
2206{
2207 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2208 VEC(gimple,heap) *may_misalign_stmts
2209 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
2210 gimple ref_stmt;
2211 int mask = LOOP_VINFO_PTR_MASK (loop_vinfo);
2212 tree mask_cst;
2213 unsigned int i;
2214 tree int_ptrsize_type;
2215 char tmp_name[20];
2216 tree or_tmp_name = NULL_TREE;
2217 tree and_tmp, and_tmp_name;
2218 gimple and_stmt;
2219 tree ptrsize_zero;
2220 tree part_cond_expr;
2221
2222 /* Check that mask is one less than a power of 2, i.e., mask is
2223 all zeros followed by all ones. */
2224 gcc_assert ((mask != 0) && ((mask & (mask+1)) == 0));
2225
2226 int_ptrsize_type = signed_type_for (ptr_type_node);
2227
2228 /* Create expression (mask & (dr_1 || ... || dr_n)) where dr_i is the address
2229 of the first vector of the i'th data reference. */
2230
2231 FOR_EACH_VEC_ELT (gimple, may_misalign_stmts, i, ref_stmt)
2232 {
2233 gimple_seq new_stmt_list = NULL;
2234 tree addr_base;
2235 tree addr_tmp, addr_tmp_name;
2236 tree or_tmp, new_or_tmp_name;
2237 gimple addr_stmt, or_stmt;
2238 stmt_vec_info stmt_vinfo = vinfo_for_stmt (ref_stmt);
2239 tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
2240 bool negative = tree_int_cst_compare
2241 (DR_STEP (STMT_VINFO_DATA_REF (stmt_vinfo)), size_zero_node) < 0;
2242 tree offset = negative
2243 ? size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1) : NULL_TREE;
2244
2245 /* create: addr_tmp = (int)(address_of_first_vector) */
2246 addr_base =
2247 vect_create_addr_base_for_vector_ref (ref_stmt, &new_stmt_list,
2248 offset, loop);
2249 if (new_stmt_list != NULL)
2250 gimple_seq_add_seq (cond_expr_stmt_list, new_stmt_list);
2251
2252 sprintf (tmp_name, "%s%d", "addr2int", i);
2253 addr_tmp = create_tmp_var (int_ptrsize_type, tmp_name);
2254 add_referenced_var (addr_tmp);
2255 addr_tmp_name = make_ssa_name (addr_tmp, NULL);
2256 addr_stmt = gimple_build_assign_with_ops (NOP_EXPR, addr_tmp_name,
2257 addr_base, NULL_TREE);
2258 SSA_NAME_DEF_STMT (addr_tmp_name) = addr_stmt;
2259 gimple_seq_add_stmt (cond_expr_stmt_list, addr_stmt);
2260
2261 /* The addresses are ORed together. */
2262
2263 if (or_tmp_name != NULL_TREE)
2264 {
2265 /* create: or_tmp = or_tmp | addr_tmp */
2266 sprintf (tmp_name, "%s%d", "orptrs", i);
2267 or_tmp = create_tmp_var (int_ptrsize_type, tmp_name);
2268 add_referenced_var (or_tmp);
2269 new_or_tmp_name = make_ssa_name (or_tmp, NULL);
2270 or_stmt = gimple_build_assign_with_ops (BIT_IOR_EXPR,
2271 new_or_tmp_name,
2272 or_tmp_name, addr_tmp_name);
2273 SSA_NAME_DEF_STMT (new_or_tmp_name) = or_stmt;
2274 gimple_seq_add_stmt (cond_expr_stmt_list, or_stmt);
2275 or_tmp_name = new_or_tmp_name;
2276 }
2277 else
2278 or_tmp_name = addr_tmp_name;
2279
2280 } /* end for i */
2281
2282 mask_cst = build_int_cst (int_ptrsize_type, mask);
2283
2284 /* create: and_tmp = or_tmp & mask */
2285 and_tmp = create_tmp_var (int_ptrsize_type, "andmask" );
2286 add_referenced_var (and_tmp);
2287 and_tmp_name = make_ssa_name (and_tmp, NULL);
2288
2289 and_stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, and_tmp_name,
2290 or_tmp_name, mask_cst);
2291 SSA_NAME_DEF_STMT (and_tmp_name) = and_stmt;
2292 gimple_seq_add_stmt (cond_expr_stmt_list, and_stmt);
2293
2294 /* Make and_tmp the left operand of the conditional test against zero.
2295 if and_tmp has a nonzero bit then some address is unaligned. */
2296 ptrsize_zero = build_int_cst (int_ptrsize_type, 0);
2297 part_cond_expr = fold_build2 (EQ_EXPR, boolean_type_node,
2298 and_tmp_name, ptrsize_zero);
2299 if (*cond_expr)
2300 *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2301 *cond_expr, part_cond_expr);
2302 else
2303 *cond_expr = part_cond_expr;
2304}
2305
2306
2307/* Function vect_vfa_segment_size.
2308
2309 Create an expression that computes the size of the segment
2310 that will be accessed for a data reference. The function takes into
2311 account that realignment loads may access one more vector.
2312
2313 Input:
2314 DR: The data reference.
2315 LENGTH_FACTOR: segment length to consider.
2316
2317 Return an expression whose value is the size of segment which will be
2318 accessed by DR. */
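/* For instance (assumed values): with DR_STEP == 4 bytes and
   LENGTH_FACTOR == 4 the segment size is 16 bytes; if the reference uses
   the dr_explicit_realign_optimized scheme, one extra vector size is added
   because the realigning load may also touch the following vector.  */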
2319
2320static tree
2321vect_vfa_segment_size (struct data_reference *dr, tree length_factor)
2322{
2323 tree segment_length;
2324
2325 if (integer_zerop (DR_STEP (dr)))
2326 segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
2327 else
2328 segment_length = size_binop (MULT_EXPR,
2329 fold_convert (sizetype, DR_STEP (dr)),
2330 fold_convert (sizetype, length_factor));
2331
2332 if (vect_supportable_dr_alignment (dr, false)
2333 == dr_explicit_realign_optimized)
2334 {
2335 tree vector_size = TYPE_SIZE_UNIT
2336 (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))));
2337
2338 segment_length = size_binop (PLUS_EXPR, segment_length, vector_size);
2339 }
2340 return segment_length;
2341}
2342
2343
2344/* Function vect_create_cond_for_alias_checks.
2345
2346 Create a conditional expression that represents the run-time checks for
2347 overlapping of address ranges represented by a list of data reference
2348 relations passed as input.
2349
2350 Input:
2351 COND_EXPR - input conditional expression. New conditions will be chained
2352 with logical AND operation.
2353 LOOP_VINFO - field LOOP_VINFO_MAY_ALIAS_STMTS contains the list of ddrs
2354 to be checked.
2355
2356 Output:
2357 COND_EXPR - conditional expression.
2358 COND_EXPR_STMT_LIST - statements needed to construct the conditional
2359 expression.
2360
2361
2362 The returned value is the conditional expression to be used in the if
2363 statement that controls which version of the loop gets executed at runtime.
2364*/
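/* As an illustration, for one pair of possibly aliasing references with a
   segment length of 16 bytes the generated test is

     (addr_1 + 16 <= addr_2) || (addr_2 + 16 <= addr_1)

   i.e. the vectorized version runs only if the two segments are disjoint;
   the conditions for all pairs are ANDed together.  */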
2365
2366static void
2367vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo,
2368 tree * cond_expr,
2369 gimple_seq * cond_expr_stmt_list)
2370{
2371 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2372 VEC (ddr_p, heap) * may_alias_ddrs =
2373 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
2374 int vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2375 tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
2376
2377 ddr_p ddr;
2378 unsigned int i;
2379 tree part_cond_expr, length_factor;
2380
2381 /* Create expression
2382 ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
2383 || (load_ptr_0 + load_segment_length_0) <= store_ptr_0))
2384 &&
2385 ...
2386 &&
2387 ((store_ptr_n + store_segment_length_n) <= load_ptr_n)
2388 || (load_ptr_n + load_segment_length_n) <= store_ptr_n)) */
2389
2390 if (VEC_empty (ddr_p, may_alias_ddrs))
2391 return;
2392
2393 FOR_EACH_VEC_ELT (ddr_p, may_alias_ddrs, i, ddr)
2394 {
2395 struct data_reference *dr_a, *dr_b;
2396 gimple dr_group_first_a, dr_group_first_b;
2397 tree addr_base_a, addr_base_b;
2398 tree segment_length_a, segment_length_b;
2399 gimple stmt_a, stmt_b;
2400 tree seg_a_min, seg_a_max, seg_b_min, seg_b_max;
2401
2402 dr_a = DDR_A (ddr);
2403 stmt_a = DR_STMT (DDR_A (ddr));
2404 dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
2405 if (dr_group_first_a)
2406 {
2407 stmt_a = dr_group_first_a;
2408 dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
2409 }
2410
2411 dr_b = DDR_B (ddr);
2412 stmt_b = DR_STMT (DDR_B (ddr));
2413 dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
2414 if (dr_group_first_b)
2415 {
2416 stmt_b = dr_group_first_b;
2417 dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b));
2418 }
2419
2420 addr_base_a =
2421 vect_create_addr_base_for_vector_ref (stmt_a, cond_expr_stmt_list,
2422 NULL_TREE, loop);
2423 addr_base_b =
2424 vect_create_addr_base_for_vector_ref (stmt_b, cond_expr_stmt_list,
2425 NULL_TREE, loop);
2426
2427 if (!operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0))
2428 length_factor = scalar_loop_iters;
2429 else
2430 length_factor = size_int (vect_factor);
2431 segment_length_a = vect_vfa_segment_size (dr_a, length_factor);
2432 segment_length_b = vect_vfa_segment_size (dr_b, length_factor);
2433
2434 if (vect_print_dump_info (REPORT_DR_DETAILS))
2435 {
2436 fprintf (vect_dump,
2437 "create runtime check for data references ");
2438 print_generic_expr (vect_dump, DR_REF (dr_a), TDF_SLIM);
2439 fprintf (vect_dump, " and ");
2440 print_generic_expr (vect_dump, DR_REF (dr_b), TDF_SLIM);
2441 }
2442
2443 seg_a_min = addr_base_a;
2444 seg_a_max = fold_build_pointer_plus (addr_base_a, segment_length_a);
2445 if (tree_int_cst_compare (DR_STEP (dr_a), size_zero_node) < 0)
2446 seg_a_min = seg_a_max, seg_a_max = addr_base_a;
2447
2448 seg_b_min = addr_base_b;
2449 seg_b_max = fold_build_pointer_plus (addr_base_b, segment_length_b);
2450 if (tree_int_cst_compare (DR_STEP (dr_b), size_zero_node) < 0)
2451 seg_b_min = seg_b_max, seg_b_max = addr_base_b;
2452
2453 part_cond_expr =
2454 fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
2455 fold_build2 (LE_EXPR, boolean_type_node, seg_a_max, seg_b_min),
2456 fold_build2 (LE_EXPR, boolean_type_node, seg_b_max, seg_a_min));
2457
2458 if (*cond_expr)
2459 *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2460 *cond_expr, part_cond_expr);
2461 else
2462 *cond_expr = part_cond_expr;
2463 }
2464
2465 if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
2466 fprintf (vect_dump, "created %u versioning for alias checks.\n",
2467 VEC_length (ddr_p, may_alias_ddrs));
2468}
2469
2470
2471/* Function vect_loop_versioning.
2472
2473 If the loop has data references that may or may not be aligned and/or
2474 has data reference relations whose independence was not proven, then
2475 two versions of the loop need to be generated, one which is vectorized
2476 and one which isn't. A test is then generated to control which of the
2477 loops is executed. The test checks for the alignment of all of the
2478 data references that may or may not be aligned. An additional
2479 sequence of runtime tests is generated for each pair of DDRs whose
2480 independence was not proven. The vectorized version of the loop is
2481 executed only if both alias and alignment tests are passed.
2482
2483 The test generated to check which version of the loop is executed
2484 is modified to also check for profitability as indicated by the
2485 cost model initially.
2486
2487 The versioning precondition(s) are placed in *COND_EXPR and
2488 *COND_EXPR_STMT_LIST. */
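/* Schematically, the resulting control flow is

     if (cond_expr)   <-- alignment/alias checks, plus the cost-model check
       <loop to be vectorized>
     else
       <scalar copy of the loop>

   with COND_EXPR_STMT_LIST emitted in the block that computes the
   condition.  */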
2489
2490void
2491vect_loop_versioning (loop_vec_info loop_vinfo,
2492 unsigned int th, bool check_profitability)
2493{
2494 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2495 basic_block condition_bb;
2496 gimple_stmt_iterator gsi, cond_exp_gsi;
2497 basic_block merge_bb;
2498 basic_block new_exit_bb;
2499 edge new_exit_e, e;
2500 gimple orig_phi, new_phi;
2501 tree cond_expr = NULL_TREE;
2502 gimple_seq cond_expr_stmt_list = NULL;
2503 tree arg;
2504 unsigned prob = 4 * REG_BR_PROB_BASE / 5;
2505 gimple_seq gimplify_stmt_list = NULL;
2506 tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
2507
2508 if (check_profitability)
2509 {
2510 cond_expr = fold_build2 (GT_EXPR, boolean_type_node, scalar_loop_iters,
2511 build_int_cst (TREE_TYPE (scalar_loop_iters), th));
2512 cond_expr = force_gimple_operand_1 (cond_expr, &cond_expr_stmt_list,
2513 is_gimple_condexpr, NULL_TREE);
2514 }
2515
2516 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
2517 vect_create_cond_for_align_checks (loop_vinfo, &cond_expr,
2518 &cond_expr_stmt_list);
2519
2520 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2521 vect_create_cond_for_alias_checks (loop_vinfo, &cond_expr,
2522 &cond_expr_stmt_list);
2523
2524 cond_expr = force_gimple_operand_1 (cond_expr, &gimplify_stmt_list,
2525 is_gimple_condexpr, NULL_TREE);
2526 gimple_seq_add_seq (&cond_expr_stmt_list, gimplify_stmt_list);
2527
2528 initialize_original_copy_tables ();
2529 loop_version (loop, cond_expr, &condition_bb,
2530 prob, prob, REG_BR_PROB_BASE - prob, true);
2531 free_original_copy_tables();
2532
2533 /* Loop versioning violates an assumption we try to maintain during
2534 vectorization - that the loop exit block has a single predecessor.
2535 After versioning, the exit block of both loop versions is the same
2536 basic block (i.e. it has two predecessors). Just in order to simplify
2537 following transformations in the vectorizer, we fix this situation
2538 here by adding a new (empty) block on the exit-edge of the loop,
2539 with the proper loop-exit phis to maintain loop-closed-form. */
2540
2541 merge_bb = single_exit (loop)->dest;
2542 gcc_assert (EDGE_COUNT (merge_bb->preds) == 2);
2543 new_exit_bb = split_edge (single_exit (loop));
2544 new_exit_e = single_exit (loop);
2545 e = EDGE_SUCC (new_exit_bb, 0);
2546
2547 for (gsi = gsi_start_phis (merge_bb); !gsi_end_p (gsi); gsi_next (&gsi))
2548 {
2549 orig_phi = gsi_stmt (gsi);
2550 new_phi = create_phi_node (SSA_NAME_VAR (PHI_RESULT (orig_phi)),
2551 new_exit_bb);
2552 arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
2553 add_phi_arg (new_phi, arg, new_exit_e,
2554 gimple_phi_arg_location_from_edge (orig_phi, e));
2555 adjust_phi_and_debug_stmts (orig_phi, e, PHI_RESULT (new_phi));
2556 }
2557
2558 /* End loop-exit-fixes after versioning. */
2559
2560 update_ssa (TODO_update_ssa);
2561 if (cond_expr_stmt_list)
2562 {
2563 cond_exp_gsi = gsi_last_bb (condition_bb);
2564 gsi_insert_seq_before (&cond_exp_gsi, cond_expr_stmt_list,
2565 GSI_SAME_STMT);
2566 }
2567}
2568