]> git.ipfire.org Git - thirdparty/gcc.git/blame - gcc/tree-vect-loop-manip.c
Remove unnecessary VEC function overloads.
[thirdparty/gcc.git] / gcc / tree-vect-loop-manip.c
CommitLineData
b8698a0f 1/* Vectorizer Specific Loop Manipulations
5d2eb24b 2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2012
7c028163 3 Free Software Foundation, Inc.
b8698a0f 4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
ebfd146a
IR
5 and Ira Rosen <irar@il.ibm.com>
6
7This file is part of GCC.
8
9GCC is free software; you can redistribute it and/or modify it under
10the terms of the GNU General Public License as published by the Free
11Software Foundation; either version 3, or (at your option) any later
12version.
13
14GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15WARRANTY; without even the implied warranty of MERCHANTABILITY or
16FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17for more details.
18
19You should have received a copy of the GNU General Public License
20along with GCC; see the file COPYING3. If not see
21<http://www.gnu.org/licenses/>. */
22
23#include "config.h"
24#include "system.h"
25#include "coretypes.h"
26#include "tm.h"
27#include "ggc.h"
28#include "tree.h"
29#include "basic-block.h"
cf835838 30#include "gimple-pretty-print.h"
ebfd146a 31#include "tree-flow.h"
7ee2468b 32#include "tree-pass.h"
ebfd146a 33#include "cfgloop.h"
718f9c0f 34#include "diagnostic-core.h"
ebfd146a
IR
35#include "tree-scalar-evolution.h"
36#include "tree-vectorizer.h"
37#include "langhooks.h"
38
39/*************************************************************************
40 Simple Loop Peeling Utilities
41
42 Utilities to support loop peeling for vectorization purposes.
43 *************************************************************************/
44
45
46/* Renames the use *OP_P. */
47
48static void
49rename_use_op (use_operand_p op_p)
50{
51 tree new_name;
52
53 if (TREE_CODE (USE_FROM_PTR (op_p)) != SSA_NAME)
54 return;
55
56 new_name = get_current_def (USE_FROM_PTR (op_p));
57
58 /* Something defined outside of the loop. */
59 if (!new_name)
60 return;
61
62 /* An ordinary ssa name defined in the loop. */
63
64 SET_USE (op_p, new_name);
65}
66
67
68/* Renames the variables in basic block BB. */
69
70void
71rename_variables_in_bb (basic_block bb)
72{
73 gimple_stmt_iterator gsi;
74 gimple stmt;
75 use_operand_p use_p;
76 ssa_op_iter iter;
77 edge e;
78 edge_iterator ei;
79 struct loop *loop = bb->loop_father;
80
81 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
82 {
83 stmt = gsi_stmt (gsi);
84 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
85 rename_use_op (use_p);
86 }
87
88 FOR_EACH_EDGE (e, ei, bb->succs)
89 {
90 if (!flow_bb_inside_loop_p (loop, e->dest))
91 continue;
92 for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
93 rename_use_op (PHI_ARG_DEF_PTR_FROM_EDGE (gsi_stmt (gsi), e));
94 }
95}
96
97
98/* Renames variables in new generated LOOP. */
99
100void
101rename_variables_in_loop (struct loop *loop)
102{
103 unsigned i;
104 basic_block *bbs;
105
106 bbs = get_loop_body (loop);
107
108 for (i = 0; i < loop->num_nodes; i++)
109 rename_variables_in_bb (bbs[i]);
110
111 free (bbs);
112}
113
684f25f4
AO
114typedef struct
115{
116 tree from, to;
117 basic_block bb;
118} adjust_info;
119
120DEF_VEC_O(adjust_info);
121DEF_VEC_ALLOC_O_STACK(adjust_info);
122#define VEC_adjust_info_stack_alloc(alloc) VEC_stack_alloc (adjust_info, alloc)
123
124/* A stack of values to be adjusted in debug stmts. We have to
125 process them LIFO, so that the closest substitution applies. If we
126 processed them FIFO, without the stack, we might substitute uses
127 with a PHI DEF that would soon become non-dominant, and when we got
128 to the suitable one, it wouldn't have anything to substitute any
129 more. */
130static VEC(adjust_info, stack) *adjust_vec;
131
132/* Adjust any debug stmts that referenced AI->from values to use the
133 loop-closed AI->to, if the references are dominated by AI->bb and
134 not by the definition of AI->from. */
135
136static void
137adjust_debug_stmts_now (adjust_info *ai)
138{
139 basic_block bbphi = ai->bb;
140 tree orig_def = ai->from;
141 tree new_def = ai->to;
142 imm_use_iterator imm_iter;
143 gimple stmt;
144 basic_block bbdef = gimple_bb (SSA_NAME_DEF_STMT (orig_def));
145
146 gcc_assert (dom_info_available_p (CDI_DOMINATORS));
147
148 /* Adjust any debug stmts that held onto non-loop-closed
149 references. */
150 FOR_EACH_IMM_USE_STMT (stmt, imm_iter, orig_def)
151 {
152 use_operand_p use_p;
153 basic_block bbuse;
154
155 if (!is_gimple_debug (stmt))
156 continue;
157
158 gcc_assert (gimple_debug_bind_p (stmt));
159
160 bbuse = gimple_bb (stmt);
161
162 if ((bbuse == bbphi
163 || dominated_by_p (CDI_DOMINATORS, bbuse, bbphi))
164 && !(bbuse == bbdef
165 || dominated_by_p (CDI_DOMINATORS, bbuse, bbdef)))
166 {
167 if (new_def)
168 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
169 SET_USE (use_p, new_def);
170 else
171 {
172 gimple_debug_bind_reset_value (stmt);
173 update_stmt (stmt);
174 }
175 }
176 }
177}
178
179/* Adjust debug stmts as scheduled before. */
180
181static void
182adjust_vec_debug_stmts (void)
183{
184 if (!MAY_HAVE_DEBUG_STMTS)
185 return;
186
187 gcc_assert (adjust_vec);
188
189 while (!VEC_empty (adjust_info, adjust_vec))
190 {
0823efed 191 adjust_debug_stmts_now (&VEC_last (adjust_info, adjust_vec));
684f25f4
AO
192 VEC_pop (adjust_info, adjust_vec);
193 }
194
195 VEC_free (adjust_info, stack, adjust_vec);
196}
197
198/* Adjust any debug stmts that referenced FROM values to use the
199 loop-closed TO, if the references are dominated by BB and not by
200 the definition of FROM. If adjust_vec is non-NULL, adjustments
201 will be postponed until adjust_vec_debug_stmts is called. */
202
203static void
204adjust_debug_stmts (tree from, tree to, basic_block bb)
205{
206 adjust_info ai;
207
a471762f
RG
208 if (MAY_HAVE_DEBUG_STMTS
209 && TREE_CODE (from) == SSA_NAME
210 && ! virtual_operand_p (from))
684f25f4
AO
211 {
212 ai.from = from;
213 ai.to = to;
214 ai.bb = bb;
215
216 if (adjust_vec)
f32682ca 217 VEC_safe_push (adjust_info, stack, adjust_vec, ai);
684f25f4
AO
218 else
219 adjust_debug_stmts_now (&ai);
220 }
221}
222
223/* Change E's phi arg in UPDATE_PHI to NEW_DEF, and record information
224 to adjust any debug stmts that referenced the old phi arg,
225 presumably non-loop-closed references left over from other
226 transformations. */
227
228static void
229adjust_phi_and_debug_stmts (gimple update_phi, edge e, tree new_def)
230{
231 tree orig_def = PHI_ARG_DEF_FROM_EDGE (update_phi, e);
232
233 SET_PHI_ARG_DEF (update_phi, e->dest_idx, new_def);
234
235 if (MAY_HAVE_DEBUG_STMTS)
236 adjust_debug_stmts (orig_def, PHI_RESULT (update_phi),
237 gimple_bb (update_phi));
238}
239
ebfd146a
IR
240
241/* Update the PHI nodes of NEW_LOOP.
242
243 NEW_LOOP is a duplicate of ORIG_LOOP.
244 AFTER indicates whether NEW_LOOP executes before or after ORIG_LOOP:
245 AFTER is true if NEW_LOOP executes after ORIG_LOOP, and false if it
246 executes before it. */
247
248static void
249slpeel_update_phis_for_duplicate_loop (struct loop *orig_loop,
250 struct loop *new_loop, bool after)
251{
252 tree new_ssa_name;
253 gimple phi_new, phi_orig;
254 tree def;
255 edge orig_loop_latch = loop_latch_edge (orig_loop);
256 edge orig_entry_e = loop_preheader_edge (orig_loop);
257 edge new_loop_exit_e = single_exit (new_loop);
258 edge new_loop_entry_e = loop_preheader_edge (new_loop);
259 edge entry_arg_e = (after ? orig_loop_latch : orig_entry_e);
260 gimple_stmt_iterator gsi_new, gsi_orig;
261
262 /*
263 step 1. For each loop-header-phi:
264 Add the first phi argument for the phi in NEW_LOOP
265 (the one associated with the entry of NEW_LOOP)
266
267 step 2. For each loop-header-phi:
268 Add the second phi argument for the phi in NEW_LOOP
269 (the one associated with the latch of NEW_LOOP)
270
271 step 3. Update the phis in the successor block of NEW_LOOP.
272
273 case 1: NEW_LOOP was placed before ORIG_LOOP:
274 The successor block of NEW_LOOP is the header of ORIG_LOOP.
275 Updating the phis in the successor block can therefore be done
276 along with the scanning of the loop header phis, because the
277 header blocks of ORIG_LOOP and NEW_LOOP have exactly the same
278 phi nodes, organized in the same order.
279
280 case 2: NEW_LOOP was placed after ORIG_LOOP:
b8698a0f 281 The successor block of NEW_LOOP is the original exit block of
ebfd146a
IR
282 ORIG_LOOP - the phis to be updated are the loop-closed-ssa phis.
283 We postpone updating these phis to a later stage (when
284 loop guards are added).
285 */
286
287
288 /* Scan the phis in the headers of the old and new loops
289 (they are organized in exactly the same order). */
290
291 for (gsi_new = gsi_start_phis (new_loop->header),
292 gsi_orig = gsi_start_phis (orig_loop->header);
293 !gsi_end_p (gsi_new) && !gsi_end_p (gsi_orig);
294 gsi_next (&gsi_new), gsi_next (&gsi_orig))
295 {
f5045c96 296 source_location locus;
ebfd146a
IR
297 phi_new = gsi_stmt (gsi_new);
298 phi_orig = gsi_stmt (gsi_orig);
299
300 /* step 1. */
301 def = PHI_ARG_DEF_FROM_EDGE (phi_orig, entry_arg_e);
f5045c96 302 locus = gimple_phi_arg_location_from_edge (phi_orig, entry_arg_e);
9e227d60 303 add_phi_arg (phi_new, def, new_loop_entry_e, locus);
ebfd146a
IR
304
305 /* step 2. */
306 def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_loop_latch);
f5045c96 307 locus = gimple_phi_arg_location_from_edge (phi_orig, orig_loop_latch);
ebfd146a
IR
308 if (TREE_CODE (def) != SSA_NAME)
309 continue;
310
311 new_ssa_name = get_current_def (def);
312 if (!new_ssa_name)
313 {
314 /* This only happens if there are no definitions
315 inside the loop. use the phi_result in this case. */
316 new_ssa_name = PHI_RESULT (phi_new);
317 }
318
319 /* An ordinary ssa name defined in the loop. */
9e227d60 320 add_phi_arg (phi_new, new_ssa_name, loop_latch_edge (new_loop), locus);
ebfd146a 321
684f25f4
AO
322 /* Drop any debug references outside the loop, if they would
323 become ill-formed SSA. */
324 adjust_debug_stmts (def, NULL, single_exit (orig_loop)->dest);
325
ebfd146a
IR
326 /* step 3 (case 1). */
327 if (!after)
328 {
329 gcc_assert (new_loop_exit_e == orig_entry_e);
684f25f4 330 adjust_phi_and_debug_stmts (phi_orig, new_loop_exit_e, new_ssa_name);
ebfd146a
IR
331 }
332 }
333}
334
335
336/* Update PHI nodes for a guard of the LOOP.
337
338 Input:
339 - LOOP, GUARD_EDGE: LOOP is a loop for which we added guard code that
340 controls whether LOOP is to be executed. GUARD_EDGE is the edge that
341 originates from the guard-bb, skips LOOP and reaches the (unique) exit
342 bb of LOOP. This loop-exit-bb is an empty bb with one successor.
343 We denote this bb NEW_MERGE_BB because before the guard code was added
344 it had a single predecessor (the LOOP header), and now it became a merge
345 point of two paths - the path that ends with the LOOP exit-edge, and
346 the path that ends with GUARD_EDGE.
347 - NEW_EXIT_BB: New basic block that is added by this function between LOOP
348 and NEW_MERGE_BB. It is used to place loop-closed-ssa-form exit-phis.
349
350 ===> The CFG before the guard-code was added:
351 LOOP_header_bb:
352 loop_body
353 if (exit_loop) goto update_bb
354 else goto LOOP_header_bb
355 update_bb:
356
357 ==> The CFG after the guard-code was added:
358 guard_bb:
359 if (LOOP_guard_condition) goto new_merge_bb
360 else goto LOOP_header_bb
361 LOOP_header_bb:
362 loop_body
363 if (exit_loop_condition) goto new_merge_bb
364 else goto LOOP_header_bb
365 new_merge_bb:
366 goto update_bb
367 update_bb:
368
369 ==> The CFG after this function:
370 guard_bb:
371 if (LOOP_guard_condition) goto new_merge_bb
372 else goto LOOP_header_bb
373 LOOP_header_bb:
374 loop_body
375 if (exit_loop_condition) goto new_exit_bb
376 else goto LOOP_header_bb
377 new_exit_bb:
378 new_merge_bb:
379 goto update_bb
380 update_bb:
381
382 This function:
383 1. creates and updates the relevant phi nodes to account for the new
384 incoming edge (GUARD_EDGE) into NEW_MERGE_BB. This involves:
385 1.1. Create phi nodes at NEW_MERGE_BB.
386 1.2. Update the phi nodes at the successor of NEW_MERGE_BB (denoted
387 UPDATE_BB). UPDATE_BB was the exit-bb of LOOP before NEW_MERGE_BB
388 2. preserves loop-closed-ssa-form by creating the required phi nodes
389 at the exit of LOOP (i.e, in NEW_EXIT_BB).
390
391 There are two flavors to this function:
392
393 slpeel_update_phi_nodes_for_guard1:
394 Here the guard controls whether we enter or skip LOOP, where LOOP is a
395 prolog_loop (loop1 below), and the new phis created in NEW_MERGE_BB are
396 for variables that have phis in the loop header.
397
398 slpeel_update_phi_nodes_for_guard2:
399 Here the guard controls whether we enter or skip LOOP, where LOOP is an
400 epilog_loop (loop2 below), and the new phis created in NEW_MERGE_BB are
401 for variables that have phis in the loop exit.
402
403 I.E., the overall structure is:
404
405 loop1_preheader_bb:
406 guard1 (goto loop1/merge1_bb)
407 loop1
408 loop1_exit_bb:
409 guard2 (goto merge1_bb/merge2_bb)
410 merge1_bb
411 loop2
412 loop2_exit_bb
413 merge2_bb
414 next_bb
415
416 slpeel_update_phi_nodes_for_guard1 takes care of creating phis in
417 loop1_exit_bb and merge1_bb. These are entry phis (phis for the vars
418 that have phis in loop1->header).
419
420 slpeel_update_phi_nodes_for_guard2 takes care of creating phis in
421 loop2_exit_bb and merge2_bb. These are exit phis (phis for the vars
422 that have phis in next_bb). It also adds some of these phis to
423 loop1_exit_bb.
424
425 slpeel_update_phi_nodes_for_guard1 is always called before
426 slpeel_update_phi_nodes_for_guard2. They are both needed in order
427 to create correct data-flow and loop-closed-ssa-form.
428
429 Generally slpeel_update_phi_nodes_for_guard1 creates phis for variables
430 that change between iterations of a loop (and therefore have a phi-node
431 at the loop entry), whereas slpeel_update_phi_nodes_for_guard2 creates
b8698a0f
L
432 phis for variables that are used out of the loop (and therefore have
433 loop-closed exit phis). Some variables may be both updated between
ebfd146a
IR
434 iterations and used after the loop. This is why in loop1_exit_bb we
435 may need both entry_phis (created by slpeel_update_phi_nodes_for_guard1)
436 and exit phis (created by slpeel_update_phi_nodes_for_guard2).
437
438 - IS_NEW_LOOP: if IS_NEW_LOOP is true, then LOOP is a newly created copy of
439 an original loop. i.e., we have:
440
441 orig_loop
442 guard_bb (goto LOOP/new_merge)
443 new_loop <-- LOOP
444 new_exit
445 new_merge
446 next_bb
447
448 If IS_NEW_LOOP is false, then LOOP is an original loop, in which case we
449 have:
450
451 new_loop
452 guard_bb (goto LOOP/new_merge)
453 orig_loop <-- LOOP
454 new_exit
455 new_merge
456 next_bb
457
458 The SSA names defined in the original loop have a current
459 reaching definition that that records the corresponding new
460 ssa-name used in the new duplicated loop copy.
461 */
462
463/* Function slpeel_update_phi_nodes_for_guard1
b8698a0f 464
ebfd146a
IR
465 Input:
466 - GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.
467 - DEFS - a bitmap of ssa names to mark new names for which we recorded
b8698a0f
L
468 information.
469
ebfd146a
IR
470 In the context of the overall structure, we have:
471
b8698a0f 472 loop1_preheader_bb:
ebfd146a
IR
473 guard1 (goto loop1/merge1_bb)
474LOOP-> loop1
475 loop1_exit_bb:
476 guard2 (goto merge1_bb/merge2_bb)
477 merge1_bb
478 loop2
479 loop2_exit_bb
480 merge2_bb
481 next_bb
482
483 For each name updated between loop iterations (i.e - for each name that has
484 an entry (loop-header) phi in LOOP) we create a new phi in:
485 1. merge1_bb (to account for the edge from guard1)
486 2. loop1_exit_bb (an exit-phi to keep LOOP in loop-closed form)
487*/
488
489static void
490slpeel_update_phi_nodes_for_guard1 (edge guard_edge, struct loop *loop,
c334023f 491 bool is_new_loop, basic_block *new_exit_bb)
ebfd146a
IR
492{
493 gimple orig_phi, new_phi;
494 gimple update_phi, update_phi2;
495 tree guard_arg, loop_arg;
496 basic_block new_merge_bb = guard_edge->dest;
497 edge e = EDGE_SUCC (new_merge_bb, 0);
498 basic_block update_bb = e->dest;
499 basic_block orig_bb = loop->header;
500 edge new_exit_e;
501 tree current_new_name;
ebfd146a
IR
502 gimple_stmt_iterator gsi_orig, gsi_update;
503
504 /* Create new bb between loop and new_merge_bb. */
505 *new_exit_bb = split_edge (single_exit (loop));
506
507 new_exit_e = EDGE_SUCC (*new_exit_bb, 0);
508
509 for (gsi_orig = gsi_start_phis (orig_bb),
510 gsi_update = gsi_start_phis (update_bb);
511 !gsi_end_p (gsi_orig) && !gsi_end_p (gsi_update);
512 gsi_next (&gsi_orig), gsi_next (&gsi_update))
513 {
e20f6b4b 514 source_location loop_locus, guard_locus;
070ecdfd 515 tree new_res;
ebfd146a
IR
516 orig_phi = gsi_stmt (gsi_orig);
517 update_phi = gsi_stmt (gsi_update);
518
ebfd146a
IR
519 /** 1. Handle new-merge-point phis **/
520
521 /* 1.1. Generate new phi node in NEW_MERGE_BB: */
070ecdfd
RG
522 new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
523 new_phi = create_phi_node (new_res, new_merge_bb);
ebfd146a
IR
524
525 /* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the exit-edge
526 of LOOP. Set the two phi args in NEW_PHI for these edges: */
527 loop_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, EDGE_SUCC (loop->latch, 0));
b8698a0f
L
528 loop_locus = gimple_phi_arg_location_from_edge (orig_phi,
529 EDGE_SUCC (loop->latch,
f5045c96 530 0));
ebfd146a 531 guard_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, loop_preheader_edge (loop));
b8698a0f
L
532 guard_locus
533 = gimple_phi_arg_location_from_edge (orig_phi,
f5045c96 534 loop_preheader_edge (loop));
ebfd146a 535
9e227d60
DC
536 add_phi_arg (new_phi, loop_arg, new_exit_e, loop_locus);
537 add_phi_arg (new_phi, guard_arg, guard_edge, guard_locus);
ebfd146a
IR
538
539 /* 1.3. Update phi in successor block. */
540 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == loop_arg
541 || PHI_ARG_DEF_FROM_EDGE (update_phi, e) == guard_arg);
684f25f4 542 adjust_phi_and_debug_stmts (update_phi, e, PHI_RESULT (new_phi));
ebfd146a
IR
543 update_phi2 = new_phi;
544
545
546 /** 2. Handle loop-closed-ssa-form phis **/
547
ea057359 548 if (virtual_operand_p (PHI_RESULT (orig_phi)))
ebfd146a
IR
549 continue;
550
551 /* 2.1. Generate new phi node in NEW_EXIT_BB: */
070ecdfd
RG
552 new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
553 new_phi = create_phi_node (new_res, *new_exit_bb);
ebfd146a
IR
554
555 /* 2.2. NEW_EXIT_BB has one incoming edge: the exit-edge of the loop. */
9e227d60 556 add_phi_arg (new_phi, loop_arg, single_exit (loop), loop_locus);
ebfd146a
IR
557
558 /* 2.3. Update phi in successor of NEW_EXIT_BB: */
559 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, new_exit_e) == loop_arg);
684f25f4
AO
560 adjust_phi_and_debug_stmts (update_phi2, new_exit_e,
561 PHI_RESULT (new_phi));
ebfd146a
IR
562
563 /* 2.4. Record the newly created name with set_current_def.
564 We want to find a name such that
565 name = get_current_def (orig_loop_name)
566 and to set its current definition as follows:
567 set_current_def (name, new_phi_name)
568
569 If LOOP is a new loop then loop_arg is already the name we're
570 looking for. If LOOP is the original loop, then loop_arg is
571 the orig_loop_name and the relevant name is recorded in its
572 current reaching definition. */
573 if (is_new_loop)
574 current_new_name = loop_arg;
575 else
576 {
577 current_new_name = get_current_def (loop_arg);
578 /* current_def is not available only if the variable does not
579 change inside the loop, in which case we also don't care
580 about recording a current_def for it because we won't be
581 trying to create loop-exit-phis for it. */
582 if (!current_new_name)
583 continue;
584 }
585 gcc_assert (get_current_def (current_new_name) == NULL_TREE);
586
587 set_current_def (current_new_name, PHI_RESULT (new_phi));
ebfd146a
IR
588 }
589}
590
591
592/* Function slpeel_update_phi_nodes_for_guard2
593
594 Input:
595 - GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.
596
597 In the context of the overall structure, we have:
598
b8698a0f 599 loop1_preheader_bb:
ebfd146a
IR
600 guard1 (goto loop1/merge1_bb)
601 loop1
b8698a0f 602 loop1_exit_bb:
ebfd146a
IR
603 guard2 (goto merge1_bb/merge2_bb)
604 merge1_bb
605LOOP-> loop2
606 loop2_exit_bb
607 merge2_bb
608 next_bb
609
610 For each name used out side the loop (i.e - for each name that has an exit
611 phi in next_bb) we create a new phi in:
b8698a0f 612 1. merge2_bb (to account for the edge from guard_bb)
ebfd146a
IR
613 2. loop2_exit_bb (an exit-phi to keep LOOP in loop-closed form)
614 3. guard2 bb (an exit phi to keep the preceding loop in loop-closed form),
615 if needed (if it wasn't handled by slpeel_update_phis_nodes_for_phi1).
616*/
617
618static void
619slpeel_update_phi_nodes_for_guard2 (edge guard_edge, struct loop *loop,
620 bool is_new_loop, basic_block *new_exit_bb)
621{
622 gimple orig_phi, new_phi;
623 gimple update_phi, update_phi2;
624 tree guard_arg, loop_arg;
625 basic_block new_merge_bb = guard_edge->dest;
626 edge e = EDGE_SUCC (new_merge_bb, 0);
627 basic_block update_bb = e->dest;
628 edge new_exit_e;
629 tree orig_def, orig_def_new_name;
630 tree new_name, new_name2;
631 tree arg;
632 gimple_stmt_iterator gsi;
633
634 /* Create new bb between loop and new_merge_bb. */
635 *new_exit_bb = split_edge (single_exit (loop));
636
637 new_exit_e = EDGE_SUCC (*new_exit_bb, 0);
638
639 for (gsi = gsi_start_phis (update_bb); !gsi_end_p (gsi); gsi_next (&gsi))
640 {
070ecdfd 641 tree new_res;
ebfd146a
IR
642 update_phi = gsi_stmt (gsi);
643 orig_phi = update_phi;
644 orig_def = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
645 /* This loop-closed-phi actually doesn't represent a use
b8698a0f 646 out of the loop - the phi arg is a constant. */
ebfd146a
IR
647 if (TREE_CODE (orig_def) != SSA_NAME)
648 continue;
649 orig_def_new_name = get_current_def (orig_def);
650 arg = NULL_TREE;
651
652 /** 1. Handle new-merge-point phis **/
653
654 /* 1.1. Generate new phi node in NEW_MERGE_BB: */
070ecdfd
RG
655 new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
656 new_phi = create_phi_node (new_res, new_merge_bb);
ebfd146a
IR
657
658 /* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the exit-edge
659 of LOOP. Set the two PHI args in NEW_PHI for these edges: */
660 new_name = orig_def;
661 new_name2 = NULL_TREE;
662 if (orig_def_new_name)
663 {
664 new_name = orig_def_new_name;
665 /* Some variables have both loop-entry-phis and loop-exit-phis.
666 Such variables were given yet newer names by phis placed in
667 guard_bb by slpeel_update_phi_nodes_for_guard1. I.e:
668 new_name2 = get_current_def (get_current_def (orig_name)). */
669 new_name2 = get_current_def (new_name);
670 }
b8698a0f 671
ebfd146a
IR
672 if (is_new_loop)
673 {
674 guard_arg = orig_def;
675 loop_arg = new_name;
676 }
677 else
678 {
679 guard_arg = new_name;
680 loop_arg = orig_def;
681 }
682 if (new_name2)
683 guard_arg = new_name2;
b8698a0f 684
9e227d60
DC
685 add_phi_arg (new_phi, loop_arg, new_exit_e, UNKNOWN_LOCATION);
686 add_phi_arg (new_phi, guard_arg, guard_edge, UNKNOWN_LOCATION);
ebfd146a
IR
687
688 /* 1.3. Update phi in successor block. */
689 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == orig_def);
684f25f4 690 adjust_phi_and_debug_stmts (update_phi, e, PHI_RESULT (new_phi));
ebfd146a
IR
691 update_phi2 = new_phi;
692
693
694 /** 2. Handle loop-closed-ssa-form phis **/
695
696 /* 2.1. Generate new phi node in NEW_EXIT_BB: */
070ecdfd
RG
697 new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
698 new_phi = create_phi_node (new_res, *new_exit_bb);
ebfd146a
IR
699
700 /* 2.2. NEW_EXIT_BB has one incoming edge: the exit-edge of the loop. */
9e227d60 701 add_phi_arg (new_phi, loop_arg, single_exit (loop), UNKNOWN_LOCATION);
ebfd146a
IR
702
703 /* 2.3. Update phi in successor of NEW_EXIT_BB: */
704 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, new_exit_e) == loop_arg);
684f25f4
AO
705 adjust_phi_and_debug_stmts (update_phi2, new_exit_e,
706 PHI_RESULT (new_phi));
ebfd146a
IR
707
708
709 /** 3. Handle loop-closed-ssa-form phis for first loop **/
710
711 /* 3.1. Find the relevant names that need an exit-phi in
712 GUARD_BB, i.e. names for which
713 slpeel_update_phi_nodes_for_guard1 had not already created a
714 phi node. This is the case for names that are used outside
715 the loop (and therefore need an exit phi) but are not updated
716 across loop iterations (and therefore don't have a
717 loop-header-phi).
718
719 slpeel_update_phi_nodes_for_guard1 is responsible for
720 creating loop-exit phis in GUARD_BB for names that have a
721 loop-header-phi. When such a phi is created we also record
722 the new name in its current definition. If this new name
723 exists, then guard_arg was set to this new name (see 1.2
724 above). Therefore, if guard_arg is not this new name, this
725 is an indication that an exit-phi in GUARD_BB was not yet
726 created, so we take care of it here. */
727 if (guard_arg == new_name2)
728 continue;
729 arg = guard_arg;
730
731 /* 3.2. Generate new phi node in GUARD_BB: */
070ecdfd
RG
732 new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
733 new_phi = create_phi_node (new_res, guard_edge->src);
ebfd146a
IR
734
735 /* 3.3. GUARD_BB has one incoming edge: */
736 gcc_assert (EDGE_COUNT (guard_edge->src->preds) == 1);
b8698a0f 737 add_phi_arg (new_phi, arg, EDGE_PRED (guard_edge->src, 0),
9e227d60 738 UNKNOWN_LOCATION);
ebfd146a
IR
739
740 /* 3.4. Update phi in successor of GUARD_BB: */
741 gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, guard_edge)
742 == guard_arg);
684f25f4
AO
743 adjust_phi_and_debug_stmts (update_phi2, guard_edge,
744 PHI_RESULT (new_phi));
ebfd146a
IR
745 }
746}
747
748
749/* Make the LOOP iterate NITERS times. This is done by adding a new IV
750 that starts at zero, increases by one and its limit is NITERS.
751
752 Assumption: the exit-condition of LOOP is the last stmt in the loop. */
753
754void
755slpeel_make_loop_iterate_ntimes (struct loop *loop, tree niters)
756{
757 tree indx_before_incr, indx_after_incr;
758 gimple cond_stmt;
759 gimple orig_cond;
760 edge exit_edge = single_exit (loop);
761 gimple_stmt_iterator loop_cond_gsi;
762 gimple_stmt_iterator incr_gsi;
763 bool insert_after;
764 tree init = build_int_cst (TREE_TYPE (niters), 0);
765 tree step = build_int_cst (TREE_TYPE (niters), 1);
766 LOC loop_loc;
767 enum tree_code code;
768
769 orig_cond = get_loop_exit_condition (loop);
770 gcc_assert (orig_cond);
771 loop_cond_gsi = gsi_for_stmt (orig_cond);
772
773 standard_iv_increment_position (loop, &incr_gsi, &insert_after);
774 create_iv (init, step, NULL_TREE, loop,
775 &incr_gsi, insert_after, &indx_before_incr, &indx_after_incr);
776
777 indx_after_incr = force_gimple_operand_gsi (&loop_cond_gsi, indx_after_incr,
778 true, NULL_TREE, true,
779 GSI_SAME_STMT);
780 niters = force_gimple_operand_gsi (&loop_cond_gsi, niters, true, NULL_TREE,
781 true, GSI_SAME_STMT);
782
783 code = (exit_edge->flags & EDGE_TRUE_VALUE) ? GE_EXPR : LT_EXPR;
784 cond_stmt = gimple_build_cond (code, indx_after_incr, niters, NULL_TREE,
785 NULL_TREE);
786
787 gsi_insert_before (&loop_cond_gsi, cond_stmt, GSI_SAME_STMT);
788
789 /* Remove old loop exit test: */
790 gsi_remove (&loop_cond_gsi, true);
6f723d33 791 free_stmt_vec_info (orig_cond);
ebfd146a
IR
792
793 loop_loc = find_loop_location (loop);
794 if (dump_file && (dump_flags & TDF_DETAILS))
795 {
796 if (loop_loc != UNKNOWN_LOC)
797 fprintf (dump_file, "\nloop at %s:%d: ",
798 LOC_FILE (loop_loc), LOC_LINE (loop_loc));
799 print_gimple_stmt (dump_file, cond_stmt, 0, TDF_SLIM);
800 }
801
802 loop->nb_iterations = niters;
803}
804
805
b8698a0f 806/* Given LOOP this function generates a new copy of it and puts it
ebfd146a
IR
807 on E which is either the entry or exit of LOOP. */
808
809struct loop *
810slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, edge e)
811{
812 struct loop *new_loop;
813 basic_block *new_bbs, *bbs;
814 bool at_exit;
815 bool was_imm_dom;
b8698a0f 816 basic_block exit_dest;
ebfd146a
IR
817 gimple phi;
818 tree phi_arg;
819 edge exit, new_exit;
820 gimple_stmt_iterator gsi;
821
b8698a0f 822 at_exit = (e == single_exit (loop));
ebfd146a
IR
823 if (!at_exit && e != loop_preheader_edge (loop))
824 return NULL;
825
826 bbs = get_loop_body (loop);
827
828 /* Check whether duplication is possible. */
829 if (!can_copy_bbs_p (bbs, loop->num_nodes))
830 {
831 free (bbs);
832 return NULL;
833 }
834
835 /* Generate new loop structure. */
836 new_loop = duplicate_loop (loop, loop_outer (loop));
837 if (!new_loop)
838 {
839 free (bbs);
840 return NULL;
841 }
842
843 exit_dest = single_exit (loop)->dest;
b8698a0f
L
844 was_imm_dom = (get_immediate_dominator (CDI_DOMINATORS,
845 exit_dest) == loop->header ?
ebfd146a
IR
846 true : false);
847
848 new_bbs = XNEWVEC (basic_block, loop->num_nodes);
849
850 exit = single_exit (loop);
851 copy_bbs (bbs, loop->num_nodes, new_bbs,
852 &exit, 1, &new_exit, NULL,
853 e->src);
854
b8698a0f 855 /* Duplicating phi args at exit bbs as coming
ebfd146a
IR
856 also from exit of duplicated loop. */
857 for (gsi = gsi_start_phis (exit_dest); !gsi_end_p (gsi); gsi_next (&gsi))
858 {
859 phi = gsi_stmt (gsi);
860 phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, single_exit (loop));
861 if (phi_arg)
862 {
863 edge new_loop_exit_edge;
f5045c96 864 source_location locus;
ebfd146a 865
f5045c96 866 locus = gimple_phi_arg_location_from_edge (phi, single_exit (loop));
ebfd146a
IR
867 if (EDGE_SUCC (new_loop->header, 0)->dest == new_loop->latch)
868 new_loop_exit_edge = EDGE_SUCC (new_loop->header, 1);
869 else
870 new_loop_exit_edge = EDGE_SUCC (new_loop->header, 0);
b8698a0f 871
9e227d60 872 add_phi_arg (phi, phi_arg, new_loop_exit_edge, locus);
ebfd146a 873 }
b8698a0f
L
874 }
875
ebfd146a
IR
876 if (at_exit) /* Add the loop copy at exit. */
877 {
878 redirect_edge_and_branch_force (e, new_loop->header);
879 PENDING_STMT (e) = NULL;
880 set_immediate_dominator (CDI_DOMINATORS, new_loop->header, e->src);
881 if (was_imm_dom)
882 set_immediate_dominator (CDI_DOMINATORS, exit_dest, new_loop->header);
883 }
884 else /* Add the copy at entry. */
885 {
886 edge new_exit_e;
887 edge entry_e = loop_preheader_edge (loop);
888 basic_block preheader = entry_e->src;
b8698a0f
L
889
890 if (!flow_bb_inside_loop_p (new_loop,
ebfd146a
IR
891 EDGE_SUCC (new_loop->header, 0)->dest))
892 new_exit_e = EDGE_SUCC (new_loop->header, 0);
893 else
b8698a0f 894 new_exit_e = EDGE_SUCC (new_loop->header, 1);
ebfd146a
IR
895
896 redirect_edge_and_branch_force (new_exit_e, loop->header);
897 PENDING_STMT (new_exit_e) = NULL;
898 set_immediate_dominator (CDI_DOMINATORS, loop->header,
899 new_exit_e->src);
900
b8698a0f 901 /* We have to add phi args to the loop->header here as coming
ebfd146a
IR
902 from new_exit_e edge. */
903 for (gsi = gsi_start_phis (loop->header);
904 !gsi_end_p (gsi);
905 gsi_next (&gsi))
906 {
907 phi = gsi_stmt (gsi);
908 phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, entry_e);
909 if (phi_arg)
f5045c96 910 add_phi_arg (phi, phi_arg, new_exit_e,
9e227d60 911 gimple_phi_arg_location_from_edge (phi, entry_e));
b8698a0f 912 }
ebfd146a
IR
913
914 redirect_edge_and_branch_force (entry_e, new_loop->header);
915 PENDING_STMT (entry_e) = NULL;
916 set_immediate_dominator (CDI_DOMINATORS, new_loop->header, preheader);
917 }
918
919 free (new_bbs);
920 free (bbs);
921
922 return new_loop;
923}
924
925
926/* Given the condition statement COND, put it as the last statement
927 of GUARD_BB; EXIT_BB is the basic block to skip the loop;
b8698a0f 928 Assumes that this is the single exit of the guarded loop.
86290011 929 Returns the skip edge, inserts new stmts on the COND_EXPR_STMT_LIST. */
ebfd146a
IR
930
931static edge
86290011
RG
932slpeel_add_loop_guard (basic_block guard_bb, tree cond,
933 gimple_seq cond_expr_stmt_list,
934 basic_block exit_bb, basic_block dom_bb)
ebfd146a
IR
935{
936 gimple_stmt_iterator gsi;
937 edge new_e, enter_e;
938 gimple cond_stmt;
939 gimple_seq gimplify_stmt_list = NULL;
940
941 enter_e = EDGE_SUCC (guard_bb, 0);
942 enter_e->flags &= ~EDGE_FALLTHRU;
943 enter_e->flags |= EDGE_FALSE_VALUE;
944 gsi = gsi_last_bb (guard_bb);
945
f7a06a98
RG
946 cond = force_gimple_operand_1 (cond, &gimplify_stmt_list, is_gimple_condexpr,
947 NULL_TREE);
86290011
RG
948 if (gimplify_stmt_list)
949 gimple_seq_add_seq (&cond_expr_stmt_list, gimplify_stmt_list);
f7a06a98 950 cond_stmt = gimple_build_cond_from_tree (cond, NULL_TREE, NULL_TREE);
86290011
RG
951 if (cond_expr_stmt_list)
952 gsi_insert_seq_after (&gsi, cond_expr_stmt_list, GSI_NEW_STMT);
ebfd146a
IR
953
954 gsi = gsi_last_bb (guard_bb);
955 gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
956
957 /* Add new edge to connect guard block to the merge/loop-exit block. */
958 new_e = make_edge (guard_bb, exit_bb, EDGE_TRUE_VALUE);
959 set_immediate_dominator (CDI_DOMINATORS, exit_bb, dom_bb);
960 return new_e;
961}
962
963
964/* This function verifies that the following restrictions apply to LOOP:
965 (1) it is innermost
966 (2) it consists of exactly 2 basic blocks - header, and an empty latch.
967 (3) it is single entry, single exit
968 (4) its exit condition is the last stmt in the header
969 (5) E is the entry/exit edge of LOOP.
970 */
971
972bool
973slpeel_can_duplicate_loop_p (const struct loop *loop, const_edge e)
974{
975 edge exit_e = single_exit (loop);
976 edge entry_e = loop_preheader_edge (loop);
977 gimple orig_cond = get_loop_exit_condition (loop);
978 gimple_stmt_iterator loop_exit_gsi = gsi_last_bb (exit_e->src);
979
5006671f 980 if (need_ssa_update_p (cfun))
ebfd146a
IR
981 return false;
982
983 if (loop->inner
984 /* All loops have an outer scope; the only case loop->outer is NULL is for
985 the function itself. */
986 || !loop_outer (loop)
987 || loop->num_nodes != 2
988 || !empty_block_p (loop->latch)
989 || !single_exit (loop)
990 /* Verify that new loop exit condition can be trivially modified. */
991 || (!orig_cond || orig_cond != gsi_stmt (loop_exit_gsi))
992 || (e != exit_e && e != entry_e))
993 return false;
994
995 return true;
996}
997
998#ifdef ENABLE_CHECKING
999static void
1000slpeel_verify_cfg_after_peeling (struct loop *first_loop,
1001 struct loop *second_loop)
1002{
1003 basic_block loop1_exit_bb = single_exit (first_loop)->dest;
1004 basic_block loop2_entry_bb = loop_preheader_edge (second_loop)->src;
1005 basic_block loop1_entry_bb = loop_preheader_edge (first_loop)->src;
1006
1007 /* A guard that controls whether the second_loop is to be executed or skipped
1008 is placed in first_loop->exit. first_loop->exit therefore has two
1009 successors - one is the preheader of second_loop, and the other is a bb
1010 after second_loop.
1011 */
1012 gcc_assert (EDGE_COUNT (loop1_exit_bb->succs) == 2);
b8698a0f 1013
ebfd146a
IR
1014 /* 1. Verify that one of the successors of first_loop->exit is the preheader
1015 of second_loop. */
b8698a0f 1016
ebfd146a
IR
1017 /* The preheader of new_loop is expected to have two predecessors:
1018 first_loop->exit and the block that precedes first_loop. */
1019
b8698a0f 1020 gcc_assert (EDGE_COUNT (loop2_entry_bb->preds) == 2
ebfd146a
IR
1021 && ((EDGE_PRED (loop2_entry_bb, 0)->src == loop1_exit_bb
1022 && EDGE_PRED (loop2_entry_bb, 1)->src == loop1_entry_bb)
1023 || (EDGE_PRED (loop2_entry_bb, 1)->src == loop1_exit_bb
1024 && EDGE_PRED (loop2_entry_bb, 0)->src == loop1_entry_bb)));
b8698a0f 1025
ebfd146a
IR
1026 /* Verify that the other successor of first_loop->exit is after the
1027 second_loop. */
1028 /* TODO */
1029}
1030#endif
1031
1032/* If the run time cost model check determines that vectorization is
1033 not profitable and hence scalar loop should be generated then set
1034 FIRST_NITERS to prologue peeled iterations. This will allow all the
1035 iterations to be executed in the prologue peeled scalar loop. */
1036
1037static void
1038set_prologue_iterations (basic_block bb_before_first_loop,
5d2eb24b 1039 tree *first_niters,
ebfd146a
IR
1040 struct loop *loop,
1041 unsigned int th)
1042{
1043 edge e;
1044 basic_block cond_bb, then_bb;
1045 tree var, prologue_after_cost_adjust_name;
1046 gimple_stmt_iterator gsi;
1047 gimple newphi;
1048 edge e_true, e_false, e_fallthru;
1049 gimple cond_stmt;
f7a06a98 1050 gimple_seq stmts = NULL;
ebfd146a 1051 tree cost_pre_condition = NULL_TREE;
b8698a0f 1052 tree scalar_loop_iters =
ebfd146a
IR
1053 unshare_expr (LOOP_VINFO_NITERS_UNCHANGED (loop_vec_info_for_loop (loop)));
1054
1055 e = single_pred_edge (bb_before_first_loop);
1056 cond_bb = split_edge(e);
1057
1058 e = single_pred_edge (bb_before_first_loop);
1059 then_bb = split_edge(e);
1060 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
1061
1062 e_false = make_single_succ_edge (cond_bb, bb_before_first_loop,
1063 EDGE_FALSE_VALUE);
1064 set_immediate_dominator (CDI_DOMINATORS, bb_before_first_loop, cond_bb);
1065
1066 e_true = EDGE_PRED (then_bb, 0);
1067 e_true->flags &= ~EDGE_FALLTHRU;
1068 e_true->flags |= EDGE_TRUE_VALUE;
1069
1070 e_fallthru = EDGE_SUCC (then_bb, 0);
1071
f7a06a98 1072 gsi = gsi_last_bb (cond_bb);
ebfd146a 1073 cost_pre_condition =
b8698a0f 1074 fold_build2 (LE_EXPR, boolean_type_node, scalar_loop_iters,
ebfd146a
IR
1075 build_int_cst (TREE_TYPE (scalar_loop_iters), th));
1076 cost_pre_condition =
f7a06a98
RG
1077 force_gimple_operand_gsi_1 (&gsi, cost_pre_condition, is_gimple_condexpr,
1078 NULL_TREE, false, GSI_CONTINUE_LINKING);
1079 cond_stmt = gimple_build_cond_from_tree (cost_pre_condition,
1080 NULL_TREE, NULL_TREE);
ebfd146a 1081 gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
b8698a0f 1082
ebfd146a
IR
1083 var = create_tmp_var (TREE_TYPE (scalar_loop_iters),
1084 "prologue_after_cost_adjust");
b8698a0f 1085 prologue_after_cost_adjust_name =
ebfd146a
IR
1086 force_gimple_operand (scalar_loop_iters, &stmts, false, var);
1087
1088 gsi = gsi_last_bb (then_bb);
1089 if (stmts)
1090 gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
1091
1092 newphi = create_phi_node (var, bb_before_first_loop);
b8698a0f 1093 add_phi_arg (newphi, prologue_after_cost_adjust_name, e_fallthru,
9e227d60
DC
1094 UNKNOWN_LOCATION);
1095 add_phi_arg (newphi, *first_niters, e_false, UNKNOWN_LOCATION);
ebfd146a 1096
5d2eb24b 1097 *first_niters = PHI_RESULT (newphi);
ebfd146a
IR
1098}
1099
ebfd146a
IR
1100/* Function slpeel_tree_peel_loop_to_edge.
1101
1102 Peel the first (last) iterations of LOOP into a new prolog (epilog) loop
1103 that is placed on the entry (exit) edge E of LOOP. After this transformation
1104 we have two loops one after the other - first-loop iterates FIRST_NITERS
1105 times, and second-loop iterates the remainder NITERS - FIRST_NITERS times.
b8698a0f 1106 If the cost model indicates that it is profitable to emit a scalar
ebfd146a
IR
1107 loop instead of the vector one, then the prolog (epilog) loop will iterate
1108 for the entire unchanged scalar iterations of the loop.
1109
1110 Input:
1111 - LOOP: the loop to be peeled.
1112 - E: the exit or entry edge of LOOP.
1113 If it is the entry edge, we peel the first iterations of LOOP. In this
1114 case first-loop is LOOP, and second-loop is the newly created loop.
1115 If it is the exit edge, we peel the last iterations of LOOP. In this
1116 case, first-loop is the newly created loop, and second-loop is LOOP.
1117 - NITERS: the number of iterations that LOOP iterates.
1118 - FIRST_NITERS: the number of iterations that the first-loop should iterate.
1119 - UPDATE_FIRST_LOOP_COUNT: specified whether this function is responsible
1120 for updating the loop bound of the first-loop to FIRST_NITERS. If it
1121 is false, the caller of this function may want to take care of this
1122 (this can be useful if we don't want new stmts added to first-loop).
1123 - TH: cost model profitability threshold of iterations for vectorization.
1124 - CHECK_PROFITABILITY: specify whether cost model check has not occurred
1125 during versioning and hence needs to occur during
b8698a0f 1126 prologue generation or whether cost model check
ebfd146a
IR
1127 has not occurred during prologue generation and hence
1128 needs to occur during epilogue generation.
b8698a0f 1129
ebfd146a
IR
1130
1131 Output:
1132 The function returns a pointer to the new loop-copy, or NULL if it failed
1133 to perform the transformation.
1134
1135 The function generates two if-then-else guards: one before the first loop,
1136 and the other before the second loop:
1137 The first guard is:
1138 if (FIRST_NITERS == 0) then skip the first loop,
1139 and go directly to the second loop.
1140 The second guard is:
1141 if (FIRST_NITERS == NITERS) then skip the second loop.
1142
86290011
RG
1143 If the optional COND_EXPR and COND_EXPR_STMT_LIST arguments are given
1144 then the generated condition is combined with COND_EXPR and the
1145 statements in COND_EXPR_STMT_LIST are emitted together with it.
1146
ebfd146a
IR
1147 FORNOW only simple loops are supported (see slpeel_can_duplicate_loop_p).
1148 FORNOW the resulting code will not be in loop-closed-ssa form.
1149*/
1150
1151static struct loop*
b8698a0f 1152slpeel_tree_peel_loop_to_edge (struct loop *loop,
5d2eb24b 1153 edge e, tree *first_niters,
ebfd146a 1154 tree niters, bool update_first_loop_count,
86290011
RG
1155 unsigned int th, bool check_profitability,
1156 tree cond_expr, gimple_seq cond_expr_stmt_list)
ebfd146a
IR
1157{
1158 struct loop *new_loop = NULL, *first_loop, *second_loop;
1159 edge skip_e;
1160 tree pre_condition = NULL_TREE;
ebfd146a
IR
1161 basic_block bb_before_second_loop, bb_after_second_loop;
1162 basic_block bb_before_first_loop;
1163 basic_block bb_between_loops;
1164 basic_block new_exit_bb;
e20f6b4b 1165 gimple_stmt_iterator gsi;
ebfd146a
IR
1166 edge exit_e = single_exit (loop);
1167 LOC loop_loc;
1168 tree cost_pre_condition = NULL_TREE;
b8698a0f 1169
ebfd146a
IR
1170 if (!slpeel_can_duplicate_loop_p (loop, e))
1171 return NULL;
b8698a0f 1172
e20f6b4b
JJ
1173 /* If the loop has a virtual PHI, but exit bb doesn't, create a virtual PHI
1174 in the exit bb and rename all the uses after the loop. This simplifies
1175 the *guard[12] routines, which assume loop closed SSA form for all PHIs
1176 (but normally loop closed SSA form doesn't require virtual PHIs to be
1177 in the same form). Doing this early simplifies the checking what
1178 uses should be renamed. */
1179 for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
ea057359 1180 if (virtual_operand_p (gimple_phi_result (gsi_stmt (gsi))))
e20f6b4b
JJ
1181 {
1182 gimple phi = gsi_stmt (gsi);
1183 for (gsi = gsi_start_phis (exit_e->dest);
1184 !gsi_end_p (gsi); gsi_next (&gsi))
ea057359 1185 if (virtual_operand_p (gimple_phi_result (gsi_stmt (gsi))))
e20f6b4b
JJ
1186 break;
1187 if (gsi_end_p (gsi))
1188 {
070ecdfd
RG
1189 tree new_vop = copy_ssa_name (PHI_RESULT (phi), NULL);
1190 gimple new_phi = create_phi_node (new_vop, exit_e->dest);
e20f6b4b
JJ
1191 tree vop = PHI_ARG_DEF_FROM_EDGE (phi, EDGE_SUCC (loop->latch, 0));
1192 imm_use_iterator imm_iter;
1193 gimple stmt;
e20f6b4b
JJ
1194 use_operand_p use_p;
1195
9e227d60 1196 add_phi_arg (new_phi, vop, exit_e, UNKNOWN_LOCATION);
e20f6b4b
JJ
1197 gimple_phi_set_result (new_phi, new_vop);
1198 FOR_EACH_IMM_USE_STMT (stmt, imm_iter, vop)
1199 if (stmt != new_phi && gimple_bb (stmt) != loop->header)
1200 FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
1201 SET_USE (use_p, new_vop);
1202 }
1203 break;
1204 }
ebfd146a
IR
1205
1206 /* 1. Generate a copy of LOOP and put it on E (E is the entry/exit of LOOP).
1207 Resulting CFG would be:
1208
1209 first_loop:
1210 do {
1211 } while ...
1212
1213 second_loop:
1214 do {
1215 } while ...
1216
1217 orig_exit_bb:
1218 */
b8698a0f 1219
ebfd146a
IR
1220 if (!(new_loop = slpeel_tree_duplicate_loop_to_edge_cfg (loop, e)))
1221 {
1222 loop_loc = find_loop_location (loop);
1223 if (dump_file && (dump_flags & TDF_DETAILS))
1224 {
1225 if (loop_loc != UNKNOWN_LOC)
1226 fprintf (dump_file, "\n%s:%d: note: ",
1227 LOC_FILE (loop_loc), LOC_LINE (loop_loc));
1228 fprintf (dump_file, "tree_duplicate_loop_to_edge_cfg failed.\n");
1229 }
1230 return NULL;
1231 }
b8698a0f 1232
684f25f4
AO
1233 if (MAY_HAVE_DEBUG_STMTS)
1234 {
1235 gcc_assert (!adjust_vec);
1236 adjust_vec = VEC_alloc (adjust_info, stack, 32);
1237 }
1238
ebfd146a
IR
1239 if (e == exit_e)
1240 {
1241 /* NEW_LOOP was placed after LOOP. */
1242 first_loop = loop;
1243 second_loop = new_loop;
1244 }
1245 else
1246 {
1247 /* NEW_LOOP was placed before LOOP. */
1248 first_loop = new_loop;
1249 second_loop = loop;
1250 }
1251
ebfd146a
IR
1252 slpeel_update_phis_for_duplicate_loop (loop, new_loop, e == exit_e);
1253 rename_variables_in_loop (new_loop);
1254
1255
1256 /* 2. Add the guard code in one of the following ways:
1257
1258 2.a Add the guard that controls whether the first loop is executed.
1259 This occurs when this function is invoked for prologue or epilogue
1260 generation and when the cost model check can be done at compile time.
1261
1262 Resulting CFG would be:
1263
1264 bb_before_first_loop:
1265 if (FIRST_NITERS == 0) GOTO bb_before_second_loop
1266 GOTO first-loop
1267
1268 first_loop:
1269 do {
1270 } while ...
1271
1272 bb_before_second_loop:
1273
1274 second_loop:
1275 do {
1276 } while ...
1277
1278 orig_exit_bb:
1279
1280 2.b Add the cost model check that allows the prologue
1281 to iterate for the entire unchanged scalar
1282 iterations of the loop in the event that the cost
1283 model indicates that the scalar loop is more
1284 profitable than the vector one. This occurs when
1285 this function is invoked for prologue generation
1286 and the cost model check needs to be done at run
1287 time.
1288
1289 Resulting CFG after prologue peeling would be:
1290
1291 if (scalar_loop_iterations <= th)
1292 FIRST_NITERS = scalar_loop_iterations
1293
1294 bb_before_first_loop:
1295 if (FIRST_NITERS == 0) GOTO bb_before_second_loop
1296 GOTO first-loop
1297
1298 first_loop:
1299 do {
1300 } while ...
1301
1302 bb_before_second_loop:
1303
1304 second_loop:
1305 do {
1306 } while ...
1307
1308 orig_exit_bb:
1309
1310 2.c Add the cost model check that allows the epilogue
1311 to iterate for the entire unchanged scalar
1312 iterations of the loop in the event that the cost
1313 model indicates that the scalar loop is more
1314 profitable than the vector one. This occurs when
1315 this function is invoked for epilogue generation
1316 and the cost model check needs to be done at run
86290011
RG
1317 time. This check is combined with any pre-existing
1318 check in COND_EXPR to avoid versioning.
ebfd146a
IR
1319
1320 Resulting CFG after prologue peeling would be:
1321
1322 bb_before_first_loop:
1323 if ((scalar_loop_iterations <= th)
1324 ||
1325 FIRST_NITERS == 0) GOTO bb_before_second_loop
1326 GOTO first-loop
1327
1328 first_loop:
1329 do {
1330 } while ...
1331
1332 bb_before_second_loop:
1333
1334 second_loop:
1335 do {
1336 } while ...
1337
1338 orig_exit_bb:
1339 */
1340
1341 bb_before_first_loop = split_edge (loop_preheader_edge (first_loop));
1342 bb_before_second_loop = split_edge (single_exit (first_loop));
1343
1344 /* Epilogue peeling. */
1345 if (!update_first_loop_count)
1346 {
1347 pre_condition =
5d2eb24b
IR
1348 fold_build2 (LE_EXPR, boolean_type_node, *first_niters,
1349 build_int_cst (TREE_TYPE (*first_niters), 0));
ebfd146a
IR
1350 if (check_profitability)
1351 {
1352 tree scalar_loop_iters
1353 = unshare_expr (LOOP_VINFO_NITERS_UNCHANGED
1354 (loop_vec_info_for_loop (loop)));
b8698a0f
L
1355 cost_pre_condition =
1356 fold_build2 (LE_EXPR, boolean_type_node, scalar_loop_iters,
ebfd146a
IR
1357 build_int_cst (TREE_TYPE (scalar_loop_iters), th));
1358
1359 pre_condition = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
1360 cost_pre_condition, pre_condition);
1361 }
86290011
RG
1362 if (cond_expr)
1363 {
1364 pre_condition =
1365 fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
1366 pre_condition,
1367 fold_build1 (TRUTH_NOT_EXPR, boolean_type_node,
1368 cond_expr));
1369 }
ebfd146a
IR
1370 }
1371
b8698a0f 1372 /* Prologue peeling. */
ebfd146a
IR
1373 else
1374 {
1375 if (check_profitability)
1376 set_prologue_iterations (bb_before_first_loop, first_niters,
1377 loop, th);
1378
1379 pre_condition =
5d2eb24b
IR
1380 fold_build2 (LE_EXPR, boolean_type_node, *first_niters,
1381 build_int_cst (TREE_TYPE (*first_niters), 0));
ebfd146a
IR
1382 }
1383
1384 skip_e = slpeel_add_loop_guard (bb_before_first_loop, pre_condition,
86290011 1385 cond_expr_stmt_list,
ebfd146a
IR
1386 bb_before_second_loop, bb_before_first_loop);
1387 slpeel_update_phi_nodes_for_guard1 (skip_e, first_loop,
1388 first_loop == new_loop,
c334023f 1389 &new_exit_bb);
ebfd146a
IR
1390
1391
1392 /* 3. Add the guard that controls whether the second loop is executed.
1393 Resulting CFG would be:
1394
1395 bb_before_first_loop:
1396 if (FIRST_NITERS == 0) GOTO bb_before_second_loop (skip first loop)
1397 GOTO first-loop
1398
1399 first_loop:
1400 do {
1401 } while ...
1402
1403 bb_between_loops:
1404 if (FIRST_NITERS == NITERS) GOTO bb_after_second_loop (skip second loop)
1405 GOTO bb_before_second_loop
1406
1407 bb_before_second_loop:
1408
1409 second_loop:
1410 do {
1411 } while ...
1412
1413 bb_after_second_loop:
1414
1415 orig_exit_bb:
1416 */
1417
1418 bb_between_loops = new_exit_bb;
1419 bb_after_second_loop = split_edge (single_exit (second_loop));
1420
b8698a0f 1421 pre_condition =
5d2eb24b 1422 fold_build2 (EQ_EXPR, boolean_type_node, *first_niters, niters);
86290011 1423 skip_e = slpeel_add_loop_guard (bb_between_loops, pre_condition, NULL,
ebfd146a
IR
1424 bb_after_second_loop, bb_before_first_loop);
1425 slpeel_update_phi_nodes_for_guard2 (skip_e, second_loop,
1426 second_loop == new_loop, &new_exit_bb);
1427
1428 /* 4. Make first-loop iterate FIRST_NITERS times, if requested.
1429 */
1430 if (update_first_loop_count)
5d2eb24b 1431 slpeel_make_loop_iterate_ntimes (first_loop, *first_niters);
ebfd146a 1432
040d39ee
RG
1433 delete_update_ssa ();
1434
684f25f4
AO
1435 adjust_vec_debug_stmts ();
1436
ebfd146a
IR
1437 return new_loop;
1438}
1439
1440/* Function vect_get_loop_location.
1441
1442 Extract the location of the loop in the source code.
1443 If the loop is not well formed for vectorization, an estimated
1444 location is calculated.
1445 Return the loop location if succeed and NULL if not. */
1446
1447LOC
1448find_loop_location (struct loop *loop)
1449{
1450 gimple stmt = NULL;
1451 basic_block bb;
1452 gimple_stmt_iterator si;
1453
1454 if (!loop)
1455 return UNKNOWN_LOC;
1456
1457 stmt = get_loop_exit_condition (loop);
1458
1459 if (stmt && gimple_location (stmt) != UNKNOWN_LOC)
1460 return gimple_location (stmt);
1461
1462 /* If we got here the loop is probably not "well formed",
1463 try to estimate the loop location */
1464
1465 if (!loop->header)
1466 return UNKNOWN_LOC;
1467
1468 bb = loop->header;
1469
1470 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1471 {
1472 stmt = gsi_stmt (si);
1473 if (gimple_location (stmt) != UNKNOWN_LOC)
1474 return gimple_location (stmt);
1475 }
1476
1477 return UNKNOWN_LOC;
1478}
1479
1480
1481/* This function builds ni_name = number of iterations loop executes
86290011
RG
1482 on the loop preheader. If SEQ is given the stmt is instead emitted
1483 there. */
ebfd146a
IR
1484
1485static tree
86290011 1486vect_build_loop_niters (loop_vec_info loop_vinfo, gimple_seq seq)
ebfd146a
IR
1487{
1488 tree ni_name, var;
1489 gimple_seq stmts = NULL;
1490 edge pe;
1491 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1492 tree ni = unshare_expr (LOOP_VINFO_NITERS (loop_vinfo));
1493
1494 var = create_tmp_var (TREE_TYPE (ni), "niters");
ebfd146a
IR
1495 ni_name = force_gimple_operand (ni, &stmts, false, var);
1496
1497 pe = loop_preheader_edge (loop);
1498 if (stmts)
1499 {
86290011
RG
1500 if (seq)
1501 gimple_seq_add_seq (&seq, stmts);
1502 else
1503 {
1504 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
1505 gcc_assert (!new_bb);
1506 }
ebfd146a
IR
1507 }
1508
1509 return ni_name;
1510}
1511
1512
1513/* This function generates the following statements:
1514
1515 ni_name = number of iterations loop executes
1516 ratio = ni_name / vf
1517 ratio_mult_vf_name = ratio * vf
1518
86290011
RG
1519 and places them at the loop preheader edge or in COND_EXPR_STMT_LIST
1520 if that is non-NULL. */
ebfd146a 1521
b8698a0f
L
1522static void
1523vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
ebfd146a 1524 tree *ni_name_ptr,
b8698a0f 1525 tree *ratio_mult_vf_name_ptr,
86290011
RG
1526 tree *ratio_name_ptr,
1527 gimple_seq cond_expr_stmt_list)
ebfd146a
IR
1528{
1529
1530 edge pe;
1531 basic_block new_bb;
1532 gimple_seq stmts;
48df3fa6 1533 tree ni_name, ni_minus_gap_name;
ebfd146a
IR
1534 tree var;
1535 tree ratio_name;
1536 tree ratio_mult_vf_name;
1537 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1538 tree ni = LOOP_VINFO_NITERS (loop_vinfo);
1539 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1540 tree log_vf;
1541
1542 pe = loop_preheader_edge (loop);
1543
b8698a0f 1544 /* Generate temporary variable that contains
ebfd146a
IR
1545 number of iterations loop executes. */
1546
86290011 1547 ni_name = vect_build_loop_niters (loop_vinfo, cond_expr_stmt_list);
ebfd146a
IR
1548 log_vf = build_int_cst (TREE_TYPE (ni), exact_log2 (vf));
1549
48df3fa6
IR
1550 /* If epilogue loop is required because of data accesses with gaps, we
1551 subtract one iteration from the total number of iterations here for
1552 correct calculation of RATIO. */
1553 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
1554 {
1555 ni_minus_gap_name = fold_build2 (MINUS_EXPR, TREE_TYPE (ni_name),
1556 ni_name,
1557 build_one_cst (TREE_TYPE (ni_name)));
1558 if (!is_gimple_val (ni_minus_gap_name))
1559 {
1560 var = create_tmp_var (TREE_TYPE (ni), "ni_gap");
48df3fa6
IR
1561
1562 stmts = NULL;
1563 ni_minus_gap_name = force_gimple_operand (ni_minus_gap_name, &stmts,
1564 true, var);
1565 if (cond_expr_stmt_list)
1566 gimple_seq_add_seq (&cond_expr_stmt_list, stmts);
1567 else
1568 {
1569 pe = loop_preheader_edge (loop);
1570 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
1571 gcc_assert (!new_bb);
1572 }
1573 }
1574 }
1575 else
1576 ni_minus_gap_name = ni_name;
1577
ebfd146a
IR
1578 /* Create: ratio = ni >> log2(vf) */
1579
48df3fa6
IR
1580 ratio_name = fold_build2 (RSHIFT_EXPR, TREE_TYPE (ni_minus_gap_name),
1581 ni_minus_gap_name, log_vf);
ebfd146a
IR
1582 if (!is_gimple_val (ratio_name))
1583 {
1584 var = create_tmp_var (TREE_TYPE (ni), "bnd");
ebfd146a
IR
1585
1586 stmts = NULL;
1587 ratio_name = force_gimple_operand (ratio_name, &stmts, true, var);
86290011
RG
1588 if (cond_expr_stmt_list)
1589 gimple_seq_add_seq (&cond_expr_stmt_list, stmts);
1590 else
1591 {
1592 pe = loop_preheader_edge (loop);
1593 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
1594 gcc_assert (!new_bb);
1595 }
ebfd146a 1596 }
b8698a0f 1597
ebfd146a
IR
1598 /* Create: ratio_mult_vf = ratio << log2 (vf). */
1599
1600 ratio_mult_vf_name = fold_build2 (LSHIFT_EXPR, TREE_TYPE (ratio_name),
1601 ratio_name, log_vf);
1602 if (!is_gimple_val (ratio_mult_vf_name))
1603 {
1604 var = create_tmp_var (TREE_TYPE (ni), "ratio_mult_vf");
ebfd146a
IR
1605
1606 stmts = NULL;
1607 ratio_mult_vf_name = force_gimple_operand (ratio_mult_vf_name, &stmts,
1608 true, var);
86290011
RG
1609 if (cond_expr_stmt_list)
1610 gimple_seq_add_seq (&cond_expr_stmt_list, stmts);
1611 else
1612 {
1613 pe = loop_preheader_edge (loop);
1614 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
1615 gcc_assert (!new_bb);
1616 }
ebfd146a
IR
1617 }
1618
1619 *ni_name_ptr = ni_name;
1620 *ratio_mult_vf_name_ptr = ratio_mult_vf_name;
1621 *ratio_name_ptr = ratio_name;
b8698a0f
L
1622
1623 return;
ebfd146a
IR
1624}
1625
1626/* Function vect_can_advance_ivs_p
1627
b8698a0f
L
1628 In case the number of iterations that LOOP iterates is unknown at compile
1629 time, an epilog loop will be generated, and the loop induction variables
1630 (IVs) will be "advanced" to the value they are supposed to take just before
ebfd146a
IR
1631 the epilog loop. Here we check that the access function of the loop IVs
1632 and the expression that represents the loop bound are simple enough.
1633 These restrictions will be relaxed in the future. */
1634
b8698a0f 1635bool
ebfd146a
IR
1636vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
1637{
1638 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1639 basic_block bb = loop->header;
1640 gimple phi;
1641 gimple_stmt_iterator gsi;
1642
1643 /* Analyze phi functions of the loop header. */
1644
1645 if (vect_print_dump_info (REPORT_DETAILS))
1646 fprintf (vect_dump, "vect_can_advance_ivs_p:");
1647
1648 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1649 {
1650 tree access_fn = NULL;
1651 tree evolution_part;
1652
1653 phi = gsi_stmt (gsi);
1654 if (vect_print_dump_info (REPORT_DETAILS))
1655 {
1656 fprintf (vect_dump, "Analyze phi: ");
1657 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
1658 }
1659
1660 /* Skip virtual phi's. The data dependences that are associated with
1661 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
1662
ea057359 1663 if (virtual_operand_p (PHI_RESULT (phi)))
1664 {
1665 if (vect_print_dump_info (REPORT_DETAILS))
1666 fprintf (vect_dump, "virtual phi. skip.");
1667 continue;
1668 }
1669
1670 /* Skip reduction phis. */
1671
1672 if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (phi)) == vect_reduction_def)
1673 {
1674 if (vect_print_dump_info (REPORT_DETAILS))
1675 fprintf (vect_dump, "reduc phi. skip.");
1676 continue;
1677 }
1678
1679 /* Analyze the evolution function. */
1680
1681 access_fn = instantiate_parameters
1682 (loop, analyze_scalar_evolution (loop, PHI_RESULT (phi)));
1683
1684 if (!access_fn)
1685 {
1686 if (vect_print_dump_info (REPORT_DETAILS))
1687 fprintf (vect_dump, "No Access function.");
1688 return false;
1689 }
1690
1691 if (vect_print_dump_info (REPORT_DETAILS))
1692 {
1693 fprintf (vect_dump, "Access function of PHI: ");
1694 print_generic_expr (vect_dump, access_fn, TDF_SLIM);
1695 }
1696
1697 evolution_part = evolution_part_in_loop_num (access_fn, loop->num);
b8698a0f 1698
1699 if (evolution_part == NULL_TREE)
1700 {
1701 if (vect_print_dump_info (REPORT_DETAILS))
1702 fprintf (vect_dump, "No evolution.");
1703 return false;
1704 }
1705
1706 /* FORNOW: We do not transform initial conditions of IVs
1707 which evolution functions are a polynomial of degree >= 2. */
1708
1709 if (tree_is_chrec (evolution_part))
b8698a0f 1710 return false;
1711 }
1712
1713 return true;
1714}
1715
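/* Editor's note: illustrative sketch, not part of the original file.  The
   kind of induction variables the check above accepts: 'iv' has a constant
   step (a linear evolution, which can be advanced with a multiply-add),
   while 'acc' has a step that itself changes every iteration (a degree-2
   chrec), which vect_can_advance_ivs_p currently rejects.  */

static void
sketch_iv_shapes (int *out, int n)
{
  int i;
  int iv = 0;                   /* evolution {0, +, 4}: accepted.  */
  int acc = 0, step = 1;        /* evolution {0, +, {1, +, 1}}: rejected.  */

  for (i = 0; i < n; i++)
    {
      out[i] = iv + acc;
      iv += 4;
      acc += step;
      step += 1;
    }
}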
1716
1717/* Function vect_update_ivs_after_vectorizer.
1718
1719 "Advance" the induction variables of LOOP to the value they should take
1720 after the execution of LOOP. This is currently necessary because the
1721 vectorizer does not handle induction variables that are used after the
1722 loop. Such a situation occurs when the last iterations of LOOP are
1723 peeled, because:
1724 1. We introduced new uses after LOOP for IVs that were not originally used
1725 after LOOP: the IVs of LOOP are now used by an epilog loop.
1726 2. LOOP is going to be vectorized; this means that it will iterate N/VF
1727 times, whereas the loop IVs should be bumped N times.
1728
1729 Input:
1730 - LOOP - a loop that is going to be vectorized. The last few iterations
1731 of LOOP were peeled.
1732 - NITERS - the number of iterations that LOOP executes (before it is
1733 vectorized), i.e., the number of times the ivs should be bumped.
1734 - UPDATE_E - a successor edge of LOOP->exit that is on the (only) path
1735 coming out from LOOP on which there are uses of the LOOP ivs
1736 (this is the path from LOOP->exit to epilog_loop->preheader).
1737
1738 The new definitions of the ivs are placed in LOOP->exit.
1739 The phi args associated with the edge UPDATE_E in the bb
1740 UPDATE_E->dest are updated accordingly.
1741
1742 Assumption 1: Like the rest of the vectorizer, this function assumes
1743 a single loop exit that has a single predecessor.
1744
1745 Assumption 2: The phi nodes in the LOOP header and in update_bb are
1746 organized in the same order.
1747
1748 Assumption 3: The access function of the ivs is simple enough (see
1749 vect_can_advance_ivs_p). This assumption will be relaxed in the future.
1750
1751 Assumption 4: Exactly one of the successors of LOOP exit-bb is on a path
b8698a0f 1752 coming out of LOOP on which the ivs of LOOP are used (this is the path
1753 that leads to the epilog loop; other paths skip the epilog loop). This
1754 path starts with the edge UPDATE_E, and its destination (denoted update_bb)
1755 needs to have its phis updated.
1756 */
1757
1758static void
b8698a0f 1759vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
1760 edge update_e)
1761{
1762 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1763 basic_block exit_bb = single_exit (loop)->dest;
1764 gimple phi, phi1;
1765 gimple_stmt_iterator gsi, gsi1;
1766 basic_block update_bb = update_e->dest;
1767
1768 /* gcc_assert (vect_can_advance_ivs_p (loop_vinfo)); */
1769
1770 /* Make sure there exists a single-predecessor exit bb: */
1771 gcc_assert (single_pred_p (exit_bb));
1772
1773 for (gsi = gsi_start_phis (loop->header), gsi1 = gsi_start_phis (update_bb);
1774 !gsi_end_p (gsi) && !gsi_end_p (gsi1);
1775 gsi_next (&gsi), gsi_next (&gsi1))
1776 {
ebfd146a 1777 tree init_expr;
1778 tree step_expr, off;
1779 tree type;
1780 tree var, ni, ni_name;
1781 gimple_stmt_iterator last_gsi;
0ac168a1 1782 stmt_vec_info stmt_info;
1783
1784 phi = gsi_stmt (gsi);
1785 phi1 = gsi_stmt (gsi1);
1786 if (vect_print_dump_info (REPORT_DETAILS))
1787 {
1788 fprintf (vect_dump, "vect_update_ivs_after_vectorizer: phi: ");
1789 print_gimple_stmt (vect_dump, phi, 0, TDF_SLIM);
1790 }
1791
1792 /* Skip virtual phi's. */
ea057359 1793 if (virtual_operand_p (PHI_RESULT (phi)))
1794 {
1795 if (vect_print_dump_info (REPORT_DETAILS))
1796 fprintf (vect_dump, "virtual phi. skip.");
1797 continue;
1798 }
1799
1800 /* Skip reduction phis. */
1801 stmt_info = vinfo_for_stmt (phi);
1802 if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
b8698a0f 1803 {
1804 if (vect_print_dump_info (REPORT_DETAILS))
1805 fprintf (vect_dump, "reduc phi. skip.");
1806 continue;
b8698a0f 1807 }
ebfd146a 1808
1809 type = TREE_TYPE (gimple_phi_result (phi));
1810 step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
1811 step_expr = unshare_expr (step_expr);
b8698a0f 1812
1813 /* FORNOW: We do not support IVs whose evolution function is a polynomial
1814 of degree >= 2 or exponential. */
0ac168a1 1815 gcc_assert (!tree_is_chrec (step_expr));
ebfd146a 1816
0ac168a1 1817 init_expr = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
ebfd146a 1818
1819 off = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
1820 fold_convert (TREE_TYPE (step_expr), niters),
1821 step_expr);
0ac168a1 1822 if (POINTER_TYPE_P (type))
5d49b6a7 1823 ni = fold_build_pointer_plus (init_expr, off);
ebfd146a 1824 else
1825 ni = fold_build2 (PLUS_EXPR, type,
1826 init_expr, fold_convert (type, off));
ebfd146a 1827
0ac168a1 1828 var = create_tmp_var (type, "tmp");
1829
1830 last_gsi = gsi_last_bb (exit_bb);
1831 ni_name = force_gimple_operand_gsi (&last_gsi, ni, false, var,
1832 true, GSI_SAME_STMT);
b8698a0f 1833
ebfd146a 1834 /* Fix phi expressions in the successor bb. */
684f25f4 1835 adjust_phi_and_debug_stmts (phi1, update_e, ni_name);
1836 }
1837}
1838
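/* Editor's note: illustrative sketch, not part of the original file.  For a
   linear IV, the value it must have after NITERS scalar iterations is simply
   init + niters * step; the function above emits that expression at the loop
   exit (using a pointer-plus for pointer-typed IVs) and wires it into the
   phis of the block that leads to the epilogue.  */

static long
sketch_advance_int_iv (long init, long step, unsigned long niters)
{
  return init + (long) niters * step;
}

static char *
sketch_advance_ptr_iv (char *init, long step, unsigned long niters)
{
  return init + (long) niters * step;
}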
1839/* Function vect_do_peeling_for_loop_bound
1840
1841 Peel the last iterations of the loop represented by LOOP_VINFO.
b8698a0f 1842 The peeled iterations form a new epilog loop. Given that the loop now
1843 iterates NITERS times, the new epilog loop iterates
1844 NITERS % VECTORIZATION_FACTOR times.
1845
1846 The original loop will later be made to iterate
1847 NITERS / VECTORIZATION_FACTOR times (this value is placed into RATIO).
1848
1849 COND_EXPR and COND_EXPR_STMT_LIST are combined with a newly generated
1850 test. */
ebfd146a 1851
b8698a0f 1852void
86290011 1853vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
368117e8 1854 unsigned int th, bool check_profitability)
1855{
1856 tree ni_name, ratio_mult_vf_name;
1857 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1858 struct loop *new_loop;
1859 edge update_e;
1860 basic_block preheader;
1861 int loop_num;
d68d56b5 1862 int max_iter;
1863 tree cond_expr = NULL_TREE;
1864 gimple_seq cond_expr_stmt_list = NULL;
1865
1866 if (vect_print_dump_info (REPORT_DETAILS))
1867 fprintf (vect_dump, "=== vect_do_peeling_for_loop_bound ===");
1868
1869 initialize_original_copy_tables ();
1870
1871 /* Generate the following variables on the preheader of original loop:
b8698a0f 1872
1873 ni_name = number of iterations the original loop executes
1874 ratio = ni_name / vf
1875 ratio_mult_vf_name = ratio * vf */
1876 vect_generate_tmps_on_preheader (loop_vinfo, &ni_name,
1877 &ratio_mult_vf_name, ratio,
1878 cond_expr_stmt_list);
ebfd146a 1879
b8698a0f 1880 loop_num = loop->num;
ebfd146a 1881
ebfd146a 1882 new_loop = slpeel_tree_peel_loop_to_edge (loop, single_exit (loop),
5d2eb24b 1883 &ratio_mult_vf_name, ni_name, false,
1884 th, check_profitability,
1885 cond_expr, cond_expr_stmt_list);
1886 gcc_assert (new_loop);
1887 gcc_assert (loop_num == loop->num);
1888#ifdef ENABLE_CHECKING
1889 slpeel_verify_cfg_after_peeling (loop, new_loop);
1890#endif
1891
1892 /* A guard that controls whether the new_loop is to be executed or skipped
1893 is placed in LOOP->exit. LOOP->exit therefore has two successors - one
1894 is the preheader of NEW_LOOP, where the IVs from LOOP are used. The other
1895 is a bb after NEW_LOOP, where these IVs are not used. Find the edge that
1896 is on the path where the LOOP IVs are used and need to be updated. */
1897
1898 preheader = loop_preheader_edge (new_loop)->src;
1899 if (EDGE_PRED (preheader, 0)->src == single_exit (loop)->dest)
1900 update_e = EDGE_PRED (preheader, 0);
1901 else
1902 update_e = EDGE_PRED (preheader, 1);
1903
b8698a0f 1904 /* Update IVs of original loop as if they were advanced
ebfd146a 1905 by ratio_mult_vf_name steps. */
b8698a0f 1906 vect_update_ivs_after_vectorizer (loop_vinfo, ratio_mult_vf_name, update_e);
ebfd146a 1907
1908 max_iter = LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1;
1909 if (check_profitability)
1910 max_iter = MAX (max_iter, (int) th);
27bcd47c 1911 record_niter_bound (new_loop, double_int::from_shwi (max_iter), false, true);
1912 if (dump_file && (dump_flags & TDF_DETAILS))
1913 fprintf (dump_file, "Setting upper bound of nb iterations for epilogue "
1914 "loop to %d\n", max_iter);
7d5a99f4 1915
1916 /* After peeling we have to reset scalar evolution analyzer. */
1917 scev_reset ();
1918
1919 free_original_copy_tables ();
1920}
1921
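/* Editor's note: illustrative sketch, not part of the original file.  The
   shape of the transformation above for a simple loop and VF == 4: the main
   loop executes ratio_mult_vf iterations' worth of work (shown unrolled),
   and the peeled epilogue handles the remaining n % 4 iterations.  */

static void
sketch_peel_for_loop_bound (int *a, const int *b, int n)
{
  int i;
  int ratio_mult_vf = n & ~3;   /* (n >> 2) << 2 */

  /* "Vectorized" main loop: ratio = n / 4 passes, 4 elements per pass.  */
  for (i = 0; i < ratio_mult_vf; i += 4)
    {
      a[i] += b[i];
      a[i + 1] += b[i + 1];
      a[i + 2] += b[i + 2];
      a[i + 3] += b[i + 3];
    }

  /* Epilogue loop: the last n % 4 iterations remain scalar.  */
  for (; i < n; i++)
    a[i] += b[i];
}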
1922
1923/* Function vect_gen_niters_for_prolog_loop
1924
1925 Set the number of iterations for the loop represented by LOOP_VINFO
1926 to the minimum between LOOP_NITERS (the original iteration count of the loop)
1927 and the misalignment of DR - the data reference recorded in
b8698a0f 1928 LOOP_VINFO_UNALIGNED_DR (LOOP_VINFO). As a result, after the execution of
1929 this loop, the data reference DR will refer to an aligned location.
1930
1931 The following computation is generated:
1932
1933 If the misalignment of DR is known at compile time:
1934 addr_mis = int mis = DR_MISALIGNMENT (dr);
1935 Else, compute address misalignment in bytes:
5aea1e76 1936 addr_mis = addr & (vectype_align - 1)
1937
1938 prolog_niters = min (LOOP_NITERS, ((VF - addr_mis/elem_size)&(VF-1))/step)
1939
1940 (elem_size = element type size; an element is the scalar element whose type
1941 is the inner type of the vectype)
1942
1943 When the step of the data-ref in the loop is not 1 (as in interleaved data
1944 and SLP), the number of iterations of the prolog must be divided by the step
1945 (which is equal to the size of the interleaved group).
1946
1947 The above formulas assume that VF == number of elements in the vector. This
1948 may not hold when there are multiple-types in the loop.
1949 In this case, for some data-references in the loop the VF does not represent
1950 the number of elements that fit in the vector. Therefore, instead of VF we
1951 use TYPE_VECTOR_SUBPARTS. */
1952
b8698a0f 1953static tree
5d2eb24b 1954vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters)
1955{
1956 struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
1957 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1958 tree var;
1959 gimple_seq stmts;
1960 tree iters, iters_name;
1961 edge pe;
1962 basic_block new_bb;
1963 gimple dr_stmt = DR_STMT (dr);
1964 stmt_vec_info stmt_info = vinfo_for_stmt (dr_stmt);
1965 tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1966 int vectype_align = TYPE_ALIGN (vectype) / BITS_PER_UNIT;
1967 tree niters_type = TREE_TYPE (loop_niters);
1968 int nelements = TYPE_VECTOR_SUBPARTS (vectype);
1969
b8698a0f 1970 pe = loop_preheader_edge (loop);
1971
1972 if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
1973 {
720f5239 1974 int npeel = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
1975
1976 if (vect_print_dump_info (REPORT_DETAILS))
720f5239 1977 fprintf (vect_dump, "known peeling = %d.", npeel);
ebfd146a 1978
720f5239 1979 iters = build_int_cst (niters_type, npeel);
1980 }
1981 else
1982 {
1983 gimple_seq new_stmts = NULL;
1984 bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
1985 tree offset = negative
1986 ? size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1) : NULL_TREE;
b8698a0f 1987 tree start_addr = vect_create_addr_base_for_vector_ref (dr_stmt,
d8ba5b19 1988 &new_stmts, offset, loop);
96f9265a 1989 tree type = unsigned_type_for (TREE_TYPE (start_addr));
1990 tree vectype_align_minus_1 = build_int_cst (type, vectype_align - 1);
1991 HOST_WIDE_INT elem_size =
1992 int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
1993 tree elem_size_log = build_int_cst (type, exact_log2 (elem_size));
1994 tree nelements_minus_1 = build_int_cst (type, nelements - 1);
1995 tree nelements_tree = build_int_cst (type, nelements);
1996 tree byte_misalign;
1997 tree elem_misalign;
1998
1999 new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmts);
2000 gcc_assert (!new_bb);
b8698a0f 2001
5aea1e76 2002 /* Create: byte_misalign = addr & (vectype_align - 1) */
b8698a0f 2003 byte_misalign =
720f5239 2004 fold_build2 (BIT_AND_EXPR, type, fold_convert (type, start_addr),
5aea1e76 2005 vectype_align_minus_1);
b8698a0f 2006
2007 /* Create: elem_misalign = byte_misalign / element_size */
2008 elem_misalign =
2009 fold_build2 (RSHIFT_EXPR, type, byte_misalign, elem_size_log);
2010
2011 /* Create: (niters_type) (nelements - elem_misalign)&(nelements - 1) */
2012 if (negative)
2013 iters = fold_build2 (MINUS_EXPR, type, elem_misalign, nelements_tree);
2014 else
2015 iters = fold_build2 (MINUS_EXPR, type, nelements_tree, elem_misalign);
2016 iters = fold_build2 (BIT_AND_EXPR, type, iters, nelements_minus_1);
2017 iters = fold_convert (niters_type, iters);
2018 }
2019
2020 /* Create: prolog_loop_niters = min (iters, loop_niters) */
2021 /* If the loop bound is known at compile time we already verified that it is
2022 greater than vf; since the misalignment ('iters') is at most vf, there's
2023 no need to generate the MIN_EXPR in this case. */
2024 if (TREE_CODE (loop_niters) != INTEGER_CST)
2025 iters = fold_build2 (MIN_EXPR, niters_type, iters, loop_niters);
2026
2027 if (vect_print_dump_info (REPORT_DETAILS))
2028 {
2029 fprintf (vect_dump, "niters for prolog loop: ");
2030 print_generic_expr (vect_dump, iters, TDF_SLIM);
2031 }
2032
2033 var = create_tmp_var (niters_type, "prolog_loop_niters");
2034 stmts = NULL;
2035 iters_name = force_gimple_operand (iters, &stmts, false, var);
2036
2037 /* Insert stmt on loop preheader edge. */
2038 if (stmts)
2039 {
2040 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
2041 gcc_assert (!new_bb);
2042 }
2043
b8698a0f 2044 return iters_name;
2045}
2046
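/* Editor's note: illustrative sketch, not part of the original file.  The
   run-time computation built above when the misalignment is not known at
   compile time, written on plain integers.  'valign', 'elem_size' and
   'nelements' are assumed to be powers of two, as required above.  */

static unsigned long
sketch_prolog_niters (unsigned long addr, unsigned long valign,
                      unsigned long elem_size, unsigned long nelements,
                      unsigned long loop_niters)
{
  unsigned long byte_misalign = addr & (valign - 1);
  unsigned long elem_misalign = byte_misalign / elem_size;
  unsigned long iters = (nelements - elem_misalign) & (nelements - 1);

  /* prolog_loop_niters = min (iters, loop_niters).  */
  return iters < loop_niters ? iters : loop_niters;
}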
2047
2048/* Function vect_update_init_of_dr
2049
2050 NITERS iterations were peeled from LOOP. DR represents a data reference
2051 in LOOP. This function updates the information recorded in DR to
b8698a0f 2052 account for the fact that the first NITERS iterations had already been
2053 executed. Specifically, it updates the OFFSET field of DR. */
2054
2055static void
2056vect_update_init_of_dr (struct data_reference *dr, tree niters)
2057{
2058 tree offset = DR_OFFSET (dr);
b8698a0f 2059
2060 niters = fold_build2 (MULT_EXPR, sizetype,
2061 fold_convert (sizetype, niters),
2062 fold_convert (sizetype, DR_STEP (dr)));
2063 offset = fold_build2 (PLUS_EXPR, sizetype,
2064 fold_convert (sizetype, offset), niters);
2065 DR_OFFSET (dr) = offset;
2066}
2067
2068
2069/* Function vect_update_inits_of_drs
2070
2071 NITERS iterations were peeled from the loop represented by LOOP_VINFO.
2072 This function updates the information recorded for the data references in
2073 the loop to account for the fact that the first NITERS iterations had
2074 already been executed. Specifically, it updates the initial_condition of
2075 the access_function of all the data_references in the loop. */
2076
2077static void
2078vect_update_inits_of_drs (loop_vec_info loop_vinfo, tree niters)
2079{
2080 unsigned int i;
2081 VEC (data_reference_p, heap) *datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2082 struct data_reference *dr;
2083
2084 if (vect_print_dump_info (REPORT_DETAILS))
2085 fprintf (vect_dump, "=== vect_update_inits_of_dr ===");
2086
ac47786e 2087 FOR_EACH_VEC_ELT (data_reference_p, datarefs, i, dr)
2088 vect_update_init_of_dr (dr, niters);
2089}
2090
2091
2092/* Function vect_do_peeling_for_alignment
2093
2094 Peel the first 'niters' iterations of the loop represented by LOOP_VINFO.
2095 'niters' is computed from the misalignment of one of the data references in
2096 the loop, so that after the peeled iterations that data reference refers to
2097 an aligned location. The data reference for which we are
2098 peeling is recorded in LOOP_VINFO_UNALIGNED_DR. */
2099
2100void
2101vect_do_peeling_for_alignment (loop_vec_info loop_vinfo,
2102 unsigned int th, bool check_profitability)
2103{
2104 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2105 tree niters_of_prolog_loop, ni_name;
2106 tree n_iters;
b61b1f17 2107 tree wide_prolog_niters;
ebfd146a 2108 struct loop *new_loop;
03fd03d5 2109 int max_iter;
2110
2111 if (vect_print_dump_info (REPORT_DETAILS))
2112 fprintf (vect_dump, "=== vect_do_peeling_for_alignment ===");
2113
2114 initialize_original_copy_tables ();
2115
86290011 2116 ni_name = vect_build_loop_niters (loop_vinfo, NULL);
2117 niters_of_prolog_loop = vect_gen_niters_for_prolog_loop (loop_vinfo,
2118 ni_name);
ebfd146a 2119
2120 /* Peel the prolog loop and iterate it niters_of_prolog_loop times. */
2121 new_loop =
2122 slpeel_tree_peel_loop_to_edge (loop, loop_preheader_edge (loop),
5d2eb24b 2123 &niters_of_prolog_loop, ni_name, true,
368117e8 2124 th, check_profitability, NULL_TREE, NULL);
2125
2126 gcc_assert (new_loop);
2127#ifdef ENABLE_CHECKING
2128 slpeel_verify_cfg_after_peeling (new_loop, loop);
2129#endif
2130 max_iter = LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 1;
2131 if (check_profitability)
2132 max_iter = MAX (max_iter, (int) th);
27bcd47c 2133 record_niter_bound (new_loop, double_int::from_shwi (max_iter), false, true);
2134 if (dump_file && (dump_flags & TDF_DETAILS))
2135 fprintf (dump_file, "Setting upper bound of nb iterations for prologue "
2136 "loop to %d\n", max_iter);
2137
2138 /* Update number of times loop executes. */
2139 n_iters = LOOP_VINFO_NITERS (loop_vinfo);
2140 LOOP_VINFO_NITERS (loop_vinfo) = fold_build2 (MINUS_EXPR,
2141 TREE_TYPE (n_iters), n_iters, niters_of_prolog_loop);
2142
2143 if (types_compatible_p (sizetype, TREE_TYPE (niters_of_prolog_loop)))
2144 wide_prolog_niters = niters_of_prolog_loop;
2145 else
2146 {
2147 gimple_seq seq = NULL;
2148 edge pe = loop_preheader_edge (loop);
2149 tree wide_iters = fold_convert (sizetype, niters_of_prolog_loop);
2150 tree var = create_tmp_var (sizetype, "prolog_loop_adjusted_niters");
2151 wide_prolog_niters = force_gimple_operand (wide_iters, &seq, false,
2152 var);
2153 if (seq)
2154 {
2155 /* Insert stmt on loop preheader edge. */
2156 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
2157 gcc_assert (!new_bb);
2158 }
2159 }
2160
ebfd146a 2161 /* Update the init conditions of the access functions of all data refs. */
b61b1f17 2162 vect_update_inits_of_drs (loop_vinfo, wide_prolog_niters);
2163
2164 /* After peeling we have to reset scalar evolution analyzer. */
2165 scev_reset ();
2166
2167 free_original_copy_tables ();
2168}
2169
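/* Editor's note: illustrative sketch, not part of the original file.  The
   shape of the transformation above for 16-byte vectors of 4-byte ints,
   assuming an unsigned long can hold a pointer value: a short scalar
   prologue of at most VF-1 iterations runs until 'a' reaches a 16-byte
   boundary, after which the remaining loop starts on an aligned address.  */

static void
sketch_peel_for_alignment (int *a, int n)
{
  int i = 0;
  int misalign_elems = (int) (((unsigned long) a & 15) / sizeof (int));
  int prolog = misalign_elems ? 4 - misalign_elems : 0;

  if (prolog > n)
    prolog = n;

  for (; i < prolog; i++)       /* prologue: peeled scalar iterations */
    a[i] += 1;
  for (; i < n; i++)            /* main loop: aligned, can be vectorized */
    a[i] += 1;
}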
2170
2171/* Function vect_create_cond_for_align_checks.
2172
2173 Create a conditional expression that represents the alignment checks for
2174 all of data references (array element references) whose alignment must be
2175 checked at runtime.
2176
2177 Input:
2178 COND_EXPR - input conditional expression. New conditions will be chained
2179 with logical AND operation.
2180 LOOP_VINFO - two fields of the loop information are used.
2181 LOOP_VINFO_PTR_MASK is the mask used to check the alignment.
2182 LOOP_VINFO_MAY_MISALIGN_STMTS contains the refs to be checked.
2183
2184 Output:
2185 COND_EXPR_STMT_LIST - statements needed to construct the conditional
2186 expression.
2187 The returned value is the conditional expression to be used in the if
2188 statement that controls which version of the loop gets executed at runtime.
2189
2190 The algorithm makes two assumptions:
2191 1) The number of bytes "n" in a vector is a power of 2.
2192 2) An address "a" is aligned if a%n is zero, and this
2193 test can be done as a&(n-1) == 0. For example, for 16
2194 byte vectors the test is a&0xf == 0. */
2195
2196static void
2197vect_create_cond_for_align_checks (loop_vec_info loop_vinfo,
2198 tree *cond_expr,
2199 gimple_seq *cond_expr_stmt_list)
2200{
2201 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2202 VEC(gimple,heap) *may_misalign_stmts
2203 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
2204 gimple ref_stmt;
2205 int mask = LOOP_VINFO_PTR_MASK (loop_vinfo);
2206 tree mask_cst;
2207 unsigned int i;
2208 tree int_ptrsize_type;
2209 char tmp_name[20];
2210 tree or_tmp_name = NULL_TREE;
83d5977e 2211 tree and_tmp_name;
2212 gimple and_stmt;
2213 tree ptrsize_zero;
2214 tree part_cond_expr;
2215
2216 /* Check that mask is one less than a power of 2, i.e., mask is
2217 all zeros followed by all ones. */
2218 gcc_assert ((mask != 0) && ((mask & (mask+1)) == 0));
2219
96f9265a 2220 int_ptrsize_type = signed_type_for (ptr_type_node);
2221
2222 /* Create expression (mask & (dr_1 | ... | dr_n)) where dr_i is the address
2223 of the first vector of the i'th data reference. */
2224
ac47786e 2225 FOR_EACH_VEC_ELT (gimple, may_misalign_stmts, i, ref_stmt)
2226 {
2227 gimple_seq new_stmt_list = NULL;
2228 tree addr_base;
2229 tree addr_tmp_name;
2230 tree new_or_tmp_name;
ebfd146a 2231 gimple addr_stmt, or_stmt;
2232 stmt_vec_info stmt_vinfo = vinfo_for_stmt (ref_stmt);
2233 tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
2234 bool negative = tree_int_cst_compare
2235 (DR_STEP (STMT_VINFO_DATA_REF (stmt_vinfo)), size_zero_node) < 0;
2236 tree offset = negative
2237 ? size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1) : NULL_TREE;
2238
2239 /* create: addr_tmp = (int)(address_of_first_vector) */
2240 addr_base =
2241 vect_create_addr_base_for_vector_ref (ref_stmt, &new_stmt_list,
d8ba5b19 2242 offset, loop);
2243 if (new_stmt_list != NULL)
2244 gimple_seq_add_seq (cond_expr_stmt_list, new_stmt_list);
2245
2246 sprintf (tmp_name, "addr2int%d", i);
2247 addr_tmp_name = make_temp_ssa_name (int_ptrsize_type, NULL, tmp_name);
2248 addr_stmt = gimple_build_assign_with_ops (NOP_EXPR, addr_tmp_name,
2249 addr_base, NULL_TREE);
2250 gimple_seq_add_stmt (cond_expr_stmt_list, addr_stmt);
2251
2252 /* The addresses are ORed together. */
2253
2254 if (or_tmp_name != NULL_TREE)
2255 {
2256 /* create: or_tmp = or_tmp | addr_tmp */
2257 sprintf (tmp_name, "orptrs%d", i);
2258 new_or_tmp_name = make_temp_ssa_name (int_ptrsize_type, NULL, tmp_name);
2259 or_stmt = gimple_build_assign_with_ops (BIT_IOR_EXPR,
2260 new_or_tmp_name,
2261 or_tmp_name, addr_tmp_name);
2262 gimple_seq_add_stmt (cond_expr_stmt_list, or_stmt);
2263 or_tmp_name = new_or_tmp_name;
2264 }
2265 else
2266 or_tmp_name = addr_tmp_name;
2267
2268 } /* end for i */
2269
2270 mask_cst = build_int_cst (int_ptrsize_type, mask);
2271
2272 /* create: and_tmp = or_tmp & mask */
83d5977e 2273 and_tmp_name = make_temp_ssa_name (int_ptrsize_type, NULL, "andmask");
2274
2275 and_stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, and_tmp_name,
2276 or_tmp_name, mask_cst);
2277 gimple_seq_add_stmt (cond_expr_stmt_list, and_stmt);
2278
2279 /* Make and_tmp the left operand of the conditional test against zero.
2280 if and_tmp has a nonzero bit then some address is unaligned. */
2281 ptrsize_zero = build_int_cst (int_ptrsize_type, 0);
2282 part_cond_expr = fold_build2 (EQ_EXPR, boolean_type_node,
2283 and_tmp_name, ptrsize_zero);
2284 if (*cond_expr)
2285 *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2286 *cond_expr, part_cond_expr);
2287 else
2288 *cond_expr = part_cond_expr;
2289}
2290
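/* Editor's note: illustrative sketch, not part of the original file.  The
   run-time test built above: OR all possibly misaligned addresses together,
   mask with vector_size - 1, and compare against zero.  The result is
   nonzero iff at least one address is misaligned.  Pointers are cast to
   unsigned long purely for illustration.  */

static int
sketch_all_addresses_aligned_p (void *const *addrs, int n, unsigned long mask)
{
  unsigned long or_tmp = 0;
  int i;

  for (i = 0; i < n; i++)
    or_tmp |= (unsigned long) addrs[i];

  return (or_tmp & mask) == 0;
}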
2291
2292/* Function vect_vfa_segment_size.
2293
2294 Create an expression that computes the size of segment
2295 that will be accessed for a data reference. The function takes into
2296 account that realignment loads may access one more vector.
2297
2298 Input:
2299 DR: The data reference.
208cb8cb 2300 LENGTH_FACTOR: segment length to consider.
2301
2302 Return an expression whose value is the size of segment which will be
2303 accessed by DR. */
2304
2305static tree
208cb8cb 2306vect_vfa_segment_size (struct data_reference *dr, tree length_factor)
ebfd146a 2307{
e2a3a5f1 2308 tree segment_length;
338f655d 2309
319e6439 2310 if (integer_zerop (DR_STEP (dr)))
2311 segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
2312 else
2313 segment_length = size_binop (MULT_EXPR,
2314 fold_convert (sizetype, DR_STEP (dr)),
2315 fold_convert (sizetype, length_factor));
2316
2317 if (vect_supportable_dr_alignment (dr, false)
2318 == dr_explicit_realign_optimized)
2319 {
2320 tree vector_size = TYPE_SIZE_UNIT
2321 (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))));
2322
e2a3a5f1 2323 segment_length = size_binop (PLUS_EXPR, segment_length, vector_size);
ebfd146a 2324 }
e2a3a5f1 2325 return segment_length;
2326}
2327
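/* Editor's note: illustrative sketch, not part of the original file.  The
   segment size computed above, on plain integers and assuming a
   non-negative step: one scalar element for an invariant access, otherwise
   step * length_factor, plus one extra vector when the optimized
   realignment scheme may load one vector beyond the segment.  */

static unsigned long
sketch_segment_size (unsigned long step, unsigned long elem_size,
                     unsigned long length_factor, unsigned long vector_size,
                     int realign_optimized)
{
  unsigned long len = step == 0 ? elem_size : step * length_factor;

  if (realign_optimized)
    len += vector_size;
  return len;
}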
2328
2329/* Function vect_create_cond_for_alias_checks.
2330
2331 Create a conditional expression that represents the run-time checks for
2332 overlapping of address ranges represented by a list of data references
2333 relations passed as input.
2334
2335 Input:
2336 COND_EXPR - input conditional expression. New conditions will be chained
2337 with logical AND operation.
2338 LOOP_VINFO - field LOOP_VINFO_MAY_ALIAS_STMTS contains the list of ddrs
2339 to be checked.
2340
2341 Output:
2342 COND_EXPR - conditional expression.
2343 COND_EXPR_STMT_LIST - statements needed to construct the conditional
2344 expression.
2345
2346
2347 The returned value is the conditional expression to be used in the if
2348 statement that controls which version of the loop gets executed at runtime.
2349*/
2350
2351static void
2352vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo,
2353 tree * cond_expr,
2354 gimple_seq * cond_expr_stmt_list)
2355{
2356 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2357 VEC (ddr_p, heap) * may_alias_ddrs =
2358 LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
2359 int vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2360 tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
2361
2362 ddr_p ddr;
2363 unsigned int i;
208cb8cb 2364 tree part_cond_expr, length_factor;
2365
2366 /* Create expression
2367 (((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
2368 || ((load_ptr_0 + load_segment_length_0) <= store_ptr_0))
b8698a0f 2369 &&
2370 ...
2371 &&
2372 (((store_ptr_n + store_segment_length_n) <= load_ptr_n)
2373 || ((load_ptr_n + load_segment_length_n) <= store_ptr_n)) */
2374
2375 if (VEC_empty (ddr_p, may_alias_ddrs))
2376 return;
2377
ac47786e 2378 FOR_EACH_VEC_ELT (ddr_p, may_alias_ddrs, i, ddr)
2379 {
2380 struct data_reference *dr_a, *dr_b;
2381 gimple dr_group_first_a, dr_group_first_b;
2382 tree addr_base_a, addr_base_b;
2383 tree segment_length_a, segment_length_b;
2384 gimple stmt_a, stmt_b;
d8ba5b19 2385 tree seg_a_min, seg_a_max, seg_b_min, seg_b_max;
2386
2387 dr_a = DDR_A (ddr);
2388 stmt_a = DR_STMT (DDR_A (ddr));
e14c1050 2389 dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
2390 if (dr_group_first_a)
2391 {
2392 stmt_a = dr_group_first_a;
2393 dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
2394 }
2395
2396 dr_b = DDR_B (ddr);
2397 stmt_b = DR_STMT (DDR_B (ddr));
e14c1050 2398 dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
2399 if (dr_group_first_b)
2400 {
2401 stmt_b = dr_group_first_b;
2402 dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b));
2403 }
2404
2405 addr_base_a =
2406 vect_create_addr_base_for_vector_ref (stmt_a, cond_expr_stmt_list,
2407 NULL_TREE, loop);
2408 addr_base_b =
2409 vect_create_addr_base_for_vector_ref (stmt_b, cond_expr_stmt_list,
2410 NULL_TREE, loop);
2411
2412 if (!operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0))
2413 length_factor = scalar_loop_iters;
2414 else
2415 length_factor = size_int (vect_factor);
2416 segment_length_a = vect_vfa_segment_size (dr_a, length_factor);
2417 segment_length_b = vect_vfa_segment_size (dr_b, length_factor);
2418
2419 if (vect_print_dump_info (REPORT_DR_DETAILS))
2420 {
2421 fprintf (vect_dump,
2422 "create runtime check for data references ");
2423 print_generic_expr (vect_dump, DR_REF (dr_a), TDF_SLIM);
2424 fprintf (vect_dump, " and ");
2425 print_generic_expr (vect_dump, DR_REF (dr_b), TDF_SLIM);
2426 }
2427
d8ba5b19 2428 seg_a_min = addr_base_a;
5d49b6a7 2429 seg_a_max = fold_build_pointer_plus (addr_base_a, segment_length_a);
2430 if (tree_int_cst_compare (DR_STEP (dr_a), size_zero_node) < 0)
2431 seg_a_min = seg_a_max, seg_a_max = addr_base_a;
2432
2433 seg_b_min = addr_base_b;
5d49b6a7 2434 seg_b_max = fold_build_pointer_plus (addr_base_b, segment_length_b);
2435 if (tree_int_cst_compare (DR_STEP (dr_b), size_zero_node) < 0)
2436 seg_b_min = seg_b_max, seg_b_max = addr_base_b;
ebfd146a 2437
b8698a0f 2438 part_cond_expr =
ebfd146a 2439 fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
2440 fold_build2 (LE_EXPR, boolean_type_node, seg_a_max, seg_b_min),
2441 fold_build2 (LE_EXPR, boolean_type_node, seg_b_max, seg_a_min));
b8698a0f 2442
2443 if (*cond_expr)
2444 *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2445 *cond_expr, part_cond_expr);
2446 else
2447 *cond_expr = part_cond_expr;
2448 }
ebfd146a 2449
2450 if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
2451 fprintf (vect_dump, "created %u versioning for alias checks.\n",
2452 VEC_length (ddr_p, may_alias_ddrs));
2453}
2454
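/* Editor's note: illustrative sketch, not part of the original file.  The
   per-pair run-time test built above: the two accessed segments
   [a_min, a_max) and [b_min, b_max) are independent iff one ends before the
   other begins, and the vectorized loop version is entered only when this
   holds for every recorded pair.  Addresses are shown as unsigned long for
   illustration.  */

static int
sketch_segments_independent_p (unsigned long seg_a_min, unsigned long seg_a_max,
                               unsigned long seg_b_min, unsigned long seg_b_max)
{
  return seg_a_max <= seg_b_min || seg_b_max <= seg_a_min;
}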
2455
2456/* Function vect_loop_versioning.
b8698a0f 2457
2458 If the loop has data references that may or may not be aligned and/or
2459 has data reference relations whose independence was not proven, then
2460 two versions of the loop need to be generated, one which is vectorized
2461 and one which isn't. A test is then generated to control which of the
2462 loops is executed. The test checks for the alignment of all of the
2463 data references that may or may not be aligned. An additional
2464 sequence of runtime tests is generated for each pair of DDRs whose
2465 independence was not proven. The vectorized version of the loop is
2466 executed only if both alias and alignment tests are passed.
2467
ebfd146a 2468 The test generated to check which version of the loop is executed
b8698a0f 2469 is modified to also check for profitability as indicated by the
2470 cost model initially.
2471
2472 The versioning precondition(s) are placed in *COND_EXPR and
d68d56b5 2473 *COND_EXPR_STMT_LIST. */
2474
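/* Editor's note: illustrative sketch, not part of the original file.  The
   control structure produced by loop versioning for a simple copy loop with
   VF == 4; 'cond' stands for the combined profitability, alignment and
   alias checks described above.  */

static void
sketch_versioned_loop (int *a, const int *b, int n, int cond)
{
  int i;

  if (cond)
    {
      /* Version selected for vectorization (shown unrolled by VF).  */
      for (i = 0; i + 4 <= n; i += 4)
        {
          a[i] = b[i];
          a[i + 1] = b[i + 1];
          a[i + 2] = b[i + 2];
          a[i + 3] = b[i + 3];
        }
      for (; i < n; i++)
        a[i] = b[i];
    }
  else
    {
      /* Unmodified scalar version.  */
      for (i = 0; i < n; i++)
        a[i] = b[i];
    }
}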
2475void
2476vect_loop_versioning (loop_vec_info loop_vinfo,
2477 unsigned int th, bool check_profitability)
2478{
2479 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2480 basic_block condition_bb;
2481 gimple_stmt_iterator gsi, cond_exp_gsi;
2482 basic_block merge_bb;
2483 basic_block new_exit_bb;
2484 edge new_exit_e, e;
2485 gimple orig_phi, new_phi;
368117e8 2486 tree cond_expr = NULL_TREE;
d68d56b5 2487 gimple_seq cond_expr_stmt_list = NULL;
2488 tree arg;
2489 unsigned prob = 4 * REG_BR_PROB_BASE / 5;
2490 gimple_seq gimplify_stmt_list = NULL;
2491 tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
ebfd146a 2492
2493 if (check_profitability)
2494 {
2495 cond_expr = fold_build2 (GT_EXPR, boolean_type_node, scalar_loop_iters,
2496 build_int_cst (TREE_TYPE (scalar_loop_iters), th));
2497 cond_expr = force_gimple_operand_1 (cond_expr, &cond_expr_stmt_list,
2498 is_gimple_condexpr, NULL_TREE);
2499 }
ebfd146a 2500
e9dbe7bb 2501 if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
2502 vect_create_cond_for_align_checks (loop_vinfo, &cond_expr,
2503 &cond_expr_stmt_list);
ebfd146a 2504
e9dbe7bb 2505 if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
2506 vect_create_cond_for_alias_checks (loop_vinfo, &cond_expr,
2507 &cond_expr_stmt_list);
86290011 2508
2509 cond_expr = force_gimple_operand_1 (cond_expr, &gimplify_stmt_list,
2510 is_gimple_condexpr, NULL_TREE);
2511 gimple_seq_add_seq (&cond_expr_stmt_list, gimplify_stmt_list);
2512
2513 initialize_original_copy_tables ();
d68d56b5 2514 loop_version (loop, cond_expr, &condition_bb,
0f900dfa 2515 prob, prob, REG_BR_PROB_BASE - prob, true);
2516 free_original_copy_tables();
2517
b8698a0f 2518 /* Loop versioning violates an assumption we try to maintain during
2519 vectorization - that the loop exit block has a single predecessor.
2520 After versioning, the exit block of both loop versions is the same
2521 basic block (i.e. it has two predecessors). Just in order to simplify
2522 following transformations in the vectorizer, we fix this situation
2523 here by adding a new (empty) block on the exit-edge of the loop,
2524 with the proper loop-exit phis to maintain loop-closed-form. */
b8698a0f 2525
2526 merge_bb = single_exit (loop)->dest;
2527 gcc_assert (EDGE_COUNT (merge_bb->preds) == 2);
2528 new_exit_bb = split_edge (single_exit (loop));
2529 new_exit_e = single_exit (loop);
2530 e = EDGE_SUCC (new_exit_bb, 0);
2531
2532 for (gsi = gsi_start_phis (merge_bb); !gsi_end_p (gsi); gsi_next (&gsi))
2533 {
070ecdfd 2534 tree new_res;
ebfd146a 2535 orig_phi = gsi_stmt (gsi);
2536 new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
2537 new_phi = create_phi_node (new_res, new_exit_bb);
ebfd146a 2538 arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
b8698a0f 2539 add_phi_arg (new_phi, arg, new_exit_e,
9e227d60 2540 gimple_phi_arg_location_from_edge (orig_phi, e));
684f25f4 2541 adjust_phi_and_debug_stmts (orig_phi, e, PHI_RESULT (new_phi));
b8698a0f 2542 }
2543
2544 /* End loop-exit-fixes after versioning. */
2545
2546 update_ssa (TODO_update_ssa);
d68d56b5 2547 if (cond_expr_stmt_list)
2548 {
2549 cond_exp_gsi = gsi_last_bb (condition_bb);
d68d56b5 2550 gsi_insert_seq_before (&cond_exp_gsi, cond_expr_stmt_list,
86290011 2551 GSI_SAME_STMT);
2552 }
2553}