/* gcc/tree-vect-loop-manip.c — from thirdparty/gcc.git
   (copyright years updated for files modified in 2011 and/or 2012).  */
1 /* Vectorizer Specific Loop Manipulations
2 Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
3 Free Software Foundation, Inc.
4 Contributed by Dorit Naishlos <dorit@il.ibm.com>
5 and Ira Rosen <irar@il.ibm.com>
6
7 This file is part of GCC.
8
9 GCC is free software; you can redistribute it and/or modify it under
10 the terms of the GNU General Public License as published by the Free
11 Software Foundation; either version 3, or (at your option) any later
12 version.
13
14 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "dumpfile.h"
27 #include "tm.h"
28 #include "ggc.h"
29 #include "tree.h"
30 #include "basic-block.h"
31 #include "gimple-pretty-print.h"
32 #include "tree-flow.h"
33 #include "tree-pass.h"
34 #include "cfgloop.h"
35 #include "diagnostic-core.h"
36 #include "tree-scalar-evolution.h"
37 #include "tree-vectorizer.h"
38 #include "langhooks.h"
39
40 /*************************************************************************
41 Simple Loop Peeling Utilities
42
43 Utilities to support loop peeling for vectorization purposes.
44 *************************************************************************/
45
46
47 /* Renames the use *OP_P. */
48
49 static void
50 rename_use_op (use_operand_p op_p)
51 {
52 tree new_name;
53
54 if (TREE_CODE (USE_FROM_PTR (op_p)) != SSA_NAME)
55 return;
56
57 new_name = get_current_def (USE_FROM_PTR (op_p));
58
59 /* Something defined outside of the loop. */
60 if (!new_name)
61 return;
62
63 /* An ordinary ssa name defined in the loop. */
64
65 SET_USE (op_p, new_name);
66 }
67
68
69 /* Renames the variables in basic block BB. */
70
71 void
72 rename_variables_in_bb (basic_block bb)
73 {
74 gimple_stmt_iterator gsi;
75 gimple stmt;
76 use_operand_p use_p;
77 ssa_op_iter iter;
78 edge e;
79 edge_iterator ei;
80 struct loop *loop = bb->loop_father;
81
82 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
83 {
84 stmt = gsi_stmt (gsi);
85 FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
86 rename_use_op (use_p);
87 }
88
89 FOR_EACH_EDGE (e, ei, bb->succs)
90 {
91 if (!flow_bb_inside_loop_p (loop, e->dest))
92 continue;
93 for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
94 rename_use_op (PHI_ARG_DEF_PTR_FROM_EDGE (gsi_stmt (gsi), e));
95 }
96 }
97
98
99 /* Renames variables in new generated LOOP. */
100
101 void
102 rename_variables_in_loop (struct loop *loop)
103 {
104 unsigned i;
105 basic_block *bbs;
106
107 bbs = get_loop_body (loop);
108
109 for (i = 0; i < loop->num_nodes; i++)
110 rename_variables_in_bb (bbs[i]);
111
112 free (bbs);
113 }
114
/* A pending debug-stmt adjustment: uses of FROM that are dominated by
   BB (and not by FROM's definition) are to be replaced by TO.  */
typedef struct
{
  tree from, to;
  basic_block bb;
} adjust_info;

/* A stack of values to be adjusted in debug stmts.  We have to
   process them LIFO, so that the closest substitution applies.  If we
   processed them FIFO, without the stack, we might substitute uses
   with a PHI DEF that would soon become non-dominant, and when we got
   to the suitable one, it wouldn't have anything to substitute any
   more.  */
static vec<adjust_info, va_stack> adjust_vec;
128
/* Adjust any debug stmts that referenced AI->from values to use the
   loop-closed AI->to, if the references are dominated by AI->bb and
   not by the definition of AI->from.  */

static void
adjust_debug_stmts_now (adjust_info *ai)
{
  basic_block bbphi = ai->bb;
  tree orig_def = ai->from;
  tree new_def = ai->to;
  imm_use_iterator imm_iter;
  gimple stmt;
  basic_block bbdef = gimple_bb (SSA_NAME_DEF_STMT (orig_def));

  /* The dominance tests below require up-to-date dominator info.  */
  gcc_assert (dom_info_available_p (CDI_DOMINATORS));

  /* Adjust any debug stmts that held onto non-loop-closed
     references.  */
  FOR_EACH_IMM_USE_STMT (stmt, imm_iter, orig_def)
    {
      use_operand_p use_p;
      basic_block bbuse;

      /* Only debug stmts are adjusted; real uses are left alone.  */
      if (!is_gimple_debug (stmt))
	continue;

      gcc_assert (gimple_debug_bind_p (stmt));

      bbuse = gimple_bb (stmt);

      /* Substitute only in uses dominated by BBPHI but not dominated
	 by the original definition in BBDEF.  */
      if ((bbuse == bbphi
	   || dominated_by_p (CDI_DOMINATORS, bbuse, bbphi))
	  && !(bbuse == bbdef
	       || dominated_by_p (CDI_DOMINATORS, bbuse, bbdef)))
	{
	  if (new_def)
	    /* Replace every use of ORIG_DEF within this stmt.  */
	    FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
	      SET_USE (use_p, new_def);
	  else
	    {
	      /* No loop-closed replacement exists: drop the bound
		 value rather than keep an ill-formed reference.  */
	      gimple_debug_bind_reset_value (stmt);
	      update_stmt (stmt);
	    }
	}
    }
}
175
176 /* Adjust debug stmts as scheduled before. */
177
178 static void
179 adjust_vec_debug_stmts (void)
180 {
181 if (!MAY_HAVE_DEBUG_STMTS)
182 return;
183
184 gcc_assert (adjust_vec.exists ());
185
186 while (!adjust_vec.is_empty ())
187 {
188 adjust_debug_stmts_now (&adjust_vec.last ());
189 adjust_vec.pop ();
190 }
191
192 adjust_vec.release ();
193 }
194
195 /* Adjust any debug stmts that referenced FROM values to use the
196 loop-closed TO, if the references are dominated by BB and not by
197 the definition of FROM. If adjust_vec is non-NULL, adjustments
198 will be postponed until adjust_vec_debug_stmts is called. */
199
200 static void
201 adjust_debug_stmts (tree from, tree to, basic_block bb)
202 {
203 adjust_info ai;
204
205 if (MAY_HAVE_DEBUG_STMTS
206 && TREE_CODE (from) == SSA_NAME
207 && ! virtual_operand_p (from))
208 {
209 ai.from = from;
210 ai.to = to;
211 ai.bb = bb;
212
213 if (adjust_vec.exists ())
214 adjust_vec.safe_push (ai);
215 else
216 adjust_debug_stmts_now (&ai);
217 }
218 }
219
220 /* Change E's phi arg in UPDATE_PHI to NEW_DEF, and record information
221 to adjust any debug stmts that referenced the old phi arg,
222 presumably non-loop-closed references left over from other
223 transformations. */
224
225 static void
226 adjust_phi_and_debug_stmts (gimple update_phi, edge e, tree new_def)
227 {
228 tree orig_def = PHI_ARG_DEF_FROM_EDGE (update_phi, e);
229
230 SET_PHI_ARG_DEF (update_phi, e->dest_idx, new_def);
231
232 if (MAY_HAVE_DEBUG_STMTS)
233 adjust_debug_stmts (orig_def, PHI_RESULT (update_phi),
234 gimple_bb (update_phi));
235 }
236
237
/* Update the PHI nodes of NEW_LOOP.

   NEW_LOOP is a duplicate of ORIG_LOOP.
   AFTER indicates whether NEW_LOOP executes before or after ORIG_LOOP:
   AFTER is true if NEW_LOOP executes after ORIG_LOOP, and false if it
   executes before it.  */

static void
slpeel_update_phis_for_duplicate_loop (struct loop *orig_loop,
                                       struct loop *new_loop, bool after)
{
  tree new_ssa_name;
  gimple phi_new, phi_orig;
  tree def;
  edge orig_loop_latch = loop_latch_edge (orig_loop);
  edge orig_entry_e = loop_preheader_edge (orig_loop);
  edge new_loop_exit_e = single_exit (new_loop);
  edge new_loop_entry_e = loop_preheader_edge (new_loop);
  /* The values entering NEW_LOOP come from ORIG_LOOP's latch when
     NEW_LOOP runs after it, and from ORIG_LOOP's preheader when
     NEW_LOOP runs before it.  */
  edge entry_arg_e = (after ? orig_loop_latch : orig_entry_e);
  gimple_stmt_iterator gsi_new, gsi_orig;

  /*
     step 1. For each loop-header-phi:
             Add the first phi argument for the phi in NEW_LOOP
            (the one associated with the entry of NEW_LOOP)

     step 2. For each loop-header-phi:
             Add the second phi argument for the phi in NEW_LOOP
            (the one associated with the latch of NEW_LOOP)

     step 3. Update the phis in the successor block of NEW_LOOP.

        case 1: NEW_LOOP was placed before ORIG_LOOP:
                The successor block of NEW_LOOP is the header of ORIG_LOOP.
                Updating the phis in the successor block can therefore be done
                along with the scanning of the loop header phis, because the
                header blocks of ORIG_LOOP and NEW_LOOP have exactly the same
                phi nodes, organized in the same order.

        case 2: NEW_LOOP was placed after ORIG_LOOP:
                The successor block of NEW_LOOP is the original exit block of
                ORIG_LOOP - the phis to be updated are the loop-closed-ssa phis.
                We postpone updating these phis to a later stage (when
                loop guards are added).
   */


  /* Scan the phis in the headers of the old and new loops
     (they are organized in exactly the same order).  */

  for (gsi_new = gsi_start_phis (new_loop->header),
       gsi_orig = gsi_start_phis (orig_loop->header);
       !gsi_end_p (gsi_new) && !gsi_end_p (gsi_orig);
       gsi_next (&gsi_new), gsi_next (&gsi_orig))
    {
      source_location locus;
      phi_new = gsi_stmt (gsi_new);
      phi_orig = gsi_stmt (gsi_orig);

      /* step 1.  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, entry_arg_e);
      locus = gimple_phi_arg_location_from_edge (phi_orig, entry_arg_e);
      add_phi_arg (phi_new, def, new_loop_entry_e, locus);

      /* step 2.  */
      def = PHI_ARG_DEF_FROM_EDGE (phi_orig, orig_loop_latch);
      locus = gimple_phi_arg_location_from_edge (phi_orig, orig_loop_latch);
      /* Constant latch args need no renaming and no latch arg in the
	 copy beyond what step 1 added.  */
      if (TREE_CODE (def) != SSA_NAME)
	continue;

      new_ssa_name = get_current_def (def);
      if (!new_ssa_name)
	{
	  /* This only happens if there are no definitions
	     inside the loop.  use the phi_result in this case.  */
	  new_ssa_name = PHI_RESULT (phi_new);
	}

      /* An ordinary ssa name defined in the loop.  */
      add_phi_arg (phi_new, new_ssa_name, loop_latch_edge (new_loop), locus);

      /* Drop any debug references outside the loop, if they would
	 become ill-formed SSA.  */
      adjust_debug_stmts (def, NULL, single_exit (orig_loop)->dest);

      /* step 3 (case 1).  */
      if (!after)
	{
	  gcc_assert (new_loop_exit_e == orig_entry_e);
	  adjust_phi_and_debug_stmts (phi_orig, new_loop_exit_e, new_ssa_name);
	}
    }
}
331
332
333 /* Update PHI nodes for a guard of the LOOP.
334
335 Input:
336 - LOOP, GUARD_EDGE: LOOP is a loop for which we added guard code that
337 controls whether LOOP is to be executed. GUARD_EDGE is the edge that
338 originates from the guard-bb, skips LOOP and reaches the (unique) exit
339 bb of LOOP. This loop-exit-bb is an empty bb with one successor.
340 We denote this bb NEW_MERGE_BB because before the guard code was added
341 it had a single predecessor (the LOOP header), and now it became a merge
342 point of two paths - the path that ends with the LOOP exit-edge, and
343 the path that ends with GUARD_EDGE.
344 - NEW_EXIT_BB: New basic block that is added by this function between LOOP
345 and NEW_MERGE_BB. It is used to place loop-closed-ssa-form exit-phis.
346
347 ===> The CFG before the guard-code was added:
348 LOOP_header_bb:
349 loop_body
350 if (exit_loop) goto update_bb
351 else goto LOOP_header_bb
352 update_bb:
353
354 ==> The CFG after the guard-code was added:
355 guard_bb:
356 if (LOOP_guard_condition) goto new_merge_bb
357 else goto LOOP_header_bb
358 LOOP_header_bb:
359 loop_body
360 if (exit_loop_condition) goto new_merge_bb
361 else goto LOOP_header_bb
362 new_merge_bb:
363 goto update_bb
364 update_bb:
365
366 ==> The CFG after this function:
367 guard_bb:
368 if (LOOP_guard_condition) goto new_merge_bb
369 else goto LOOP_header_bb
370 LOOP_header_bb:
371 loop_body
372 if (exit_loop_condition) goto new_exit_bb
373 else goto LOOP_header_bb
374 new_exit_bb:
375 new_merge_bb:
376 goto update_bb
377 update_bb:
378
379 This function:
380 1. creates and updates the relevant phi nodes to account for the new
381 incoming edge (GUARD_EDGE) into NEW_MERGE_BB. This involves:
382 1.1. Create phi nodes at NEW_MERGE_BB.
383 1.2. Update the phi nodes at the successor of NEW_MERGE_BB (denoted
384 UPDATE_BB). UPDATE_BB was the exit-bb of LOOP before NEW_MERGE_BB
385 2. preserves loop-closed-ssa-form by creating the required phi nodes
386 at the exit of LOOP (i.e, in NEW_EXIT_BB).
387
388 There are two flavors to this function:
389
390 slpeel_update_phi_nodes_for_guard1:
391 Here the guard controls whether we enter or skip LOOP, where LOOP is a
392 prolog_loop (loop1 below), and the new phis created in NEW_MERGE_BB are
393 for variables that have phis in the loop header.
394
395 slpeel_update_phi_nodes_for_guard2:
396 Here the guard controls whether we enter or skip LOOP, where LOOP is an
397 epilog_loop (loop2 below), and the new phis created in NEW_MERGE_BB are
398 for variables that have phis in the loop exit.
399
400 I.E., the overall structure is:
401
402 loop1_preheader_bb:
403 guard1 (goto loop1/merge1_bb)
404 loop1
405 loop1_exit_bb:
406 guard2 (goto merge1_bb/merge2_bb)
407 merge1_bb
408 loop2
409 loop2_exit_bb
410 merge2_bb
411 next_bb
412
413 slpeel_update_phi_nodes_for_guard1 takes care of creating phis in
414 loop1_exit_bb and merge1_bb. These are entry phis (phis for the vars
415 that have phis in loop1->header).
416
417 slpeel_update_phi_nodes_for_guard2 takes care of creating phis in
418 loop2_exit_bb and merge2_bb. These are exit phis (phis for the vars
419 that have phis in next_bb). It also adds some of these phis to
420 loop1_exit_bb.
421
422 slpeel_update_phi_nodes_for_guard1 is always called before
423 slpeel_update_phi_nodes_for_guard2. They are both needed in order
424 to create correct data-flow and loop-closed-ssa-form.
425
426 Generally slpeel_update_phi_nodes_for_guard1 creates phis for variables
427 that change between iterations of a loop (and therefore have a phi-node
428 at the loop entry), whereas slpeel_update_phi_nodes_for_guard2 creates
429 phis for variables that are used out of the loop (and therefore have
430 loop-closed exit phis). Some variables may be both updated between
431 iterations and used after the loop. This is why in loop1_exit_bb we
432 may need both entry_phis (created by slpeel_update_phi_nodes_for_guard1)
433 and exit phis (created by slpeel_update_phi_nodes_for_guard2).
434
435 - IS_NEW_LOOP: if IS_NEW_LOOP is true, then LOOP is a newly created copy of
436 an original loop. i.e., we have:
437
438 orig_loop
439 guard_bb (goto LOOP/new_merge)
440 new_loop <-- LOOP
441 new_exit
442 new_merge
443 next_bb
444
445 If IS_NEW_LOOP is false, then LOOP is an original loop, in which case we
446 have:
447
448 new_loop
449 guard_bb (goto LOOP/new_merge)
450 orig_loop <-- LOOP
451 new_exit
452 new_merge
453 next_bb
454
455 The SSA names defined in the original loop have a current
     reaching definition that records the corresponding new
457 ssa-name used in the new duplicated loop copy.
458 */
459
460 /* Function slpeel_update_phi_nodes_for_guard1
461
462 Input:
463 - GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.
464 - DEFS - a bitmap of ssa names to mark new names for which we recorded
465 information.
466
467 In the context of the overall structure, we have:
468
469 loop1_preheader_bb:
470 guard1 (goto loop1/merge1_bb)
471 LOOP-> loop1
472 loop1_exit_bb:
473 guard2 (goto merge1_bb/merge2_bb)
474 merge1_bb
475 loop2
476 loop2_exit_bb
477 merge2_bb
478 next_bb
479
480 For each name updated between loop iterations (i.e - for each name that has
481 an entry (loop-header) phi in LOOP) we create a new phi in:
482 1. merge1_bb (to account for the edge from guard1)
483 2. loop1_exit_bb (an exit-phi to keep LOOP in loop-closed form)
484 */
485
static void
slpeel_update_phi_nodes_for_guard1 (edge guard_edge, struct loop *loop,
                                    bool is_new_loop, basic_block *new_exit_bb)
{
  gimple orig_phi, new_phi;
  gimple update_phi, update_phi2;
  tree guard_arg, loop_arg;
  basic_block new_merge_bb = guard_edge->dest;
  edge e = EDGE_SUCC (new_merge_bb, 0);
  basic_block update_bb = e->dest;
  basic_block orig_bb = loop->header;
  edge new_exit_e;
  tree current_new_name;
  gimple_stmt_iterator gsi_orig, gsi_update;

  /* Create new bb between loop and new_merge_bb.  */
  *new_exit_bb = split_edge (single_exit (loop));

  new_exit_e = EDGE_SUCC (*new_exit_bb, 0);

  /* Walk the loop-header phis and the phis of UPDATE_BB in lockstep;
     they are organized in the same order.  */
  for (gsi_orig = gsi_start_phis (orig_bb),
       gsi_update = gsi_start_phis (update_bb);
       !gsi_end_p (gsi_orig) && !gsi_end_p (gsi_update);
       gsi_next (&gsi_orig), gsi_next (&gsi_update))
    {
      source_location loop_locus, guard_locus;
      tree new_res;
      orig_phi = gsi_stmt (gsi_orig);
      update_phi = gsi_stmt (gsi_update);

      /** 1. Handle new-merge-point phis  **/

      /* 1.1. Generate new phi node in NEW_MERGE_BB:  */
      new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
      new_phi = create_phi_node (new_res, new_merge_bb);

      /* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the exit-edge
            of LOOP. Set the two phi args in NEW_PHI for these edges:  */
      loop_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, EDGE_SUCC (loop->latch, 0));
      loop_locus = gimple_phi_arg_location_from_edge (orig_phi,
						      EDGE_SUCC (loop->latch,
								 0));
      guard_arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, loop_preheader_edge (loop));
      guard_locus
	= gimple_phi_arg_location_from_edge (orig_phi,
					     loop_preheader_edge (loop));

      add_phi_arg (new_phi, loop_arg, new_exit_e, loop_locus);
      add_phi_arg (new_phi, guard_arg, guard_edge, guard_locus);

      /* 1.3. Update phi in successor block.  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == loop_arg
                  || PHI_ARG_DEF_FROM_EDGE (update_phi, e) == guard_arg);
      adjust_phi_and_debug_stmts (update_phi, e, PHI_RESULT (new_phi));
      update_phi2 = new_phi;


      /** 2. Handle loop-closed-ssa-form phis  **/

      /* Virtual operands need no loop-closed exit phi here.  */
      if (virtual_operand_p (PHI_RESULT (orig_phi)))
	continue;

      /* 2.1. Generate new phi node in NEW_EXIT_BB:  */
      new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
      new_phi = create_phi_node (new_res, *new_exit_bb);

      /* 2.2. NEW_EXIT_BB has one incoming edge: the exit-edge of the loop.  */
      add_phi_arg (new_phi, loop_arg, single_exit (loop), loop_locus);

      /* 2.3. Update phi in successor of NEW_EXIT_BB:  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, new_exit_e) == loop_arg);
      adjust_phi_and_debug_stmts (update_phi2, new_exit_e,
				  PHI_RESULT (new_phi));

      /* 2.4. Record the newly created name with set_current_def.
         We want to find a name such that
                name = get_current_def (orig_loop_name)
         and to set its current definition as follows:
                set_current_def (name, new_phi_name)

         If LOOP is a new loop then loop_arg is already the name we're
         looking for. If LOOP is the original loop, then loop_arg is
         the orig_loop_name and the relevant name is recorded in its
         current reaching definition.  */
      if (is_new_loop)
        current_new_name = loop_arg;
      else
        {
          current_new_name = get_current_def (loop_arg);
	  /* current_def is not available only if the variable does not
	     change inside the loop, in which case we also don't care
	     about recording a current_def for it because we won't be
	     trying to create loop-exit-phis for it.  */
          if (!current_new_name)
            continue;
        }
      gcc_assert (get_current_def (current_new_name) == NULL_TREE);

      set_current_def (current_new_name, PHI_RESULT (new_phi));
    }
}
587
588
589 /* Function slpeel_update_phi_nodes_for_guard2
590
591 Input:
592 - GUARD_EDGE, LOOP, IS_NEW_LOOP, NEW_EXIT_BB - as explained above.
593
594 In the context of the overall structure, we have:
595
596 loop1_preheader_bb:
597 guard1 (goto loop1/merge1_bb)
598 loop1
599 loop1_exit_bb:
600 guard2 (goto merge1_bb/merge2_bb)
601 merge1_bb
602 LOOP-> loop2
603 loop2_exit_bb
604 merge2_bb
605 next_bb
606
        For each name used outside the loop (i.e - for each name that has an exit
608 phi in next_bb) we create a new phi in:
609 1. merge2_bb (to account for the edge from guard_bb)
610 2. loop2_exit_bb (an exit-phi to keep LOOP in loop-closed form)
611 3. guard2 bb (an exit phi to keep the preceding loop in loop-closed form),
           if needed (if it wasn't handled by slpeel_update_phi_nodes_for_guard1).
613 */
614
static void
slpeel_update_phi_nodes_for_guard2 (edge guard_edge, struct loop *loop,
                                    bool is_new_loop, basic_block *new_exit_bb)
{
  gimple orig_phi, new_phi;
  gimple update_phi, update_phi2;
  tree guard_arg, loop_arg;
  basic_block new_merge_bb = guard_edge->dest;
  edge e = EDGE_SUCC (new_merge_bb, 0);
  basic_block update_bb = e->dest;
  edge new_exit_e;
  tree orig_def, orig_def_new_name;
  tree new_name, new_name2;
  tree arg;
  gimple_stmt_iterator gsi;

  /* Create new bb between loop and new_merge_bb.  */
  *new_exit_bb = split_edge (single_exit (loop));

  new_exit_e = EDGE_SUCC (*new_exit_bb, 0);

  /* Iterate over the loop-closed exit phis in UPDATE_BB.  */
  for (gsi = gsi_start_phis (update_bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree new_res;
      update_phi = gsi_stmt (gsi);
      orig_phi = update_phi;
      orig_def = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
      /* This loop-closed-phi actually doesn't represent a use
	 out of the loop - the phi arg is a constant.  */
      if (TREE_CODE (orig_def) != SSA_NAME)
	continue;
      orig_def_new_name = get_current_def (orig_def);
      arg = NULL_TREE;

      /** 1. Handle new-merge-point phis  **/

      /* 1.1. Generate new phi node in NEW_MERGE_BB:  */
      new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
      new_phi = create_phi_node (new_res, new_merge_bb);

      /* 1.2. NEW_MERGE_BB has two incoming edges: GUARD_EDGE and the exit-edge
            of LOOP. Set the two PHI args in NEW_PHI for these edges:  */
      new_name = orig_def;
      new_name2 = NULL_TREE;
      if (orig_def_new_name)
        {
          new_name = orig_def_new_name;
	  /* Some variables have both loop-entry-phis and loop-exit-phis.
	     Such variables were given yet newer names by phis placed in
	     guard_bb by slpeel_update_phi_nodes_for_guard1.  I.e:
	     new_name2 = get_current_def (get_current_def (orig_name)).  */
          new_name2 = get_current_def (new_name);
        }

      /* Which copy of the loop provides which arg depends on whether
	 LOOP is the new (peeled) copy or the original.  */
      if (is_new_loop)
        {
          guard_arg = orig_def;
          loop_arg = new_name;
        }
      else
        {
          guard_arg = new_name;
          loop_arg = orig_def;
        }
      if (new_name2)
        guard_arg = new_name2;

      add_phi_arg (new_phi, loop_arg, new_exit_e, UNKNOWN_LOCATION);
      add_phi_arg (new_phi, guard_arg, guard_edge, UNKNOWN_LOCATION);

      /* 1.3. Update phi in successor block.  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi, e) == orig_def);
      adjust_phi_and_debug_stmts (update_phi, e, PHI_RESULT (new_phi));
      update_phi2 = new_phi;


      /** 2. Handle loop-closed-ssa-form phis  **/

      /* 2.1. Generate new phi node in NEW_EXIT_BB:  */
      new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
      new_phi = create_phi_node (new_res, *new_exit_bb);

      /* 2.2. NEW_EXIT_BB has one incoming edge: the exit-edge of the loop.  */
      add_phi_arg (new_phi, loop_arg, single_exit (loop), UNKNOWN_LOCATION);

      /* 2.3. Update phi in successor of NEW_EXIT_BB:  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, new_exit_e) == loop_arg);
      adjust_phi_and_debug_stmts (update_phi2, new_exit_e,
				  PHI_RESULT (new_phi));


      /** 3. Handle loop-closed-ssa-form phis for first loop  **/

      /* 3.1. Find the relevant names that need an exit-phi in
	 GUARD_BB, i.e. names for which
	 slpeel_update_phi_nodes_for_guard1 had not already created a
	 phi node. This is the case for names that are used outside
	 the loop (and therefore need an exit phi) but are not updated
	 across loop iterations (and therefore don't have a
	 loop-header-phi).

	 slpeel_update_phi_nodes_for_guard1 is responsible for
	 creating loop-exit phis in GUARD_BB for names that have a
	 loop-header-phi.  When such a phi is created we also record
	 the new name in its current definition.  If this new name
	 exists, then guard_arg was set to this new name (see 1.2
	 above).  Therefore, if guard_arg is not this new name, this
	 is an indication that an exit-phi in GUARD_BB was not yet
	 created, so we take care of it here.  */
      if (guard_arg == new_name2)
	continue;
      arg = guard_arg;

      /* 3.2. Generate new phi node in GUARD_BB:  */
      new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
      new_phi = create_phi_node (new_res, guard_edge->src);

      /* 3.3. GUARD_BB has one incoming edge:  */
      gcc_assert (EDGE_COUNT (guard_edge->src->preds) == 1);
      add_phi_arg (new_phi, arg, EDGE_PRED (guard_edge->src, 0),
		   UNKNOWN_LOCATION);

      /* 3.4. Update phi in successor of GUARD_BB:  */
      gcc_assert (PHI_ARG_DEF_FROM_EDGE (update_phi2, guard_edge)
                                                                == guard_arg);
      adjust_phi_and_debug_stmts (update_phi2, guard_edge,
				  PHI_RESULT (new_phi));
    }
}
744
745
/* Make the LOOP iterate NITERS times.  This is done by adding a new IV
   that starts at zero, increases by one and its limit is NITERS.

   Assumption: the exit-condition of LOOP is the last stmt in the loop.  */

void
slpeel_make_loop_iterate_ntimes (struct loop *loop, tree niters)
{
  tree indx_before_incr, indx_after_incr;
  gimple cond_stmt;
  gimple orig_cond;
  edge exit_edge = single_exit (loop);
  gimple_stmt_iterator loop_cond_gsi;
  gimple_stmt_iterator incr_gsi;
  bool insert_after;
  tree init = build_int_cst (TREE_TYPE (niters), 0);
  tree step = build_int_cst (TREE_TYPE (niters), 1);
  LOC loop_loc;
  enum tree_code code;

  orig_cond = get_loop_exit_condition (loop);
  gcc_assert (orig_cond);
  loop_cond_gsi = gsi_for_stmt (orig_cond);

  /* Create the counting IV (0, +1) at the standard increment
     position of the loop.  */
  standard_iv_increment_position (loop, &incr_gsi, &insert_after);
  create_iv (init, step, NULL_TREE, loop,
             &incr_gsi, insert_after, &indx_before_incr, &indx_after_incr);

  /* Gimplify both operands of the new exit test before the old one.  */
  indx_after_incr = force_gimple_operand_gsi (&loop_cond_gsi, indx_after_incr,
					      true, NULL_TREE, true,
					      GSI_SAME_STMT);
  niters = force_gimple_operand_gsi (&loop_cond_gsi, niters, true, NULL_TREE,
				     true, GSI_SAME_STMT);

  /* The comparison direction depends on whether the loop is exited
     on the true or the false edge of the condition.  */
  code = (exit_edge->flags & EDGE_TRUE_VALUE) ? GE_EXPR : LT_EXPR;
  cond_stmt = gimple_build_cond (code, indx_after_incr, niters, NULL_TREE,
				 NULL_TREE);

  gsi_insert_before (&loop_cond_gsi, cond_stmt, GSI_SAME_STMT);

  /* Remove old loop exit test:  */
  gsi_remove (&loop_cond_gsi, true);
  free_stmt_vec_info (orig_cond);

  loop_loc = find_loop_location (loop);
  if (dump_enabled_p ())
    {
      if (LOCATION_LOCUS (loop_loc) != UNKNOWN_LOC)
	dump_printf (MSG_NOTE, "\nloop at %s:%d: ", LOC_FILE (loop_loc),
		     LOC_LINE (loop_loc));
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, cond_stmt, 0);
    }
  /* Record the loop's new latch iteration count.  */
  loop->nb_iterations = niters;
}
800
801
/* Given LOOP this function generates a new copy of it and puts it
   on E which is either the entry or exit of LOOP.  Returns the new
   loop, or NULL if duplication is not possible.  */

struct loop *
slpeel_tree_duplicate_loop_to_edge_cfg (struct loop *loop, edge e)
{
  struct loop *new_loop;
  basic_block *new_bbs, *bbs;
  bool at_exit;
  bool was_imm_dom;
  basic_block exit_dest;
  gimple phi;
  tree phi_arg;
  edge exit, new_exit;
  gimple_stmt_iterator gsi;

  /* E must be either the single exit edge or the preheader edge.  */
  at_exit = (e == single_exit (loop));
  if (!at_exit && e != loop_preheader_edge (loop))
    return NULL;

  bbs = get_loop_body (loop);

  /* Check whether duplication is possible.  */
  if (!can_copy_bbs_p (bbs, loop->num_nodes))
    {
      free (bbs);
      return NULL;
    }

  /* Generate new loop structure.  */
  new_loop = duplicate_loop (loop, loop_outer (loop));
  if (!new_loop)
    {
      free (bbs);
      return NULL;
    }

  exit_dest = single_exit (loop)->dest;
  /* Remember whether the exit destination was immediately dominated
     by the loop header, so dominators can be fixed up below.  */
  was_imm_dom = (get_immediate_dominator (CDI_DOMINATORS,
					  exit_dest) == loop->header ?
		 true : false);

  new_bbs = XNEWVEC (basic_block, loop->num_nodes);

  exit = single_exit (loop);
  copy_bbs (bbs, loop->num_nodes, new_bbs,
	    &exit, 1, &new_exit, NULL,
	    e->src);

  /* Duplicating phi args at exit bbs as coming
     also from exit of duplicated loop.  */
  for (gsi = gsi_start_phis (exit_dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      phi = gsi_stmt (gsi);
      phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, single_exit (loop));
      if (phi_arg)
	{
	  edge new_loop_exit_edge;
	  source_location locus;

	  locus = gimple_phi_arg_location_from_edge (phi, single_exit (loop));
	  /* The copied header has two successors; the exit edge is
	     whichever one does not lead to the copied latch.  */
	  if (EDGE_SUCC (new_loop->header, 0)->dest == new_loop->latch)
	    new_loop_exit_edge = EDGE_SUCC (new_loop->header, 1);
	  else
	    new_loop_exit_edge = EDGE_SUCC (new_loop->header, 0);

	  add_phi_arg (phi, phi_arg, new_loop_exit_edge, locus);
	}
    }

  if (at_exit) /* Add the loop copy at exit.  */
    {
      redirect_edge_and_branch_force (e, new_loop->header);
      PENDING_STMT (e) = NULL;
      set_immediate_dominator (CDI_DOMINATORS, new_loop->header, e->src);
      if (was_imm_dom)
	set_immediate_dominator (CDI_DOMINATORS, exit_dest, new_loop->header);
    }
  else /* Add the copy at entry.  */
    {
      edge new_exit_e;
      edge entry_e = loop_preheader_edge (loop);
      basic_block preheader = entry_e->src;

      /* Find the exit edge of the new loop copy.  */
      if (!flow_bb_inside_loop_p (new_loop,
				  EDGE_SUCC (new_loop->header, 0)->dest))
        new_exit_e = EDGE_SUCC (new_loop->header, 0);
      else
        new_exit_e = EDGE_SUCC (new_loop->header, 1);

      redirect_edge_and_branch_force (new_exit_e, loop->header);
      PENDING_STMT (new_exit_e) = NULL;
      set_immediate_dominator (CDI_DOMINATORS, loop->header,
			       new_exit_e->src);

      /* We have to add phi args to the loop->header here as coming
	 from new_exit_e edge.  */
      for (gsi = gsi_start_phis (loop->header);
	   !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  phi = gsi_stmt (gsi);
	  phi_arg = PHI_ARG_DEF_FROM_EDGE (phi, entry_e);
	  if (phi_arg)
	    add_phi_arg (phi, phi_arg, new_exit_e,
			 gimple_phi_arg_location_from_edge (phi, entry_e));
	}

      redirect_edge_and_branch_force (entry_e, new_loop->header);
      PENDING_STMT (entry_e) = NULL;
      set_immediate_dominator (CDI_DOMINATORS, new_loop->header, preheader);
    }

  free (new_bbs);
  free (bbs);

  return new_loop;
}
920
921
922 /* Given the condition statement COND, put it as the last statement
923 of GUARD_BB; EXIT_BB is the basic block to skip the loop;
924 Assumes that this is the single exit of the guarded loop.
925 Returns the skip edge, inserts new stmts on the COND_EXPR_STMT_LIST. */
926
927 static edge
928 slpeel_add_loop_guard (basic_block guard_bb, tree cond,
929 gimple_seq cond_expr_stmt_list,
930 basic_block exit_bb, basic_block dom_bb,
931 int probability)
932 {
933 gimple_stmt_iterator gsi;
934 edge new_e, enter_e;
935 gimple cond_stmt;
936 gimple_seq gimplify_stmt_list = NULL;
937
938 enter_e = EDGE_SUCC (guard_bb, 0);
939 enter_e->flags &= ~EDGE_FALLTHRU;
940 enter_e->flags |= EDGE_FALSE_VALUE;
941 gsi = gsi_last_bb (guard_bb);
942
943 cond = force_gimple_operand_1 (cond, &gimplify_stmt_list, is_gimple_condexpr,
944 NULL_TREE);
945 if (gimplify_stmt_list)
946 gimple_seq_add_seq (&cond_expr_stmt_list, gimplify_stmt_list);
947 cond_stmt = gimple_build_cond_from_tree (cond, NULL_TREE, NULL_TREE);
948 if (cond_expr_stmt_list)
949 gsi_insert_seq_after (&gsi, cond_expr_stmt_list, GSI_NEW_STMT);
950
951 gsi = gsi_last_bb (guard_bb);
952 gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
953
954 /* Add new edge to connect guard block to the merge/loop-exit block. */
955 new_e = make_edge (guard_bb, exit_bb, EDGE_TRUE_VALUE);
956
957 new_e->count = guard_bb->count;
958 new_e->probability = probability;
959 new_e->count = apply_probability (enter_e->count, probability);
960 enter_e->count -= new_e->count;
961 enter_e->probability = inverse_probability (probability);
962 set_immediate_dominator (CDI_DOMINATORS, exit_bb, dom_bb);
963 return new_e;
964 }
965
966
967 /* This function verifies that the following restrictions apply to LOOP:
968 (1) it is innermost
969 (2) it consists of exactly 2 basic blocks - header, and an empty latch.
970 (3) it is single entry, single exit
971 (4) its exit condition is the last stmt in the header
972 (5) E is the entry/exit edge of LOOP.
973 */
974
975 bool
976 slpeel_can_duplicate_loop_p (const struct loop *loop, const_edge e)
977 {
978 edge exit_e = single_exit (loop);
979 edge entry_e = loop_preheader_edge (loop);
980 gimple orig_cond = get_loop_exit_condition (loop);
981 gimple_stmt_iterator loop_exit_gsi = gsi_last_bb (exit_e->src);
982
983 if (need_ssa_update_p (cfun))
984 return false;
985
986 if (loop->inner
987 /* All loops have an outer scope; the only case loop->outer is NULL is for
988 the function itself. */
989 || !loop_outer (loop)
990 || loop->num_nodes != 2
991 || !empty_block_p (loop->latch)
992 || !single_exit (loop)
993 /* Verify that new loop exit condition can be trivially modified. */
994 || (!orig_cond || orig_cond != gsi_stmt (loop_exit_gsi))
995 || (e != exit_e && e != entry_e))
996 return false;
997
998 return true;
999 }
1000
#ifdef ENABLE_CHECKING
/* Sanity-check the CFG produced by loop peeling: the single exit block of
   FIRST_LOOP must hold the guard deciding whether SECOND_LOOP executes,
   and SECOND_LOOP's preheader must be reachable both from that exit and
   from the block preceding FIRST_LOOP.  */

static void
slpeel_verify_cfg_after_peeling (struct loop *first_loop,
                                 struct loop *second_loop)
{
  basic_block exit1_bb = single_exit (first_loop)->dest;
  basic_block entry2_bb = loop_preheader_edge (second_loop)->src;
  basic_block entry1_bb = loop_preheader_edge (first_loop)->src;
  basic_block pred0, pred1;

  /* The guard controlling SECOND_LOOP lives in FIRST_LOOP's exit block,
     so that block must have exactly two successors: SECOND_LOOP's
     preheader and a block following SECOND_LOOP.  */
  gcc_assert (EDGE_COUNT (exit1_bb->succs) == 2);

  /* 1. One successor of FIRST_LOOP's exit must be SECOND_LOOP's
     preheader, whose two predecessors are FIRST_LOOP's exit and the
     block preceding FIRST_LOOP -- in either order.  */
  gcc_assert (EDGE_COUNT (entry2_bb->preds) == 2);
  pred0 = EDGE_PRED (entry2_bb, 0)->src;
  pred1 = EDGE_PRED (entry2_bb, 1)->src;
  gcc_assert ((pred0 == exit1_bb && pred1 == entry1_bb)
	      || (pred1 == exit1_bb && pred0 == entry1_bb));

  /* Verifying that the other successor of FIRST_LOOP's exit lies after
     SECOND_LOOP remains TODO.  */
}
#endif
1034
1035 /* If the run time cost model check determines that vectorization is
1036 not profitable and hence scalar loop should be generated then set
1037 FIRST_NITERS to prologue peeled iterations. This will allow all the
1038 iterations to be executed in the prologue peeled scalar loop. */
1039
static void
set_prologue_iterations (basic_block bb_before_first_loop,
			 tree *first_niters,
			 struct loop *loop,
			 unsigned int th,
			 int probability)
{
  edge e;
  basic_block cond_bb, then_bb;
  tree var, prologue_after_cost_adjust_name;
  gimple_stmt_iterator gsi;
  gimple newphi;
  edge e_true, e_false, e_fallthru;
  gimple cond_stmt;
  gimple_seq stmts = NULL;
  tree cost_pre_condition = NULL_TREE;
  tree scalar_loop_iters =
    unshare_expr (LOOP_VINFO_NITERS_UNCHANGED (loop_vec_info_for_loop (loop)));

  /* Split the single predecessor edge twice, creating COND_BB (which will
     hold the cost-model test) followed by THEN_BB, both preceding
     BB_BEFORE_FIRST_LOOP.  */
  e = single_pred_edge (bb_before_first_loop);
  cond_bb = split_edge(e);

  e = single_pred_edge (bb_before_first_loop);
  then_bb = split_edge(e);
  set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);

  /* COND_BB jumps straight to BB_BEFORE_FIRST_LOOP when the test is
     false, bypassing THEN_BB.  */
  e_false = make_single_succ_edge (cond_bb, bb_before_first_loop,
				   EDGE_FALSE_VALUE);
  set_immediate_dominator (CDI_DOMINATORS, bb_before_first_loop, cond_bb);

  /* The edge COND_BB -> THEN_BB created by the second split becomes the
     true edge of the condition.  */
  e_true = EDGE_PRED (then_bb, 0);
  e_true->flags &= ~EDGE_FALLTHRU;
  e_true->flags |= EDGE_TRUE_VALUE;

  /* Distribute COND_BB's profile over the two outgoing edges according
     to PROBABILITY.  */
  e_true->probability = probability;
  e_false->probability = inverse_probability (probability);
  e_true->count = apply_probability (cond_bb->count, probability);
  e_false->count = cond_bb->count - e_true->count;
  then_bb->frequency = EDGE_FREQUENCY (e_true);
  then_bb->count = e_true->count;

  e_fallthru = EDGE_SUCC (then_bb, 0);
  e_fallthru->count = then_bb->count;

  /* Build and insert the test "scalar_loop_iters <= th" at the end of
     COND_BB.  */
  gsi = gsi_last_bb (cond_bb);
  cost_pre_condition =
    fold_build2 (LE_EXPR, boolean_type_node, scalar_loop_iters,
	         build_int_cst (TREE_TYPE (scalar_loop_iters), th));
  cost_pre_condition =
    force_gimple_operand_gsi_1 (&gsi, cost_pre_condition, is_gimple_condexpr,
				NULL_TREE, false, GSI_CONTINUE_LINKING);
  cond_stmt = gimple_build_cond_from_tree (cost_pre_condition,
					   NULL_TREE, NULL_TREE);
  gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);

  /* In THEN_BB compute the full scalar iteration count; any statements
     needed to gimplify it are emitted there.  */
  var = create_tmp_var (TREE_TYPE (scalar_loop_iters),
			"prologue_after_cost_adjust");
  prologue_after_cost_adjust_name =
    force_gimple_operand (scalar_loop_iters, &stmts, false, var);

  gsi = gsi_last_bb (then_bb);
  if (stmts)
    gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);

  /* Merge the two values with a PHI in BB_BEFORE_FIRST_LOOP: the full
     scalar count when the cost test fired, the original *FIRST_NITERS
     otherwise.  */
  newphi = create_phi_node (var, bb_before_first_loop);
  add_phi_arg (newphi, prologue_after_cost_adjust_name, e_fallthru,
	       UNKNOWN_LOCATION);
  add_phi_arg (newphi, *first_niters, e_false, UNKNOWN_LOCATION);

  /* Hand the merged prologue iteration count back to the caller.  */
  *first_niters = PHI_RESULT (newphi);
}
1111
1112 /* Function slpeel_tree_peel_loop_to_edge.
1113
1114 Peel the first (last) iterations of LOOP into a new prolog (epilog) loop
1115 that is placed on the entry (exit) edge E of LOOP. After this transformation
1116 we have two loops one after the other - first-loop iterates FIRST_NITERS
1117 times, and second-loop iterates the remainder NITERS - FIRST_NITERS times.
1118 If the cost model indicates that it is profitable to emit a scalar
1119 loop instead of the vector one, then the prolog (epilog) loop will iterate
1120 for the entire unchanged scalar iterations of the loop.
1121
1122 Input:
1123 - LOOP: the loop to be peeled.
1124 - E: the exit or entry edge of LOOP.
1125 If it is the entry edge, we peel the first iterations of LOOP. In this
1126 case first-loop is LOOP, and second-loop is the newly created loop.
1127 If it is the exit edge, we peel the last iterations of LOOP. In this
1128 case, first-loop is the newly created loop, and second-loop is LOOP.
1129 - NITERS: the number of iterations that LOOP iterates.
1130 - FIRST_NITERS: the number of iterations that the first-loop should iterate.
 - UPDATE_FIRST_LOOP_COUNT: specifies whether this function is responsible
1132 for updating the loop bound of the first-loop to FIRST_NITERS. If it
1133 is false, the caller of this function may want to take care of this
1134 (this can be useful if we don't want new stmts added to first-loop).
1135 - TH: cost model profitability threshold of iterations for vectorization.
1136 - CHECK_PROFITABILITY: specify whether cost model check has not occurred
1137 during versioning and hence needs to occur during
1138 prologue generation or whether cost model check
1139 has not occurred during prologue generation and hence
1140 needs to occur during epilogue generation.
1141 - BOUND1 is the upper bound on number of iterations of the first loop (if known)
1142 - BOUND2 is the upper bound on number of iterations of the second loop (if known)
1143
1144
1145 Output:
1146 The function returns a pointer to the new loop-copy, or NULL if it failed
1147 to perform the transformation.
1148
1149 The function generates two if-then-else guards: one before the first loop,
1150 and the other before the second loop:
1151 The first guard is:
1152 if (FIRST_NITERS == 0) then skip the first loop,
1153 and go directly to the second loop.
1154 The second guard is:
1155 if (FIRST_NITERS == NITERS) then skip the second loop.
1156
1157 If the optional COND_EXPR and COND_EXPR_STMT_LIST arguments are given
1158 then the generated condition is combined with COND_EXPR and the
1159 statements in COND_EXPR_STMT_LIST are emitted together with it.
1160
1161 FORNOW only simple loops are supported (see slpeel_can_duplicate_loop_p).
1162 FORNOW the resulting code will not be in loop-closed-ssa form.
1163 */
1164
static struct loop*
slpeel_tree_peel_loop_to_edge (struct loop *loop,
			       edge e, tree *first_niters,
			       tree niters, bool update_first_loop_count,
			       unsigned int th, bool check_profitability,
			       tree cond_expr, gimple_seq cond_expr_stmt_list,
			       int bound1, int bound2)
{
  struct loop *new_loop = NULL, *first_loop, *second_loop;
  edge skip_e;
  tree pre_condition = NULL_TREE;
  basic_block bb_before_second_loop, bb_after_second_loop;
  basic_block bb_before_first_loop;
  basic_block bb_between_loops;
  basic_block new_exit_bb;
  gimple_stmt_iterator gsi;
  edge exit_e = single_exit (loop);
  LOC loop_loc;
  tree cost_pre_condition = NULL_TREE;
  /* There are many aspects to how likely the first loop is going to be executed.
     Without histogram we can't really do good job.  Simply set it to
     2/3, so the first loop is not reordered to the end of function and
     the hot path through stays short.  */
  int first_guard_probability = 2 * REG_BR_PROB_BASE / 3;
  int second_guard_probability = 2 * REG_BR_PROB_BASE / 3;
  int probability_of_second_loop;

  if (!slpeel_can_duplicate_loop_p (loop, e))
    return NULL;

  /* If the loop has a virtual PHI, but exit bb doesn't, create a virtual PHI
     in the exit bb and rename all the uses after the loop.  This simplifies
     the *guard[12] routines, which assume loop closed SSA form for all PHIs
     (but normally loop closed SSA form doesn't require virtual PHIs to be
     in the same form).  Doing this early simplifies the checking what
     uses should be renamed.  */
  for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
    if (virtual_operand_p (gimple_phi_result (gsi_stmt (gsi))))
      {
	gimple phi = gsi_stmt (gsi);
	/* NOTE(review): GSI is reused below to scan a different block's
	   PHIs; this is only safe because of the unconditional "break"
	   that leaves the outer loop right after this body.  */
	for (gsi = gsi_start_phis (exit_e->dest);
	     !gsi_end_p (gsi); gsi_next (&gsi))
	  if (virtual_operand_p (gimple_phi_result (gsi_stmt (gsi))))
	    break;
	if (gsi_end_p (gsi))
	  {
	    /* The exit block has no virtual PHI yet: materialize one fed
	       by the latch value of the loop's virtual PHI, and redirect
	       all uses outside the loop header to the new PHI result.  */
	    tree new_vop = copy_ssa_name (PHI_RESULT (phi), NULL);
	    gimple new_phi = create_phi_node (new_vop, exit_e->dest);
	    tree vop = PHI_ARG_DEF_FROM_EDGE (phi, EDGE_SUCC (loop->latch, 0));
	    imm_use_iterator imm_iter;
	    gimple stmt;
	    use_operand_p use_p;

	    add_phi_arg (new_phi, vop, exit_e, UNKNOWN_LOCATION);
	    gimple_phi_set_result (new_phi, new_vop);
	    FOR_EACH_IMM_USE_STMT (stmt, imm_iter, vop)
	      if (stmt != new_phi && gimple_bb (stmt) != loop->header)
		FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
		  SET_USE (use_p, new_vop);
	  }
	break;
      }

  /* 1. Generate a copy of LOOP and put it on E (E is the entry/exit of LOOP).
        Resulting CFG would be:

        first_loop:
        do {
        } while ...

        second_loop:
        do {
        } while ...

        orig_exit_bb:
   */

  if (!(new_loop = slpeel_tree_duplicate_loop_to_edge_cfg (loop, e)))
    {
      loop_loc = find_loop_location (loop);
      dump_printf_loc (MSG_MISSED_OPTIMIZATION, loop_loc,
                       "tree_duplicate_loop_to_edge_cfg failed.\n");
      return NULL;
    }

  if (MAY_HAVE_DEBUG_STMTS)
    {
      gcc_assert (!adjust_vec.exists ());
      vec_stack_alloc (adjust_info, adjust_vec, 32);
    }

  if (e == exit_e)
    {
      /* NEW_LOOP was placed after LOOP.  */
      first_loop = loop;
      second_loop = new_loop;
    }
  else
    {
      /* NEW_LOOP was placed before LOOP.  */
      first_loop = new_loop;
      second_loop = loop;
    }

  slpeel_update_phis_for_duplicate_loop (loop, new_loop, e == exit_e);
  rename_variables_in_loop (new_loop);


  /* 2.  Add the guard code in one of the following ways:

     2.a Add the guard that controls whether the first loop is executed.
         This occurs when this function is invoked for prologue or epilogue
	 generation and when the cost model check can be done at compile time.

         Resulting CFG would be:

         bb_before_first_loop:
         if (FIRST_NITERS == 0) GOTO bb_before_second_loop
                                GOTO first-loop

         first_loop:
         do {
         } while ...

         bb_before_second_loop:

         second_loop:
         do {
         } while ...

         orig_exit_bb:

     2.b Add the cost model check that allows the prologue
         to iterate for the entire unchanged scalar
         iterations of the loop in the event that the cost
         model indicates that the scalar loop is more
         profitable than the vector one. This occurs when
	 this function is invoked for prologue generation
	 and the cost model check needs to be done at run
	 time.

         Resulting CFG after prologue peeling would be:

         if (scalar_loop_iterations <= th)
           FIRST_NITERS = scalar_loop_iterations

         bb_before_first_loop:
         if (FIRST_NITERS == 0) GOTO bb_before_second_loop
                                GOTO first-loop

         first_loop:
         do {
         } while ...

         bb_before_second_loop:

         second_loop:
         do {
         } while ...

         orig_exit_bb:

     2.c Add the cost model check that allows the epilogue
         to iterate for the entire unchanged scalar
         iterations of the loop in the event that the cost
         model indicates that the scalar loop is more
         profitable than the vector one. This occurs when
	 this function is invoked for epilogue generation
	 and the cost model check needs to be done at run
	 time.  This check is combined with any pre-existing
	 check in COND_EXPR to avoid versioning.

         Resulting CFG after prologue peeling would be:

         bb_before_first_loop:
         if ((scalar_loop_iterations <= th)
	     ||
             FIRST_NITERS == 0) GOTO bb_before_second_loop
                                GOTO first-loop

         first_loop:
         do {
         } while ...

         bb_before_second_loop:

         second_loop:
         do {
         } while ...

         orig_exit_bb:
   */

  bb_before_first_loop = split_edge (loop_preheader_edge (first_loop));
  bb_before_second_loop = split_edge (single_exit (first_loop));

  probability_of_second_loop = (inverse_probability (first_guard_probability)
			        + combine_probabilities (second_guard_probability,
                                                         first_guard_probability));
  /* Theoretically preheader edge of first loop and exit edge should have
     same frequencies.  Loop exit probabilities are however easy to get wrong.
     It is safer to copy value from original loop entry.  */
  bb_before_second_loop->frequency
     = apply_probability (bb_before_first_loop->frequency,
			  probability_of_second_loop);
  bb_before_second_loop->count
     = apply_probability (bb_before_first_loop->count,
			  probability_of_second_loop);
  single_succ_edge (bb_before_second_loop)->count
     = bb_before_second_loop->count;

  /* Epilogue peeling.  */
  if (!update_first_loop_count)
    {
      /* Skip the second (epilogue) loop when FIRST_NITERS covers all
	 iterations, optionally OR-ed with the run-time cost test and/or
	 the caller-provided COND_EXPR.  */
      pre_condition =
	fold_build2 (LE_EXPR, boolean_type_node, *first_niters,
		     build_int_cst (TREE_TYPE (*first_niters), 0));
      if (check_profitability)
	{
	  tree scalar_loop_iters
	    = unshare_expr (LOOP_VINFO_NITERS_UNCHANGED
			    (loop_vec_info_for_loop (loop)));
	  cost_pre_condition =
	    fold_build2 (LE_EXPR, boolean_type_node, scalar_loop_iters,
			 build_int_cst (TREE_TYPE (scalar_loop_iters), th));

	  pre_condition = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
				       cost_pre_condition, pre_condition);
	}
      if (cond_expr)
	{
	  pre_condition =
	    fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
			 pre_condition,
			 fold_build1 (TRUTH_NOT_EXPR, boolean_type_node,
				      cond_expr));
	}
    }

  /* Prologue peeling.  */
  else
    {
      if (check_profitability)
	set_prologue_iterations (bb_before_first_loop, first_niters,
				 loop, th, first_guard_probability);

      pre_condition =
	fold_build2 (LE_EXPR, boolean_type_node, *first_niters,
		     build_int_cst (TREE_TYPE (*first_niters), 0));
    }

  skip_e = slpeel_add_loop_guard (bb_before_first_loop, pre_condition,
				  cond_expr_stmt_list,
				  bb_before_second_loop, bb_before_first_loop,
				  inverse_probability (first_guard_probability));
  scale_loop_profile (first_loop, first_guard_probability,
		      check_profitability && (int)th > bound1 ? th : bound1);
  slpeel_update_phi_nodes_for_guard1 (skip_e, first_loop,
				      first_loop == new_loop,
				      &new_exit_bb);


  /* 3. Add the guard that controls whether the second loop is executed.
        Resulting CFG would be:

        bb_before_first_loop:
        if (FIRST_NITERS == 0) GOTO bb_before_second_loop (skip first loop)
                               GOTO first-loop

        first_loop:
        do {
        } while ...

        bb_between_loops:
        if (FIRST_NITERS == NITERS) GOTO bb_after_second_loop (skip second loop)
                                    GOTO bb_before_second_loop

        bb_before_second_loop:

        second_loop:
        do {
        } while ...

        bb_after_second_loop:

        orig_exit_bb:
   */

  bb_between_loops = new_exit_bb;
  bb_after_second_loop = split_edge (single_exit (second_loop));

  pre_condition =
	fold_build2 (EQ_EXPR, boolean_type_node, *first_niters, niters);
  skip_e = slpeel_add_loop_guard (bb_between_loops, pre_condition, NULL,
                                  bb_after_second_loop, bb_before_first_loop,
				  inverse_probability (second_guard_probability));
  scale_loop_profile (second_loop, probability_of_second_loop, bound2);
  slpeel_update_phi_nodes_for_guard2 (skip_e, second_loop,
                                     second_loop == new_loop, &new_exit_bb);

  /* 4. Make first-loop iterate FIRST_NITERS times, if requested.
   */
  if (update_first_loop_count)
    slpeel_make_loop_iterate_ntimes (first_loop, *first_niters);

  delete_update_ssa ();

  adjust_vec_debug_stmts ();

  return new_loop;
}
1476
1477 /* Function vect_get_loop_location.
1478
1479 Extract the location of the loop in the source code.
1480 If the loop is not well formed for vectorization, an estimated
1481 location is calculated.
1482 Return the loop location if succeed and NULL if not. */
1483
1484 LOC
1485 find_loop_location (struct loop *loop)
1486 {
1487 gimple stmt = NULL;
1488 basic_block bb;
1489 gimple_stmt_iterator si;
1490
1491 if (!loop)
1492 return UNKNOWN_LOC;
1493
1494 stmt = get_loop_exit_condition (loop);
1495
1496 if (stmt && gimple_location (stmt) != UNKNOWN_LOC)
1497 return gimple_location (stmt);
1498
1499 /* If we got here the loop is probably not "well formed",
1500 try to estimate the loop location */
1501
1502 if (!loop->header)
1503 return UNKNOWN_LOC;
1504
1505 bb = loop->header;
1506
1507 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
1508 {
1509 stmt = gsi_stmt (si);
1510 if (gimple_location (stmt) != UNKNOWN_LOC)
1511 return gimple_location (stmt);
1512 }
1513
1514 return UNKNOWN_LOC;
1515 }
1516
1517
1518 /* This function builds ni_name = number of iterations loop executes
1519 on the loop preheader. If SEQ is given the stmt is instead emitted
1520 there. */
1521
1522 static tree
1523 vect_build_loop_niters (loop_vec_info loop_vinfo, gimple_seq seq)
1524 {
1525 tree ni_name, var;
1526 gimple_seq stmts = NULL;
1527 edge pe;
1528 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1529 tree ni = unshare_expr (LOOP_VINFO_NITERS (loop_vinfo));
1530
1531 var = create_tmp_var (TREE_TYPE (ni), "niters");
1532 ni_name = force_gimple_operand (ni, &stmts, false, var);
1533
1534 pe = loop_preheader_edge (loop);
1535 if (stmts)
1536 {
1537 if (seq)
1538 gimple_seq_add_seq (&seq, stmts);
1539 else
1540 {
1541 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
1542 gcc_assert (!new_bb);
1543 }
1544 }
1545
1546 return ni_name;
1547 }
1548
1549
1550 /* This function generates the following statements:
1551
1552 ni_name = number of iterations loop executes
1553 ratio = ni_name / vf
1554 ratio_mult_vf_name = ratio * vf
1555
1556 and places them at the loop preheader edge or in COND_EXPR_STMT_LIST
1557 if that is non-NULL. */
1558
1559 static void
1560 vect_generate_tmps_on_preheader (loop_vec_info loop_vinfo,
1561 tree *ni_name_ptr,
1562 tree *ratio_mult_vf_name_ptr,
1563 tree *ratio_name_ptr,
1564 gimple_seq cond_expr_stmt_list)
1565 {
1566
1567 edge pe;
1568 basic_block new_bb;
1569 gimple_seq stmts;
1570 tree ni_name, ni_minus_gap_name;
1571 tree var;
1572 tree ratio_name;
1573 tree ratio_mult_vf_name;
1574 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1575 tree ni = LOOP_VINFO_NITERS (loop_vinfo);
1576 int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1577 tree log_vf;
1578
1579 pe = loop_preheader_edge (loop);
1580
1581 /* Generate temporary variable that contains
1582 number of iterations loop executes. */
1583
1584 ni_name = vect_build_loop_niters (loop_vinfo, cond_expr_stmt_list);
1585 log_vf = build_int_cst (TREE_TYPE (ni), exact_log2 (vf));
1586
1587 /* If epilogue loop is required because of data accesses with gaps, we
1588 subtract one iteration from the total number of iterations here for
1589 correct calculation of RATIO. */
1590 if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
1591 {
1592 ni_minus_gap_name = fold_build2 (MINUS_EXPR, TREE_TYPE (ni_name),
1593 ni_name,
1594 build_one_cst (TREE_TYPE (ni_name)));
1595 if (!is_gimple_val (ni_minus_gap_name))
1596 {
1597 var = create_tmp_var (TREE_TYPE (ni), "ni_gap");
1598
1599 stmts = NULL;
1600 ni_minus_gap_name = force_gimple_operand (ni_minus_gap_name, &stmts,
1601 true, var);
1602 if (cond_expr_stmt_list)
1603 gimple_seq_add_seq (&cond_expr_stmt_list, stmts);
1604 else
1605 {
1606 pe = loop_preheader_edge (loop);
1607 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
1608 gcc_assert (!new_bb);
1609 }
1610 }
1611 }
1612 else
1613 ni_minus_gap_name = ni_name;
1614
1615 /* Create: ratio = ni >> log2(vf) */
1616
1617 ratio_name = fold_build2 (RSHIFT_EXPR, TREE_TYPE (ni_minus_gap_name),
1618 ni_minus_gap_name, log_vf);
1619 if (!is_gimple_val (ratio_name))
1620 {
1621 var = create_tmp_var (TREE_TYPE (ni), "bnd");
1622
1623 stmts = NULL;
1624 ratio_name = force_gimple_operand (ratio_name, &stmts, true, var);
1625 if (cond_expr_stmt_list)
1626 gimple_seq_add_seq (&cond_expr_stmt_list, stmts);
1627 else
1628 {
1629 pe = loop_preheader_edge (loop);
1630 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
1631 gcc_assert (!new_bb);
1632 }
1633 }
1634
1635 /* Create: ratio_mult_vf = ratio << log2 (vf). */
1636
1637 ratio_mult_vf_name = fold_build2 (LSHIFT_EXPR, TREE_TYPE (ratio_name),
1638 ratio_name, log_vf);
1639 if (!is_gimple_val (ratio_mult_vf_name))
1640 {
1641 var = create_tmp_var (TREE_TYPE (ni), "ratio_mult_vf");
1642
1643 stmts = NULL;
1644 ratio_mult_vf_name = force_gimple_operand (ratio_mult_vf_name, &stmts,
1645 true, var);
1646 if (cond_expr_stmt_list)
1647 gimple_seq_add_seq (&cond_expr_stmt_list, stmts);
1648 else
1649 {
1650 pe = loop_preheader_edge (loop);
1651 new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
1652 gcc_assert (!new_bb);
1653 }
1654 }
1655
1656 *ni_name_ptr = ni_name;
1657 *ratio_mult_vf_name_ptr = ratio_mult_vf_name;
1658 *ratio_name_ptr = ratio_name;
1659
1660 return;
1661 }
1662
1663 /* Function vect_can_advance_ivs_p
1664
1665 In case the number of iterations that LOOP iterates is unknown at compile
1666 time, an epilog loop will be generated, and the loop induction variables
1667 (IVs) will be "advanced" to the value they are supposed to take just before
1668 the epilog loop. Here we check that the access function of the loop IVs
1669 and the expression that represents the loop bound are simple enough.
1670 These restrictions will be relaxed in the future. */
1671
1672 bool
1673 vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
1674 {
1675 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1676 basic_block bb = loop->header;
1677 gimple phi;
1678 gimple_stmt_iterator gsi;
1679
1680 /* Analyze phi functions of the loop header. */
1681
1682 if (dump_enabled_p ())
1683 dump_printf_loc (MSG_NOTE, vect_location, "vect_can_advance_ivs_p:");
1684 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1685 {
1686 tree access_fn = NULL;
1687 tree evolution_part;
1688
1689 phi = gsi_stmt (gsi);
1690 if (dump_enabled_p ())
1691 {
1692 dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
1693 dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
1694 }
1695
1696 /* Skip virtual phi's. The data dependences that are associated with
1697 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
1698
1699 if (virtual_operand_p (PHI_RESULT (phi)))
1700 {
1701 if (dump_enabled_p ())
1702 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1703 "virtual phi. skip.");
1704 continue;
1705 }
1706
1707 /* Skip reduction phis. */
1708
1709 if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (phi)) == vect_reduction_def)
1710 {
1711 if (dump_enabled_p ())
1712 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1713 "reduc phi. skip.");
1714 continue;
1715 }
1716
1717 /* Analyze the evolution function. */
1718
1719 access_fn = instantiate_parameters
1720 (loop, analyze_scalar_evolution (loop, PHI_RESULT (phi)));
1721
1722 if (!access_fn)
1723 {
1724 if (dump_enabled_p ())
1725 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1726 "No Access function.");
1727 return false;
1728 }
1729
1730 STRIP_NOPS (access_fn);
1731 if (dump_enabled_p ())
1732 {
1733 dump_printf_loc (MSG_NOTE, vect_location,
1734 "Access function of PHI: ");
1735 dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
1736 }
1737
1738 evolution_part = evolution_part_in_loop_num (access_fn, loop->num);
1739
1740 if (evolution_part == NULL_TREE)
1741 {
1742 if (dump_enabled_p ())
1743 dump_printf (MSG_MISSED_OPTIMIZATION, "No evolution.");
1744 return false;
1745 }
1746
1747 /* FORNOW: We do not transform initial conditions of IVs
1748 which evolution functions are a polynomial of degree >= 2. */
1749
1750 if (tree_is_chrec (evolution_part))
1751 return false;
1752 }
1753
1754 return true;
1755 }
1756
1757
1758 /* Function vect_update_ivs_after_vectorizer.
1759
1760 "Advance" the induction variables of LOOP to the value they should take
1761 after the execution of LOOP. This is currently necessary because the
1762 vectorizer does not handle induction variables that are used after the
1763 loop. Such a situation occurs when the last iterations of LOOP are
1764 peeled, because:
1765 1. We introduced new uses after LOOP for IVs that were not originally used
1766 after LOOP: the IVs of LOOP are now used by an epilog loop.
1767 2. LOOP is going to be vectorized; this means that it will iterate N/VF
1768 times, whereas the loop IVs should be bumped N times.
1769
1770 Input:
1771 - LOOP - a loop that is going to be vectorized. The last few iterations
1772 of LOOP were peeled.
1773 - NITERS - the number of iterations that LOOP executes (before it is
1774 vectorized). i.e, the number of times the ivs should be bumped.
1775 - UPDATE_E - a successor edge of LOOP->exit that is on the (only) path
1776 coming out from LOOP on which there are uses of the LOOP ivs
1777 (this is the path from LOOP->exit to epilog_loop->preheader).
1778
1779 The new definitions of the ivs are placed in LOOP->exit.
1780 The phi args associated with the edge UPDATE_E in the bb
1781 UPDATE_E->dest are updated accordingly.
1782
1783 Assumption 1: Like the rest of the vectorizer, this function assumes
1784 a single loop exit that has a single predecessor.
1785
1786 Assumption 2: The phi nodes in the LOOP header and in update_bb are
1787 organized in the same order.
1788
1789 Assumption 3: The access function of the ivs is simple enough (see
1790 vect_can_advance_ivs_p). This assumption will be relaxed in the future.
1791
1792 Assumption 4: Exactly one of the successors of LOOP exit-bb is on a path
1793 coming out of LOOP on which the ivs of LOOP are used (this is the path
1794 that leads to the epilog loop; other paths skip the epilog loop). This
1795 path starts with the edge UPDATE_E, and its destination (denoted update_bb)
1796 needs to have its phis updated.
1797 */
1798
static void
vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
				  edge update_e)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block exit_bb = single_exit (loop)->dest;
  gimple phi, phi1;
  gimple_stmt_iterator gsi, gsi1;
  basic_block update_bb = update_e->dest;

  /* gcc_assert (vect_can_advance_ivs_p (loop_vinfo)); */

  /* Make sure there exists a single-predecessor exit bb:  */
  gcc_assert (single_pred_p (exit_bb));

  /* Walk header phis and UPDATE_BB phis in lockstep -- Assumption 2
     of the header comment: both lists are organized in the same
     order.  */
  for (gsi = gsi_start_phis (loop->header), gsi1 = gsi_start_phis (update_bb);
       !gsi_end_p (gsi) && !gsi_end_p (gsi1);
       gsi_next (&gsi), gsi_next (&gsi1))
    {
      tree init_expr;
      tree step_expr, off;
      tree type;
      tree var, ni, ni_name;
      gimple_stmt_iterator last_gsi;
      stmt_vec_info stmt_info;

      phi = gsi_stmt (gsi);
      phi1 = gsi_stmt (gsi1);
      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
			   "vect_update_ivs_after_vectorizer: phi: ");
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
	}

      /* Skip virtual phi's.  */
      if (virtual_operand_p (PHI_RESULT (phi)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "virtual phi. skip.");
	  continue;
	}

      /* Skip reduction phis.  */
      stmt_info = vinfo_for_stmt (phi);
      if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
			     "reduc phi. skip.");
	  continue;
	}

      type = TREE_TYPE (gimple_phi_result (phi));
      step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
      step_expr = unshare_expr (step_expr);

      /* FORNOW: We do not support IVs whose evolution function is a polynomial
         of degree >= 2 or exponential.  */
      gcc_assert (!tree_is_chrec (step_expr));

      init_expr = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));

      /* Compute the value the IV has after NITERS iterations:
	 init + niters * step (pointer-plus for pointer-typed IVs).  */
      off = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
			 fold_convert (TREE_TYPE (step_expr), niters),
			 step_expr);
      if (POINTER_TYPE_P (type))
	ni = fold_build_pointer_plus (init_expr, off);
      else
	ni = fold_build2 (PLUS_EXPR, type,
			  init_expr, fold_convert (type, off));

      var = create_tmp_var (type, "tmp");

      /* Materialize the advanced value at the end of the exit block.  */
      last_gsi = gsi_last_bb (exit_bb);
      ni_name = force_gimple_operand_gsi (&last_gsi, ni, false, var,
					  true, GSI_SAME_STMT);

      /* Fix phi expressions in the successor bb.  */
      adjust_phi_and_debug_stmts (phi1, update_e, ni_name);
    }
}
1882
1883 /* Function vect_do_peeling_for_loop_bound
1884
1885 Peel the last iterations of the loop represented by LOOP_VINFO.
1886 The peeled iterations form a new epilog loop. Given that the loop now
1887 iterates NITERS times, the new epilog loop iterates
1888 NITERS % VECTORIZATION_FACTOR times.
1889
1890 The original loop will later be made to iterate
1891 NITERS / VECTORIZATION_FACTOR times (this value is placed into RATIO).
1892
1893 COND_EXPR and COND_EXPR_STMT_LIST are combined with a new generated
1894 test. */
1895
1896 void
1897 vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
1898 unsigned int th, bool check_profitability)
1899 {
1900 tree ni_name, ratio_mult_vf_name;
1901 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
1902 struct loop *new_loop;
1903 edge update_e;
1904 basic_block preheader;
1905 int loop_num;
1906 int max_iter;
1907 tree cond_expr = NULL_TREE;
1908 gimple_seq cond_expr_stmt_list = NULL;
1909
1910 if (dump_enabled_p ())
1911 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
1912 "=== vect_do_peeling_for_loop_bound ===");
1913
1914 initialize_original_copy_tables ();
1915
1916 /* Generate the following variables on the preheader of original loop:
1917
1918 ni_name = number of iteration the original loop executes
1919 ratio = ni_name / vf
1920 ratio_mult_vf_name = ratio * vf */
1921 vect_generate_tmps_on_preheader (loop_vinfo, &ni_name,
1922 &ratio_mult_vf_name, ratio,
1923 cond_expr_stmt_list);
1924
1925 loop_num = loop->num;
1926
1927 new_loop = slpeel_tree_peel_loop_to_edge (loop, single_exit (loop),
1928 &ratio_mult_vf_name, ni_name, false,
1929 th, check_profitability,
1930 cond_expr, cond_expr_stmt_list,
1931 0, LOOP_VINFO_VECT_FACTOR (loop_vinfo));
1932 gcc_assert (new_loop);
1933 gcc_assert (loop_num == loop->num);
1934 #ifdef ENABLE_CHECKING
1935 slpeel_verify_cfg_after_peeling (loop, new_loop);
1936 #endif
1937
1938 /* A guard that controls whether the new_loop is to be executed or skipped
1939 is placed in LOOP->exit. LOOP->exit therefore has two successors - one
1940 is the preheader of NEW_LOOP, where the IVs from LOOP are used. The other
1941 is a bb after NEW_LOOP, where these IVs are not used. Find the edge that
1942 is on the path where the LOOP IVs are used and need to be updated. */
1943
1944 preheader = loop_preheader_edge (new_loop)->src;
1945 if (EDGE_PRED (preheader, 0)->src == single_exit (loop)->dest)
1946 update_e = EDGE_PRED (preheader, 0);
1947 else
1948 update_e = EDGE_PRED (preheader, 1);
1949
1950 /* Update IVs of original loop as if they were advanced
1951 by ratio_mult_vf_name steps. */
1952 vect_update_ivs_after_vectorizer (loop_vinfo, ratio_mult_vf_name, update_e);
1953
1954 /* For vectorization factor N, we need to copy last N-1 values in epilogue
1955 and this means N-2 loopback edge executions.
1956
1957 PEELING_FOR_GAPS works by subtracting last iteration and thus the epilogue
1958 will execute at least LOOP_VINFO_VECT_FACTOR times. */
1959 max_iter = (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
1960 ? LOOP_VINFO_VECT_FACTOR (loop_vinfo) * 2
1961 : LOOP_VINFO_VECT_FACTOR (loop_vinfo)) - 2;
1962 if (check_profitability)
1963 max_iter = MAX (max_iter, (int) th - 1);
1964 record_niter_bound (new_loop, double_int::from_shwi (max_iter), false, true);
1965 dump_printf (MSG_OPTIMIZED_LOCATIONS,
1966 "Setting upper bound of nb iterations for epilogue "
1967 "loop to %d\n", max_iter);
1968
1969 /* After peeling we have to reset scalar evolution analyzer. */
1970 scev_reset ();
1971
1972 free_original_copy_tables ();
1973 }
1974
1975
/* Function vect_gen_niters_for_prolog_loop

   Set the number of iterations for the loop represented by LOOP_VINFO
   to the minimum between LOOP_NITERS (the original iteration count of the loop)
   and the misalignment of DR - the data reference recorded in
   LOOP_VINFO_UNALIGNED_DR (LOOP_VINFO).  As a result, after the execution of
   this loop, the data reference DR will refer to an aligned location.

   The following computation is generated:

   If the misalignment of DR is known at compile time:
     addr_mis = int mis = DR_MISALIGNMENT (dr);
   Else, compute address misalignment in bytes:
     addr_mis = addr & (vectype_align - 1)

   prolog_niters = min (LOOP_NITERS, ((VF - addr_mis/elem_size)&(VF-1))/step)

   (elem_size = element type size; an element is the scalar element whose type
   is the inner type of the vectype)

   When the step of the data-ref in the loop is not 1 (as in interleaved data
   and SLP), the number of iterations of the prolog must be divided by the step
   (which is equal to the size of interleaved group).

   The above formulas assume that VF == number of elements in the vector. This
   may not hold when there are multiple-types in the loop.
   In this case, for some data-references in the loop the VF does not represent
   the number of elements that fit in the vector.  Therefore, instead of VF we
   use TYPE_VECTOR_SUBPARTS.

   On return *BOUND holds an upper bound on the generated iteration count
   (the known peel amount, or the number of vector elements).  */

static tree
vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters, int *bound)
{
  struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  tree var;
  gimple_seq stmts;
  tree iters, iters_name;
  edge pe;
  basic_block new_bb;
  gimple dr_stmt = DR_STMT (dr);
  stmt_vec_info stmt_info = vinfo_for_stmt (dr_stmt);
  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
  /* Vector alignment in bytes; assumed to be a power of two (the else
     branch below masks with vectype_align - 1).  */
  int vectype_align = TYPE_ALIGN (vectype) / BITS_PER_UNIT;
  tree niters_type = TREE_TYPE (loop_niters);
  int nelements = TYPE_VECTOR_SUBPARTS (vectype);

  pe = loop_preheader_edge (loop);

  if (LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
    {
      /* The peel amount was determined at analysis time; emit it as a
	 compile-time constant.  */
      int npeel = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);

      if (dump_enabled_p ())
        dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
                         "known peeling = %d.", npeel);

      iters = build_int_cst (niters_type, npeel);
      *bound = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
    }
  else
    {
      /* Misalignment unknown at compile time: generate code that computes
	 the number of scalar iterations needed to reach alignment from the
	 runtime address of the first access.  */
      gimple_seq new_stmts = NULL;
      bool negative = tree_int_cst_compare (DR_STEP (dr), size_zero_node) < 0;
      /* For a negative step, start from the last vector element accessed.  */
      tree offset = negative
	  ? size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1) : NULL_TREE;
      tree start_addr = vect_create_addr_base_for_vector_ref (dr_stmt,
						&new_stmts, offset, loop);
      tree type = unsigned_type_for (TREE_TYPE (start_addr));
      tree vectype_align_minus_1 = build_int_cst (type, vectype_align - 1);
      HOST_WIDE_INT elem_size =
                int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
      /* Element size is assumed to be a power of two so the division can
	 be done as a right shift.  */
      tree elem_size_log = build_int_cst (type, exact_log2 (elem_size));
      tree nelements_minus_1 = build_int_cst (type, nelements - 1);
      tree nelements_tree = build_int_cst (type, nelements);
      tree byte_misalign;
      tree elem_misalign;

      /* The address computation must not have required splitting the
	 preheader edge.  */
      new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmts);
      gcc_assert (!new_bb);

      /* Create:  byte_misalign = addr & (vectype_align - 1)  */
      byte_misalign =
        fold_build2 (BIT_AND_EXPR, type, fold_convert (type, start_addr),
                     vectype_align_minus_1);

      /* Create:  elem_misalign = byte_misalign / element_size  */
      elem_misalign =
        fold_build2 (RSHIFT_EXPR, type, byte_misalign, elem_size_log);

      /* Create:  (niters_type) (nelements - elem_misalign)&(nelements - 1)  */
      if (negative)
	iters = fold_build2 (MINUS_EXPR, type, elem_misalign, nelements_tree);
      else
	iters = fold_build2 (MINUS_EXPR, type, nelements_tree, elem_misalign);
      iters = fold_build2 (BIT_AND_EXPR, type, iters, nelements_minus_1);
      iters = fold_convert (niters_type, iters);
      *bound = nelements;
    }

  /* Create:  prolog_loop_niters = min (iters, loop_niters) */
  /* If the loop bound is known at compile time we already verified that it is
     greater than vf; since the misalignment ('iters') is at most vf, there's
     no need to generate the MIN_EXPR in this case.  */
  if (TREE_CODE (loop_niters) != INTEGER_CST)
    iters = fold_build2 (MIN_EXPR, niters_type, iters, loop_niters);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
                       "niters for prolog loop: ");
      dump_generic_expr (MSG_OPTIMIZED_LOCATIONS, TDF_SLIM, iters);
    }

  var = create_tmp_var (niters_type, "prolog_loop_niters");
  stmts = NULL;
  iters_name = force_gimple_operand (iters, &stmts, false, var);

  /* Insert stmt on loop preheader edge.  */
  if (stmts)
    {
      basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
      gcc_assert (!new_bb);
    }

  return iters_name;
}
2103
2104
2105 /* Function vect_update_init_of_dr
2106
2107 NITERS iterations were peeled from LOOP. DR represents a data reference
2108 in LOOP. This function updates the information recorded in DR to
2109 account for the fact that the first NITERS iterations had already been
2110 executed. Specifically, it updates the OFFSET field of DR. */
2111
2112 static void
2113 vect_update_init_of_dr (struct data_reference *dr, tree niters)
2114 {
2115 tree offset = DR_OFFSET (dr);
2116
2117 niters = fold_build2 (MULT_EXPR, sizetype,
2118 fold_convert (sizetype, niters),
2119 fold_convert (sizetype, DR_STEP (dr)));
2120 offset = fold_build2 (PLUS_EXPR, sizetype,
2121 fold_convert (sizetype, offset), niters);
2122 DR_OFFSET (dr) = offset;
2123 }
2124
2125
2126 /* Function vect_update_inits_of_drs
2127
2128 NITERS iterations were peeled from the loop represented by LOOP_VINFO.
2129 This function updates the information recorded for the data references in
2130 the loop to account for the fact that the first NITERS iterations had
2131 already been executed. Specifically, it updates the initial_condition of
2132 the access_function of all the data_references in the loop. */
2133
2134 static void
2135 vect_update_inits_of_drs (loop_vec_info loop_vinfo, tree niters)
2136 {
2137 unsigned int i;
2138 vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2139 struct data_reference *dr;
2140
2141 if (dump_enabled_p ())
2142 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
2143 "=== vect_update_inits_of_dr ===");
2144
2145 FOR_EACH_VEC_ELT (datarefs, i, dr)
2146 vect_update_init_of_dr (dr, niters);
2147 }
2148
2149
2150 /* Function vect_do_peeling_for_alignment
2151
2152 Peel the first 'niters' iterations of the loop represented by LOOP_VINFO.
2153 'niters' is set to the misalignment of one of the data references in the
2154 loop, thereby forcing it to refer to an aligned location at the beginning
2155 of the execution of this loop. The data reference for which we are
2156 peeling is recorded in LOOP_VINFO_UNALIGNED_DR. */
2157
2158 void
2159 vect_do_peeling_for_alignment (loop_vec_info loop_vinfo,
2160 unsigned int th, bool check_profitability)
2161 {
2162 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2163 tree niters_of_prolog_loop, ni_name;
2164 tree n_iters;
2165 tree wide_prolog_niters;
2166 struct loop *new_loop;
2167 int max_iter;
2168 int bound = 0;
2169
2170 if (dump_enabled_p ())
2171 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
2172 "=== vect_do_peeling_for_alignment ===");
2173
2174 initialize_original_copy_tables ();
2175
2176 ni_name = vect_build_loop_niters (loop_vinfo, NULL);
2177 niters_of_prolog_loop = vect_gen_niters_for_prolog_loop (loop_vinfo,
2178 ni_name,
2179 &bound);
2180
2181 /* Peel the prolog loop and iterate it niters_of_prolog_loop. */
2182 new_loop =
2183 slpeel_tree_peel_loop_to_edge (loop, loop_preheader_edge (loop),
2184 &niters_of_prolog_loop, ni_name, true,
2185 th, check_profitability, NULL_TREE, NULL,
2186 bound,
2187 0);
2188
2189 gcc_assert (new_loop);
2190 #ifdef ENABLE_CHECKING
2191 slpeel_verify_cfg_after_peeling (new_loop, loop);
2192 #endif
2193 /* For vectorization factor N, we need to copy at most N-1 values
2194 for alignment and this means N-2 loopback edge executions. */
2195 max_iter = LOOP_VINFO_VECT_FACTOR (loop_vinfo) - 2;
2196 if (check_profitability)
2197 max_iter = MAX (max_iter, (int) th - 1);
2198 record_niter_bound (new_loop, double_int::from_shwi (max_iter), false, true);
2199 dump_printf (MSG_OPTIMIZED_LOCATIONS,
2200 "Setting upper bound of nb iterations for prologue "
2201 "loop to %d\n", max_iter);
2202
2203 /* Update number of times loop executes. */
2204 n_iters = LOOP_VINFO_NITERS (loop_vinfo);
2205 LOOP_VINFO_NITERS (loop_vinfo) = fold_build2 (MINUS_EXPR,
2206 TREE_TYPE (n_iters), n_iters, niters_of_prolog_loop);
2207
2208 if (types_compatible_p (sizetype, TREE_TYPE (niters_of_prolog_loop)))
2209 wide_prolog_niters = niters_of_prolog_loop;
2210 else
2211 {
2212 gimple_seq seq = NULL;
2213 edge pe = loop_preheader_edge (loop);
2214 tree wide_iters = fold_convert (sizetype, niters_of_prolog_loop);
2215 tree var = create_tmp_var (sizetype, "prolog_loop_adjusted_niters");
2216 wide_prolog_niters = force_gimple_operand (wide_iters, &seq, false,
2217 var);
2218 if (seq)
2219 {
2220 /* Insert stmt on loop preheader edge. */
2221 basic_block new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
2222 gcc_assert (!new_bb);
2223 }
2224 }
2225
2226 /* Update the init conditions of the access functions of all data refs. */
2227 vect_update_inits_of_drs (loop_vinfo, wide_prolog_niters);
2228
2229 /* After peeling we have to reset scalar evolution analyzer. */
2230 scev_reset ();
2231
2232 free_original_copy_tables ();
2233 }
2234
2235
2236 /* Function vect_create_cond_for_align_checks.
2237
2238 Create a conditional expression that represents the alignment checks for
2239 all of data references (array element references) whose alignment must be
2240 checked at runtime.
2241
2242 Input:
2243 COND_EXPR - input conditional expression. New conditions will be chained
2244 with logical AND operation.
2245 LOOP_VINFO - two fields of the loop information are used.
2246 LOOP_VINFO_PTR_MASK is the mask used to check the alignment.
2247 LOOP_VINFO_MAY_MISALIGN_STMTS contains the refs to be checked.
2248
2249 Output:
2250 COND_EXPR_STMT_LIST - statements needed to construct the conditional
2251 expression.
2252 The returned value is the conditional expression to be used in the if
2253 statement that controls which version of the loop gets executed at runtime.
2254
2255 The algorithm makes two assumptions:
2256 1) The number of bytes "n" in a vector is a power of 2.
2257 2) An address "a" is aligned if a%n is zero and that this
2258 test can be done as a&(n-1) == 0. For example, for 16
2259 byte vectors the test is a&0xf == 0. */
2260
2261 static void
2262 vect_create_cond_for_align_checks (loop_vec_info loop_vinfo,
2263 tree *cond_expr,
2264 gimple_seq *cond_expr_stmt_list)
2265 {
2266 struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2267 vec<gimple> may_misalign_stmts
2268 = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
2269 gimple ref_stmt;
2270 int mask = LOOP_VINFO_PTR_MASK (loop_vinfo);
2271 tree mask_cst;
2272 unsigned int i;
2273 tree int_ptrsize_type;
2274 char tmp_name[20];
2275 tree or_tmp_name = NULL_TREE;
2276 tree and_tmp_name;
2277 gimple and_stmt;
2278 tree ptrsize_zero;
2279 tree part_cond_expr;
2280
2281 /* Check that mask is one less than a power of 2, i.e., mask is
2282 all zeros followed by all ones. */
2283 gcc_assert ((mask != 0) && ((mask & (mask+1)) == 0));
2284
2285 int_ptrsize_type = signed_type_for (ptr_type_node);
2286
2287 /* Create expression (mask & (dr_1 || ... || dr_n)) where dr_i is the address
2288 of the first vector of the i'th data reference. */
2289
2290 FOR_EACH_VEC_ELT (may_misalign_stmts, i, ref_stmt)
2291 {
2292 gimple_seq new_stmt_list = NULL;
2293 tree addr_base;
2294 tree addr_tmp_name;
2295 tree new_or_tmp_name;
2296 gimple addr_stmt, or_stmt;
2297 stmt_vec_info stmt_vinfo = vinfo_for_stmt (ref_stmt);
2298 tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
2299 bool negative = tree_int_cst_compare
2300 (DR_STEP (STMT_VINFO_DATA_REF (stmt_vinfo)), size_zero_node) < 0;
2301 tree offset = negative
2302 ? size_int (-TYPE_VECTOR_SUBPARTS (vectype) + 1) : NULL_TREE;
2303
2304 /* create: addr_tmp = (int)(address_of_first_vector) */
2305 addr_base =
2306 vect_create_addr_base_for_vector_ref (ref_stmt, &new_stmt_list,
2307 offset, loop);
2308 if (new_stmt_list != NULL)
2309 gimple_seq_add_seq (cond_expr_stmt_list, new_stmt_list);
2310
2311 sprintf (tmp_name, "addr2int%d", i);
2312 addr_tmp_name = make_temp_ssa_name (int_ptrsize_type, NULL, tmp_name);
2313 addr_stmt = gimple_build_assign_with_ops (NOP_EXPR, addr_tmp_name,
2314 addr_base, NULL_TREE);
2315 gimple_seq_add_stmt (cond_expr_stmt_list, addr_stmt);
2316
2317 /* The addresses are OR together. */
2318
2319 if (or_tmp_name != NULL_TREE)
2320 {
2321 /* create: or_tmp = or_tmp | addr_tmp */
2322 sprintf (tmp_name, "orptrs%d", i);
2323 new_or_tmp_name = make_temp_ssa_name (int_ptrsize_type, NULL, tmp_name);
2324 or_stmt = gimple_build_assign_with_ops (BIT_IOR_EXPR,
2325 new_or_tmp_name,
2326 or_tmp_name, addr_tmp_name);
2327 gimple_seq_add_stmt (cond_expr_stmt_list, or_stmt);
2328 or_tmp_name = new_or_tmp_name;
2329 }
2330 else
2331 or_tmp_name = addr_tmp_name;
2332
2333 } /* end for i */
2334
2335 mask_cst = build_int_cst (int_ptrsize_type, mask);
2336
2337 /* create: and_tmp = or_tmp & mask */
2338 and_tmp_name = make_temp_ssa_name (int_ptrsize_type, NULL, "andmask");
2339
2340 and_stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, and_tmp_name,
2341 or_tmp_name, mask_cst);
2342 gimple_seq_add_stmt (cond_expr_stmt_list, and_stmt);
2343
2344 /* Make and_tmp the left operand of the conditional test against zero.
2345 if and_tmp has a nonzero bit then some address is unaligned. */
2346 ptrsize_zero = build_int_cst (int_ptrsize_type, 0);
2347 part_cond_expr = fold_build2 (EQ_EXPR, boolean_type_node,
2348 and_tmp_name, ptrsize_zero);
2349 if (*cond_expr)
2350 *cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
2351 *cond_expr, part_cond_expr);
2352 else
2353 *cond_expr = part_cond_expr;
2354 }
2355
2356
2357 /* Function vect_vfa_segment_size.
2358
2359 Create an expression that computes the size of segment
2360 that will be accessed for a data reference. The functions takes into
2361 account that realignment loads may access one more vector.
2362
2363 Input:
2364 DR: The data reference.
2365 LENGTH_FACTOR: segment length to consider.
2366
2367 Return an expression whose value is the size of segment which will be
2368 accessed by DR. */
2369
2370 static tree
2371 vect_vfa_segment_size (struct data_reference *dr, tree length_factor)
2372 {
2373 tree segment_length;
2374
2375 if (integer_zerop (DR_STEP (dr)))
2376 segment_length = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr)));
2377 else
2378 segment_length = size_binop (MULT_EXPR,
2379 fold_convert (sizetype, DR_STEP (dr)),
2380 fold_convert (sizetype, length_factor));
2381
2382 if (vect_supportable_dr_alignment (dr, false)
2383 == dr_explicit_realign_optimized)
2384 {
2385 tree vector_size = TYPE_SIZE_UNIT
2386 (STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr))));
2387
2388 segment_length = size_binop (PLUS_EXPR, segment_length, vector_size);
2389 }
2390 return segment_length;
2391 }
2392
2393
/* Function vect_create_cond_for_alias_checks.

   Create a conditional expression that represents the run-time checks for
   overlapping of address ranges represented by a list of data references
   relations passed as input.

   Input:
   COND_EXPR  - input conditional expression.  New conditions will be chained
                with logical AND operation.
   LOOP_VINFO - field LOOP_VINFO_MAY_ALIAS_STMTS contains the list of ddrs
	        to be checked.

   Output:
   COND_EXPR - conditional expression.
   COND_EXPR_STMT_LIST - statements needed to construct the conditional
                         expression.


   The returned value is the conditional expression to be used in the if
   statement that controls which version of the loop gets executed at runtime.
*/

static void
vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo,
				   tree * cond_expr,
				   gimple_seq * cond_expr_stmt_list)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  vec<ddr_p> may_alias_ddrs =
    LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
  int vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
  tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);

  ddr_p ddr;
  unsigned int i;
  tree part_cond_expr, length_factor;

  /* Create expression
     ((store_ptr_0 + store_segment_length_0) <= load_ptr_0)
     || (load_ptr_0 + load_segment_length_0) <= store_ptr_0))
     &&
     ...
     &&
     ((store_ptr_n + store_segment_length_n) <= load_ptr_n)
     || (load_ptr_n + load_segment_length_n) <= store_ptr_n))  */

  if (may_alias_ddrs.is_empty ())
    return;

  FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr)
    {
      struct data_reference *dr_a, *dr_b;
      gimple dr_group_first_a, dr_group_first_b;
      tree addr_base_a, addr_base_b;
      tree segment_length_a, segment_length_b;
      gimple stmt_a, stmt_b;
      tree seg_a_min, seg_a_max, seg_b_min, seg_b_max;

      /* For a grouped access, check the whole group starting from its
	 first element.  */
      dr_a = DDR_A (ddr);
      stmt_a = DR_STMT (DDR_A (ddr));
      dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
      if (dr_group_first_a)
	{
	  stmt_a = dr_group_first_a;
	  dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
	}

      dr_b = DDR_B (ddr);
      stmt_b = DR_STMT (DDR_B (ddr));
      dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
      if (dr_group_first_b)
	{
	  stmt_b = dr_group_first_b;
	  dr_b = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_b));
	}

      addr_base_a =
        vect_create_addr_base_for_vector_ref (stmt_a, cond_expr_stmt_list,
					      NULL_TREE, loop);
      addr_base_b =
        vect_create_addr_base_for_vector_ref (stmt_b, cond_expr_stmt_list,
					      NULL_TREE, loop);

      /* When the two accesses have different steps the safe segment length
	 is the full scalar iteration count; otherwise one vectorization
	 factor's worth of iterations suffices.  */
      if (!operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0))
	length_factor = scalar_loop_iters;
      else
	length_factor = size_int (vect_factor);
      segment_length_a = vect_vfa_segment_size (dr_a, length_factor);
      segment_length_b = vect_vfa_segment_size (dr_b, length_factor);

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
                           "create runtime check for data references ");
	  dump_generic_expr (MSG_OPTIMIZED_LOCATIONS, TDF_SLIM, DR_REF (dr_a));
	  dump_printf (MSG_OPTIMIZED_LOCATIONS, " and ");
	  dump_generic_expr (MSG_OPTIMIZED_LOCATIONS, TDF_SLIM, DR_REF (dr_b));
	}

      /* For a negative step the segment grows downwards, so swap the
	 min/max ends of the range.  */
      seg_a_min = addr_base_a;
      seg_a_max = fold_build_pointer_plus (addr_base_a, segment_length_a);
      if (tree_int_cst_compare (DR_STEP (dr_a), size_zero_node) < 0)
	seg_a_min = seg_a_max, seg_a_max = addr_base_a;

      seg_b_min = addr_base_b;
      seg_b_max = fold_build_pointer_plus (addr_base_b, segment_length_b);
      if (tree_int_cst_compare (DR_STEP (dr_b), size_zero_node) < 0)
	seg_b_min = seg_b_max, seg_b_max = addr_base_b;

      /* The two segments do not overlap iff one ends before the other
	 begins.  */
      part_cond_expr =
      	fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
	  fold_build2 (LE_EXPR, boolean_type_node, seg_a_max, seg_b_min),
	  fold_build2 (LE_EXPR, boolean_type_node, seg_b_max, seg_a_min));

      if (*cond_expr)
	*cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
				  *cond_expr, part_cond_expr);
      else
	*cond_expr = part_cond_expr;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
		     "created %u versioning for alias checks.\n",
		     may_alias_ddrs.length ());
}
2520
2521
/* Function vect_loop_versioning.

   If the loop has data references that may or may not be aligned or/and
   has data reference relations whose independence was not proven then
   two versions of the loop need to be generated, one which is vectorized
   and one which isn't.  A test is then generated to control which of the
   loops is executed.  The test checks for the alignment of all of the
   data references that may or may not be aligned.  An additional
   sequence of runtime tests is generated for each pairs of DDRs whose
   independence was not proven.  The vectorized version of loop is
   executed only if both alias and alignment tests are passed.

   The test generated to check which version of loop is executed
   is modified to also check for profitability as indicated by the
   cost model initially.

   The versioning precondition(s) are placed in *COND_EXPR and
   *COND_EXPR_STMT_LIST.  */

void
vect_loop_versioning (loop_vec_info loop_vinfo,
		      unsigned int th, bool check_profitability)
{
  struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
  basic_block condition_bb;
  gimple_stmt_iterator gsi, cond_exp_gsi;
  basic_block merge_bb;
  basic_block new_exit_bb;
  edge new_exit_e, e;
  gimple orig_phi, new_phi;
  tree cond_expr = NULL_TREE;
  gimple_seq cond_expr_stmt_list = NULL;
  tree arg;
  /* Bias the branch toward the vectorized version (4/5 probability).  */
  unsigned prob = 4 * REG_BR_PROB_BASE / 5;
  gimple_seq gimplify_stmt_list = NULL;
  tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);

  /* Start the versioning condition with the cost-model check
     niters > TH, when requested.  */
  if (check_profitability)
    {
      cond_expr = fold_build2 (GT_EXPR, boolean_type_node, scalar_loop_iters,
			       build_int_cst (TREE_TYPE (scalar_loop_iters), th));
      cond_expr = force_gimple_operand_1 (cond_expr, &cond_expr_stmt_list,
					  is_gimple_condexpr, NULL_TREE);
    }

  /* AND in the runtime alignment and alias checks as needed.  */
  if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
    vect_create_cond_for_align_checks (loop_vinfo, &cond_expr,
				       &cond_expr_stmt_list);

  if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
    vect_create_cond_for_alias_checks (loop_vinfo, &cond_expr,
				       &cond_expr_stmt_list);

  cond_expr = force_gimple_operand_1 (cond_expr, &gimplify_stmt_list,
				      is_gimple_condexpr, NULL_TREE);
  gimple_seq_add_seq (&cond_expr_stmt_list, gimplify_stmt_list);

  initialize_original_copy_tables ();
  loop_version (loop, cond_expr, &condition_bb,
		prob, prob, REG_BR_PROB_BASE - prob, true);
  free_original_copy_tables();

  /* Loop versioning violates an assumption we try to maintain during
     vectorization - that the loop exit block has a single predecessor.
     After versioning, the exit block of both loop versions is the same
     basic block (i.e. it has two predecessors).  Just in order to simplify
     following transformations in the vectorizer, we fix this situation
     here by adding a new (empty) block on the exit-edge of the loop,
     with the proper loop-exit phis to maintain loop-closed-form.  */

  merge_bb = single_exit (loop)->dest;
  gcc_assert (EDGE_COUNT (merge_bb->preds) == 2);
  new_exit_bb = split_edge (single_exit (loop));
  new_exit_e = single_exit (loop);
  e = EDGE_SUCC (new_exit_bb, 0);

  /* Re-create the loop-closed-form PHIs in the new exit block and make
     the merge-block PHIs use their results.  */
  for (gsi = gsi_start_phis (merge_bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      tree new_res;
      orig_phi = gsi_stmt (gsi);
      new_res = copy_ssa_name (PHI_RESULT (orig_phi), NULL);
      new_phi = create_phi_node (new_res, new_exit_bb);
      arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
      add_phi_arg (new_phi, arg, new_exit_e,
		   gimple_phi_arg_location_from_edge (orig_phi, e));
      adjust_phi_and_debug_stmts (orig_phi, e, PHI_RESULT (new_phi));
    }

  /* End loop-exit-fixes after versioning.  */

  update_ssa (TODO_update_ssa);
  /* Materialize the statements feeding the versioning condition at the
     end of the condition block.  */
  if (cond_expr_stmt_list)
    {
      cond_exp_gsi = gsi_last_bb (condition_bb);
      gsi_insert_seq_before (&cond_exp_gsi, cond_expr_stmt_list,
			     GSI_SAME_STMT);
    }
}