/* Loop unrolling and peeling.
   Copyright (C) 2002, 2003, 2004, 2005, 2007, 2008, 2010, 2011
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "obstack.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "params.h"
#include "expr.h"
#include "hashtab.h"
#include "recog.h"
#include "target.h"
#include "dumpfile.h"

/* This pass performs loop unrolling and peeling.  We only perform these
   optimizations on innermost loops (with a single exception) because
   the impact on performance is greatest here, and we want to avoid
   unnecessary code size growth.  The gain comes from greater sequentiality
   of code, better opportunities for further optimization passes and, in
   some cases, fewer tests of exit conditions.  The main problem is code
   growth, which impacts performance negatively due to cache effects.

   What we do:

   -- complete peeling of once-rolling loops; this is the above mentioned
      exception, as it cancels the loop completely and
      does not cause code growth
   -- complete peeling of loops that roll a (small) constant number of times
   -- simple peeling of the first iterations of loops that do not roll much
      (according to profile feedback)
   -- unrolling of loops that roll a constant number of times; this is almost
      always a win, as we get rid of the exit condition tests
   -- unrolling of loops whose number of iterations can be computed
      at runtime; we also get rid of the exit condition tests here, but there
      is the extra expense of calculating the number of iterations
   -- simple unrolling of the remaining loops; this is performed only if we
      are asked to, as the gain is questionable in this case and often
      it may even slow down the code
   For more detailed descriptions of each of those, see the comments at the
   appropriate functions below.

   There are many parameters (defined and described in params.def) that
   control how much we unroll/peel.

   ??? A great problem is that we don't have a good way to determine
   how many times we should unroll a loop; the experiments I have made
   showed that this choice may affect performance by several percent.
   */

/* Information about induction variables to split.  */

struct iv_to_split
{
  rtx insn;		/* The insn in which the induction variable occurs.  */
  rtx base_var;		/* The variable on which the values in the further
			   iterations are based.  */
  rtx step;		/* Step of the induction variable.  */
  struct iv_to_split *next; /* Next entry in walking order.  */
  unsigned n_loc;
  unsigned loc[3];	/* Location where the definition of the induction
			   variable occurs in the insn.  For example if
			   N_LOC is 2, the expression is located at
			   XEXP (XEXP (single_set, loc[0]), loc[1]).  */
};

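/* Editorial sketch (not part of the pass): a rough source-level picture of
   what splitting an induction variable across unrolled copies achieves.
   Instead of every copy of the body waiting on the previous copy's
   increment of I, each copy addresses memory from a common base, which
   shortens the dependence chain.  The function and names below are
   hypothetical; the block is kept out of the build on purpose.  */
#if 0
void
iv_split_sketch (int *a, int n)
{
  int i;

  /* The same loop unrolled 4x, written two ways.  */

  /* Serial induction variable: each "i++" feeds the next statement.  */
  for (i = 0; i + 3 < n; )
    {
      a[i] = 0; i++;
      a[i] = 0; i++;
      a[i] = 0; i++;
      a[i] = 0; i++;
    }

  /* Induction variable split: all four stores use the same base I.  */
  for (i = 0; i + 3 < n; i += 4)
    {
      a[i + 0] = 0;
      a[i + 1] = 0;
      a[i + 2] = 0;
      a[i + 3] = 0;
    }
}
#endif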
/* Information about accumulators to expand.  */

struct var_to_expand
{
  rtx insn;			   /* The insn in which the variable expansion occurs.  */
  rtx reg;                         /* The accumulator which is expanded.  */
  VEC(rtx,heap) *var_expansions;   /* The copies of the accumulator which is expanded.  */
  struct var_to_expand *next;	   /* Next entry in walking order.  */
  enum rtx_code op;                /* The type of the accumulation - addition, subtraction
                                      or multiplication.  */
  int expansion_count;             /* Count the number of expansions generated so far.  */
  int reuse_expansion;             /* The expansion we intend to reuse to expand
                                      the accumulator.  If REUSE_EXPANSION is 0 reuse
                                      the original accumulator.  Else use
                                      var_expansions[REUSE_EXPANSION - 1].  */
};

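/* Editorial sketch (not part of the pass) of accumulator expansion at the
   source level: the single accumulator SUM gets extra copies, one per
   unrolled body, which start at zero and are summed back together at the
   loop exit.  The function below is a hypothetical illustration and is
   kept out of the build.  */
#if 0
int
accumulator_expansion_sketch (const int *a, int n)
{
  int i, sum = 0, sum1 = 0, sum2 = 0, sum3 = 0;

  for (i = 0; i + 3 < n; i += 4)
    {
      sum  += a[i + 0];
      sum1 += a[i + 1];
      sum2 += a[i + 2];
      sum3 += a[i + 3];
    }

  /* Combine the expansions at the loop exit.  */
  sum += sum1 + sum2 + sum3;

  /* Remaining iterations.  */
  for (; i < n; i++)
    sum += a[i];

  return sum;
}
#endif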
/* Information about optimization applied in
   the unrolled loop.  */

struct opt_info
{
  htab_t insns_to_split;           /* A hashtable of insns to split.  */
  struct iv_to_split *iv_to_split_head; /* The first iv to split.  */
  struct iv_to_split **iv_to_split_tail; /* Pointer to the tail of the list.  */
  htab_t insns_with_var_to_expand; /* A hashtable of insns with accumulators
                                      to expand.  */
  struct var_to_expand *var_to_expand_head; /* The first var to expand.  */
  struct var_to_expand **var_to_expand_tail; /* Pointer to the tail of the list.  */
  unsigned first_new_block;        /* The first basic block that was
                                      duplicated.  */
  basic_block loop_exit;           /* The loop exit basic block.  */
  basic_block loop_preheader;      /* The loop preheader basic block.  */
};

static void decide_unrolling_and_peeling (int);
static void peel_loops_completely (int);
static void decide_peel_simple (struct loop *, int);
static void decide_peel_once_rolling (struct loop *, int);
static void decide_peel_completely (struct loop *, int);
static void decide_unroll_stupid (struct loop *, int);
static void decide_unroll_constant_iterations (struct loop *, int);
static void decide_unroll_runtime_iterations (struct loop *, int);
static void peel_loop_simple (struct loop *);
static void peel_loop_completely (struct loop *);
static void unroll_loop_stupid (struct loop *);
static void unroll_loop_constant_iterations (struct loop *);
static void unroll_loop_runtime_iterations (struct loop *);
static struct opt_info *analyze_insns_in_loop (struct loop *);
static void opt_info_start_duplication (struct opt_info *);
static void apply_opt_in_copies (struct opt_info *, unsigned, bool, bool);
static void free_opt_info (struct opt_info *);
static struct var_to_expand *analyze_insn_to_expand_var (struct loop *, rtx);
static bool referenced_in_one_insn_in_loop_p (struct loop *, rtx, int *);
static struct iv_to_split *analyze_iv_to_split_insn (rtx);
static void expand_var_during_unrolling (struct var_to_expand *, rtx);
static void insert_var_expansion_initialization (struct var_to_expand *,
						 basic_block);
static void combine_var_copies_in_loop_exit (struct var_to_expand *,
					     basic_block);
static rtx get_expansion (struct var_to_expand *);

150/* Unroll and/or peel (depending on FLAGS) LOOPS. */
151void
d73be268 152unroll_and_peel_loops (int flags)
a29c7ea6 153{
42fd6772 154 struct loop *loop;
50654f6c 155 bool check;
42fd6772 156 loop_iterator li;
a29c7ea6
ZD
157
158 /* First perform complete loop peeling (it is almost surely a win,
159 and affects parameters for further decision a lot). */
d73be268 160 peel_loops_completely (flags);
a29c7ea6
ZD
161
162 /* Now decide rest of unrolling and peeling. */
d73be268 163 decide_unrolling_and_peeling (flags);
a29c7ea6 164
a29c7ea6 165 /* Scan the loops, inner ones first. */
42fd6772 166 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
a29c7ea6 167 {
50654f6c 168 check = true;
a29c7ea6
ZD
169 /* And perform the appropriate transformations. */
170 switch (loop->lpt_decision.decision)
171 {
172 case LPT_PEEL_COMPLETELY:
173 /* Already done. */
113d659a 174 gcc_unreachable ();
a29c7ea6 175 case LPT_PEEL_SIMPLE:
d73be268 176 peel_loop_simple (loop);
a29c7ea6
ZD
177 break;
178 case LPT_UNROLL_CONSTANT:
d73be268 179 unroll_loop_constant_iterations (loop);
a29c7ea6
ZD
180 break;
181 case LPT_UNROLL_RUNTIME:
d73be268 182 unroll_loop_runtime_iterations (loop);
a29c7ea6
ZD
183 break;
184 case LPT_UNROLL_STUPID:
d73be268 185 unroll_loop_stupid (loop);
a29c7ea6
ZD
186 break;
187 case LPT_NONE:
50654f6c 188 check = false;
a29c7ea6
ZD
189 break;
190 default:
113d659a 191 gcc_unreachable ();
a29c7ea6
ZD
192 }
193 if (check)
194 {
195#ifdef ENABLE_CHECKING
d73be268 196 verify_loop_structure ();
a29c7ea6
ZD
197#endif
198 }
a29c7ea6 199 }
50654f6c
ZD
200
201 iv_analysis_done ();
202}
203
204/* Check whether the exit of the LOOP is at the end of the loop body. */
205
206static bool
207loop_exit_at_end_p (struct loop *loop)
208{
209 struct niter_desc *desc = get_simple_loop_desc (loop);
210 rtx insn;
211
212 if (desc->in_edge->dest != loop->latch)
213 return false;
214
215 /* Check that the latch is empty. */
216 FOR_BB_INSNS (loop->latch, insn)
217 {
8efb4b35 218 if (NONDEBUG_INSN_P (insn))
50654f6c
ZD
219 return false;
220 }
221
222 return true;
a29c7ea6
ZD
223}
224
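/* Editorial illustration (not part of the pass) of the property tested by
   loop_exit_at_end_p above: a loop whose exit test is at the beginning (a
   "while" shape) versus one whose exit test is at the end (a "do-while"
   shape).  The distinction decides whether the exit test is kept in the
   first or the last copy when unrolling below.  Hypothetical sketch, kept
   out of the build.  */
#if 0
void
exit_test_placement_sketch (int *a, int n)
{
  int i = 0;

  /* Exit test at the beginning: the test runs before every body.  */
  while (i < n)
    {
      a[i] = 0;
      i++;
    }

  /* Exit test at the end: the body runs before the test (assuming n > 0);
     this is the shape loop_exit_at_end_p detects at the RTL level.  */
  i = 0;
  do
    {
      a[i] = 0;
      i++;
    }
  while (i < n);
}
#endif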
d73be268 225/* Depending on FLAGS, check whether to peel loops completely and do so. */
a29c7ea6 226static void
d73be268 227peel_loops_completely (int flags)
a29c7ea6 228{
2c790a28 229 struct loop *loop;
42fd6772 230 loop_iterator li;
a29c7ea6 231
2c790a28 232 /* Scan the loops, the inner ones first. */
42fd6772 233 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
a29c7ea6 234 {
a29c7ea6 235 loop->lpt_decision.decision = LPT_NONE;
0c20a65f 236
c263766c
RH
237 if (dump_file)
238 fprintf (dump_file,
239 "\n;; *** Considering loop %d for complete peeling ***\n",
a29c7ea6
ZD
240 loop->num);
241
a29c7ea6
ZD
242 loop->ninsns = num_loop_insns (loop);
243
d47cc544 244 decide_peel_once_rolling (loop, flags);
a29c7ea6 245 if (loop->lpt_decision.decision == LPT_NONE)
d47cc544 246 decide_peel_completely (loop, flags);
a29c7ea6
ZD
247
248 if (loop->lpt_decision.decision == LPT_PEEL_COMPLETELY)
249 {
d73be268 250 peel_loop_completely (loop);
a29c7ea6 251#ifdef ENABLE_CHECKING
d73be268 252 verify_loop_structure ();
a29c7ea6
ZD
253#endif
254 }
a29c7ea6
ZD
255 }
256}
257
d73be268 258/* Decide whether unroll or peel loops (depending on FLAGS) and how much. */
a29c7ea6 259static void
d73be268 260decide_unrolling_and_peeling (int flags)
a29c7ea6 261{
42fd6772
ZD
262 struct loop *loop;
263 loop_iterator li;
a29c7ea6
ZD
264
265 /* Scan the loops, inner ones first. */
42fd6772 266 FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
a29c7ea6 267 {
a29c7ea6
ZD
268 loop->lpt_decision.decision = LPT_NONE;
269
c263766c
RH
270 if (dump_file)
271 fprintf (dump_file, "\n;; *** Considering loop %d ***\n", loop->num);
a29c7ea6
ZD
272
273 /* Do not peel cold areas. */
efd8f750 274 if (optimize_loop_for_size_p (loop))
a29c7ea6 275 {
c263766c
RH
276 if (dump_file)
277 fprintf (dump_file, ";; Not considering loop, cold area\n");
a29c7ea6
ZD
278 continue;
279 }
280
281 /* Can the loop be manipulated? */
282 if (!can_duplicate_loop_p (loop))
283 {
c263766c
RH
284 if (dump_file)
285 fprintf (dump_file,
a29c7ea6 286 ";; Not considering loop, cannot duplicate\n");
a29c7ea6
ZD
287 continue;
288 }
289
290 /* Skip non-innermost loops. */
291 if (loop->inner)
292 {
c263766c
RH
293 if (dump_file)
294 fprintf (dump_file, ";; Not considering loop, is not innermost\n");
a29c7ea6
ZD
295 continue;
296 }
297
298 loop->ninsns = num_loop_insns (loop);
299 loop->av_ninsns = average_num_loop_insns (loop);
300
301 /* Try transformations one by one in decreasing order of
302 priority. */
303
d47cc544 304 decide_unroll_constant_iterations (loop, flags);
a29c7ea6 305 if (loop->lpt_decision.decision == LPT_NONE)
d47cc544 306 decide_unroll_runtime_iterations (loop, flags);
a29c7ea6 307 if (loop->lpt_decision.decision == LPT_NONE)
d47cc544 308 decide_unroll_stupid (loop, flags);
a29c7ea6 309 if (loop->lpt_decision.decision == LPT_NONE)
d47cc544 310 decide_peel_simple (loop, flags);
a29c7ea6
ZD
311 }
312}
313
314/* Decide whether the LOOP is once rolling and suitable for complete
315 peeling. */
316static void
d47cc544 317decide_peel_once_rolling (struct loop *loop, int flags ATTRIBUTE_UNUSED)
a29c7ea6 318{
50654f6c
ZD
319 struct niter_desc *desc;
320
c263766c
RH
321 if (dump_file)
322 fprintf (dump_file, "\n;; Considering peeling once rolling loop\n");
a29c7ea6
ZD
323
324 /* Is the loop small enough? */
325 if ((unsigned) PARAM_VALUE (PARAM_MAX_ONCE_PEELED_INSNS) < loop->ninsns)
326 {
c263766c
RH
327 if (dump_file)
328 fprintf (dump_file, ";; Not considering loop, is too big\n");
a29c7ea6
ZD
329 return;
330 }
331
332 /* Check for simple loops. */
50654f6c 333 desc = get_simple_loop_desc (loop);
a29c7ea6
ZD
334
335 /* Check number of iterations. */
50654f6c
ZD
336 if (!desc->simple_p
337 || desc->assumptions
4fbe4f91 338 || desc->infinite
50654f6c 339 || !desc->const_iter
e3a8f1fa
JH
340 || (desc->niter != 0
341 && max_loop_iterations_int (loop) != 0))
a29c7ea6 342 {
c263766c
RH
343 if (dump_file)
344 fprintf (dump_file,
345 ";; Unable to prove that the loop rolls exactly once\n");
a29c7ea6
ZD
346 return;
347 }
348
349 /* Success. */
c263766c
RH
350 if (dump_file)
351 fprintf (dump_file, ";; Decided to peel exactly once rolling loop\n");
a29c7ea6
ZD
352 loop->lpt_decision.decision = LPT_PEEL_COMPLETELY;
353}
354
355/* Decide whether the LOOP is suitable for complete peeling. */
356static void
d47cc544 357decide_peel_completely (struct loop *loop, int flags ATTRIBUTE_UNUSED)
a29c7ea6
ZD
358{
359 unsigned npeel;
50654f6c 360 struct niter_desc *desc;
a29c7ea6 361
c263766c
RH
362 if (dump_file)
363 fprintf (dump_file, "\n;; Considering peeling completely\n");
a29c7ea6
ZD
364
365 /* Skip non-innermost loops. */
366 if (loop->inner)
367 {
c263766c
RH
368 if (dump_file)
369 fprintf (dump_file, ";; Not considering loop, is not innermost\n");
a29c7ea6
ZD
370 return;
371 }
372
35b07080 373 /* Do not peel cold areas. */
efd8f750 374 if (optimize_loop_for_size_p (loop))
35b07080 375 {
c263766c
RH
376 if (dump_file)
377 fprintf (dump_file, ";; Not considering loop, cold area\n");
35b07080
ZD
378 return;
379 }
380
381 /* Can the loop be manipulated? */
382 if (!can_duplicate_loop_p (loop))
383 {
c263766c
RH
384 if (dump_file)
385 fprintf (dump_file,
35b07080
ZD
386 ";; Not considering loop, cannot duplicate\n");
387 return;
388 }
389
3dc575ff 390 /* npeel = number of iterations to peel. */
a29c7ea6
ZD
391 npeel = PARAM_VALUE (PARAM_MAX_COMPLETELY_PEELED_INSNS) / loop->ninsns;
392 if (npeel > (unsigned) PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES))
393 npeel = PARAM_VALUE (PARAM_MAX_COMPLETELY_PEEL_TIMES);
394
395 /* Is the loop small enough? */
396 if (!npeel)
397 {
c263766c
RH
398 if (dump_file)
399 fprintf (dump_file, ";; Not considering loop, is too big\n");
a29c7ea6
ZD
400 return;
401 }
402
403 /* Check for simple loops. */
50654f6c 404 desc = get_simple_loop_desc (loop);
a29c7ea6
ZD
405
406 /* Check number of iterations. */
50654f6c
ZD
407 if (!desc->simple_p
408 || desc->assumptions
4fbe4f91
ZD
409 || !desc->const_iter
410 || desc->infinite)
a29c7ea6 411 {
c263766c
RH
412 if (dump_file)
413 fprintf (dump_file,
414 ";; Unable to prove that the loop iterates constant times\n");
a29c7ea6
ZD
415 return;
416 }
417
50654f6c 418 if (desc->niter > npeel - 1)
a29c7ea6 419 {
c263766c 420 if (dump_file)
0c20a65f 421 {
c263766c
RH
422 fprintf (dump_file,
423 ";; Not peeling loop completely, rolls too much (");
424 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, desc->niter);
425 fprintf (dump_file, " iterations > %d [maximum peelings])\n", npeel);
a29c7ea6
ZD
426 }
427 return;
428 }
429
430 /* Success. */
c263766c
RH
431 if (dump_file)
432 fprintf (dump_file, ";; Decided to peel loop completely\n");
a29c7ea6
ZD
433 loop->lpt_decision.decision = LPT_PEEL_COMPLETELY;
434}
435
436/* Peel all iterations of LOOP, remove exit edges and cancel the loop
437 completely. The transformation done:
0c20a65f 438
a29c7ea6
ZD
439 for (i = 0; i < 4; i++)
440 body;
441
442 ==>
0c20a65f
AJ
443
444 i = 0;
a29c7ea6
ZD
445 body; i++;
446 body; i++;
447 body; i++;
448 body; i++;
449 */
450static void
d73be268 451peel_loop_completely (struct loop *loop)
a29c7ea6
ZD
452{
453 sbitmap wont_exit;
454 unsigned HOST_WIDE_INT npeel;
ee8c1b05
ZD
455 unsigned i;
456 VEC (edge, heap) *remove_edges;
457 edge ein;
50654f6c 458 struct niter_desc *desc = get_simple_loop_desc (loop);
f37a4f14 459 struct opt_info *opt_info = NULL;
b8698a0f 460
a29c7ea6
ZD
461 npeel = desc->niter;
462
35b07080
ZD
463 if (npeel)
464 {
41806d92 465 bool ok;
b8698a0f 466
35b07080 467 wont_exit = sbitmap_alloc (npeel + 1);
f61e445a 468 bitmap_ones (wont_exit);
35b07080 469 RESET_BIT (wont_exit, 0);
50654f6c 470 if (desc->noloop_assumptions)
35b07080
ZD
471 RESET_BIT (wont_exit, 1);
472
ee8c1b05 473 remove_edges = NULL;
35b07080 474
113d659a 475 if (flag_split_ivs_in_unroller)
f37a4f14 476 opt_info = analyze_insns_in_loop (loop);
b8698a0f 477
f37a4f14 478 opt_info_start_duplication (opt_info);
41806d92 479 ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
d73be268 480 npeel,
41806d92 481 wont_exit, desc->out_edge,
ee8c1b05 482 &remove_edges,
7f7b1718 483 DLTHE_FLAG_UPDATE_FREQ
178df94f 484 | DLTHE_FLAG_COMPLETTE_PEEL
7f7b1718
JH
485 | (opt_info
486 ? DLTHE_RECORD_COPY_NUMBER : 0));
41806d92 487 gcc_assert (ok);
35b07080
ZD
488
489 free (wont_exit);
b8698a0f 490
f37a4f14
RE
491 if (opt_info)
492 {
493 apply_opt_in_copies (opt_info, npeel, false, true);
494 free_opt_info (opt_info);
495 }
113d659a 496
35b07080 497 /* Remove the exit edges. */
ac47786e 498 FOR_EACH_VEC_ELT (edge, remove_edges, i, ein)
ee8c1b05
ZD
499 remove_path (ein);
500 VEC_free (edge, heap, remove_edges);
35b07080 501 }
a29c7ea6 502
628f6a4e 503 ein = desc->in_edge;
50654f6c
ZD
504 free_simple_loop_desc (loop);
505
35b07080
ZD
506 /* Now remove the unreachable part of the last iteration and cancel
507 the loop. */
d73be268 508 remove_path (ein);
a29c7ea6 509
c263766c
RH
510 if (dump_file)
511 fprintf (dump_file, ";; Peeled loop completely, %d times\n", (int) npeel);
a29c7ea6
ZD
512}
513
c263766c
RH
514/* Decide whether to unroll LOOP iterating constant number of times
515 and how much. */
50654f6c 516
a29c7ea6 517static void
d47cc544 518decide_unroll_constant_iterations (struct loop *loop, int flags)
a29c7ea6 519{
50654f6c
ZD
520 unsigned nunroll, nunroll_by_av, best_copies, best_unroll = 0, n_copies, i;
521 struct niter_desc *desc;
e598c332 522 double_int iterations;
a29c7ea6
ZD
523
524 if (!(flags & UAP_UNROLL))
525 {
526 /* We were not asked to, just return back silently. */
527 return;
528 }
529
c263766c
RH
530 if (dump_file)
531 fprintf (dump_file,
532 "\n;; Considering unrolling loop with constant "
533 "number of iterations\n");
a29c7ea6
ZD
534
535 /* nunroll = total number of copies of the original loop body in
536 unrolled loop (i.e. if it is 2, we have to duplicate loop body once. */
537 nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
c263766c
RH
538 nunroll_by_av
539 = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
a29c7ea6
ZD
540 if (nunroll > nunroll_by_av)
541 nunroll = nunroll_by_av;
542 if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
543 nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
544
545 /* Skip big loops. */
546 if (nunroll <= 1)
547 {
c263766c
RH
548 if (dump_file)
549 fprintf (dump_file, ";; Not considering loop, is too big\n");
a29c7ea6
ZD
550 return;
551 }
552
553 /* Check for simple loops. */
50654f6c 554 desc = get_simple_loop_desc (loop);
a29c7ea6
ZD
555
556 /* Check number of iterations. */
50654f6c 557 if (!desc->simple_p || !desc->const_iter || desc->assumptions)
a29c7ea6 558 {
c263766c
RH
559 if (dump_file)
560 fprintf (dump_file,
561 ";; Unable to prove that the loop iterates constant times\n");
a29c7ea6
ZD
562 return;
563 }
564
e598c332
JH
565 /* Check whether the loop rolls enough to consider.
566 Consult also loop bounds and profile; in the case the loop has more
567 than one exit it may well loop less than determined maximal number
568 of iterations. */
569 if (desc->niter < 2 * nunroll
570 || ((estimated_loop_iterations (loop, &iterations)
571 || max_loop_iterations (loop, &iterations))
572 && iterations.ult (double_int::from_shwi (2 * nunroll))))
a29c7ea6 573 {
c263766c
RH
574 if (dump_file)
575 fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
a29c7ea6
ZD
576 return;
577 }
578
579 /* Success; now compute number of iterations to unroll. We alter
580 nunroll so that as few as possible copies of loop body are
e0bb17a8 581 necessary, while still not decreasing the number of unrollings
a29c7ea6
ZD
582 too much (at most by 1). */
583 best_copies = 2 * nunroll + 10;
584
585 i = 2 * nunroll + 2;
50654f6c
ZD
586 if (i - 1 >= desc->niter)
587 i = desc->niter - 2;
a29c7ea6
ZD
588
589 for (; i >= nunroll - 1; i--)
590 {
50654f6c 591 unsigned exit_mod = desc->niter % (i + 1);
a29c7ea6 592
50654f6c 593 if (!loop_exit_at_end_p (loop))
a29c7ea6 594 n_copies = exit_mod + i + 1;
50654f6c
ZD
595 else if (exit_mod != (unsigned) i
596 || desc->noloop_assumptions != NULL_RTX)
a29c7ea6
ZD
597 n_copies = exit_mod + i + 2;
598 else
599 n_copies = i + 1;
600
601 if (n_copies < best_copies)
602 {
603 best_copies = n_copies;
604 best_unroll = i;
605 }
606 }
607
a29c7ea6
ZD
608 loop->lpt_decision.decision = LPT_UNROLL_CONSTANT;
609 loop->lpt_decision.times = best_unroll;
b8698a0f 610
c263766c 611 if (dump_file)
4fc2e37d
EB
612 fprintf (dump_file, ";; Decided to unroll the loop %d times (%d copies).\n",
613 loop->lpt_decision.times, best_copies);
a29c7ea6
ZD
614}
615
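/* Editorial sketch (not part of the pass) of the cost weighed in
   decide_unroll_constant_iterations above: for a candidate factor of
   I + 1 body copies and a known iteration count NITER, the number of
   body copies that end up in the code is the peeled remainder plus the
   copies in the unrolled loop, plus one more when the exit is at the end
   and the remainder does not line up.  Hypothetical helper, kept out of
   the build.  */
#if 0
static unsigned
n_copies_sketch (unsigned niter, unsigned i, int exit_at_end,
		 int noloop_assumptions)
{
  unsigned exit_mod = niter % (i + 1);

  if (!exit_at_end)
    return exit_mod + i + 1;
  else if (exit_mod != i || noloop_assumptions)
    return exit_mod + i + 2;
  else
    return i + 1;
}
/* E.g. niter = 102, i = 3 (four copies), exit at the beginning:
   exit_mod = 102 % 4 = 2, so 2 + 3 + 1 = 6 copies of the body exist
   after the transformation.  */
#endif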
4fc2e37d
EB
616/* Unroll LOOP with constant number of iterations LOOP->LPT_DECISION.TIMES times.
617 The transformation does this:
0c20a65f 618
a29c7ea6
ZD
619 for (i = 0; i < 102; i++)
620 body;
0c20a65f 621
4fc2e37d 622 ==> (LOOP->LPT_DECISION.TIMES == 3)
0c20a65f 623
a29c7ea6
ZD
624 i = 0;
625 body; i++;
626 body; i++;
627 while (i < 102)
628 {
629 body; i++;
630 body; i++;
631 body; i++;
632 body; i++;
633 }
634 */
635static void
d73be268 636unroll_loop_constant_iterations (struct loop *loop)
a29c7ea6
ZD
637{
638 unsigned HOST_WIDE_INT niter;
639 unsigned exit_mod;
640 sbitmap wont_exit;
ee8c1b05
ZD
641 unsigned i;
642 VEC (edge, heap) *remove_edges;
643 edge e;
a29c7ea6 644 unsigned max_unroll = loop->lpt_decision.times;
50654f6c
ZD
645 struct niter_desc *desc = get_simple_loop_desc (loop);
646 bool exit_at_end = loop_exit_at_end_p (loop);
f37a4f14 647 struct opt_info *opt_info = NULL;
41806d92 648 bool ok;
b8698a0f 649
a29c7ea6
ZD
650 niter = desc->niter;
651
113d659a
ZD
652 /* Should not get here (such loop should be peeled instead). */
653 gcc_assert (niter > max_unroll + 1);
a29c7ea6
ZD
654
655 exit_mod = niter % (max_unroll + 1);
656
657 wont_exit = sbitmap_alloc (max_unroll + 1);
f61e445a 658 bitmap_ones (wont_exit);
a29c7ea6 659
ee8c1b05 660 remove_edges = NULL;
b8698a0f 661 if (flag_split_ivs_in_unroller
f37a4f14
RE
662 || flag_variable_expansion_in_unroller)
663 opt_info = analyze_insns_in_loop (loop);
b8698a0f 664
50654f6c 665 if (!exit_at_end)
a29c7ea6 666 {
50654f6c 667 /* The exit is not at the end of the loop; leave exit test
a29c7ea6
ZD
668 in the first copy, so that the loops that start with test
669 of exit condition have continuous body after unrolling. */
670
c263766c 671 if (dump_file)
4fc2e37d 672 fprintf (dump_file, ";; Condition at beginning of loop.\n");
a29c7ea6
ZD
673
674 /* Peel exit_mod iterations. */
675 RESET_BIT (wont_exit, 0);
50654f6c 676 if (desc->noloop_assumptions)
a29c7ea6
ZD
677 RESET_BIT (wont_exit, 1);
678
50654f6c
ZD
679 if (exit_mod)
680 {
f37a4f14 681 opt_info_start_duplication (opt_info);
41806d92 682 ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
d73be268 683 exit_mod,
50654f6c 684 wont_exit, desc->out_edge,
ee8c1b05 685 &remove_edges,
7f7b1718
JH
686 DLTHE_FLAG_UPDATE_FREQ
687 | (opt_info && exit_mod > 1
688 ? DLTHE_RECORD_COPY_NUMBER
689 : 0));
41806d92 690 gcc_assert (ok);
50654f6c 691
f37a4f14 692 if (opt_info && exit_mod > 1)
b8698a0f
L
693 apply_opt_in_copies (opt_info, exit_mod, false, false);
694
50654f6c
ZD
695 desc->noloop_assumptions = NULL_RTX;
696 desc->niter -= exit_mod;
e3a8f1fa
JH
697 loop->nb_iterations_upper_bound -= double_int::from_uhwi (exit_mod);
698 if (loop->any_estimate
699 && double_int::from_uhwi (exit_mod).ule
700 (loop->nb_iterations_estimate))
701 loop->nb_iterations_estimate -= double_int::from_uhwi (exit_mod);
702 else
703 loop->any_estimate = false;
50654f6c 704 }
a29c7ea6
ZD
705
706 SET_BIT (wont_exit, 1);
707 }
708 else
709 {
710 /* Leave exit test in last copy, for the same reason as above if
711 the loop tests the condition at the end of loop body. */
712
c263766c 713 if (dump_file)
4fc2e37d 714 fprintf (dump_file, ";; Condition at end of loop.\n");
a29c7ea6
ZD
715
716 /* We know that niter >= max_unroll + 2; so we do not need to care of
717 case when we would exit before reaching the loop. So just peel
50654f6c
ZD
718 exit_mod + 1 iterations. */
719 if (exit_mod != max_unroll
720 || desc->noloop_assumptions)
a29c7ea6
ZD
721 {
722 RESET_BIT (wont_exit, 0);
50654f6c 723 if (desc->noloop_assumptions)
a29c7ea6 724 RESET_BIT (wont_exit, 1);
b8698a0f 725
f37a4f14 726 opt_info_start_duplication (opt_info);
41806d92 727 ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
d73be268 728 exit_mod + 1,
41806d92 729 wont_exit, desc->out_edge,
ee8c1b05 730 &remove_edges,
7f7b1718
JH
731 DLTHE_FLAG_UPDATE_FREQ
732 | (opt_info && exit_mod > 0
733 ? DLTHE_RECORD_COPY_NUMBER
734 : 0));
41806d92 735 gcc_assert (ok);
b8698a0f 736
f37a4f14
RE
737 if (opt_info && exit_mod > 0)
738 apply_opt_in_copies (opt_info, exit_mod + 1, false, false);
113d659a 739
50654f6c 740 desc->niter -= exit_mod + 1;
73367f92 741 loop->nb_iterations_upper_bound -= double_int::from_uhwi (exit_mod + 1);
e3a8f1fa
JH
742 if (loop->any_estimate
743 && double_int::from_uhwi (exit_mod + 1).ule
744 (loop->nb_iterations_estimate))
745 loop->nb_iterations_estimate -= double_int::from_uhwi (exit_mod + 1);
746 else
747 loop->any_estimate = false;
50654f6c
ZD
748 desc->noloop_assumptions = NULL_RTX;
749
a29c7ea6
ZD
750 SET_BIT (wont_exit, 0);
751 SET_BIT (wont_exit, 1);
752 }
753
754 RESET_BIT (wont_exit, max_unroll);
755 }
756
757 /* Now unroll the loop. */
b8698a0f 758
f37a4f14 759 opt_info_start_duplication (opt_info);
41806d92 760 ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
d73be268 761 max_unroll,
41806d92 762 wont_exit, desc->out_edge,
ee8c1b05 763 &remove_edges,
7f7b1718
JH
764 DLTHE_FLAG_UPDATE_FREQ
765 | (opt_info
766 ? DLTHE_RECORD_COPY_NUMBER
767 : 0));
41806d92 768 gcc_assert (ok);
a29c7ea6 769
f37a4f14 770 if (opt_info)
113d659a 771 {
f37a4f14
RE
772 apply_opt_in_copies (opt_info, max_unroll, true, true);
773 free_opt_info (opt_info);
113d659a
ZD
774 }
775
a29c7ea6
ZD
776 free (wont_exit);
777
50654f6c
ZD
778 if (exit_at_end)
779 {
6580ee77 780 basic_block exit_block = get_bb_copy (desc->in_edge->src);
50654f6c 781 /* Find a new in and out edge; they are in the last copy we have made. */
b8698a0f 782
628f6a4e 783 if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest)
50654f6c 784 {
628f6a4e
BE
785 desc->out_edge = EDGE_SUCC (exit_block, 0);
786 desc->in_edge = EDGE_SUCC (exit_block, 1);
50654f6c
ZD
787 }
788 else
789 {
628f6a4e
BE
790 desc->out_edge = EDGE_SUCC (exit_block, 1);
791 desc->in_edge = EDGE_SUCC (exit_block, 0);
50654f6c
ZD
792 }
793 }
794
795 desc->niter /= max_unroll + 1;
e3a8f1fa 796 loop->nb_iterations_upper_bound
73367f92 797 = loop->nb_iterations_upper_bound.udiv (double_int::from_uhwi (max_unroll
e3a8f1fa 798 + 1),
73367f92 799 TRUNC_DIV_EXPR);
e3a8f1fa
JH
800 if (loop->any_estimate)
801 loop->nb_iterations_estimate
73367f92 802 = loop->nb_iterations_estimate.udiv (double_int::from_uhwi (max_unroll
e3a8f1fa 803 + 1),
73367f92 804 TRUNC_DIV_EXPR);
50654f6c
ZD
805 desc->niter_expr = GEN_INT (desc->niter);
806
a29c7ea6 807 /* Remove the edges. */
ac47786e 808 FOR_EACH_VEC_ELT (edge, remove_edges, i, e)
ee8c1b05
ZD
809 remove_path (e);
810 VEC_free (edge, heap, remove_edges);
a29c7ea6 811
c263766c
RH
812 if (dump_file)
813 fprintf (dump_file,
814 ";; Unrolled loop %d times, constant # of iterations %i insns\n",
815 max_unroll, num_loop_insns (loop));
a29c7ea6
ZD
816}
817
818/* Decide whether to unroll LOOP iterating runtime computable number of times
819 and how much. */
820static void
d47cc544 821decide_unroll_runtime_iterations (struct loop *loop, int flags)
a29c7ea6
ZD
822{
823 unsigned nunroll, nunroll_by_av, i;
50654f6c 824 struct niter_desc *desc;
e3a8f1fa 825 double_int iterations;
a29c7ea6
ZD
826
827 if (!(flags & UAP_UNROLL))
828 {
829 /* We were not asked to, just return back silently. */
830 return;
831 }
832
c263766c
RH
833 if (dump_file)
834 fprintf (dump_file,
835 "\n;; Considering unrolling loop with runtime "
836 "computable number of iterations\n");
a29c7ea6
ZD
837
838 /* nunroll = total number of copies of the original loop body in
839 unrolled loop (i.e. if it is 2, we have to duplicate loop body once. */
840 nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
841 nunroll_by_av = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
842 if (nunroll > nunroll_by_av)
843 nunroll = nunroll_by_av;
844 if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
845 nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
846
40ac4f73
CB
847 if (targetm.loop_unroll_adjust)
848 nunroll = targetm.loop_unroll_adjust (nunroll, loop);
849
a29c7ea6
ZD
850 /* Skip big loops. */
851 if (nunroll <= 1)
852 {
c263766c
RH
853 if (dump_file)
854 fprintf (dump_file, ";; Not considering loop, is too big\n");
a29c7ea6
ZD
855 return;
856 }
857
858 /* Check for simple loops. */
50654f6c 859 desc = get_simple_loop_desc (loop);
a29c7ea6
ZD
860
861 /* Check simpleness. */
50654f6c 862 if (!desc->simple_p || desc->assumptions)
a29c7ea6 863 {
c263766c
RH
864 if (dump_file)
865 fprintf (dump_file,
866 ";; Unable to prove that the number of iterations "
867 "can be counted in runtime\n");
a29c7ea6
ZD
868 return;
869 }
870
50654f6c 871 if (desc->const_iter)
a29c7ea6 872 {
c263766c
RH
873 if (dump_file)
874 fprintf (dump_file, ";; Loop iterates constant times\n");
a29c7ea6
ZD
875 return;
876 }
877
73367f92 878 /* Check whether the loop rolls. */
e3a8f1fa
JH
879 if ((estimated_loop_iterations (loop, &iterations)
880 || max_loop_iterations (loop, &iterations))
73367f92 881 && iterations.ult (double_int::from_shwi (2 * nunroll)))
a29c7ea6 882 {
c263766c
RH
883 if (dump_file)
884 fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
a29c7ea6
ZD
885 return;
886 }
887
888 /* Success; now force nunroll to be power of 2, as we are unable to
889 cope with overflows in computation of number of iterations. */
50654f6c
ZD
890 for (i = 1; 2 * i <= nunroll; i *= 2)
891 continue;
a29c7ea6
ZD
892
893 loop->lpt_decision.decision = LPT_UNROLL_RUNTIME;
894 loop->lpt_decision.times = i - 1;
b8698a0f 895
c263766c 896 if (dump_file)
4fc2e37d 897 fprintf (dump_file, ";; Decided to unroll the loop %d times.\n",
50654f6c 898 loop->lpt_decision.times);
a29c7ea6
ZD
899}
900
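/* Editorial note (not part of the pass) on the power-of-two restriction
   chosen above: with max_unroll + 1 copies and max_unroll + 1 a power of
   two, the runtime remainder "niter % (max_unroll + 1)" can be computed
   as "niter & max_unroll", which unroll_loop_runtime_iterations relies
   on; the identity holds even if NITER was computed with wraparound
   arithmetic.  Small self-contained check, kept out of the build.  */
#if 0
#include <assert.h>

static void
power_of_two_modulo_check (unsigned int niter)
{
  unsigned int max_unroll = 3;	/* i.e. 4 copies of the body.  */

  assert ((niter % (max_unroll + 1)) == (niter & max_unroll));
}
#endif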
7d93d987
ZD
901/* Splits edge E and inserts the sequence of instructions INSNS on it, and
902 returns the newly created block. If INSNS is NULL_RTX, nothing is changed
903 and NULL is returned instead. */
598ec7bd
ZD
904
905basic_block
906split_edge_and_insert (edge e, rtx insns)
907{
7d93d987
ZD
908 basic_block bb;
909
910 if (!insns)
911 return NULL;
b8698a0f 912 bb = split_edge (e);
598ec7bd 913 emit_insn_after (insns, BB_END (bb));
7984c787
SB
914
915 /* ??? We used to assume that INSNS can contain control flow insns, and
916 that we had to try to find sub basic blocks in BB to maintain a valid
917 CFG. For this purpose we used to set the BB_SUPERBLOCK flag on BB
918 and call break_superblocks when going out of cfglayout mode. But it
919 turns out that this never happens; and that if it does ever happen,
c7dd803e 920 the TODO_verify_flow at the end of the RTL loop passes would fail.
7984c787
SB
921
922 There are two reasons why we expected we could have control flow insns
923 in INSNS. The first is when a comparison has to be done in parts, and
924 the second is when the number of iterations is computed for loops with
925 the number of iterations known at runtime. In both cases, test cases
926 to get control flow in INSNS appear to be impossible to construct:
927
928 * If do_compare_rtx_and_jump needs several branches to do comparison
929 in a mode that needs comparison by parts, we cannot analyze the
930 number of iterations of the loop, and we never get to unrolling it.
931
932 * The code in expand_divmod that was suspected to cause creation of
933 branching code seems to be only accessed for signed division. The
934 divisions used by # of iterations analysis are always unsigned.
935 Problems might arise on architectures that emits branching code
936 for some operations that may appear in the unroller (especially
937 for division), but we have no such architectures.
938
939 Considering all this, it was decided that we should for now assume
940 that INSNS can in theory contain control flow insns, but in practice
941 it never does. So we don't handle the theoretical case, and should
942 a real failure ever show up, we have a pretty good clue for how to
943 fix it. */
944
598ec7bd
ZD
945 return bb;
946}
947
4fc2e37d
EB
948/* Unroll LOOP for which we are able to count number of iterations in runtime
949 LOOP->LPT_DECISION.TIMES times. The transformation does this (with some
a29c7ea6 950 extra care for case n < 0):
0c20a65f 951
a29c7ea6
ZD
952 for (i = 0; i < n; i++)
953 body;
0c20a65f 954
4fc2e37d 955 ==> (LOOP->LPT_DECISION.TIMES == 3)
0c20a65f 956
a29c7ea6
ZD
957 i = 0;
958 mod = n % 4;
0c20a65f 959
a29c7ea6
ZD
960 switch (mod)
961 {
962 case 3:
963 body; i++;
964 case 2:
965 body; i++;
966 case 1:
967 body; i++;
968 case 0: ;
969 }
0c20a65f 970
a29c7ea6
ZD
971 while (i < n)
972 {
973 body; i++;
974 body; i++;
975 body; i++;
976 body; i++;
977 }
978 */
979static void
d73be268 980unroll_loop_runtime_iterations (struct loop *loop)
a29c7ea6 981{
50654f6c 982 rtx old_niter, niter, init_code, branch_code, tmp;
a29c7ea6 983 unsigned i, j, p;
66f97d31
ZD
984 basic_block preheader, *body, swtch, ezc_swtch;
985 VEC (basic_block, heap) *dom_bbs;
a29c7ea6
ZD
986 sbitmap wont_exit;
987 int may_exit_copy;
ee8c1b05
ZD
988 unsigned n_peel;
989 VEC (edge, heap) *remove_edges;
990 edge e;
a29c7ea6
ZD
991 bool extra_zero_check, last_may_exit;
992 unsigned max_unroll = loop->lpt_decision.times;
50654f6c
ZD
993 struct niter_desc *desc = get_simple_loop_desc (loop);
994 bool exit_at_end = loop_exit_at_end_p (loop);
f37a4f14 995 struct opt_info *opt_info = NULL;
41806d92 996 bool ok;
b8698a0f 997
f37a4f14
RE
998 if (flag_split_ivs_in_unroller
999 || flag_variable_expansion_in_unroller)
1000 opt_info = analyze_insns_in_loop (loop);
b8698a0f 1001
a29c7ea6 1002 /* Remember blocks whose dominators will have to be updated. */
66f97d31 1003 dom_bbs = NULL;
a29c7ea6
ZD
1004
1005 body = get_loop_body (loop);
1006 for (i = 0; i < loop->num_nodes; i++)
1007 {
66f97d31
ZD
1008 VEC (basic_block, heap) *ldom;
1009 basic_block bb;
a29c7ea6 1010
66f97d31 1011 ldom = get_dominated_by (CDI_DOMINATORS, body[i]);
ac47786e 1012 FOR_EACH_VEC_ELT (basic_block, ldom, j, bb)
66f97d31
ZD
1013 if (!flow_bb_inside_loop_p (loop, bb))
1014 VEC_safe_push (basic_block, heap, dom_bbs, bb);
a29c7ea6 1015
66f97d31 1016 VEC_free (basic_block, heap, ldom);
a29c7ea6
ZD
1017 }
1018 free (body);
1019
50654f6c 1020 if (!exit_at_end)
a29c7ea6
ZD
1021 {
1022 /* Leave exit in first copy (for explanation why see comment in
1023 unroll_loop_constant_iterations). */
1024 may_exit_copy = 0;
1025 n_peel = max_unroll - 1;
1026 extra_zero_check = true;
1027 last_may_exit = false;
1028 }
1029 else
1030 {
1031 /* Leave exit in last copy (for explanation why see comment in
1032 unroll_loop_constant_iterations). */
1033 may_exit_copy = max_unroll;
1034 n_peel = max_unroll;
1035 extra_zero_check = false;
1036 last_may_exit = true;
1037 }
1038
1039 /* Get expression for number of iterations. */
1040 start_sequence ();
50654f6c
ZD
1041 old_niter = niter = gen_reg_rtx (desc->mode);
1042 tmp = force_operand (copy_rtx (desc->niter_expr), niter);
1043 if (tmp != niter)
1044 emit_move_insn (niter, tmp);
a29c7ea6
ZD
1045
1046 /* Count modulo by ANDing it with max_unroll; we use the fact that
1047 the number of unrollings is a power of two, and thus this is correct
1048 even if there is overflow in the computation. */
50654f6c 1049 niter = expand_simple_binop (desc->mode, AND,
a29c7ea6
ZD
1050 niter,
1051 GEN_INT (max_unroll),
1052 NULL_RTX, 0, OPTAB_LIB_WIDEN);
1053
1054 init_code = get_insns ();
1055 end_sequence ();
2ed22578 1056 unshare_all_rtl_in_chain (init_code);
a29c7ea6
ZD
1057
1058 /* Precondition the loop. */
598ec7bd 1059 split_edge_and_insert (loop_preheader_edge (loop), init_code);
a29c7ea6 1060
ee8c1b05 1061 remove_edges = NULL;
a29c7ea6
ZD
1062
1063 wont_exit = sbitmap_alloc (max_unroll + 2);
1064
1065 /* Peel the first copy of loop body (almost always we must leave exit test
1066 here; the only exception is when we have extra zero check and the number
50654f6c
ZD
1067 of iterations is reliable. Also record the place of (possible) extra
1068 zero check. */
f61e445a 1069 bitmap_clear (wont_exit);
50654f6c
ZD
1070 if (extra_zero_check
1071 && !desc->noloop_assumptions)
a29c7ea6
ZD
1072 SET_BIT (wont_exit, 1);
1073 ezc_swtch = loop_preheader_edge (loop)->src;
41806d92 1074 ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
d73be268 1075 1, wont_exit, desc->out_edge,
ee8c1b05 1076 &remove_edges,
41806d92
NS
1077 DLTHE_FLAG_UPDATE_FREQ);
1078 gcc_assert (ok);
a29c7ea6
ZD
1079
1080 /* Record the place where switch will be built for preconditioning. */
598ec7bd 1081 swtch = split_edge (loop_preheader_edge (loop));
a29c7ea6
ZD
1082
1083 for (i = 0; i < n_peel; i++)
1084 {
1085 /* Peel the copy. */
f61e445a 1086 bitmap_clear (wont_exit);
a29c7ea6
ZD
1087 if (i != n_peel - 1 || !last_may_exit)
1088 SET_BIT (wont_exit, 1);
41806d92 1089 ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
d73be268 1090 1, wont_exit, desc->out_edge,
ee8c1b05 1091 &remove_edges,
41806d92
NS
1092 DLTHE_FLAG_UPDATE_FREQ);
1093 gcc_assert (ok);
a29c7ea6 1094
91f4cfe3
ZD
1095 /* Create item for switch. */
1096 j = n_peel - i - (extra_zero_check ? 0 : 1);
1097 p = REG_BR_PROB_BASE / (i + 2);
1098
598ec7bd 1099 preheader = split_edge (loop_preheader_edge (loop));
50654f6c 1100 branch_code = compare_and_jump_seq (copy_rtx (niter), GEN_INT (j), EQ,
41806d92
NS
1101 block_label (preheader), p,
1102 NULL_RTX);
91f4cfe3 1103
7d93d987
ZD
1104 /* We rely on the fact that the compare and jump cannot be optimized out,
1105 and hence the cfg we create is correct. */
1106 gcc_assert (branch_code != NULL_RTX);
1107
598ec7bd 1108 swtch = split_edge_and_insert (single_pred_edge (swtch), branch_code);
d47cc544 1109 set_immediate_dominator (CDI_DOMINATORS, preheader, swtch);
c5cbcccf 1110 single_pred_edge (swtch)->probability = REG_BR_PROB_BASE - p;
91f4cfe3 1111 e = make_edge (swtch, preheader,
c5cbcccf 1112 single_succ_edge (swtch)->flags & EDGE_IRREDUCIBLE_LOOP);
e3a8f1fa 1113 e->count = RDIV (preheader->count * REG_BR_PROB_BASE, p);
91f4cfe3 1114 e->probability = p;
a29c7ea6
ZD
1115 }
1116
1117 if (extra_zero_check)
1118 {
1119 /* Add branch for zero iterations. */
1120 p = REG_BR_PROB_BASE / (max_unroll + 1);
1121 swtch = ezc_swtch;
598ec7bd 1122 preheader = split_edge (loop_preheader_edge (loop));
50654f6c 1123 branch_code = compare_and_jump_seq (copy_rtx (niter), const0_rtx, EQ,
41806d92
NS
1124 block_label (preheader), p,
1125 NULL_RTX);
7d93d987 1126 gcc_assert (branch_code != NULL_RTX);
a29c7ea6 1127
598ec7bd 1128 swtch = split_edge_and_insert (single_succ_edge (swtch), branch_code);
d47cc544 1129 set_immediate_dominator (CDI_DOMINATORS, preheader, swtch);
c5cbcccf 1130 single_succ_edge (swtch)->probability = REG_BR_PROB_BASE - p;
72b8d451 1131 e = make_edge (swtch, preheader,
c5cbcccf 1132 single_succ_edge (swtch)->flags & EDGE_IRREDUCIBLE_LOOP);
e3a8f1fa 1133 e->count = RDIV (preheader->count * REG_BR_PROB_BASE, p);
a29c7ea6
ZD
1134 e->probability = p;
1135 }
1136
1137 /* Recount dominators for outer blocks. */
66f97d31 1138 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
a29c7ea6
ZD
1139
1140 /* And unroll loop. */
1141
f61e445a 1142 bitmap_ones (wont_exit);
a29c7ea6 1143 RESET_BIT (wont_exit, may_exit_copy);
f37a4f14 1144 opt_info_start_duplication (opt_info);
b8698a0f 1145
41806d92 1146 ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
d73be268 1147 max_unroll,
41806d92 1148 wont_exit, desc->out_edge,
ee8c1b05 1149 &remove_edges,
7f7b1718
JH
1150 DLTHE_FLAG_UPDATE_FREQ
1151 | (opt_info
1152 ? DLTHE_RECORD_COPY_NUMBER
1153 : 0));
41806d92 1154 gcc_assert (ok);
b8698a0f 1155
f37a4f14 1156 if (opt_info)
113d659a 1157 {
f37a4f14
RE
1158 apply_opt_in_copies (opt_info, max_unroll, true, true);
1159 free_opt_info (opt_info);
113d659a
ZD
1160 }
1161
a29c7ea6
ZD
1162 free (wont_exit);
1163
50654f6c
ZD
1164 if (exit_at_end)
1165 {
6580ee77 1166 basic_block exit_block = get_bb_copy (desc->in_edge->src);
41806d92
NS
1167 /* Find a new in and out edge; they are in the last copy we have
1168 made. */
b8698a0f 1169
628f6a4e 1170 if (EDGE_SUCC (exit_block, 0)->dest == desc->out_edge->dest)
50654f6c 1171 {
628f6a4e
BE
1172 desc->out_edge = EDGE_SUCC (exit_block, 0);
1173 desc->in_edge = EDGE_SUCC (exit_block, 1);
50654f6c
ZD
1174 }
1175 else
1176 {
628f6a4e
BE
1177 desc->out_edge = EDGE_SUCC (exit_block, 1);
1178 desc->in_edge = EDGE_SUCC (exit_block, 0);
50654f6c
ZD
1179 }
1180 }
1181
a29c7ea6 1182 /* Remove the edges. */
ac47786e 1183 FOR_EACH_VEC_ELT (edge, remove_edges, i, e)
ee8c1b05
ZD
1184 remove_path (e);
1185 VEC_free (edge, heap, remove_edges);
a29c7ea6 1186
50654f6c
ZD
1187 /* We must be careful when updating the number of iterations due to
1188 preconditioning and the fact that the value must be valid at entry
1189 of the loop. After passing through the above code, we see that
1190 the correct new number of iterations is this: */
113d659a 1191 gcc_assert (!desc->const_iter);
50654f6c 1192 desc->niter_expr =
41806d92
NS
1193 simplify_gen_binary (UDIV, desc->mode, old_niter,
1194 GEN_INT (max_unroll + 1));
e3a8f1fa
JH
1195 loop->nb_iterations_upper_bound
1196 = loop->nb_iterations_upper_bound.udiv (double_int::from_uhwi (max_unroll
1197 + 1),
73367f92 1198 TRUNC_DIV_EXPR);
e3a8f1fa
JH
1199 if (loop->any_estimate)
1200 loop->nb_iterations_estimate
1201 = loop->nb_iterations_estimate.udiv (double_int::from_uhwi (max_unroll
1202 + 1),
73367f92 1203 TRUNC_DIV_EXPR);
50654f6c
ZD
1204 if (exit_at_end)
1205 {
1206 desc->niter_expr =
1207 simplify_gen_binary (MINUS, desc->mode, desc->niter_expr, const1_rtx);
1208 desc->noloop_assumptions = NULL_RTX;
e3a8f1fa
JH
1209 --loop->nb_iterations_upper_bound;
1210 if (loop->any_estimate
1211 && loop->nb_iterations_estimate != double_int_zero)
1212 --loop->nb_iterations_estimate;
1213 else
1214 loop->any_estimate = false;
50654f6c
ZD
1215 }
1216
c263766c
RH
1217 if (dump_file)
1218 fprintf (dump_file,
1219 ";; Unrolled loop %d times, counting # of iterations "
1220 "in runtime, %i insns\n",
a29c7ea6 1221 max_unroll, num_loop_insns (loop));
b6cdba27 1222
66f97d31 1223 VEC_free (basic_block, heap, dom_bbs);
a29c7ea6 1224}
0c20a65f 1225
a29c7ea6
ZD
1226/* Decide whether to simply peel LOOP and how much. */
1227static void
d47cc544 1228decide_peel_simple (struct loop *loop, int flags)
a29c7ea6
ZD
1229{
1230 unsigned npeel;
e3a8f1fa 1231 double_int iterations;
a29c7ea6
ZD
1232
1233 if (!(flags & UAP_PEEL))
1234 {
1235 /* We were not asked to, just return back silently. */
1236 return;
1237 }
1238
c263766c
RH
1239 if (dump_file)
1240 fprintf (dump_file, "\n;; Considering simply peeling loop\n");
a29c7ea6 1241
3dc575ff 1242 /* npeel = number of iterations to peel. */
a29c7ea6
ZD
1243 npeel = PARAM_VALUE (PARAM_MAX_PEELED_INSNS) / loop->ninsns;
1244 if (npeel > (unsigned) PARAM_VALUE (PARAM_MAX_PEEL_TIMES))
1245 npeel = PARAM_VALUE (PARAM_MAX_PEEL_TIMES);
1246
1247 /* Skip big loops. */
1248 if (!npeel)
1249 {
c263766c
RH
1250 if (dump_file)
1251 fprintf (dump_file, ";; Not considering loop, is too big\n");
a29c7ea6
ZD
1252 return;
1253 }
1254
a29c7ea6 1255 /* Do not simply peel loops with branches inside -- it increases number
6acf25e4
JH
1256 of mispredicts.
1257 The exception is when we have a profile and thus a good chance
1258 to peel the number of iterations the loop will actually iterate in practice.
1259 TODO: this heuristic needs tuning; while for complete unrolling
1260 the branch inside the loop mostly eliminates any improvement, for
1261 peeling it is not the case. A function call inside the loop is
1262 also a branch from the branch prediction POV (and probably a better reason
1263 not to unroll/peel). */
1264 if (num_loop_branches (loop) > 1
1265 && profile_status != PROFILE_READ)
a29c7ea6 1266 {
c263766c
RH
1267 if (dump_file)
1268 fprintf (dump_file, ";; Not peeling, contains branches\n");
a29c7ea6
ZD
1269 return;
1270 }
1271
e3a8f1fa
JH
1272 /* If we have realistic estimate on number of iterations, use it. */
1273 if (estimated_loop_iterations (loop, &iterations))
a29c7ea6 1274 {
73367f92 1275 if (double_int::from_shwi (npeel).ule (iterations))
a29c7ea6 1276 {
c263766c 1277 if (dump_file)
a29c7ea6 1278 {
c263766c
RH
1279 fprintf (dump_file, ";; Not peeling loop, rolls too much (");
1280 fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC,
e3a8f1fa 1281 (HOST_WIDEST_INT) (iterations.to_shwi () + 1));
c263766c
RH
1282 fprintf (dump_file, " iterations > %d [maximum peelings])\n",
1283 npeel);
a29c7ea6
ZD
1284 }
1285 return;
1286 }
e3a8f1fa 1287 npeel = iterations.to_shwi () + 1;
a29c7ea6 1288 }
e3a8f1fa
JH
1289 /* If we have small enough bound on iterations, we can still peel (completely
1290 unroll). */
1291 else if (max_loop_iterations (loop, &iterations)
73367f92 1292 && iterations.ult (double_int::from_shwi (npeel)))
e3a8f1fa 1293 npeel = iterations.to_shwi () + 1;
a29c7ea6
ZD
1294 else
1295 {
1296 /* For now we have no good heuristics to decide whether loop peeling
1297 will be effective, so disable it. */
c263766c
RH
1298 if (dump_file)
1299 fprintf (dump_file,
a29c7ea6
ZD
1300 ";; Not peeling loop, no evidence it will be profitable\n");
1301 return;
1302 }
1303
1304 /* Success. */
1305 loop->lpt_decision.decision = LPT_PEEL_SIMPLE;
1306 loop->lpt_decision.times = npeel;
b8698a0f 1307
c263766c 1308 if (dump_file)
4fc2e37d 1309 fprintf (dump_file, ";; Decided to simply peel the loop %d times.\n",
50654f6c 1310 loop->lpt_decision.times);
a29c7ea6
ZD
1311}
1312
4fc2e37d
EB
1313/* Peel a LOOP LOOP->LPT_DECISION.TIMES times. The transformation does this:
1314
a29c7ea6
ZD
1315 while (cond)
1316 body;
1317
4fc2e37d 1318 ==> (LOOP->LPT_DECISION.TIMES == 3)
a29c7ea6
ZD
1319
1320 if (!cond) goto end;
1321 body;
1322 if (!cond) goto end;
1323 body;
4fc2e37d
EB
1324 if (!cond) goto end;
1325 body;
a29c7ea6
ZD
1326 while (cond)
1327 body;
1328 end: ;
1329 */
1330static void
d73be268 1331peel_loop_simple (struct loop *loop)
a29c7ea6
ZD
1332{
1333 sbitmap wont_exit;
1334 unsigned npeel = loop->lpt_decision.times;
50654f6c 1335 struct niter_desc *desc = get_simple_loop_desc (loop);
f37a4f14 1336 struct opt_info *opt_info = NULL;
41806d92 1337 bool ok;
b8698a0f 1338
113d659a 1339 if (flag_split_ivs_in_unroller && npeel > 1)
f37a4f14 1340 opt_info = analyze_insns_in_loop (loop);
b8698a0f 1341
a29c7ea6 1342 wont_exit = sbitmap_alloc (npeel + 1);
f61e445a 1343 bitmap_clear (wont_exit);
b8698a0f 1344
f37a4f14 1345 opt_info_start_duplication (opt_info);
b8698a0f 1346
41806d92 1347 ok = duplicate_loop_to_header_edge (loop, loop_preheader_edge (loop),
ee8c1b05 1348 npeel, wont_exit, NULL,
7f7b1718
JH
1349 NULL, DLTHE_FLAG_UPDATE_FREQ
1350 | (opt_info
1351 ? DLTHE_RECORD_COPY_NUMBER
1352 : 0));
41806d92 1353 gcc_assert (ok);
0c20a65f 1354
a29c7ea6 1355 free (wont_exit);
b8698a0f 1356
f37a4f14 1357 if (opt_info)
113d659a 1358 {
f37a4f14
RE
1359 apply_opt_in_copies (opt_info, npeel, false, false);
1360 free_opt_info (opt_info);
113d659a
ZD
1361 }
1362
50654f6c
ZD
1363 if (desc->simple_p)
1364 {
1365 if (desc->const_iter)
1366 {
1367 desc->niter -= npeel;
1368 desc->niter_expr = GEN_INT (desc->niter);
1369 desc->noloop_assumptions = NULL_RTX;
1370 }
1371 else
1372 {
1373 /* We cannot just update niter_expr, as its value might be clobbered
1374 inside loop. We could handle this by counting the number into
1375 temporary just like we do in runtime unrolling, but it does not
1376 seem worthwhile. */
1377 free_simple_loop_desc (loop);
1378 }
1379 }
c263766c
RH
1380 if (dump_file)
1381 fprintf (dump_file, ";; Peeling loop %d times\n", npeel);
a29c7ea6
ZD
1382}
1383
1384/* Decide whether to unroll LOOP stupidly and how much. */
1385static void
d47cc544 1386decide_unroll_stupid (struct loop *loop, int flags)
a29c7ea6
ZD
1387{
1388 unsigned nunroll, nunroll_by_av, i;
50654f6c 1389 struct niter_desc *desc;
e3a8f1fa 1390 double_int iterations;
a29c7ea6
ZD
1391
1392 if (!(flags & UAP_UNROLL_ALL))
1393 {
1394 /* We were not asked to, just return back silently. */
1395 return;
1396 }
1397
c263766c
RH
1398 if (dump_file)
1399 fprintf (dump_file, "\n;; Considering unrolling loop stupidly\n");
a29c7ea6
ZD
1400
1401 /* nunroll = total number of copies of the original loop body in
1402 unrolled loop (i.e. if it is 2, we have to duplicate loop body once. */
1403 nunroll = PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS) / loop->ninsns;
c263766c
RH
1404 nunroll_by_av
1405 = PARAM_VALUE (PARAM_MAX_AVERAGE_UNROLLED_INSNS) / loop->av_ninsns;
a29c7ea6
ZD
1406 if (nunroll > nunroll_by_av)
1407 nunroll = nunroll_by_av;
1408 if (nunroll > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLL_TIMES))
1409 nunroll = PARAM_VALUE (PARAM_MAX_UNROLL_TIMES);
1410
40ac4f73
CB
1411 if (targetm.loop_unroll_adjust)
1412 nunroll = targetm.loop_unroll_adjust (nunroll, loop);
1413
a29c7ea6
ZD
1414 /* Skip big loops. */
1415 if (nunroll <= 1)
1416 {
c263766c
RH
1417 if (dump_file)
1418 fprintf (dump_file, ";; Not considering loop, is too big\n");
a29c7ea6
ZD
1419 return;
1420 }
1421
1422 /* Check for simple loops. */
50654f6c 1423 desc = get_simple_loop_desc (loop);
a29c7ea6
ZD
1424
1425 /* Check simpleness. */
50654f6c 1426 if (desc->simple_p && !desc->assumptions)
a29c7ea6 1427 {
c263766c
RH
1428 if (dump_file)
1429 fprintf (dump_file, ";; The loop is simple\n");
a29c7ea6
ZD
1430 return;
1431 }
1432
1433 /* Do not unroll loops with branches inside -- it increases number
6acf25e4
JH
1434 of mispredicts.
1435 TODO: this heuristic needs tuning; a call inside the loop body
1436 is also a relatively good reason not to unroll. */
50654f6c 1437 if (num_loop_branches (loop) > 1)
a29c7ea6 1438 {
c263766c
RH
1439 if (dump_file)
1440 fprintf (dump_file, ";; Not unrolling, contains branches\n");
a29c7ea6
ZD
1441 return;
1442 }
1443
73367f92 1444 /* Check whether the loop rolls. */
e3a8f1fa
JH
1445 if ((estimated_loop_iterations (loop, &iterations)
1446 || max_loop_iterations (loop, &iterations))
73367f92 1447 && iterations.ult (double_int::from_shwi (2 * nunroll)))
a29c7ea6 1448 {
c263766c
RH
1449 if (dump_file)
1450 fprintf (dump_file, ";; Not unrolling loop, doesn't roll\n");
a29c7ea6
ZD
1451 return;
1452 }
1453
1454 /* Success. Now force nunroll to be power of 2, as it seems that this
e0bb17a8 1455 improves results (partially because of better alignments, partially
a29c7ea6 1456 because of some dark magic). */
50654f6c
ZD
1457 for (i = 1; 2 * i <= nunroll; i *= 2)
1458 continue;
a29c7ea6
ZD
1459
1460 loop->lpt_decision.decision = LPT_UNROLL_STUPID;
1461 loop->lpt_decision.times = i - 1;
b8698a0f 1462
c263766c 1463 if (dump_file)
4fc2e37d 1464 fprintf (dump_file, ";; Decided to unroll the loop stupidly %d times.\n",
50654f6c 1465 loop->lpt_decision.times);
a29c7ea6
ZD
1466}
1467
4fc2e37d
EB
1468/* Unroll a LOOP LOOP->LPT_DECISION.TIMES times. The transformation does this:
1469
a29c7ea6
ZD
1470 while (cond)
1471 body;
1472
4fc2e37d 1473 ==> (LOOP->LPT_DECISION.TIMES == 3)
a29c7ea6
ZD
1474
1475 while (cond)
1476 {
1477 body;
1478 if (!cond) break;
1479 body;
1480 if (!cond) break;
1481 body;
1482 if (!cond) break;
1483 body;
1484 }
1485 */
1486static void
d73be268 1487unroll_loop_stupid (struct loop *loop)
a29c7ea6
ZD
1488{
1489 sbitmap wont_exit;
1490 unsigned nunroll = loop->lpt_decision.times;
50654f6c 1491 struct niter_desc *desc = get_simple_loop_desc (loop);
f37a4f14 1492 struct opt_info *opt_info = NULL;
41806d92 1493 bool ok;
b8698a0f 1494
f37a4f14
RE
1495 if (flag_split_ivs_in_unroller
1496 || flag_variable_expansion_in_unroller)
1497 opt_info = analyze_insns_in_loop (loop);
b8698a0f
L
1498
1499
a29c7ea6 1500 wont_exit = sbitmap_alloc (nunroll + 1);
f61e445a 1501 bitmap_clear (wont_exit);
f37a4f14 1502 opt_info_start_duplication (opt_info);
b8698a0f 1503
41806d92 1504 ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
d73be268 1505 nunroll, wont_exit,
ee8c1b05 1506 NULL, NULL,
7f7b1718
JH
1507 DLTHE_FLAG_UPDATE_FREQ
1508 | (opt_info
1509 ? DLTHE_RECORD_COPY_NUMBER
1510 : 0));
41806d92 1511 gcc_assert (ok);
b8698a0f 1512
f37a4f14 1513 if (opt_info)
113d659a 1514 {
f37a4f14
RE
1515 apply_opt_in_copies (opt_info, nunroll, true, true);
1516 free_opt_info (opt_info);
113d659a
ZD
1517 }
1518
a29c7ea6 1519 free (wont_exit);
0c20a65f 1520
50654f6c
ZD
1521 if (desc->simple_p)
1522 {
1523 /* We indeed may get here provided that there are nontrivial assumptions
1524 for a loop to be really simple. We could update the counts, but the
1525 problem is that we are unable to decide which exit will be taken
1526 (not really true in case the number of iterations is constant,
1527 but no one will do anything with this information, so we do not
1528 worry about it). */
1529 desc->simple_p = false;
1530 }
1531
c263766c
RH
1532 if (dump_file)
1533 fprintf (dump_file, ";; Unrolled loop %d times, %i insns\n",
a29c7ea6
ZD
1534 nunroll, num_loop_insns (loop));
1535}
113d659a
ZD
1536
1537/* A hash function for information about insns to split. */
1538
1539static hashval_t
1540si_info_hash (const void *ivts)
1541{
741ac903 1542 return (hashval_t) INSN_UID (((const struct iv_to_split *) ivts)->insn);
113d659a
ZD
1543}
1544
1545/* An equality functions for information about insns to split. */
1546
1547static int
1548si_info_eq (const void *ivts1, const void *ivts2)
1549{
d3bfe4de
KG
1550 const struct iv_to_split *const i1 = (const struct iv_to_split *) ivts1;
1551 const struct iv_to_split *const i2 = (const struct iv_to_split *) ivts2;
113d659a
ZD
1552
1553 return i1->insn == i2->insn;
1554}
1555
f37a4f14
RE
1556/* Return a hash for VES, which is really a "var_to_expand *". */
1557
1558static hashval_t
1559ve_info_hash (const void *ves)
1560{
741ac903 1561 return (hashval_t) INSN_UID (((const struct var_to_expand *) ves)->insn);
f37a4f14
RE
1562}
1563
b8698a0f 1564/* Return true if IVTS1 and IVTS2 (which are really both of type
471854f8 1565 "var_to_expand *") refer to the same instruction. */
f37a4f14
RE
1566
1567static int
1568ve_info_eq (const void *ivts1, const void *ivts2)
1569{
d3bfe4de
KG
1570 const struct var_to_expand *const i1 = (const struct var_to_expand *) ivts1;
1571 const struct var_to_expand *const i2 = (const struct var_to_expand *) ivts2;
b8698a0f 1572
f37a4f14
RE
1573 return i1->insn == i2->insn;
1574}
1575
60c48e4c
AO
1576/* Returns true if REG is referenced in one nondebug insn in LOOP.
1577 Set *DEBUG_USES to the number of debug insns that reference the
1578 variable. */
f37a4f14
RE
1579
1580bool
60c48e4c
AO
1581referenced_in_one_insn_in_loop_p (struct loop *loop, rtx reg,
1582 int *debug_uses)
f37a4f14
RE
1583{
1584 basic_block *body, bb;
1585 unsigned i;
1586 int count_ref = 0;
1587 rtx insn;
b8698a0f
L
1588
1589 body = get_loop_body (loop);
f37a4f14
RE
1590 for (i = 0; i < loop->num_nodes; i++)
1591 {
1592 bb = body[i];
b8698a0f 1593
f37a4f14 1594 FOR_BB_INSNS (bb, insn)
60c48e4c
AO
1595 if (!rtx_referenced_p (reg, insn))
1596 continue;
1597 else if (DEBUG_INSN_P (insn))
1598 ++*debug_uses;
1599 else if (++count_ref > 1)
1600 break;
f37a4f14 1601 }
60c48e4c 1602 free (body);
f37a4f14
RE
1603 return (count_ref == 1);
1604}
1605
60c48e4c
AO
1606/* Reset the DEBUG_USES debug insns in LOOP that reference REG. */
1607
1608static void
1609reset_debug_uses_in_loop (struct loop *loop, rtx reg, int debug_uses)
1610{
1611 basic_block *body, bb;
1612 unsigned i;
1613 rtx insn;
1614
1615 body = get_loop_body (loop);
1616 for (i = 0; debug_uses && i < loop->num_nodes; i++)
1617 {
1618 bb = body[i];
1619
1620 FOR_BB_INSNS (bb, insn)
1621 if (!DEBUG_INSN_P (insn) || !rtx_referenced_p (reg, insn))
1622 continue;
1623 else
1624 {
1625 validate_change (insn, &INSN_VAR_LOCATION_LOC (insn),
1626 gen_rtx_UNKNOWN_VAR_LOC (), 0);
1627 if (!--debug_uses)
1628 break;
1629 }
1630 }
1631 free (body);
1632}
1633
f37a4f14 1634/* Determine whether INSN contains an accumulator
b8698a0f 1635 which can be expanded into separate copies,
f37a4f14 1636 one for each copy of the LOOP body.
b8698a0f 1637
f37a4f14
RE
1638 for (i = 0 ; i < n; i++)
1639 sum += a[i];
b8698a0f 1640
f37a4f14 1641 ==>
b8698a0f 1642
f37a4f14
RE
1643 sum += a[i]
1644 ....
1645 i = i+1;
1646 sum1 += a[i]
1647 ....
1648 i = i+1
1649 sum2 += a[i];
1650 ....
1651
b8698a0f
L
1652 Return NULL if INSN contains no opportunity for expansion of an accumulator.
1653 Otherwise, allocate a VAR_TO_EXPAND structure, fill it with the relevant
f37a4f14
RE
1654 information and return a pointer to it.  An illustrative source-level
1655 sketch of the whole transformation follows this function. */
1656
1657static struct var_to_expand *
1658analyze_insn_to_expand_var (struct loop *loop, rtx insn)
1659{
531e5376 1660 rtx set, dest, src;
f37a4f14 1661 struct var_to_expand *ves;
76fd2caa 1662 unsigned accum_pos;
531e5376 1663 enum rtx_code code;
60c48e4c 1664 int debug_uses = 0;
76fd2caa 1665
f37a4f14
RE
1666 set = single_set (insn);
1667 if (!set)
1668 return NULL;
b8698a0f 1669
f37a4f14
RE
1670 dest = SET_DEST (set);
1671 src = SET_SRC (set);
531e5376 1672 code = GET_CODE (src);
b8698a0f 1673
531e5376 1674 if (code != PLUS && code != MINUS && code != MULT && code != FMA)
f37a4f14 1675 return NULL;
f2dd440f 1676
531e5376
RH
1677 if (FLOAT_MODE_P (GET_MODE (dest)))
1678 {
1679 if (!flag_associative_math)
1680 return NULL;
1681 /* In the case of FMA, we're also changing the rounding. */
1682 if (code == FMA && !flag_unsafe_math_optimizations)
1683 return NULL;
1684 }
1685
f2dd440f
SB
1686 /* Hmm, this is a bit paradoxical. We know that INSN is a valid insn
1687 in MD. But if there is no optab to generate the insn, we cannot
1688 perform the variable expansion. This can happen if an MD provides
1689 an insn but not a named pattern to generate it, for example to avoid
1690 producing code that needs additional mode switches like for x87/mmx.
1691
1692 So we check have_insn_for which looks for an optab for the operation
1693 in SRC. If it doesn't exist, we can't perform the expansion even
1694 though INSN is valid. */
531e5376 1695 if (!have_insn_for (code, GET_MODE (src)))
f2dd440f
SB
1696 return NULL;
1697
f37a4f14
RE
1698 if (!REG_P (dest)
1699 && !(GET_CODE (dest) == SUBREG
1700 && REG_P (SUBREG_REG (dest))))
1701 return NULL;
b8698a0f 1702
531e5376
RH
1703 /* Find the accumulator use within the operation. */
1704 if (code == FMA)
1705 {
1706 /* We only support accumulation via FMA in the ADD position. */
1707 if (!rtx_equal_p (dest, XEXP (src, 2)))
1708 return NULL;
1709 accum_pos = 2;
1710 }
1711 else if (rtx_equal_p (dest, XEXP (src, 0)))
76fd2caa 1712 accum_pos = 0;
531e5376
RH
1713 else if (rtx_equal_p (dest, XEXP (src, 1)))
1714 {
1715 /* The method of expansion that we are using, which initializes the
1716 expansions with zero and sums them up at the end of the
1717 computation, will yield wrong results for (x = something - x);
1718 thus avoid using it in that case. */
1719 if (code == MINUS)
1720 return NULL;
1721 accum_pos = 1;
1722 }
76fd2caa
RE
1723 else
1724 return NULL;
1725
531e5376
RH
1726 /* It must not otherwise be used. */
1727 if (code == FMA)
1728 {
1729 if (rtx_referenced_p (dest, XEXP (src, 0))
1730 || rtx_referenced_p (dest, XEXP (src, 1)))
1731 return NULL;
1732 }
1733 else if (rtx_referenced_p (dest, XEXP (src, 1 - accum_pos)))
f37a4f14 1734 return NULL;
b8698a0f 1735
531e5376 1736 /* It must be used in exactly one insn. */
60c48e4c 1737 if (!referenced_in_one_insn_in_loop_p (loop, dest, &debug_uses))
f37a4f14 1738 return NULL;
b8698a0f 1739
c1c5a431 1740 if (dump_file)
531e5376
RH
1741 {
1742 fprintf (dump_file, "\n;; Expanding Accumulator ");
1743 print_rtl (dump_file, dest);
1744 fprintf (dump_file, "\n");
1745 }
c1c5a431 1746
60c48e4c
AO
1747 if (debug_uses)
1748 /* Instead of resetting the debug insns, we could replace each
1749 debug use in the loop with the sum or product of all expanded
1750 accumulators. Since we'll only know of all expansions at the
1751 end, we'd have to keep track of which vars_to_expand a debug
1752 insn in the loop references, take note of each copy of the
1753 debug insn during unrolling, and when it's all done, compute
1754 the sum or product of each variable and adjust the original
1755 debug insn and each copy thereof. What a pain! */
1756 reset_debug_uses_in_loop (loop, dest, debug_uses);
1757
f37a4f14 1758 /* Record the accumulator to expand. */
5ed6ace5 1759 ves = XNEW (struct var_to_expand);
f37a4f14 1760 ves->insn = insn;
f37a4f14 1761 ves->reg = copy_rtx (dest);
a9f6ecee
AO
1762 ves->var_expansions = VEC_alloc (rtx, heap, 1);
1763 ves->next = NULL;
f37a4f14
RE
1764 ves->op = GET_CODE (src);
1765 ves->expansion_count = 0;
1766 ves->reuse_expansion = 0;
b8698a0f 1767 return ves;
f37a4f14
RE
1768}
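
/* Editor's illustrative sketch, not part of the pass: roughly the
   source-level effect of the accumulator expansion described above, for an
   accumulation unrolled twice.  The function name and the unroll factor are
   hypothetical; the pass itself works on RTL.  The expansion sum1 is
   initialized in the loop preheader (see insert_var_expansion_initialization)
   and combined with the original accumulator at the loop exit (see
   combine_var_copies_in_loop_exit).  */

static double
example_expanded_sum (const double *a, int n)
{
  double sum = 0.0;     /* Original accumulator.  */
  double sum1 = -0.0;   /* Expansion; -0.0 when signed zeros are honored.  */
  int i;

  for (i = 0; i + 1 < n; i += 2)
    {
      sum += a[i];       /* Copy 0 keeps the original accumulator.  */
      sum1 += a[i + 1];  /* Copy 1 accumulates into the expansion, which
                            breaks the dependence chain between copies.  */
    }
  for (; i < n; i++)
    sum += a[i];         /* Remainder iterations.  */

  return sum + sum1;     /* Expansions are summed at the loop exit.  */
}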
1769
113d659a 1770/* Determine whether there is an induction variable in INSN that
b8698a0f 1771 we would like to split during unrolling.
f37a4f14
RE
1772
1773 I.e. replace
1774
1775 i = i + 1;
1776 ...
1777 i = i + 1;
1778 ...
1779 i = i + 1;
1780 ...
1781
1782 chains of this kind by
1783
1784 i0 = i + 1
1785 ...
1786 i = i0 + 1
1787 ...
1788 i = i0 + 2
1789 ...
1790
b8698a0f 1791 Return NULL if INSN contains no interesting IVs. Otherwise, allocate
f37a4f14 1792 an IV_TO_SPLIT structure, fill it with the relevant information and return a
113d659a
ZD
1793 pointer to it. */
1794
1795static struct iv_to_split *
1796analyze_iv_to_split_insn (rtx insn)
1797{
1798 rtx set, dest;
1799 struct rtx_iv iv;
1800 struct iv_to_split *ivts;
41806d92 1801 bool ok;
113d659a
ZD
1802
1803 /* For now we just split the basic induction variables. Later this may be
1804 extended for example by selecting also addresses of memory references. */
1805 set = single_set (insn);
1806 if (!set)
1807 return NULL;
1808
1809 dest = SET_DEST (set);
1810 if (!REG_P (dest))
1811 return NULL;
1812
1813 if (!biv_p (insn, dest))
1814 return NULL;
1815
03fd2215 1816 ok = iv_analyze_result (insn, dest, &iv);
4dc7782d
JL
1817
1818 /* This used to be an assert under the assumption that if biv_p returns
1819 true that iv_analyze_result must also return true. However, that
1820 assumption is not strictly correct as evidenced by pr25569.
1821
1822 Returning NULL when iv_analyze_result returns false is safe and
1823 avoids the problems in pr25569 until the iv_analyze_* routines
1824 can be fixed, which is apparently hard and time consuming
1825 according to their author. */
1826 if (! ok)
1827 return NULL;
113d659a
ZD
1828
1829 if (iv.step == const0_rtx
1830 || iv.mode != iv.extend_mode)
1831 return NULL;
1832
1833 /* Record the insn to split. */
5ed6ace5 1834 ivts = XNEW (struct iv_to_split);
113d659a
ZD
1835 ivts->insn = insn;
1836 ivts->base_var = NULL_RTX;
1837 ivts->step = iv.step;
a9f6ecee 1838 ivts->next = NULL;
113d659a
ZD
1839 ivts->n_loc = 1;
1840 ivts->loc[0] = 1;
b8698a0f 1841
113d659a
ZD
1842 return ivts;
1843}
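
/* Editor's illustrative sketch, not part of the pass: the source-level
   effect of splitting a basic induction variable when a loop is unrolled
   four times.  The function name and the unroll factor are hypothetical, and
   the remainder loop is omitted by assuming N is a multiple of 4.  Every copy
   uses the base variable i0 plus a constant multiple of the step, so the
   copies no longer depend on each other.  */

static void
example_split_iv (int *a, int n)
{
  int i0;

  for (i0 = 0; i0 < n; i0 += 4)
    {
      a[i0] = 0;       /* Copy 0: the base variable itself.  */
      a[i0 + 1] = 0;   /* Copy 1: base + 1 * step.  */
      a[i0 + 2] = 0;   /* Copy 2: base + 2 * step.  */
      a[i0 + 3] = 0;   /* Copy 3: base + 3 * step.  */
    }
}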
1844
f37a4f14
RE
1845/* Determine which insns in LOOP can be optimized.
1846 Return an OPT_INFO struct with the relevant hash tables filled
1847 with all insns to be optimized. The FIRST_NEW_BLOCK field
113d659a
ZD
1848 is undefined for the return value. */
1849
f37a4f14
RE
1850static struct opt_info *
1851analyze_insns_in_loop (struct loop *loop)
113d659a
ZD
1852{
1853 basic_block *body, bb;
ca83d385 1854 unsigned i;
5ed6ace5 1855 struct opt_info *opt_info = XCNEW (struct opt_info);
113d659a 1856 rtx insn;
f37a4f14
RE
1857 struct iv_to_split *ivts = NULL;
1858 struct var_to_expand *ves = NULL;
1859 PTR *slot1;
1860 PTR *slot2;
ca83d385
ZD
1861 VEC (edge, heap) *edges = get_loop_exit_edges (loop);
1862 edge exit;
f37a4f14 1863 bool can_apply = false;
b8698a0f 1864
113d659a
ZD
1865 iv_analysis_loop_init (loop);
1866
1867 body = get_loop_body (loop);
f37a4f14
RE
1868
1869 if (flag_split_ivs_in_unroller)
a9f6ecee
AO
1870 {
1871 opt_info->insns_to_split = htab_create (5 * loop->num_nodes,
1872 si_info_hash, si_info_eq, free);
1873 opt_info->iv_to_split_head = NULL;
1874 opt_info->iv_to_split_tail = &opt_info->iv_to_split_head;
1875 }
b8698a0f 1876
f37a4f14 1877 /* Record the loop exit bb and loop preheader before the unrolling. */
598ec7bd 1878 opt_info->loop_preheader = loop_preheader_edge (loop)->src;
b8698a0f 1879
ca83d385 1880 if (VEC_length (edge, edges) == 1)
f37a4f14 1881 {
ca83d385
ZD
1882 exit = VEC_index (edge, edges, 0);
1883 if (!(exit->flags & EDGE_COMPLEX))
1884 {
1885 opt_info->loop_exit = split_edge (exit);
1886 can_apply = true;
1887 }
f37a4f14 1888 }
b8698a0f 1889
f37a4f14
RE
1890 if (flag_variable_expansion_in_unroller
1891 && can_apply)
a9f6ecee
AO
1892 {
1893 opt_info->insns_with_var_to_expand = htab_create (5 * loop->num_nodes,
1894 ve_info_hash,
1895 ve_info_eq, free);
1896 opt_info->var_to_expand_head = NULL;
1897 opt_info->var_to_expand_tail = &opt_info->var_to_expand_head;
1898 }
b8698a0f 1899
113d659a
ZD
1900 for (i = 0; i < loop->num_nodes; i++)
1901 {
1902 bb = body[i];
1903 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
1904 continue;
1905
1906 FOR_BB_INSNS (bb, insn)
f37a4f14
RE
1907 {
1908 if (!INSN_P (insn))
1909 continue;
b8698a0f 1910
f37a4f14
RE
1911 if (opt_info->insns_to_split)
1912 ivts = analyze_iv_to_split_insn (insn);
b8698a0f 1913
f37a4f14
RE
1914 if (ivts)
1915 {
1916 slot1 = htab_find_slot (opt_info->insns_to_split, ivts, INSERT);
a9f6ecee 1917 gcc_assert (*slot1 == NULL);
f37a4f14 1918 *slot1 = ivts;
a9f6ecee
AO
1919 *opt_info->iv_to_split_tail = ivts;
1920 opt_info->iv_to_split_tail = &ivts->next;
f37a4f14
RE
1921 continue;
1922 }
b8698a0f 1923
f37a4f14
RE
1924 if (opt_info->insns_with_var_to_expand)
1925 ves = analyze_insn_to_expand_var (loop, insn);
b8698a0f 1926
f37a4f14
RE
1927 if (ves)
1928 {
1929 slot2 = htab_find_slot (opt_info->insns_with_var_to_expand, ves, INSERT);
a9f6ecee 1930 gcc_assert (*slot2 == NULL);
f37a4f14 1931 *slot2 = ves;
a9f6ecee
AO
1932 *opt_info->var_to_expand_tail = ves;
1933 opt_info->var_to_expand_tail = &ves->next;
f37a4f14
RE
1934 }
1935 }
113d659a 1936 }
b8698a0f 1937
ca83d385 1938 VEC_free (edge, heap, edges);
113d659a 1939 free (body);
f37a4f14 1940 return opt_info;
113d659a
ZD
1941}
1942
1943/* Called just before loop duplication. Records the start of the duplicated
f37a4f14 1944 area in OPT_INFO. */
113d659a 1945
b8698a0f 1946static void
f37a4f14 1947opt_info_start_duplication (struct opt_info *opt_info)
113d659a 1948{
f37a4f14
RE
1949 if (opt_info)
1950 opt_info->first_new_block = last_basic_block;
113d659a
ZD
1951}
1952
1953/* Determine the number of iterations between initialization of the base
1954 variable and the current copy (N_COPY). N_COPIES is the total number
1955 of newly created copies. UNROLLING is true if we are unrolling
1956 (not peeling) the loop. */
1957
1958static unsigned
1959determine_split_iv_delta (unsigned n_copy, unsigned n_copies, bool unrolling)
1960{
1961 if (unrolling)
1962 {
1963 /* If we are unrolling, initialization is done in the original loop
1964 body (number 0). */
1965 return n_copy;
1966 }
1967 else
1968 {
1969 /* If we are peeling, the copy in which the initialization occurs has
1970 number 1. The original loop (number 0) is the last. */
1971 if (n_copy)
1972 return n_copy - 1;
1973 else
1974 return n_copies;
1975 }
1976}
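
/* Editor's note -- a worked example of the numbering above, assuming
   N_COPIES == 3 (four bodies in total): when unrolling, the base variable is
   initialized in the original body (copy 0), so bodies 0, 1, 2, 3 get deltas
   0, 1, 2, 3.  When peeling, the initialization lands in copy 1, bodies
   1, 2, 3 get deltas 0, 1, 2, and the original body, which runs last, gets
   delta N_COPIES == 3.  */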
1977
1978/* Locate in EXPR the expression corresponding to the location recorded
1979 in IVTS, and return a pointer to the RTX for this location. */
1980
1981static rtx *
1982get_ivts_expr (rtx expr, struct iv_to_split *ivts)
1983{
1984 unsigned i;
1985 rtx *ret = &expr;
1986
1987 for (i = 0; i < ivts->n_loc; i++)
1988 ret = &XEXP (*ret, ivts->loc[i]);
1989
1990 return ret;
1991}
1992
a9f6ecee 1993/* Allocate the basic variable for the induction variable chain. */
113d659a 1994
a9f6ecee
AO
1995static void
1996allocate_basic_variable (struct iv_to_split *ivts)
113d659a 1997{
113d659a
ZD
1998 rtx expr = *get_ivts_expr (single_set (ivts->insn), ivts);
1999
2000 ivts->base_var = gen_reg_rtx (GET_MODE (expr));
113d659a
ZD
2001}
2002
2003/* Insert initialization of basic variable of IVTS before INSN, taking
2004 the initial value from INSN. */
2005
2006static void
2007insert_base_initialization (struct iv_to_split *ivts, rtx insn)
2008{
2009 rtx expr = copy_rtx (*get_ivts_expr (single_set (insn), ivts));
2010 rtx seq;
2011
2012 start_sequence ();
2013 expr = force_operand (expr, ivts->base_var);
2014 if (expr != ivts->base_var)
2015 emit_move_insn (ivts->base_var, expr);
2016 seq = get_insns ();
2017 end_sequence ();
2018
2019 emit_insn_before (seq, insn);
2020}
2021
2022/* Replace the use of induction variable described in IVTS in INSN
2023 by base variable + DELTA * step. */
2024
2025static void
2026split_iv (struct iv_to_split *ivts, rtx insn, unsigned delta)
2027{
2028 rtx expr, *loc, seq, incr, var;
2029 enum machine_mode mode = GET_MODE (ivts->base_var);
2030 rtx src, dest, set;
2031
2032 /* Construct base + DELTA * step. */
2033 if (!delta)
2034 expr = ivts->base_var;
2035 else
2036 {
2037 incr = simplify_gen_binary (MULT, mode,
2038 ivts->step, gen_int_mode (delta, mode));
2039 expr = simplify_gen_binary (PLUS, GET_MODE (ivts->base_var),
2040 ivts->base_var, incr);
2041 }
2042
2043 /* Figure out where to do the replacement. */
2044 loc = get_ivts_expr (single_set (insn), ivts);
2045
2046 /* If we can make the replacement right away, we're done. */
2047 if (validate_change (insn, loc, expr, 0))
2048 return;
2049
2050 /* Otherwise, force EXPR into a register and try again. */
2051 start_sequence ();
2052 var = gen_reg_rtx (mode);
2053 expr = force_operand (expr, var);
2054 if (expr != var)
2055 emit_move_insn (var, expr);
2056 seq = get_insns ();
2057 end_sequence ();
2058 emit_insn_before (seq, insn);
b8698a0f 2059
113d659a
ZD
2060 if (validate_change (insn, loc, var, 0))
2061 return;
2062
2063 /* As a last resort, try recreating the assignment in INSN
2064 completely from scratch. */
2065 set = single_set (insn);
2066 gcc_assert (set);
2067
2068 start_sequence ();
2069 *loc = var;
2070 src = copy_rtx (SET_SRC (set));
2071 dest = copy_rtx (SET_DEST (set));
2072 src = force_operand (src, dest);
2073 if (src != dest)
2074 emit_move_insn (dest, src);
2075 seq = get_insns ();
2076 end_sequence ();
b8698a0f 2077
113d659a
ZD
2078 emit_insn_before (seq, insn);
2079 delete_insn (insn);
2080}
2081
113d659a 2082
2cd0e9f4 2083/* Return one expansion of the accumulator recorded in struct VE. */
113d659a 2084
f37a4f14
RE
2085static rtx
2086get_expansion (struct var_to_expand *ve)
2087{
2088 rtx reg;
b8698a0f 2089
f37a4f14
RE
2090 if (ve->reuse_expansion == 0)
2091 reg = ve->reg;
2092 else
0cc39082 2093 reg = VEC_index (rtx, ve->var_expansions, ve->reuse_expansion - 1);
b8698a0f 2094
0cc39082 2095 if (VEC_length (rtx, ve->var_expansions) == (unsigned) ve->reuse_expansion)
f37a4f14 2096 ve->reuse_expansion = 0;
b8698a0f 2097 else
f37a4f14 2098 ve->reuse_expansion++;
b8698a0f 2099
f37a4f14
RE
2100 return reg;
2101}
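
/* Editor's note -- how the reuse cycle above plays out, assuming
   PARAM_MAX_VARIABLE_EXPANSIONS == 2: the first two unrolled copies receive
   fresh registers e1 and e2 from expand_var_during_unrolling below; once the
   limit is reached, the following copies reuse VE->reg, e1, e2, VE->reg, e1,
   e2, ... in round-robin order.  */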
113d659a 2102
113d659a 2103
b8698a0f 2104/* Given INSN replace the uses of the accumulator recorded in VE
f37a4f14
RE
2105 with a new register. */
2106
2107static void
2108expand_var_during_unrolling (struct var_to_expand *ve, rtx insn)
2109{
2110 rtx new_reg, set;
2111 bool really_new_expansion = false;
b8698a0f 2112
f37a4f14 2113 set = single_set (insn);
b5e624c6 2114 gcc_assert (set);
b8698a0f 2115
f37a4f14
RE
2116 /* Generate a new register only if the expansion limit has not been
2117 reached. Else reuse an already existing expansion. */
2118 if (PARAM_VALUE (PARAM_MAX_VARIABLE_EXPANSIONS) > ve->expansion_count)
2119 {
2120 really_new_expansion = true;
2121 new_reg = gen_reg_rtx (GET_MODE (ve->reg));
2122 }
2123 else
2124 new_reg = get_expansion (ve);
2125
6e74642b 2126 validate_replace_rtx_group (SET_DEST (set), new_reg, insn);
f37a4f14
RE
2127 if (apply_change_group ())
2128 if (really_new_expansion)
2129 {
0cc39082 2130 VEC_safe_push (rtx, heap, ve->var_expansions, new_reg);
f37a4f14
RE
2131 ve->expansion_count++;
2132 }
2133}
2134
a9f6ecee
AO
2135/* Initialize the variable expansions in the loop preheader. PLACE is the
2136 loop-preheader basic block where the initialization of the
2137 expansions should take place. The expansions are initialized with
2138 (-0) when the operation is plus or minus, to honor signed zero. This
2139 way we can prevent cases where the sign of the final result is
2140 affected by the sign of the expansion. Here is an example to
2141 demonstrate this:
b8698a0f 2142
290358f7
RE
2143 for (i = 0 ; i < n; i++)
2144 sum += something;
2145
2146 ==>
2147
2148 sum += something
2149 ....
2150 i = i+1;
2151 sum1 += something
2152 ....
2153 i = i+1
2154 sum2 += something;
2155 ....
b8698a0f 2156
290358f7
RE
2157 When SUM is initialized with -zero and SOMETHING is also -zero, the
2158 final result of SUM should be -zero; thus the expansions sum1 and sum2
2159 should be initialized with -zero as well (otherwise we will get +zero
2160 as the final result). A small compilable illustration follows the function below. */
f37a4f14 2161
a9f6ecee
AO
2162static void
2163insert_var_expansion_initialization (struct var_to_expand *ve,
2164 basic_block place)
f37a4f14 2165{
6e74642b 2166 rtx seq, var, zero_init;
f37a4f14 2167 unsigned i;
290358f7
RE
2168 enum machine_mode mode = GET_MODE (ve->reg);
2169 bool honor_signed_zero_p = HONOR_SIGNED_ZEROS (mode);
2170
0cc39082 2171 if (VEC_length (rtx, ve->var_expansions) == 0)
a9f6ecee 2172 return;
b8698a0f 2173
f37a4f14 2174 start_sequence ();
531e5376
RH
2175 switch (ve->op)
2176 {
2177 case FMA:
2178 /* Note that we only accumulate FMA via the ADD operand. */
2179 case PLUS:
2180 case MINUS:
2181 FOR_EACH_VEC_ELT (rtx, ve->var_expansions, i, var)
2182 {
2183 if (honor_signed_zero_p)
2184 zero_init = simplify_gen_unary (NEG, mode, CONST0_RTX (mode), mode);
2185 else
2186 zero_init = CONST0_RTX (mode);
2187 emit_move_insn (var, zero_init);
2188 }
2189 break;
2190
2191 case MULT:
2192 FOR_EACH_VEC_ELT (rtx, ve->var_expansions, i, var)
2193 {
2194 zero_init = CONST1_RTX (GET_MODE (var));
2195 emit_move_insn (var, zero_init);
2196 }
2197 break;
2198
2199 default:
2200 gcc_unreachable ();
2201 }
b8698a0f 2202
f37a4f14
RE
2203 seq = get_insns ();
2204 end_sequence ();
b8698a0f 2205
6e74642b 2206 emit_insn_after (seq, BB_END (place));
f37a4f14
RE
2207}
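
/* Editor's illustrative example, not part of the pass: a hypothetical helper
   showing why the expansions are started at -0.0 rather than +0.0 when signed
   zeros are honored.  Adding a +0.0 term into a chain of -0.0 values flips
   the sign of the resulting zero.  */

static void
example_signed_zero_init (void)
{
  double neg_zero = -0.0;

  /* -0.0 + -0.0 is -0.0, so expansions initialized to -0.0 preserve the
     sign of an accumulation of -0.0 values ...  */
  double ok = neg_zero + neg_zero;

  /* ... but +0.0 + -0.0 is +0.0, so an expansion initialized to +0.0 would
     turn the combined result into +0.0.  */
  double wrong = 0.0 + neg_zero;

  /* The two results compare equal, yet their signs differ.  */
  gcc_assert (__builtin_signbit (ok) != __builtin_signbit (wrong));
}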
2208
a9f6ecee
AO
2209/* Combine the variable expansions at the loop exit. PLACE is the
2210 loop exit basic block where the summation of the expansions should
2211 take place. */
f37a4f14 2212
a9f6ecee
AO
2213static void
2214combine_var_copies_in_loop_exit (struct var_to_expand *ve, basic_block place)
f37a4f14 2215{
f37a4f14
RE
2216 rtx sum = ve->reg;
2217 rtx expr, seq, var, insn;
2218 unsigned i;
2219
0cc39082 2220 if (VEC_length (rtx, ve->var_expansions) == 0)
a9f6ecee 2221 return;
b8698a0f 2222
f37a4f14 2223 start_sequence ();
531e5376
RH
2224 switch (ve->op)
2225 {
2226 case FMA:
2227 /* Note that we only accumulate FMA via the ADD operand. */
2228 case PLUS:
2229 case MINUS:
2230 FOR_EACH_VEC_ELT (rtx, ve->var_expansions, i, var)
2231 sum = simplify_gen_binary (PLUS, GET_MODE (ve->reg), var, sum);
2232 break;
2233
2234 case MULT:
2235 FOR_EACH_VEC_ELT (rtx, ve->var_expansions, i, var)
2236 sum = simplify_gen_binary (MULT, GET_MODE (ve->reg), var, sum);
2237 break;
2238
2239 default:
2240 gcc_unreachable ();
2241 }
b8698a0f 2242
f37a4f14
RE
2243 expr = force_operand (sum, ve->reg);
2244 if (expr != ve->reg)
2245 emit_move_insn (ve->reg, expr);
2246 seq = get_insns ();
2247 end_sequence ();
b8698a0f 2248
f37a4f14
RE
2249 insn = BB_HEAD (place);
2250 while (!NOTE_INSN_BASIC_BLOCK_P (insn))
2251 insn = NEXT_INSN (insn);
2252
2253 emit_insn_after (seq, insn);
f37a4f14
RE
2254}
2255
b8698a0f
L
2256/* Apply loop optimizations in loop copies using the
2257 data gathered during the unrolling. Structure
f37a4f14 2258 OPT_INFO records that data.
b8698a0f 2259
113d659a
ZD
2260 UNROLLING is true if we unrolled (not peeled) the loop.
2261 REWRITE_ORIGINAL_LOOP is true if we should also rewrite the original body of
2262 the loop (as it should happen in complete unrolling, but not in ordinary
2263 peeling of the loop). */
2264
2265static void
b8698a0f
L
2266apply_opt_in_copies (struct opt_info *opt_info,
2267 unsigned n_copies, bool unrolling,
f37a4f14 2268 bool rewrite_original_loop)
113d659a
ZD
2269{
2270 unsigned i, delta;
2271 basic_block bb, orig_bb;
2272 rtx insn, orig_insn, next;
2273 struct iv_to_split ivts_templ, *ivts;
f37a4f14 2274 struct var_to_expand ve_templ, *ves;
b8698a0f 2275
113d659a
ZD
2276 /* Sanity check -- we need to put initialization in the original loop
2277 body. */
2278 gcc_assert (!unrolling || rewrite_original_loop);
b8698a0f 2279
113d659a 2280 /* Allocate the basic variables (i0). */
f37a4f14 2281 if (opt_info->insns_to_split)
a9f6ecee
AO
2282 for (ivts = opt_info->iv_to_split_head; ivts; ivts = ivts->next)
2283 allocate_basic_variable (ivts);
b8698a0f 2284
f37a4f14 2285 for (i = opt_info->first_new_block; i < (unsigned) last_basic_block; i++)
113d659a
ZD
2286 {
2287 bb = BASIC_BLOCK (i);
6580ee77 2288 orig_bb = get_bb_original (bb);
b8698a0f 2289
6580ee77
JH
2290 /* bb->aux holds position in copy sequence initialized by
2291 duplicate_loop_to_header_edge. */
2292 delta = determine_split_iv_delta ((size_t)bb->aux, n_copies,
113d659a 2293 unrolling);
7f7b1718 2294 bb->aux = 0;
113d659a
ZD
2295 orig_insn = BB_HEAD (orig_bb);
2296 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
f37a4f14
RE
2297 {
2298 next = NEXT_INSN (insn);
0397b965
JJ
2299 if (!INSN_P (insn)
2300 || (DEBUG_INSN_P (insn)
2301 && TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL))
f37a4f14 2302 continue;
b8698a0f 2303
0397b965
JJ
2304 while (!INSN_P (orig_insn)
2305 || (DEBUG_INSN_P (orig_insn)
2306 && (TREE_CODE (INSN_VAR_LOCATION_DECL (orig_insn))
2307 == LABEL_DECL)))
f37a4f14 2308 orig_insn = NEXT_INSN (orig_insn);
b8698a0f 2309
f37a4f14
RE
2310 ivts_templ.insn = orig_insn;
2311 ve_templ.insn = orig_insn;
b8698a0f 2312
f37a4f14
RE
2313 /* Apply splitting iv optimization. */
2314 if (opt_info->insns_to_split)
2315 {
d3bfe4de
KG
2316 ivts = (struct iv_to_split *)
2317 htab_find (opt_info->insns_to_split, &ivts_templ);
b8698a0f 2318
f37a4f14
RE
2319 if (ivts)
2320 {
21f868a2
ZD
2321 gcc_assert (GET_CODE (PATTERN (insn))
2322 == GET_CODE (PATTERN (orig_insn)));
b8698a0f 2323
f37a4f14
RE
2324 if (!delta)
2325 insert_base_initialization (ivts, insn);
2326 split_iv (ivts, insn, delta);
2327 }
2328 }
2329 /* Apply variable expansion optimization. */
2330 if (unrolling && opt_info->insns_with_var_to_expand)
2331 {
d3bfe4de
KG
2332 ves = (struct var_to_expand *)
2333 htab_find (opt_info->insns_with_var_to_expand, &ve_templ);
f37a4f14 2334 if (ves)
b8698a0f 2335 {
21f868a2
ZD
2336 gcc_assert (GET_CODE (PATTERN (insn))
2337 == GET_CODE (PATTERN (orig_insn)));
f37a4f14
RE
2338 expand_var_during_unrolling (ves, insn);
2339 }
2340 }
2341 orig_insn = NEXT_INSN (orig_insn);
2342 }
113d659a
ZD
2343 }
2344
2345 if (!rewrite_original_loop)
2346 return;
b8698a0f 2347
f37a4f14 2348 /* Initialize the variable expansions in the loop preheader
b8698a0f 2349 and take care of combining them at the loop exit. */
f37a4f14
RE
2350 if (opt_info->insns_with_var_to_expand)
2351 {
a9f6ecee
AO
2352 for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
2353 insert_var_expansion_initialization (ves, opt_info->loop_preheader);
2354 for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
2355 combine_var_copies_in_loop_exit (ves, opt_info->loop_exit);
f37a4f14 2356 }
b8698a0f 2357
113d659a
ZD
2358 /* Rewrite also the original loop body. Find its blocks as originals of the blocks
2359 in the last copied iteration, i.e. those that have
6580ee77 2360 get_bb_copy (get_bb_original (bb)) == bb. */
f37a4f14 2361 for (i = opt_info->first_new_block; i < (unsigned) last_basic_block; i++)
113d659a
ZD
2362 {
2363 bb = BASIC_BLOCK (i);
6580ee77
JH
2364 orig_bb = get_bb_original (bb);
2365 if (get_bb_copy (orig_bb) != bb)
113d659a 2366 continue;
b8698a0f 2367
113d659a
ZD
2368 delta = determine_split_iv_delta (0, n_copies, unrolling);
2369 for (orig_insn = BB_HEAD (orig_bb);
f37a4f14
RE
2370 orig_insn != NEXT_INSN (BB_END (bb));
2371 orig_insn = next)
2372 {
2373 next = NEXT_INSN (orig_insn);
b8698a0f 2374
f37a4f14
RE
2375 if (!INSN_P (orig_insn))
2376 continue;
b8698a0f 2377
f37a4f14
RE
2378 ivts_templ.insn = orig_insn;
2379 if (opt_info->insns_to_split)
2380 {
d3bfe4de
KG
2381 ivts = (struct iv_to_split *)
2382 htab_find (opt_info->insns_to_split, &ivts_templ);
f37a4f14
RE
2383 if (ivts)
2384 {
2385 if (!delta)
2386 insert_base_initialization (ivts, orig_insn);
2387 split_iv (ivts, orig_insn, delta);
2388 continue;
2389 }
2390 }
b8698a0f 2391
f37a4f14
RE
2392 }
2393 }
2394}
113d659a 2395
f37a4f14 2396/* Release OPT_INFO. */
113d659a
ZD
2397
2398static void
f37a4f14 2399free_opt_info (struct opt_info *opt_info)
113d659a 2400{
f37a4f14
RE
2401 if (opt_info->insns_to_split)
2402 htab_delete (opt_info->insns_to_split);
2403 if (opt_info->insns_with_var_to_expand)
2404 {
a9f6ecee
AO
2405 struct var_to_expand *ves;
2406
2407 for (ves = opt_info->var_to_expand_head; ves; ves = ves->next)
2408 VEC_free (rtx, heap, ves->var_expansions);
f37a4f14
RE
2409 htab_delete (opt_info->insns_with_var_to_expand);
2410 }
2411 free (opt_info);
113d659a 2412}