/* Perform various loop optimizations, including strength reduction.
   Copyright (C) 1987, 1988, 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997,
   1998, 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

/* This is the loop optimization pass of the compiler.
   It finds invariant computations within loops and moves them
   to the beginning of the loop.  Then it identifies basic and
   general induction variables.  Strength reduction is applied to the general
   induction variables, and induction variable elimination is applied to
   the basic induction variables.

   It also finds cases where
   a register is set within the loop by zero-extending a narrower value
   and changes these to zero the entire register once before the loop
   and merely copy the low part within the loop.

   Most of the complexity is in heuristics to decide when it is worthwhile
   to do these things.  */
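
/* As a hypothetical source-level illustration of the transformations
   described above (the pass itself works on RTL, not on C):

       for (i = 0; i < n; i++)        i is a basic induction variable (biv);
         a[i] = x * y;                the address of a[i] is a general one (giv)

   becomes, after invariant motion, strength reduction, and biv elimination:

       t = x * y;                     invariant product hoisted out
       for (p = a; p < a + n; p++)    giv reduced to a stepped pointer;
         *p = t;                      exit test rewritten so i can go away  */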

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "function.h"
#include "expr.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "regs.h"
#include "recog.h"
#include "flags.h"
#include "real.h"
#include "loop.h"
#include "cselib.h"
#include "except.h"
#include "toplev.h"
#include "predict.h"
#include "insn-flags.h"
#include "optabs.h"
#include "cfgloop.h"

/* Not really meaningful values, but at least something.  */
#ifndef SIMULTANEOUS_PREFETCHES
#define SIMULTANEOUS_PREFETCHES 3
#endif
#ifndef PREFETCH_BLOCK
#define PREFETCH_BLOCK 32
#endif
#ifndef HAVE_prefetch
#define HAVE_prefetch 0
#define CODE_FOR_prefetch 0
#define gen_prefetch(a,b,c) (abort(), NULL_RTX)
#endif

/* Give up the prefetch optimizations once we exceed a given threshold.
   It is unlikely that we would be able to optimize something in a loop
   with so many detected prefetches.  */
#define MAX_PREFETCHES 100
/* The number of prefetch blocks that are beneficial to fetch at once before
   a loop with a known (and low) iteration count.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MAX 6
/* For very tiny loops it is not worthwhile to prefetch even before the loop,
   since it is likely that the data are already in the cache.  */
#define PREFETCH_BLOCKS_BEFORE_LOOP_MIN 2

/* Parameterize some prefetch heuristics so they can be turned on and off
   easily for performance testing on new architectures.  These can be
   defined in target-dependent files.  */

/* Prefetch is worthwhile only when loads/stores are dense.  */
#ifndef PREFETCH_ONLY_DENSE_MEM
#define PREFETCH_ONLY_DENSE_MEM 1
#endif

/* Define what we mean by "dense" loads and stores; this value divided by 256
   is the minimum fraction of memory references that are worth prefetching
   (the default of 220 corresponds to 220/256, roughly 86%).  */
#ifndef PREFETCH_DENSE_MEM
#define PREFETCH_DENSE_MEM 220
#endif

/* Do not prefetch for a loop whose iteration count is known to be low.  */
#ifndef PREFETCH_NO_LOW_LOOPCNT
#define PREFETCH_NO_LOW_LOOPCNT 1
#endif

/* Define what we mean by a "low" iteration count.  */
#ifndef PREFETCH_LOW_LOOPCNT
#define PREFETCH_LOW_LOOPCNT 32
#endif

/* Do not prefetch for a loop that contains a function call; such a loop is
   probably not an internal loop.  */
#ifndef PREFETCH_NO_CALL
#define PREFETCH_NO_CALL 1
#endif

/* Do not prefetch accesses with an extreme stride.  */
#ifndef PREFETCH_NO_EXTREME_STRIDE
#define PREFETCH_NO_EXTREME_STRIDE 1
#endif

/* Define what we mean by an "extreme" stride.  */
#ifndef PREFETCH_EXTREME_STRIDE
#define PREFETCH_EXTREME_STRIDE 4096
#endif

/* Define a limit to how far apart indices can be and still be merged
   into a single prefetch.  */
#ifndef PREFETCH_EXTREME_DIFFERENCE
#define PREFETCH_EXTREME_DIFFERENCE 4096
#endif

/* Issue prefetch instructions before the loop to fetch data to be used
   in the first few loop iterations.  */
#ifndef PREFETCH_BEFORE_LOOP
#define PREFETCH_BEFORE_LOOP 1
#endif

/* Do not handle reversed order prefetches (negative stride).  */
#ifndef PREFETCH_NO_REVERSE_ORDER
#define PREFETCH_NO_REVERSE_ORDER 1
#endif

/* Prefetch even if the GIV is in conditional code.  */
#ifndef PREFETCH_CONDITIONAL
#define PREFETCH_CONDITIONAL 1
#endif

#define LOOP_REG_LIFETIME(LOOP, REGNO) \
((REGNO_LAST_LUID (REGNO) - REGNO_FIRST_LUID (REGNO)))

#define LOOP_REG_GLOBAL_P(LOOP, REGNO) \
((REGNO_LAST_LUID (REGNO) > INSN_LUID ((LOOP)->end) \
 || REGNO_FIRST_LUID (REGNO) < INSN_LUID ((LOOP)->start)))

#define LOOP_REGNO_NREGS(REGNO, SET_DEST) \
((REGNO) < FIRST_PSEUDO_REGISTER \
 ? (int) HARD_REGNO_NREGS ((REGNO), GET_MODE (SET_DEST)) : 1)


/* Vector mapping INSN_UIDs to luids.
   The luids are like uids but always increase monotonically.
   We use them to see whether a jump comes from outside a given loop.  */

int *uid_luid;

/* Indexed by INSN_UID, contains the ordinal giving the (innermost) loop
   number the insn is contained in.  */

struct loop **uid_loop;

/* 1 + largest uid of any insn.  */

int max_uid_for_loop;

/* Number of loops detected in current function.  Used as index to the
   next few tables.  */

static int max_loop_num;

/* Bound on pseudo register number before loop optimization.
   A pseudo has valid regscan info if its number is < max_reg_before_loop.  */
unsigned int max_reg_before_loop;

/* The value to pass to the next call of reg_scan_update.  */
static int loop_max_reg;
\f
/* During the analysis of a loop, a chain of `struct movable's
   is made to record all the movable insns found.
   Then the entire chain can be scanned to decide which to move.  */

struct movable
{
  rtx insn;                     /* A movable insn */
  rtx set_src;                  /* The expression this reg is set from.  */
  rtx set_dest;                 /* The destination of this SET.  */
  rtx dependencies;             /* When INSN is libcall, this is an EXPR_LIST
                                   of any registers used within the LIBCALL.  */
  int consec;                   /* Number of consecutive following insns
                                   that must be moved with this one.  */
  unsigned int regno;           /* The register it sets */
  short lifetime;               /* lifetime of that register;
                                   may be adjusted when matching movables
                                   that load the same value are found.  */
  short savings;                /* Number of insns we can move for this reg,
                                   including other movables that force this
                                   or match this one.  */
  ENUM_BITFIELD(machine_mode) savemode : 8;   /* Nonzero means it is a mode for
                                   a low part that we should avoid changing when
                                   clearing the rest of the reg.  */
  unsigned int cond : 1;        /* 1 if only conditionally movable */
  unsigned int force : 1;       /* 1 means MUST move this insn */
  unsigned int global : 1;      /* 1 means reg is live outside this loop */
                /* If PARTIAL is 1, GLOBAL means something different:
                   that the reg is live outside the range from where it is set
                   to the following label.  */
  unsigned int done : 1;        /* 1 inhibits further processing of this */

  unsigned int partial : 1;     /* 1 means this reg is used for zero-extending.
                                   In particular, moving it does not make it
                                   invariant.  */
  unsigned int move_insn : 1;   /* 1 means that we call emit_move_insn to
                                   load SRC, rather than copying INSN.  */
  unsigned int move_insn_first:1;/* Same as above, if this is necessary for the
                                    first insn of a consecutive sets group.  */
  unsigned int is_equiv : 1;    /* 1 means a REG_EQUIV is present on INSN.  */
  unsigned int insert_temp : 1; /* 1 means we copy to a new pseudo and replace
                                   the original insn with a copy from that
                                   pseudo, rather than deleting it.  */
  struct movable *match;        /* First entry for same value */
  struct movable *forces;       /* An insn that must be moved if this is */
  struct movable *next;
};


FILE *loop_dump_stream;

/* Forward declarations.  */

static void invalidate_loops_containing_label (rtx);
static void find_and_verify_loops (rtx, struct loops *);
static void mark_loop_jump (rtx, struct loop *);
static void prescan_loop (struct loop *);
static int reg_in_basic_block_p (rtx, rtx);
static int consec_sets_invariant_p (const struct loop *, rtx, int, rtx);
static int labels_in_range_p (rtx, int);
static void count_one_set (struct loop_regs *, rtx, rtx, rtx *);
static void note_addr_stored (rtx, rtx, void *);
static void note_set_pseudo_multiple_uses (rtx, rtx, void *);
static int loop_reg_used_before_p (const struct loop *, rtx, rtx);
static void scan_loop (struct loop*, int);
#if 0
static void replace_call_address (rtx, rtx, rtx);
#endif
static rtx skip_consec_insns (rtx, int);
static int libcall_benefit (rtx);
static void ignore_some_movables (struct loop_movables *);
static void force_movables (struct loop_movables *);
static void combine_movables (struct loop_movables *, struct loop_regs *);
static int num_unmoved_movables (const struct loop *);
static int regs_match_p (rtx, rtx, struct loop_movables *);
static int rtx_equal_for_loop_p (rtx, rtx, struct loop_movables *,
                                 struct loop_regs *);
static void add_label_notes (rtx, rtx);
static void move_movables (struct loop *loop, struct loop_movables *, int,
                           int);
static void loop_movables_add (struct loop_movables *, struct movable *);
static void loop_movables_free (struct loop_movables *);
static int count_nonfixed_reads (const struct loop *, rtx);
static void loop_bivs_find (struct loop *);
static void loop_bivs_init_find (struct loop *);
static void loop_bivs_check (struct loop *);
static void loop_givs_find (struct loop *);
static void loop_givs_check (struct loop *);
static int loop_biv_eliminable_p (struct loop *, struct iv_class *, int, int);
static int loop_giv_reduce_benefit (struct loop *, struct iv_class *,
                                    struct induction *, rtx);
static void loop_givs_dead_check (struct loop *, struct iv_class *);
static void loop_givs_reduce (struct loop *, struct iv_class *);
static void loop_givs_rescan (struct loop *, struct iv_class *, rtx *);
static void loop_ivs_free (struct loop *);
static void strength_reduce (struct loop *, int);
static void find_single_use_in_loop (struct loop_regs *, rtx, rtx);
static int valid_initial_value_p (rtx, rtx, int, rtx);
static void find_mem_givs (const struct loop *, rtx, rtx, int, int);
static void record_biv (struct loop *, struct induction *, rtx, rtx, rtx,
                        rtx, rtx *, int, int);
static void check_final_value (const struct loop *, struct induction *);
static void loop_ivs_dump (const struct loop *, FILE *, int);
static void loop_iv_class_dump (const struct iv_class *, FILE *, int);
static void loop_biv_dump (const struct induction *, FILE *, int);
static void loop_giv_dump (const struct induction *, FILE *, int);
static void record_giv (const struct loop *, struct induction *, rtx, rtx,
                        rtx, rtx, rtx, rtx, int, enum g_types, int, int,
                        rtx *);
static void update_giv_derive (const struct loop *, rtx);
static void check_ext_dependent_givs (struct iv_class *, struct loop_info *);
static int basic_induction_var (const struct loop *, rtx, enum machine_mode,
                                rtx, rtx, rtx *, rtx *, rtx **);
static rtx simplify_giv_expr (const struct loop *, rtx, rtx *, int *);
static int general_induction_var (const struct loop *loop, rtx, rtx *, rtx *,
                                  rtx *, rtx *, int, int *, enum machine_mode);
static int consec_sets_giv (const struct loop *, int, rtx, rtx, rtx, rtx *,
                            rtx *, rtx *, rtx *);
static int check_dbra_loop (struct loop *, int);
static rtx express_from_1 (rtx, rtx, rtx);
static rtx combine_givs_p (struct induction *, struct induction *);
static int cmp_combine_givs_stats (const void *, const void *);
static void combine_givs (struct loop_regs *, struct iv_class *);
static int product_cheap_p (rtx, rtx);
static int maybe_eliminate_biv (const struct loop *, struct iv_class *, int,
                                int, int);
static int maybe_eliminate_biv_1 (const struct loop *, rtx, rtx,
                                  struct iv_class *, int, basic_block, rtx);
static int last_use_this_basic_block (rtx, rtx);
static void record_initial (rtx, rtx, void *);
static void update_reg_last_use (rtx, rtx);
static rtx next_insn_in_loop (const struct loop *, rtx);
static void loop_regs_scan (const struct loop *, int);
static int count_insns_in_loop (const struct loop *);
static int find_mem_in_note_1 (rtx *, void *);
static rtx find_mem_in_note (rtx);
static void load_mems (const struct loop *);
static int insert_loop_mem (rtx *, void *);
static int replace_loop_mem (rtx *, void *);
static void replace_loop_mems (rtx, rtx, rtx, int);
static int replace_loop_reg (rtx *, void *);
static void replace_loop_regs (rtx insn, rtx, rtx);
static void note_reg_stored (rtx, rtx, void *);
static void try_copy_prop (const struct loop *, rtx, unsigned int);
static void try_swap_copy_prop (const struct loop *, rtx, unsigned int);
static rtx check_insn_for_givs (struct loop *, rtx, int, int);
static rtx check_insn_for_bivs (struct loop *, rtx, int, int);
static rtx gen_add_mult (rtx, rtx, rtx, rtx);
static void loop_regs_update (const struct loop *, rtx);
static int iv_add_mult_cost (rtx, rtx, rtx, rtx);

static rtx loop_insn_emit_after (const struct loop *, basic_block, rtx, rtx);
static rtx loop_call_insn_emit_before (const struct loop *, basic_block,
                                       rtx, rtx);
static rtx loop_call_insn_hoist (const struct loop *, rtx);
static rtx loop_insn_sink_or_swim (const struct loop *, rtx);

static void loop_dump_aux (const struct loop *, FILE *, int);
static void loop_delete_insns (rtx, rtx);
static HOST_WIDE_INT remove_constant_addition (rtx *);
static rtx gen_load_of_final_value (rtx, rtx);
void debug_ivs (const struct loop *);
void debug_iv_class (const struct iv_class *);
void debug_biv (const struct induction *);
void debug_giv (const struct induction *);
void debug_loop (const struct loop *);
void debug_loops (const struct loops *);

typedef struct loop_replace_args
{
  rtx match;
  rtx replacement;
  rtx insn;
} loop_replace_args;

/* Nonzero iff INSN is between START and END, inclusive.  */
#define INSN_IN_RANGE_P(INSN, START, END)       \
  (INSN_UID (INSN) < max_uid_for_loop           \
   && INSN_LUID (INSN) >= INSN_LUID (START)     \
   && INSN_LUID (INSN) <= INSN_LUID (END))

/* Indirect_jump_in_function is computed once per function.  */
static int indirect_jump_in_function;
static int indirect_jump_in_function_p (rtx);

static int compute_luids (rtx, rtx, int);

static int biv_elimination_giv_has_0_offset (struct induction *,
                                             struct induction *, rtx);
\f
/* Benefit penalty, if a giv is not replaceable, i.e. must emit an insn to
   copy the value of the strength reduced giv to its original register.  */
static int copy_cost;

/* Cost of using a register, to normalize the benefits of a giv.  */
static int reg_address_cost;

void
init_loop (void)
{
  rtx reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  reg_address_cost = address_cost (reg, SImode);

  copy_cost = COSTS_N_INSNS (1);
}
\f
/* Compute the mapping from uids to luids.
   LUIDs are numbers assigned to insns, like uids,
   except that luids increase monotonically through the code.
   Start at insn START and stop just before END.  Assign LUIDs
   starting with PREV_LUID + 1.  Return the last assigned LUID + 1.  */
static int
compute_luids (rtx start, rtx end, int prev_luid)
{
  int i;
  rtx insn;

  for (insn = start, i = prev_luid; insn != end; insn = NEXT_INSN (insn))
    {
      if (INSN_UID (insn) >= max_uid_for_loop)
        continue;
      /* Don't assign luids to line-number NOTEs, so that the distance in
         luids between two insns is not affected by -g.  */
      if (GET_CODE (insn) != NOTE
          || NOTE_LINE_NUMBER (insn) <= 0)
        uid_luid[INSN_UID (insn)] = ++i;
      else
        /* Give a line number note the same luid as preceding insn.  */
        uid_luid[INSN_UID (insn)] = i;
    }
  return i + 1;
}
\f
/* Entry point of this file.  Perform loop optimization
   on the current function.  F is the first insn of the function
   and DUMPFILE is a stream for output of a trace of actions taken
   (or 0 if none should be output).  */

void
loop_optimize (rtx f, FILE *dumpfile, int flags)
{
  rtx insn;
  int i;
  struct loops loops_data;
  struct loops *loops = &loops_data;
  struct loop_info *loops_info;

  loop_dump_stream = dumpfile;

  init_recog_no_volatile ();

  max_reg_before_loop = max_reg_num ();
  loop_max_reg = max_reg_before_loop;

  regs_may_share = 0;

  /* Count the number of loops.  */

  max_loop_num = 0;
  for (insn = f; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == NOTE
          && NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
        max_loop_num++;
    }

  /* Don't waste time if no loops.  */
  if (max_loop_num == 0)
    return;

  loops->num = max_loop_num;

  /* Get size to use for tables indexed by uids.
     Leave some space for labels allocated by find_and_verify_loops.  */
  max_uid_for_loop = get_max_uid () + 1 + max_loop_num * 32;

  uid_luid = (int *) xcalloc (max_uid_for_loop, sizeof (int));
  uid_loop = (struct loop **) xcalloc (max_uid_for_loop,
                                       sizeof (struct loop *));

  /* Allocate storage for array of loops.  */
  loops->array = (struct loop *)
    xcalloc (loops->num, sizeof (struct loop));

  /* Find and process each loop.
     First, find them, and record them in order of their beginnings.  */
  find_and_verify_loops (f, loops);

  /* Allocate and initialize auxiliary loop information.  */
  loops_info = xcalloc (loops->num, sizeof (struct loop_info));
  for (i = 0; i < (int) loops->num; i++)
    loops->array[i].aux = loops_info + i;

  /* Now find all register lifetimes.  This must be done after
     find_and_verify_loops, because it might reorder the insns in the
     function.  */
  reg_scan (f, max_reg_before_loop, 1);

  /* This must occur after reg_scan so that registers created by gcse
     will have entries in the register tables.

     We could have added a call to reg_scan after gcse_main in toplev.c,
     but moving this call to init_alias_analysis is more efficient.  */
  init_alias_analysis ();

  /* See if we went too far.  Note that get_max_uid already returns
     one more than the maximum uid of all insns.  */
  if (get_max_uid () > max_uid_for_loop)
    abort ();
  /* Now reset it to the actual size we need.  See above.  */
  max_uid_for_loop = get_max_uid ();

  /* find_and_verify_loops has already called compute_luids, but it
     might have rearranged code afterwards, so we need to recompute
     the luids now.  */
  compute_luids (f, NULL_RTX, 0);

  /* Don't leave gaps in uid_luid for insns that have been
     deleted.  It is possible that the first or last insn
     using some register has been deleted by cross-jumping.
     Make sure that uid_luid for that former insn's uid
     points to the general area where that insn used to be.  */
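  /* First seed uid_luid[0] with the earliest nonzero luid, so that the
     backfill loop below always has a nonzero value to propagate.  */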
  for (i = 0; i < max_uid_for_loop; i++)
    {
      uid_luid[0] = uid_luid[i];
      if (uid_luid[0] != 0)
        break;
    }
  for (i = 0; i < max_uid_for_loop; i++)
    if (uid_luid[i] == 0)
      uid_luid[i] = uid_luid[i - 1];

  /* Determine if the function has an indirect jump.  On some systems
     this prevents low overhead loop instructions from being used.  */
  indirect_jump_in_function = indirect_jump_in_function_p (f);

  /* Now scan the loops, last ones first, since this means inner ones are done
     before outer ones.  */
  for (i = max_loop_num - 1; i >= 0; i--)
    {
      struct loop *loop = &loops->array[i];

      if (! loop->invalid && loop->end)
        scan_loop (loop, flags);
    }

  end_alias_analysis ();

  /* Clean up.  */
  free (uid_luid);
  free (uid_loop);
  free (loops_info);
  free (loops->array);
}
\f
/* Returns the next insn, in execution order, after INSN.  START and
   END are the NOTE_INSN_LOOP_BEG and NOTE_INSN_LOOP_END for the loop,
   respectively.  LOOP->TOP, if non-NULL, is the top of the loop in the
   insn-stream; it is used with loops that are entered near the
   bottom.  */

static rtx
next_insn_in_loop (const struct loop *loop, rtx insn)
{
  insn = NEXT_INSN (insn);

  if (insn == loop->end)
    {
      if (loop->top)
        /* Go to the top of the loop, and continue there.  */
        insn = loop->top;
      else
        /* We're done.  */
        insn = NULL_RTX;
    }

  if (insn == loop->scan_start)
    /* We're done.  */
    insn = NULL_RTX;

  return insn;
}

/* Optimize one loop described by LOOP.  */

/* ??? Could also move memory writes out of loops if the destination address
   is invariant, the source is invariant, the memory write is not volatile,
   and if we can prove that no read inside the loop can read this address
   before the write occurs.  If there is a read of this address after the
   write, then we can also mark the memory read as invariant.  */
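
/* As a hypothetical sketch of that suggestion:

       for (...) { *p = v; ... }       with p and v both loop-invariant

   could become

       *p = v;
       for (...) { ... }               reads of *p inside the loop could
                                       then be treated as invariant too

   provided no read inside the loop can reach *p before the store does.  */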

static void
scan_loop (struct loop *loop, int flags)
{
  struct loop_info *loop_info = LOOP_INFO (loop);
  struct loop_regs *regs = LOOP_REGS (loop);
  int i;
  rtx loop_start = loop->start;
  rtx loop_end = loop->end;
  rtx p;
  /* 1 if we are scanning insns that could be executed zero times.  */
  int maybe_never = 0;
  /* 1 if we are scanning insns that might never be executed
     due to a subroutine call which might exit before they are reached.  */
  int call_passed = 0;
  /* Number of insns in the loop.  */
  int insn_count;
  int tem;
  rtx temp, update_start, update_end;
  /* The SET from an insn, if it is the only SET in the insn.  */
  rtx set, set1;
  /* Chain describing insns movable in current loop.  */
  struct loop_movables *movables = LOOP_MOVABLES (loop);
  /* Ratio of extra register life span we can justify
     for saving an instruction.  More if loop doesn't call subroutines
     since in that case saving an insn makes more difference
     and more registers are available.  */
  int threshold;
  /* Nonzero if we are scanning instructions in a sub-loop.  */
  int loop_depth = 0;
  int in_libcall;

  loop->top = 0;

  movables->head = 0;
  movables->last = 0;

  /* Determine whether this loop starts with a jump down to a test at
     the end.  This will occur for a small number of loops with a test
     that is too complex to duplicate in front of the loop.

     We search for the first insn or label in the loop, skipping NOTEs.
     However, we must be careful not to skip past a NOTE_INSN_LOOP_BEG
     (because we might have a loop executed only once that contains a
     loop which starts with a jump to its exit test) or a NOTE_INSN_LOOP_END
     (in case we have a degenerate loop).

     Note that if we mistakenly think that a loop is entered at the top
     when, in fact, it is entered at the exit test, the only effect will be
     slightly poorer optimization.  Making the opposite error can generate
     incorrect code.  Since very few loops now start with a jump to the
     exit test, the code here to detect that case is very conservative.  */
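
  /* As an illustration (hypothetical labels, not taken from real insns),
     such a bottom-entered loop is laid out like this:

         NOTE_INSN_LOOP_BEG
         (jump L2)               ;; entry jump; L2 becomes loop->scan_start
     L1:                         ;; loop->top
         ... loop body ...
     L2:                         ;; the exit test, placed at the bottom
         (conditional jump L1)
         NOTE_INSN_LOOP_END  */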

  for (p = NEXT_INSN (loop_start);
       p != loop_end
         && GET_CODE (p) != CODE_LABEL && ! INSN_P (p)
         && (GET_CODE (p) != NOTE
             || (NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_BEG
                 && NOTE_LINE_NUMBER (p) != NOTE_INSN_LOOP_END));
       p = NEXT_INSN (p))
    ;

  loop->scan_start = p;

  /* If loop end is the end of the current function, then emit a
     NOTE_INSN_DELETED after loop_end and set loop->sink to the dummy
     note insn.  This is the position we use when sinking insns out of
     the loop.  */
  if (NEXT_INSN (loop->end) != 0)
    loop->sink = NEXT_INSN (loop->end);
  else
    loop->sink = emit_note_after (NOTE_INSN_DELETED, loop->end);

  /* Set up variables describing this loop.  */
  prescan_loop (loop);
  threshold = (loop_info->has_call ? 1 : 2) * (1 + n_non_fixed_regs);

  /* If loop has a jump before the first label,
     the true entry is the target of that jump.
     Start scan from there.
     But record in LOOP->TOP the place where the end-test jumps
     back to so we can scan that after the end of the loop.  */
  if (GET_CODE (p) == JUMP_INSN
      /* Loop entry must be unconditional jump (and not a RETURN)  */
      && any_uncondjump_p (p)
      && JUMP_LABEL (p) != 0
      /* Check to see whether the jump actually
         jumps out of the loop (meaning it's no loop).
         This case can happen for things like
         do {..} while (0).  If this label was generated previously
         by loop, we can't tell anything about it and have to reject
         the loop.  */
      && INSN_IN_RANGE_P (JUMP_LABEL (p), loop_start, loop_end))
    {
      loop->top = next_label (loop->scan_start);
      loop->scan_start = JUMP_LABEL (p);
    }

  /* If LOOP->SCAN_START was an insn created by loop, we don't know its luid
     as required by loop_reg_used_before_p.  So skip such loops.  (This
     test may never be true, but it's best to play it safe.)

     Also, skip loops where we do not start scanning at a label.  This
     test also rejects loops starting with a JUMP_INSN that failed the
     test above.  */

  if (INSN_UID (loop->scan_start) >= max_uid_for_loop
      || GET_CODE (loop->scan_start) != CODE_LABEL)
    {
      if (loop_dump_stream)
        fprintf (loop_dump_stream, "\nLoop from %d to %d is phony.\n\n",
                 INSN_UID (loop_start), INSN_UID (loop_end));
      return;
    }

  /* Allocate extra space for REGs that might be created by load_mems.
     We allocate a little extra slop as well, in the hopes that we
     won't have to reallocate the regs array.  */
  loop_regs_scan (loop, loop_info->mems_idx + 16);
  insn_count = count_insns_in_loop (loop);

  if (loop_dump_stream)
    {
      fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n",
               INSN_UID (loop_start), INSN_UID (loop_end), insn_count);
      if (loop->cont)
        fprintf (loop_dump_stream, "Continue at insn %d.\n",
                 INSN_UID (loop->cont));
    }

  /* Scan through the loop finding insns that are safe to move.
     Set REGS->ARRAY[I].SET_IN_LOOP negative for the reg I being set, so that
     this reg will be considered invariant for subsequent insns.
     We consider whether subsequent insns use the reg
     in deciding whether it is worth actually moving.

     MAYBE_NEVER is nonzero if we have passed a conditional jump insn
     and therefore it is possible that the insns we are scanning
     would never be executed.  At such times, we must make sure
     that it is safe to execute the insn once instead of zero times.
     When MAYBE_NEVER is 0, all insns will be executed at least once
     so that is not a problem.  */

  for (in_libcall = 0, p = next_insn_in_loop (loop, loop->scan_start);
       p != NULL_RTX;
       p = next_insn_in_loop (loop, p))
    {
      if (in_libcall && INSN_P (p) && find_reg_note (p, REG_RETVAL, NULL_RTX))
        in_libcall--;
      if (GET_CODE (p) == INSN)
        {
          temp = find_reg_note (p, REG_LIBCALL, NULL_RTX);
          if (temp)
            in_libcall++;
          if (! in_libcall
              && (set = single_set (p))
              && GET_CODE (SET_DEST (set)) == REG
#ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
              && SET_DEST (set) != pic_offset_table_rtx
#endif
              && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
            {
              int tem1 = 0;
              int tem2 = 0;
              int move_insn = 0;
              int insert_temp = 0;
              rtx src = SET_SRC (set);
              rtx dependencies = 0;

              /* Figure out what to use as a source of this insn.  If a
                 REG_EQUIV note is given or if a REG_EQUAL note with a
                 constant operand is specified, use it as the source and
                 mark that we should move this insn by calling
                 emit_move_insn rather than duplicating the insn.

                 Otherwise, only use the REG_EQUAL contents if a REG_RETVAL
                 note is present.  */
              temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
              if (temp)
                src = XEXP (temp, 0), move_insn = 1;
              else
                {
                  temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
                  if (temp && CONSTANT_P (XEXP (temp, 0)))
                    src = XEXP (temp, 0), move_insn = 1;
                  if (temp && find_reg_note (p, REG_RETVAL, NULL_RTX))
                    {
                      src = XEXP (temp, 0);
                      /* A libcall block can use regs that don't appear in
                         the equivalent expression.  To move the libcall,
                         we must move those regs too.  */
                      dependencies = libcall_other_reg (p, src);
                    }
                }

              /* For parallels, add any possible uses to the dependencies, as
                 we can't move the insn without resolving them first.  */
              if (GET_CODE (PATTERN (p)) == PARALLEL)
                {
                  for (i = 0; i < XVECLEN (PATTERN (p), 0); i++)
                    {
                      rtx x = XVECEXP (PATTERN (p), 0, i);
                      if (GET_CODE (x) == USE)
                        dependencies
                          = gen_rtx_EXPR_LIST (VOIDmode, XEXP (x, 0),
                                               dependencies);
                    }
                }

              if (/* The register is used in basic blocks other
                     than the one where it is set (meaning that
                     something after this point in the loop might
                     depend on its value before the set).  */
                  ! reg_in_basic_block_p (p, SET_DEST (set))
                  /* And the set is not guaranteed to be executed once
                     the loop starts, or the value before the set is
                     needed before the set occurs...

                     ??? Note we have quadratic behavior here, mitigated
                     by the fact that the previous test will often fail for
                     large loops.  Rather than re-scanning the entire loop
                     each time for register usage, we should build tables
                     of the register usage and use them here instead.  */
                  && (maybe_never
                      || loop_reg_used_before_p (loop, set, p)))
                /* It is unsafe to move the set.  However, it may be OK to
                   move the source into a new pseudo, and substitute a
                   reg-to-reg copy for the original insn.

                   This code used to consider it OK to move a set of a variable
                   which was not created by the user and not used in an exit
                   test.
                   That behavior is incorrect and was removed.  */
                insert_temp = 1;
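              /* A sketch of that substitution (hypothetical insns):
                 instead of moving
                     (set (reg A) (expr))
                 out of the loop, emit
                     (set (reg TMP) (expr))
                 before the loop and leave
                     (set (reg A) (reg TMP))
                 in the body, where TMP is a fresh pseudo.  */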

              /* Don't try to optimize a MODE_CC set with a constant
                 source.  It probably will be combined with a conditional
                 jump.  */
              if (GET_MODE_CLASS (GET_MODE (SET_DEST (set))) == MODE_CC
                  && CONSTANT_P (src))
                ;
              /* Don't try to optimize a register that was made
                 by loop-optimization for an inner loop.
                 We don't know its life-span, so we can't compute
                 the benefit.  */
              else if (REGNO (SET_DEST (set)) >= max_reg_before_loop)
                ;
              /* Don't move the source and add a reg-to-reg copy:
                 - with -Os (this certainly increases size),
                 - if the mode doesn't support copy operations (obviously),
                 - if the source is already a reg (the motion will gain nothing),
                 - if the source is a legitimate constant (likewise).  */
              else if (insert_temp
                       && (optimize_size
                           || ! can_copy_p (GET_MODE (SET_SRC (set)))
                           || GET_CODE (SET_SRC (set)) == REG
                           || (CONSTANT_P (SET_SRC (set))
                               && LEGITIMATE_CONSTANT_P (SET_SRC (set)))))
                ;
              else if ((tem = loop_invariant_p (loop, src))
                       && (dependencies == 0
                           || (tem2
                               = loop_invariant_p (loop, dependencies)) != 0)
                       && (regs->array[REGNO (SET_DEST (set))].set_in_loop == 1
                           || (tem1
                               = consec_sets_invariant_p
                               (loop, SET_DEST (set),
                                regs->array[REGNO (SET_DEST (set))].set_in_loop,
                                p)))
                       /* If the insn can cause a trap (such as divide by zero),
                          can't move it unless it's guaranteed to be executed
                          once loop is entered.  Even a function call might
                          prevent the trap insn from being reached
                          (since it might exit!)  */
                       && ! ((maybe_never || call_passed)
                             && may_trap_p (src)))
                {
                  struct movable *m;
                  int regno = REGNO (SET_DEST (set));

                  /* A potential lossage is where we have a case where two insns
                     can be combined as long as they are both in the loop, but
                     we move one of them outside the loop.  For large loops,
                     this can lose.  The most common case of this is the address
                     of a function being called.

                     Therefore, if this register is marked as being used
                     exactly once if we are in a loop with calls
                     (a "large loop"), see if we can replace the usage of
                     this register with the source of this SET.  If we can,
                     delete this insn.

                     Don't do this if P has a REG_RETVAL note or if we have
                     SMALL_REGISTER_CLASSES and SET_SRC is a hard register.  */
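                  /* For instance (a hypothetical case): if the only use of
                     the reg A set here is a call through it,
                         (set (reg A) (symbol_ref F)) ... (call (mem (reg A)))
                     the call can be rewritten to use (symbol_ref F) directly
                     and this set deleted.  */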

                  if (loop_info->has_call
                      && regs->array[regno].single_usage != 0
                      && regs->array[regno].single_usage != const0_rtx
                      && REGNO_FIRST_UID (regno) == INSN_UID (p)
                      && (REGNO_LAST_UID (regno)
                          == INSN_UID (regs->array[regno].single_usage))
                      && regs->array[regno].set_in_loop == 1
                      && GET_CODE (SET_SRC (set)) != ASM_OPERANDS
                      && ! side_effects_p (SET_SRC (set))
                      && ! find_reg_note (p, REG_RETVAL, NULL_RTX)
                      && (! SMALL_REGISTER_CLASSES
                          || (! (GET_CODE (SET_SRC (set)) == REG
                                 && (REGNO (SET_SRC (set))
                                     < FIRST_PSEUDO_REGISTER))))
                      /* This test is not redundant; SET_SRC (set) might be
                         a call-clobbered register and the life of REGNO
                         might span a call.  */
                      && ! modified_between_p (SET_SRC (set), p,
                                               regs->array[regno].single_usage)
                      && no_labels_between_p (p,
                                              regs->array[regno].single_usage)
                      && validate_replace_rtx (SET_DEST (set), SET_SRC (set),
                                               regs->array[regno].single_usage))
                    {
                      /* Replace any usage in a REG_EQUAL note.  Must copy
                         the new source, so that we don't get rtx sharing
                         between the SET_SOURCE and REG_NOTES of insn p.  */
                      REG_NOTES (regs->array[regno].single_usage)
                        = (replace_rtx
                           (REG_NOTES (regs->array[regno].single_usage),
                            SET_DEST (set), copy_rtx (SET_SRC (set))));

                      delete_insn (p);
                      for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
                           i++)
                        regs->array[regno+i].set_in_loop = 0;
                      continue;
                    }

                  m = (struct movable *) xmalloc (sizeof (struct movable));
                  m->next = 0;
                  m->insn = p;
                  m->set_src = src;
                  m->dependencies = dependencies;
                  m->set_dest = SET_DEST (set);
                  m->force = 0;
                  m->consec
                    = regs->array[REGNO (SET_DEST (set))].set_in_loop - 1;
                  m->done = 0;
                  m->forces = 0;
                  m->partial = 0;
                  m->move_insn = move_insn;
                  m->move_insn_first = 0;
                  m->insert_temp = insert_temp;
                  m->is_equiv = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
                  m->savemode = VOIDmode;
                  m->regno = regno;
                  /* Set M->cond if either loop_invariant_p
                     or consec_sets_invariant_p returned 2
                     (only conditionally invariant).  */
                  m->cond = ((tem | tem1 | tem2) > 1);
                  m->global = LOOP_REG_GLOBAL_P (loop, regno);
                  m->match = 0;
                  m->lifetime = LOOP_REG_LIFETIME (loop, regno);
                  m->savings = regs->array[regno].n_times_set;
                  if (find_reg_note (p, REG_RETVAL, NULL_RTX))
                    m->savings += libcall_benefit (p);
                  for (i = 0; i < LOOP_REGNO_NREGS (regno, SET_DEST (set)); i++)
                    regs->array[regno+i].set_in_loop = move_insn ? -2 : -1;
                  /* Add M to the end of the chain MOVABLES.  */
                  loop_movables_add (movables, m);

                  if (m->consec > 0)
                    {
                      /* It is possible for the first instruction to have a
                         REG_EQUAL note but a non-invariant SET_SRC, so we must
                         remember the status of the first instruction in case
                         the last instruction doesn't have a REG_EQUAL note.  */
                      m->move_insn_first = m->move_insn;

                      /* Skip this insn, not checking REG_LIBCALL notes.  */
                      p = next_nonnote_insn (p);
                      /* Skip the consecutive insns, if there are any.  */
                      p = skip_consec_insns (p, m->consec);
                      /* Back up to the last insn of the consecutive group.  */
                      p = prev_nonnote_insn (p);

                      /* We must now reset m->move_insn, m->is_equiv, and
                         possibly m->set_src to correspond to the effects of
                         all the insns.  */
                      temp = find_reg_note (p, REG_EQUIV, NULL_RTX);
                      if (temp)
                        m->set_src = XEXP (temp, 0), m->move_insn = 1;
                      else
                        {
                          temp = find_reg_note (p, REG_EQUAL, NULL_RTX);
                          if (temp && CONSTANT_P (XEXP (temp, 0)))
                            m->set_src = XEXP (temp, 0), m->move_insn = 1;
                          else
                            m->move_insn = 0;

                        }
                      m->is_equiv
                        = (find_reg_note (p, REG_EQUIV, NULL_RTX) != 0);
                    }
                }
              /* If this register is always set within a STRICT_LOW_PART
                 or set to zero, then its high bytes are constant.
                 So clear them outside the loop and within the loop
                 just load the low bytes.
                 We must check that the machine has an instruction to do so.
                 Also, if the value loaded into the register
                 depends on the same register, this cannot be done.  */
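              /* I.e. the insn pair being matched looks like this sketch
                 (the modes are only illustrative):
                     (set (reg:SI X) (const_int 0))
                     (set (strict_low_part (subreg:HI (reg:SI X))) ...)  */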
              else if (SET_SRC (set) == const0_rtx
                       && GET_CODE (NEXT_INSN (p)) == INSN
                       && (set1 = single_set (NEXT_INSN (p)))
                       && GET_CODE (set1) == SET
                       && (GET_CODE (SET_DEST (set1)) == STRICT_LOW_PART)
                       && (GET_CODE (XEXP (SET_DEST (set1), 0)) == SUBREG)
                       && (SUBREG_REG (XEXP (SET_DEST (set1), 0))
                           == SET_DEST (set))
                       && !reg_mentioned_p (SET_DEST (set), SET_SRC (set1)))
                {
                  int regno = REGNO (SET_DEST (set));
                  if (regs->array[regno].set_in_loop == 2)
                    {
                      struct movable *m;
                      m = (struct movable *) xmalloc (sizeof (struct movable));
                      m->next = 0;
                      m->insn = p;
                      m->set_dest = SET_DEST (set);
                      m->dependencies = 0;
                      m->force = 0;
                      m->consec = 0;
                      m->done = 0;
                      m->forces = 0;
                      m->move_insn = 0;
                      m->move_insn_first = 0;
                      m->insert_temp = insert_temp;
                      m->partial = 1;
                      /* If the insn may not be executed on some cycles,
                         we can't clear the whole reg; clear just high part.
                         Not even if the reg is used only within this loop.
                         Consider this:
                         while (1)
                           while (s != t) {
                             if (foo ()) x = *s;
                             use (x);
                           }
                         Clearing x before the inner loop could clobber a value
                         being saved from the last time around the outer loop.
                         However, if the reg is not used outside this loop
                         and all uses of the register are in the same
                         basic block as the store, there is no problem.

                         If this insn was made by loop, we don't know its
                         INSN_LUID and hence must make a conservative
                         assumption.  */
                      m->global = (INSN_UID (p) >= max_uid_for_loop
                                   || LOOP_REG_GLOBAL_P (loop, regno)
                                   || (labels_in_range_p
                                       (p, REGNO_FIRST_LUID (regno))));
                      if (maybe_never && m->global)
                        m->savemode = GET_MODE (SET_SRC (set1));
                      else
                        m->savemode = VOIDmode;
                      m->regno = regno;
                      m->cond = 0;
                      m->match = 0;
                      m->lifetime = LOOP_REG_LIFETIME (loop, regno);
                      m->savings = 1;
                      for (i = 0;
                           i < LOOP_REGNO_NREGS (regno, SET_DEST (set));
                           i++)
                        regs->array[regno+i].set_in_loop = -1;
                      /* Add M to the end of the chain MOVABLES.  */
                      loop_movables_add (movables, m);
                    }
                }
            }
        }
      /* Past a call insn, we get to insns which might not be executed
         because the call might exit.  This matters for insns that trap.
         Constant and pure call insns always return, so they don't count.  */
      else if (GET_CODE (p) == CALL_INSN && ! CONST_OR_PURE_CALL_P (p))
        call_passed = 1;
      /* Past a label or a jump, we get to insns for which we
         can't count on whether or how many times they will be
         executed during each iteration.  Therefore, we can
         only move out sets of trivial variables
         (those not used after the loop).  */
      /* Similar code appears twice in strength_reduce.  */
      else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN)
               /* If we enter the loop in the middle, and scan around to the
                  beginning, don't set maybe_never for that.  This must be an
                  unconditional jump, otherwise the code at the top of the
                  loop might never be executed.  Unconditional jumps are
                  followed by a barrier then the loop_end.  */
               && ! (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p) == loop->top
                     && NEXT_INSN (NEXT_INSN (p)) == loop_end
                     && any_uncondjump_p (p)))
        maybe_never = 1;
      else if (GET_CODE (p) == NOTE)
        {
          /* At the virtual top of a converted loop, insns are again known to
             be executed: logically, the loop begins here even though the exit
             code has been duplicated.  */
          if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP && loop_depth == 0)
            maybe_never = call_passed = 0;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
            loop_depth++;
          else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
            loop_depth--;
        }
    }

  /* If one movable subsumes another, ignore that other.  */

  ignore_some_movables (movables);

  /* For each movable insn, see if the reg that it loads feeds, at the
     point where it dies, straight into another conditionally movable insn.
     If so, record that the second insn "forces" the first one,
     since the second can be moved only if the first is.  */

  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them
     all together as the priority of the first.  */
  combine_movables (movables, regs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in regs->array[I].set_in_loop for each reg I that is moved.

     For machines with few registers this increases code size, so do not
     move movables when optimizing for code size on such machines.
     (The 18 below is the value for i386.)  */

  if (!optimize_size
      || (reg_class_size[GENERAL_REGS] > 18 && !loop_info->has_call))
    {
      move_movables (loop, movables, threshold, insn_count);

      /* Recalculate regs->array if move_movables has created new
         registers.  */
      if (max_reg_num () > regs->num)
        {
          loop_regs_scan (loop, 0);
          for (update_start = loop_start;
               PREV_INSN (update_start)
                 && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
               update_start = PREV_INSN (update_start))
            ;
          update_end = NEXT_INSN (loop_end);

          reg_scan_update (update_start, update_end, loop_max_reg);
          loop_max_reg = max_reg_num ();
        }
    }

  /* Now candidates that still are negative are those not moved.
     Change regs->array[I].set_in_loop to indicate that those are not actually
     invariant.  */
  for (i = 0; i < regs->num; i++)
    if (regs->array[i].set_in_loop < 0)
      regs->array[i].set_in_loop = regs->array[i].n_times_set;

  /* Now that we've moved some things out of the loop, we might be able to
     hoist even more memory references.  */
  load_mems (loop);

  /* Recalculate regs->array if load_mems has created new registers.  */
  if (max_reg_num () > regs->num)
    loop_regs_scan (loop, 0);

  for (update_start = loop_start;
       PREV_INSN (update_start)
         && GET_CODE (PREV_INSN (update_start)) != CODE_LABEL;
       update_start = PREV_INSN (update_start))
    ;
  update_end = NEXT_INSN (loop_end);

  reg_scan_update (update_start, update_end, loop_max_reg);
  loop_max_reg = max_reg_num ();

  if (flag_strength_reduce)
    {
      if (update_end && GET_CODE (update_end) == CODE_LABEL)
        /* Ensure our label doesn't go away.  */
        LABEL_NUSES (update_end)++;

      strength_reduce (loop, flags);

      reg_scan_update (update_start, update_end, loop_max_reg);
      loop_max_reg = max_reg_num ();

      if (update_end && GET_CODE (update_end) == CODE_LABEL
          && --LABEL_NUSES (update_end) == 0)
        delete_related_insns (update_end);
    }


  /* The movable information is required for strength reduction.  */
  loop_movables_free (movables);

  free (regs->array);
  regs->array = 0;
  regs->num = 0;
}
\f
/* Add elements to *OUTPUT to record all the pseudo-regs
   mentioned in IN_THIS but not mentioned in NOT_IN_THIS.  */

void
record_excess_regs (rtx in_this, rtx not_in_this, rtx *output)
{
  enum rtx_code code;
  const char *fmt;
  int i;

  code = GET_CODE (in_this);

  switch (code)
    {
    case PC:
    case CC0:
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return;

    case REG:
      if (REGNO (in_this) >= FIRST_PSEUDO_REGISTER
          && ! reg_mentioned_p (in_this, not_in_this))
        *output = gen_rtx_EXPR_LIST (VOIDmode, in_this, *output);
      return;

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      switch (fmt[i])
        {
        case 'E':
          for (j = 0; j < XVECLEN (in_this, i); j++)
            record_excess_regs (XVECEXP (in_this, i, j), not_in_this, output);
          break;

        case 'e':
          record_excess_regs (XEXP (in_this, i), not_in_this, output);
          break;
        }
    }
}
\f
/* Check what regs are referred to in the libcall block ending with INSN,
   aside from those mentioned in the equivalent value.
   If there are none, return 0.
   If there are one or more, return an EXPR_LIST containing all of them.  */

rtx
libcall_other_reg (rtx insn, rtx equiv)
{
  rtx note = find_reg_note (insn, REG_RETVAL, NULL_RTX);
  rtx p = XEXP (note, 0);
  rtx output = 0;

  /* First, find all the regs used in the libcall block
     that are not mentioned as inputs to the result.  */

  while (p != insn)
    {
      if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
          || GET_CODE (p) == CALL_INSN)
        record_excess_regs (PATTERN (p), equiv, &output);
      p = NEXT_INSN (p);
    }

  return output;
}
\f
/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int
reg_in_basic_block_p (rtx insn, rtx reg)
{
  int regno = REGNO (reg);
  rtx p;

  if (REGNO_FIRST_UID (regno) != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
        {
        case NOTE:
          break;

        case INSN:
        case CALL_INSN:
          /* Ordinary insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          break;

        case JUMP_INSN:
          /* Jump insn: if this is the last use, we win.  */
          if (REGNO_LAST_UID (regno) == INSN_UID (p))
            return 1;
          /* Otherwise, it's the end of the basic block, so we lose.  */
          return 0;

        case CODE_LABEL:
        case BARRIER:
          /* It's the end of the basic block, so we lose.  */
          return 0;

        default:
          break;
        }
    }

  /* The "last use" that was recorded can't be found after the first
     use.  This can happen when the last use was deleted while
     processing an inner loop, this inner loop was then completely
     unrolled, and the outer loop is always exited after the inner loop,
     so that everything after the first use becomes a single basic block.  */
  return 1;
}
\f
/* Compute the benefit of eliminating the insns in the block whose
   last insn is LAST.  This may be a group of insns used to compute a
   value directly or can contain a library call.  */

static int
libcall_benefit (rtx last)
{
  rtx insn;
  int benefit = 0;

  for (insn = XEXP (find_reg_note (last, REG_RETVAL, NULL_RTX), 0);
       insn != last; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CALL_INSN)
        benefit += 10;          /* Assume at least this many insns in a
                                   library routine.  */
      else if (GET_CODE (insn) == INSN
               && GET_CODE (PATTERN (insn)) != USE
               && GET_CODE (PATTERN (insn)) != CLOBBER)
        benefit++;
    }

  return benefit;
}
\f
/* Skip COUNT insns from INSN, counting library calls as 1 insn.  */

static rtx
skip_consec_insns (rtx insn, int count)
{
  for (; count > 0; count--)
    {
      rtx temp;

      /* If first insn of libcall sequence, skip to end.  */
      /* Do this at start of loop, since INSN is guaranteed to
         be an insn here.  */
      if (GET_CODE (insn) != NOTE
          && (temp = find_reg_note (insn, REG_LIBCALL, NULL_RTX)))
        insn = XEXP (temp, 0);

      do
        insn = NEXT_INSN (insn);
      while (GET_CODE (insn) == NOTE);
    }

  return insn;
}

/* Ignore any movable whose insn falls within a libcall
   which is part of another movable.
   We make use of the fact that the movable for the libcall value
   was made later and so appears later on the chain.  */

static void
ignore_some_movables (struct loop_movables *movables)
{
  struct movable *m, *m1;

  for (m = movables->head; m; m = m->next)
    {
      /* Is this a movable for the value of a libcall?  */
      rtx note = find_reg_note (m->insn, REG_RETVAL, NULL_RTX);
      if (note)
        {
          rtx insn;
          /* Check for earlier movables inside that range,
             and mark them invalid.  We cannot use LUIDs here because
             insns created by loop.c for prior loops don't have LUIDs.
             Rather than reject all such insns from movables, we just
             explicitly check each insn in the libcall (since invariant
             libcalls aren't that common).  */
          for (insn = XEXP (note, 0); insn != m->insn; insn = NEXT_INSN (insn))
            for (m1 = movables->head; m1 != m; m1 = m1->next)
              if (m1->insn == insn)
                m1->done = 1;
        }
    }
}

/* For each movable insn, see if the reg that it loads feeds, at the point
   where it dies, straight into another conditionally movable insn.
   If so, record that the second insn "forces" the first one,
   since the second can be moved only if the first is.  */

static void
force_movables (struct loop_movables *movables)
{
  struct movable *m, *m1;

  for (m1 = movables->head; m1; m1 = m1->next)
    /* Omit this if moving just the (SET (REG) 0) of a zero-extend.  */
    if (!m1->partial && !m1->done)
      {
        int regno = m1->regno;
        for (m = m1->next; m; m = m->next)
          /* ??? Could this be a bug?  What if CSE caused the
             register of M1 to be used after this insn?
             Since CSE does not update regno_last_uid,
             this insn M->insn might not be where it dies.
             But very likely this doesn't matter; what matters is
             that M's reg is computed from M1's reg.  */
          if (INSN_UID (m->insn) == REGNO_LAST_UID (regno)
              && !m->done)
            break;
        if (m != 0 && m->set_src == m1->set_dest
            /* If m->consec, m->set_src isn't valid.  */
            && m->consec == 0)
          m = 0;

        /* Increase the priority of moving the first insn
           since it permits the second to be moved as well.  */
1427 if (m != 0)
1428 {
1429 m->forces = m1;
1430 m1->lifetime += m->lifetime;
fbeaeaad 1431 m1->savings += m->savings;
04aa27b1 1432 }
1433 }
1434}
1435\f
1436/* Find invariant expressions that are equal and can be combined into
1437 one register. */
1438
1439static void
3ad4992f 1440combine_movables (struct loop_movables *movables, struct loop_regs *regs)
04aa27b1 1441{
19cb6b50 1442 struct movable *m;
8ec5f078 1443 char *matched_regs = (char *) xmalloc (regs->num);
04aa27b1 1444 enum machine_mode mode;
1445
1446 /* Regs that are set more than once are not allowed to match
1447 or be matched. I'm no longer sure why not. */
0e6137dd 1448 /* Only pseudo registers are allowed to match or be matched,
1449 since move_movables does not validate the change. */
04aa27b1 1450 /* Perhaps testing m->consec_sets would be more appropriate here? */
1451
5695261c 1452 for (m = movables->head; m; m = m->next)
05cb4e54 1453 if (m->match == 0 && regs->array[m->regno].n_times_set == 1
0e6137dd 1454 && m->regno >= FIRST_PSEUDO_REGISTER
62b58625 1455 && !m->insert_temp
4a8f0b95 1456 && !m->partial)
04aa27b1 1457 {
19cb6b50 1458 struct movable *m1;
04aa27b1 1459 int regno = m->regno;
04aa27b1 1460
93d3b7de 1461 memset (matched_regs, 0, regs->num);
04aa27b1 1462 matched_regs[regno] = 1;
1463
163c015f 1464 /* We want later insns to match the first one. Don't make the first
1465 one match any later ones. So start this loop at m->next. */
1466 for (m1 = m->next; m1; m1 = m1->next)
0e6137dd 1467 if (m != m1 && m1->match == 0
62b58625 1468 && !m1->insert_temp
05cb4e54 1469 && regs->array[m1->regno].n_times_set == 1
0e6137dd 1470 && m1->regno >= FIRST_PSEUDO_REGISTER
04aa27b1 1471 /* A reg used outside the loop mustn't be eliminated. */
1472 && !m1->global
1473 /* A reg used for zero-extending mustn't be eliminated. */
1474 && !m1->partial
1475 && (matched_regs[m1->regno]
1476 ||
1477 (
1478 /* Can combine regs with different modes loaded from the
1479 same constant only if the modes are the same or
1480 if both are integer modes with M wider or the same
1481 width as M1. The check for integer is redundant, but
1482 safe, since the only case of differing destination
1483 modes with equal sources is when both sources are
1484 VOIDmode, i.e., CONST_INT. */
1485 (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest)
1486 || (GET_MODE_CLASS (GET_MODE (m->set_dest)) == MODE_INT
1487 && GET_MODE_CLASS (GET_MODE (m1->set_dest)) == MODE_INT
1488 && (GET_MODE_BITSIZE (GET_MODE (m->set_dest))
1489 >= GET_MODE_BITSIZE (GET_MODE (m1->set_dest)))))
1490 /* See if the source of M1 says it matches M. */
1491 && ((GET_CODE (m1->set_src) == REG
1492 && matched_regs[REGNO (m1->set_src)])
1493 || rtx_equal_for_loop_p (m->set_src, m1->set_src,
e9b78d43 1494 movables, regs))))
04aa27b1 1495 && ((m->dependencies == m1->dependencies)
1496 || rtx_equal_p (m->dependencies, m1->dependencies)))
1497 {
1498 m->lifetime += m1->lifetime;
1499 m->savings += m1->savings;
1500 m1->done = 1;
1501 m1->match = m;
1502 matched_regs[m1->regno] = 1;
1503 }
1504 }
1505
1506 /* Now combine the regs used for zero-extension.
1507 This can be done for those not marked `global'
1508 provided their lives don't overlap. */
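  /* Two luid ranges are disjoint exactly when one's REGNO_FIRST_LUID
     exceeds the other's REGNO_LAST_LUID; the overlap test below
     rejects any pair of lives whose [first, last] ranges intersect.  */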
1509
1510 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
1511 mode = GET_MODE_WIDER_MODE (mode))
1512 {
19cb6b50 1513 struct movable *m0 = 0;
04aa27b1 1514
1515 /* Combine all the registers for extension from mode MODE.
1516 Don't combine any that are used outside this loop. */
5695261c 1517 for (m = movables->head; m; m = m->next)
04aa27b1 1518 if (m->partial && ! m->global
1519 && mode == GET_MODE (SET_SRC (PATTERN (NEXT_INSN (m->insn)))))
1520 {
19cb6b50 1521 struct movable *m1;
1522
23e52523 1523 int first = REGNO_FIRST_LUID (m->regno);
1524 int last = REGNO_LAST_LUID (m->regno);
04aa27b1 1525
1526 if (m0 == 0)
1527 {
1528 /* First one: don't check for overlap, just record it. */
1529 m0 = m;
4a8f0b95 1530 continue;
04aa27b1 1531 }
1532
1533 /* Make sure they extend to the same mode.
1534 (Almost always true.) */
1535 if (GET_MODE (m->set_dest) != GET_MODE (m0->set_dest))
4a8f0b95 1536 continue;
04aa27b1 1537
1538 /* We already have one: check for overlap with those
1539 already combined together. */
5695261c 1540 for (m1 = movables->head; m1 != m; m1 = m1->next)
04aa27b1 1541 if (m1 == m0 || (m1->partial && m1->match == m0))
23e52523 1542 if (! (REGNO_FIRST_LUID (m1->regno) > last
1543 || REGNO_LAST_LUID (m1->regno) < first))
04aa27b1 1544 goto overlap;
1545
1546 /* No overlap: we can combine this with the others. */
1547 m0->lifetime += m->lifetime;
1548 m0->savings += m->savings;
1549 m->done = 1;
1550 m->match = m0;
1551
4a8f0b95 1552 overlap:
1553 ;
04aa27b1 1554 }
1555 }
b9cf3f63 1556
1557 /* Clean up. */
1558 free (matched_regs);
04aa27b1 1559}
85bd9543 1560
1561/* Returns the number of movable instructions in LOOP that were not
1562 moved outside the loop. */
1563
1564static int
3ad4992f 1565num_unmoved_movables (const struct loop *loop)
85bd9543 1566{
1567 int num = 0;
1568 struct movable *m;
1569
1570 for (m = LOOP_MOVABLES (loop)->head; m; m = m->next)
1571 if (!m->done)
1572 ++num;
1573
1574 return num;
1575}
1576
04aa27b1 1577\f
1578/* Return 1 if regs X and Y will become the same if moved. */
1579
1580static int
3ad4992f 1581regs_match_p (rtx x, rtx y, struct loop_movables *movables)
04aa27b1 1582{
02e7a332 1583 unsigned int xn = REGNO (x);
1584 unsigned int yn = REGNO (y);
04aa27b1 1585 struct movable *mx, *my;
1586
5695261c 1587 for (mx = movables->head; mx; mx = mx->next)
04aa27b1 1588 if (mx->regno == xn)
1589 break;
1590
5695261c 1591 for (my = movables->head; my; my = my->next)
04aa27b1 1592 if (my->regno == yn)
1593 break;
1594
1595 return (mx && my
1596 && ((mx->match == my->match && mx->match != 0)
1597 || mx->match == my
1598 || mx == my->match));
1599}
1600
1601/* Return 1 if X and Y are identical-looking rtx's.
1602 This is the Lisp function EQUAL for rtx arguments.
1603
1604 If two registers are matching movables or a movable register and an
1605 equivalent constant, consider them equal. */
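/* For instance (hypothetical register numbers), (reg 5) and (reg 9)
   compare equal here when their movables match one another (see
   regs_match_p), and a reg compares equal to a constant when the
   movable loading it is slated to be re-emitted as a move from that
   constant (set_in_loop == -2 with a matching move_insn source) --
   neither of which plain rtx_equal_p would accept.  */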
1606
1607static int
3ad4992f 1608rtx_equal_for_loop_p (rtx x, rtx y, struct loop_movables *movables,
1609 struct loop_regs *regs)
04aa27b1 1610{
19cb6b50 1611 int i;
1612 int j;
1613 struct movable *m;
1614 enum rtx_code code;
1615 const char *fmt;
04aa27b1 1616
1617 if (x == y)
1618 return 1;
1619 if (x == 0 || y == 0)
1620 return 0;
1621
1622 code = GET_CODE (x);
1623
1624 /* If we have a register and a constant, they may sometimes be
1625 equal. */
05cb4e54 1626 if (GET_CODE (x) == REG && regs->array[REGNO (x)].set_in_loop == -2
04aa27b1 1627 && CONSTANT_P (y))
1d60621b 1628 {
5695261c 1629 for (m = movables->head; m; m = m->next)
1d60621b 1630 if (m->move_insn && m->regno == REGNO (x)
1631 && rtx_equal_p (m->set_src, y))
1632 return 1;
1633 }
05cb4e54 1634 else if (GET_CODE (y) == REG && regs->array[REGNO (y)].set_in_loop == -2
04aa27b1 1635 && CONSTANT_P (x))
1d60621b 1636 {
5695261c 1637 for (m = movables->head; m; m = m->next)
1d60621b 1638 if (m->move_insn && m->regno == REGNO (y)
1639 && rtx_equal_p (m->set_src, x))
1640 return 1;
1641 }
04aa27b1 1642
1643 /* Otherwise, rtx's of different codes cannot be equal. */
1644 if (code != GET_CODE (y))
1645 return 0;
1646
1647 /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.
1648 (REG:SI x) and (REG:HI x) are NOT equivalent. */
1649
1650 if (GET_MODE (x) != GET_MODE (y))
1651 return 0;
1652
1653 /* These three types of rtx's can be compared nonrecursively. */
1654 if (code == REG)
1655 return (REGNO (x) == REGNO (y) || regs_match_p (x, y, movables));
1656
1657 if (code == LABEL_REF)
1658 return XEXP (x, 0) == XEXP (y, 0);
1659 if (code == SYMBOL_REF)
1660 return XSTR (x, 0) == XSTR (y, 0);
1661
1662 /* Compare the elements. If any pair of corresponding elements
 1663   fail to match, return 0 for the whole thing.  */
1664
1665 fmt = GET_RTX_FORMAT (code);
1666 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1667 {
1668 switch (fmt[i])
1669 {
df38d76e 1670 case 'w':
1671 if (XWINT (x, i) != XWINT (y, i))
1672 return 0;
1673 break;
1674
04aa27b1 1675 case 'i':
1676 if (XINT (x, i) != XINT (y, i))
1677 return 0;
1678 break;
1679
1680 case 'E':
1681 /* Two vectors must have the same length. */
1682 if (XVECLEN (x, i) != XVECLEN (y, i))
1683 return 0;
1684
1685 /* And the corresponding elements must match. */
1686 for (j = 0; j < XVECLEN (x, i); j++)
e9b78d43 1687 if (rtx_equal_for_loop_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
1688 movables, regs) == 0)
04aa27b1 1689 return 0;
1690 break;
1691
1692 case 'e':
8ec5f078 1693 if (rtx_equal_for_loop_p (XEXP (x, i), XEXP (y, i), movables, regs)
1694 == 0)
04aa27b1 1695 return 0;
1696 break;
1697
1698 case 's':
1699 if (strcmp (XSTR (x, i), XSTR (y, i)))
1700 return 0;
1701 break;
1702
1703 case 'u':
1704 /* These are just backpointers, so they don't matter. */
1705 break;
1706
1707 case '0':
1708 break;
1709
1710 /* It is believed that rtx's at this level will never
1711 contain anything but integers and other rtx's,
1712 except for within LABEL_REFs and SYMBOL_REFs. */
1713 default:
1714 abort ();
1715 }
1716 }
1717 return 1;
1718}
1719\f
e1a25d7f 1720/* If X contains any LABEL_REF's, add REG_LABEL notes for them to all
05bcc2d9 1721 insns in INSNS which use the reference. LABEL_NUSES for CODE_LABEL
aa40f561 1722 references is incremented once for each added note. */
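/* These notes keep the labels alive: LABEL_NUSES is what jump
   optimization consults before deleting a label, and a hoisted insn
   such as (set (reg) (label_ref L)) would otherwise leave L looking
   unreferenced.  */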
e1a25d7f 1723
1724static void
3ad4992f 1725add_label_notes (rtx x, rtx insns)
e1a25d7f 1726{
1727 enum rtx_code code = GET_CODE (x);
56a1cb6b 1728 int i, j;
d2ca078f 1729 const char *fmt;
e1a25d7f 1730 rtx insn;
1731
85fa35e4 1732 if (code == LABEL_REF && !LABEL_REF_NONLOCAL_P (x))
e1a25d7f 1733 {
e72f55f8 1734 /* This code used to ignore labels that referred to dispatch tables to
d01481af 1735 avoid flow generating (slightly) worse code.
e72f55f8 1736
1737 We no longer ignore such label references (see LABEL_REF handling in
1738 mark_jump_label for additional information). */
1739 for (insn = insns; insn; insn = NEXT_INSN (insn))
1740 if (reg_mentioned_p (XEXP (x, 0), insn))
05bcc2d9 1741 {
4bb30577 1742 REG_NOTES (insn) = gen_rtx_INSN_LIST (REG_LABEL, XEXP (x, 0),
05bcc2d9 1743 REG_NOTES (insn));
1744 if (LABEL_P (XEXP (x, 0)))
1745 LABEL_NUSES (XEXP (x, 0))++;
1746 }
e1a25d7f 1747 }
1748
1749 fmt = GET_RTX_FORMAT (code);
1750 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
56a1cb6b 1751 {
1752 if (fmt[i] == 'e')
1753 add_label_notes (XEXP (x, i), insns);
1754 else if (fmt[i] == 'E')
1755 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1756 add_label_notes (XVECEXP (x, i, j), insns);
1757 }
e1a25d7f 1758}
1759\f
04aa27b1 1760/* Scan MOVABLES, and move the insns that deserve to be moved.
1761 If two matching movables are combined, replace one reg with the
1762 other throughout. */
1763
1764static void
3ad4992f 1765move_movables (struct loop *loop, struct loop_movables *movables,
1766 int threshold, int insn_count)
04aa27b1 1767{
e9b78d43 1768 struct loop_regs *regs = LOOP_REGS (loop);
8ec5f078 1769 int nregs = regs->num;
04aa27b1 1770 rtx new_start = 0;
19cb6b50 1771 struct movable *m;
1772 rtx p;
15fc3eb7 1773 rtx loop_start = loop->start;
1774 rtx loop_end = loop->end;
04aa27b1 1775 /* Map of pseudo-register replacements to handle combining
1776 when we move several insns that load the same value
1777 into different pseudo-registers. */
b9cf3f63 1778 rtx *reg_map = (rtx *) xcalloc (nregs, sizeof (rtx));
1779 char *already_moved = (char *) xcalloc (nregs, sizeof (char));
04aa27b1 1780
5695261c 1781 for (m = movables->head; m; m = m->next)
04aa27b1 1782 {
1783 /* Describe this movable insn. */
1784
1785 if (loop_dump_stream)
1786 {
1787 fprintf (loop_dump_stream, "Insn %d: regno %d (life %d), ",
1788 INSN_UID (m->insn), m->regno, m->lifetime);
1789 if (m->consec > 0)
1790 fprintf (loop_dump_stream, "consec %d, ", m->consec);
1791 if (m->cond)
1792 fprintf (loop_dump_stream, "cond ");
1793 if (m->force)
1794 fprintf (loop_dump_stream, "force ");
1795 if (m->global)
1796 fprintf (loop_dump_stream, "global ");
1797 if (m->done)
1798 fprintf (loop_dump_stream, "done ");
1799 if (m->move_insn)
1800 fprintf (loop_dump_stream, "move-insn ");
1801 if (m->match)
1802 fprintf (loop_dump_stream, "matches %d ",
1803 INSN_UID (m->match->insn));
1804 if (m->forces)
1805 fprintf (loop_dump_stream, "forces %d ",
1806 INSN_UID (m->forces->insn));
1807 }
1808
04aa27b1 1809 /* Ignore the insn if it's already done (it matched something else).
1810 Otherwise, see if it is now safe to move. */
1811
1812 if (!m->done
1813 && (! m->cond
15fc3eb7 1814 || (1 == loop_invariant_p (loop, m->set_src)
04aa27b1 1815 && (m->dependencies == 0
15fc3eb7 1816 || 1 == loop_invariant_p (loop, m->dependencies))
04aa27b1 1817 && (m->consec == 0
15fc3eb7 1818 || 1 == consec_sets_invariant_p (loop, m->set_dest,
04aa27b1 1819 m->consec + 1,
1820 m->insn))))
1821 && (! m->forces || m->forces->done))
1822 {
19cb6b50 1823 int regno;
1824 rtx p;
04aa27b1 1825 int savings = m->savings;
1826
1827 /* We have an insn that is safe to move.
1828 Compute its desirability. */
1829
1830 p = m->insn;
1831 regno = m->regno;
1832
1833 if (loop_dump_stream)
1834 fprintf (loop_dump_stream, "savings %d ", savings);
1835
05cb4e54 1836 if (regs->array[regno].moved_once && loop_dump_stream)
8882a1d3 1837 fprintf (loop_dump_stream, "halved since already moved ");
04aa27b1 1838
1839 /* An insn MUST be moved if we already moved something else
1840 which is safe only if this one is moved too: that is,
1841 if already_moved[REGNO] is nonzero. */
1842
1843 /* An insn is desirable to move if the new lifetime of the
1844 register is no more than THRESHOLD times the old lifetime.
1845 If it's not desirable, it means the loop is so big
1846 that moving won't speed things up much,
1847 and it is liable to make register usage worse. */
1848
1849 /* It is also desirable to move if it can be moved at no
1850 extra cost because something else was already moved. */
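	  /* A worked instance of the test below (all numbers hypothetical):
	     with THRESHOLD 6, savings 2 and lifetime 10, the insn is moved
	     whenever the loop body has at most 6*2*10 = 120 insns, but at
	     most 60 if this register was already moved out of another loop,
	     since moved_once doubles the required margin.  */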
1851
1852 if (already_moved[regno]
a539e2e6 1853 || flag_move_all_movables
8882a1d3 1854 || (threshold * savings * m->lifetime) >=
05cb4e54 1855 (regs->array[regno].moved_once ? insn_count * 2 : insn_count)
04aa27b1 1856 || (m->forces && m->forces->done
05cb4e54 1857 && regs->array[m->forces->regno].n_times_set == 1))
04aa27b1 1858 {
1859 int count;
19cb6b50 1860 struct movable *m1;
df9f2bb6 1861 rtx first = NULL_RTX;
62b58625 1862 rtx newreg = NULL_RTX;
1863
1864 if (m->insert_temp)
1865 newreg = gen_reg_rtx (GET_MODE (m->set_dest));
04aa27b1 1866
1867 /* Now move the insns that set the reg. */
1868
1869 if (m->partial && m->match)
1870 {
1871 rtx newpat, i1;
1872 rtx r1, r2;
1873 /* Find the end of this chain of matching regs.
1874 Thus, we load each reg in the chain from that one reg.
1875 And that reg is loaded with 0 directly,
1876 since it has ->match == 0. */
1877 for (m1 = m; m1->match; m1 = m1->match);
1878 newpat = gen_move_insn (SET_DEST (PATTERN (m->insn)),
1879 SET_DEST (PATTERN (m1->insn)));
26d6ff2a 1880 i1 = loop_insn_hoist (loop, newpat);
04aa27b1 1881
1882 /* Mark the moved, invariant reg as being allowed to
1883 share a hard reg with the other matching invariant. */
1884 REG_NOTES (i1) = REG_NOTES (m->insn);
1885 r1 = SET_DEST (PATTERN (m->insn));
1886 r2 = SET_DEST (PATTERN (m1->insn));
941522d6 1887 regs_may_share
1888 = gen_rtx_EXPR_LIST (VOIDmode, r1,
1889 gen_rtx_EXPR_LIST (VOIDmode, r2,
1890 regs_may_share));
06786d0d 1891 delete_insn (m->insn);
04aa27b1 1892
1893 if (new_start == 0)
1894 new_start = i1;
1895
1896 if (loop_dump_stream)
1897 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1898 }
1899 /* If we are to re-generate the item being moved with a
1900 new move insn, first delete what we have and then emit
1901 the move insn before the loop. */
1902 else if (m->move_insn)
1903 {
26d6ff2a 1904 rtx i1, temp, seq;
04aa27b1 1905
1906 for (count = m->consec; count >= 0; count--)
1907 {
1908 /* If this is the first insn of a library call sequence,
e29238fc 1909 something is very wrong. */
04aa27b1 1910 if (GET_CODE (p) != NOTE
df38d76e 1911 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
e29238fc 1912 abort ();
04aa27b1 1913
1914 /* If this is the last insn of a libcall sequence, then
1915 delete every insn in the sequence except the last.
1916 The last insn is handled in the normal manner. */
1917 if (GET_CODE (p) != NOTE
df38d76e 1918 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
04aa27b1 1919 {
1920 temp = XEXP (temp, 0);
1921 while (temp != p)
06786d0d 1922 temp = delete_insn (temp);
04aa27b1 1923 }
1924
f6d26bd2 1925 temp = p;
06786d0d 1926 p = delete_insn (p);
f6d26bd2 1927
1928 /* simplify_giv_expr expects that it can walk the insns
1929 at m->insn forwards and see this old sequence we are
1930 tossing here. delete_insn does preserve the next
1931 pointers, but when we skip over a NOTE we must fix
1932 it up. Otherwise that code walks into the non-deleted
1933 insn stream. */
9c141d50 1934 while (p && GET_CODE (p) == NOTE)
f6d26bd2 1935 p = NEXT_INSN (temp) = NEXT_INSN (p);
62b58625 1936
1937 if (m->insert_temp)
1938 {
1939 /* Replace the original insn with a move from
037845e5 1940 our newly created temp. */
62b58625 1941 start_sequence ();
3ad4992f 1942 emit_move_insn (m->set_dest, newreg);
62b58625 1943 seq = get_insns ();
1944 end_sequence ();
1945 emit_insn_before (seq, p);
1946 }
04aa27b1 1947 }
1948
1949 start_sequence ();
3ad4992f 1950 emit_move_insn (m->insert_temp ? newreg : m->set_dest,
62b58625 1951 m->set_src);
31d3e01c 1952 seq = get_insns ();
04aa27b1 1953 end_sequence ();
1954
31d3e01c 1955 add_label_notes (m->set_src, seq);
e1a25d7f 1956
26d6ff2a 1957 i1 = loop_insn_hoist (loop, seq);
df38d76e 1958 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
c080d8f0 1959 set_unique_reg_note (i1,
1960 m->is_equiv ? REG_EQUIV : REG_EQUAL,
1961 m->set_src);
04aa27b1 1962
1963 if (loop_dump_stream)
1964 fprintf (loop_dump_stream, " moved to %d", INSN_UID (i1));
1965
1966 /* The more regs we move, the less we like moving them. */
1967 threshold -= 3;
1968 }
1969 else
1970 {
1971 for (count = m->consec; count >= 0; count--)
1972 {
1973 rtx i1, temp;
1974
a92771b8 1975 /* If first insn of libcall sequence, skip to end. */
4a8f0b95 1976 /* Do this at start of loop, since p is guaranteed to
04aa27b1 1977 be an insn here. */
1978 if (GET_CODE (p) != NOTE
df38d76e 1979 && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
04aa27b1 1980 p = XEXP (temp, 0);
1981
1982 /* If last insn of libcall sequence, move all
1983 insns except the last before the loop. The last
1984 insn is handled in the normal manner. */
1985 if (GET_CODE (p) != NOTE
df38d76e 1986 && (temp = find_reg_note (p, REG_RETVAL, NULL_RTX)))
04aa27b1 1987 {
1988 rtx fn_address = 0;
1989 rtx fn_reg = 0;
1990 rtx fn_address_insn = 0;
1991
1992 first = 0;
1993 for (temp = XEXP (temp, 0); temp != p;
1994 temp = NEXT_INSN (temp))
1995 {
1996 rtx body;
1997 rtx n;
1998 rtx next;
1999
2000 if (GET_CODE (temp) == NOTE)
2001 continue;
2002
2003 body = PATTERN (temp);
2004
2005 /* Find the next insn after TEMP,
2006 not counting USE or NOTE insns. */
2007 for (next = NEXT_INSN (temp); next != p;
2008 next = NEXT_INSN (next))
2009 if (! (GET_CODE (next) == INSN
2010 && GET_CODE (PATTERN (next)) == USE)
2011 && GET_CODE (next) != NOTE)
2012 break;
4a8f0b95 2013
04aa27b1 2014 /* If that is the call, this may be the insn
2015 that loads the function address.
2016
2017 Extract the function address from the insn
2018 that loads it into a register.
2019 If this insn was cse'd, we get incorrect code.
2020
2021 So emit a new move insn that copies the
2022 function address into the register that the
2023 call insn will use. flow.c will delete any
2024 redundant stores that we have created. */
2025 if (GET_CODE (next) == CALL_INSN
2026 && GET_CODE (body) == SET
2027 && GET_CODE (SET_DEST (body)) == REG
df38d76e 2028 && (n = find_reg_note (temp, REG_EQUAL,
2029 NULL_RTX)))
04aa27b1 2030 {
2031 fn_reg = SET_SRC (body);
2032 if (GET_CODE (fn_reg) != REG)
2033 fn_reg = SET_DEST (body);
2034 fn_address = XEXP (n, 0);
2035 fn_address_insn = temp;
2036 }
2037 /* We have the call insn.
2038 If it uses the register we suspect it might,
2039 load it with the correct address directly. */
2040 if (GET_CODE (temp) == CALL_INSN
2041 && fn_address != 0
c744ab4c 2042 && reg_referenced_p (fn_reg, body))
0ab94f0c 2043 loop_insn_emit_after (loop, 0, fn_address_insn,
2044 gen_move_insn
2045 (fn_reg, fn_address));
04aa27b1 2046
2047 if (GET_CODE (temp) == CALL_INSN)
a42950ef 2048 {
0ab94f0c 2049 i1 = loop_call_insn_hoist (loop, body);
a42950ef 2050 /* Because the USAGE information potentially
2051 contains objects other than hard registers
2052 we need to copy it. */
96c9268a 2053 if (CALL_INSN_FUNCTION_USAGE (temp))
0bc644e0 2054 CALL_INSN_FUNCTION_USAGE (i1)
2055 = copy_rtx (CALL_INSN_FUNCTION_USAGE (temp));
a42950ef 2056 }
04aa27b1 2057 else
26d6ff2a 2058 i1 = loop_insn_hoist (loop, body);
04aa27b1 2059 if (first == 0)
2060 first = i1;
2061 if (temp == fn_address_insn)
2062 fn_address_insn = i1;
2063 REG_NOTES (i1) = REG_NOTES (temp);
06786d0d 2064 REG_NOTES (temp) = NULL;
2065 delete_insn (temp);
04aa27b1 2066 }
73d774ec 2067 if (new_start == 0)
2068 new_start = first;
04aa27b1 2069 }
2070 if (m->savemode != VOIDmode)
2071 {
2072 /* P sets REG to zero; but we should clear only
2073 the bits that are not covered by the mode
2074 m->savemode. */
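	      /* E.g. (a hypothetical instance): with savemode == QImode on
		 a target with 8-bit units, the sequence built below computes

		     reg = reg & ((1 << 8) - 1)

		 i.e. it clears everything above the low 8 bits instead of
		 zeroing the whole register.  */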
2075 rtx reg = m->set_dest;
2076 rtx sequence;
2077 rtx tem;
4a8f0b95 2078
04aa27b1 2079 start_sequence ();
ad99e708 2080 tem = expand_simple_binop
2081 (GET_MODE (reg), AND, reg,
df38d76e 2082 GEN_INT ((((HOST_WIDE_INT) 1
2083 << GET_MODE_BITSIZE (m->savemode)))
04aa27b1 2084 - 1),
2085 reg, 1, OPTAB_LIB_WIDEN);
2086 if (tem == 0)
2087 abort ();
2088 if (tem != reg)
2089 emit_move_insn (reg, tem);
31d3e01c 2090 sequence = get_insns ();
04aa27b1 2091 end_sequence ();
26d6ff2a 2092 i1 = loop_insn_hoist (loop, sequence);
04aa27b1 2093 }
2094 else if (GET_CODE (p) == CALL_INSN)
a42950ef 2095 {
0ab94f0c 2096 i1 = loop_call_insn_hoist (loop, PATTERN (p));
a42950ef 2097 /* Because the USAGE information potentially
2098 contains objects other than hard registers
2099 we need to copy it. */
96c9268a 2100 if (CALL_INSN_FUNCTION_USAGE (p))
0bc644e0 2101 CALL_INSN_FUNCTION_USAGE (i1)
2102 = copy_rtx (CALL_INSN_FUNCTION_USAGE (p));
a42950ef 2103 }
aea4708b 2104 else if (count == m->consec && m->move_insn_first)
2105 {
26d6ff2a 2106 rtx seq;
aea4708b 2107 /* The SET_SRC might not be invariant, so we must
2108 use the REG_EQUAL note. */
2109 start_sequence ();
2110 emit_move_insn (m->set_dest, m->set_src);
31d3e01c 2111 seq = get_insns ();
aea4708b 2112 end_sequence ();
2113
31d3e01c 2114 add_label_notes (m->set_src, seq);
aea4708b 2115
26d6ff2a 2116 i1 = loop_insn_hoist (loop, seq);
aea4708b 2117 if (! find_reg_note (i1, REG_EQUAL, NULL_RTX))
c080d8f0 2118 set_unique_reg_note (i1, m->is_equiv ? REG_EQUIV
2119 : REG_EQUAL, m->set_src);
aea4708b 2120 }
62b58625 2121 else if (m->insert_temp)
2122 {
3ad4992f 2123 rtx *reg_map2 = (rtx *) xcalloc (REGNO (newreg),
62b58625 2124						   sizeof (rtx));
 2125		      reg_map2[m->regno] = newreg;
2126
2127 i1 = loop_insn_hoist (loop, copy_rtx (PATTERN (p)));
2128 replace_regs (i1, reg_map2, REGNO (newreg), 1);
2129 free (reg_map2);
3ad4992f 2130 }
04aa27b1 2131 else
26d6ff2a 2132 i1 = loop_insn_hoist (loop, PATTERN (p));
04aa27b1 2133
aea4708b 2134 if (REG_NOTES (i1) == 0)
2135 {
2136 REG_NOTES (i1) = REG_NOTES (p);
06786d0d 2137 REG_NOTES (p) = NULL;
04aa27b1 2138
aea4708b 2139 /* If there is a REG_EQUAL note present whose value
2140 is not loop invariant, then delete it, since it
2141 may cause problems with later optimization passes.
2142 It is possible for cse to create such notes
2143 like this as a result of record_jump_cond. */
4a8f0b95 2144
aea4708b 2145 if ((temp = find_reg_note (i1, REG_EQUAL, NULL_RTX))
15fc3eb7 2146 && ! loop_invariant_p (loop, XEXP (temp, 0)))
aea4708b 2147 remove_note (i1, temp);
2148 }
f3a282b0 2149
04aa27b1 2150 if (new_start == 0)
2151 new_start = i1;
2152
2153 if (loop_dump_stream)
2154 fprintf (loop_dump_stream, " moved to %d",
2155 INSN_UID (i1));
2156
04aa27b1 2157 /* If library call, now fix the REG_NOTES that contain
2158 insn pointers, namely REG_LIBCALL on FIRST
2159 and REG_RETVAL on I1. */
7d27e4c9 2160 if ((temp = find_reg_note (i1, REG_RETVAL, NULL_RTX)))
04aa27b1 2161 {
2162 XEXP (temp, 0) = first;
df38d76e 2163 temp = find_reg_note (first, REG_LIBCALL, NULL_RTX);
04aa27b1 2164 XEXP (temp, 0) = i1;
2165 }
2166
f6d26bd2 2167 temp = p;
06786d0d 2168 delete_insn (p);
f6d26bd2 2169 p = NEXT_INSN (p);
2170
2171 /* simplify_giv_expr expects that it can walk the insns
2172 at m->insn forwards and see this old sequence we are
2173 tossing here. delete_insn does preserve the next
2174 pointers, but when we skip over a NOTE we must fix
2175 it up. Otherwise that code walks into the non-deleted
2176 insn stream. */
2177 while (p && GET_CODE (p) == NOTE)
2178 p = NEXT_INSN (temp) = NEXT_INSN (p);
62b58625 2179
2180 if (m->insert_temp)
2181 {
2182 rtx seq;
2183 /* Replace the original insn with a move from
037845e5 2184 our newly created temp. */
62b58625 2185 start_sequence ();
3ad4992f 2186 emit_move_insn (m->set_dest, newreg);
62b58625 2187 seq = get_insns ();
2188 end_sequence ();
2189 emit_insn_before (seq, p);
2190 }
04aa27b1 2191 }
2192
2193 /* The more regs we move, the less we like moving them. */
2194 threshold -= 3;
2195 }
2196
62b58625 2197 m->done = 1;
04aa27b1 2198
62b58625 2199 if (!m->insert_temp)
8096c8ed 2200 {
62b58625 2201 /* Any other movable that loads the same register
2202 MUST be moved. */
2203 already_moved[regno] = 1;
04aa27b1 2204
62b58625 2205 /* This reg has been moved out of one loop. */
2206 regs->array[regno].moved_once = 1;
04aa27b1 2207
62b58625 2208 /* The reg set here is now invariant. */
2209 if (! m->partial)
2210 {
2211 int i;
2212 for (i = 0; i < LOOP_REGNO_NREGS (regno, m->set_dest); i++)
2213 regs->array[regno+i].set_in_loop = 0;
2214 }
2215
2216 /* Change the length-of-life info for the register
2217 to say it lives at least the full length of this loop.
2218 This will help guide optimizations in outer loops. */
2219
2220 if (REGNO_FIRST_LUID (regno) > INSN_LUID (loop_start))
2221 /* This is the old insn before all the moved insns.
2222 We can't use the moved insn because it is out of range
2223 in uid_luid. Only the old insns have luids. */
2224 REGNO_FIRST_UID (regno) = INSN_UID (loop_start);
2225 if (REGNO_LAST_LUID (regno) < INSN_LUID (loop_end))
2226 REGNO_LAST_UID (regno) = INSN_UID (loop_end);
2227 }
04aa27b1 2228
2229 /* Combine with this moved insn any other matching movables. */
2230
2231 if (! m->partial)
5695261c 2232 for (m1 = movables->head; m1; m1 = m1->next)
04aa27b1 2233 if (m1->match == m)
2234 {
2235 rtx temp;
2236
2237 /* Schedule the reg loaded by M1
 2238		       for replacement so that it shares the reg of M.
 2239		       If the modes differ (only possible in restricted
d8330ae2 2240	       circumstances), make a SUBREG.
2241
2242 Note this assumes that the target dependent files
2243 treat REG and SUBREG equally, including within
2244 GO_IF_LEGITIMATE_ADDRESS and in all the
2245 predicates since we never verify that replacing the
2246 original register with a SUBREG results in a
2247 recognizable insn. */
04aa27b1 2248 if (GET_MODE (m->set_dest) == GET_MODE (m1->set_dest))
2249 reg_map[m1->regno] = m->set_dest;
2250 else
2251 reg_map[m1->regno]
2252 = gen_lowpart_common (GET_MODE (m1->set_dest),
2253 m->set_dest);
4a8f0b95 2254
04aa27b1 2255 /* Get rid of the matching insn
2256 and prevent further processing of it. */
2257 m1->done = 1;
2258
06786d0d 2259		  /* If library call, delete all insns.  */
7d27e4c9 2260 if ((temp = find_reg_note (m1->insn, REG_RETVAL,
2261 NULL_RTX)))
06786d0d 2262 delete_insn_chain (XEXP (temp, 0), m1->insn);
2263 else
2264 delete_insn (m1->insn);
04aa27b1 2265
2266 /* Any other movable that loads the same register
2267 MUST be moved. */
2268 already_moved[m1->regno] = 1;
2269
2270 /* The reg merged here is now invariant,
2271 if the reg it matches is invariant. */
2272 if (! m->partial)
8096c8ed 2273 {
2274 int i;
2275 for (i = 0;
4b6dfc37 2276 i < LOOP_REGNO_NREGS (regno, m1->set_dest);
8096c8ed 2277 i++)
2278 regs->array[m1->regno+i].set_in_loop = 0;
2279 }
04aa27b1 2280 }
2281 }
2282 else if (loop_dump_stream)
2283 fprintf (loop_dump_stream, "not desirable");
2284 }
2285 else if (loop_dump_stream && !m->match)
2286 fprintf (loop_dump_stream, "not safe");
2287
2288 if (loop_dump_stream)
2289 fprintf (loop_dump_stream, "\n");
2290 }
2291
2292 if (new_start == 0)
2293 new_start = loop_start;
2294
2295 /* Go through all the instructions in the loop, making
2296 all the register substitutions scheduled in REG_MAP. */
15fc3eb7 2297 for (p = new_start; p != loop_end; p = NEXT_INSN (p))
04aa27b1 2298 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
2299 || GET_CODE (p) == CALL_INSN)
2300 {
2301 replace_regs (PATTERN (p), reg_map, nregs, 0);
2302 replace_regs (REG_NOTES (p), reg_map, nregs, 0);
03104e7c 2303 INSN_CODE (p) = -1;
04aa27b1 2304 }
b9cf3f63 2305
2306 /* Clean up. */
2307 free (reg_map);
2308 free (already_moved);
04aa27b1 2309}
df3b4a51 2310
2311
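/* Append the movable M at the end of the chain in MOVABLES.  */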
2312static void
3ad4992f 2313loop_movables_add (struct loop_movables *movables, struct movable *m)
df3b4a51 2314{
2315 if (movables->head == 0)
2316 movables->head = m;
2317 else
2318 movables->last->next = m;
2319 movables->last = m;
2320}
2321
2322
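/* Free every movable in the chain held by MOVABLES.  */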
2323static void
3ad4992f 2324loop_movables_free (struct loop_movables *movables)
df3b4a51 2325{
2326 struct movable *m;
2327 struct movable *m_next;
2328
2329 for (m = movables->head; m; m = m_next)
2330 {
2331 m_next = m->next;
2332 free (m);
2333 }
4bb30577 2334}
04aa27b1 2335\f
2336#if 0
2337/* Scan X and replace the address of any MEM in it with ADDR.
2338 REG is the address that MEM should have before the replacement. */
2339
2340static void
3ad4992f 2341replace_call_address (rtx x, rtx reg, rtx addr)
04aa27b1 2342{
19cb6b50 2343 enum rtx_code code;
2344 int i;
2345 const char *fmt;
04aa27b1 2346
2347 if (x == 0)
2348 return;
2349 code = GET_CODE (x);
2350 switch (code)
2351 {
2352 case PC:
2353 case CC0:
2354 case CONST_INT:
2355 case CONST_DOUBLE:
2356 case CONST:
2357 case SYMBOL_REF:
2358 case LABEL_REF:
2359 case REG:
2360 return;
2361
2362 case SET:
2363 /* Short cut for very common case. */
2364 replace_call_address (XEXP (x, 1), reg, addr);
2365 return;
2366
2367 case CALL:
2368 /* Short cut for very common case. */
2369 replace_call_address (XEXP (x, 0), reg, addr);
2370 return;
2371
2372 case MEM:
2373 /* If this MEM uses a reg other than the one we expected,
2374 something is wrong. */
2375 if (XEXP (x, 0) != reg)
2376 abort ();
2377 XEXP (x, 0) = addr;
2378 return;
4a8f0b95 2379
0dbd1c74 2380 default:
2381 break;
04aa27b1 2382 }
2383
2384 fmt = GET_RTX_FORMAT (code);
2385 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2386 {
2387 if (fmt[i] == 'e')
2388 replace_call_address (XEXP (x, i), reg, addr);
1bd8ca86 2389 else if (fmt[i] == 'E')
04aa27b1 2390 {
19cb6b50 2391 int j;
04aa27b1 2392 for (j = 0; j < XVECLEN (x, i); j++)
2393 replace_call_address (XVECEXP (x, i, j), reg, addr);
2394 }
2395 }
2396}
2397#endif
2398\f
2399/* Return the number of memory refs to addresses that vary
2400 in the rtx X. */
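/* For example (hypothetical RTL): given (mem (plus (reg 64) (const_int 4))),
   if reg 64 is not known loop-invariant then the MEM's address varies and
   it counts as one nonfixed read; an address that is only conditionally
   invariant (loop_invariant_p value 2) counts as varying here as well.  */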
2401
2402static int
3ad4992f 2403count_nonfixed_reads (const struct loop *loop, rtx x)
04aa27b1 2404{
19cb6b50 2405 enum rtx_code code;
2406 int i;
2407 const char *fmt;
04aa27b1 2408 int value;
2409
2410 if (x == 0)
2411 return 0;
2412
2413 code = GET_CODE (x);
2414 switch (code)
2415 {
2416 case PC:
2417 case CC0:
2418 case CONST_INT:
2419 case CONST_DOUBLE:
2420 case CONST:
2421 case SYMBOL_REF:
2422 case LABEL_REF:
2423 case REG:
2424 return 0;
2425
2426 case MEM:
15fc3eb7 2427 return ((loop_invariant_p (loop, XEXP (x, 0)) != 1)
2428 + count_nonfixed_reads (loop, XEXP (x, 0)));
4a8f0b95 2429
0dbd1c74 2430 default:
2431 break;
04aa27b1 2432 }
2433
2434 value = 0;
2435 fmt = GET_RTX_FORMAT (code);
2436 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2437 {
2438 if (fmt[i] == 'e')
15fc3eb7 2439 value += count_nonfixed_reads (loop, XEXP (x, i));
2440 if (fmt[i] == 'E')
04aa27b1 2441 {
19cb6b50 2442 int j;
04aa27b1 2443 for (j = 0; j < XVECLEN (x, i); j++)
15fc3eb7 2444 value += count_nonfixed_reads (loop, XVECEXP (x, i, j));
04aa27b1 2445 }
2446 }
2447 return value;
2448}
04aa27b1 2449\f
1f8922d4 2450/* Scan a loop setting the elements `cont', `vtop', `loops_enclosed',
d286d8a0 2451 `has_call', `has_nonconst_call', `has_volatile', `has_tablejump',
2ff1269a 2452 `unknown_address_altered', `unknown_constant_address_altered', and
2453 `num_mem_sets' in LOOP. Also, fill in the array `mems' and the
2454 list `store_mems' in LOOP. */
04aa27b1 2455
2456static void
3ad4992f 2457prescan_loop (struct loop *loop)
04aa27b1 2458{
19cb6b50 2459 int level = 1;
ce326ac0 2460 rtx insn;
7bc9de79 2461 struct loop_info *loop_info = LOOP_INFO (loop);
ec7d7ef9 2462 rtx start = loop->start;
2463 rtx end = loop->end;
ce326ac0 2464 /* The label after END. Jumping here is just like falling off the
2465 end of the loop. We use next_nonnote_insn instead of next_label
2466 as a hedge against the (pathological) case where some actual insn
2467 might end up between the two. */
2468 rtx exit_target = next_nonnote_insn (end);
1f8922d4 2469
1f8922d4 2470 loop_info->has_indirect_jump = indirect_jump_in_function;
e7b494d3 2471 loop_info->pre_header_has_call = 0;
1f8922d4 2472 loop_info->has_call = 0;
d286d8a0 2473 loop_info->has_nonconst_call = 0;
cd6839f2 2474 loop_info->has_prefetch = 0;
1f8922d4 2475 loop_info->has_volatile = 0;
2476 loop_info->has_tablejump = 0;
1f8922d4 2477 loop_info->has_multiple_exit_targets = 0;
ec7d7ef9 2478 loop->level = 1;
04aa27b1 2479
2ff1269a 2480 loop_info->unknown_address_altered = 0;
2481 loop_info->unknown_constant_address_altered = 0;
2482 loop_info->store_mems = NULL_RTX;
2483 loop_info->first_loop_store_insn = NULL_RTX;
2484 loop_info->mems_idx = 0;
2485 loop_info->num_mem_sets = 0;
45498ea1 2486  /* If the loop opts run twice, the first pass sets this for the second.  */
d090276e 2487 loop_info->preconditioned = NOTE_PRECONDITIONED (end);
e7b494d3 2488
4bb30577 2489 for (insn = start; insn && GET_CODE (insn) != CODE_LABEL;
e7b494d3 2490 insn = PREV_INSN (insn))
2491 {
2492 if (GET_CODE (insn) == CALL_INSN)
2493 {
2494 loop_info->pre_header_has_call = 1;
2495 break;
2496 }
2497 }
2498
04aa27b1 2499 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2500 insn = NEXT_INSN (insn))
2501 {
6ed9d6f6 2502 switch (GET_CODE (insn))
04aa27b1 2503 {
6ed9d6f6 2504 case NOTE:
04aa27b1 2505 if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_BEG)
2506 {
2507 ++level;
2508 /* Count number of loops contained in this one. */
ec7d7ef9 2509 loop->level++;
04aa27b1 2510 }
2511 else if (NOTE_LINE_NUMBER (insn) == NOTE_INSN_LOOP_END)
6ed9d6f6 2512 --level;
2513 break;
2514
2515 case CALL_INSN:
06a652d1 2516 if (! CONST_OR_PURE_CALL_P (insn))
d286d8a0 2517 {
2518 loop_info->unknown_address_altered = 1;
2519 loop_info->has_nonconst_call = 1;
2520 }
ef15379a 2521 else if (pure_call_p (insn))
2522 loop_info->has_nonconst_call = 1;
1f8922d4 2523 loop_info->has_call = 1;
6ed9d6f6 2524 if (can_throw_internal (insn))
2525 loop_info->has_multiple_exit_targets = 1;
d5f9786f 2526
2527 /* Calls initializing constant objects have CLOBBER of MEM /u in the
2528 attached FUNCTION_USAGE expression list, not accounted for by the
2529 code above. We should note these to avoid missing dependencies in
2530 later references. */
2531 {
2532 rtx fusage_entry;
3ad4992f 2533
2534 for (fusage_entry = CALL_INSN_FUNCTION_USAGE (insn);
d5f9786f 2535 fusage_entry; fusage_entry = XEXP (fusage_entry, 1))
2536 {
2537 rtx fusage = XEXP (fusage_entry, 0);
2538
2539 if (GET_CODE (fusage) == CLOBBER
2540 && GET_CODE (XEXP (fusage, 0)) == MEM
2541 && RTX_UNCHANGING_P (XEXP (fusage, 0)))
2542 {
2543 note_stores (fusage, note_addr_stored, loop_info);
2544 if (! loop_info->first_loop_store_insn
2545 && loop_info->store_mems)
2546 loop_info->first_loop_store_insn = insn;
2547 }
2548 }
2549 }
6ed9d6f6 2550 break;
2551
2552 case JUMP_INSN:
2553 if (! loop_info->has_multiple_exit_targets)
2554 {
2555 rtx set = pc_set (insn);
2556
2557 if (set)
2558 {
dfb8ebb3 2559 rtx src = SET_SRC (set);
6ed9d6f6 2560 rtx label1, label2;
2561
dfb8ebb3 2562 if (GET_CODE (src) == IF_THEN_ELSE)
6ed9d6f6 2563 {
dfb8ebb3 2564 label1 = XEXP (src, 1);
2565 label2 = XEXP (src, 2);
6ed9d6f6 2566 }
2567 else
2568 {
dfb8ebb3 2569 label1 = src;
6ed9d6f6 2570 label2 = NULL_RTX;
2571 }
2572
2573 do
2574 {
2575 if (label1 && label1 != pc_rtx)
2576 {
2577 if (GET_CODE (label1) != LABEL_REF)
2578 {
2579 /* Something tricky. */
2580 loop_info->has_multiple_exit_targets = 1;
2581 break;
2582 }
2583 else if (XEXP (label1, 0) != exit_target
2584 && LABEL_OUTSIDE_LOOP_P (label1))
2585 {
2586 /* A jump outside the current loop. */
2587 loop_info->has_multiple_exit_targets = 1;
2588 break;
2589 }
2590 }
2591
2592 label1 = label2;
2593 label2 = NULL_RTX;
2594 }
2595 while (label1);
2596 }
2597 else
2598 {
2599 /* A return, or something tricky. */
2600 loop_info->has_multiple_exit_targets = 1;
2601 }
2602 }
2603 /* FALLTHRU */
ce326ac0 2604
6ed9d6f6 2605 case INSN:
ce326ac0 2606 if (volatile_refs_p (PATTERN (insn)))
1f8922d4 2607 loop_info->has_volatile = 1;
9a5e3964 2608
2609 if (GET_CODE (insn) == JUMP_INSN
2610 && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
2611 || GET_CODE (PATTERN (insn)) == ADDR_VEC))
1f8922d4 2612 loop_info->has_tablejump = 1;
4a8f0b95 2613
2ff1269a 2614 note_stores (PATTERN (insn), note_addr_stored, loop_info);
2615 if (! loop_info->first_loop_store_insn && loop_info->store_mems)
2616 loop_info->first_loop_store_insn = insn;
ce326ac0 2617
6ed9d6f6 2618 if (flag_non_call_exceptions && can_throw_internal (insn))
2619 loop_info->has_multiple_exit_targets = 1;
2620 break;
06679858 2621
6ed9d6f6 2622 default:
2623 break;
04aa27b1 2624 }
2625 }
ce326ac0 2626
2627 /* Now, rescan the loop, setting up the LOOP_MEMS array. */
229e30fe 2628 if (/* An exception thrown by a called function might land us
ce326ac0 2629 anywhere. */
d286d8a0 2630 ! loop_info->has_nonconst_call
ce326ac0 2631 /* We don't want loads for MEMs moved to a location before the
2632 one at which their stack memory becomes allocated. (Note
2633 that this is not a problem for malloc, etc., since those
 2634	 require actual function calls.)  */
ec7d7ef9 2635 && ! current_function_calls_alloca
ce326ac0 2636 /* There are ways to leave the loop other than falling off the
2637 end. */
ec7d7ef9 2638 && ! loop_info->has_multiple_exit_targets)
ce326ac0 2639 for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2640 insn = NEXT_INSN (insn))
2ff1269a 2641 for_each_rtx (&insn, insert_loop_mem, loop_info);
229e30fe 2642
2643 /* BLKmode MEMs are added to LOOP_STORE_MEM as necessary so
2644 that loop_invariant_p and load_mems can use true_dependence
2645 to determine what is really clobbered. */
2ff1269a 2646 if (loop_info->unknown_address_altered)
229e30fe 2647 {
2648 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2649
72ae569f 2650 loop_info->store_mems
2ff1269a 2651 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
229e30fe 2652 }
2ff1269a 2653 if (loop_info->unknown_constant_address_altered)
229e30fe 2654 {
2655 rtx mem = gen_rtx_MEM (BLKmode, const0_rtx);
2656
2657 RTX_UNCHANGING_P (mem) = 1;
72ae569f 2658 loop_info->store_mems
2ff1269a 2659 = gen_rtx_EXPR_LIST (VOIDmode, mem, loop_info->store_mems);
229e30fe 2660 }
04aa27b1 2661}
2662\f
8f8dcce4 2663/* Invalidate all loops containing LABEL. */
2664
2665static void
3ad4992f 2666invalidate_loops_containing_label (rtx label)
8f8dcce4 2667{
2668 struct loop *loop;
2669 for (loop = uid_loop[INSN_UID (label)]; loop; loop = loop->outer)
2670 loop->invalid = 1;
2671}
2672
04aa27b1 2673/* Scan the function looking for loops. Record the start and end of each loop.
2674 Also mark as invalid loops any loops that contain a setjmp or are branched
2675 to from outside the loop. */
2676
2677static void
3ad4992f 2678find_and_verify_loops (rtx f, struct loops *loops)
04aa27b1 2679{
ec7d7ef9 2680 rtx insn;
2681 rtx label;
2682 int num_loops;
2683 struct loop *current_loop;
2684 struct loop *next_loop;
2685 struct loop *loop;
2686
2687 num_loops = loops->num;
04aa27b1 2688
8bd88b68 2689 compute_luids (f, NULL_RTX, 0);
2690
04aa27b1 2691 /* If there are jumps to undefined labels,
2692 treat them as jumps out of any/all loops.
2693 This also avoids writing past end of tables when there are no loops. */
ec7d7ef9 2694 uid_loop[0] = NULL;
04aa27b1 2695
2696 /* Find boundaries of loops, mark which loops are contained within
2697 loops, and invalidate loops that have setjmp. */
2698
ec7d7ef9 2699 num_loops = 0;
2700 current_loop = NULL;
04aa27b1 2701 for (insn = f; insn; insn = NEXT_INSN (insn))
2702 {
2703 if (GET_CODE (insn) == NOTE)
2704 switch (NOTE_LINE_NUMBER (insn))
2705 {
2706 case NOTE_INSN_LOOP_BEG:
ec7d7ef9 2707 next_loop = loops->array + num_loops;
2708 next_loop->num = num_loops;
2709 num_loops++;
2710 next_loop->start = insn;
2711 next_loop->outer = current_loop;
04aa27b1 2712 current_loop = next_loop;
2713 break;
2714
8bd88b68 2715 case NOTE_INSN_LOOP_CONT:
ec7d7ef9 2716 current_loop->cont = insn;
8bd88b68 2717 break;
aef2463e 2718
2719 case NOTE_INSN_LOOP_VTOP:
2720 current_loop->vtop = insn;
2721 break;
2722
04aa27b1 2723 case NOTE_INSN_LOOP_END:
ec7d7ef9 2724 if (! current_loop)
04aa27b1 2725 abort ();
2726
ec7d7ef9 2727 current_loop->end = insn;
ec7d7ef9 2728 current_loop = current_loop->outer;
04aa27b1 2729 break;
2730
0dbd1c74 2731 default:
2732 break;
04aa27b1 2733 }
2734
f6025ee7 2735 if (GET_CODE (insn) == CALL_INSN
9239aee6 2736 && find_reg_note (insn, REG_SETJMP, NULL))
2737 {
2738 /* In this case, we must invalidate our current loop and any
2739 enclosing loop. */
2740 for (loop = current_loop; loop; loop = loop->outer)
2741 {
2742 loop->invalid = 1;
2743 if (loop_dump_stream)
2744 fprintf (loop_dump_stream,
2745 "\nLoop at %d ignored due to setjmp.\n",
2746 INSN_UID (loop->start));
2747 }
2748 }
2749
04aa27b1 2750 /* Note that this will mark the NOTE_INSN_LOOP_END note as being in the
2751 enclosing loop, but this doesn't matter. */
ec7d7ef9 2752 uid_loop[INSN_UID (insn)] = current_loop;
04aa27b1 2753 }
2754
c5caa50f 2755 /* Any loop containing a label used in an initializer must be invalidated,
2756 because it can be jumped into from anywhere. */
c5caa50f 2757 for (label = forced_labels; label; label = XEXP (label, 1))
8f8dcce4 2758 invalidate_loops_containing_label (XEXP (label, 0));
c5caa50f 2759
485aaaaf 2760 /* Any loop containing a label used for an exception handler must be
2761 invalidated, because it can be jumped into from anywhere. */
8f8dcce4 2762 for_each_eh_label (invalidate_loops_containing_label);
485aaaaf 2763
c5caa50f 2764 /* Now scan all insn's in the function. If any JUMP_INSN branches into a
2765 loop that it is not contained within, that loop is marked invalid.
2766 If any INSN or CALL_INSN uses a label's address, then the loop containing
2767 that label is marked invalid, because it could be jumped into from
2768 anywhere.
04aa27b1 2769
2770 Also look for blocks of code ending in an unconditional branch that
4a8f0b95 2771   exits the loop.  If such a block is guarded by a conditional
 2772   branch jumping around it, move the block elsewhere (see below) and
2773 invert the jump to point to the code block. This may eliminate a
2774 label in our loop and will simplify processing by both us and a
2775 possible second cse pass. */
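  /* An illustrative sketch of that transformation, with hypothetical
     labels:

       before:                       after:
         if (cond) goto L1;            if (!cond) goto L2;
         goto L_exit;                L1:
       L1:                             ...
         ...                         (elsewhere, past a BARRIER
                                      outside the loop)
                                     L2: goto L_exit;
  */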
2776
2777 for (insn = f; insn; insn = NEXT_INSN (insn))
9204e736 2778 if (INSN_P (insn))
04aa27b1 2779 {
ec7d7ef9 2780 struct loop *this_loop = uid_loop[INSN_UID (insn)];
04aa27b1 2781
c5caa50f 2782 if (GET_CODE (insn) == INSN || GET_CODE (insn) == CALL_INSN)
2783 {
2784 rtx note = find_reg_note (insn, REG_LABEL, NULL_RTX);
2785 if (note)
8f8dcce4 2786 invalidate_loops_containing_label (XEXP (note, 0));
c5caa50f 2787 }
2788
2789 if (GET_CODE (insn) != JUMP_INSN)
2790 continue;
2791
ec7d7ef9 2792 mark_loop_jump (PATTERN (insn), this_loop);
04aa27b1 2793
2794 /* See if this is an unconditional branch outside the loop. */
ec7d7ef9 2795 if (this_loop
04aa27b1 2796 && (GET_CODE (PATTERN (insn)) == RETURN
b2816317 2797 || (any_uncondjump_p (insn)
2798 && onlyjump_p (insn)
ec7d7ef9 2799 && (uid_loop[INSN_UID (JUMP_LABEL (insn))]
2800 != this_loop)))
440fc8ec 2801 && get_max_uid () < max_uid_for_loop)
04aa27b1 2802 {
2803 rtx p;
2804 rtx our_next = next_real_insn (insn);
b067e925 2805 rtx last_insn_to_move = NEXT_INSN (insn);
ec7d7ef9 2806 struct loop *dest_loop;
2807 struct loop *outer_loop = NULL;
04aa27b1 2808
2809 /* Go backwards until we reach the start of the loop, a label,
2810 or a JUMP_INSN. */
2811 for (p = PREV_INSN (insn);
2812 GET_CODE (p) != CODE_LABEL
2813 && ! (GET_CODE (p) == NOTE
2814 && NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
2815 && GET_CODE (p) != JUMP_INSN;
2816 p = PREV_INSN (p))
2817 ;
2818
87729415 2819 /* Check for the case where we have a jump to an inner nested
2820 loop, and do not perform the optimization in that case. */
2821
3e6a1a9e 2822 if (JUMP_LABEL (insn))
87729415 2823 {
ec7d7ef9 2824 dest_loop = uid_loop[INSN_UID (JUMP_LABEL (insn))];
2825 if (dest_loop)
3e6a1a9e 2826 {
ec7d7ef9 2827 for (outer_loop = dest_loop; outer_loop;
2828 outer_loop = outer_loop->outer)
2829 if (outer_loop == this_loop)
3e6a1a9e 2830 break;
2831 }
87729415 2832 }
87729415 2833
a5c87b4e 2834 /* Make sure that the target of P is within the current loop. */
2835
a4e33616 2836 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
ec7d7ef9 2837 && uid_loop[INSN_UID (JUMP_LABEL (p))] != this_loop)
2838 outer_loop = this_loop;
a5c87b4e 2839
04aa27b1 2840 /* If we stopped on a JUMP_INSN to the next insn after INSN,
2841 we have a block of code to try to move.
2842
2843 We look backward and then forward from the target of INSN
2844 to find a BARRIER at the same loop depth as the target.
2845 If we find such a BARRIER, we make a new label for the start
2846 of the block, invert the jump in P and point it to that label,
2847 and move the block of code to the spot we found. */
2848
ec7d7ef9 2849 if (! outer_loop
87729415 2850 && GET_CODE (p) == JUMP_INSN
17713c68 2851 && JUMP_LABEL (p) != 0
2852 /* Just ignore jumps to labels that were never emitted.
2853 These always indicate compilation errors. */
2854 && INSN_UID (JUMP_LABEL (p)) != 0
b2816317 2855 && any_condjump_p (p) && onlyjump_p (p)
b067e925 2856 && next_real_insn (JUMP_LABEL (p)) == our_next
2857 /* If it's not safe to move the sequence, then we
2858 mustn't try. */
4a8f0b95 2859 && insns_safe_to_move_p (p, NEXT_INSN (insn),
b067e925 2860 &last_insn_to_move))
04aa27b1 2861 {
2862 rtx target
2863 = JUMP_LABEL (insn) ? JUMP_LABEL (insn) : get_last_insn ();
ec7d7ef9 2864 struct loop *target_loop = uid_loop[INSN_UID (target)];
1d322be1 2865 rtx loc, loc2;
7ae0043b 2866 rtx tmp;
2867
2868 /* Search for possible garbage past the conditional jumps
bd9587cb 2869 and look for the last barrier. */
7ae0043b 2870 for (tmp = last_insn_to_move;
2871 tmp && GET_CODE (tmp) != CODE_LABEL; tmp = NEXT_INSN (tmp))
2872 if (GET_CODE (tmp) == BARRIER)
2873 last_insn_to_move = tmp;
04aa27b1 2874
2875 for (loc = target; loc; loc = PREV_INSN (loc))
2876 if (GET_CODE (loc) == BARRIER
1d322be1 2877 /* Don't move things inside a tablejump. */
2878 && ((loc2 = next_nonnote_insn (loc)) == 0
2879 || GET_CODE (loc2) != CODE_LABEL
2880 || (loc2 = next_nonnote_insn (loc2)) == 0
2881 || GET_CODE (loc2) != JUMP_INSN
2882 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2883 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
ec7d7ef9 2884 && uid_loop[INSN_UID (loc)] == target_loop)
04aa27b1 2885 break;
2886
2887 if (loc == 0)
2888 for (loc = target; loc; loc = NEXT_INSN (loc))
2889 if (GET_CODE (loc) == BARRIER
1d322be1 2890 /* Don't move things inside a tablejump. */
2891 && ((loc2 = next_nonnote_insn (loc)) == 0
2892 || GET_CODE (loc2) != CODE_LABEL
2893 || (loc2 = next_nonnote_insn (loc2)) == 0
2894 || GET_CODE (loc2) != JUMP_INSN
2895 || (GET_CODE (PATTERN (loc2)) != ADDR_VEC
2896 && GET_CODE (PATTERN (loc2)) != ADDR_DIFF_VEC))
ec7d7ef9 2897 && uid_loop[INSN_UID (loc)] == target_loop)
04aa27b1 2898 break;
2899
2900 if (loc)
2901 {
2902 rtx cond_label = JUMP_LABEL (p);
2903 rtx new_label = get_label_after (p);
2904
2905 /* Ensure our label doesn't go away. */
2906 LABEL_NUSES (cond_label)++;
2907
ec7d7ef9 2908 /* Verify that uid_loop is large enough and that
a92771b8 2909 we can invert P. */
f8cacb57 2910 if (invert_jump (p, new_label, 1))
2911 {
2912 rtx q, r;
2913
2914 /* If no suitable BARRIER was found, create a suitable
2915 one before TARGET. Since TARGET is a fall through
edc2a478 2916 path, we'll need to insert a jump around our block
20dd417a 2917 and add a BARRIER before TARGET.
f8cacb57 2918
2919 This creates an extra unconditional jump outside
2920 the loop. However, the benefits of removing rarely
2921 executed instructions from inside the loop usually
 2922	     outweigh the cost of the extra unconditional jump
2923 outside the loop. */
2924 if (loc == 0)
2925 {
2926 rtx temp;
2927
2928 temp = gen_jump (JUMP_LABEL (insn));
2929 temp = emit_jump_insn_before (temp, target);
2930 JUMP_LABEL (temp) = JUMP_LABEL (insn);
2931 LABEL_NUSES (JUMP_LABEL (insn))++;
2932 loc = emit_barrier_before (target);
2933 }
2934
2935 /* Include the BARRIER after INSN and copy the
2936 block after LOC. */
87dc0300 2937 if (squeeze_notes (&new_label, &last_insn_to_move))
2938 abort ();
f8cacb57 2939 reorder_insns (new_label, last_insn_to_move, loc);
2940
2941 /* All those insns are now in TARGET_LOOP. */
4a8f0b95 2942 for (q = new_label;
f8cacb57 2943 q != NEXT_INSN (last_insn_to_move);
2944 q = NEXT_INSN (q))
2945 uid_loop[INSN_UID (q)] = target_loop;
2946
2947 /* The label jumped to by INSN is no longer a loop
2948 exit. Unless INSN does not have a label (e.g.,
2949 it is a RETURN insn), search loop->exit_labels
2950 to find its label_ref, and remove it. Also turn
2951 off LABEL_OUTSIDE_LOOP_P bit. */
2952 if (JUMP_LABEL (insn))
2953 {
72ae569f 2954 for (q = 0, r = this_loop->exit_labels;
2955 r;
2956 q = r, r = LABEL_NEXTREF (r))
f8cacb57 2957 if (XEXP (r, 0) == JUMP_LABEL (insn))
2958 {
2959 LABEL_OUTSIDE_LOOP_P (r) = 0;
2960 if (q)
2961 LABEL_NEXTREF (q) = LABEL_NEXTREF (r);
2962 else
2963 this_loop->exit_labels = LABEL_NEXTREF (r);
2964 break;
2965 }
2966
2967 for (loop = this_loop; loop && loop != target_loop;
2968 loop = loop->outer)
2969 loop->exit_count--;
2970
2971 /* If we didn't find it, then something is
2972 wrong. */
2973 if (! r)
2974 abort ();
2975 }
2976
2977 /* P is now a jump outside the loop, so it must be put
2978 in loop->exit_labels, and marked as such.
2979 The easiest way to do this is to just call
2980 mark_loop_jump again for P. */
2981 mark_loop_jump (PATTERN (p), this_loop);
2982
2983 /* If INSN now jumps to the insn after it,
2984 delete INSN. */
2985 if (JUMP_LABEL (insn) != 0
2986 && (next_real_insn (JUMP_LABEL (insn))
2987 == next_real_insn (insn)))
e4bf866d 2988 delete_related_insns (insn);
f8cacb57 2989 }
04aa27b1 2990
2991 /* Continue the loop after where the conditional
2992 branch used to jump, since the only branch insn
2993 in the block (if it still remains) is an inter-loop
2994 branch and hence needs no processing. */
2995 insn = NEXT_INSN (cond_label);
2996
2997 if (--LABEL_NUSES (cond_label) == 0)
e4bf866d 2998 delete_related_insns (cond_label);
02e97247 2999
3000 /* This loop will be continued with NEXT_INSN (insn). */
3001 insn = PREV_INSN (insn);
04aa27b1 3002 }
3003 }
3004 }
3005 }
3006}
3007
3008/* If any label in X jumps to a loop different from LOOP_NUM and any of the
3009 loops it is contained in, mark the target loop invalid.
3010
3011 For speed, we assume that X is part of a pattern of a JUMP_INSN. */
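/* When a LABEL_REF target lies outside LOOP, it is chained onto
   loop->exit_labels through LABEL_NEXTREF and exit_count is bumped
   once per loop level exited; e.g. a jump leaving a doubly nested
   loop in one step increments the exit_count of both enclosing
   loops.  */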
3012
3013static void
3ad4992f 3014mark_loop_jump (rtx x, struct loop *loop)
04aa27b1 3015{
ec7d7ef9 3016 struct loop *dest_loop;
3017 struct loop *outer_loop;
04aa27b1 3018 int i;
3019
3020 switch (GET_CODE (x))
3021 {
3022 case PC:
3023 case USE:
3024 case CLOBBER:
3025 case REG:
3026 case MEM:
3027 case CONST_INT:
3028 case CONST_DOUBLE:
3029 case RETURN:
3030 return;
3031
3032 case CONST:
3033 /* There could be a label reference in here. */
ec7d7ef9 3034 mark_loop_jump (XEXP (x, 0), loop);
04aa27b1 3035 return;
3036
3037 case PLUS:
3038 case MINUS:
3039 case MULT:
ec7d7ef9 3040 mark_loop_jump (XEXP (x, 0), loop);
3041 mark_loop_jump (XEXP (x, 1), loop);
04aa27b1 3042 return;
3043
55fff164 3044 case LO_SUM:
3045 /* This may refer to a LABEL_REF or SYMBOL_REF. */
ec7d7ef9 3046 mark_loop_jump (XEXP (x, 1), loop);
55fff164 3047 return;
3048
04aa27b1 3049 case SIGN_EXTEND:
3050 case ZERO_EXTEND:
ec7d7ef9 3051 mark_loop_jump (XEXP (x, 0), loop);
04aa27b1 3052 return;
3053
3054 case LABEL_REF:
ec7d7ef9 3055 dest_loop = uid_loop[INSN_UID (XEXP (x, 0))];
04aa27b1 3056
3057 /* Link together all labels that branch outside the loop. This
3058 is used by final_[bg]iv_value and the loop unrolling code. Also
3059 mark this LABEL_REF so we know that this branch should predict
3060 false. */
3061
87729415 3062 /* A check to make sure the label is not in an inner nested loop,
3063 since this does not count as a loop exit. */
ec7d7ef9 3064 if (dest_loop)
87729415 3065 {
ec7d7ef9 3066 for (outer_loop = dest_loop; outer_loop;
3067 outer_loop = outer_loop->outer)
3068 if (outer_loop == loop)
87729415 3069 break;
3070 }
3071 else
ec7d7ef9 3072 outer_loop = NULL;
87729415 3073
ec7d7ef9 3074 if (loop && ! outer_loop)
04aa27b1 3075 {
3076 LABEL_OUTSIDE_LOOP_P (x) = 1;
ec7d7ef9 3077 LABEL_NEXTREF (x) = loop->exit_labels;
3078 loop->exit_labels = x;
d1539241 3079
ec7d7ef9 3080 for (outer_loop = loop;
3081 outer_loop && outer_loop != dest_loop;
3082 outer_loop = outer_loop->outer)
3083 outer_loop->exit_count++;
04aa27b1 3084 }
3085
3086 /* If this is inside a loop, but not in the current loop or one enclosed
3087 by it, it invalidates at least one loop. */
3088
ec7d7ef9 3089 if (! dest_loop)
04aa27b1 3090 return;
3091
3092 /* We must invalidate every nested loop containing the target of this
3093 label, except those that also contain the jump insn. */
3094
ec7d7ef9 3095 for (; dest_loop; dest_loop = dest_loop->outer)
04aa27b1 3096 {
3097 /* Stop when we reach a loop that also contains the jump insn. */
ec7d7ef9 3098 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
04aa27b1 3099 if (dest_loop == outer_loop)
3100 return;
3101
3102 /* If we get here, we know we need to invalidate a loop. */
ec7d7ef9 3103 if (loop_dump_stream && ! dest_loop->invalid)
04aa27b1 3104 fprintf (loop_dump_stream,
3105 "\nLoop at %d ignored due to multiple entry points.\n",
ec7d7ef9 3106 INSN_UID (dest_loop->start));
4a8f0b95 3107
ec7d7ef9 3108 dest_loop->invalid = 1;
04aa27b1 3109 }
3110 return;
3111
3112 case SET:
3113 /* If this is not setting pc, ignore. */
3114 if (SET_DEST (x) == pc_rtx)
ec7d7ef9 3115 mark_loop_jump (SET_SRC (x), loop);
04aa27b1 3116 return;
3117
3118 case IF_THEN_ELSE:
ec7d7ef9 3119 mark_loop_jump (XEXP (x, 1), loop);
3120 mark_loop_jump (XEXP (x, 2), loop);
04aa27b1 3121 return;
3122
3123 case PARALLEL:
3124 case ADDR_VEC:
3125 for (i = 0; i < XVECLEN (x, 0); i++)
ec7d7ef9 3126 mark_loop_jump (XVECEXP (x, 0, i), loop);
04aa27b1 3127 return;
3128
3129 case ADDR_DIFF_VEC:
3130 for (i = 0; i < XVECLEN (x, 1); i++)
ec7d7ef9 3131 mark_loop_jump (XVECEXP (x, 1, i), loop);
04aa27b1 3132 return;
3133
3134 default:
55fff164 3135 /* Strictly speaking this is not a jump into the loop, only a possible
3136 jump out of the loop. However, we have no way to link the destination
3137 of this jump onto the list of exit labels. To be safe we mark this
3138 loop and any containing loops as invalid. */
ec7d7ef9 3139 if (loop)
d1539241 3140 {
ec7d7ef9 3141 for (outer_loop = loop; outer_loop; outer_loop = outer_loop->outer)
55fff164 3142 {
ec7d7ef9 3143 if (loop_dump_stream && ! outer_loop->invalid)
55fff164 3144 fprintf (loop_dump_stream,
3145 "\nLoop at %d ignored due to unknown exit jump.\n",
ec7d7ef9 3146 INSN_UID (outer_loop->start));
3147 outer_loop->invalid = 1;
55fff164 3148 }
d1539241 3149 }
779c10db 3150 return;
04aa27b1 3151 }
3152}
3153\f
3154/* Return nonzero if there is a label in the range from
 3155   insn INSN to and including the insn whose luid is END.
3156 INSN must have an assigned luid (i.e., it must not have
3157 been previously created by loop.c). */
3158
3159static int
3ad4992f 3160labels_in_range_p (rtx insn, int end)
04aa27b1 3161{
3162 while (insn && INSN_LUID (insn) <= end)
3163 {
3164 if (GET_CODE (insn) == CODE_LABEL)
3165 return 1;
3166 insn = NEXT_INSN (insn);
3167 }
3168
3169 return 0;
3170}
3171
3172/* Record that a memory reference X is being set. */
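/* This is a note_stores callback; this file invokes it as, e.g.,

       note_stores (PATTERN (insn), note_addr_stored, loop_info);

   so DATA arrives as the struct loop_info pointer unpacked below.  */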
3173
3174static void
3ad4992f 3175note_addr_stored (rtx x, rtx y ATTRIBUTE_UNUSED,
3176 void *data ATTRIBUTE_UNUSED)
04aa27b1 3177{
2ff1269a 3178 struct loop_info *loop_info = data;
3179
04aa27b1 3180 if (x == 0 || GET_CODE (x) != MEM)
3181 return;
3182
3183 /* Count number of memory writes.
3184 This affects heuristics in strength_reduce. */
2ff1269a 3185 loop_info->num_mem_sets++;
72ae569f 3186
04fd8429 3187 /* BLKmode MEM means all memory is clobbered. */
2ff1269a 3188 if (GET_MODE (x) == BLKmode)
155b05dc 3189 {
3190 if (RTX_UNCHANGING_P (x))
2ff1269a 3191 loop_info->unknown_constant_address_altered = 1;
155b05dc 3192 else
2ff1269a 3193 loop_info->unknown_address_altered = 1;
72ae569f 3194
155b05dc 3195 return;
3196 }
72ae569f 3197
3198 loop_info->store_mems = gen_rtx_EXPR_LIST (VOIDmode, x,
2ff1269a 3199 loop_info->store_mems);
04aa27b1 3200}
090651ad 3201
3202/* X is a value modified by an INSN that references a biv inside a loop
 3203   exit test (i.e., X is somehow related to the value of the biv).  If X
3204 is a pseudo that is used more than once, then the biv is (effectively)
5da8f84e 3205 used more than once. DATA is a pointer to a loop_regs structure. */
090651ad 3206
3207static void
3ad4992f 3208note_set_pseudo_multiple_uses (rtx x, rtx y ATTRIBUTE_UNUSED, void *data)
090651ad 3209{
e9b78d43 3210 struct loop_regs *regs = (struct loop_regs *) data;
3211
090651ad 3212 if (x == 0)
3213 return;
3214
3215 while (GET_CODE (x) == STRICT_LOW_PART
3216 || GET_CODE (x) == SIGN_EXTRACT
3217 || GET_CODE (x) == ZERO_EXTRACT
3218 || GET_CODE (x) == SUBREG)
3219 x = XEXP (x, 0);
3220
3221 if (GET_CODE (x) != REG || REGNO (x) < FIRST_PSEUDO_REGISTER)
3222 return;
3223
3224 /* If we do not have usage information, or if we know the register
3225 is used more than once, note that fact for check_dbra_loop. */
3226 if (REGNO (x) >= max_reg_before_loop
05cb4e54 3227 || ! regs->array[REGNO (x)].single_usage
3228 || regs->array[REGNO (x)].single_usage == const0_rtx)
5da8f84e 3229 regs->multiple_uses = 1;
090651ad 3230}
04aa27b1 3231\f
3232/* Return nonzero if the rtx X is invariant over the current loop.
3233
3234 The value is 2 if we refer to something only conditionally invariant.
3235
229e30fe 3236 A memory ref is invariant if it is not volatile and does not conflict
2ff1269a 3237 with anything stored in `loop_info->store_mems'. */
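/* An illustrative source-level sketch (this function actually inspects
   the RTL form): in

     for (i = 0; i < n; i++)
       a[i] = x + y;

   the expression `x + y' is invariant over the loop provided neither
   `x' nor `y' is assigned within it, whereas `a[i]' is not, since `i'
   changes on every iteration.  */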
04aa27b1 3238
3239int
3ad4992f 3240loop_invariant_p (const struct loop *loop, rtx x)
04aa27b1 3241{
2ff1269a 3242 struct loop_info *loop_info = LOOP_INFO (loop);
e9b78d43 3243 struct loop_regs *regs = LOOP_REGS (loop);
19cb6b50 3244 int i;
3245 enum rtx_code code;
3246 const char *fmt;
04aa27b1 3247 int conditional = 0;
8e34e843 3248 rtx mem_list_entry;
04aa27b1 3249
3250 if (x == 0)
3251 return 1;
3252 code = GET_CODE (x);
3253 switch (code)
3254 {
3255 case CONST_INT:
3256 case CONST_DOUBLE:
3257 case SYMBOL_REF:
3258 case CONST:
3259 return 1;
3260
3261 case LABEL_REF:
3262 /* A LABEL_REF is normally invariant, however, if we are unrolling
3263 loops, and this label is inside the loop, then it isn't invariant.
3264 This is because each unrolled copy of the loop body will have
3265 a copy of this label. If this was invariant, then an insn loading
3266 the address of this label into a register might get moved outside
3267 the loop, and then each loop body would end up using the same label.
3268
3269 We don't know the loop bounds here though, so just fail for all
3270 labels. */
ce32fe65 3271 if (flag_old_unroll_loops)
04aa27b1 3272 return 0;
3273 else
3274 return 1;
3275
3276 case PC:
3277 case CC0:
3278 case UNSPEC_VOLATILE:
3279 return 0;
3280
3281 case REG:
3282 /* We used to check RTX_UNCHANGING_P (x) here, but that is invalid
3283 since the reg might be set by initialization within the loop. */
695dc1a8 3284
3285 if ((x == frame_pointer_rtx || x == hard_frame_pointer_rtx
930cea6a 3286 || x == arg_pointer_rtx || x == pic_offset_table_rtx)
695dc1a8 3287 && ! current_function_has_nonlocal_goto)
04aa27b1 3288 return 1;
695dc1a8 3289
15fc3eb7 3290 if (LOOP_INFO (loop)->has_call
04aa27b1 3291 && REGNO (x) < FIRST_PSEUDO_REGISTER && call_used_regs[REGNO (x)])
3292 return 0;
695dc1a8 3293
b2114062 3294 /* Out-of-range regs can occur when we are called from unrolling.
3295 These have always been created by the unroller and are set in
6a8fa8e2 3296 the loop, hence are never invariant. */
b2114062 3297
3473aefe 3298 if (REGNO (x) >= (unsigned) regs->num)
b2114062 3299 return 0;
3300
05cb4e54 3301 if (regs->array[REGNO (x)].set_in_loop < 0)
04aa27b1 3302 return 2;
695dc1a8 3303
05cb4e54 3304 return regs->array[REGNO (x)].set_in_loop == 0;
04aa27b1 3305
3306 case MEM:
08c6f23a 3307 /* Volatile memory references must be rejected. Do this before
3308 checking for read-only items, so that volatile read-only items
3309 will be rejected also. */
3310 if (MEM_VOLATILE_P (x))
3311 return 0;
3312
04aa27b1 3313 /* See if there is any dependence between a store and this load. */
2ff1269a 3314 mem_list_entry = loop_info->store_mems;
8e34e843 3315 while (mem_list_entry)
3316 {
3317 if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
3318 x, rtx_varies_p))
3319 return 0;
155b05dc 3320
8e34e843 3321 mem_list_entry = XEXP (mem_list_entry, 1);
3322 }
04aa27b1 3323
3324 /* It's not invalidated by a store in memory
3325 but we must still verify the address is invariant. */
3326 break;
3327
3328 case ASM_OPERANDS:
3329 /* Don't mess with insns declared volatile. */
3330 if (MEM_VOLATILE_P (x))
3331 return 0;
0dbd1c74 3332 break;
4a8f0b95 3333
0dbd1c74 3334 default:
3335 break;
04aa27b1 3336 }
3337
3338 fmt = GET_RTX_FORMAT (code);
3339 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3340 {
3341 if (fmt[i] == 'e')
3342 {
15fc3eb7 3343 int tem = loop_invariant_p (loop, XEXP (x, i));
04aa27b1 3344 if (tem == 0)
3345 return 0;
3346 if (tem == 2)
3347 conditional = 1;
3348 }
3349 else if (fmt[i] == 'E')
3350 {
19cb6b50 3351 int j;
04aa27b1 3352 for (j = 0; j < XVECLEN (x, i); j++)
3353 {
15fc3eb7 3354 int tem = loop_invariant_p (loop, XVECEXP (x, i, j));
04aa27b1 3355 if (tem == 0)
3356 return 0;
3357 if (tem == 2)
3358 conditional = 1;
3359 }
3360
3361 }
3362 }
3363
3364 return 1 + conditional;
3365}
04aa27b1 3366\f
3367/* Return nonzero if all the insns in the loop that set REG
3368 are INSN and the immediately following insns,
3369 and if each of those insns sets REG in an invariant way
3370 (not counting uses of REG in them).
3371
3372 The value is 2 if some of these insns are only conditionally invariant.
3373
3374 We assume that INSN itself is the first set of REG
3375 and that its source is invariant. */
3376
3377static int
3ad4992f 3378consec_sets_invariant_p (const struct loop *loop, rtx reg, int n_sets,
3379 rtx insn)
04aa27b1 3380{
e9b78d43 3381 struct loop_regs *regs = LOOP_REGS (loop);
02e7a332 3382 rtx p = insn;
3383 unsigned int regno = REGNO (reg);
04aa27b1 3384 rtx temp;
3385 /* Number of sets we have to insist on finding after INSN. */
3386 int count = n_sets - 1;
05cb4e54 3387 int old = regs->array[regno].set_in_loop;
04aa27b1 3388 int value = 0;
3389 int this;
3390
3391 /* If N_SETS hit the limit, we can't rely on its value. */
3392 if (n_sets == 127)
3393 return 0;
3394
05cb4e54 3395 regs->array[regno].set_in_loop = 0;
04aa27b1 3396
3397 while (count > 0)
3398 {
19cb6b50 3399 enum rtx_code code;
04aa27b1 3400 rtx set;
3401
3402 p = NEXT_INSN (p);
3403 code = GET_CODE (p);
3404
3398e91d 3405 /* If this is a library call, skip to the end of it. */
df38d76e 3406 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
04aa27b1 3407 p = XEXP (temp, 0);
3408
3409 this = 0;
3410 if (code == INSN
3411 && (set = single_set (p))
3412 && GET_CODE (SET_DEST (set)) == REG
3413 && REGNO (SET_DEST (set)) == regno)
3414 {
15fc3eb7 3415 this = loop_invariant_p (loop, SET_SRC (set));
04aa27b1 3416 if (this != 0)
3417 value |= this;
7d27e4c9 3418 else if ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX)))
04aa27b1 3419 {
949094da 3420 /* If this is a libcall, then any invariant REG_EQUAL note is OK.
3421 If this is an ordinary insn, then only CONSTANT_P REG_EQUAL
3422 notes are OK. */
3423 this = (CONSTANT_P (XEXP (temp, 0))
3424 || (find_reg_note (p, REG_RETVAL, NULL_RTX)
15fc3eb7 3425 && loop_invariant_p (loop, XEXP (temp, 0))));
04aa27b1 3426 if (this != 0)
3427 value |= this;
3428 }
3429 }
3430 if (this != 0)
3431 count--;
3432 else if (code != NOTE)
3433 {
05cb4e54 3434 regs->array[regno].set_in_loop = old;
04aa27b1 3435 return 0;
3436 }
3437 }
3438
05cb4e54 3439 regs->array[regno].set_in_loop = old;
15fc3eb7 3440 /* If loop_invariant_p ever returned 2, we return 2. */
04aa27b1 3441 return 1 + (value & 2);
3442}
3443
3444#if 0
3445/* I don't think this condition is sufficient to allow INSN
3446 to be moved, so we no longer test it. */
3447
3448/* Return 1 if all insns in the basic block of INSN and following INSN
3449 that set REG are invariant according to TABLE. */
3450
3451static int
3ad4992f 3452all_sets_invariant_p (rtx reg, rtx insn, short *table)
04aa27b1 3453{
19cb6b50 3454 rtx p = insn;
3455 int regno = REGNO (reg);
04aa27b1 3456
3457 while (1)
3458 {
19cb6b50 3459 enum rtx_code code;
04aa27b1 3460 p = NEXT_INSN (p);
3461 code = GET_CODE (p);
3462 if (code == CODE_LABEL || code == JUMP_INSN)
3463 return 1;
3464 if (code == INSN && GET_CODE (PATTERN (p)) == SET
3465 && GET_CODE (SET_DEST (PATTERN (p))) == REG
3466 && REGNO (SET_DEST (PATTERN (p))) == regno)
3467 {
15fc3eb7 3468 if (! loop_invariant_p (loop, SET_SRC (PATTERN (p)), table))
04aa27b1 3469 return 0;
3470 }
3471 }
3472}
3473#endif /* 0 */
3474\f
 3475/* Look at all uses (not sets) of registers in X. For each register, set
 3476 REGS->array[REGNO].single_usage to INSN if this is its only use seen so far,
 3477 or to const0_rtx if it was already used in a different insn (0 means unused). */
3478
3479static void
3ad4992f 3480find_single_use_in_loop (struct loop_regs *regs, rtx insn, rtx x)
04aa27b1 3481{
3482 enum rtx_code code = GET_CODE (x);
d2ca078f 3483 const char *fmt = GET_RTX_FORMAT (code);
04aa27b1 3484 int i, j;
3485
3486 if (code == REG)
05cb4e54 3487 regs->array[REGNO (x)].single_usage
3488 = (regs->array[REGNO (x)].single_usage != 0
3489 && regs->array[REGNO (x)].single_usage != insn)
04aa27b1 3490 ? const0_rtx : insn;
3491
3492 else if (code == SET)
3493 {
3494 /* Don't count SET_DEST if it is a REG; otherwise count things
3495 in SET_DEST because if a register is partially modified, it won't
4a8f0b95 3496 show up as a potential movable so we don't care how USAGE is set
04aa27b1 3497 for it. */
3498 if (GET_CODE (SET_DEST (x)) != REG)
05cb4e54 3499 find_single_use_in_loop (regs, insn, SET_DEST (x));
3500 find_single_use_in_loop (regs, insn, SET_SRC (x));
04aa27b1 3501 }
3502 else
3503 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3504 {
3505 if (fmt[i] == 'e' && XEXP (x, i) != 0)
05cb4e54 3506 find_single_use_in_loop (regs, insn, XEXP (x, i));
04aa27b1 3507 else if (fmt[i] == 'E')
3508 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
05cb4e54 3509 find_single_use_in_loop (regs, insn, XVECEXP (x, i, j));
04aa27b1 3510 }
3511}
3512\f
0fa3405d 3513/* Count and record any set in X which is contained in INSN. Update
05cb4e54 3514 REGS->array[I].MAY_NOT_OPTIMIZE and LAST_SET for any register I set
3515 in X. */
0fa3405d 3516
3517static void
3ad4992f 3518count_one_set (struct loop_regs *regs, rtx insn, rtx x, rtx *last_set)
0fa3405d 3519{
3520 if (GET_CODE (x) == CLOBBER && GET_CODE (XEXP (x, 0)) == REG)
3521 /* Don't move a reg that has an explicit clobber.
3522 It's not worth the pain to try to do it correctly. */
05cb4e54 3523 regs->array[REGNO (XEXP (x, 0))].may_not_optimize = 1;
0fa3405d 3524
3525 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
3526 {
3527 rtx dest = SET_DEST (x);
3528 while (GET_CODE (dest) == SUBREG
3529 || GET_CODE (dest) == ZERO_EXTRACT
3530 || GET_CODE (dest) == SIGN_EXTRACT
3531 || GET_CODE (dest) == STRICT_LOW_PART)
3532 dest = XEXP (dest, 0);
3533 if (GET_CODE (dest) == REG)
3534 {
8096c8ed 3535 int i;
19cb6b50 3536 int regno = REGNO (dest);
4b6dfc37 3537 for (i = 0; i < LOOP_REGNO_NREGS (regno, dest); i++)
8096c8ed 3538 {
3539 /* If this is the first setting of this reg
 3540 in the current basic block, and it was set before,
3541 it must be set in two basic blocks, so it cannot
3542 be moved out of the loop. */
3543 if (regs->array[regno].set_in_loop > 0
374fafbc 3544 && last_set[regno] == 0)
8096c8ed 3545 regs->array[regno+i].may_not_optimize = 1;
 3546 /* If this is not the first setting in the current basic block,
 3547 see if the reg was used between the previous setting and this one.
 3548 If so, neither one can be moved. */
3549 if (last_set[regno] != 0
3550 && reg_used_between_p (dest, last_set[regno], insn))
3551 regs->array[regno+i].may_not_optimize = 1;
3552 if (regs->array[regno+i].set_in_loop < 127)
3553 ++regs->array[regno+i].set_in_loop;
3554 last_set[regno+i] = insn;
3555 }
0fa3405d 3556 }
3557 }
3558}
04aa27b1 3559\f
15fc3eb7 3560/* Given a loop that is bounded by LOOP->START and LOOP->END and that
3561 is entered at LOOP->SCAN_START, return 1 if the register set in SET
3562 contained in insn INSN is used by any insn that precedes INSN in
3563 cyclic order starting from the loop entry point.
04aa27b1 3564
3565 We don't want to use INSN_LUID here because if we restrict INSN to those
3566 that have a valid INSN_LUID, it means we cannot move an invariant out
3567 from an inner loop past two loops. */
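/* An illustrative picture of the cyclic order (hypothetical layout):

     loop->start ... insn A ... loop->scan_start ... insn B ... loop->end

   Scanning begins at SCAN_START, reaches B, wraps from LOOP->END back to
   LOOP->START, and only then reaches A; so B precedes A in cyclic order
   even though A comes first textually.  */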
3568
3569static int
3ad4992f 3570loop_reg_used_before_p (const struct loop *loop, rtx set, rtx insn)
04aa27b1 3571{
3572 rtx reg = SET_DEST (set);
3573 rtx p;
3574
3575 /* Scan forward checking for register usage. If we hit INSN, we
ec7d7ef9 3576 are done. Otherwise, if we hit LOOP->END, wrap around to LOOP->START. */
3577 for (p = loop->scan_start; p != insn; p = NEXT_INSN (p))
04aa27b1 3578 {
9204e736 3579 if (INSN_P (p) && reg_overlap_mentioned_p (reg, PATTERN (p)))
04aa27b1 3580 return 1;
3581
ec7d7ef9 3582 if (p == loop->end)
3583 p = loop->start;
04aa27b1 3584 }
3585
3586 return 0;
3587}
3588\f
ba38e12b 3589
3590/* Information we collect about arrays that we might want to prefetch. */
3591struct prefetch_info
3592{
3593 struct iv_class *class; /* Class this prefetch is based on. */
3594 struct induction *giv; /* GIV this prefetch is based on. */
3595 rtx base_address; /* Start prefetching from this address plus
3596 index. */
3597 HOST_WIDE_INT index;
3598 HOST_WIDE_INT stride; /* Prefetch stride in bytes in each
3599 iteration. */
ee5cad94 3600 unsigned int bytes_accessed; /* Sum of sizes of all accesses to this
ba38e12b 3601 prefetch area in one iteration. */
3602 unsigned int total_bytes; /* Total bytes loop will access in this block.
3603 This is set only for loops with known
3604 iteration counts and is 0xffffffff
3605 otherwise. */
cd6839f2 3606 int prefetch_in_loop; /* Number of prefetch insns in loop. */
3607 int prefetch_before_loop; /* Number of prefetch insns before loop. */
ba38e12b 3608 unsigned int write : 1; /* 1 for read/write prefetches. */
ba38e12b 3609};
3610
3611/* Data used by check_store function. */
3612struct check_store_data
3613{
3614 rtx mem_address;
3615 int mem_write;
3616};
3617
3ad4992f 3618static void check_store (rtx, rtx, void *);
3619static void emit_prefetch_instructions (struct loop *);
3620static int rtx_equal_for_prefetch_p (rtx, rtx);
ba38e12b 3621
3622/* Set mem_write when mem_address is found. Used as callback to
3623 note_stores. */
3624static void
3ad4992f 3625check_store (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
ba38e12b 3626{
e17f5b23 3627 struct check_store_data *d = (struct check_store_data *) data;
ba38e12b 3628
3629 if ((GET_CODE (x) == MEM) && rtx_equal_p (d->mem_address, XEXP (x, 0)))
3630 d->mem_write = 1;
3631}
3632\f
3633/* Like rtx_equal_p, but attempts to swap commutative operands. This is
3634 important to get some addresses combined. Later more sophisticated
b903337a 3635 transformations can be added when necessary.
ba38e12b 3636
 3637 ??? The same trick of swapping operands is done in several other places.
 3638 It would be nice to develop some common way to handle this. */
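/* For example, (plus (reg 100) (const_int 4)) and
   (plus (const_int 4) (reg 100)) compare equal here, whereas plain
   rtx_equal_p would reject the pair.  (Register 100 is just an
   illustrative value.)  */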
3639
3640static int
3ad4992f 3641rtx_equal_for_prefetch_p (rtx x, rtx y)
ba38e12b 3642{
3643 int i;
3644 int j;
3645 enum rtx_code code = GET_CODE (x);
3646 const char *fmt;
3647
3648 if (x == y)
3649 return 1;
3650 if (code != GET_CODE (y))
3651 return 0;
3652
3653 code = GET_CODE (x);
3654
3655 if (GET_RTX_CLASS (code) == 'c')
3656 {
3657 return ((rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 0))
3658 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 1)))
3659 || (rtx_equal_for_prefetch_p (XEXP (x, 0), XEXP (y, 1))
3660 && rtx_equal_for_prefetch_p (XEXP (x, 1), XEXP (y, 0))));
3661 }
3662 /* Compare the elements. If any pair of corresponding elements fails to
3663 match, return 0 for the whole thing. */
3664
3665 fmt = GET_RTX_FORMAT (code);
3666 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3667 {
3668 switch (fmt[i])
3669 {
3670 case 'w':
3671 if (XWINT (x, i) != XWINT (y, i))
3672 return 0;
3673 break;
3674
3675 case 'i':
3676 if (XINT (x, i) != XINT (y, i))
3677 return 0;
3678 break;
3679
3680 case 'E':
3681 /* Two vectors must have the same length. */
3682 if (XVECLEN (x, i) != XVECLEN (y, i))
3683 return 0;
3684
3685 /* And the corresponding elements must match. */
3686 for (j = 0; j < XVECLEN (x, i); j++)
3687 if (rtx_equal_for_prefetch_p (XVECEXP (x, i, j),
3688 XVECEXP (y, i, j)) == 0)
3689 return 0;
3690 break;
3691
3692 case 'e':
3693 if (rtx_equal_for_prefetch_p (XEXP (x, i), XEXP (y, i)) == 0)
3694 return 0;
3695 break;
3696
3697 case 's':
3698 if (strcmp (XSTR (x, i), XSTR (y, i)))
3699 return 0;
3700 break;
3701
3702 case 'u':
3703 /* These are just backpointers, so they don't matter. */
3704 break;
3705
3706 case '0':
3707 break;
3708
3709 /* It is believed that rtx's at this level will never
3710 contain anything but integers and other rtx's,
3711 except for within LABEL_REFs and SYMBOL_REFs. */
3712 default:
3713 abort ();
3714 }
3715 }
3716 return 1;
3717}
3718\f
3719/* Remove constant addition value from the expression X (when present)
3720 and return it. */
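/* For example, if *X is (plus (reg 100) (const_int 16)), *X is reduced
   to (reg 100) and 16 is returned; if no constant term is present, 0 is
   returned and *X is left unchanged.  (Illustrative values.)  */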
4022cdd6 3721
ba38e12b 3722static HOST_WIDE_INT
3ad4992f 3723remove_constant_addition (rtx *x)
ba38e12b 3724{
3725 HOST_WIDE_INT addval = 0;
4022cdd6 3726 rtx exp = *x;
ba38e12b 3727
56b6134d 3728 /* Avoid clobbering a shared CONST expression. */
ba38e12b 3729 if (GET_CODE (exp) == CONST)
56b6134d 3730 {
3731 if (GET_CODE (XEXP (exp, 0)) == PLUS
3732 && GET_CODE (XEXP (XEXP (exp, 0), 0)) == SYMBOL_REF
3733 && GET_CODE (XEXP (XEXP (exp, 0), 1)) == CONST_INT)
3734 {
3735 *x = XEXP (XEXP (exp, 0), 0);
3736 return INTVAL (XEXP (XEXP (exp, 0), 1));
3737 }
3738 return 0;
3739 }
3740
ba38e12b 3741 if (GET_CODE (exp) == CONST_INT)
3742 {
3743 addval = INTVAL (exp);
3744 *x = const0_rtx;
3745 }
4022cdd6 3746
ba38e12b 3747 /* For a PLUS expression, recurse on both operands. */
3748 else if (GET_CODE (exp) == PLUS)
3749 {
3750 addval += remove_constant_addition (&XEXP (exp, 0));
3751 addval += remove_constant_addition (&XEXP (exp, 1));
4022cdd6 3752
 3753 /* If either operand was reduced to a constant, remove the leftover
 3754 zero from the expression. */
ba38e12b 3755 if (XEXP (exp, 0) == const0_rtx)
8851e806 3756 *x = XEXP (exp, 1);
ba38e12b 3757 else if (XEXP (exp, 1) == const0_rtx)
8851e806 3758 *x = XEXP (exp, 0);
ba38e12b 3759 }
4022cdd6 3760
ba38e12b 3761 return addval;
3762}
3763
3764/* Attempt to identify accesses to arrays that are most likely to cause cache
3765 misses, and emit prefetch instructions a few prefetch blocks forward.
3766
3767 To detect the arrays we use the GIV information that was collected by the
3768 strength reduction pass.
3769
3770 The prefetch instructions are generated after the GIV information is done
3771 and before the strength reduction process. The new GIVs are injected into
3772 the strength reduction tables, so the prefetch addresses are optimized as
3773 well.
3774
3775 GIVs are split into base address, stride, and constant addition values.
3776 GIVs with the same address, stride and close addition values are combined
3777 into a single prefetch. Also writes to GIVs are detected, so that prefetch
3778 for write instructions can be used for the block we write to, on machines
3779 that support write prefetches.
3780
3781 Several heuristics are used to determine when to prefetch. They are
c0b8fbcb 3782 controlled by defined symbols that can be overridden for each target. */
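/* As an illustrative sketch, consider a source loop such as

     for (i = 0; i < n; i++)
       sum += p[i + 4];

   with 4-byte elements: the address giv splits into base address `p',
   stride 4 (bytes per iteration), and constant addition 16.  A second
   access `p[i + 6]' shares the base and stride and differs only in the
   addition (24), so the two would be merged into one prefetch stream.  */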
4022cdd6 3783
ba38e12b 3784static void
3ad4992f 3785emit_prefetch_instructions (struct loop *loop)
ba38e12b 3786{
3787 int num_prefetches = 0;
3788 int num_real_prefetches = 0;
3789 int num_real_write_prefetches = 0;
cd6839f2 3790 int num_prefetches_before = 0;
3791 int num_write_prefetches_before = 0;
3792 int ahead = 0;
ba38e12b 3793 int i;
3794 struct iv_class *bl;
3795 struct induction *iv;
3796 struct prefetch_info info[MAX_PREFETCHES];
3797 struct loop_ivs *ivs = LOOP_IVS (loop);
3798
3799 if (!HAVE_prefetch)
3800 return;
3801
 3802 /* Consider only loops without calls. When a call is made, the loop is
 3803 probably slow enough to read the memory in time anyway. */
3804 if (PREFETCH_NO_CALL && LOOP_INFO (loop)->has_call)
3805 {
3806 if (loop_dump_stream)
44d23341 3807 fprintf (loop_dump_stream, "Prefetch: ignoring loop: has call.\n");
4022cdd6 3808
ba38e12b 3809 return;
3810 }
3811
44d23341 3812 /* Don't prefetch in loops known to have few iterations. */
ba38e12b 3813 if (PREFETCH_NO_LOW_LOOPCNT
3814 && LOOP_INFO (loop)->n_iterations
3815 && LOOP_INFO (loop)->n_iterations <= PREFETCH_LOW_LOOPCNT)
3816 {
3817 if (loop_dump_stream)
3818 fprintf (loop_dump_stream,
44d23341 3819 "Prefetch: ignoring loop: not enough iterations.\n");
ba38e12b 3820 return;
3821 }
3822
3823 /* Search all induction variables and pick those interesting for the prefetch
3824 machinery. */
3825 for (bl = ivs->list; bl; bl = bl->next)
3826 {
3827 struct induction *biv = bl->biv, *biv1;
3828 int basestride = 0;
3829
3830 biv1 = biv;
4022cdd6 3831
ba38e12b 3832 /* Expect all BIVs to be executed in each iteration. This makes our
3833 analysis more conservative. */
3834 while (biv1)
3835 {
3836 /* Discard non-constant additions that we can't handle well yet, and
3837 BIVs that are executed multiple times; such BIVs ought to be
3838 handled in the nested loop. We accept not_every_iteration BIVs,
3839 since these only result in larger strides and make our
44d23341 3840 heuristics more conservative. */
ba38e12b 3841 if (GET_CODE (biv->add_val) != CONST_INT)
3842 {
3843 if (loop_dump_stream)
3844 {
4022cdd6 3845 fprintf (loop_dump_stream,
44d23341 3846 "Prefetch: ignoring biv %d: non-constant addition at insn %d:",
ba38e12b 3847 REGNO (biv->src_reg), INSN_UID (biv->insn));
3848 print_rtl (loop_dump_stream, biv->add_val);
3849 fprintf (loop_dump_stream, "\n");
3850 }
3851 break;
3852 }
4022cdd6 3853
ba38e12b 3854 if (biv->maybe_multiple)
3855 {
3856 if (loop_dump_stream)
3857 {
4022cdd6 3858 fprintf (loop_dump_stream,
44d23341 3859 "Prefetch: ignoring biv %d: maybe_multiple at insn %i:",
ba38e12b 3860 REGNO (biv->src_reg), INSN_UID (biv->insn));
3861 print_rtl (loop_dump_stream, biv->add_val);
3862 fprintf (loop_dump_stream, "\n");
3863 }
3864 break;
3865 }
4022cdd6 3866
ba38e12b 3867 basestride += INTVAL (biv1->add_val);
3868 biv1 = biv1->next_iv;
3869 }
4022cdd6 3870
ba38e12b 3871 if (biv1 || !basestride)
3872 continue;
4022cdd6 3873
ba38e12b 3874 for (iv = bl->giv; iv; iv = iv->next_iv)
3875 {
3876 rtx address;
3877 rtx temp;
3878 HOST_WIDE_INT index = 0;
3879 int add = 1;
44d23341 3880 HOST_WIDE_INT stride = 0;
3881 int stride_sign = 1;
ba38e12b 3882 struct check_store_data d;
44d23341 3883 const char *ignore_reason = NULL;
ba38e12b 3884 int size = GET_MODE_SIZE (GET_MODE (iv));
3885
44d23341 3886 /* See whether an induction variable is interesting to us and if
3887 not, report the reason. */
3888 if (iv->giv_type != DEST_ADDR)
3889 ignore_reason = "giv is not a destination address";
3890
3891 /* We are interested only in constant stride memory references
3892 in order to be able to compute density easily. */
3893 else if (GET_CODE (iv->mult_val) != CONST_INT)
3894 ignore_reason = "stride is not constant";
3895
3896 else
3897 {
3898 stride = INTVAL (iv->mult_val) * basestride;
3899 if (stride < 0)
cd6839f2 3900 {
44d23341 3901 stride = -stride;
3902 stride_sign = -1;
cd6839f2 3903 }
44d23341 3904
3905 /* On some targets, reversed order prefetches are not
cd6839f2 3906 worthwhile. */
44d23341 3907 if (PREFETCH_NO_REVERSE_ORDER && stride_sign < 0)
3908 ignore_reason = "reversed order stride";
3909
3910 /* Prefetch of accesses with an extreme stride might not be
cd6839f2 3911 worthwhile, either. */
44d23341 3912 else if (PREFETCH_NO_EXTREME_STRIDE
3913 && stride > PREFETCH_EXTREME_STRIDE)
3914 ignore_reason = "extreme stride";
3915
4022cdd6 3916 /* Ignore GIVs with varying add values; we can't predict the
cd6839f2 3917 value for the next iteration. */
44d23341 3918 else if (!loop_invariant_p (loop, iv->add_val))
3919 ignore_reason = "giv has varying add value";
3920
4022cdd6 3921 /* Ignore GIVs in the nested loops; they ought to have been
cd6839f2 3922 handled already. */
44d23341 3923 else if (iv->maybe_multiple)
3924 ignore_reason = "giv is in nested loop";
3925 }
3926
3927 if (ignore_reason != NULL)
ba38e12b 3928 {
3929 if (loop_dump_stream)
44d23341 3930 fprintf (loop_dump_stream,
3931 "Prefetch: ignoring giv at %d: %s.\n",
3932 INSN_UID (iv->insn), ignore_reason);
ba38e12b 3933 continue;
3934 }
3935
3936 /* Determine the pointer to the basic array we are examining. It is
3937 the sum of the BIV's initial value and the GIV's add_val. */
ba38e12b 3938 address = copy_rtx (iv->add_val);
3939 temp = copy_rtx (bl->initial_value);
3940
3941 address = simplify_gen_binary (PLUS, Pmode, temp, address);
3942 index = remove_constant_addition (&address);
3943
ba38e12b 3944 d.mem_write = 0;
3945 d.mem_address = *iv->location;
4022cdd6 3946
ba38e12b 3947 /* When the GIV is not always executed, we might be better off by
3948 not dirtying the cache pages. */
44d23341 3949 if (PREFETCH_CONDITIONAL || iv->always_executed)
ba38e12b 3950 note_stores (PATTERN (iv->insn), check_store, &d);
cd6839f2 3951 else
3952 {
3953 if (loop_dump_stream)
3954 fprintf (loop_dump_stream, "Prefetch: Ignoring giv at %d: %s\n",
3955 INSN_UID (iv->insn), "in conditional code.");
3956 continue;
3957 }
ba38e12b 3958
3959 /* Attempt to find another prefetch to the same array and see if we
3960 can merge this one. */
3961 for (i = 0; i < num_prefetches; i++)
3962 if (rtx_equal_for_prefetch_p (address, info[i].base_address)
3963 && stride == info[i].stride)
3964 {
 3965 /* If both access the same array (the same location, just with a
 3966 small difference in the constant indexes), merge the prefetches.
 3967 Just do the later one; the earlier one will get prefetched from the
 3968 previous iteration.
44d23341 3969 The artificial threshold should not be too small,
ba38e12b 3970 but also not bigger than the small portion of memory usually
 3971 traversed by a single loop. */
44d23341 3972 if (index >= info[i].index
3973 && index - info[i].index < PREFETCH_EXTREME_DIFFERENCE)
ba38e12b 3974 {
3975 info[i].write |= d.mem_write;
44d23341 3976 info[i].bytes_accessed += size;
ba38e12b 3977 info[i].index = index;
3978 info[i].giv = iv;
3979 info[i].class = bl;
3980 info[num_prefetches].base_address = address;
3981 add = 0;
3982 break;
3983 }
4022cdd6 3984
44d23341 3985 if (index < info[i].index
3986 && info[i].index - index < PREFETCH_EXTREME_DIFFERENCE)
ba38e12b 3987 {
3988 info[i].write |= d.mem_write;
44d23341 3989 info[i].bytes_accessed += size;
ba38e12b 3990 add = 0;
3991 break;
3992 }
3993 }
4022cdd6 3994
ba38e12b 3995 /* Merging failed. */
3996 if (add)
3997 {
3998 info[num_prefetches].giv = iv;
3999 info[num_prefetches].class = bl;
4000 info[num_prefetches].index = index;
4001 info[num_prefetches].stride = stride;
4002 info[num_prefetches].base_address = address;
4003 info[num_prefetches].write = d.mem_write;
44d23341 4004 info[num_prefetches].bytes_accessed = size;
ba38e12b 4005 num_prefetches++;
4006 if (num_prefetches >= MAX_PREFETCHES)
4007 {
4008 if (loop_dump_stream)
4022cdd6 4009 fprintf (loop_dump_stream,
4010 "Maximal number of prefetches exceeded.\n");
ba38e12b 4011 return;
4012 }
4013 }
4014 }
4015 }
4022cdd6 4016
ba38e12b 4017 for (i = 0; i < num_prefetches; i++)
4018 {
44d23341 4019 int density;
4020
4021 /* Attempt to calculate the total number of bytes fetched by all
4022 iterations of the loop. Avoid overflow. */
ba38e12b 4023 if (LOOP_INFO (loop)->n_iterations
cd6839f2 4024 && ((unsigned HOST_WIDE_INT) (0xffffffff / info[i].stride)
4022cdd6 4025 >= LOOP_INFO (loop)->n_iterations))
ba38e12b 4026 info[i].total_bytes = info[i].stride * LOOP_INFO (loop)->n_iterations;
4027 else
4028 info[i].total_bytes = 0xffffffff;
4029
44d23341 4030 density = info[i].bytes_accessed * 100 / info[i].stride;
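      /* An illustrative calculation: a giv reading 4 bytes per iteration
	 with a 16-byte stride gives density 4 * 100 / 16 = 25, i.e. 25%
	 of each stride is actually touched.  */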
4031
4032 /* Prefetch might be worthwhile only when the loads/stores are dense. */
4033 if (PREFETCH_ONLY_DENSE_MEM)
4034 if (density * 256 > PREFETCH_DENSE_MEM * 100
4035 && (info[i].total_bytes / PREFETCH_BLOCK
cd6839f2 4036 >= PREFETCH_BLOCKS_BEFORE_LOOP_MIN))
44d23341 4037 {
4038 info[i].prefetch_before_loop = 1;
4039 info[i].prefetch_in_loop
4040 = (info[i].total_bytes / PREFETCH_BLOCK
cd6839f2 4041 > PREFETCH_BLOCKS_BEFORE_LOOP_MAX);
44d23341 4042 }
cd6839f2 4043 else
44d23341 4044 {
4045 info[i].prefetch_in_loop = 0, info[i].prefetch_before_loop = 0;
4046 if (loop_dump_stream)
4047 fprintf (loop_dump_stream,
4048 "Prefetch: ignoring giv at %d: %d%% density is too low.\n",
4049 INSN_UID (info[i].giv->insn), density);
4050 }
ba38e12b 4051 else
44d23341 4052 info[i].prefetch_in_loop = 1, info[i].prefetch_before_loop = 1;
ba38e12b 4053
cd6839f2 4054 /* Find how many prefetch instructions we'll use within the loop. */
4055 if (info[i].prefetch_in_loop != 0)
ba38e12b 4056 {
cd6839f2 4057 info[i].prefetch_in_loop = ((info[i].stride + PREFETCH_BLOCK - 1)
ba38e12b 4058 / PREFETCH_BLOCK);
cd6839f2 4059 num_real_prefetches += info[i].prefetch_in_loop;
ba38e12b 4060 if (info[i].write)
cd6839f2 4061 num_real_write_prefetches += info[i].prefetch_in_loop;
ba38e12b 4062 }
4063 }
4022cdd6 4064
cd6839f2 4065 /* Determine how many iterations ahead to prefetch within the loop, based
4066 on how many prefetches we currently expect to do within the loop. */
4067 if (num_real_prefetches != 0)
4068 {
4069 if ((ahead = SIMULTANEOUS_PREFETCHES / num_real_prefetches) == 0)
4070 {
4071 if (loop_dump_stream)
4072 fprintf (loop_dump_stream,
4073 "Prefetch: ignoring prefetches within loop: ahead is zero; %d < %d\n",
4074 SIMULTANEOUS_PREFETCHES, num_real_prefetches);
4075 num_real_prefetches = 0, num_real_write_prefetches = 0;
4076 }
4077 }
4078 /* We'll also use AHEAD to determine how many prefetch instructions to
4079 emit before a loop, so don't leave it zero. */
4080 if (ahead == 0)
4081 ahead = PREFETCH_BLOCKS_BEFORE_LOOP_MAX;
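  /* An illustrative calculation: if the target could usefully sustain,
     say, 4 simultaneous prefetches and the loop needs 2 prefetch insns
     per iteration, AHEAD becomes 4 / 2 = 2, and the in-loop prefetches
     are issued two prefetch blocks beyond the current access.
     (Hypothetical numbers.)  */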
4082
4083 for (i = 0; i < num_prefetches; i++)
ba38e12b 4084 {
cd6839f2 4085 /* Update if we've decided not to prefetch anything within the loop. */
4086 if (num_real_prefetches == 0)
4087 info[i].prefetch_in_loop = 0;
4088
4089 /* Find how many prefetch instructions we'll use before the loop. */
4090 if (info[i].prefetch_before_loop != 0)
4091 {
4092 int n = info[i].total_bytes / PREFETCH_BLOCK;
4093 if (n > ahead)
4094 n = ahead;
4095 info[i].prefetch_before_loop = n;
4096 num_prefetches_before += n;
4097 if (info[i].write)
4098 num_write_prefetches_before += n;
4099 }
4100
4101 if (loop_dump_stream)
ba38e12b 4102 {
44d23341 4103 if (info[i].prefetch_in_loop == 0
4104 && info[i].prefetch_before_loop == 0)
4105 continue;
4106 fprintf (loop_dump_stream, "Prefetch insn: %d",
ba38e12b 4107 INSN_UID (info[i].giv->insn));
4022cdd6 4108 fprintf (loop_dump_stream,
cd6839f2 4109 "; in loop: %d; before: %d; %s\n",
4110 info[i].prefetch_in_loop,
4111 info[i].prefetch_before_loop,
44d23341 4112 info[i].write ? "read/write" : "read only");
4113 fprintf (loop_dump_stream,
4114 " density: %d%%; bytes_accessed: %u; total_bytes: %u\n",
4115 (int) (info[i].bytes_accessed * 100 / info[i].stride),
4116 info[i].bytes_accessed, info[i].total_bytes);
85aa12f7 4117 fprintf (loop_dump_stream, " index: " HOST_WIDE_INT_PRINT_DEC
4118 "; stride: " HOST_WIDE_INT_PRINT_DEC "; address: ",
4119 info[i].index, info[i].stride);
44d23341 4120 print_rtl (loop_dump_stream, info[i].base_address);
4121 fprintf (loop_dump_stream, "\n");
ba38e12b 4122 }
ba38e12b 4123 }
4124
cd6839f2 4125 if (num_real_prefetches + num_prefetches_before > 0)
44d23341 4126 {
cd6839f2 4127 /* Record that this loop uses prefetch instructions. */
4128 LOOP_INFO (loop)->has_prefetch = 1;
4129
44d23341 4130 if (loop_dump_stream)
cd6839f2 4131 {
4132 fprintf (loop_dump_stream, "Real prefetches needed within loop: %d (write: %d)\n",
4133 num_real_prefetches, num_real_write_prefetches);
4134 fprintf (loop_dump_stream, "Real prefetches needed before loop: %d (write: %d)\n",
4135 num_prefetches_before, num_write_prefetches_before);
4136 }
44d23341 4137 }
4022cdd6 4138
ba38e12b 4139 for (i = 0; i < num_prefetches; i++)
4140 {
cd6839f2 4141 int y;
4022cdd6 4142
cd6839f2 4143 for (y = 0; y < info[i].prefetch_in_loop; y++)
4144 {
4145 rtx loc = copy_rtx (*info[i].giv->location);
4146 rtx insn;
4147 int bytes_ahead = PREFETCH_BLOCK * (ahead + y);
4148 rtx before_insn = info[i].giv->insn;
4149 rtx prev_insn = PREV_INSN (info[i].giv->insn);
ab87d1bc 4150 rtx seq;
cd6839f2 4151
4152 /* We can save some effort by offsetting the address on
4153 architectures with offsettable memory references. */
4154 if (offsettable_address_p (0, VOIDmode, loc))
4155 loc = plus_constant (loc, bytes_ahead);
4156 else
ba38e12b 4157 {
cd6839f2 4158 rtx reg = gen_reg_rtx (Pmode);
4159 loop_iv_add_mult_emit_before (loop, loc, const1_rtx,
3ad4992f 4160 GEN_INT (bytes_ahead), reg,
4161 0, before_insn);
cd6839f2 4162 loc = reg;
4163 }
ba38e12b 4164
ab87d1bc 4165 start_sequence ();
cd6839f2 4166 /* Make sure the address operand is valid for prefetch. */
4167 if (! (*insn_data[(int)CODE_FOR_prefetch].operand[0].predicate)
4168 (loc, insn_data[(int)CODE_FOR_prefetch].operand[0].mode))
4169 loc = force_reg (Pmode, loc);
ab87d1bc 4170 emit_insn (gen_prefetch (loc, GEN_INT (info[i].write),
4171 GEN_INT (3)));
31d3e01c 4172 seq = get_insns ();
ab87d1bc 4173 end_sequence ();
4174 emit_insn_before (seq, before_insn);
cd6839f2 4175
4176 /* Check all insns emitted and record the new GIV
4177 information. */
4178 insn = NEXT_INSN (prev_insn);
4179 while (insn != before_insn)
4180 {
4181 insn = check_insn_for_givs (loop, insn,
4182 info[i].giv->always_executed,
4183 info[i].giv->maybe_multiple);
4184 insn = NEXT_INSN (insn);
ba38e12b 4185 }
4186 }
4022cdd6 4187
cd6839f2 4188 if (PREFETCH_BEFORE_LOOP)
ba38e12b 4189 {
cd6839f2 4190 /* Emit insns before the loop to fetch the first cache lines or,
4191 if we're not prefetching within the loop, everything we expect
4192 to need. */
4193 for (y = 0; y < info[i].prefetch_before_loop; y++)
ba38e12b 4194 {
4195 rtx reg = gen_reg_rtx (Pmode);
4196 rtx loop_start = loop->start;
cd6839f2 4197 rtx init_val = info[i].class->initial_value;
ba38e12b 4198 rtx add_val = simplify_gen_binary (PLUS, Pmode,
4199 info[i].giv->add_val,
4200 GEN_INT (y * PREFETCH_BLOCK));
4022cdd6 4201
cd6839f2 4202 /* Functions called by LOOP_IV_ADD_EMIT_BEFORE expect a
4203 non-constant INIT_VAL to have the same mode as REG, which
4204 in this case we know to be Pmode. */
4205 if (GET_MODE (init_val) != Pmode && !CONSTANT_P (init_val))
22aae821 4206 {
4207 rtx seq;
4208
4209 start_sequence ();
4210 init_val = convert_to_mode (Pmode, init_val, 0);
4211 seq = get_insns ();
4212 end_sequence ();
4213 loop_insn_emit_before (loop, 0, loop_start, seq);
4214 }
cd6839f2 4215 loop_iv_add_mult_emit_before (loop, init_val,
ba38e12b 4216 info[i].giv->mult_val,
cd6839f2 4217 add_val, reg, 0, loop_start);
ba38e12b 4218 emit_insn_before (gen_prefetch (reg, GEN_INT (info[i].write),
4022cdd6 4219 GEN_INT (3)),
4220 loop_start);
ba38e12b 4221 }
4222 }
4223 }
4022cdd6 4224
ba38e12b 4225 return;
4226}
4227\f
04aa27b1 4228/* A "basic induction variable" or biv is a pseudo reg that is set
4229 (within this loop) only by incrementing or decrementing it. */
4230/* A "general induction variable" or giv is a pseudo reg whose
4231 value is a linear function of a biv. */
4232
4233/* Bivs are recognized by `basic_induction_var';
1d322a97 4234 Givs by `general_induction_var'. */
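/* An illustrative sketch at the source level:

     for (i = 0; i < n; i++)
       a[i * 4 + 8] = 0;

   Here `i' is a biv (set only by the increment i = i + 1), while the
   address expression `i * 4 + 8' is a giv, a linear function of the
   biv.  */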
04aa27b1 4235
04aa27b1 4236/* Communication with routines called via `note_stores'. */
4237
4238static rtx note_insn;
4239
6ef828f9 4240/* Dummy register to have nonzero DEST_REG for DEST_ADDR type givs. */
04aa27b1 4241
4242static rtx addr_placeholder;
4243
4244/* ??? Unfinished optimizations, and possible future optimizations,
4245 for the strength reduction code. */
4246
04aa27b1 4247/* ??? The interaction of biv elimination, and recognition of 'constant'
a92771b8 4248 bivs, may cause problems. */
04aa27b1 4249
4250/* ??? Add heuristics so that DEST_ADDR strength reduction does not cause
4251 performance problems.
4252
4253 Perhaps don't eliminate things that can be combined with an addressing
4254 mode. Find all givs that have the same biv, mult_val, and add_val;
4255 then for each giv, check to see if its only use dies in a following
4256 memory address. If so, generate a new memory address and check to see
4257 if it is valid. If it is valid, then store the modified memory address,
4258 otherwise, mark the giv as not done so that it will get its own iv. */
4259
4260/* ??? Could try to optimize branches when it is known that a biv is always
4261 positive. */
4262
 4263/* ??? When replacing a biv in a compare insn, we should replace it with the closest
4264 giv so that an optimized branch can still be recognized by the combiner,
4265 e.g. the VAX acb insn. */
4266
4267/* ??? Many of the checks involving uid_luid could be simplified if regscan
4268 was rerun in loop_optimize whenever a register was added or moved.
4269 Also, some of the optimizations could be a little less conservative. */
4270\f
f13bf5f6 4271/* Scan the loop body and call FNCALL for each insn. In addition to the
 4272 LOOP and INSN parameters, pass MAYBE_MULTIPLE and NOT_EVERY_ITERATION to
 4273 the callback.
4a8f0b95 4274
fa813440 4275 NOT_EVERY_ITERATION is 1 if the current insn is not known to be executed
 4276 at least once for every loop iteration except for the last one.
f13bf5f6 4277
 4278 MAYBE_MULTIPLE is 1 if the current insn may be executed more than once for
 4279 every loop iteration.
 4280 */
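/* A hypothetical callback, sketched only to show the expected shape
   (check_insn_for_bivs and check_insn_for_givs are the real users):

     static rtx
     my_scan (struct loop *loop, rtx insn,
	      int not_every_iteration, int maybe_multiple)
     {
       ... examine INSN here ...
       return insn;	(the walk resumes after the returned insn)
     }
*/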
4281void
3ad4992f 4282for_each_insn_in_loop (struct loop *loop, loop_insn_callback fncall)
04aa27b1 4283{
04aa27b1 4284 int not_every_iteration = 0;
56a1cb6b 4285 int maybe_multiple = 0;
2f5f24a5 4286 int past_loop_latch = 0;
6ed0bf1f 4287 int loop_depth = 0;
f13bf5f6 4288 rtx p;
04aa27b1 4289
ec7d7ef9 4290 /* If loop_scan_start points to the loop exit test, we have to be wary of
a490489b 4291 subversive use of gotos inside expression statements. */
f13bf5f6 4292 if (prev_nonnote_insn (loop->scan_start) != prev_nonnote_insn (loop->start))
4293 maybe_multiple = back_branch_in_range_p (loop, loop->scan_start);
04aa27b1 4294
7299020b 4295 /* Scan through loop and update NOT_EVERY_ITERATION and MAYBE_MULTIPLE. */
f13bf5f6 4296 for (p = next_insn_in_loop (loop, loop->scan_start);
ce326ac0 4297 p != NULL_RTX;
ec7d7ef9 4298 p = next_insn_in_loop (loop, p))
04aa27b1 4299 {
b207b5cc 4300 p = fncall (loop, p, not_every_iteration, maybe_multiple);
04aa27b1 4301
56a1cb6b 4302 /* Past CODE_LABEL, we get to insns that may be executed multiple
f13bf5f6 4303 times. The only way we can be sure that they can't is if every
4304 jump insn between here and the end of the loop either
4305 returns, exits the loop, is a jump to a location that is still
4306 behind the label, or is a jump to the loop start. */
56a1cb6b 4307
4308 if (GET_CODE (p) == CODE_LABEL)
4309 {
4310 rtx insn = p;
4311
4312 maybe_multiple = 0;
4313
4314 while (1)
4315 {
4316 insn = NEXT_INSN (insn);
f13bf5f6 4317 if (insn == loop->scan_start)
56a1cb6b 4318 break;
f13bf5f6 4319 if (insn == loop->end)
56a1cb6b 4320 {
f13bf5f6 4321 if (loop->top != 0)
4322 insn = loop->top;
56a1cb6b 4323 else
4324 break;
f13bf5f6 4325 if (insn == loop->scan_start)
56a1cb6b 4326 break;
4327 }
4328
4329 if (GET_CODE (insn) == JUMP_INSN
4330 && GET_CODE (PATTERN (insn)) != RETURN
b2816317 4331 && (!any_condjump_p (insn)
56a1cb6b 4332 || (JUMP_LABEL (insn) != 0
f13bf5f6 4333 && JUMP_LABEL (insn) != loop->scan_start
4334 && !loop_insn_first_p (p, JUMP_LABEL (insn)))))
a5e23478 4335 {
4336 maybe_multiple = 1;
4337 break;
4338 }
56a1cb6b 4339 }
4340 }
4341
a5e23478 4342 /* Past a jump, we get to insns for which we can't count
f13bf5f6 4343 on whether they will be executed during each iteration. */
a5e23478 4344 /* This code appears twice in strength_reduce. There is also similar
f13bf5f6 4345 code in scan_loop. */
a5e23478 4346 if (GET_CODE (p) == JUMP_INSN
f13bf5f6 4347 /* If we enter the loop in the middle, and scan around to the
4348 beginning, don't set not_every_iteration for that.
4349 This can be any kind of jump, since we want to know if insns
4350 will be executed if the loop is executed. */
4351 && !(JUMP_LABEL (p) == loop->top
fa813440 4352 && ((NEXT_INSN (NEXT_INSN (p)) == loop->end
4353 && any_uncondjump_p (p))
4354 || (NEXT_INSN (p) == loop->end && any_condjump_p (p)))))
a5e23478 4355 {
4356 rtx label = 0;
4357
4358 /* If this is a jump outside the loop, then it also doesn't
4359 matter. Check to see if the target of this branch is on the
ec7d7ef9 4360 loop->exits_labels list. */
f13bf5f6 4361
15fc3eb7 4362 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
a5e23478 4363 if (XEXP (label, 0) == JUMP_LABEL (p))
4364 break;
4365
f13bf5f6 4366 if (!label)
a5e23478 4367 not_every_iteration = 1;
4368 }
04aa27b1 4369
6ed0bf1f 4370 else if (GET_CODE (p) == NOTE)
4371 {
4372 /* At the virtual top of a converted loop, insns are again known to
4373 be executed each iteration: logically, the loop begins here
e6aa115d 4374 even though the exit code has been duplicated.
4375
4376 Insns are also again known to be executed each iteration at
4377 the LOOP_CONT note. */
4378 if ((NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_VTOP
4379 || NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_CONT)
4380 && loop_depth == 0)
6ed0bf1f 4381 not_every_iteration = 0;
4382 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_BEG)
4383 loop_depth++;
4384 else if (NOTE_LINE_NUMBER (p) == NOTE_INSN_LOOP_END)
4385 loop_depth--;
4386 }
04aa27b1 4387
2f5f24a5 4388 /* Note if we pass a loop latch. If we do, then we cannot clear
f13bf5f6 4389 NOT_EVERY_ITERATION below when we pass the last CODE_LABEL in
4390 a loop since a jump before the last CODE_LABEL may have started
4391 a new loop iteration.
4392
4393 Note that LOOP_TOP is only set for rotated loops and we need
4394 this check for all loops, so compare against the CODE_LABEL
4395 which immediately follows LOOP_START. */
4396 if (GET_CODE (p) == JUMP_INSN
4397 && JUMP_LABEL (p) == NEXT_INSN (loop->start))
2f5f24a5 4398 past_loop_latch = 1;
4399
04aa27b1 4400 /* Unlike in the code motion pass where MAYBE_NEVER indicates that
f13bf5f6 4401 an insn may never be executed, NOT_EVERY_ITERATION indicates whether
4402 or not an insn is known to be executed each iteration of the
4403 loop, whether or not any iterations are known to occur.
04aa27b1 4404
f13bf5f6 4405 Therefore, if we have just passed a label and have no more labels
4406 between here and the test insn of the loop, and we have not passed
4407 a jump to the top of the loop, then we know these insns will be
4408 executed each iteration. */
04aa27b1 4409
f13bf5f6 4410 if (not_every_iteration
4411 && !past_loop_latch
2f5f24a5 4412 && GET_CODE (p) == CODE_LABEL
f13bf5f6 4413 && no_labels_between_p (p, loop->end)
4414 && loop_insn_first_p (p, loop->cont))
04aa27b1 4415 not_every_iteration = 0;
4416 }
f13bf5f6 4417}
4418\f
f13bf5f6 4419static void
3ad4992f 4420loop_bivs_find (struct loop *loop)
f13bf5f6 4421{
e9b78d43 4422 struct loop_regs *regs = LOOP_REGS (loop);
8ec5f078 4423 struct loop_ivs *ivs = LOOP_IVS (loop);
6f812a51 4424 /* Temporary list pointers for traversing ivs->list. */
f13bf5f6 4425 struct iv_class *bl, **backbl;
f13bf5f6 4426
6f812a51 4427 ivs->list = 0;
f13bf5f6 4428
f13bf5f6 4429 for_each_insn_in_loop (loop, check_insn_for_bivs);
4bb30577 4430
6f812a51 4431 /* Scan ivs->list to remove all regs that proved not to be bivs.
e9b78d43 4432 Make a sanity check against regs->n_times_set. */
6f812a51 4433 for (backbl = &ivs->list, bl = *backbl; bl; bl = bl->next)
04aa27b1 4434 {
8ec5f078 4435 if (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
04aa27b1 4436 /* Above happens if register modified by subreg, etc. */
4437 /* Make sure it is not recognized as a basic induction var: */
05cb4e54 4438 || regs->array[bl->regno].n_times_set != bl->biv_count
04aa27b1 4439 /* If never incremented, it is invariant that we decided not to
4440 move. So leave it alone. */
4441 || ! bl->incremented)
4442 {
4443 if (loop_dump_stream)
bb6f5cc9 4444 fprintf (loop_dump_stream, "Biv %d: discarded, %s\n",
04aa27b1 4445 bl->regno,
8ec5f078 4446 (REG_IV_TYPE (ivs, bl->regno) != BASIC_INDUCT
04aa27b1 4447 ? "not induction variable"
4448 : (! bl->incremented ? "never incremented"
4449 : "count error")));
4a8f0b95 4450
8ec5f078 4451 REG_IV_TYPE (ivs, bl->regno) = NOT_BASIC_INDUCT;
04aa27b1 4452 *backbl = bl->next;
4453 }
4454 else
4455 {
4456 backbl = &bl->next;
4457
4458 if (loop_dump_stream)
bb6f5cc9 4459 fprintf (loop_dump_stream, "Biv %d: verified\n", bl->regno);
04aa27b1 4460 }
4461 }
d59b4e47 4462}
04aa27b1 4463
04aa27b1 4464
457275b6 4465/* Determine how BIVs are initialized by looking through the pre-header
d59b4e47 4466 extended basic block. */
4467static void
3ad4992f 4468loop_bivs_init_find (struct loop *loop)
d59b4e47 4469{
d59b4e47 4470 struct loop_ivs *ivs = LOOP_IVS (loop);
6f812a51 4471 /* Temporary list pointers for traversing ivs->list. */
d59b4e47 4472 struct iv_class *bl;
e7b494d3 4473 int call_seen;
4474 rtx p;
04aa27b1 4475
4476 /* Find initial value for each biv by searching backwards from loop_start,
4477 halting at first label. Also record any test condition. */
4478
4479 call_seen = 0;
e7b494d3 4480 for (p = loop->start; p && GET_CODE (p) != CODE_LABEL; p = PREV_INSN (p))
04aa27b1 4481 {
e7b494d3 4482 rtx test;
4483
04aa27b1 4484 note_insn = p;
4485
4486 if (GET_CODE (p) == CALL_INSN)
4487 call_seen = 1;
4488
2cbea1dc 4489 if (INSN_P (p))
8ec5f078 4490 note_stores (PATTERN (p), record_initial, ivs);
04aa27b1 4491
 4492 /* Record any test of a biv that branches around the loop if there is no
 4493 store between it and the start of the loop. We only care about tests with
 4494 constants and registers, and only certain of those. */
4495 if (GET_CODE (p) == JUMP_INSN
4496 && JUMP_LABEL (p) != 0
e7b494d3 4497 && next_real_insn (JUMP_LABEL (p)) == next_real_insn (loop->end)
15fc3eb7 4498 && (test = get_condition_for_loop (loop, p)) != 0
04aa27b1 4499 && GET_CODE (XEXP (test, 0)) == REG
4500 && REGNO (XEXP (test, 0)) < max_reg_before_loop
b995c337 4501 && (bl = REG_IV_CLASS (ivs, REGNO (XEXP (test, 0)))) != 0
e7b494d3 4502 && valid_initial_value_p (XEXP (test, 1), p, call_seen, loop->start)
04aa27b1 4503 && bl->init_insn == 0)
4504 {
4505 /* If an NE test, we have an initial value! */
4506 if (GET_CODE (test) == NE)
4507 {
4508 bl->init_insn = p;
941522d6 4509 bl->init_set = gen_rtx_SET (VOIDmode,
4510 XEXP (test, 0), XEXP (test, 1));
04aa27b1 4511 }
4512 else
4513 bl->initial_test = test;
4514 }
4515 }
d59b4e47 4516}
4517
4518
 4519/* Look at each biv and see if we can say anything better about its
4520 initial value from any initializing insns set up above. (This is done
4521 in two passes to avoid missing SETs in a PARALLEL.) */
4522static void
3ad4992f 4523loop_bivs_check (struct loop *loop)
d59b4e47 4524{
4525 struct loop_ivs *ivs = LOOP_IVS (loop);
6f812a51 4526 /* Temporary list pointers for traversing ivs->list. */
d59b4e47 4527 struct iv_class *bl;
4528 struct iv_class **backbl;
04aa27b1 4529
6f812a51 4530 for (backbl = &ivs->list; (bl = *backbl); backbl = &bl->next)
04aa27b1 4531 {
4532 rtx src;
ad87de1e 4533 rtx note;
04aa27b1 4534
4535 if (! bl->init_insn)
4536 continue;
4537
ad87de1e 4538 /* If INIT_INSN has a REG_EQUAL or REG_EQUIV note and the value
4539 is a constant, use the value of that. */
4540 if (((note = find_reg_note (bl->init_insn, REG_EQUAL, 0)) != NULL
4541 && CONSTANT_P (XEXP (note, 0)))
4542 || ((note = find_reg_note (bl->init_insn, REG_EQUIV, 0)) != NULL
4543 && CONSTANT_P (XEXP (note, 0))))
4544 src = XEXP (note, 0);
4545 else
4546 src = SET_SRC (bl->init_set);
04aa27b1 4547
4548 if (loop_dump_stream)
4549 fprintf (loop_dump_stream,
bb6f5cc9 4550 "Biv %d: initialized at insn %d: initial value ",
04aa27b1 4551 bl->regno, INSN_UID (bl->init_insn));
4552
38ed0638 4553 if ((GET_MODE (src) == GET_MODE (regno_reg_rtx[bl->regno])
4554 || GET_MODE (src) == VOIDmode)
4bb30577 4555 && valid_initial_value_p (src, bl->init_insn,
4556 LOOP_INFO (loop)->pre_header_has_call,
e7b494d3 4557 loop->start))
04aa27b1 4558 {
4559 bl->initial_value = src;
4560
4561 if (loop_dump_stream)
4562 {
bb6f5cc9 4563 print_simple_rtl (loop_dump_stream, src);
4564 fputc ('\n', loop_dump_stream);
04aa27b1 4565 }
4566 }
7912da71 4567 /* If we can't make it a giv,
d59b4e47 4568 let the biv keep its initial value of "itself". */
7912da71 4569 else if (loop_dump_stream)
4570 fprintf (loop_dump_stream, "is complex\n");
8bd88b68 4571 }
d59b4e47 4572}
8bd88b68 4573
04aa27b1 4574
d59b4e47 4575/* Search the loop for general induction variables. */
4576
4577static void
3ad4992f 4578loop_givs_find (struct loop* loop)
d59b4e47 4579{
f13bf5f6 4580 for_each_insn_in_loop (loop, check_insn_for_givs);
d59b4e47 4581}
04aa27b1 4582
04aa27b1 4583
d59b4e47 4584/* For each giv for which we still don't know whether or not it is
4585 replaceable, check to see if it is replaceable because its final value
1e625a2e 4586 can be calculated. */
04aa27b1 4587
d59b4e47 4588static void
3ad4992f 4589loop_givs_check (struct loop *loop)
d59b4e47 4590{
4591 struct loop_ivs *ivs = LOOP_IVS (loop);
4592 struct iv_class *bl;
04aa27b1 4593
6f812a51 4594 for (bl = ivs->list; bl; bl = bl->next)
04aa27b1 4595 {
4596 struct induction *v;
4597
4598 for (v = bl->giv; v; v = v->next_iv)
4599 if (! v->replaceable && ! v->not_replaceable)
15fc3eb7 4600 check_final_value (loop, v);
04aa27b1 4601 }
d59b4e47 4602}
4603
4604
6ef828f9 4605/* Return nonzero if it is possible to eliminate the biv BL provided
e7b494d3 4606 all givs are reduced. This is possible if either the reg is not
4607 used outside the loop, or we can compute what its final value will
4608 be. */
4609
4610static int
3ad4992f 4611loop_biv_eliminable_p (struct loop *loop, struct iv_class *bl,
4612 int threshold, int insn_count)
d59b4e47 4613{
e7b494d3 4614 /* For architectures with a decrement_and_branch_until_zero insn,
4615 don't do this if we put a REG_NONNEG note on the endtest for this
4616 biv. */
4617
4618#ifdef HAVE_decrement_and_branch_until_zero
4619 if (bl->nonneg)
4620 {
4621 if (loop_dump_stream)
4622 fprintf (loop_dump_stream,
4623 "Cannot eliminate nonneg biv %d.\n", bl->regno);
4624 return 0;
4625 }
4626#endif
4627
 4628 /* Check that the biv is not used outside the loop, or has a final value.
4629 Compare against bl->init_insn rather than loop->start. We aren't
4630 concerned with any uses of the biv between init_insn and
4631 loop->start since these won't be affected by the value of the biv
4632 elsewhere in the function, so long as init_insn doesn't use the
4633 biv itself. */
4bb30577 4634
d59b4e47 4635 if ((REGNO_LAST_LUID (bl->regno) < INSN_LUID (loop->end)
4636 && bl->init_insn
4637 && INSN_UID (bl->init_insn) < max_uid_for_loop
4638 && REGNO_FIRST_LUID (bl->regno) >= INSN_LUID (bl->init_insn)
d59b4e47 4639 && ! reg_mentioned_p (bl->biv->dest_reg, SET_SRC (bl->init_set)))
e7b494d3 4640 || (bl->final_value = final_biv_value (loop, bl)))
d59b4e47 4641 return maybe_eliminate_biv (loop, bl, 0, threshold, insn_count);
4bb30577 4642
e7b494d3 4643 if (loop_dump_stream)
4644 {
4645 fprintf (loop_dump_stream,
4646 "Cannot eliminate biv %d.\n",
4647 bl->regno);
4648 fprintf (loop_dump_stream,
4649 "First use: insn %d, last use: insn %d.\n",
4650 REGNO_FIRST_UID (bl->regno),
4651 REGNO_LAST_UID (bl->regno));
4652 }
4653 return 0;
4654}
4655
4656
4657/* Reduce each giv of BL that we have decided to reduce. */
4658
4659static void
3ad4992f 4660loop_givs_reduce (struct loop *loop, struct iv_class *bl)
e7b494d3 4661{
4662 struct induction *v;
4663
4664 for (v = bl->giv; v; v = v->next_iv)
4665 {
4666 struct induction *tv;
4667 if (! v->ignore && v->same == 0)
4668 {
4669 int auto_inc_opt = 0;
4bb30577 4670
e7b494d3 4671 /* If the code for derived givs immediately below has already
4672 allocated a new_reg, we must keep it. */
4673 if (! v->new_reg)
4674 v->new_reg = gen_reg_rtx (v->mode);
4bb30577 4675
e7b494d3 4676#ifdef AUTO_INC_DEC
4677 /* If the target has auto-increment addressing modes, and
4678 this is an address giv, then try to put the increment
4679 immediately after its use, so that flow can create an
4680 auto-increment addressing mode. */
4681 if (v->giv_type == DEST_ADDR && bl->biv_count == 1
4682 && bl->biv->always_executed && ! bl->biv->maybe_multiple
4683 /* We don't handle reversed biv's because bl->biv->insn
4684 does not have a valid INSN_LUID. */
4685 && ! bl->reversed
4686 && v->always_executed && ! v->maybe_multiple
4687 && INSN_UID (v->insn) < max_uid_for_loop)
4688 {
4689 /* If other giv's have been combined with this one, then
4690 this will work only if all uses of the other giv's occur
4691 before this giv's insn. This is difficult to check.
4bb30577 4692
e7b494d3 4693 We simplify this by looking for the common case where
4694 there is one DEST_REG giv, and this giv's insn is the
4695 last use of the dest_reg of that DEST_REG giv. If the
4696 increment occurs after the address giv, then we can
4697 perform the optimization. (Otherwise, the increment
4698 would have to go before other_giv, and we would not be
4699 able to combine it with the address giv to get an
4700 auto-inc address.) */
4701 if (v->combined_with)
4702 {
4703 struct induction *other_giv = 0;
4bb30577 4704
e7b494d3 4705 for (tv = bl->giv; tv; tv = tv->next_iv)
4706 if (tv->same == v)
4707 {
4708 if (other_giv)
4709 break;
4710 else
4711 other_giv = tv;
4712 }
4713 if (! tv && other_giv
4714 && REGNO (other_giv->dest_reg) < max_reg_before_loop
4715 && (REGNO_LAST_UID (REGNO (other_giv->dest_reg))
4716 == INSN_UID (v->insn))
4717 && INSN_LUID (v->insn) < INSN_LUID (bl->biv->insn))
4718 auto_inc_opt = 1;
4719 }
4720 /* Check for case where increment is before the address
4721 giv. Do this test in "loop order". */
4722 else if ((INSN_LUID (v->insn) > INSN_LUID (bl->biv->insn)
4723 && (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4724 || (INSN_LUID (bl->biv->insn)
4725 > INSN_LUID (loop->scan_start))))
4726 || (INSN_LUID (v->insn) < INSN_LUID (loop->scan_start)
4727 && (INSN_LUID (loop->scan_start)
4728 < INSN_LUID (bl->biv->insn))))
4729 auto_inc_opt = -1;
4730 else
4731 auto_inc_opt = 1;
4bb30577 4732
e7b494d3 4733#ifdef HAVE_cc0
4734 {
4735 rtx prev;
4bb30577 4736
e7b494d3 4737 /* We can't put an insn immediately after one setting
4738 cc0, or immediately before one using cc0. */
4739 if ((auto_inc_opt == 1 && sets_cc0_p (PATTERN (v->insn)))
4740 || (auto_inc_opt == -1
4741 && (prev = prev_nonnote_insn (v->insn)) != 0
4742 && INSN_P (prev)
4743 && sets_cc0_p (PATTERN (prev))))
4744 auto_inc_opt = 0;
4745 }
4746#endif
4bb30577 4747
e7b494d3 4748 if (auto_inc_opt)
4749 v->auto_inc_opt = 1;
4750 }
4751#endif
4bb30577 4752
e7b494d3 4753 /* For each place where the biv is incremented, add an insn
4754 to increment the new, reduced reg for the giv. */
4755 for (tv = bl->biv; tv; tv = tv->next_iv)
4756 {
4757 rtx insert_before;
4bb30577 4758
c2034a85 4759 /* Skip if location is the same as a previous one. */
4760 if (tv->same)
4761 continue;
e7b494d3 4762 if (! auto_inc_opt)
97f8ce30 4763 insert_before = NEXT_INSN (tv->insn);
e7b494d3 4764 else if (auto_inc_opt == 1)
4765 insert_before = NEXT_INSN (v->insn);
4766 else
4767 insert_before = v->insn;
4bb30577 4768
e7b494d3 4769 if (tv->mult_val == const1_rtx)
89e8d34f 4770 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4bb30577 4771 v->new_reg, v->new_reg,
89e8d34f 4772 0, insert_before);
e7b494d3 4773 else /* tv->mult_val == const0_rtx */
4774 /* A multiply is acceptable here
4775 since this is presumed to be seldom executed. */
89e8d34f 4776 loop_iv_add_mult_emit_before (loop, tv->add_val, v->mult_val,
4bb30577 4777 v->add_val, v->new_reg,
89e8d34f 4778 0, insert_before);
e7b494d3 4779 }
4bb30577 4780
e7b494d3 4781 /* Add code at loop start to initialize giv's reduced reg. */
4bb30577 4782
89e8d34f 4783 loop_iv_add_mult_hoist (loop,
4784 extend_value_for_giv (v, bl->initial_value),
4785 v->mult_val, v->add_val, v->new_reg);
e7b494d3 4786 }
4787 }
4788}
4789
4790
4791/* Check for givs whose first use is their definition and whose
4792 last use is the definition of another giv. If so, it is likely
4793 dead and should not be used to derive another giv nor to
4794 eliminate a biv. */
4795
4796static void
3ad4992f 4797loop_givs_dead_check (struct loop *loop ATTRIBUTE_UNUSED, struct iv_class *bl)
e7b494d3 4798{
4799 struct induction *v;
4800
4801 for (v = bl->giv; v; v = v->next_iv)
4802 {
4803 if (v->ignore
4804 || (v->same && v->same->ignore))
4805 continue;
4bb30577 4806
e7b494d3 4807 if (v->giv_type == DEST_REG
4808 && REGNO_FIRST_UID (REGNO (v->dest_reg)) == INSN_UID (v->insn))
4809 {
4810 struct induction *v1;
4bb30577 4811
e7b494d3 4812 for (v1 = bl->giv; v1; v1 = v1->next_iv)
4813 if (REGNO_LAST_UID (REGNO (v->dest_reg)) == INSN_UID (v1->insn))
4814 v->maybe_dead = 1;
4815 }
4816 }
4817}
4818
4819
4820static void
3ad4992f 4821loop_givs_rescan (struct loop *loop, struct iv_class *bl, rtx *reg_map)
e7b494d3 4822{
4823 struct induction *v;
4824
4825 for (v = bl->giv; v; v = v->next_iv)
4826 {
4827 if (v->same && v->same->ignore)
4828 v->ignore = 1;
4bb30577 4829
e7b494d3 4830 if (v->ignore)
4831 continue;
4bb30577 4832
e7b494d3 4833 /* Update expression if this was combined, in case other giv was
4834 replaced. */
4835 if (v->same)
4836 v->new_reg = replace_rtx (v->new_reg,
4837 v->same->dest_reg, v->same->new_reg);
4bb30577 4838
e7b494d3 4839 /* See if this register is known to be a pointer to something. If
4840 so, see if we can find the alignment. First see if there is a
4841 destination register that is a pointer. If so, this shares the
4842 alignment too. Next see if we can deduce anything from the
4843 computational information. If not, and this is a DEST_ADDR
4844 giv, at least we know that it's a pointer, though we don't know
4845 the alignment. */
4846 if (GET_CODE (v->new_reg) == REG
4847 && v->giv_type == DEST_REG
4848 && REG_POINTER (v->dest_reg))
4849 mark_reg_pointer (v->new_reg,
4850 REGNO_POINTER_ALIGN (REGNO (v->dest_reg)));
4851 else if (GET_CODE (v->new_reg) == REG
4852 && REG_POINTER (v->src_reg))
4853 {
4854 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->src_reg));
4bb30577 4855
e7b494d3 4856 if (align == 0
4857 || GET_CODE (v->add_val) != CONST_INT
4858 || INTVAL (v->add_val) % (align / BITS_PER_UNIT) != 0)
4859 align = 0;
4bb30577 4860
e7b494d3 4861 mark_reg_pointer (v->new_reg, align);
4862 }
4863 else if (GET_CODE (v->new_reg) == REG
4864 && GET_CODE (v->add_val) == REG
4865 && REG_POINTER (v->add_val))
4866 {
4867 unsigned int align = REGNO_POINTER_ALIGN (REGNO (v->add_val));
4bb30577 4868
e7b494d3 4869 if (align == 0 || GET_CODE (v->mult_val) != CONST_INT
4870 || INTVAL (v->mult_val) % (align / BITS_PER_UNIT) != 0)
4871 align = 0;
4bb30577 4872
e7b494d3 4873 mark_reg_pointer (v->new_reg, align);
4874 }
4875 else if (GET_CODE (v->new_reg) == REG && v->giv_type == DEST_ADDR)
4876 mark_reg_pointer (v->new_reg, 0);
4bb30577 4877
e7b494d3 4878 if (v->giv_type == DEST_ADDR)
4879 /* Store reduced reg as the address in the memref where we found
4880 this giv. */
4881 validate_change (v->insn, v->location, v->new_reg, 0);
4882 else if (v->replaceable)
4883 {
4884 reg_map[REGNO (v->dest_reg)] = v->new_reg;
4885 }
4886 else
4887 {
237f0dff 4888 rtx original_insn = v->insn;
4c694afd 4889 rtx note;
237f0dff 4890
e7b494d3 4891 /* Not replaceable; emit an insn to set the original giv reg from
4892 the reduced giv, same as above. */
237f0dff 4893 v->insn = loop_insn_emit_after (loop, 0, original_insn,
4894 gen_move_insn (v->dest_reg,
4895 v->new_reg));
4896
3ad4992f 4897 /* The original insn may have a REG_EQUAL note. This note is
4898 now incorrect and may result in invalid substitutions later.
4899 The original insn is dead, but may be part of a libcall
4900 sequence, which doesn't seem worth the bother of handling. */
4901 note = find_reg_note (original_insn, REG_EQUAL, NULL_RTX);
4902 if (note)
4903 remove_note (original_insn, note);
e7b494d3 4904 }
4bb30577 4905
e7b494d3 4906 /* When a loop is reversed, givs which depend on the reversed
4907 biv, and which are live outside the loop, must be set to their
4908 correct final value. This insn is only needed if the giv is
4909 not replaceable. The correct final value is the same as the
4910 value that the giv starts the reversed loop with. */
4911 if (bl->reversed && ! v->replaceable)
4bb30577 4912 loop_iv_add_mult_sink (loop,
89e8d34f 4913 extend_value_for_giv (v, bl->initial_value),
4914 v->mult_val, v->add_val, v->dest_reg);
e7b494d3 4915 else if (v->final_value)
4bb30577 4916 loop_insn_sink_or_swim (loop,
2ecaad6d 4917 gen_load_of_final_value (v->dest_reg,
4918 v->final_value));
4bb30577 4919
e7b494d3 4920 if (loop_dump_stream)
4921 {
4922 fprintf (loop_dump_stream, "giv at %d reduced to ",
4923 INSN_UID (v->insn));
bb6f5cc9 4924 print_simple_rtl (loop_dump_stream, v->new_reg);
e7b494d3 4925 fprintf (loop_dump_stream, "\n");
4926 }
4927 }
4928}
4929
4930
4931static int
3ad4992f 4932loop_giv_reduce_benefit (struct loop *loop ATTRIBUTE_UNUSED,
4933 struct iv_class *bl, struct induction *v,
4934 rtx test_reg)
e7b494d3 4935{
4936 int add_cost;
4937 int benefit;
4938
4939 benefit = v->benefit;
4940 PUT_MODE (test_reg, v->mode);
4941 add_cost = iv_add_mult_cost (bl->biv->add_val, v->mult_val,
4942 test_reg, test_reg);
4bb30577 4943
e7b494d3 4944 /* Reduce benefit if not replaceable, since we will insert a
4945 move-insn to replace the insn that calculates this giv. Don't do
4946 this unless the giv is a user variable, since it will often be
4947 marked non-replaceable because of the duplication of the exit
4948 code outside the loop. In such a case, the copies we insert are
4949 dead and will be deleted. So they don't have a cost. Similar
4950 situations exist. */
4951 /* ??? The new final_[bg]iv_value code does a much better job of
4952 finding replaceable giv's, and hence this code may no longer be
4953 necessary. */
4954 if (! v->replaceable && ! bl->eliminable
4955 && REG_USERVAR_P (v->dest_reg))
4956 benefit -= copy_cost;
4bb30577 4957
e7b494d3 4958 /* Decrease the benefit to count the add-insns that we will insert
4959 to increment the reduced reg for the giv. ??? This can
4960 overestimate the run-time cost of the additional insns, e.g. if
4961 there are multiple basic blocks that increment the biv, but only
4962 one of these blocks is executed during each iteration. There is
4963 no good way to detect cases like this with the current structure
4964 of the loop optimizer. This code is more accurate for
4965 determining code size than run-time benefits. */
4966 benefit -= add_cost * bl->biv_count;
4967
4968 /* Decide whether to strength-reduce this giv or to leave the code
4969 unchanged (recompute it from the biv each time it is used). This
4970 decision can be made independently for each giv. */
4971
4972#ifdef AUTO_INC_DEC
4973 /* Attempt to guess whether autoincrement will handle some of the
4974 new add insns; if so, increase BENEFIT (undo the subtraction of
4975 add_cost that was done above). */
4976 if (v->giv_type == DEST_ADDR
4977 /* Increasing the benefit is risky, since this is only a guess.
4978 Avoid increasing register pressure in cases where there would
4979 be no other benefit from reducing this giv. */
4980 && benefit > 0
4981 && GET_CODE (v->mult_val) == CONST_INT)
4982 {
4acc436e 4983 int size = GET_MODE_SIZE (GET_MODE (v->mem));
4984
e7b494d3 4985 if (HAVE_POST_INCREMENT
4acc436e 4986 && INTVAL (v->mult_val) == size)
e7b494d3 4987 benefit += add_cost * bl->biv_count;
4988 else if (HAVE_PRE_INCREMENT
4acc436e 4989 && INTVAL (v->mult_val) == size)
e7b494d3 4990 benefit += add_cost * bl->biv_count;
4991 else if (HAVE_POST_DECREMENT
4acc436e 4992 && -INTVAL (v->mult_val) == size)
e7b494d3 4993 benefit += add_cost * bl->biv_count;
4994 else if (HAVE_PRE_DECREMENT
4acc436e 4995 && -INTVAL (v->mult_val) == size)
e7b494d3 4996 benefit += add_cost * bl->biv_count;
4997 }
4998#endif
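  /* For instance (a hypothetical case): on a target with post-increment
     addressing, a DEST_ADDR giv that steps by the access size, as in
     "*p++ = 0", needs no separate add insn, so the add_cost subtracted
     earlier is given back by the code above.  */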
4999
5000 return benefit;
d59b4e47 5001}
5002
5003
a7eee06c 5004/* Free IV structures for LOOP. */
5005
5006static void
3ad4992f 5007loop_ivs_free (struct loop *loop)
a7eee06c 5008{
5009 struct loop_ivs *ivs = LOOP_IVS (loop);
5010 struct iv_class *iv = ivs->list;
4bb30577 5011
a7eee06c 5012 free (ivs->regs);
5013
5014 while (iv)
5015 {
5016 struct iv_class *next = iv->next;
5017 struct induction *induction;
5018 struct induction *next_induction;
4bb30577 5019
a7eee06c 5020 for (induction = iv->biv; induction; induction = next_induction)
5021 {
5022 next_induction = induction->next_iv;
5023 free (induction);
5024 }
5025 for (induction = iv->giv; induction; induction = next_induction)
5026 {
5027 next_induction = induction->next_iv;
5028 free (induction);
5029 }
4bb30577 5030
a7eee06c 5031 free (iv);
5032 iv = next;
5033 }
5034}
5035
5036
d59b4e47 5037/* Perform strength reduction and induction variable elimination.
5038
5039 Pseudo registers created during this function will be beyond the
05cb4e54 5040 last valid index in several tables including
5041 REGS->ARRAY[I].N_TIMES_SET and REGNO_LAST_UID. This does not cause a
5042 problem here, because the added registers cannot be givs outside of
5043 their loop, and hence will never be reconsidered. But scan_loop
5044 must check regnos to make sure they are in bounds. */
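/* As an illustrative sketch (hypothetical source, assuming 4-byte array
   elements), strength reduction rewrites

	for (i = 0; i < n; i++)
	  a[i] = 0;

   where each iteration computes the address a + i * 4, into the
   equivalent of

	for (p = a; p < a + n; p++)
	  *p = 0;

   replacing the implied multiply with a simple pointer increment;
   induction variable elimination then rewrites the exit test in terms
   of p so that i can be removed entirely.  */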
d59b4e47 5045
5046static void
3ad4992f 5047strength_reduce (struct loop *loop, int flags)
d59b4e47 5048{
5049 struct loop_info *loop_info = LOOP_INFO (loop);
5050 struct loop_regs *regs = LOOP_REGS (loop);
5051 struct loop_ivs *ivs = LOOP_IVS (loop);
5052 rtx p;
6f812a51 5053 /* Temporary list pointer for traversing ivs->list. */
e7b494d3 5054 struct iv_class *bl;
d59b4e47 5055 /* Ratio of extra register life span we can justify
5056 for saving an instruction. More if loop doesn't call subroutines
5057 since in that case saving an insn makes more difference
5058 and more registers are available. */
5059 /* ??? Could set this to the last value of threshold in move_movables. */
5060 int threshold = (loop_info->has_call ? 1 : 2) * (3 + n_non_fixed_regs);
5061 /* Map of pseudo-register replacements. */
5062 rtx *reg_map = NULL;
5063 int reg_map_size;
d59b4e47 5064 int unrolled_insn_copies = 0;
d59b4e47 5065 rtx test_reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
85bd9543 5066 int insn_count = count_insns_in_loop (loop);
d59b4e47 5067
5068 addr_placeholder = gen_reg_rtx (Pmode);
5069
6f812a51 5070 ivs->n_regs = max_reg_before_loop;
5071 ivs->regs = (struct iv *) xcalloc (ivs->n_regs, sizeof (struct iv));
d59b4e47 5072
5073 /* Find all BIVs in loop. */
5074 loop_bivs_find (loop);
5075
5076 /* Exit if there are no bivs. */
6f812a51 5077 if (! ivs->list)
d59b4e47 5078 {
5079 /* Can still unroll the loop anyway, but indicate that there is no
5080 strength reduction info available. */
5081 if (flags & LOOP_UNROLL)
89e8d34f 5082 unroll_loop (loop, insn_count, 0);
d59b4e47 5083
a7eee06c 5084 loop_ivs_free (loop);
5085 return;
d59b4e47 5086 }
5087
457275b6 5088 /* Determine how BIVs are initialized by looking through the pre-header
d59b4e47 5089 extended basic block. */
5090 loop_bivs_init_find (loop);
5091
5092 /* Look at each biv and see if we can say anything better about its
5093 initial value from any initializing insns set up above. */
5094 loop_bivs_check (loop);
5095
5096 /* Search the loop for general induction variables. */
5097 loop_givs_find (loop);
5098
5099 /* Try to calculate and save the number of loop iterations. This is
5100 set to zero if the actual number cannot be calculated. This must
5101 be called after all giv's have been identified, since otherwise it may
5102 fail if the iteration variable is a giv. */
5103 loop_iterations (loop);
5104
ba38e12b 5105#ifdef HAVE_prefetch
5106 if (flags & LOOP_PREFETCH)
5107 emit_prefetch_instructions (loop);
5108#endif
5109
d59b4e47 5110 /* Now for each giv for which we still don't know whether or not it is
5111 replaceable, check to see if it is replaceable because its final value
5112 can be calculated. This must be done after loop_iterations is called,
5113 so that final_giv_value will work correctly. */
5114 loop_givs_check (loop);
04aa27b1 5115
5116 /* Try to prove that the loop counter variable (if any) is always
5117 nonnegative; if so, record that fact with a REG_NONNEG note
5118 so that a "decrement and branch until zero" insn can be used. */
ec7d7ef9 5119 check_dbra_loop (loop, insn_count);
04aa27b1 5120
a5c62b07 5121 /* Create reg_map to hold substitutions for replaceable giv regs.
5122 Some givs might have been made from biv increments, so look at
8ec5f078 5123 ivs->reg_iv_type for a suitable size. */
6f812a51 5124 reg_map_size = ivs->n_regs;
b9cf3f63 5125 reg_map = (rtx *) xcalloc (reg_map_size, sizeof (rtx));
04aa27b1 5126
5127 /* Examine each iv class for feasibility of strength reduction/induction
5128 variable elimination. */
5129
6f812a51 5130 for (bl = ivs->list; bl; bl = bl->next)
04aa27b1 5131 {
5132 struct induction *v;
5133 int benefit;
4bb30577 5134
04aa27b1 5135 /* Test whether it will be possible to eliminate this biv
d59b4e47 5136 provided all givs are reduced. */
e7b494d3 5137 bl->eliminable = loop_biv_eliminable_p (loop, bl, threshold, insn_count);
04aa27b1 5138
af10ea53 5139 /* This will be true at the end, if all givs which depend on this
5140 biv have been strength reduced.
5141 We can't (currently) eliminate the biv unless this is so. */
5142 bl->all_reduced = 1;
5143
d59b4e47 5144 /* Check each extension dependent giv in this class to see if its
ff57e249 5145 root biv is safe from wrapping in the interior mode. */
5f155d21 5146 check_ext_dependent_givs (bl, loop_info);
ff57e249 5147
04aa27b1 5148 /* Combine all giv's for this iv_class. */
e9b78d43 5149 combine_givs (regs, bl);
04aa27b1 5150
04aa27b1 5151 for (v = bl->giv; v; v = v->next_iv)
5152 {
5153 struct induction *tv;
5154
5155 if (v->ignore || v->same)
5156 continue;
5157
e7b494d3 5158 benefit = loop_giv_reduce_benefit (loop, bl, v, test_reg);
04aa27b1 5159
5160 /* If an insn is not to be strength reduced, then set its ignore
e7b494d3 5161 flag, and clear bl->all_reduced. */
04aa27b1 5162
0061fcce 5163 /* A giv that depends on a reversed biv must be reduced if it is
5164 used after the loop exit, otherwise, it would have the wrong
5165 value after the loop exit. To make it simple, just reduce all
5166 of such giv's whether or not we know they are used after the loop
5167 exit. */
5168
d59b4e47 5169 if (! flag_reduce_all_givs
5170 && v->lifetime * threshold * benefit < insn_count
5171 && ! bl->reversed)
04aa27b1 5172 {
5173 if (loop_dump_stream)
5174 fprintf (loop_dump_stream,
5175 "giv of insn %d not worth while, %d vs %d.\n",
5176 INSN_UID (v->insn),
5177 v->lifetime * threshold * benefit, insn_count);
5178 v->ignore = 1;
e7b494d3 5179 bl->all_reduced = 0;
04aa27b1 5180 }
5181 else
5182 {
5183 /* Check that we can increment the reduced giv without a
5184 multiply insn. If not, reject it. */
5185
5186 for (tv = bl->biv; tv; tv = tv->next_iv)
5187 if (tv->mult_val == const1_rtx
5188 && ! product_cheap_p (tv->add_val, v->mult_val))
5189 {
5190 if (loop_dump_stream)
5191 fprintf (loop_dump_stream,
5192 "giv of insn %d: would need a multiply.\n",
5193 INSN_UID (v->insn));
5194 v->ignore = 1;
e7b494d3 5195 bl->all_reduced = 0;
04aa27b1 5196 break;
5197 }
5198 }
5199 }
5200
dd8b2ce4 5201 /* Check for givs whose first use is their definition and whose
5202 last use is the definition of another giv. If so, it is likely
5203 dead and should not be used to derive another giv nor to
5204 eliminate a biv. */
e7b494d3 5205 loop_givs_dead_check (loop, bl);
dd8b2ce4 5206
04aa27b1 5207 /* Reduce each giv that we decided to reduce. */
e7b494d3 5208 loop_givs_reduce (loop, bl);
04aa27b1 5209
5210 /* Rescan all givs. If a giv is the same as a giv not reduced, mark it
5211 as not reduced.
4a8f0b95 5212
04aa27b1 5213 For each giv register that can be reduced now: if replaceable,
5214 substitute reduced reg wherever the old giv occurs;
dd8b2ce4 5215 else add new move insn "giv_reg = reduced_reg". */
89e8d34f 5216 loop_givs_rescan (loop, bl, reg_map);
04aa27b1 5217
5218 /* All the givs based on the biv bl have been reduced if they
5219 merit it. */
5220
5221 /* For each giv not marked as maybe dead that has been combined with a
5222 second giv, clear any "maybe dead" mark on that second giv.
5223 v->new_reg will either be or refer to the register of the giv it
5224 combined with.
5225
e7b494d3 5226 Doing this clearing avoids problems in biv elimination where
5227 a giv's new_reg is a complex value that can't be put in the
5228 insn but the giv combined with (with a reg as new_reg) is
5229 marked maybe_dead. Since the register will be used in either
5230 case, we'd prefer it be used from the simpler giv. */
04aa27b1 5231
5232 for (v = bl->giv; v; v = v->next_iv)
5233 if (! v->maybe_dead && v->same)
5234 v->same->maybe_dead = 0;
5235
5236 /* Try to eliminate the biv, if it is a candidate.
e7b494d3 5237 This won't work if ! bl->all_reduced,
04aa27b1 5238 since the givs we planned to use might not have been reduced.
5239
e7b494d3 5240 We have to be careful that we didn't initially think we could
5241 eliminate this biv because of a giv that we now think may be
5242 dead and shouldn't be used as a biv replacement.
04aa27b1 5243
5244 Also, there is the possibility that we may have a giv that looks
5245 like it can be used to eliminate a biv, but the resulting insn
4a8f0b95 5246 isn't valid. This can happen, for example, on the 88k, where a
04aa27b1 5247 JUMP_INSN can compare a register only with zero. Attempts to
b550e058 5248 replace it with a compare with a constant will fail.
04aa27b1 5249
5250 Note that in cases where this call fails, we may have replaced some
5251 of the occurrences of the biv with a giv, but no harm was done in
5252 doing so in the rare cases where it can occur. */
5253
e7b494d3 5254 if (bl->all_reduced == 1 && bl->eliminable
15fc3eb7 5255 && maybe_eliminate_biv (loop, bl, 1, threshold, insn_count))
04aa27b1 5256 {
5257 /* ?? If we created a new test to bypass the loop entirely,
5258 or otherwise drop straight in, based on this test, then
5259 we might want to rewrite it also. This way some later
5260 pass has more hope of removing the initialization of this
a92771b8 5261 biv entirely. */
04aa27b1 5262
5263 /* If final_value != 0, then the biv may be used after loop end
5264 and we must emit an insn to set it just in case.
5265
5266 Reversed bivs already have an insn after the loop setting their
5267 value, so we don't need another one. We can't calculate the
a92771b8 5268 proper final value for such a biv here anyways. */
e7b494d3 5269 if (bl->final_value && ! bl->reversed)
2ecaad6d 5270 loop_insn_sink_or_swim (loop,
5271 gen_load_of_final_value (bl->biv->dest_reg,
5272 bl->final_value));
04aa27b1 5273
04aa27b1 5274 if (loop_dump_stream)
5275 fprintf (loop_dump_stream, "Reg %d: biv eliminated\n",
5276 bl->regno);
5277 }
d2aa2d17 5278 /* See above note wrt final_value. But since we couldn't eliminate
5279 the biv, we must set the value after the loop instead of before. */
5280 else if (bl->final_value && ! bl->reversed)
2ecaad6d 5281 loop_insn_sink (loop, gen_load_of_final_value (bl->biv->dest_reg,
5282 bl->final_value));
04aa27b1 5283 }
5284
5285 /* Go through all the instructions in the loop, making all the
5286 register substitutions scheduled in REG_MAP. */
5287
e7b494d3 5288 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
04aa27b1 5289 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4a8f0b95 5290 || GET_CODE (p) == CALL_INSN)
04aa27b1 5291 {
a5c62b07 5292 replace_regs (PATTERN (p), reg_map, reg_map_size, 0);
5293 replace_regs (REG_NOTES (p), reg_map, reg_map_size, 0);
03104e7c 5294 INSN_CODE (p) = -1;
04aa27b1 5295 }
5296
b000c11e 5297 if (loop_info->n_iterations > 0)
5298 {
5299 /* When we completely unroll a loop we will likely not need the increment
5300 of the loop BIV and we will not need the conditional branch at the
5301 end of the loop. */
5302 unrolled_insn_copies = insn_count - 2;
5303
5304#ifdef HAVE_cc0
5305 /* When we completely unroll a loop on a HAVE_cc0 machine we will not
5306 need the comparison before the conditional branch at the end of the
5307 loop. */
3aa79698 5308 unrolled_insn_copies -= 1;
b000c11e 5309#endif
5310
5311 /* We'll need one copy for each loop iteration. */
5312 unrolled_insn_copies *= loop_info->n_iterations;
5313
5314 /* A little slop to account for the ability to remove initialization
5315 code, better CSE, and other secondary benefits of completely
5316 unrolling some loops. */
5317 unrolled_insn_copies -= 1;
5318
5319 /* Clamp the value. */
5320 if (unrolled_insn_copies < 0)
5321 unrolled_insn_copies = 0;
5322 }
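  /* A worked example with made-up numbers: for a 10-insn loop body known
     to iterate 4 times, the estimate on a machine without cc0 is
     (10 - 2) * 4 - 1 = 31 copies; since 31 > 10, such a loop would not
     be auto-unrolled by the test below.  */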
4a8f0b95 5323
04aa27b1 5324 /* Unroll loops from within strength reduction so that we can use the
5325 induction variable information that strength_reduce has already
b000c11e 5326 collected. Always unroll loops that would be as small or smaller
5327 unrolled than when rolled. */
bcfc9e7d 5328 if ((flags & LOOP_UNROLL)
eb9d297d 5329 || ((flags & LOOP_AUTO_UNROLL)
893ff8a6 5330 && loop_info->n_iterations > 0
b000c11e 5331 && unrolled_insn_copies <= insn_count))
89e8d34f 5332 unroll_loop (loop, insn_count, 1);
04aa27b1 5333
9641f63c 5334#ifdef HAVE_doloop_end
5335 if (HAVE_doloop_end && (flags & LOOP_BCT) && flag_branch_on_count_reg)
5336 doloop_optimize (loop);
5337#endif /* HAVE_doloop_end */
3eb9a99d 5338
482cd3c1 5339 /* If the number of iterations is known, attach a branch prediction
5340 note to the branch. Do that only in the second loop pass, as loop
5341 unrolling may change the number of iterations performed. */
3864028d 5342 if (flags & LOOP_BCT)
482cd3c1 5343 {
3864028d 5344 unsigned HOST_WIDE_INT n
5345 = loop_info->n_iterations / loop_info->unroll_number;
5346 if (n > 1)
978dc23e 5347 predict_insn (prev_nonnote_insn (loop->end), PRED_LOOP_ITERATIONS,
3864028d 5348 REG_BR_PROB_BASE - REG_BR_PROB_BASE / n);
482cd3c1 5349 }
5350
04aa27b1 5351 if (loop_dump_stream)
5352 fprintf (loop_dump_stream, "\n");
03bfb68f 5353
a7eee06c 5354 loop_ivs_free (loop);
b9cf3f63 5355 if (reg_map)
5356 free (reg_map);
04aa27b1 5357}
5358\f
f13bf5f6 5359 /* Record all basic induction variables calculated in the insn.  */
e3ebd67e 5360static rtx
3ad4992f 5361check_insn_for_bivs (struct loop *loop, rtx p, int not_every_iteration,
5362 int maybe_multiple)
f13bf5f6 5363{
8ec5f078 5364 struct loop_ivs *ivs = LOOP_IVS (loop);
f13bf5f6 5365 rtx set;
5366 rtx dest_reg;
5367 rtx inc_val;
5368 rtx mult_val;
5369 rtx *location;
5370
5371 if (GET_CODE (p) == INSN
5372 && (set = single_set (p))
5373 && GET_CODE (SET_DEST (set)) == REG)
5374 {
5375 dest_reg = SET_DEST (set);
5376 if (REGNO (dest_reg) < max_reg_before_loop
5377 && REGNO (dest_reg) >= FIRST_PSEUDO_REGISTER
8ec5f078 5378 && REG_IV_TYPE (ivs, REGNO (dest_reg)) != NOT_BASIC_INDUCT)
f13bf5f6 5379 {
f13bf5f6 5380 if (basic_induction_var (loop, SET_SRC (set),
5381 GET_MODE (SET_SRC (set)),
5382 dest_reg, p, &inc_val, &mult_val,
369943ac 5383 &location))
f13bf5f6 5384 {
5385 /* It is a possible basic induction variable.
5386 Create and initialize an induction structure for it. */
5387
5388 struct induction *v
d7c47c0e 5389 = (struct induction *) xmalloc (sizeof (struct induction));
f13bf5f6 5390
8ec5f078 5391 record_biv (loop, v, p, dest_reg, inc_val, mult_val, location,
369943ac 5392 not_every_iteration, maybe_multiple);
8ec5f078 5393 REG_IV_TYPE (ivs, REGNO (dest_reg)) = BASIC_INDUCT;
f13bf5f6 5394 }
d1908618 5395 else if (REGNO (dest_reg) < ivs->n_regs)
8ec5f078 5396 REG_IV_TYPE (ivs, REGNO (dest_reg)) = NOT_BASIC_INDUCT;
f13bf5f6 5397 }
5398 }
e3ebd67e 5399 return p;
f13bf5f6 5400}
5401\f
4a8f0b95 5402/* Record all givs calculated in the insn.
f13bf5f6 5403 A register is a giv if: it is only set once, it is a function of a
5404 biv and a constant (or invariant), and it is not a biv. */
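/* A hypothetical example: given a biv I, the assignment

	J = I * 4 + 10;

   makes J a giv with SRC_REG == I, MULT_VAL == 4 and ADD_VAL == 10,
   provided this is the only set of J and J is not itself a biv.  */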
e3ebd67e 5405static rtx
3ad4992f 5406check_insn_for_givs (struct loop *loop, rtx p, int not_every_iteration,
5407 int maybe_multiple)
f13bf5f6 5408{
e9b78d43 5409 struct loop_regs *regs = LOOP_REGS (loop);
8ec5f078 5410
f13bf5f6 5411 rtx set;
5412 /* Look for a general induction variable in a register. */
5413 if (GET_CODE (p) == INSN
5414 && (set = single_set (p))
5415 && GET_CODE (SET_DEST (set)) == REG
05cb4e54 5416 && ! regs->array[REGNO (SET_DEST (set))].may_not_optimize)
f13bf5f6 5417 {
5418 rtx src_reg;
5419 rtx dest_reg;
5420 rtx add_val;
5421 rtx mult_val;
ff57e249 5422 rtx ext_val;
f13bf5f6 5423 int benefit;
5424 rtx regnote = 0;
5425 rtx last_consec_insn;
5426
5427 dest_reg = SET_DEST (set);
5428 if (REGNO (dest_reg) < FIRST_PSEUDO_REGISTER)
e3ebd67e 5429 return p;
f13bf5f6 5430
5431 if (/* SET_SRC is a giv. */
5432 (general_induction_var (loop, SET_SRC (set), &src_reg, &add_val,
ff57e249 5433 &mult_val, &ext_val, 0, &benefit, VOIDmode)
f13bf5f6 5434 /* Equivalent expression is a giv. */
5435 || ((regnote = find_reg_note (p, REG_EQUAL, NULL_RTX))
5436 && general_induction_var (loop, XEXP (regnote, 0), &src_reg,
ff57e249 5437 &add_val, &mult_val, &ext_val, 0,
cf495191 5438 &benefit, VOIDmode)))
f13bf5f6 5439 /* Don't try to handle any regs made by loop optimization.
5440 We have nothing on them in regno_first_uid, etc. */
5441 && REGNO (dest_reg) < max_reg_before_loop
5442 /* Don't recognize a BASIC_INDUCT_VAR here. */
5443 && dest_reg != src_reg
5444 /* This must be the only place where the register is set. */
05cb4e54 5445 && (regs->array[REGNO (dest_reg)].n_times_set == 1
f13bf5f6 5446 /* or all sets must be consecutive and make a giv. */
5447 || (benefit = consec_sets_giv (loop, benefit, p,
5448 src_reg, dest_reg,
ff57e249 5449 &add_val, &mult_val, &ext_val,
f13bf5f6 5450 &last_consec_insn))))
5451 {
5452 struct induction *v
d7c47c0e 5453 = (struct induction *) xmalloc (sizeof (struct induction));
f13bf5f6 5454
5455 /* If this is a library call, increase benefit. */
5456 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
5457 benefit += libcall_benefit (p);
5458
5459 /* Skip the consecutive insns, if there are any. */
05cb4e54 5460 if (regs->array[REGNO (dest_reg)].n_times_set != 1)
f13bf5f6 5461 p = last_consec_insn;
5462
5463 record_giv (loop, v, p, src_reg, dest_reg, mult_val, add_val,
ff57e249 5464 ext_val, benefit, DEST_REG, not_every_iteration,
e17f5b23 5465 maybe_multiple, (rtx*) 0);
f13bf5f6 5466
5467 }
5468 }
5469
f13bf5f6 5470 /* Look for givs which are memory addresses. */
f13bf5f6 5471 if (GET_CODE (p) == INSN)
5472 find_mem_givs (loop, PATTERN (p), p, not_every_iteration,
5473 maybe_multiple);
f13bf5f6 5474
5475 /* Update the status of whether giv can derive other givs. This can
5476 change when we pass a label or an insn that updates a biv. */
5477 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
4a8f0b95 5478 || GET_CODE (p) == CODE_LABEL)
f13bf5f6 5479 update_giv_derive (loop, p);
e3ebd67e 5480 return p;
f13bf5f6 5481}
5482\f
04aa27b1 5483/* Return 1 if X is a valid source for an initial value (or as value being
5484 compared against in an initial test).
5485
5486 X must be either a register or constant and must not be clobbered between
5487 the current insn and the start of the loop.
5488
5489 INSN is the insn containing X. */
5490
5491static int
3ad4992f 5492valid_initial_value_p (rtx x, rtx insn, int call_seen, rtx loop_start)
04aa27b1 5493{
5494 if (CONSTANT_P (x))
5495 return 1;
5496
f9e15121 5497 /* Only consider pseudos we know about, initialized in insns whose luids
04aa27b1 5498 we know. */
5499 if (GET_CODE (x) != REG
5500 || REGNO (x) >= max_reg_before_loop)
5501 return 0;
5502
5503 /* Don't use call-clobbered registers across a call which clobbers it. On
5504 some machines, don't use any hard registers at all. */
5505 if (REGNO (x) < FIRST_PSEUDO_REGISTER
0dbd1c74 5506 && (SMALL_REGISTER_CLASSES
5507 || (call_used_regs[REGNO (x)] && call_seen)))
04aa27b1 5508 return 0;
5509
5510 /* Don't use registers that have been clobbered before the start of the
5511 loop. */
5512 if (reg_set_between_p (x, insn, loop_start))
5513 return 0;
5514
5515 return 1;
5516}
5517\f
5518/* Scan X for memory refs and check each memory address
5519 as a possible giv. INSN is the insn whose pattern X comes from.
5520 NOT_EVERY_ITERATION is 1 if the insn might not be executed during
7014838c 5521 every loop iteration. MAYBE_MULTIPLE is 1 if the insn might be executed
b903337a 5522 more than once in each loop iteration. */
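/* Illustrative sketch: an array reference such as a[i] appears as a MEM
   whose address might be (plus (mult (reg I) (const_int 4)) (reg A))
   (assuming 4-byte elements); such an address is a candidate DEST_ADDR
   giv of the biv I.  */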
04aa27b1 5523
5524static void
3ad4992f 5525find_mem_givs (const struct loop *loop, rtx x, rtx insn,
5526 int not_every_iteration, int maybe_multiple)
04aa27b1 5527{
19cb6b50 5528 int i, j;
5529 enum rtx_code code;
5530 const char *fmt;
04aa27b1 5531
5532 if (x == 0)
5533 return;
5534
5535 code = GET_CODE (x);
5536 switch (code)
5537 {
5538 case REG:
5539 case CONST_INT:
5540 case CONST:
5541 case CONST_DOUBLE:
5542 case SYMBOL_REF:
5543 case LABEL_REF:
5544 case PC:
5545 case CC0:
5546 case ADDR_VEC:
5547 case ADDR_DIFF_VEC:
5548 case USE:
5549 case CLOBBER:
5550 return;
5551
5552 case MEM:
5553 {
5554 rtx src_reg;
5555 rtx add_val;
5556 rtx mult_val;
ff57e249 5557 rtx ext_val;
04aa27b1 5558 int benefit;
5559
1d322a97 5560 /* This code used to disable creating GIVs with mult_val == 1 and
4a8f0b95 5561 add_val == 0. However, this leads to lost optimizations when
1d322a97 5562 it comes time to combine a set of related DEST_ADDR GIVs, since
1e625a2e 5563 this one would not be seen. */
04aa27b1 5564
15fc3eb7 5565 if (general_induction_var (loop, XEXP (x, 0), &src_reg, &add_val,
ff57e249 5566 &mult_val, &ext_val, 1, &benefit,
5567 GET_MODE (x)))
04aa27b1 5568 {
5569 /* Found one; record it. */
5570 struct induction *v
d7c47c0e 5571 = (struct induction *) xmalloc (sizeof (struct induction));
04aa27b1 5572
15fc3eb7 5573 record_giv (loop, v, insn, src_reg, addr_placeholder, mult_val,
ff57e249 5574 add_val, ext_val, benefit, DEST_ADDR,
5575 not_every_iteration, maybe_multiple, &XEXP (x, 0));
04aa27b1 5576
3b853ab8 5577 v->mem = x;
04aa27b1 5578 }
04aa27b1 5579 }
0dbd1c74 5580 return;
5581
5582 default:
5583 break;
04aa27b1 5584 }
5585
5586 /* Recursively scan the subexpressions for other mem refs. */
5587
5588 fmt = GET_RTX_FORMAT (code);
5589 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
5590 if (fmt[i] == 'e')
15fc3eb7 5591 find_mem_givs (loop, XEXP (x, i), insn, not_every_iteration,
5592 maybe_multiple);
04aa27b1 5593 else if (fmt[i] == 'E')
5594 for (j = 0; j < XVECLEN (x, i); j++)
15fc3eb7 5595 find_mem_givs (loop, XVECEXP (x, i, j), insn, not_every_iteration,
5596 maybe_multiple);
04aa27b1 5597}
5598\f
5599/* Fill in the data about one biv update.
5600 V is the `struct induction' in which we record the biv. (It is
5601 allocated by the caller, with xmalloc.)
5602 INSN is the insn that sets it.
5603 DEST_REG is the biv's reg.
5604
5605 MULT_VAL is const1_rtx if the biv is being incremented here, in which case
5606 INC_VAL is the increment. Otherwise, MULT_VAL is const0_rtx and the biv is
56a1cb6b 5607 being set to INC_VAL.
5608
5609 NOT_EVERY_ITERATION is nonzero if this biv update is not known to be
5610 executed every iteration; MAYBE_MULTIPLE is nonzero if this biv update
5611 can be executed more than once per iteration. If MAYBE_MULTIPLE
5612 and NOT_EVERY_ITERATION are both zero, we know that the biv update is
5613 executed exactly once per iteration. */
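/* For example (hypothetical): a biv update guarded by a condition, as in

	for (...)  { if (c) i += 2;  ... }

   is recorded with NOT_EVERY_ITERATION nonzero, while an update inside
   an inner loop nested in this one would be recorded with MAYBE_MULTIPLE
   nonzero.  */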
04aa27b1 5614
5615static void
3ad4992f 5616record_biv (struct loop *loop, struct induction *v, rtx insn, rtx dest_reg,
5617 rtx inc_val, rtx mult_val, rtx *location,
5618 int not_every_iteration, int maybe_multiple)
04aa27b1 5619{
8ec5f078 5620 struct loop_ivs *ivs = LOOP_IVS (loop);
04aa27b1 5621 struct iv_class *bl;
5622
5623 v->insn = insn;
5624 v->src_reg = dest_reg;
5625 v->dest_reg = dest_reg;
5626 v->mult_val = mult_val;
5627 v->add_val = inc_val;
5f155d21 5628 v->ext_dependent = NULL_RTX;
8bd88b68 5629 v->location = location;
04aa27b1 5630 v->mode = GET_MODE (dest_reg);
5631 v->always_computable = ! not_every_iteration;
a5e23478 5632 v->always_executed = ! not_every_iteration;
56a1cb6b 5633 v->maybe_multiple = maybe_multiple;
c2034a85 5634 v->same = 0;
04aa27b1 5635
5636 /* Add this to the reg's iv_class, creating a class
5637 if this is the first incrementation of the reg. */
5638
b995c337 5639 bl = REG_IV_CLASS (ivs, REGNO (dest_reg));
04aa27b1 5640 if (bl == 0)
5641 {
5642 /* Create and initialize new iv_class. */
5643
d7c47c0e 5644 bl = (struct iv_class *) xmalloc (sizeof (struct iv_class));
04aa27b1 5645
5646 bl->regno = REGNO (dest_reg);
5647 bl->biv = 0;
5648 bl->giv = 0;
5649 bl->biv_count = 0;
5650 bl->giv_count = 0;
5651
5652 /* Set initial value to the reg itself. */
5653 bl->initial_value = dest_reg;
e7b494d3 5654 bl->final_value = 0;
b550e058 5655 /* We haven't seen the initializing insn yet. */
04aa27b1 5656 bl->init_insn = 0;
5657 bl->init_set = 0;
5658 bl->initial_test = 0;
5659 bl->incremented = 0;
5660 bl->eliminable = 0;
5661 bl->nonneg = 0;
5662 bl->reversed = 0;
40adc019 5663 bl->total_benefit = 0;
04aa27b1 5664
6f812a51 5665 /* Add this class to ivs->list. */
5666 bl->next = ivs->list;
5667 ivs->list = bl;
04aa27b1 5668
5669 /* Put it in the array of biv register classes. */
b995c337 5670 REG_IV_CLASS (ivs, REGNO (dest_reg)) = bl;
04aa27b1 5671 }
c2034a85 5672 else
5673 {
5674 /* Check if location is the same as a previous one. */
5675 struct induction *induction;
5676 for (induction = bl->biv; induction; induction = induction->next_iv)
5677 if (location == induction->location)
5678 {
5679 v->same = induction;
5680 break;
5681 }
5682 }
04aa27b1 5683
5684 /* Update IV_CLASS entry for this biv. */
5685 v->next_iv = bl->biv;
5686 bl->biv = v;
5687 bl->biv_count++;
5688 if (mult_val == const1_rtx)
5689 bl->incremented = 1;
5690
5691 if (loop_dump_stream)
bb6f5cc9 5692 loop_biv_dump (v, loop_dump_stream, 0);
04aa27b1 5693}
5694\f
5695/* Fill in the data about one giv.
5696 V is the `struct induction' in which we record the giv. (It is
5697 allocated by the caller, with xmalloc.)
5698 INSN is the insn that sets it.
5699 BENEFIT estimates the savings from deleting this insn.
5700 TYPE is DEST_REG or DEST_ADDR; it says whether the giv is computed
5701 into a register or is used as a memory address.
5702
5703 SRC_REG is the biv reg which the giv is computed from.
5704 DEST_REG is the giv's reg (if the giv is stored in a reg).
5705 MULT_VAL and ADD_VAL are the coefficients used to compute the giv.
5706 LOCATION points to the place where this giv's value appears in INSN. */
5707
5708static void
3ad4992f 5709record_giv (const struct loop *loop, struct induction *v, rtx insn,
5710 rtx src_reg, rtx dest_reg, rtx mult_val, rtx add_val,
5711 rtx ext_val, int benefit, enum g_types type,
5712 int not_every_iteration, int maybe_multiple, rtx *location)
04aa27b1 5713{
8ec5f078 5714 struct loop_ivs *ivs = LOOP_IVS (loop);
04aa27b1 5715 struct induction *b;
5716 struct iv_class *bl;
5717 rtx set = single_set (insn);
2cbea1dc 5718 rtx temp;
5719
b903337a 5720 /* Attempt to prove constantness of the values. Don't let simplify_rtx
f4ad60b7 5721 undo the MULT canonicalization that we performed earlier. */
2cbea1dc 5722 temp = simplify_rtx (add_val);
f4ad60b7 5723 if (temp
5724 && ! (GET_CODE (add_val) == MULT
5725 && GET_CODE (temp) == ASHIFT))
2cbea1dc 5726 add_val = temp;
04aa27b1 5727
5728 v->insn = insn;
5729 v->src_reg = src_reg;
5730 v->giv_type = type;
5731 v->dest_reg = dest_reg;
5732 v->mult_val = mult_val;
5733 v->add_val = add_val;
5f155d21 5734 v->ext_dependent = ext_val;
04aa27b1 5735 v->benefit = benefit;
5736 v->location = location;
5737 v->cant_derive = 0;
5738 v->combined_with = 0;
7014838c 5739 v->maybe_multiple = maybe_multiple;
04aa27b1 5740 v->maybe_dead = 0;
5741 v->derive_adjustment = 0;
5742 v->same = 0;
5743 v->ignore = 0;
5744 v->new_reg = 0;
5745 v->final_value = 0;
4caa7e17 5746 v->same_insn = 0;
a5e23478 5747 v->auto_inc_opt = 0;
ea0cb7ae 5748 v->unrolled = 0;
5749 v->shared = 0;
04aa27b1 5750
5751 /* The v->always_computable field is used in update_giv_derive, to
5752 determine whether a giv can be used to derive another giv. For a
5753 DEST_REG giv, INSN computes a new value for the giv, so its value
5754 isn't computable if INSN isn't executed every iteration.
5755 However, for a DEST_ADDR giv, INSN merely uses the value of the giv;
5756 it does not compute a new value. Hence the value is always computable
f9e15121 5757 regardless of whether INSN is executed each iteration. */
04aa27b1 5758
5759 if (type == DEST_ADDR)
5760 v->always_computable = 1;
5761 else
5762 v->always_computable = ! not_every_iteration;
5763
a5e23478 5764 v->always_executed = ! not_every_iteration;
5765
04aa27b1 5766 if (type == DEST_ADDR)
5767 {
5768 v->mode = GET_MODE (*location);
5769 v->lifetime = 1;
04aa27b1 5770 }
5771 else /* type == DEST_REG */
5772 {
5773 v->mode = GET_MODE (SET_DEST (set));
5774
97923856 5775 v->lifetime = LOOP_REG_LIFETIME (loop, REGNO (dest_reg));
04aa27b1 5776
04aa27b1 5777 /* If the lifetime is zero, it means that this register is
5778 really a dead store. So mark this as a giv that can be
a92771b8 5779 ignored. This will not prevent the biv from being eliminated. */
04aa27b1 5780 if (v->lifetime == 0)
5781 v->ignore = 1;
5782
8ec5f078 5783 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
5784 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
04aa27b1 5785 }
5786
5787 /* Add the giv to the class of givs computed from one biv. */
5788
b995c337 5789 bl = REG_IV_CLASS (ivs, REGNO (src_reg));
04aa27b1 5790 if (bl)
5791 {
5792 v->next_iv = bl->giv;
5793 bl->giv = v;
5794 /* Don't count DEST_ADDR. This is supposed to count the number of
5795 insns that calculate givs. */
5796 if (type == DEST_REG)
5797 bl->giv_count++;
5798 bl->total_benefit += benefit;
5799 }
5800 else
5801 /* Fatal error, biv missing for this giv? */
5802 abort ();
5803
5804 if (type == DEST_ADDR)
d4a961c8 5805 {
5806 v->replaceable = 1;
5807 v->not_replaceable = 0;
5808 }
04aa27b1 5809 else
5810 {
5811 /* The giv can be replaced outright by the reduced register only if all
5812 of the following conditions are true:
4bb30577 5813 - the insn that sets the giv is always executed on any iteration
04aa27b1 5814 on which the giv is used at all
5815 (there are two ways to deduce this:
5816 either the insn is executed on every iteration,
5817 or all uses follow that insn in the same basic block),
4bb30577 5818 - the giv is not used outside the loop
04aa27b1 5819 - no assignments to the biv occur during the giv's lifetime. */
5820
394685a4 5821 if (REGNO_FIRST_UID (REGNO (dest_reg)) == INSN_UID (insn)
04aa27b1 5822 /* Previous line always fails if INSN was moved by loop opt. */
23e52523 5823 && REGNO_LAST_LUID (REGNO (dest_reg))
15fc3eb7 5824 < INSN_LUID (loop->end)
04aa27b1 5825 && (! not_every_iteration
5826 || last_use_this_basic_block (dest_reg, insn)))
4a8f0b95 5827 {
04aa27b1 5828 /* Now check that there are no assignments to the biv within the
5829 giv's lifetime. This requires two separate checks. */
5830
5831 /* Check each biv update, and fail if any are between the first
5832 and last use of the giv.
4a8f0b95 5833
04aa27b1 5834 If this loop contains an inner loop that was unrolled, then
5835 the insn modifying the biv may have been emitted by the loop
5836 unrolling code, and hence does not have a valid luid. Just
5837 mark the biv as not replaceable in this case. It is not very
5838 useful as a biv, because it is used in two different loops.
5839 It is very unlikely that we would be able to optimize the giv
5840 using this biv anyway. */
5841
5842 v->replaceable = 1;
d4a961c8 5843 v->not_replaceable = 0;
04aa27b1 5844 for (b = bl->biv; b; b = b->next_iv)
5845 {
5846 if (INSN_UID (b->insn) >= max_uid_for_loop
23e52523 5847 || ((INSN_LUID (b->insn)
5848 >= REGNO_FIRST_LUID (REGNO (dest_reg)))
5849 && (INSN_LUID (b->insn)
5850 <= REGNO_LAST_LUID (REGNO (dest_reg)))))
04aa27b1 5851 {
5852 v->replaceable = 0;
5853 v->not_replaceable = 1;
5854 break;
4a8f0b95 5855 }
04aa27b1 5856 }
5857
ad81343e 5858 /* If there are any backwards branches that go from after the
5859 biv update to before it, then this giv is not replaceable. */
04aa27b1 5860 if (v->replaceable)
ad81343e 5861 for (b = bl->biv; b; b = b->next_iv)
15fc3eb7 5862 if (back_branch_in_range_p (loop, b->insn))
ad81343e 5863 {
5864 v->replaceable = 0;
5865 v->not_replaceable = 1;
5866 break;
5867 }
04aa27b1 5868 }
5869 else
5870 {
5871 /* May still be replaceable, we don't have enough info here to
5872 decide. */
5873 v->replaceable = 0;
5874 v->not_replaceable = 0;
5875 }
5876 }
5877
1d322a97 5878 /* Record whether the add_val contains a const_int, for later use by
5879 combine_givs. */
5880 {
5881 rtx tem = add_val;
5882
5883 v->no_const_addval = 1;
5884 if (tem == const0_rtx)
5885 ;
2cbea1dc 5886 else if (CONSTANT_P (add_val))
1d322a97 5887 v->no_const_addval = 0;
2cbea1dc 5888 if (GET_CODE (tem) == PLUS)
1d322a97 5889 {
2cbea1dc 5890 while (1)
1d322a97 5891 {
5892 if (GET_CODE (XEXP (tem, 0)) == PLUS)
5893 tem = XEXP (tem, 0);
5894 else if (GET_CODE (XEXP (tem, 1)) == PLUS)
5895 tem = XEXP (tem, 1);
5896 else
5897 break;
5898 }
2cbea1dc 5899 if (CONSTANT_P (XEXP (tem, 1)))
5900 v->no_const_addval = 0;
1d322a97 5901 }
5902 }
5903
04aa27b1 5904 if (loop_dump_stream)
bb6f5cc9 5905 loop_giv_dump (v, loop_dump_stream, 0);
04aa27b1 5906}
5907
04aa27b1 5908/* All this does is determine whether a giv can be made replaceable because
5909 its final value can be calculated. This code cannot be part of record_giv
5910 above, because final_giv_value requires that the number of loop iterations
5911 be known, and that cannot be accurately calculated until after all givs
5912 have been identified. */
5913
5914static void
3ad4992f 5915check_final_value (const struct loop *loop, struct induction *v)
04aa27b1 5916{
04aa27b1 5917 rtx final_value = 0;
04aa27b1 5918
04aa27b1 5919 /* DEST_ADDR givs will never reach here, because they are always marked
5920 replaceable above in record_giv. */
5921
5922 /* The giv can be replaced outright by the reduced register only if all
5923 of the following conditions are true:
5924 - the insn that sets the giv is always executed on any iteration
5925 on which the giv is used at all
5926 (there are two ways to deduce this:
5927 either the insn is executed on every iteration,
5928 or all uses follow that insn in the same basic block),
5929 - its final value can be calculated (this condition is different
5930 than the one above in record_giv)
12898ca3 5931 - it's not used before it's set
04aa27b1 5932 - no assignments to the biv occur during the giv's lifetime. */
5933
5934#if 0
5935 /* This is only called now when replaceable is known to be false. */
5936 /* Clear replaceable, so that it won't confuse final_giv_value. */
5937 v->replaceable = 0;
5938#endif
5939
15fc3eb7 5940 if ((final_value = final_giv_value (loop, v))
a22e3dfa 5941 && (v->always_executed
5942 || last_use_this_basic_block (v->dest_reg, v->insn)))
04aa27b1 5943 {
12898ca3 5944 int biv_increment_seen = 0, before_giv_insn = 0;
04aa27b1 5945 rtx p = v->insn;
5946 rtx last_giv_use;
5947
5948 v->replaceable = 1;
d4a961c8 5949 v->not_replaceable = 0;
04aa27b1 5950
5951 /* When trying to determine whether or not a biv increment occurs
5952 during the lifetime of the giv, we can ignore uses of the variable
5953 outside the loop because final_value is true. Hence we cannot
5954 use regno_last_uid and regno_first_uid as above in record_giv. */
5955
5956 /* Search the loop to determine whether any assignments to the
5957 biv occur during the giv's lifetime. Start with the insn
5958 that sets the giv, and search around the loop until we come
5959 back to that insn again.
5960
5961 Also fail if there is a jump within the giv's lifetime that jumps
5962 to somewhere outside the lifetime but still within the loop. This
5963 catches spaghetti code where the execution order is not linear, and
5964 hence the above test fails. Here we assume that the giv lifetime
5965 does not extend from one iteration of the loop to the next, so as
5966 to make the test easier. Since the lifetime isn't known yet,
5967 this requires two loops. See also record_giv above. */
5968
5969 last_giv_use = v->insn;
5970
5971 while (1)
5972 {
5973 p = NEXT_INSN (p);
15fc3eb7 5974 if (p == loop->end)
12898ca3 5975 {
5976 before_giv_insn = 1;
5977 p = NEXT_INSN (loop->start);
5978 }
04aa27b1 5979 if (p == v->insn)
5980 break;
5981
5982 if (GET_CODE (p) == INSN || GET_CODE (p) == JUMP_INSN
5983 || GET_CODE (p) == CALL_INSN)
5984 {
8a37f84e 5985 /* It is possible for the BIV increment to use the GIV if we
5986 have a cycle. Thus we must be sure to check each insn for
5987 both BIV and GIV uses, and we must check for BIV uses
5988 first. */
5989
5990 if (! biv_increment_seen
5991 && reg_set_p (v->src_reg, PATTERN (p)))
5992 biv_increment_seen = 1;
72ae569f 5993
8a37f84e 5994 if (reg_mentioned_p (v->dest_reg, PATTERN (p)))
04aa27b1 5995 {
12898ca3 5996 if (biv_increment_seen || before_giv_insn)
04aa27b1 5997 {
5998 v->replaceable = 0;
5999 v->not_replaceable = 1;
6000 break;
6001 }
8a37f84e 6002 last_giv_use = p;
04aa27b1 6003 }
04aa27b1 6004 }
6005 }
4a8f0b95 6006
04aa27b1 6007 /* Now that the lifetime of the giv is known, check for branches
6008 from within the lifetime to outside the lifetime if it is still
6009 replaceable. */
6010
6011 if (v->replaceable)
6012 {
6013 p = v->insn;
6014 while (1)
6015 {
6016 p = NEXT_INSN (p);
15fc3eb7 6017 if (p == loop->end)
6018 p = NEXT_INSN (loop->start);
04aa27b1 6019 if (p == last_giv_use)
6020 break;
6021
6022 if (GET_CODE (p) == JUMP_INSN && JUMP_LABEL (p)
6023 && LABEL_NAME (JUMP_LABEL (p))
d27f543e 6024 && ((loop_insn_first_p (JUMP_LABEL (p), v->insn)
15fc3eb7 6025 && loop_insn_first_p (loop->start, JUMP_LABEL (p)))
d27f543e 6026 || (loop_insn_first_p (last_giv_use, JUMP_LABEL (p))
15fc3eb7 6027 && loop_insn_first_p (JUMP_LABEL (p), loop->end))))
04aa27b1 6028 {
6029 v->replaceable = 0;
6030 v->not_replaceable = 1;
6031
6032 if (loop_dump_stream)
6033 fprintf (loop_dump_stream,
6034 "Found branch outside giv lifetime.\n");
6035
6036 break;
6037 }
6038 }
6039 }
6040
6041 /* If it is replaceable, then save the final value. */
6042 if (v->replaceable)
6043 v->final_value = final_value;
6044 }
6045
6046 if (loop_dump_stream && v->replaceable)
6047 fprintf (loop_dump_stream, "Insn %d: giv reg %d final_value replaceable\n",
6048 INSN_UID (v->insn), REGNO (v->dest_reg));
6049}
6050\f
6051/* Update the status of whether a giv can derive other givs.
6052
6053 We need to do something special if there is or may be an update to the biv
6054 between the time the giv is defined and the time it is used to derive
6055 another giv.
6056
6057 In addition, a giv that is only conditionally set is not allowed to
6058 derive another giv once a label has been passed.
6059
6060 The cases we look at are when a label or an update to a biv is passed. */
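/* A hypothetical illustration: given a biv B and the sequence

	G = B * 4;  ...  B = B + 1;  ...  (use of G)

   a giv derived from G after the biv update must be compensated by
   4 * 1; that product is the derive_adjustment computed below.  When no
   such compensation can be formed, the giv is marked cant_derive.  */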
6061
6062static void
3ad4992f 6063update_giv_derive (const struct loop *loop, rtx p)
04aa27b1 6064{
8ec5f078 6065 struct loop_ivs *ivs = LOOP_IVS (loop);
04aa27b1 6066 struct iv_class *bl;
6067 struct induction *biv, *giv;
6068 rtx tem;
6069 int dummy;
6070
6071 /* Search all IV classes, then all bivs, and finally all givs.
6072
56a1cb6b 6073 There are three cases we are concerned with. First we have the situation
04aa27b1 6074 of a giv that is only updated conditionally. In that case, it may not
6075 derive any givs after a label is passed.
6076
6077 The second case is when a biv update occurs, or may occur, after the
6078 definition of a giv. For certain biv updates (see below) that are
6079 known to occur between the giv definition and use, we can adjust the
6080 giv definition. For others, or when the biv update is conditional,
6081 we must prevent the giv from deriving any other givs. There are two
6082 sub-cases within this case.
6083
6084 If this is a label, we are concerned with any biv update that is done
6085 conditionally, since it may be done after the giv is defined followed by
6086 a branch here (actually, we need to pass both a jump and a label, but
6087 this extra tracking doesn't seem worth it).
6088
56a1cb6b 6089 If this is a jump, we are concerned about any biv update that may be
6090 executed multiple times. We are actually only concerned about
6091 backward jumps, but it is probably not worth performing the test
6092 on the jump again here.
6093
6094 If this is a biv update, we must adjust the giv status to show that a
04aa27b1 6095 subsequent biv update was performed. If this adjustment cannot be done,
6096 the giv cannot derive further givs. */
6097
6f812a51 6098 for (bl = ivs->list; bl; bl = bl->next)
04aa27b1 6099 for (biv = bl->biv; biv; biv = biv->next_iv)
56a1cb6b 6100 if (GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN
6101 || biv->insn == p)
04aa27b1 6102 {
6103 for (giv = bl->giv; giv; giv = giv->next_iv)
6104 {
6105 /* If cant_derive is already true, there is no point in
6106 checking all of these conditions again. */
6107 if (giv->cant_derive)
6108 continue;
6109
6110 /* If this giv is conditionally set and we have passed a label,
6111 it cannot derive anything. */
6112 if (GET_CODE (p) == CODE_LABEL && ! giv->always_computable)
6113 giv->cant_derive = 1;
6114
6115 /* Skip givs that have mult_val == 0, since
6116 they are really invariants. Also skip those that are
6117 replaceable, since we know their lifetime doesn't contain
6118 any biv update. */
6119 else if (giv->mult_val == const0_rtx || giv->replaceable)
6120 continue;
6121
6122 /* The only way we can allow this giv to derive another
6123 is if this is a biv increment and we can form the product
6124 of biv->add_val and giv->mult_val. In this case, we will
6125 be able to compute a compensation. */
6126 else if (biv->insn == p)
6127 {
ff57e249 6128 rtx ext_val_dummy;
e1a25d7f 6129
ff57e249 6130 tem = 0;
e1a25d7f 6131 if (biv->mult_val == const1_rtx)
15fc3eb7 6132 tem = simplify_giv_expr (loop,
6133 gen_rtx_MULT (giv->mode,
941522d6 6134 biv->add_val,
6135 giv->mult_val),
ff57e249 6136 &ext_val_dummy, &dummy);
e1a25d7f 6137
6138 if (tem && giv->derive_adjustment)
7014838c 6139 tem = simplify_giv_expr
15fc3eb7 6140 (loop,
6141 gen_rtx_PLUS (giv->mode, tem, giv->derive_adjustment),
ff57e249 6142 &ext_val_dummy, &dummy);
7014838c 6143
e1a25d7f 6144 if (tem)
04aa27b1 6145 giv->derive_adjustment = tem;
6146 else
6147 giv->cant_derive = 1;
6148 }
56a1cb6b 6149 else if ((GET_CODE (p) == CODE_LABEL && ! biv->always_computable)
6150 || (GET_CODE (p) == JUMP_INSN && biv->maybe_multiple))
04aa27b1 6151 giv->cant_derive = 1;
6152 }
6153 }
6154}
6155\f
6156/* Check whether an insn is an increment legitimate for a basic induction var.
4f90f98b 6157 X is the source of insn P, or a part of it.
6158 MODE is the mode in which X should be interpreted.
6159
04aa27b1 6160 DEST_REG is the putative biv, also the destination of the insn.
6161 We accept patterns of these forms:
d0af4a3f 6162 REG = REG + INVARIANT (includes REG = REG - CONSTANT)
04aa27b1 6163 REG = INVARIANT + REG
04aa27b1 6164
6165 If X is suitable, we return 1, set *MULT_VAL to CONST1_RTX,
8bd88b68 6166 store the additive term into *INC_VAL, and store the place where
6167 we found the additive term into *LOCATION.
04aa27b1 6168
6169 If X is an assignment of an invariant into DEST_REG, we set
6170 *MULT_VAL to CONST0_RTX, and store the invariant into *INC_VAL.
6171
d0af4a3f 6172 We also want to detect a BIV when it corresponds to a variable
6173 whose mode was promoted via PROMOTED_MODE. In that case, an increment
6174 of the variable may be a PLUS that adds a SUBREG of that variable to
6175 an invariant and then sign- or zero-extends the result of the PLUS
6176 into the variable.
6177
6178 Most GIVs in such cases will be in the promoted mode, since that is
6179 probably the natural computation mode (and almost certainly the mode
6180 used for addresses) on the machine. So we view the pseudo-reg containing
6181 the variable as the BIV, as if it were simply incremented.
6182
6183 Note that treating the entire pseudo as a BIV will result in making
6184 simple increments to any GIVs based on it. However, if the variable
6185 overflows in its declared mode but not its promoted mode, the result will
4a8f0b95 6186 be incorrect. This is acceptable if the variable is signed, since
d0af4a3f 6187 overflows in such cases are undefined, but not if it is unsigned, since
6188 those overflows are defined. So we only check for SIGN_EXTEND and
6189 not ZERO_EXTEND.
6190
6191 If we cannot find a biv, we return 0. */
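/* Sketch of an accepted pattern (hypothetical RTL): an insn such as

	(set (reg 100) (plus (reg 100) (const_int 4)))

   yields *INC_VAL == (const_int 4), *MULT_VAL == const1_rtx, and
   *LOCATION pointing at the (const_int 4) operand.  */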
04aa27b1 6192
6193static int
3ad4992f 6194basic_induction_var (const struct loop *loop, rtx x, enum machine_mode mode,
6195 rtx dest_reg, rtx p, rtx *inc_val, rtx *mult_val,
6196 rtx **location)
04aa27b1 6197{
19cb6b50 6198 enum rtx_code code;
8bd88b68 6199 rtx *argp, arg;
d0af4a3f 6200 rtx insn, set = 0;
04aa27b1 6201
6202 code = GET_CODE (x);
03bfb68f 6203 *location = NULL;
04aa27b1 6204 switch (code)
6205 {
6206 case PLUS:
1d322a97 6207 if (rtx_equal_p (XEXP (x, 0), dest_reg)
d0af4a3f 6208 || (GET_CODE (XEXP (x, 0)) == SUBREG
6209 && SUBREG_PROMOTED_VAR_P (XEXP (x, 0))
6210 && SUBREG_REG (XEXP (x, 0)) == dest_reg))
8bd88b68 6211 {
6212 argp = &XEXP (x, 1);
6213 }
1d322a97 6214 else if (rtx_equal_p (XEXP (x, 1), dest_reg)
d0af4a3f 6215 || (GET_CODE (XEXP (x, 1)) == SUBREG
9633da39 6216 && SUBREG_PROMOTED_VAR_P (XEXP (x, 1))
6217 && SUBREG_REG (XEXP (x, 1)) == dest_reg))
8bd88b68 6218 {
6219 argp = &XEXP (x, 0);
6220 }
04aa27b1 6221 else
4a8f0b95 6222 return 0;
04aa27b1 6223
8bd88b68 6224 arg = *argp;
15fc3eb7 6225 if (loop_invariant_p (loop, arg) != 1)
04aa27b1 6226 return 0;
6227
4f90f98b 6228 *inc_val = convert_modes (GET_MODE (dest_reg), GET_MODE (x), arg, 0);
04aa27b1 6229 *mult_val = const1_rtx;
8bd88b68 6230 *location = argp;
04aa27b1 6231 return 1;
6232
d0af4a3f 6233 case SUBREG:
d2422fc2 6234 /* If what's inside the SUBREG is a BIV, then so is the SUBREG. This will
6235 handle addition of promoted variables.
6236 ??? The comment at the start of this function is wrong: promoted
6237 variable increments don't look like it says they do. */
6238 return basic_induction_var (loop, SUBREG_REG (x),
6239 GET_MODE (SUBREG_REG (x)),
6240 dest_reg, p, inc_val, mult_val, location);
04aa27b1 6241
d0af4a3f 6242 case REG:
1d322a97 6243 /* If this register is assigned in a previous insn, look at its
d0af4a3f 6244 source, but don't go outside the loop or past a label. */
6245
b30045bf 6246 /* If this sets a register to itself, we would repeat any previous
6247 biv increment if we applied this strategy blindly. */
6248 if (rtx_equal_p (dest_reg, x))
6249 return 0;
6250
1d322a97 6251 insn = p;
6252 while (1)
6253 {
5cda37a3 6254 rtx dest;
4a8f0b95 6255 do
6256 {
6257 insn = PREV_INSN (insn);
6258 }
6259 while (insn && GET_CODE (insn) == NOTE
6260 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
d0af4a3f 6261
4a8f0b95 6262 if (!insn)
1d322a97 6263 break;
6264 set = single_set (insn);
6265 if (set == 0)
6266 break;
5cda37a3 6267 dest = SET_DEST (set);
6268 if (dest == x
6269 || (GET_CODE (dest) == SUBREG
6270 && (GET_MODE_SIZE (GET_MODE (dest)) <= UNITS_PER_WORD)
6271 && (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
6272 && SUBREG_REG (dest) == x))
6273 return basic_induction_var (loop, SET_SRC (set),
6274 (GET_MODE (SET_SRC (set)) == VOIDmode
6275 ? GET_MODE (x)
6276 : GET_MODE (SET_SRC (set))),
6277 dest_reg, insn,
6278 inc_val, mult_val, location);
6279
6280 while (GET_CODE (dest) == SIGN_EXTRACT
6281 || GET_CODE (dest) == ZERO_EXTRACT
6282 || GET_CODE (dest) == SUBREG
6283 || GET_CODE (dest) == STRICT_LOW_PART)
6284 dest = XEXP (dest, 0);
6285 if (dest == x)
6286 break;
1d322a97 6287 }
72ae569f 6288 /* Fall through. */
04aa27b1 6289
6290 /* Can accept constant setting of biv only when inside the innermost loop.
4bb30577 6291 Otherwise, a biv of an inner loop may be incorrectly recognized
04aa27b1 6292 as a biv of the outer loop,
6293 causing code to be moved INTO the inner loop. */
6294 case MEM:
15fc3eb7 6295 if (loop_invariant_p (loop, x) != 1)
04aa27b1 6296 return 0;
6297 case CONST_INT:
6298 case SYMBOL_REF:
6299 case CONST:
3b5f953e 6300 /* convert_modes aborts if we try to convert to or from CCmode, so just
6301 exclude that case. It is very unlikely that a condition code value
73b69176 6302 would be a useful iterator anyways. convert_modes aborts if we try to
6303 convert a float mode to non-float or vice versa too. */
15fc3eb7 6304 if (loop->level == 1
73b69176 6305 && GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (dest_reg))
6306 && GET_MODE_CLASS (mode) != MODE_CC)
72ae569f 6307 {
4f90f98b 6308 /* Possible bug here? Perhaps we don't know the mode of X. */
6309 *inc_val = convert_modes (GET_MODE (dest_reg), mode, x, 0);
4a8f0b95 6310 *mult_val = const0_rtx;
6311 return 1;
6312 }
04aa27b1 6313 else
4a8f0b95 6314 return 0;
04aa27b1 6315
d0af4a3f 6316 case SIGN_EXTEND:
b24bee03 6317 /* Ignore this BIV if signed arithmetic overflow is defined. */
6318 if (flag_wrapv)
6319 return 0;
15fc3eb7 6320 return basic_induction_var (loop, XEXP (x, 0), GET_MODE (XEXP (x, 0)),
369943ac 6321 dest_reg, p, inc_val, mult_val, location);
1d322a97 6322
d0af4a3f 6323 case ASHIFTRT:
6324 /* Similar, since this can be a sign extension. */
6325 for (insn = PREV_INSN (p);
6326 (insn && GET_CODE (insn) == NOTE
6327 && NOTE_LINE_NUMBER (insn) != NOTE_INSN_LOOP_BEG);
6328 insn = PREV_INSN (insn))
6329 ;
6330
6331 if (insn)
6332 set = single_set (insn);
6333
b30045bf 6334 if (! rtx_equal_p (dest_reg, XEXP (x, 0))
6335 && set && SET_DEST (set) == XEXP (x, 0)
d0af4a3f 6336 && GET_CODE (XEXP (x, 1)) == CONST_INT
6337 && INTVAL (XEXP (x, 1)) >= 0
6338 && GET_CODE (SET_SRC (set)) == ASHIFT
369943ac 6339 && XEXP (x, 1) == XEXP (SET_SRC (set), 1))
6340 return basic_induction_var (loop, XEXP (SET_SRC (set), 0),
6341 GET_MODE (XEXP (x, 0)),
6342 dest_reg, insn, inc_val, mult_val,
6343 location);
d0af4a3f 6344 return 0;
6345
04aa27b1 6346 default:
6347 return 0;
6348 }
6349}
6350\f
6351/* A general induction variable (giv) is any quantity that is a linear
6352 function of a basic induction variable,
6353 i.e. giv = biv * mult_val + add_val.
6354 The coefficients can be any loop invariant quantity.
6355 A giv need not be computed directly from the biv;
6356 it can be computed by way of other givs. */
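/* For example, in a loop like

	for (i = 0; i < n; i++)
	  a[i] = 0;

   where the counter I is the biv, the address computation for A[I] --
   (plus (mult I (const_int 4)) BASE) on a target with 4-byte ints --
   is a giv with mult_val == 4 and add_val == BASE, the loop-invariant
   address of A.  */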
6357
6358/* Determine whether X computes a giv.
6359 If it does, return a nonzero value
6360 which is the benefit from eliminating the computation of X;
6361 set *SRC_REG to the register of the biv that it is computed from;
6362 set *ADD_VAL and *MULT_VAL to the coefficients,
 6363	   such that the value of X is biv * mult + add.  */
6364
6365static int
3ad4992f 6366general_induction_var (const struct loop *loop, rtx x, rtx *src_reg,
6367 rtx *add_val, rtx *mult_val, rtx *ext_val,
6368 int is_addr, int *pbenefit,
6369 enum machine_mode addr_mode)
04aa27b1 6370{
8ec5f078 6371 struct loop_ivs *ivs = LOOP_IVS (loop);
04aa27b1 6372 rtx orig_x = x;
04aa27b1 6373
6374 /* If this is an invariant, forget it, it isn't a giv. */
15fc3eb7 6375 if (loop_invariant_p (loop, x) == 1)
04aa27b1 6376 return 0;
6377
1d322a97 6378 *pbenefit = 0;
ff57e249 6379 *ext_val = NULL_RTX;
6380 x = simplify_giv_expr (loop, x, ext_val, pbenefit);
04aa27b1 6381 if (x == 0)
d7c47c0e 6382 return 0;
04aa27b1 6383
6384 switch (GET_CODE (x))
6385 {
6386 case USE:
6387 case CONST_INT:
6388 /* Since this is now an invariant and wasn't before, it must be a giv
6389 with MULT_VAL == 0. It doesn't matter which BIV we associate this
6390 with. */
6f812a51 6391 *src_reg = ivs->list->biv->dest_reg;
04aa27b1 6392 *mult_val = const0_rtx;
6393 *add_val = x;
6394 break;
6395
6396 case REG:
6397 /* This is equivalent to a BIV. */
6398 *src_reg = x;
6399 *mult_val = const1_rtx;
6400 *add_val = const0_rtx;
6401 break;
6402
6403 case PLUS:
6404 /* Either (plus (biv) (invar)) or
6405 (plus (mult (biv) (invar_1)) (invar_2)). */
6406 if (GET_CODE (XEXP (x, 0)) == MULT)
6407 {
6408 *src_reg = XEXP (XEXP (x, 0), 0);
6409 *mult_val = XEXP (XEXP (x, 0), 1);
6410 }
6411 else
6412 {
6413 *src_reg = XEXP (x, 0);
6414 *mult_val = const1_rtx;
6415 }
6416 *add_val = XEXP (x, 1);
6417 break;
6418
6419 case MULT:
6420 /* ADD_VAL is zero. */
6421 *src_reg = XEXP (x, 0);
6422 *mult_val = XEXP (x, 1);
6423 *add_val = const0_rtx;
6424 break;
6425
6426 default:
6427 abort ();
6428 }
6429
6430 /* Remove any enclosing USE from ADD_VAL and MULT_VAL (there will be
 6431	     one unless they are CONST_INT).  */
6432 if (GET_CODE (*add_val) == USE)
6433 *add_val = XEXP (*add_val, 0);
6434 if (GET_CODE (*mult_val) == USE)
6435 *mult_val = XEXP (*mult_val, 0);
6436
1d322a97 6437 if (is_addr)
cf495191 6438 *pbenefit += address_cost (orig_x, addr_mode) - reg_address_cost;
1d322a97 6439 else
6440 *pbenefit += rtx_cost (orig_x, SET);
04aa27b1 6441
1d322a97 6442 /* Always return true if this is a giv so it will be detected as such,
4a8f0b95 6443 even if the benefit is zero or negative. This allows elimination
6444 of bivs that might otherwise not be eliminated. */
6445 return 1;
04aa27b1 6446}
6447\f
6448/* Given an expression, X, try to form it as a linear function of a biv.
6449 We will canonicalize it to be of the form
4bb30577 6450 (plus (mult (BIV) (invar_1))
04aa27b1 6451 (invar_2))
b550e058 6452 with possible degeneracies.
04aa27b1 6453
6454 The invariant expressions must each be of a form that can be used as a
 6455	   machine operand.  We surround them with a USE rtx (a hack, but localized
6456 and certainly unambiguous!) if not a CONST_INT for simplicity in this
6457 routine; it is the caller's responsibility to strip them.
6458
6459 If no such canonicalization is possible (i.e., two biv's are used or an
6460 expression that is neither invariant nor a biv or giv), this routine
6461 returns 0.
6462
6ef828f9 6463 For a nonzero return, the result will have a code of CONST_INT, USE,
4a8f0b95 6464 REG (for a BIV), PLUS, or MULT. No other codes will occur.
04aa27b1 6465
6466 *BENEFIT will be incremented by the benefit of any sub-giv encountered. */
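/* As an illustration, if B is a biv and I an invariant register, the
   expression (B + I) * 3 canonicalizes here to

	(plus (mult B (const_int 3))
	      (use (mult I (const_int 3))))

   i.e. invar_1 is 3 and invar_2 is the invariant product I * 3,
   wrapped in a USE.  */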
6467
3ad4992f 6468static rtx sge_plus (enum machine_mode, rtx, rtx);
6469static rtx sge_plus_constant (rtx, rtx);
1d322a97 6470
04aa27b1 6471static rtx
3ad4992f 6472simplify_giv_expr (const struct loop *loop, rtx x, rtx *ext_val, int *benefit)
04aa27b1 6473{
8ec5f078 6474 struct loop_ivs *ivs = LOOP_IVS (loop);
e9b78d43 6475 struct loop_regs *regs = LOOP_REGS (loop);
04aa27b1 6476 enum machine_mode mode = GET_MODE (x);
6477 rtx arg0, arg1;
6478 rtx tem;
6479
6480 /* If this is not an integer mode, or if we cannot do arithmetic in this
6481 mode, this can't be a giv. */
6482 if (mode != VOIDmode
6483 && (GET_MODE_CLASS (mode) != MODE_INT
df38d76e 6484 || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT))
1d322a97 6485 return NULL_RTX;
04aa27b1 6486
6487 switch (GET_CODE (x))
6488 {
6489 case PLUS:
ff57e249 6490 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6491 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
04aa27b1 6492 if (arg0 == 0 || arg1 == 0)
1d322a97 6493 return NULL_RTX;
04aa27b1 6494
6495 /* Put constant last, CONST_INT last if both constant. */
6496 if ((GET_CODE (arg0) == USE
6497 || GET_CODE (arg0) == CONST_INT)
1d322a97 6498 && ! ((GET_CODE (arg0) == USE
6499 && GET_CODE (arg1) == USE)
6500 || GET_CODE (arg1) == CONST_INT))
04aa27b1 6501 tem = arg0, arg0 = arg1, arg1 = tem;
6502
6503 /* Handle addition of zero, then addition of an invariant. */
6504 if (arg1 == const0_rtx)
6505 return arg0;
6506 else if (GET_CODE (arg1) == CONST_INT || GET_CODE (arg1) == USE)
6507 switch (GET_CODE (arg0))
6508 {
6509 case CONST_INT:
6510 case USE:
1d322a97 6511 /* Adding two invariants must result in an invariant, so enclose
4bb30577 6512	       the addition operation inside a USE and return it.  */
04aa27b1 6513 if (GET_CODE (arg0) == USE)
6514 arg0 = XEXP (arg0, 0);
9c4e7a6b 6515 if (GET_CODE (arg1) == USE)
6516 arg1 = XEXP (arg1, 0);
6517
1d322a97 6518 if (GET_CODE (arg0) == CONST_INT)
6519 tem = arg0, arg0 = arg1, arg1 = tem;
6520 if (GET_CODE (arg1) == CONST_INT)
6521 tem = sge_plus_constant (arg0, arg1);
9c4e7a6b 6522 else
1d322a97 6523 tem = sge_plus (mode, arg0, arg1);
04aa27b1 6524
1d322a97 6525 if (GET_CODE (tem) != CONST_INT)
6526 tem = gen_rtx_USE (mode, tem);
04aa27b1 6527 return tem;
6528
6529 case REG:
6530 case MULT:
6531 /* biv + invar or mult + invar. Return sum. */
941522d6 6532 return gen_rtx_PLUS (mode, arg0, arg1);
04aa27b1 6533
6534 case PLUS:
6535 /* (a + invar_1) + invar_2. Associate. */
7014838c 6536 return
15fc3eb7 6537 simplify_giv_expr (loop,
6538 gen_rtx_PLUS (mode,
7014838c 6539 XEXP (arg0, 0),
6540 gen_rtx_PLUS (mode,
6541 XEXP (arg0, 1),
6542 arg1)),
ff57e249 6543 ext_val, benefit);
04aa27b1 6544
6545 default:
6546 abort ();
6547 }
6548
6549 /* Each argument must be either REG, PLUS, or MULT. Convert REG to
6550 MULT to reduce cases. */
6551 if (GET_CODE (arg0) == REG)
941522d6 6552 arg0 = gen_rtx_MULT (mode, arg0, const1_rtx);
04aa27b1 6553 if (GET_CODE (arg1) == REG)
941522d6 6554 arg1 = gen_rtx_MULT (mode, arg1, const1_rtx);
04aa27b1 6555
6556 /* Now have PLUS + PLUS, PLUS + MULT, MULT + PLUS, or MULT + MULT.
6557 Put a MULT first, leaving PLUS + PLUS, MULT + PLUS, or MULT + MULT.
6558 Recurse to associate the second PLUS. */
6559 if (GET_CODE (arg1) == MULT)
6560 tem = arg0, arg0 = arg1, arg1 = tem;
6561
6562 if (GET_CODE (arg1) == PLUS)
e17f5b23 6563 return
6564 simplify_giv_expr (loop,
6565 gen_rtx_PLUS (mode,
6566 gen_rtx_PLUS (mode, arg0,
6567 XEXP (arg1, 0)),
6568 XEXP (arg1, 1)),
6569 ext_val, benefit);
04aa27b1 6570
6571 /* Now must have MULT + MULT. Distribute if same biv, else not giv. */
6572 if (GET_CODE (arg0) != MULT || GET_CODE (arg1) != MULT)
1d322a97 6573 return NULL_RTX;
04aa27b1 6574
1d322a97 6575       if (!rtx_equal_p (XEXP (arg0, 0), XEXP (arg1, 0)))
6576 return NULL_RTX;
04aa27b1 6577
15fc3eb7 6578 return simplify_giv_expr (loop,
6579 gen_rtx_MULT (mode,
941522d6 6580 XEXP (arg0, 0),
6581 gen_rtx_PLUS (mode,
6582 XEXP (arg0, 1),
6583 XEXP (arg1, 1))),
ff57e249 6584 ext_val, benefit);
04aa27b1 6585
6586 case MINUS:
a92771b8 6587 /* Handle "a - b" as "a + b * (-1)". */
15fc3eb7 6588 return simplify_giv_expr (loop,
6589 gen_rtx_PLUS (mode,
941522d6 6590 XEXP (x, 0),
7014838c 6591 gen_rtx_MULT (mode,
6592 XEXP (x, 1),
941522d6 6593 constm1_rtx)),
ff57e249 6594 ext_val, benefit);
04aa27b1 6595
6596 case MULT:
ff57e249 6597 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6598 arg1 = simplify_giv_expr (loop, XEXP (x, 1), ext_val, benefit);
04aa27b1 6599 if (arg0 == 0 || arg1 == 0)
1d322a97 6600 return NULL_RTX;
04aa27b1 6601
6602 /* Put constant last, CONST_INT last if both constant. */
6603 if ((GET_CODE (arg0) == USE || GET_CODE (arg0) == CONST_INT)
6604 && GET_CODE (arg1) != CONST_INT)
6605 tem = arg0, arg0 = arg1, arg1 = tem;
6606
6607 /* If second argument is not now constant, not giv. */
6608 if (GET_CODE (arg1) != USE && GET_CODE (arg1) != CONST_INT)
1d322a97 6609 return NULL_RTX;
04aa27b1 6610
6611 /* Handle multiply by 0 or 1. */
6612 if (arg1 == const0_rtx)
6613 return const0_rtx;
6614
6615 else if (arg1 == const1_rtx)
6616 return arg0;
6617
6618 switch (GET_CODE (arg0))
6619 {
6620 case REG:
6621 /* biv * invar. Done. */
941522d6 6622 return gen_rtx_MULT (mode, arg0, arg1);
04aa27b1 6623
6624 case CONST_INT:
6625 /* Product of two constants. */
df38d76e 6626 return GEN_INT (INTVAL (arg0) * INTVAL (arg1));
04aa27b1 6627
6628 case USE:
e20f9563 6629 /* invar * invar is a giv, but attempt to simplify it somehow. */
1d322a97 6630 if (GET_CODE (arg1) != CONST_INT)
6631 return NULL_RTX;
6632
6633 arg0 = XEXP (arg0, 0);
e20f9563 6634 if (GET_CODE (arg0) == MULT)
1d322a97 6635 {
e20f9563 6636 /* (invar_0 * invar_1) * invar_2. Associate. */
6637 return simplify_giv_expr (loop,
6638 gen_rtx_MULT (mode,
6639 XEXP (arg0, 0),
6640 gen_rtx_MULT (mode,
6641 XEXP (arg0,
6642 1),
6643 arg1)),
ff57e249 6644 ext_val, benefit);
1d322a97 6645 }
b903337a 6646	  /* Propagate the MULT expressions to the innermost nodes.  */
e20f9563 6647 else if (GET_CODE (arg0) == PLUS)
6648 {
6649 /* (invar_0 + invar_1) * invar_2. Distribute. */
6650 return simplify_giv_expr (loop,
6651 gen_rtx_PLUS (mode,
6652 gen_rtx_MULT (mode,
6653 XEXP (arg0,
6654 0),
6655 arg1),
6656 gen_rtx_MULT (mode,
6657 XEXP (arg0,
6658 1),
6659 arg1)),
ff57e249 6660 ext_val, benefit);
e20f9563 6661 }
6662 return gen_rtx_USE (mode, gen_rtx_MULT (mode, arg0, arg1));
04aa27b1 6663
6664 case MULT:
6665 /* (a * invar_1) * invar_2. Associate. */
15fc3eb7 6666 return simplify_giv_expr (loop,
6667 gen_rtx_MULT (mode,
7014838c 6668 XEXP (arg0, 0),
941522d6 6669 gen_rtx_MULT (mode,
6670 XEXP (arg0, 1),
6671 arg1)),
ff57e249 6672 ext_val, benefit);
04aa27b1 6673
6674 case PLUS:
6675 /* (a + invar_1) * invar_2. Distribute. */
15fc3eb7 6676 return simplify_giv_expr (loop,
6677 gen_rtx_PLUS (mode,
941522d6 6678 gen_rtx_MULT (mode,
6679 XEXP (arg0, 0),
6680 arg1),
6681 gen_rtx_MULT (mode,
6682 XEXP (arg0, 1),
6683 arg1)),
ff57e249 6684 ext_val, benefit);
04aa27b1 6685
6686 default:
6687 abort ();
6688 }
6689
6690 case ASHIFT:
04aa27b1 6691 /* Shift by constant is multiply by power of two. */
6692 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6693 return 0;
6694
7014838c 6695 return
15fc3eb7 6696 simplify_giv_expr (loop,
6697 gen_rtx_MULT (mode,
7014838c 6698 XEXP (x, 0),
6699 GEN_INT ((HOST_WIDE_INT) 1
6700 << INTVAL (XEXP (x, 1)))),
ff57e249 6701 ext_val, benefit);
04aa27b1 6702
6703 case NEG:
6704 /* "-a" is "a * (-1)" */
15fc3eb7 6705 return simplify_giv_expr (loop,
6706 gen_rtx_MULT (mode, XEXP (x, 0), constm1_rtx),
ff57e249 6707 ext_val, benefit);
04aa27b1 6708
6709 case NOT:
6710 /* "~a" is "-a - 1". Silly, but easy. */
15fc3eb7 6711 return simplify_giv_expr (loop,
6712 gen_rtx_MINUS (mode,
941522d6 6713 gen_rtx_NEG (mode, XEXP (x, 0)),
6714 const1_rtx),
ff57e249 6715 ext_val, benefit);
04aa27b1 6716
6717 case USE:
6718 /* Already in proper form for invariant. */
6719 return x;
6720
ff57e249 6721 case SIGN_EXTEND:
6722 case ZERO_EXTEND:
6723 case TRUNCATE:
6724 /* Conditionally recognize extensions of simple IVs. After we've
72ae569f 6725 computed loop traversal counts and verified the range of the
ff57e249 6726 source IV, we'll reevaluate this as a GIV. */
6727 if (*ext_val == NULL_RTX)
6728 {
6729 arg0 = simplify_giv_expr (loop, XEXP (x, 0), ext_val, benefit);
6730 if (arg0 && *ext_val == NULL_RTX && GET_CODE (arg0) == REG)
6731 {
6732 *ext_val = gen_rtx_fmt_e (GET_CODE (x), mode, arg0);
6733 return arg0;
6734 }
6735 }
6736 goto do_default;
6737
72ae569f 6738 case REG:
04aa27b1 6739 /* If this is a new register, we can't deal with it. */
6740 if (REGNO (x) >= max_reg_before_loop)
6741 return 0;
6742
6743 /* Check for biv or giv. */
8ec5f078 6744 switch (REG_IV_TYPE (ivs, REGNO (x)))
04aa27b1 6745 {
6746 case BASIC_INDUCT:
6747 return x;
6748 case GENERAL_INDUCT:
6749 {
8ec5f078 6750 struct induction *v = REG_IV_INFO (ivs, REGNO (x));
04aa27b1 6751
6752 /* Form expression from giv and add benefit. Ensure this giv
6753 can derive another and subtract any needed adjustment if so. */
d27eb4b1 6754
6755 /* Increasing the benefit here is risky. The only case in which it
6756 is arguably correct is if this is the only use of V. In other
6757 cases, this will artificially inflate the benefit of the current
6758 giv, and lead to suboptimal code. Thus, it is disabled, since
6759 potentially not reducing an only marginally beneficial giv is
6760 less harmful than reducing many givs that are not really
6761 beneficial. */
6762 {
05cb4e54 6763 rtx single_use = regs->array[REGNO (x)].single_usage;
d27eb4b1 6764 if (single_use && single_use != const0_rtx)
6765 *benefit += v->benefit;
6766 }
6767
04aa27b1 6768 if (v->cant_derive)
6769 return 0;
6770
7014838c 6771 tem = gen_rtx_PLUS (mode, gen_rtx_MULT (mode,
6772 v->src_reg, v->mult_val),
6773 v->add_val);
6774
04aa27b1 6775 if (v->derive_adjustment)
941522d6 6776 tem = gen_rtx_MINUS (mode, tem, v->derive_adjustment);
ff57e249 6777 arg0 = simplify_giv_expr (loop, tem, ext_val, benefit);
6778 if (*ext_val)
6779 {
5f155d21 6780 if (!v->ext_dependent)
ff57e249 6781 return arg0;
6782 }
6783 else
6784 {
5f155d21 6785 *ext_val = v->ext_dependent;
ff57e249 6786 return arg0;
6787 }
6788 return 0;
04aa27b1 6789 }
0dbd1c74 6790
6791 default:
ff57e249 6792 do_default:
1d322a97 6793 /* If it isn't an induction variable, and it is invariant, we
6794 may be able to simplify things further by looking through
6795 the bits we just moved outside the loop. */
15fc3eb7 6796 if (loop_invariant_p (loop, x) == 1)
1d322a97 6797 {
6798 struct movable *m;
df3b4a51 6799 struct loop_movables *movables = LOOP_MOVABLES (loop);
1d322a97 6800
df3b4a51 6801 for (m = movables->head; m; m = m->next)
1d322a97 6802 if (rtx_equal_p (x, m->set_dest))
6803 {
6804 /* Ok, we found a match. Substitute and simplify. */
6805
4a8f0b95 6806 /* If we match another movable, we must use that, as
1d322a97 6807 this one is going away. */
6808 if (m->match)
4a8f0b95 6809 return simplify_giv_expr (loop, m->match->set_dest,
ff57e249 6810 ext_val, benefit);
1d322a97 6811
6ef828f9 6812 /* If consec is nonzero, this is a member of a group of
1d322a97 6813 instructions that were moved together. We handle this
6814 case only to the point of seeking to the last insn and
6815 looking for a REG_EQUAL. Fail if we don't find one. */
6816 if (m->consec != 0)
6817 {
6818 int i = m->consec;
6819 tem = m->insn;
72ae569f 6820 do
6821 {
6822 tem = NEXT_INSN (tem);
6823 }
6824 while (--i > 0);
1d322a97 6825
6826 tem = find_reg_note (tem, REG_EQUAL, NULL_RTX);
6827 if (tem)
6828 tem = XEXP (tem, 0);
6829 }
6830 else
6831 {
4a8f0b95 6832 tem = single_set (m->insn);
6833 if (tem)
1d322a97 6834 tem = SET_SRC (tem);
6835 }
6836
6837 if (tem)
6838 {
6839 /* What we are most interested in is pointer
6840 arithmetic on invariants -- only take
6841 patterns we may be able to do something with. */
6842 if (GET_CODE (tem) == PLUS
6843 || GET_CODE (tem) == MULT
6844 || GET_CODE (tem) == ASHIFT
6845 || GET_CODE (tem) == CONST_INT
6846 || GET_CODE (tem) == SYMBOL_REF)
6847 {
ff57e249 6848 tem = simplify_giv_expr (loop, tem, ext_val,
6849 benefit);
1d322a97 6850 if (tem)
6851 return tem;
6852 }
6853 else if (GET_CODE (tem) == CONST
72ae569f 6854 && GET_CODE (XEXP (tem, 0)) == PLUS
6855 && GET_CODE (XEXP (XEXP (tem, 0), 0)) == SYMBOL_REF
6856 && GET_CODE (XEXP (XEXP (tem, 0), 1)) == CONST_INT)
1d322a97 6857 {
15fc3eb7 6858 tem = simplify_giv_expr (loop, XEXP (tem, 0),
ff57e249 6859 ext_val, benefit);
1d322a97 6860 if (tem)
6861 return tem;
6862 }
6863 }
6864 break;
6865 }
6866 }
0dbd1c74 6867 break;
04aa27b1 6868 }
6869
6870 /* Fall through to general case. */
6871 default:
6872 /* If invariant, return as USE (unless CONST_INT).
6873 Otherwise, not giv. */
6874 if (GET_CODE (x) == USE)
6875 x = XEXP (x, 0);
6876
15fc3eb7 6877 if (loop_invariant_p (loop, x) == 1)
04aa27b1 6878 {
6879 if (GET_CODE (x) == CONST_INT)
6880 return x;
1d322a97 6881 if (GET_CODE (x) == CONST
6882 && GET_CODE (XEXP (x, 0)) == PLUS
6883 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
6884 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)
6885 x = XEXP (x, 0);
6886 return gen_rtx_USE (mode, x);
04aa27b1 6887 }
6888 else
6889 return 0;
6890 }
6891}
1d322a97 6892
6893/* This routine folds invariants such that there is only ever one
6894 CONST_INT in the summation. It is only used by simplify_giv_expr. */
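/* For example, folding (plus (plus R (const_int 2)) (const_int 3))
   yields (plus R (const_int 5)).  */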
6895
6896static rtx
3ad4992f 6897sge_plus_constant (rtx x, rtx c)
1d322a97 6898{
6899 if (GET_CODE (x) == CONST_INT)
6900 return GEN_INT (INTVAL (x) + INTVAL (c));
6901 else if (GET_CODE (x) != PLUS)
6902 return gen_rtx_PLUS (GET_MODE (x), x, c);
6903 else if (GET_CODE (XEXP (x, 1)) == CONST_INT)
6904 {
6905 return gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
6906 GEN_INT (INTVAL (XEXP (x, 1)) + INTVAL (c)));
6907 }
6908 else if (GET_CODE (XEXP (x, 0)) == PLUS
6909 || GET_CODE (XEXP (x, 1)) != PLUS)
6910 {
6911 return gen_rtx_PLUS (GET_MODE (x),
6912 sge_plus_constant (XEXP (x, 0), c), XEXP (x, 1));
6913 }
6914 else
6915 {
6916 return gen_rtx_PLUS (GET_MODE (x),
6917 sge_plus_constant (XEXP (x, 1), c), XEXP (x, 0));
6918 }
6919}
6920
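/* Add X to the PLUS chain Y, using sge_plus_constant to fold constant
   terms so that at most one CONST_INT appears in the summation.  */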
6921static rtx
3ad4992f 6922sge_plus (enum machine_mode mode, rtx x, rtx y)
1d322a97 6923{
6924 while (GET_CODE (y) == PLUS)
6925 {
6926 rtx a = XEXP (y, 0);
6927 if (GET_CODE (a) == CONST_INT)
6928 x = sge_plus_constant (x, a);
6929 else
6930 x = gen_rtx_PLUS (mode, x, a);
6931 y = XEXP (y, 1);
6932 }
6933 if (GET_CODE (y) == CONST_INT)
6934 x = sge_plus_constant (x, y);
6935 else
6936 x = gen_rtx_PLUS (mode, x, y);
6937 return x;
6938}
04aa27b1 6939\f
6940/* Help detect a giv that is calculated by several consecutive insns;
6941 for example,
6942 giv = biv * M
6943 giv = giv + A
6944 The caller has already identified the first insn P as having a giv as dest;
6945 we check that all other insns that set the same register follow
6946 immediately after P, that they alter nothing else,
6947 and that the result of the last is still a giv.
6948
6949 The value is 0 if the reg set in P is not really a giv.
6950 Otherwise, the value is the amount gained by eliminating
6951 all the consecutive insns that compute the value.
6952
6953 FIRST_BENEFIT is the amount gained by eliminating the first insn, P.
6954 SRC_REG is the reg of the biv; DEST_REG is the reg of the giv.
6955
6956 The coefficients of the ultimate giv value are stored in
6957 *MULT_VAL and *ADD_VAL. */
6958
6959static int
3ad4992f 6960consec_sets_giv (const struct loop *loop, int first_benefit, rtx p,
6961 rtx src_reg, rtx dest_reg, rtx *add_val, rtx *mult_val,
6962 rtx *ext_val, rtx *last_consec_insn)
04aa27b1 6963{
8ec5f078 6964 struct loop_ivs *ivs = LOOP_IVS (loop);
e9b78d43 6965 struct loop_regs *regs = LOOP_REGS (loop);
04aa27b1 6966 int count;
6967 enum rtx_code code;
6968 int benefit;
6969 rtx temp;
6970 rtx set;
6971
6972 /* Indicate that this is a giv so that we can update the value produced in
4a8f0b95 6973 each insn of the multi-insn sequence.
04aa27b1 6974
6975 This induction structure will be used only by the call to
6976 general_induction_var below, so we can allocate it on our stack.
6977 If this is a giv, our caller will replace the induct var entry with
6978 a new induction structure. */
719f8b96 6979 struct induction *v;
6980
6981 if (REG_IV_TYPE (ivs, REGNO (dest_reg)) != UNKNOWN_INDUCT)
6982 return 0;
6983
6984 v = (struct induction *) alloca (sizeof (struct induction));
04aa27b1 6985 v->src_reg = src_reg;
6986 v->mult_val = *mult_val;
6987 v->add_val = *add_val;
6988 v->benefit = first_benefit;
6989 v->cant_derive = 0;
6990 v->derive_adjustment = 0;
5f155d21 6991 v->ext_dependent = NULL_RTX;
04aa27b1 6992
8ec5f078 6993 REG_IV_TYPE (ivs, REGNO (dest_reg)) = GENERAL_INDUCT;
6994 REG_IV_INFO (ivs, REGNO (dest_reg)) = v;
04aa27b1 6995
05cb4e54 6996 count = regs->array[REGNO (dest_reg)].n_times_set - 1;
04aa27b1 6997
6998 while (count > 0)
6999 {
7000 p = NEXT_INSN (p);
7001 code = GET_CODE (p);
7002
7003 /* If libcall, skip to end of call sequence. */
df38d76e 7004 if (code == INSN && (temp = find_reg_note (p, REG_LIBCALL, NULL_RTX)))
04aa27b1 7005 p = XEXP (temp, 0);
7006
7007 if (code == INSN
7008 && (set = single_set (p))
7009 && GET_CODE (SET_DEST (set)) == REG
7010 && SET_DEST (set) == dest_reg
15fc3eb7 7011 && (general_induction_var (loop, SET_SRC (set), &src_reg,
ff57e249 7012 add_val, mult_val, ext_val, 0,
7013 &benefit, VOIDmode)
04aa27b1 7014 /* Giv created by equivalent expression. */
df38d76e 7015 || ((temp = find_reg_note (p, REG_EQUAL, NULL_RTX))
15fc3eb7 7016 && general_induction_var (loop, XEXP (temp, 0), &src_reg,
ff57e249 7017 add_val, mult_val, ext_val, 0,
7018 &benefit, VOIDmode)))
04aa27b1 7019 && src_reg == v->src_reg)
7020 {
df38d76e 7021 if (find_reg_note (p, REG_RETVAL, NULL_RTX))
04aa27b1 7022 benefit += libcall_benefit (p);
7023
7024 count--;
7025 v->mult_val = *mult_val;
7026 v->add_val = *add_val;
d27eb4b1 7027 v->benefit += benefit;
04aa27b1 7028 }
7029 else if (code != NOTE)
7030 {
7031 /* Allow insns that set something other than this giv to a
7032 constant. Such insns are needed on machines which cannot
7033 include long constants and should not disqualify a giv. */
7034 if (code == INSN
7035 && (set = single_set (p))
7036 && SET_DEST (set) != dest_reg
7037 && CONSTANT_P (SET_SRC (set)))
7038 continue;
7039
8ec5f078 7040 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
04aa27b1 7041 return 0;
7042 }
7043 }
7044
719f8b96 7045 REG_IV_TYPE (ivs, REGNO (dest_reg)) = UNKNOWN_INDUCT;
8323cc02 7046 *last_consec_insn = p;
04aa27b1 7047 return v->benefit;
7048}
7049\f
7050/* Return an rtx, if any, that expresses giv G2 as a function of the register
7051 represented by G1. If no such expression can be found, or it is clear that
4a8f0b95 7052 it cannot possibly be a valid address, 0 is returned.
04aa27b1 7053
7054 To perform the computation, we note that
4bb30577 7055 G1 = x * v + a and
1d322a97 7056 G2 = y * v + b
04aa27b1 7057 where `v' is the biv.
7058
1d322a97 7059	   So G2 = (y/x) * G1 + (b - a*y/x).
7060
7061 Note that MULT = y/x.
7062
7063 Update: A and B are now allowed to be additive expressions such that
7064 B contains all variables in A. That is, computing B-A will not require
7065 subtracting variables. */
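/* As a quick check of the algebra: with G1 = 2*v + 1 and G2 = 6*v + 7,
   MULT = y/x = 3 and G2 = 3 * G1 + (7 - 1*3), i.e. 3*G1 + 4.  */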
7066
7067static rtx
3ad4992f 7068express_from_1 (rtx a, rtx b, rtx mult)
1d322a97 7069{
7070 /* If MULT is zero, then A*MULT is zero, and our expression is B. */
7071
7072 if (mult == const0_rtx)
7073 return b;
7074
7075 /* If MULT is not 1, we cannot handle A with non-constants, since we
7076 would then be required to subtract multiples of the registers in A.
7077 This is theoretically possible, and may even apply to some Fortran
7078 constructs, but it is a lot of work and we do not attempt it here. */
7079
7080 if (mult != const1_rtx && GET_CODE (a) != CONST_INT)
7081 return NULL_RTX;
7082
7083 /* In general these structures are sorted top to bottom (down the PLUS
7084 chain), but not left to right across the PLUS. If B is a higher
7085 order giv than A, we can strip one level and recurse. If A is higher
7086 order, we'll eventually bail out, but won't know that until the end.
7087 If they are the same, we'll strip one level around this loop. */
7088
7089 while (GET_CODE (a) == PLUS && GET_CODE (b) == PLUS)
7090 {
7091 rtx ra, rb, oa, ob, tmp;
7092
7093 ra = XEXP (a, 0), oa = XEXP (a, 1);
7094 if (GET_CODE (ra) == PLUS)
4a8f0b95 7095 tmp = ra, ra = oa, oa = tmp;
1d322a97 7096
7097 rb = XEXP (b, 0), ob = XEXP (b, 1);
7098 if (GET_CODE (rb) == PLUS)
4a8f0b95 7099 tmp = rb, rb = ob, ob = tmp;
1d322a97 7100
7101 if (rtx_equal_p (ra, rb))
7102 /* We matched: remove one reg completely. */
7103 a = oa, b = ob;
7104 else if (GET_CODE (ob) != PLUS && rtx_equal_p (ra, ob))
7105 /* An alternate match. */
7106 a = oa, b = rb;
7107 else if (GET_CODE (oa) != PLUS && rtx_equal_p (oa, rb))
7108 /* An alternate match. */
7109 a = ra, b = ob;
7110 else
7111 {
72ae569f 7112 /* Indicates an extra register in B. Strip one level from B and
1d322a97 7113 recurse, hoping B was the higher order expression. */
7114 ob = express_from_1 (a, ob, mult);
7115 if (ob == NULL_RTX)
7116 return NULL_RTX;
7117 return gen_rtx_PLUS (GET_MODE (b), rb, ob);
7118 }
7119 }
7120
7121 /* Here we are at the last level of A, go through the cases hoping to
7122 get rid of everything but a constant. */
7123
7124 if (GET_CODE (a) == PLUS)
7125 {
0f9685e4 7126 rtx ra, oa;
1d322a97 7127
7128 ra = XEXP (a, 0), oa = XEXP (a, 1);
7129 if (rtx_equal_p (oa, b))
7130 oa = ra;
7131 else if (!rtx_equal_p (ra, b))
7132 return NULL_RTX;
7133
7134 if (GET_CODE (oa) != CONST_INT)
7135 return NULL_RTX;
7136
7137 return GEN_INT (-INTVAL (oa) * INTVAL (mult));
7138 }
7139 else if (GET_CODE (a) == CONST_INT)
7140 {
7141 return plus_constant (b, -INTVAL (a) * INTVAL (mult));
7142 }
2cbea1dc 7143 else if (CONSTANT_P (a))
7144 {
f4ad60b7 7145 enum machine_mode mode_a = GET_MODE (a);
7146 enum machine_mode mode_b = GET_MODE (b);
7147 enum machine_mode mode = mode_b == VOIDmode ? mode_a : mode_b;
7148 return simplify_gen_binary (MINUS, mode, b, a);
2cbea1dc 7149 }
1d322a97 7150 else if (GET_CODE (b) == PLUS)
7151 {
7152 if (rtx_equal_p (a, XEXP (b, 0)))
7153 return XEXP (b, 1);
7154 else if (rtx_equal_p (a, XEXP (b, 1)))
7155 return XEXP (b, 0);
7156 else
7157 return NULL_RTX;
7158 }
7159 else if (rtx_equal_p (a, b))
7160 return const0_rtx;
7161
7162 return NULL_RTX;
7163}
04aa27b1 7164
3a7923ef 7165rtx
3ad4992f 7166express_from (struct induction *g1, struct induction *g2)
04aa27b1 7167{
7168 rtx mult, add;
7169
7170 /* The value that G1 will be multiplied by must be a constant integer. Also,
 7171	     the only chance we have of getting a valid address is if y/x (see above
7172 for notation) is also an integer. */
1d322a97 7173 if (GET_CODE (g1->mult_val) == CONST_INT
7174 && GET_CODE (g2->mult_val) == CONST_INT)
7175 {
7176 if (g1->mult_val == const0_rtx
4a8f0b95 7177 || INTVAL (g2->mult_val) % INTVAL (g1->mult_val) != 0)
7178 return NULL_RTX;
1d322a97 7179 mult = GEN_INT (INTVAL (g2->mult_val) / INTVAL (g1->mult_val));
7180 }
7181 else if (rtx_equal_p (g1->mult_val, g2->mult_val))
7182 mult = const1_rtx;
7183 else
7184 {
 7185	      /* ??? Find out if one is a multiple of the other?  */
7186 return NULL_RTX;
7187 }
04aa27b1 7188
1d322a97 7189 add = express_from_1 (g1->add_val, g2->add_val, mult);
85c08fb4 7190 if (add == NULL_RTX)
7191 {
7192 /* Failed. If we've got a multiplication factor between G1 and G2,
7193 scale G1's addend and try again. */
7194 if (INTVAL (mult) > 1)
7195 {
7196 rtx g1_add_val = g1->add_val;
7197 if (GET_CODE (g1_add_val) == MULT
7198 && GET_CODE (XEXP (g1_add_val, 1)) == CONST_INT)
7199 {
7200 HOST_WIDE_INT m;
7201 m = INTVAL (mult) * INTVAL (XEXP (g1_add_val, 1));
7202 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val),
7203 XEXP (g1_add_val, 0), GEN_INT (m));
7204 }
7205 else
7206 {
7207 g1_add_val = gen_rtx_MULT (GET_MODE (g1_add_val), g1_add_val,
7208 mult);
7209 }
7210
7211 add = express_from_1 (g1_add_val, g2->add_val, const1_rtx);
7212 }
7213 }
1d322a97 7214 if (add == NULL_RTX)
7215 return NULL_RTX;
04aa27b1 7216
7217 /* Form simplified final result. */
7218 if (mult == const0_rtx)
7219 return add;
7220 else if (mult == const1_rtx)
7221 mult = g1->dest_reg;
7222 else
941522d6 7223 mult = gen_rtx_MULT (g2->mode, g1->dest_reg, mult);
04aa27b1 7224
7225 if (add == const0_rtx)
7226 return mult;
7227 else
bdabc8e0 7228 {
7229 if (GET_CODE (add) == PLUS
7230 && CONSTANT_P (XEXP (add, 1)))
7231 {
7232 rtx tem = XEXP (add, 1);
7233 mult = gen_rtx_PLUS (g2->mode, mult, XEXP (add, 0));
7234 add = tem;
7235 }
4a8f0b95 7236
bdabc8e0 7237 return gen_rtx_PLUS (g2->mode, mult, add);
7238 }
04aa27b1 7239}
04aa27b1 7240\f
9309808a 7241/* Return an rtx, if any, that expresses giv G2 as a function of the register
7242 represented by G1. This indicates that G2 should be combined with G1 and
7243 that G2 can use (either directly or via an address expression) a register
7244 used to represent G1. */
04aa27b1 7245
1d322a97 7246static rtx
3ad4992f 7247combine_givs_p (struct induction *g1, struct induction *g2)
04aa27b1 7248{
ff57e249 7249 rtx comb, ret;
7250
424da949 7251	  /* With the introduction of ext dependent givs, we must take care with modes.
ff57e249 7252 G2 must not use a wider mode than G1. */
7253 if (GET_MODE_SIZE (g1->mode) < GET_MODE_SIZE (g2->mode))
7254 return NULL_RTX;
7255
7256 ret = comb = express_from (g1, g2);
7257 if (comb == NULL_RTX)
7258 return NULL_RTX;
7259 if (g1->mode != g2->mode)
7260 ret = gen_lowpart (g2->mode, comb);
04aa27b1 7261
1d322a97 7262 /* If these givs are identical, they can be combined. We use the results
7263 of express_from because the addends are not in a canonical form, so
7264 rtx_equal_p is a weaker test. */
8bd88b68 7265 /* But don't combine a DEST_REG giv with a DEST_ADDR giv; we want the
7266 combination to be the other way round. */
ff57e249 7267 if (comb == g1->dest_reg
8bd88b68 7268 && (g1->giv_type == DEST_REG || g2->giv_type == DEST_ADDR))
04aa27b1 7269 {
ff57e249 7270 return ret;
04aa27b1 7271 }
7272
04aa27b1 7273 /* If G2 can be expressed as a function of G1 and that function is valid
7274 as an address and no more expensive than using a register for G2,
7275 the expression of G2 in terms of G1 can be used. */
ff57e249 7276 if (ret != NULL_RTX
1d322a97 7277 && g2->giv_type == DEST_ADDR
ec0457a8 7278 && memory_address_p (GET_MODE (g2->mem), ret))
7279 return ret;
04aa27b1 7280
1d322a97 7281 return NULL_RTX;
04aa27b1 7282}
7283\f
424da949 7284/* Check each extension dependent giv in this class to see if its
ff57e249 7285 root biv is safe from wrapping in the interior mode, which would
7286 make the giv illegal. */
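/* For example, a QImode biv that starts at 250 and is incremented by 1
   for ten iterations wraps past 255 in QImode, so a zero- or
   sign-extended giv derived from it would not be a linear function of
   the extended biv values.  */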
7287
7288static void
3ad4992f 7289check_ext_dependent_givs (struct iv_class *bl, struct loop_info *loop_info)
ff57e249 7290{
7291 int ze_ok = 0, se_ok = 0, info_ok = 0;
7292 enum machine_mode biv_mode = GET_MODE (bl->biv->src_reg);
7293 HOST_WIDE_INT start_val;
4acc436e 7294 unsigned HOST_WIDE_INT u_end_val = 0;
7295 unsigned HOST_WIDE_INT u_start_val = 0;
ff57e249 7296 rtx incr = pc_rtx;
7297 struct induction *v;
7298
7299 /* Make sure the iteration data is available. We must have
7300 constants in order to be certain of no overflow. */
7301 /* ??? An unknown iteration count with an increment of +-1
 7302	     combined with friendly exit tests against an invariant
b903337a 7303 value is also amenable to optimization. Not implemented. */
ff57e249 7304 if (loop_info->n_iterations > 0
7305 && bl->initial_value
7306 && GET_CODE (bl->initial_value) == CONST_INT
7307 && (incr = biv_total_increment (bl))
7308 && GET_CODE (incr) == CONST_INT
7309 /* Make sure the host can represent the arithmetic. */
7310 && HOST_BITS_PER_WIDE_INT >= GET_MODE_BITSIZE (biv_mode))
7311 {
7312 unsigned HOST_WIDE_INT abs_incr, total_incr;
7313 HOST_WIDE_INT s_end_val;
7314 int neg_incr;
7315
7316 info_ok = 1;
7317 start_val = INTVAL (bl->initial_value);
7318 u_start_val = start_val;
72ae569f 7319
ff57e249 7320 neg_incr = 0, abs_incr = INTVAL (incr);
7321 if (INTVAL (incr) < 0)
7322 neg_incr = 1, abs_incr = -abs_incr;
7323 total_incr = abs_incr * loop_info->n_iterations;
7324
b903337a 7325 /* Check for host arithmetic overflow. */
ff57e249 7326 if (total_incr / loop_info->n_iterations == abs_incr)
7327 {
7328 unsigned HOST_WIDE_INT u_max;
7329 HOST_WIDE_INT s_max;
7330
7331 u_end_val = start_val + (neg_incr ? -total_incr : total_incr);
7332 s_end_val = u_end_val;
7333 u_max = GET_MODE_MASK (biv_mode);
7334 s_max = u_max >> 1;
72ae569f 7335
ff57e249 7336 /* Check zero extension of biv ok. */
7337 if (start_val >= 0
b903337a 7338 /* Check for host arithmetic overflow. */
ff57e249 7339 && (neg_incr
7340 ? u_end_val < u_start_val
7341 : u_end_val > u_start_val)
7342 /* Check for target arithmetic overflow. */
7343 && (neg_incr
7344 ? 1 /* taken care of with host overflow */
7345 : u_end_val <= u_max))
7346 {
7347 ze_ok = 1;
7348 }
72ae569f 7349
ff57e249 7350 /* Check sign extension of biv ok. */
7351 /* ??? While it is true that overflow with signed and pointer
7352 arithmetic is undefined, I fear too many programmers don't
7353 keep this fact in mind -- myself included on occasion.
 7354	     So be conservative with the signed overflow optimizations.  */
7355 if (start_val >= -s_max - 1
b903337a 7356 /* Check for host arithmetic overflow. */
ff57e249 7357 && (neg_incr
7358 ? s_end_val < start_val
7359 : s_end_val > start_val)
7360 /* Check for target arithmetic overflow. */
7361 && (neg_incr
7362 ? s_end_val >= -s_max - 1
7363 : s_end_val <= s_max))
7364 {
7365 se_ok = 1;
7366 }
7367 }
7368 }
7369
7370 /* Invalidate givs that fail the tests. */
7371 for (v = bl->giv; v; v = v->next_iv)
5f155d21 7372 if (v->ext_dependent)
ff57e249 7373 {
5f155d21 7374 enum rtx_code code = GET_CODE (v->ext_dependent);
ff57e249 7375 int ok = 0;
7376
7377 switch (code)
7378 {
7379 case SIGN_EXTEND:
7380 ok = se_ok;
7381 break;
7382 case ZERO_EXTEND:
7383 ok = ze_ok;
7384 break;
7385
7386 case TRUNCATE:
7387 /* We don't know whether this value is being used as either
7388 signed or unsigned, so to safely truncate we must satisfy
72ae569f 7389 both. The initial check here verifies the BIV itself;
ff57e249 7390 once that is successful we may check its range wrt the
7391 derived GIV. */
7392 if (se_ok && ze_ok)
7393 {
5f155d21 7394 enum machine_mode outer_mode = GET_MODE (v->ext_dependent);
ff57e249 7395 unsigned HOST_WIDE_INT max = GET_MODE_MASK (outer_mode) >> 1;
7396
7397 /* We know from the above that both endpoints are nonnegative,
7398 and that there is no wrapping. Verify that both endpoints
7399 are within the (signed) range of the outer mode. */
7400 if (u_start_val <= max && u_end_val <= max)
7401 ok = 1;
7402 }
7403 break;
7404
7405 default:
7406 abort ();
7407 }
7408
7409 if (ok)
7410 {
7411 if (loop_dump_stream)
7412 {
72ae569f 7413 fprintf (loop_dump_stream,
424da949 7414 "Verified ext dependent giv at %d of reg %d\n",
72ae569f 7415 INSN_UID (v->insn), bl->regno);
ff57e249 7416 }
7417 }
7418 else
7419 {
7420 if (loop_dump_stream)
7421 {
7422 const char *why;
7423
7424 if (info_ok)
7425 why = "biv iteration values overflowed";
7426 else
7427 {
7428 if (incr == pc_rtx)
7429 incr = biv_total_increment (bl);
7430 if (incr == const1_rtx)
7431 why = "biv iteration info incomplete; incr by 1";
7432 else
7433 why = "biv iteration info incomplete";
7434 }
7435
72ae569f 7436 fprintf (loop_dump_stream,
424da949 7437 "Failed ext dependent giv at %d, %s\n",
72ae569f 7438 INSN_UID (v->insn), why);
ff57e249 7439 }
7440 v->ignore = 1;
af10ea53 7441 bl->all_reduced = 0;
ff57e249 7442 }
7443 }
7444}
7445
7446/* Generate a version of VALUE in a mode appropriate for initializing V. */
7447
7448rtx
3ad4992f 7449extend_value_for_giv (struct induction *v, rtx value)
ff57e249 7450{
5f155d21 7451 rtx ext_dep = v->ext_dependent;
ff57e249 7452
7453 if (! ext_dep)
7454 return value;
7455
5f155d21 7456 /* Recall that check_ext_dependent_givs verified that the known bounds
ff57e249 7457 of a biv did not overflow or wrap with respect to the extension for
7458 the giv. Therefore, constants need no additional adjustment. */
7459 if (CONSTANT_P (value) && GET_MODE (value) == VOIDmode)
7460 return value;
7461
7462 /* Otherwise, we must adjust the value to compensate for the
7463 differing modes of the biv and the giv. */
7464 return gen_rtx_fmt_e (GET_CODE (ext_dep), GET_MODE (ext_dep), value);
7465}
7466\f
1d322a97 7467struct combine_givs_stats
7468{
7469 int giv_number;
7470 int total_benefit;
7471};
7472
7473static int
3ad4992f 7474cmp_combine_givs_stats (const void *xp, const void *yp)
1d322a97 7475{
d844aacc 7476 const struct combine_givs_stats * const x =
7477 (const struct combine_givs_stats *) xp;
7478 const struct combine_givs_stats * const y =
7479 (const struct combine_givs_stats *) yp;
1d322a97 7480 int d;
7481 d = y->total_benefit - x->total_benefit;
7482 /* Stabilize the sort. */
7483 if (!d)
7484 d = x->giv_number - y->giv_number;
7485 return d;
7486}
7487
04aa27b1 7488/* Check all pairs of givs for iv_class BL and see if any can be combined with
7489 any other. If so, point SAME to the giv combined with and set NEW_REG to
7490 be an expression (in terms of the other giv's DEST_REG) equivalent to the
7491 giv. Also, update BENEFIT and related fields for cost/benefit analysis. */
7492
7493static void
3ad4992f 7494combine_givs (struct loop_regs *regs, struct iv_class *bl)
04aa27b1 7495{
862cfae2 7496 /* Additional benefit to add for being combined multiple times. */
7497 const int extra_benefit = 3;
7498
7bd36830 7499 struct induction *g1, *g2, **giv_array;
1d322a97 7500 int i, j, k, giv_count;
7501 struct combine_givs_stats *stats;
7502 rtx *can_combine;
04aa27b1 7503
533a6d7a 7504 /* Count givs, because bl->giv_count is incorrect here. */
7505 giv_count = 0;
04aa27b1 7506 for (g1 = bl->giv; g1; g1 = g1->next_iv)
1d322a97 7507 if (!g1->ignore)
7508 giv_count++;
533a6d7a 7509
7510 giv_array
7511 = (struct induction **) alloca (giv_count * sizeof (struct induction *));
7512 i = 0;
7513 for (g1 = bl->giv; g1; g1 = g1->next_iv)
1d322a97 7514 if (!g1->ignore)
7515 giv_array[i++] = g1;
533a6d7a 7516
8b861be4 7517 stats = (struct combine_givs_stats *) xcalloc (giv_count, sizeof (*stats));
4a8f0b95 7518 can_combine = (rtx *) xcalloc (giv_count, giv_count * sizeof (rtx));
533a6d7a 7519
7520 for (i = 0; i < giv_count; i++)
7521 {
1d322a97 7522 int this_benefit;
862cfae2 7523 rtx single_use;
1d322a97 7524
533a6d7a 7525 g1 = giv_array[i];
862cfae2 7526 stats[i].giv_number = i;
7527
7528 /* If a DEST_REG GIV is used only once, do not allow it to combine
7529 with anything, for in doing so we will gain nothing that cannot
7530 be had by simply letting the GIV with which we would have combined
4a8f0b95 7531	 be reduced on its own.  The lossage shows up in particular with
862cfae2 7532 DEST_ADDR targets on hosts with reg+reg addressing, though it can
7533 be seen elsewhere as well. */
7534 if (g1->giv_type == DEST_REG
05cb4e54 7535 && (single_use = regs->array[REGNO (g1->dest_reg)].single_usage)
862cfae2 7536 && single_use != const0_rtx)
7537 continue;
1d322a97 7538
7539 this_benefit = g1->benefit;
7540 /* Add an additional weight for zero addends. */
7541 if (g1->no_const_addval)
7542 this_benefit += 1;
862cfae2 7543
1d322a97 7544 for (j = 0; j < giv_count; j++)
7545 {
7546 rtx this_combine;
7547
7548 g2 = giv_array[j];
7549 if (g1 != g2
7550 && (this_combine = combine_givs_p (g1, g2)) != NULL_RTX)
7551 {
4a8f0b95 7552 can_combine[i * giv_count + j] = this_combine;
862cfae2 7553 this_benefit += g2->benefit + extra_benefit;
1d322a97 7554 }
7555 }
1d322a97 7556 stats[i].total_benefit = this_benefit;
7557 }
7558
7559 /* Iterate, combining until we can't. */
7560restart:
4a8f0b95 7561 qsort (stats, giv_count, sizeof (*stats), cmp_combine_givs_stats);
1d322a97 7562
7563 if (loop_dump_stream)
7564 {
7565 fprintf (loop_dump_stream, "Sorted combine statistics:\n");
7566 for (k = 0; k < giv_count; k++)
7567 {
7568 g1 = giv_array[stats[k].giv_number];
7569 if (!g1->combined_with && !g1->same)
4a8f0b95 7570 fprintf (loop_dump_stream, " {%d, %d}",
1d322a97 7571 INSN_UID (giv_array[stats[k].giv_number]->insn),
7572 stats[k].total_benefit);
7573 }
7574 putc ('\n', loop_dump_stream);
7575 }
7576
7577 for (k = 0; k < giv_count; k++)
7578 {
7579 int g1_add_benefit = 0;
7580
7581 i = stats[k].giv_number;
7582 g1 = giv_array[i];
7583
7584 /* If it has already been combined, skip. */
7585 if (g1->combined_with || g1->same)
7586 continue;
7587
7588 for (j = 0; j < giv_count; j++)
7589 {
7590 g2 = giv_array[j];
4a8f0b95 7591 if (g1 != g2 && can_combine[i * giv_count + j]
1d322a97 7592 /* If it has already been combined, skip. */
7593 && ! g2->same && ! g2->combined_with)
7594 {
7595 int l;
7596
4a8f0b95 7597 g2->new_reg = can_combine[i * giv_count + j];
1d322a97 7598 g2->same = g1;
d643dd7e 7599	      /* For the destination, we may now substitute a mem expression instead
 7600	 of a register.  This changes the costs considerably, so add the
7601 compensation. */
7602 if (g2->giv_type == DEST_ADDR)
7603 g2->benefit = (g2->benefit + reg_address_cost
7604 - address_cost (g2->new_reg,
7605 GET_MODE (g2->mem)));
8bd88b68 7606 g1->combined_with++;
1d322a97 7607 g1->lifetime += g2->lifetime;
7608
862cfae2 7609 g1_add_benefit += g2->benefit;
1d322a97 7610
7611 /* ??? The new final_[bg]iv_value code does a much better job
7612 of finding replaceable giv's, and hence this code may no
7613 longer be necessary. */
7614 if (! g2->replaceable && REG_USERVAR_P (g2->dest_reg))
7615 g1_add_benefit -= copy_cost;
4a8f0b95 7616
1d322a97 7617 /* To help optimize the next set of combinations, remove
7618 this giv from the benefits of other potential mates. */
7619 for (l = 0; l < giv_count; ++l)
7620 {
7621 int m = stats[l].giv_number;
4a8f0b95 7622 if (can_combine[m * giv_count + j])
862cfae2 7623 stats[l].total_benefit -= g2->benefit + extra_benefit;
1d322a97 7624 }
7625
7626 if (loop_dump_stream)
7627 fprintf (loop_dump_stream,
d27eb4b1 7628 "giv at %d combined with giv at %d; new benefit %d + %d, lifetime %d\n",
7629 INSN_UID (g2->insn), INSN_UID (g1->insn),
7630 g1->benefit, g1_add_benefit, g1->lifetime);
1d322a97 7631 }
7632 }
7633
7634 /* To help optimize the next set of combinations, remove
7635 this giv from the benefits of other potential mates. */
7636 if (g1->combined_with)
7637 {
7638 for (j = 0; j < giv_count; ++j)
7639 {
7640 int m = stats[j].giv_number;
4a8f0b95 7641 if (can_combine[m * giv_count + i])
862cfae2 7642 stats[j].total_benefit -= g1->benefit + extra_benefit;
1d322a97 7643 }
7644
7645 g1->benefit += g1_add_benefit;
7646
7647 /* We've finished with this giv, and everything it touched.
4a8f0b95 7648	     Restart the combination so that the weights for the
1d322a97 7649 rest of the givs are properly taken into account. */
7650 /* ??? Ideally we would compact the arrays at this point, so
7651 as to not cover old ground. But sanely compacting
7652 can_combine is tricky. */
7653 goto restart;
7654 }
533a6d7a 7655 }
8b861be4 7656
7657 /* Clean up. */
7658 free (stats);
7659 free (can_combine);
04aa27b1 7660}
7661\f
3ad4992f 7662/* Generate sequence for REG = B * M + A. B is the initial value of
7663 the basic induction variable, M a multiplicative constant, A an
7664 additive constant and REG the destination register. */
04aa27b1 7665
89e8d34f 7666static rtx
3ad4992f 7667gen_add_mult (rtx b, rtx m, rtx a, rtx reg)
04aa27b1 7668{
7669 rtx seq;
7670 rtx result;
7671
04aa27b1 7672 start_sequence ();
89e8d34f 7673 /* Use unsigned arithmetic. */
bec2d490 7674 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
04aa27b1 7675 if (reg != result)
7676 emit_move_insn (reg, result);
31d3e01c 7677 seq = get_insns ();
04aa27b1 7678 end_sequence ();
7679
89e8d34f 7680 return seq;
7681}
7682
7683
7684/* Update registers created in insn sequence SEQ. */
ea0cb7ae 7685
89e8d34f 7686static void
3ad4992f 7687loop_regs_update (const struct loop *loop ATTRIBUTE_UNUSED, rtx seq)
89e8d34f 7688{
31d3e01c 7689 rtx insn;
7690
89e8d34f 7691 /* Update register info for alias analysis. */
68e29b3d 7692
31d3e01c 7693 if (seq == NULL_RTX)
7694 return;
7695
7696 if (INSN_P (seq))
68e29b3d 7697 {
31d3e01c 7698 insn = seq;
7699 while (insn != NULL_RTX)
68e29b3d 7700 {
31d3e01c 7701 rtx set = single_set (insn);
7702
68e29b3d 7703 if (set && GET_CODE (SET_DEST (set)) == REG)
7704 record_base_value (REGNO (SET_DEST (set)), SET_SRC (set), 0);
31d3e01c 7705
7706 insn = NEXT_INSN (insn);
68e29b3d 7707 }
7708 }
31d3e01c 7709 else if (GET_CODE (seq) == SET
7710 && GET_CODE (SET_DEST (seq)) == REG)
7711 record_base_value (REGNO (SET_DEST (seq)), SET_SRC (seq), 0);
04aa27b1 7712}
d27eb4b1 7713
89e8d34f 7714
3ad4992f 7715/* EMIT code before BEFORE_BB/BEFORE_INSN to set REG = B * M + A. B
7716 is the initial value of the basic induction variable, M a
7717 multiplicative constant, A an additive constant and REG the
7718 destination register. */
89e8d34f 7719
7720void
3ad4992f 7721loop_iv_add_mult_emit_before (const struct loop *loop, rtx b, rtx m, rtx a,
7722 rtx reg, basic_block before_bb, rtx before_insn)
89e8d34f 7723{
7724 rtx seq;
7725
7726 if (! before_insn)
7727 {
7728 loop_iv_add_mult_hoist (loop, b, m, a, reg);
7729 return;
7730 }
7731
7732 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
115bceb9 7733 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
89e8d34f 7734
7735 /* Increase the lifetime of any invariants moved further in code. */
7736 update_reg_last_use (a, before_insn);
7737 update_reg_last_use (b, before_insn);
7738 update_reg_last_use (m, before_insn);
7739
7740 loop_insn_emit_before (loop, before_bb, before_insn, seq);
7741
7742 /* It is possible that the expansion created lots of new registers.
7743 Iterate over the sequence we just created and record them all. */
7744 loop_regs_update (loop, seq);
7745}
7746
7747
3ad4992f 7748/* Emit insns in loop pre-header to set REG = B * M + A. B is the
7749 initial value of the basic induction variable, M a multiplicative
7750 constant, A an additive constant and REG the destination
7751 register. */
89e8d34f 7752
7753void
3ad4992f 7754loop_iv_add_mult_sink (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
89e8d34f 7755{
7756 rtx seq;
7757
7758 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
115bceb9 7759 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
89e8d34f 7760
7761 /* Increase the lifetime of any invariants moved further in code.
7762 ???? Is this really necessary? */
7763 update_reg_last_use (a, loop->sink);
7764 update_reg_last_use (b, loop->sink);
7765 update_reg_last_use (m, loop->sink);
7766
7767 loop_insn_sink (loop, seq);
7768
7769 /* It is possible that the expansion created lots of new registers.
7770 Iterate over the sequence we just created and record them all. */
7771 loop_regs_update (loop, seq);
7772}
7773
7774
3ad4992f 7775/* Emit insns after loop to set REG = B * M + A. B is the initial
7776 value of the basic induction variable, M a multiplicative constant,
7777 A an additive constant and REG the destination register. */
89e8d34f 7778
7779void
3ad4992f 7780loop_iv_add_mult_hoist (const struct loop *loop, rtx b, rtx m, rtx a, rtx reg)
89e8d34f 7781{
7782 rtx seq;
7783
7784 /* Use copy_rtx to prevent unexpected sharing of these rtx. */
115bceb9 7785 seq = gen_add_mult (copy_rtx (b), copy_rtx (m), copy_rtx (a), reg);
89e8d34f 7786
7787 loop_insn_hoist (loop, seq);
7788
7789 /* It is possible that the expansion created lots of new registers.
7790 Iterate over the sequence we just created and record them all. */
7791 loop_regs_update (loop, seq);
7792}
7793
7794
7795
7796/* Similar to gen_add_mult, but compute cost rather than generating
7797 sequence. */
7798
d27eb4b1 7799static int
3ad4992f 7800iv_add_mult_cost (rtx b, rtx m, rtx a, rtx reg)
d27eb4b1 7801{
7802 int cost = 0;
7803 rtx last, result;
7804
7805 start_sequence ();
89e8d34f 7806 result = expand_mult_add (b, reg, m, a, GET_MODE (reg), 1);
d27eb4b1 7807 if (reg != result)
7808 emit_move_insn (reg, result);
7809 last = get_last_insn ();
7810 while (last)
7811 {
7812 rtx t = single_set (last);
7813 if (t)
7814 cost += rtx_cost (SET_SRC (t), SET);
7815 last = PREV_INSN (last);
7816 }
7817 end_sequence ();
7818 return cost;
7819}
04aa27b1 7820\f
7821/* Test whether A * B can be computed without
31d3e01c 7822 an actual multiply insn. Value is 1 if so.
7823
7824 ??? This function stinks because it generates a ton of wasted RTL
7825 ??? and as a result fragments GC memory to no end. There are other
7826 ??? places in the compiler which are invoked a lot and do the same
7827 ??? thing, generate wasted RTL just to see if something is possible. */
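/* For instance, on many targets a multiply by the constant 5 expands to
   a short shift-and-add sequence, (x << 2) + x, which this routine
   reports as cheap.  */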
04aa27b1 7828
7829static int
3ad4992f 7830product_cheap_p (rtx a, rtx b)
04aa27b1 7831{
04aa27b1 7832 rtx tmp;
31d3e01c 7833 int win, n_insns;
04aa27b1 7834
a92771b8 7835 /* If only one is constant, make it B. */
04aa27b1 7836 if (GET_CODE (a) == CONST_INT)
7837 tmp = a, a = b, b = tmp;
7838
7839 /* If first constant, both constant, so don't need multiply. */
7840 if (GET_CODE (a) == CONST_INT)
7841 return 1;
7842
7843 /* If second not constant, neither is constant, so would need multiply. */
7844 if (GET_CODE (b) != CONST_INT)
7845 return 0;
7846
7847 /* One operand is constant, so might not need multiply insn. Generate the
 7848	   code for the multiply and see if a call, a multiply, or a long sequence
7849 of insns is generated. */
7850
04aa27b1 7851 start_sequence ();
bec2d490 7852 expand_mult (GET_MODE (a), a, b, NULL_RTX, 1);
31d3e01c 7853 tmp = get_insns ();
04aa27b1 7854 end_sequence ();
7855
31d3e01c 7856 win = 1;
7857 if (INSN_P (tmp))
04aa27b1 7858 {
31d3e01c 7859 n_insns = 0;
7860 while (tmp != NULL_RTX)
7861 {
7862 rtx next = NEXT_INSN (tmp);
7863
7864 if (++n_insns > 3
7865 || GET_CODE (tmp) != INSN
7866 || (GET_CODE (PATTERN (tmp)) == SET
7867 && GET_CODE (SET_SRC (PATTERN (tmp))) == MULT)
7868 || (GET_CODE (PATTERN (tmp)) == PARALLEL
7869 && GET_CODE (XVECEXP (PATTERN (tmp), 0, 0)) == SET
7870 && GET_CODE (SET_SRC (XVECEXP (PATTERN (tmp), 0, 0))) == MULT))
7871 {
7872 win = 0;
7873 break;
7874 }
7875
7876 tmp = next;
7877 }
04aa27b1 7878 }
7879 else if (GET_CODE (tmp) == SET
7880 && GET_CODE (SET_SRC (tmp)) == MULT)
7881 win = 0;
7882 else if (GET_CODE (tmp) == PARALLEL
7883 && GET_CODE (XVECEXP (tmp, 0, 0)) == SET
7884 && GET_CODE (SET_SRC (XVECEXP (tmp, 0, 0))) == MULT)
7885 win = 0;
7886
04aa27b1 7887 return win;
7888}
7889\f
7890/* Check to see if loop can be terminated by a "decrement and branch until
7891 zero" instruction. If so, add a REG_NONNEG note to the branch insn if so.
7892 Also try reversing an increment loop to a decrement loop
7893 to see if the optimization can be performed.
7894 Value is nonzero if optimization was performed. */
7895
7896/* This is useful even if the architecture doesn't have such an insn,
 7897	   because it might change a loop which increments from 0 to n to a loop
7898 which decrements from n to 0. A loop that decrements to zero is usually
7899 faster than one that increments from zero. */
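/* For example, when I is used only to count iterations,

	for (i = 0; i < n; i++)    becomes    for (i = n; i > 0; i--)

   so that the exit test compares against zero instead of against N.  */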
7900
7901/* ??? This could be rewritten to use some of the loop unrolling procedures,
7902 such as approx_final_value, biv_total_increment, loop_iterations, and
7903 final_[bg]iv_value. */
7904
7905static int
3ad4992f 7906check_dbra_loop (struct loop *loop, int insn_count)
04aa27b1 7907{
e9b78d43 7908 struct loop_info *loop_info = LOOP_INFO (loop);
7909 struct loop_regs *regs = LOOP_REGS (loop);
8ec5f078 7910 struct loop_ivs *ivs = LOOP_IVS (loop);
04aa27b1 7911 struct iv_class *bl;
7912 rtx reg;
7913 rtx jump_label;
7914 rtx final_value;
7915 rtx start_value;
04aa27b1 7916 rtx new_add_val;
7917 rtx comparison;
7918 rtx before_comparison;
7919 rtx p;
0a40b133 7920 rtx jump;
7921 rtx first_compare;
7922 int compare_and_branch;
ec7d7ef9 7923 rtx loop_start = loop->start;
7924 rtx loop_end = loop->end;
04aa27b1 7925
7926 /* If last insn is a conditional branch, and the insn before tests a
7927 register value, try to optimize it. Otherwise, we can't do anything. */
7928
0a40b133 7929 jump = PREV_INSN (loop_end);
15fc3eb7 7930 comparison = get_condition_for_loop (loop, jump);
04aa27b1 7931 if (comparison == 0)
7932 return 0;
b2816317 7933 if (!onlyjump_p (jump))
7934 return 0;
04aa27b1 7935
0a40b133 7936 /* Try to compute whether the compare/branch at the loop end is one or
7937 two instructions. */
7938 get_condition (jump, &first_compare);
7939 if (first_compare == jump)
7940 compare_and_branch = 1;
7941 else if (first_compare == prev_nonnote_insn (jump))
7942 compare_and_branch = 2;
7943 else
7944 return 0;
7945
ccbe285c 7946 {
7947 /* If more than one condition is present to control the loop, then
c25f946d 7948 do not proceed, as this function does not know how to rewrite
3c68ac58 7949 loop tests with more than one condition.
7950
7951 Look backwards from the first insn in the last comparison
7952 sequence and see if we've got another comparison sequence. */
ccbe285c 7953
7954 rtx jump1;
3c68ac58 7955 if ((jump1 = prev_nonnote_insn (first_compare)) != loop->cont)
d1aea65a 7956 if (GET_CODE (jump1) == JUMP_INSN)
72ae569f 7957 return 0;
ccbe285c 7958 }
7959
04aa27b1 7960 /* Check all of the bivs to see if the compare uses one of them.
7961 Skip biv's set more than once because we can't guarantee that
7962 it will be zero on the last iteration. Also skip if the biv is
7963 used between its update and the test insn. */
7964
6f812a51 7965 for (bl = ivs->list; bl; bl = bl->next)
04aa27b1 7966 {
7967 if (bl->biv_count == 1
88667609 7968 && ! bl->biv->maybe_multiple
04aa27b1 7969 && bl->biv->dest_reg == XEXP (comparison, 0)
7970 && ! reg_used_between_p (regno_reg_rtx[bl->regno], bl->biv->insn,
0a40b133 7971 first_compare))
04aa27b1 7972 break;
7973 }
7974
7975 if (! bl)
7976 return 0;
7977
7978 /* Look for the case where the basic induction variable is always
7979 nonnegative, and equals zero on the last iteration.
7980 In this case, add a reg_note REG_NONNEG, which allows the
7981 m68k DBRA instruction to be used. */
7982
7983 if (((GET_CODE (comparison) == GT
7984 && GET_CODE (XEXP (comparison, 1)) == CONST_INT
7985 && INTVAL (XEXP (comparison, 1)) == -1)
7986 || (GET_CODE (comparison) == NE && XEXP (comparison, 1) == const0_rtx))
7987 && GET_CODE (bl->biv->add_val) == CONST_INT
7988 && INTVAL (bl->biv->add_val) < 0)
7989 {
 7990	  /* The initial value must be greater than 0, and
 7991	     init_val % -dec_value == 0, to ensure that the biv equals zero on
 7992	     the last iteration.  */
7993
7994 if (GET_CODE (bl->initial_value) == CONST_INT
7995 && INTVAL (bl->initial_value) > 0
0bc644e0 7996 && (INTVAL (bl->initial_value)
7997 % (-INTVAL (bl->biv->add_val))) == 0)
04aa27b1 7998 {
 7999	      /* The register is always nonnegative; add a REG_NONNEG note to the branch.  */
2102d800 8000 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8001 REG_NOTES (jump)
8002 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8003 REG_NOTES (jump));
04aa27b1 8004 bl->nonneg = 1;
8005
8006 return 1;
8007 }
8008
8009 /* If the decrement is 1 and the value was tested as >= 0 before
8010 the loop, then we can safely optimize. */
8011 for (p = loop_start; p; p = PREV_INSN (p))
8012 {
8013 if (GET_CODE (p) == CODE_LABEL)
8014 break;
8015 if (GET_CODE (p) != JUMP_INSN)
8016 continue;
8017
15fc3eb7 8018 before_comparison = get_condition_for_loop (loop, p);
04aa27b1 8019 if (before_comparison
8020 && XEXP (before_comparison, 0) == bl->biv->dest_reg
8021 && GET_CODE (before_comparison) == LT
8022 && XEXP (before_comparison, 1) == const0_rtx
8023 && ! reg_set_between_p (bl->biv->dest_reg, p, loop_start)
8024 && INTVAL (bl->biv->add_val) == -1)
8025 {
2102d800 8026 if (! find_reg_note (jump, REG_NONNEG, NULL_RTX))
8027 REG_NOTES (jump)
8028 = gen_rtx_EXPR_LIST (REG_NONNEG, bl->biv->dest_reg,
8029 REG_NOTES (jump));
04aa27b1 8030 bl->nonneg = 1;
8031
8032 return 1;
8033 }
8034 }
8035 }
25999090 8036 else if (GET_CODE (bl->biv->add_val) == CONST_INT
8037 && INTVAL (bl->biv->add_val) > 0)
04aa27b1 8038 {
 8039	/* Try to change inc to dec, so we can apply the above optimization.  */
8040 /* Can do this if:
8041 all registers modified are induction variables or invariant,
8042 all memory references have non-overlapping addresses
8043 (obviously true if only one write)
8044 allow 2 insns for the compare/jump at the end of the loop. */
0e95b87f 8045 /* Also, we must avoid any instructions which use both the reversed
8046 biv and another biv. Such instructions will fail if the loop is
8047 reversed. We meet this condition by requiring that either
8048 no_use_except_counting is true, or else that there is only
8049 one biv. */
04aa27b1 8050 int num_nonfixed_reads = 0;
8051 /* 1 if the iteration var is used only to count iterations. */
8052 int no_use_except_counting = 0;
51d1bacc 8053 /* 1 if the loop has no memory store, or it has a single memory store
8054 which is reversible. */
8055 int reversible_mem_store = 1;
04aa27b1 8056
4d554fb7 8057 if (bl->giv_count == 0
8058 && !loop->exit_count
8059 && !loop_info->has_multiple_exit_targets)
04aa27b1 8060 {
8061 rtx bivreg = regno_reg_rtx[bl->regno];
7c2fce50 8062 struct iv_class *blt;
04aa27b1 8063
8064 /* If there are no givs for this biv, and the only exit is the
3398e91d 8065 fall through at the end of the loop, then
04aa27b1 8066 see if perhaps there are no uses except to count. */
8067 no_use_except_counting = 1;
8068 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9204e736 8069 if (INSN_P (p))
04aa27b1 8070 {
8071 rtx set = single_set (p);
8072
8073 if (set && GET_CODE (SET_DEST (set)) == REG
8074 && REGNO (SET_DEST (set)) == bl->regno)
8075 /* An insn that sets the biv is okay. */
8076 ;
d4a087b2 8077 else if (!reg_mentioned_p (bivreg, PATTERN (p)))
8078 /* An insn that doesn't mention the biv is okay. */
8079 ;
8080 else if (p == prev_nonnote_insn (prev_nonnote_insn (loop_end))
8081 || p == prev_nonnote_insn (loop_end))
090651ad 8082 {
8083 /* If either of these insns uses the biv and sets a pseudo
8084 that has more than one usage, then the biv has uses
8085 other than counting since it's used to derive a value
8086 that is used more than one time. */
ec8895d7 8087 note_stores (PATTERN (p), note_set_pseudo_multiple_uses,
e9b78d43 8088 regs);
8089 if (regs->multiple_uses)
090651ad 8090 {
8091 no_use_except_counting = 0;
8092 break;
8093 }
8094 }
d4a087b2 8095 else
04aa27b1 8096 {
8097 no_use_except_counting = 0;
8098 break;
8099 }
8100 }
7c2fce50 8101
6ed9d6f6 8102 /* A biv has uses besides counting if it is used to set
8103 another biv. */
7c2fce50 8104 for (blt = ivs->list; blt; blt = blt->next)
6ed9d6f6 8105 if (blt->init_set
8106 && reg_mentioned_p (bivreg, SET_SRC (blt->init_set)))
7c2fce50 8107 {
8108 no_use_except_counting = 0;
8109 break;
8110 }
04aa27b1 8111 }
8112
dbeeff76 8113 if (no_use_except_counting)
4a8f0b95 8114 /* No need to worry about MEMs. */
8115 ;
2ff1269a 8116 else if (loop_info->num_mem_sets <= 1)
dbeeff76 8117 {
8118 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9204e736 8119 if (INSN_P (p))
15fc3eb7 8120 num_nonfixed_reads += count_nonfixed_reads (loop, PATTERN (p));
dbeeff76 8121
8122 /* If the loop has a single store, and the destination address is
8123 invariant, then we can't reverse the loop, because this address
8124 might then have the wrong value at loop exit.
 8125 This would work if the source were invariant also; however, in that
8126 case, the insn should have been moved out of the loop. */
8127
2ff1269a 8128 if (loop_info->num_mem_sets == 1)
75d04235 8129 {
8130 struct induction *v;
8131
85bd9543 8132 /* If we could prove that each of the memory locations
8133 written to was different, then we could reverse the
8134 store -- but we don't presently have any way of
8135 knowing that. */
8136 reversible_mem_store = 0;
75d04235 8137
8138 /* If the store depends on a register that is set after the
8139 store, it depends on the initial value, and is thus not
8140 reversible. */
8141 for (v = bl->giv; reversible_mem_store && v; v = v->next_iv)
8142 {
8143 if (v->giv_type == DEST_REG
8144 && reg_mentioned_p (v->dest_reg,
2ff1269a 8145 PATTERN (loop_info->first_loop_store_insn))
72ae569f 8146 && loop_insn_first_p (loop_info->first_loop_store_insn,
2ff1269a 8147 v->insn))
75d04235 8148 reversible_mem_store = 0;
8149 }
8150 }
dbeeff76 8151 }
8152 else
8153 return 0;
51d1bacc 8154
04aa27b1 8155 /* This code acts only on innermost loops. Also it simplifies
8156 the memory address check by only reversing loops with
8157 zero or one memory access.
8158 Two memory accesses could involve parts of the same array,
dbeeff76 8159 and that can't be reversed.
 8160 If the biv is used only for counting, then we don't need to worry
8161 about all these things. */
8162
8163 if ((num_nonfixed_reads <= 1
d286d8a0 8164 && ! loop_info->has_nonconst_call
cd6839f2 8165 && ! loop_info->has_prefetch
1f8922d4 8166 && ! loop_info->has_volatile
dbeeff76 8167 && reversible_mem_store
2ff1269a 8168 && (bl->giv_count + bl->biv_count + loop_info->num_mem_sets
85bd9543 8169 + num_unmoved_movables (loop) + compare_and_branch == insn_count)
6f812a51 8170 && (bl == ivs->list && bl->next == 0))
cd6839f2 8171 || (no_use_except_counting && ! loop_info->has_prefetch))
04aa27b1 8172 {
04aa27b1 8173 rtx tem;
8174
8175 /* Loop can be reversed. */
8176 if (loop_dump_stream)
8177 fprintf (loop_dump_stream, "Can reverse loop\n");
8178
8179 /* Now check other conditions:
0dbd1c74 8180
ad87de1e 8181 The increment must be a constant, as must the initial value,
4a8f0b95 8182 and the comparison code must be LT.
04aa27b1 8183
8184 This test can probably be improved since +/- 1 in the constant
8185 can be obtained by changing LT to LE and vice versa; this is
8186 confusing. */
8187
0dbd1c74 8188 if (comparison
dbeeff76 8189 /* for constants, LE gets turned into LT */
8190 && (GET_CODE (comparison) == LT
8191 || (GET_CODE (comparison) == LE
8192 && no_use_except_counting)))
04aa27b1 8193 {
d844aacc 8194 HOST_WIDE_INT add_val, add_adjust, comparison_val = 0;
dbeeff76 8195 rtx initial_value, comparison_value;
8196 int nonneg = 0;
8197 enum rtx_code cmp_code;
8198 int comparison_const_width;
8199 unsigned HOST_WIDE_INT comparison_sign_mask;
0dbd1c74 8200
8201 add_val = INTVAL (bl->biv->add_val);
dbeeff76 8202 comparison_value = XEXP (comparison, 1);
8c00cd77 8203 if (GET_MODE (comparison_value) == VOIDmode)
8204 comparison_const_width
8205 = GET_MODE_BITSIZE (GET_MODE (XEXP (comparison, 0)));
8206 else
8207 comparison_const_width
8208 = GET_MODE_BITSIZE (GET_MODE (comparison_value));
dbeeff76 8209 if (comparison_const_width > HOST_BITS_PER_WIDE_INT)
8210 comparison_const_width = HOST_BITS_PER_WIDE_INT;
8211 comparison_sign_mask
4a8f0b95 8212 = (unsigned HOST_WIDE_INT) 1 << (comparison_const_width - 1);
dbeeff76 8213
9f89e897 8214 /* If the comparison value is not a loop invariant, then we
 8215 cannot reverse this loop.
8216
8217 ??? If the insns which initialize the comparison value as
8218 a whole compute an invariant result, then we could move
8219 them out of the loop and proceed with loop reversal. */
15fc3eb7 8220 if (! loop_invariant_p (loop, comparison_value))
9f89e897 8221 return 0;
8222
dbeeff76 8223 if (GET_CODE (comparison_value) == CONST_INT)
8224 comparison_val = INTVAL (comparison_value);
0dbd1c74 8225 initial_value = bl->initial_value;
4a8f0b95 8226
8227 /* Normalize the initial value if it is an integer and
00c62112 8228 has no other use except as a counter. This will allow
8229 a few more loops to be reversed. */
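 /* For instance (hypothetical values): a counter running from 5 up to
 25 by steps of 4 iterates 5 times. The normalization below rewrites
 it to run from 0 up to 20 by 4: COMPARISON_VAL becomes 25 - 5 = 20,
 rounded up to a multiple of ADD_VAL (it already is one), and
 INITIAL_VALUE becomes zero, a form the reversal code handles. */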
8230 if (no_use_except_counting
dbeeff76 8231 && GET_CODE (comparison_value) == CONST_INT
00c62112 8232 && GET_CODE (initial_value) == CONST_INT)
0dbd1c74 8233 {
8234 comparison_val = comparison_val - INTVAL (bl->initial_value);
dbeeff76 8235 /* The code below requires comparison_val to be a multiple
8236 of add_val in order to do the loop reversal, so
8237 round up comparison_val to a multiple of add_val.
8238 Since comparison_value is constant, we know that the
8239 current comparison code is LT. */
8240 comparison_val = comparison_val + add_val - 1;
8241 comparison_val
8242 -= (unsigned HOST_WIDE_INT) comparison_val % add_val;
8243 /* We postpone overflow checks for COMPARISON_VAL here;
8244 even if there is an overflow, we might still be able to
8245 reverse the loop, if converting the loop exit test to
8246 NE is possible. */
8247 initial_value = const0_rtx;
0dbd1c74 8248 }
8249
dbeeff76 8250 /* First check if we can do a vanilla loop reversal. */
8251 if (initial_value == const0_rtx
1f8922d4 8252 /* If we have a decrement_and_branch_on_count,
8253 prefer the NE test, since this will allow that
8254 instruction to be generated. Note that we must
8255 use a vanilla loop reversal if the biv is used to
8256 calculate a giv or has a non-counting use. */
8257#if ! defined (HAVE_decrement_and_branch_until_zero) \
8258&& defined (HAVE_decrement_and_branch_on_count)
ec7d7ef9 8259 && (! (add_val == 1 && loop->vtop
6972bd1a 8260 && (bl->biv_count == 0
8261 || no_use_except_counting)))
dbeeff76 8262#endif
8263 && GET_CODE (comparison_value) == CONST_INT
8264 /* Now do postponed overflow checks on COMPARISON_VAL. */
8265 && ! (((comparison_val - add_val) ^ INTVAL (comparison_value))
8266 & comparison_sign_mask))
8267 {
8268 /* Register will always be nonnegative, with value
 8269 0 on the last iteration. */
8270 add_adjust = add_val;
8271 nonneg = 1;
8272 cmp_code = GE;
8273 }
ec7d7ef9 8274 else if (add_val == 1 && loop->vtop
6972bd1a 8275 && (bl->biv_count == 0
8276 || no_use_except_counting))
dbeeff76 8277 {
8278 add_adjust = 0;
8279 cmp_code = NE;
8280 }
8281 else
8282 return 0;
8283
8284 if (GET_CODE (comparison) == LE)
8285 add_adjust -= add_val;
8286
0dbd1c74 8287 /* If the initial value is not zero, or if the comparison
8288 value is not an exact multiple of the increment, then we
 8289 cannot reverse this loop. */
dbeeff76 8290 if (initial_value == const0_rtx
8291 && GET_CODE (comparison_value) == CONST_INT)
8292 {
8293 if (((unsigned HOST_WIDE_INT) comparison_val % add_val) != 0)
8294 return 0;
8295 }
8296 else
8297 {
8298 if (! no_use_except_counting || add_val != 1)
8299 return 0;
8300 }
0dbd1c74 8301
655e5d48 8302 final_value = comparison_value;
8303
0dbd1c74 8304 /* Reset these in case we normalized the initial value
8305 and comparison value above. */
655e5d48 8306 if (GET_CODE (comparison_value) == CONST_INT
8307 && GET_CODE (initial_value) == CONST_INT)
8308 {
8309 comparison_value = GEN_INT (comparison_val);
8310 final_value
8311 = GEN_INT (comparison_val + INTVAL (bl->initial_value));
8312 }
0dbd1c74 8313 bl->initial_value = initial_value;
04aa27b1 8314
8315 /* Save some info needed to produce the new insns. */
8316 reg = bl->biv->dest_reg;
e78ccb91 8317 jump_label = condjump_label (PREV_INSN (loop_end));
72ae569f 8318 new_add_val = GEN_INT (-INTVAL (bl->biv->add_val));
04aa27b1 8319
dbeeff76 8320 /* Set start_value; if this is not a CONST_INT, we need
8321 to generate a SUB.
8322 Initialize biv to start_value before loop start.
04aa27b1 8323 The old initializing insn will be deleted as a
8324 dead store by flow.c. */
dbeeff76 8325 if (initial_value == const0_rtx
8326 && GET_CODE (comparison_value) == CONST_INT)
8327 {
8328 start_value = GEN_INT (comparison_val - add_adjust);
26d6ff2a 8329 loop_insn_hoist (loop, gen_move_insn (reg, start_value));
dbeeff76 8330 }
8331 else if (GET_CODE (initial_value) == CONST_INT)
8332 {
dbeeff76 8333 enum machine_mode mode = GET_MODE (reg);
ad99e708 8334 rtx offset = GEN_INT (-INTVAL (initial_value) - add_adjust);
8335 rtx add_insn = gen_add3_insn (reg, comparison_value, offset);
8336
8337 if (add_insn == 0)
dbeeff76 8338 return 0;
ad99e708 8339
dbeeff76 8340 start_value
8341 = gen_rtx_PLUS (mode, comparison_value, offset);
ad99e708 8342 loop_insn_hoist (loop, add_insn);
dbeeff76 8343 if (GET_CODE (comparison) == LE)
8344 final_value = gen_rtx_PLUS (mode, comparison_value,
8345 GEN_INT (add_val));
8346 }
8347 else if (! add_adjust)
8348 {
8349 enum machine_mode mode = GET_MODE (reg);
ad99e708 8350 rtx sub_insn = gen_sub3_insn (reg, comparison_value,
8351 initial_value);
8352
8353 if (sub_insn == 0)
dbeeff76 8354 return 0;
8355 start_value
8356 = gen_rtx_MINUS (mode, comparison_value, initial_value);
ad99e708 8357 loop_insn_hoist (loop, sub_insn);
dbeeff76 8358 }
8359 else
8360 /* We could handle the other cases too, but it'll be
8361 better to have a testcase first. */
8362 return 0;
04aa27b1 8363
e461685e 8364 /* We may not have a single insn which can increment a reg, so
8365 create a sequence to hold all the insns from expand_inc. */
8366 start_sequence ();
8367 expand_inc (reg, new_add_val);
31d3e01c 8368 tem = get_insns ();
4a8f0b95 8369 end_sequence ();
e461685e 8370
0ab94f0c 8371 p = loop_insn_emit_before (loop, 0, bl->biv->insn, tem);
06786d0d 8372 delete_insn (bl->biv->insn);
4a8f0b95 8373
04aa27b1 8374 /* Update biv info to reflect its new status. */
8375 bl->biv->insn = p;
8376 bl->initial_value = start_value;
8377 bl->biv->add_val = new_add_val;
8378
da6b0be0 8379 /* Update loop info. */
f48706bb 8380 loop_info->initial_value = reg;
8381 loop_info->initial_equiv_value = reg;
da6b0be0 8382 loop_info->final_value = const0_rtx;
8383 loop_info->final_equiv_value = const0_rtx;
8384 loop_info->comparison_value = const0_rtx;
8385 loop_info->comparison_code = cmp_code;
8386 loop_info->increment = new_add_val;
8387
04aa27b1 8388 /* Inc LABEL_NUSES so that delete_insn will
8389 not delete the label. */
72ae569f 8390 LABEL_NUSES (XEXP (jump_label, 0))++;
04aa27b1 8391
8392 /* Emit an insn after the end of the loop to set the biv's
8393 proper exit value if it is used anywhere outside the loop. */
0a40b133 8394 if ((REGNO_LAST_UID (bl->regno) != INSN_UID (first_compare))
04aa27b1 8395 || ! bl->init_insn
394685a4 8396 || REGNO_FIRST_UID (bl->regno) != INSN_UID (bl->init_insn))
2ecaad6d 8397 loop_insn_sink (loop, gen_load_of_final_value (reg, final_value));
04aa27b1 8398
8399 /* Delete compare/branch at end of loop. */
e4bf866d 8400 delete_related_insns (PREV_INSN (loop_end));
0a40b133 8401 if (compare_and_branch == 2)
e4bf866d 8402 delete_related_insns (first_compare);
04aa27b1 8403
8404 /* Add new compare/branch insn at end of loop. */
8405 start_sequence ();
989c2a69 8406 emit_cmp_and_jump_insns (reg, const0_rtx, cmp_code, NULL_RTX,
2b96c5f6 8407 GET_MODE (reg), 0,
989c2a69 8408 XEXP (jump_label, 0));
31d3e01c 8409 tem = get_insns ();
04aa27b1 8410 end_sequence ();
8411 emit_jump_insn_before (tem, loop_end);
8412
46134d72 8413 for (tem = PREV_INSN (loop_end);
8414 tem && GET_CODE (tem) != JUMP_INSN;
8415 tem = PREV_INSN (tem))
8416 ;
8417
8418 if (tem)
8419 JUMP_LABEL (tem) = XEXP (jump_label, 0);
8420
dbeeff76 8421 if (nonneg)
04aa27b1 8422 {
dbeeff76 8423 if (tem)
8424 {
dbeeff76 8425 /* Increment of LABEL_NUSES done above. */
8426 /* Register is now always nonnegative,
8427 so add REG_NONNEG note to the branch. */
2102d800 8428 REG_NOTES (tem) = gen_rtx_EXPR_LIST (REG_NONNEG, reg,
dbeeff76 8429 REG_NOTES (tem));
8430 }
8431 bl->nonneg = 1;
04aa27b1 8432 }
8433
3aeaf55f 8434 /* No insn may reference both the reversed and another biv or it
8435 will fail (see comment near the top of the loop reversal
8436 code).
8437 Earlier on, we have verified that the biv has no use except
8438 counting, or it is the only biv in this function.
8439 However, the code that computes no_use_except_counting does
8440 not verify reg notes. It's possible to have an insn that
8441 references another biv, and has a REG_EQUAL note with an
8442 expression based on the reversed biv. To avoid this case,
8443 remove all REG_EQUAL notes based on the reversed biv
8444 here. */
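 /* An example of the hazard (hypothetical): an insn that uses another
 biv J could carry a note (REG_EQUAL (plus J I)) where I is the
 reversed biv; after reversal that note would describe a stale
 value, so such notes are removed below. */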
8445 for (p = loop_start; p != loop_end; p = NEXT_INSN (p))
9204e736 8446 if (INSN_P (p))
3aeaf55f 8447 {
8448 rtx *pnote;
8449 rtx set = single_set (p);
8450 /* If this is a set of a GIV based on the reversed biv, any
8451 REG_EQUAL notes should still be correct. */
8452 if (! set
8453 || GET_CODE (SET_DEST (set)) != REG
6f812a51 8454 || (size_t) REGNO (SET_DEST (set)) >= ivs->n_regs
8ec5f078 8455 || REG_IV_TYPE (ivs, REGNO (SET_DEST (set))) != GENERAL_INDUCT
8456 || REG_IV_INFO (ivs, REGNO (SET_DEST (set)))->src_reg != bl->biv->src_reg)
3aeaf55f 8457 for (pnote = &REG_NOTES (p); *pnote;)
8458 {
8459 if (REG_NOTE_KIND (*pnote) == REG_EQUAL
8460 && reg_mentioned_p (regno_reg_rtx[bl->regno],
8461 XEXP (*pnote, 0)))
8462 *pnote = XEXP (*pnote, 1);
8463 else
8464 pnote = &XEXP (*pnote, 1);
8465 }
8466 }
8467
04aa27b1 8468 /* Mark that this biv has been reversed. Each giv which depends
8469 on this biv, and which is also live past the end of the loop
8470 will have to be fixed up. */
8471
8472 bl->reversed = 1;
8473
8474 if (loop_dump_stream)
ab8f7c7d 8475 {
8476 fprintf (loop_dump_stream, "Reversed loop");
8477 if (bl->nonneg)
8478 fprintf (loop_dump_stream, " and added reg_nonneg\n");
8479 else
8480 fprintf (loop_dump_stream, "\n");
8481 }
04aa27b1 8482
8483 return 1;
8484 }
8485 }
8486 }
8487
8488 return 0;
8489}
8490\f
8491/* Verify whether the biv BL appears to be eliminable,
8492 based on the insns in the loop that refer to it.
04aa27b1 8493
6ef828f9 8494 If ELIMINATE_P is nonzero, actually do the elimination.
04aa27b1 8495
8496 THRESHOLD and INSN_COUNT are from loop_optimize and are used to
8497 determine whether invariant insns should be placed inside or at the
8498 start of the loop. */
8499
8500static int
3ad4992f 8501maybe_eliminate_biv (const struct loop *loop, struct iv_class *bl,
8502 int eliminate_p, int threshold, int insn_count)
04aa27b1 8503{
8ec5f078 8504 struct loop_ivs *ivs = LOOP_IVS (loop);
04aa27b1 8505 rtx reg = bl->biv->dest_reg;
2835c916 8506 rtx p;
04aa27b1 8507
8508 /* Scan all insns in the loop, stopping if we find one that uses the
8509 biv in a way that we cannot eliminate. */
8510
89e8d34f 8511 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
04aa27b1 8512 {
8513 enum rtx_code code = GET_CODE (p);
89e8d34f 8514 basic_block where_bb = 0;
8515 rtx where_insn = threshold >= insn_count ? 0 : p;
a821a12b 8516 rtx note;
04aa27b1 8517
092aa66f 8518 /* If this is a libcall that sets a giv, skip ahead to its end. */
8519 if (GET_RTX_CLASS (code) == 'i')
8520 {
a821a12b 8521 note = find_reg_note (p, REG_LIBCALL, NULL_RTX);
092aa66f 8522
8523 if (note)
8524 {
8525 rtx last = XEXP (note, 0);
8526 rtx set = single_set (last);
8527
8528 if (set && GET_CODE (SET_DEST (set)) == REG)
8529 {
02e7a332 8530 unsigned int regno = REGNO (SET_DEST (set));
092aa66f 8531
d1908618 8532 if (regno < ivs->n_regs
8ec5f078 8533 && REG_IV_TYPE (ivs, regno) == GENERAL_INDUCT
8534 && REG_IV_INFO (ivs, regno)->src_reg == bl->biv->src_reg)
092aa66f 8535 p = last;
8536 }
8537 }
8538 }
a821a12b 8539
8540 /* Closely examine the insn if the biv is mentioned. */
04aa27b1 8541 if ((code == INSN || code == JUMP_INSN || code == CALL_INSN)
8542 && reg_mentioned_p (reg, PATTERN (p))
15fc3eb7 8543 && ! maybe_eliminate_biv_1 (loop, PATTERN (p), p, bl,
89e8d34f 8544 eliminate_p, where_bb, where_insn))
04aa27b1 8545 {
8546 if (loop_dump_stream)
8547 fprintf (loop_dump_stream,
8548 "Cannot eliminate biv %d: biv used in insn %d.\n",
8549 bl->regno, INSN_UID (p));
8550 break;
8551 }
a821a12b 8552
8553 /* If we are eliminating, kill REG_EQUAL notes mentioning the biv. */
8554 if (eliminate_p
8555 && (note = find_reg_note (p, REG_EQUAL, NULL_RTX)) != NULL_RTX
8556 && reg_mentioned_p (reg, XEXP (note, 0)))
8557 remove_note (p, note);
04aa27b1 8558 }
8559
89e8d34f 8560 if (p == loop->end)
04aa27b1 8561 {
8562 if (loop_dump_stream)
8563 fprintf (loop_dump_stream, "biv %d %s eliminated.\n",
8564 bl->regno, eliminate_p ? "was" : "can be");
8565 return 1;
8566 }
8567
8568 return 0;
8569}
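/* An illustration of the elimination tested for above (hypothetical C
   source, not from a testcase):

	for (i = 0, p = buf; i < 100; i++, p++)
	  *p = 0;

   The only use of the biv I, besides its own increment, is the exit
   test I < 100.  Rewriting that test in terms of the giv P, as
   P < BUF + 100, leaves I with no remaining uses, so the biv and its
   increment can be deleted.  */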
8570\f
9246c12c 8571/* INSN and REFERENCE are instructions in the same insn chain.
6ef828f9 8572 Return nonzero if INSN is first. */
9246c12c 8573
37d52343 8574int
3ad4992f 8575loop_insn_first_p (rtx insn, rtx reference)
9246c12c 8576{
64a60ba8 8577 rtx p, q;
8578
4a8f0b95 8579 for (p = insn, q = reference;;)
64a60ba8 8580 {
8581 /* Start with test for not first so that INSN == REFERENCE yields not
8582 first. */
8583 if (q == insn || ! p)
8851e806 8584 return 0;
64a60ba8 8585 if (p == reference || ! q)
8851e806 8586 return 1;
64a60ba8 8587
930b8f5a 8588 /* Either of P or Q might be a NOTE. Notes have the same LUID as the
8589 previous insn, hence the <= comparison below does not work if
8590 P is a note. */
64a60ba8 8591 if (INSN_UID (p) < max_uid_for_loop
930b8f5a 8592 && INSN_UID (q) < max_uid_for_loop
8593 && GET_CODE (p) != NOTE)
8594 return INSN_LUID (p) <= INSN_LUID (q);
64a60ba8 8595
930b8f5a 8596 if (INSN_UID (p) >= max_uid_for_loop
8597 || GET_CODE (p) == NOTE)
64a60ba8 8598 p = NEXT_INSN (p);
8599 if (INSN_UID (q) >= max_uid_for_loop)
8600 q = NEXT_INSN (q);
8601 }
9246c12c 8602}
8603
6ef828f9 8604/* We are trying to eliminate BIV in INSN using GIV. Return nonzero if
9246c12c 8605 the offset that we have to take into account due to auto-increment /
 8606 giv derivation is zero. */
8607static int
3ad4992f 8608biv_elimination_giv_has_0_offset (struct induction *biv,
8609 struct induction *giv, rtx insn)
9246c12c 8610{
8611 /* If the giv V had the auto-inc address optimization applied
8612 to it, and INSN occurs between the giv insn and the biv
8613 insn, then we'd have to adjust the value used here.
8614 This is rare, so we don't bother to make this possible. */
8615 if (giv->auto_inc_opt
8616 && ((loop_insn_first_p (giv->insn, insn)
8617 && loop_insn_first_p (insn, biv->insn))
8618 || (loop_insn_first_p (biv->insn, insn)
8619 && loop_insn_first_p (insn, giv->insn))))
8620 return 0;
8621
9246c12c 8622 return 1;
8623}
8624
04aa27b1 8625/* If BL appears in X (part of the pattern of INSN), see if we can
8626 eliminate its use. If so, return 1. If not, return 0.
8627
8628 If BIV does not appear in X, return 1.
8629
6ef828f9 8630 If ELIMINATE_P is nonzero, actually do the elimination.
89e8d34f 8631 WHERE_INSN/WHERE_BB indicate where extra insns should be added.
8632 Depending on how many items have been moved out of the loop, it
6ef828f9 8633 will either be before INSN (when WHERE_INSN is nonzero) or at the
89e8d34f 8634 start of the loop (when WHERE_INSN is zero). */
04aa27b1 8635
8636static int
3ad4992f 8637maybe_eliminate_biv_1 (const struct loop *loop, rtx x, rtx insn,
8638 struct iv_class *bl, int eliminate_p,
8639 basic_block where_bb, rtx where_insn)
04aa27b1 8640{
8641 enum rtx_code code = GET_CODE (x);
8642 rtx reg = bl->biv->dest_reg;
8643 enum machine_mode mode = GET_MODE (reg);
8644 struct induction *v;
7d27e4c9 8645 rtx arg, tem;
8646#ifdef HAVE_cc0
8647 rtx new;
8648#endif
04aa27b1 8649 int arg_operand;
d2ca078f 8650 const char *fmt;
04aa27b1 8651 int i, j;
8652
8653 switch (code)
8654 {
8655 case REG:
8656 /* If we haven't already been able to do something with this BIV,
8657 we can't eliminate it. */
8658 if (x == reg)
8659 return 0;
8660 return 1;
8661
8662 case SET:
8663 /* If this sets the BIV, it is not a problem. */
8664 if (SET_DEST (x) == reg)
8665 return 1;
8666
8667 /* If this is an insn that defines a giv, it is also ok because
8668 it will go away when the giv is reduced. */
8669 for (v = bl->giv; v; v = v->next_iv)
8670 if (v->giv_type == DEST_REG && SET_DEST (x) == v->dest_reg)
8671 return 1;
8672
8673#ifdef HAVE_cc0
8674 if (SET_DEST (x) == cc0_rtx && SET_SRC (x) == reg)
8675 {
8676 /* Can replace with any giv that was reduced and
8677 that has (MULT_VAL != 0) and (ADD_VAL == 0).
48e0a6bf 8678 Require a constant for MULT_VAL, so we know it's nonzero.
8679 ??? We disable this optimization to avoid potential
8680 overflows. */
04aa27b1 8681
8682 for (v = bl->giv; v; v = v->next_iv)
d66834dc 8683 if (GET_CODE (v->mult_val) == CONST_INT && v->mult_val != const0_rtx
04aa27b1 8684 && v->add_val == const0_rtx
fe32df01 8685 && ! v->ignore && ! v->maybe_dead && v->always_computable
48e0a6bf 8686 && v->mode == mode
8687 && 0)
04aa27b1 8688 {
9246c12c 8689 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
a5e23478 8690 continue;
8691
04aa27b1 8692 if (! eliminate_p)
8693 return 1;
8694
8695 /* If the giv has the opposite direction of change,
8696 then reverse the comparison. */
8697 if (INTVAL (v->mult_val) < 0)
941522d6 8698 new = gen_rtx_COMPARE (GET_MODE (v->new_reg),
8699 const0_rtx, v->new_reg);
04aa27b1 8700 else
8701 new = v->new_reg;
8702
8703 /* We can probably test that giv's reduced reg. */
8704 if (validate_change (insn, &SET_SRC (x), new, 0))
8705 return 1;
8706 }
8707
8708 /* Look for a giv with (MULT_VAL != 0) and (ADD_VAL != 0);
8709 replace test insn with a compare insn (cmp REDUCED_GIV ADD_VAL).
48e0a6bf 8710 Require a constant for MULT_VAL, so we know it's nonzero.
8711 ??? Do this only if ADD_VAL is a pointer to avoid a potential
8712 overflow problem. */
04aa27b1 8713
8714 for (v = bl->giv; v; v = v->next_iv)
4a8f0b95 8715 if (GET_CODE (v->mult_val) == CONST_INT
8716 && v->mult_val != const0_rtx
fe32df01 8717 && ! v->ignore && ! v->maybe_dead && v->always_computable
48e0a6bf 8718 && v->mode == mode
8719 && (GET_CODE (v->add_val) == SYMBOL_REF
8720 || GET_CODE (v->add_val) == LABEL_REF
8721 || GET_CODE (v->add_val) == CONST
8722 || (GET_CODE (v->add_val) == REG
e61a0a7f 8723 && REG_POINTER (v->add_val))))
04aa27b1 8724 {
9246c12c 8725 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
a5e23478 8726 continue;
8727
04aa27b1 8728 if (! eliminate_p)
8729 return 1;
8730
8731 /* If the giv has the opposite direction of change,
8732 then reverse the comparison. */
8733 if (INTVAL (v->mult_val) < 0)
941522d6 8734 new = gen_rtx_COMPARE (VOIDmode, copy_rtx (v->add_val),
8735 v->new_reg);
04aa27b1 8736 else
941522d6 8737 new = gen_rtx_COMPARE (VOIDmode, v->new_reg,
8738 copy_rtx (v->add_val));
04aa27b1 8739
8740 /* Replace biv with the giv's reduced register. */
8741 update_reg_last_use (v->add_val, insn);
8742 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
8743 return 1;
8744
8745 /* Insn doesn't support that constant or invariant. Copy it
 8746 into a register (it will be a loop invariant). */
8747 tem = gen_reg_rtx (GET_MODE (v->new_reg));
8748
0ab94f0c 8749 loop_insn_emit_before (loop, 0, where_insn,
8750 gen_move_insn (tem,
8751 copy_rtx (v->add_val)));
04aa27b1 8752
e7d536da 8753 /* Substitute the new register for its invariant value in
4a8f0b95 8754 the compare expression. */
e7d536da 8755 XEXP (new, (INTVAL (v->mult_val) < 0) ? 0 : 1) = tem;
8756 if (validate_change (insn, &SET_SRC (PATTERN (insn)), new, 0))
04aa27b1 8757 return 1;
8758 }
8759 }
8760#endif
8761 break;
8762
8763 case COMPARE:
8764 case EQ: case NE:
8765 case GT: case GE: case GTU: case GEU:
8766 case LT: case LE: case LTU: case LEU:
8767 /* See if either argument is the biv. */
8768 if (XEXP (x, 0) == reg)
8769 arg = XEXP (x, 1), arg_operand = 1;
8770 else if (XEXP (x, 1) == reg)
8771 arg = XEXP (x, 0), arg_operand = 0;
8772 else
8773 break;
8774
8775 if (CONSTANT_P (arg))
8776 {
8777 /* First try to replace with any giv that has constant positive
8778 mult_val and constant add_val. We might be able to support
8779 negative mult_val, but it seems complex to do it in general. */
8780
8781 for (v = bl->giv; v; v = v->next_iv)
4a8f0b95 8782 if (GET_CODE (v->mult_val) == CONST_INT
8783 && INTVAL (v->mult_val) > 0
48e0a6bf 8784 && (GET_CODE (v->add_val) == SYMBOL_REF
8785 || GET_CODE (v->add_val) == LABEL_REF
8786 || GET_CODE (v->add_val) == CONST
8787 || (GET_CODE (v->add_val) == REG
e61a0a7f 8788 && REG_POINTER (v->add_val)))
fe32df01 8789 && ! v->ignore && ! v->maybe_dead && v->always_computable
04aa27b1 8790 && v->mode == mode)
8791 {
9246c12c 8792 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
a5e23478 8793 continue;
8794
9ae13cc6 8795 /* Don't eliminate if the linear combination that makes up
8796 the giv overflows when it is applied to ARG. */
8797 if (GET_CODE (arg) == CONST_INT)
8798 {
8799 rtx add_val;
8800
8801 if (GET_CODE (v->add_val) == CONST_INT)
8802 add_val = v->add_val;
8803 else
8804 add_val = const0_rtx;
8805
8806 if (const_mult_add_overflow_p (arg, v->mult_val,
8807 add_val, mode, 1))
8808 continue;
8809 }
8810
04aa27b1 8811 if (! eliminate_p)
8812 return 1;
8813
8814 /* Replace biv with the giv's reduced reg. */
4a8f0b95 8815 validate_change (insn, &XEXP (x, 1 - arg_operand), v->new_reg, 1);
04aa27b1 8816
8817 /* If all constants are actually constant integers and
8818 the derived constant can be directly placed in the COMPARE,
8819 do so. */
8820 if (GET_CODE (arg) == CONST_INT
d66834dc 8821 && GET_CODE (v->add_val) == CONST_INT)
8822 {
9ae13cc6 8823 tem = expand_mult_add (arg, NULL_RTX, v->mult_val,
8824 v->add_val, mode, 1);
d66834dc 8825 }
8826 else
8827 {
8828 /* Otherwise, load it into a register. */
8829 tem = gen_reg_rtx (mode);
89e8d34f 8830 loop_iv_add_mult_emit_before (loop, arg,
8831 v->mult_val, v->add_val,
8832 tem, where_bb, where_insn);
d66834dc 8833 }
9ae13cc6 8834
8835 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8836
d66834dc 8837 if (apply_change_group ())
04aa27b1 8838 return 1;
04aa27b1 8839 }
4a8f0b95 8840
04aa27b1 8841 /* Look for giv with positive constant mult_val and nonconst add_val.
4a8f0b95 8842 Insert insns to calculate new compare value.
48e0a6bf 8843 ??? Turn this off due to possible overflow. */
04aa27b1 8844
8845 for (v = bl->giv; v; v = v->next_iv)
4a8f0b95 8846 if (GET_CODE (v->mult_val) == CONST_INT
8847 && INTVAL (v->mult_val) > 0
fe32df01 8848 && ! v->ignore && ! v->maybe_dead && v->always_computable
48e0a6bf 8849 && v->mode == mode
8850 && 0)
04aa27b1 8851 {
8852 rtx tem;
8853
9246c12c 8854 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
a5e23478 8855 continue;
8856
04aa27b1 8857 if (! eliminate_p)
8858 return 1;
8859
8860 tem = gen_reg_rtx (mode);
8861
8862 /* Replace biv with giv's reduced register. */
8863 validate_change (insn, &XEXP (x, 1 - arg_operand),
8864 v->new_reg, 1);
8865
8866 /* Compute value to compare against. */
4bb30577 8867 loop_iv_add_mult_emit_before (loop, arg,
89e8d34f 8868 v->mult_val, v->add_val,
8869 tem, where_bb, where_insn);
04aa27b1 8870 /* Use it in this insn. */
8871 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8872 if (apply_change_group ())
8873 return 1;
8874 }
8875 }
8876 else if (GET_CODE (arg) == REG || GET_CODE (arg) == MEM)
8877 {
15fc3eb7 8878 if (loop_invariant_p (loop, arg) == 1)
04aa27b1 8879 {
8880 /* Look for giv with constant positive mult_val and nonconst
4a8f0b95 8881 add_val. Insert insns to compute new compare value.
48e0a6bf 8882 ??? Turn this off due to possible overflow. */
04aa27b1 8883
8884 for (v = bl->giv; v; v = v->next_iv)
d66834dc 8885 if (GET_CODE (v->mult_val) == CONST_INT && INTVAL (v->mult_val) > 0
fe32df01 8886 && ! v->ignore && ! v->maybe_dead && v->always_computable
48e0a6bf 8887 && v->mode == mode
8888 && 0)
04aa27b1 8889 {
8890 rtx tem;
8891
9246c12c 8892 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
a5e23478 8893 continue;
8894
04aa27b1 8895 if (! eliminate_p)
8896 return 1;
8897
8898 tem = gen_reg_rtx (mode);
8899
8900 /* Replace biv with giv's reduced register. */
8901 validate_change (insn, &XEXP (x, 1 - arg_operand),
8902 v->new_reg, 1);
8903
8904 /* Compute value to compare against. */
4bb30577 8905 loop_iv_add_mult_emit_before (loop, arg,
89e8d34f 8906 v->mult_val, v->add_val,
8907 tem, where_bb, where_insn);
04aa27b1 8908 validate_change (insn, &XEXP (x, arg_operand), tem, 1);
8909 if (apply_change_group ())
8910 return 1;
8911 }
8912 }
8913
 8914 /* This code has problems. Basically, when deciding whether we
 8915 will eliminate BL, you can't know whether a particular giv
 8916 of ARG will be reduced. If it isn't going to be reduced,
 8917 we can't eliminate BL. We can try forcing it to be reduced,
 8918 but that can generate poor code.
 8919
 8920 The problem is that the benefit of reducing TV, below, should
 8921 be increased if BL can actually be eliminated, but this means
 8922 we might have to do a topological sort of the order in which
 8923 we try to process bivs. It doesn't seem worthwhile to do
 8924 this sort of thing now. */
8925
8926#if 0
8927 /* Otherwise the reg compared with had better be a biv. */
8928 if (GET_CODE (arg) != REG
8ec5f078 8929 || REG_IV_TYPE (ivs, REGNO (arg)) != BASIC_INDUCT)
04aa27b1 8930 return 0;
8931
8932 /* Look for a pair of givs, one for each biv,
8933 with identical coefficients. */
8934 for (v = bl->giv; v; v = v->next_iv)
8935 {
8936 struct induction *tv;
8937
8938 if (v->ignore || v->maybe_dead || v->mode != mode)
8939 continue;
8940
4bb30577 8941 for (tv = REG_IV_CLASS (ivs, REGNO (arg))->giv; tv;
b995c337 8942 tv = tv->next_iv)
04aa27b1 8943 if (! tv->ignore && ! tv->maybe_dead
8944 && rtx_equal_p (tv->mult_val, v->mult_val)
8945 && rtx_equal_p (tv->add_val, v->add_val)
8946 && tv->mode == mode)
8947 {
9246c12c 8948 if (! biv_elimination_giv_has_0_offset (bl->biv, v, insn))
a5e23478 8949 continue;
8950
04aa27b1 8951 if (! eliminate_p)
8952 return 1;
8953
8954 /* Replace biv with its giv's reduced reg. */
4a8f0b95 8955 XEXP (x, 1 - arg_operand) = v->new_reg;
04aa27b1 8956 /* Replace other operand with the other giv's
8957 reduced reg. */
8958 XEXP (x, arg_operand) = tv->new_reg;
8959 return 1;
8960 }
8961 }
8962#endif
8963 }
8964
8965 /* If we get here, the biv can't be eliminated. */
8966 return 0;
8967
8968 case MEM:
8969 /* If this address is a DEST_ADDR giv, it doesn't matter if the
8970 biv is used in it, since it will be replaced. */
8971 for (v = bl->giv; v; v = v->next_iv)
8972 if (v->giv_type == DEST_ADDR && v->location == &XEXP (x, 0))
8973 return 1;
8974 break;
0dbd1c74 8975
8976 default:
8977 break;
04aa27b1 8978 }
8979
8980 /* See if any subexpression fails elimination. */
8981 fmt = GET_RTX_FORMAT (code);
8982 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8983 {
8984 switch (fmt[i])
8985 {
8986 case 'e':
4a8f0b95 8987 if (! maybe_eliminate_biv_1 (loop, XEXP (x, i), insn, bl,
89e8d34f 8988 eliminate_p, where_bb, where_insn))
04aa27b1 8989 return 0;
8990 break;
8991
8992 case 'E':
8993 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
15fc3eb7 8994 if (! maybe_eliminate_biv_1 (loop, XVECEXP (x, i, j), insn, bl,
89e8d34f 8995 eliminate_p, where_bb, where_insn))
04aa27b1 8996 return 0;
8997 break;
8998 }
8999 }
9000
9001 return 1;
4a8f0b95 9002}
04aa27b1 9003\f
9004/* Return nonzero if the last use of REG
9005 is in an insn following INSN in the same basic block. */
9006
9007static int
3ad4992f 9008last_use_this_basic_block (rtx reg, rtx insn)
04aa27b1 9009{
9010 rtx n;
9011 for (n = insn;
9012 n && GET_CODE (n) != CODE_LABEL && GET_CODE (n) != JUMP_INSN;
9013 n = NEXT_INSN (n))
9014 {
394685a4 9015 if (REGNO_LAST_UID (REGNO (reg)) == INSN_UID (n))
04aa27b1 9016 return 1;
9017 }
9018 return 0;
9019}
9020\f
9021/* Called via `note_stores' to record the initial value of a biv. Here we
9022 just record the location of the set and process it later. */
9023
9024static void
3ad4992f 9025record_initial (rtx dest, rtx set, void *data ATTRIBUTE_UNUSED)
04aa27b1 9026{
8ec5f078 9027 struct loop_ivs *ivs = (struct loop_ivs *) data;
04aa27b1 9028 struct iv_class *bl;
9029
9030 if (GET_CODE (dest) != REG
d1908618 9031 || REGNO (dest) >= ivs->n_regs
8ec5f078 9032 || REG_IV_TYPE (ivs, REGNO (dest)) != BASIC_INDUCT)
04aa27b1 9033 return;
9034
b995c337 9035 bl = REG_IV_CLASS (ivs, REGNO (dest));
04aa27b1 9036
9037 /* If this is the first set found, record it. */
9038 if (bl->init_insn == 0)
9039 {
9040 bl->init_insn = note_insn;
9041 bl->init_set = set;
9042 }
9043}
9044\f
9045/* If any of the registers in X are "old" and currently have a last use earlier
9046 than INSN, update them to have a last use of INSN. Their actual last use
9047 will be the previous insn but it will not have a valid uid_luid so we can't
89e8d34f 9048 use it. X must be a source expression only. */
04aa27b1 9049
9050static void
3ad4992f 9051update_reg_last_use (rtx x, rtx insn)
04aa27b1 9052{
9053 /* Check for the case where INSN does not have a valid luid. In this case,
9054 there is no need to modify the regno_last_uid, as this can only happen
9055 when code is inserted after the loop_end to set a pseudo's final value,
4bb30577 9056 and hence this insn will never be the last use of x.
9057 ???? This comment is not correct. See for example loop_givs_reduce.
89e8d34f 9058 This may insert an insn before another new insn. */
04aa27b1 9059 if (GET_CODE (x) == REG && REGNO (x) < max_reg_before_loop
9060 && INSN_UID (insn) < max_uid_for_loop
23e52523 9061 && REGNO_LAST_LUID (REGNO (x)) < INSN_LUID (insn))
89e8d34f 9062 {
9063 REGNO_LAST_UID (REGNO (x)) = INSN_UID (insn);
9064 }
04aa27b1 9065 else
9066 {
19cb6b50 9067 int i, j;
9068 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
04aa27b1 9069 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
9070 {
9071 if (fmt[i] == 'e')
9072 update_reg_last_use (XEXP (x, i), insn);
9073 else if (fmt[i] == 'E')
9074 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9075 update_reg_last_use (XVECEXP (x, i, j), insn);
9076 }
9077 }
9078}
9079\f
ff27bca2 9080/* Given an insn INSN and condition COND, return the condition in a
9081 canonical form to simplify testing by callers. Specifically:
04aa27b1 9082
9083 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
9084 (2) Both operands will be machine operands; (cc0) will have been replaced.
9085 (3) If an operand is a constant, it will be the second operand.
9086 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
ff27bca2 9087 for GE, GEU, and LEU.
9088
9089 If the condition cannot be understood, or is an inequality floating-point
9090 comparison which needs to be reversed, 0 will be returned.
9091
6ef828f9 9092 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
ff27bca2 9093
6ef828f9 9094 If EARLIEST is nonzero, it is a pointer to a place where the earliest
ff27bca2 9095 insn used in locating the condition was found. If a replacement test
9096 of the condition is desired, it should be placed in front of that
124d766d 9097 insn and we will be sure that the inputs are still valid.
9098
6ef828f9 9099 If WANT_REG is nonzero, we wish the condition to be relative to that
124d766d 9100 register, if possible. Therefore, do not canonicalize the condition
9101 further. */
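/* For example (hypothetical RTL): (le (reg 100) (const_int 4)) is
   canonicalized to (lt (reg 100) (const_int 5)), and
   (gt (const_int 0) (reg 100)) has its constant put last, giving the
   equivalent (lt (reg 100) (const_int 0)).  */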
04aa27b1 9102
9103rtx
3ad4992f 9104canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
9105 rtx want_reg)
04aa27b1 9106{
9107 enum rtx_code code;
ff27bca2 9108 rtx prev = insn;
04aa27b1 9109 rtx set;
9110 rtx tem;
9111 rtx op0, op1;
9112 int reverse_code = 0;
3a2a3a7f 9113 enum machine_mode mode;
04aa27b1 9114
ff27bca2 9115 code = GET_CODE (cond);
9116 mode = GET_MODE (cond);
9117 op0 = XEXP (cond, 0);
9118 op1 = XEXP (cond, 1);
04aa27b1 9119
ff27bca2 9120 if (reverse)
87fd8214 9121 code = reversed_comparison_code (cond, insn);
9122 if (code == UNKNOWN)
9123 return 0;
04aa27b1 9124
9125 if (earliest)
ff27bca2 9126 *earliest = insn;
04aa27b1 9127
9128 /* If we are comparing a register with zero, see if the register is set
9129 in the previous insn to a COMPARE or a comparison operation. Perform
9130 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
9131 in cse.c */
9132
124d766d 9133 while (GET_RTX_CLASS (code) == '<'
72ae569f 9134 && op1 == CONST0_RTX (GET_MODE (op0))
124d766d 9135 && op0 != want_reg)
04aa27b1 9136 {
6ef828f9 9137 /* Set nonzero when we find something of interest. */
04aa27b1 9138 rtx x = 0;
9139
9140#ifdef HAVE_cc0
9141 /* If comparison with cc0, import actual comparison from compare
9142 insn. */
9143 if (op0 == cc0_rtx)
9144 {
9145 if ((prev = prev_nonnote_insn (prev)) == 0
9146 || GET_CODE (prev) != INSN
9147 || (set = single_set (prev)) == 0
9148 || SET_DEST (set) != cc0_rtx)
9149 return 0;
9150
9151 op0 = SET_SRC (set);
9152 op1 = CONST0_RTX (GET_MODE (op0));
9153 if (earliest)
9154 *earliest = prev;
9155 }
9156#endif
9157
9158 /* If this is a COMPARE, pick up the two things being compared. */
9159 if (GET_CODE (op0) == COMPARE)
9160 {
9161 op1 = XEXP (op0, 1);
9162 op0 = XEXP (op0, 0);
9163 continue;
9164 }
9165 else if (GET_CODE (op0) != REG)
9166 break;
9167
9168 /* Go back to the previous insn. Stop if it is not an INSN. We also
9169 stop if it isn't a single set or if it has a REG_INC note because
9170 we don't want to bother dealing with it. */
9171
9172 if ((prev = prev_nonnote_insn (prev)) == 0
9173 || GET_CODE (prev) != INSN
ed1e5d40 9174 || FIND_REG_INC_NOTE (prev, NULL_RTX))
87fd8214 9175 break;
9176
9177 set = set_of (op0, prev);
9178
9179 if (set
9180 && (GET_CODE (set) != SET
9181 || !rtx_equal_p (SET_DEST (set), op0)))
04aa27b1 9182 break;
9183
9184 /* If this is setting OP0, get what it sets it to if it looks
9185 relevant. */
87fd8214 9186 if (set)
04aa27b1 9187 {
37103d5c 9188 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
aa870c1b 9189#ifdef FLOAT_STORE_FLAG_VALUE
9190 REAL_VALUE_TYPE fsfv;
9191#endif
04aa27b1 9192
3a2a3a7f 9193 /* ??? We may not combine comparisons done in a CCmode with
9194 comparisons not done in a CCmode. This is to aid targets
9195 like Alpha that have an IEEE compliant EQ instruction, and
9196 a non-IEEE compliant BEQ instruction. The use of CCmode is
9197 actually artificial, simply to prevent the combination, but
9b515c48 9198 should not affect other platforms.
9199
9200 However, we must allow VOIDmode comparisons to match either
9201 CCmode or non-CCmode comparison, because some ports have
9202 modeless comparisons inside branch patterns.
9203
9204 ??? This mode check should perhaps look more like the mode check
9205 in simplify_comparison in combine. */
3a2a3a7f 9206
04aa27b1 9207 if ((GET_CODE (SET_SRC (set)) == COMPARE
fda4fc2e 9208 || (((code == NE
9209 || (code == LT
9210 && GET_MODE_CLASS (inner_mode) == MODE_INT
df38d76e 9211 && (GET_MODE_BITSIZE (inner_mode)
9212 <= HOST_BITS_PER_WIDE_INT)
fda4fc2e 9213 && (STORE_FLAG_VALUE
df38d76e 9214 & ((HOST_WIDE_INT) 1
9215 << (GET_MODE_BITSIZE (inner_mode) - 1))))
fda4fc2e 9216#ifdef FLOAT_STORE_FLAG_VALUE
9217 || (code == LT
9218 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
aa870c1b 9219 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
9220 REAL_VALUE_NEGATIVE (fsfv)))
fda4fc2e 9221#endif
9222 ))
3a2a3a7f 9223 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'))
9b515c48 9224 && (((GET_MODE_CLASS (mode) == MODE_CC)
9225 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9226 || mode == VOIDmode || inner_mode == VOIDmode))
04aa27b1 9227 x = SET_SRC (set);
fda4fc2e 9228 else if (((code == EQ
9229 || (code == GE
df38d76e 9230 && (GET_MODE_BITSIZE (inner_mode)
9231 <= HOST_BITS_PER_WIDE_INT)
fda4fc2e 9232 && GET_MODE_CLASS (inner_mode) == MODE_INT
9233 && (STORE_FLAG_VALUE
df38d76e 9234 & ((HOST_WIDE_INT) 1
9235 << (GET_MODE_BITSIZE (inner_mode) - 1))))
fda4fc2e 9236#ifdef FLOAT_STORE_FLAG_VALUE
9237 || (code == GE
9238 && GET_MODE_CLASS (inner_mode) == MODE_FLOAT
aa870c1b 9239 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
9240 REAL_VALUE_NEGATIVE (fsfv)))
150bae75 9241#endif
fda4fc2e 9242 ))
3a2a3a7f 9243 && GET_RTX_CLASS (GET_CODE (SET_SRC (set))) == '<'
4a8f0b95 9244 && (((GET_MODE_CLASS (mode) == MODE_CC)
9b515c48 9245 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
9246 || mode == VOIDmode || inner_mode == VOIDmode))
9247
04aa27b1 9248 {
04aa27b1 9249 reverse_code = 1;
9250 x = SET_SRC (set);
9251 }
b5dd6230 9252 else
9253 break;
04aa27b1 9254 }
9255
9256 else if (reg_set_p (op0, prev))
9257 /* If this sets OP0, but not directly, we have to give up. */
9258 break;
9259
9260 if (x)
9261 {
9262 if (GET_RTX_CLASS (GET_CODE (x)) == '<')
9263 code = GET_CODE (x);
9264 if (reverse_code)
9265 {
87fd8214 9266 code = reversed_comparison_code (x, prev);
a4110d9a 9267 if (code == UNKNOWN)
9268 return 0;
04aa27b1 9269 reverse_code = 0;
9270 }
9271
9272 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
9273 if (earliest)
9274 *earliest = prev;
9275 }
9276 }
9277
9278 /* If constant is first, put it last. */
9279 if (CONSTANT_P (op0))
9280 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
9281
9282 /* If OP0 is the result of a comparison, we weren't able to find what
9283 was really being compared, so fail. */
9284 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
9285 return 0;
9286
163cbb6c 9287 /* Canonicalize any ordered comparison with integers involving equality
9288 if we can do computations in the relevant mode and we do not
9289 overflow. */
9290
9291 if (GET_CODE (op1) == CONST_INT
9292 && GET_MODE (op0) != VOIDmode
9293 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
04aa27b1 9294 {
df38d76e 9295 HOST_WIDE_INT const_val = INTVAL (op1);
9296 unsigned HOST_WIDE_INT uconst_val = const_val;
163cbb6c 9297 unsigned HOST_WIDE_INT max_val
9298 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
04aa27b1 9299
9300 switch (code)
163cbb6c 9301 {
9302 case LE:
274c11d8 9303 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
b4ddc721 9304 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
163cbb6c 9305 break;
04aa27b1 9306
4354a41d 9307 /* When cross-compiling, const_val might be sign-extended from
9308 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
163cbb6c 9309 case GE:
274c11d8 9310 if ((HOST_WIDE_INT) (const_val & max_val)
163cbb6c 9311 != (((HOST_WIDE_INT) 1
9312 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
b4ddc721 9313 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
163cbb6c 9314 break;
04aa27b1 9315
163cbb6c 9316 case LEU:
4354a41d 9317 if (uconst_val < max_val)
b4ddc721 9318 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
163cbb6c 9319 break;
04aa27b1 9320
163cbb6c 9321 case GEU:
9322 if (uconst_val != 0)
b4ddc721 9323 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
163cbb6c 9324 break;
0dbd1c74 9325
9326 default:
9327 break;
163cbb6c 9328 }
04aa27b1 9329 }
9330
04aa27b1 9331 /* Never return CC0; return zero instead. */
a4589b78 9332 if (CC0_P (op0))
04aa27b1 9333 return 0;
04aa27b1 9334
941522d6 9335 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
04aa27b1 9336}
9337
ff27bca2 9338/* Given a jump insn JUMP, return the condition that will cause it to branch
9339 to its JUMP_LABEL. If the condition cannot be understood, or is an
9340 inequality floating-point comparison which needs to be reversed, 0 will
9341 be returned.
9342
6ef828f9 9343 If EARLIEST is nonzero, it is a pointer to a place where the earliest
ff27bca2 9344 insn used in locating the condition was found. If a replacement test
9345 of the condition is desired, it should be placed in front of that
9346 insn and we will be sure that the inputs are still valid. */
9347
9348rtx
3ad4992f 9349get_condition (rtx jump, rtx *earliest)
ff27bca2 9350{
9351 rtx cond;
9352 int reverse;
b2816317 9353 rtx set;
ff27bca2 9354
9355 /* If this is not a standard conditional jump, we can't parse it. */
9356 if (GET_CODE (jump) != JUMP_INSN
b2816317 9357 || ! any_condjump_p (jump))
ff27bca2 9358 return 0;
b2816317 9359 set = pc_set (jump);
ff27bca2 9360
b2816317 9361 cond = XEXP (SET_SRC (set), 0);
ff27bca2 9362
9363 /* If this branches to JUMP_LABEL when the condition is false, reverse
9364 the condition. */
9365 reverse
b2816317 9366 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
9367 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
ff27bca2 9368
124d766d 9369 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX);
ff27bca2 9370}
9371
04aa27b1 9372/* Similar to above routine, except that we also put an invariant last
9373 unless both operands are invariants. */
9374
9375rtx
3ad4992f 9376get_condition_for_loop (const struct loop *loop, rtx x)
04aa27b1 9377{
e17f5b23 9378 rtx comparison = get_condition (x, (rtx*) 0);
04aa27b1 9379
9380 if (comparison == 0
15fc3eb7 9381 || ! loop_invariant_p (loop, XEXP (comparison, 0))
9382 || loop_invariant_p (loop, XEXP (comparison, 1)))
04aa27b1 9383 return comparison;
9384
941522d6 9385 return gen_rtx_fmt_ee (swap_condition (GET_CODE (comparison)), VOIDmode,
9386 XEXP (comparison, 1), XEXP (comparison, 0));
04aa27b1 9387}
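/* For example (hypothetical): if get_condition returns
   (lt (reg 3) (reg 100)) where (reg 3) is loop-invariant and (reg 100)
   is not, get_condition_for_loop returns the equivalent swapped form
   (gt (reg 100) (reg 3)), so callers can rely on an invariant operand
   being last whenever possible.  */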
3eb9a99d 9388
ca6d6e84 9389/* Scan the function and determine whether it has indirect (computed) jumps.
3eb9a99d 9390
ca6d6e84 9391 This is taken mostly from flow.c; similar code exists elsewhere
9392 in the compiler. It may be useful to put this into rtlanal.c. */
3eb9a99d 9393static int
3ad4992f 9394indirect_jump_in_function_p (rtx start)
3eb9a99d 9395{
9396 rtx insn;
3eb9a99d 9397
ca6d6e84 9398 for (insn = start; insn; insn = NEXT_INSN (insn))
9399 if (computed_jump_p (insn))
9400 return 1;
ba0d241e 9401
9402 return 0;
3eb9a99d 9403}
ce326ac0 9404
9405/* Add MEM to the LOOP_MEMS array, if appropriate. See the
9406 documentation for LOOP_MEMS for the definition of `appropriate'.
9407 This function is called from prescan_loop via for_each_rtx. */
9408
9409static int
3ad4992f 9410insert_loop_mem (rtx *mem, void *data ATTRIBUTE_UNUSED)
ce326ac0 9411{
2ff1269a 9412 struct loop_info *loop_info = data;
ce326ac0 9413 int i;
9414 rtx m = *mem;
9415
9416 if (m == NULL_RTX)
9417 return 0;
9418
9419 switch (GET_CODE (m))
9420 {
9421 case MEM:
9422 break;
9423
ac2a1716 9424 case CLOBBER:
9425 /* We're not interested in MEMs that are only clobbered. */
9426 return -1;
9427
ce326ac0 9428 case CONST_DOUBLE:
9429 /* We're not interested in the MEM associated with a
9430 CONST_DOUBLE, so there's no need to traverse into this. */
9431 return -1;
9432
83d318ad 9433 case EXPR_LIST:
9434 /* We're not interested in any MEMs that only appear in notes. */
9435 return -1;
9436
ce326ac0 9437 default:
9438 /* This is not a MEM. */
9439 return 0;
9440 }
9441
9442 /* See if we've already seen this MEM. */
2ff1269a 9443 for (i = 0; i < loop_info->mems_idx; ++i)
9444 if (rtx_equal_p (m, loop_info->mems[i].mem))
ce326ac0 9445 {
2ff1269a 9446 if (GET_MODE (m) != GET_MODE (loop_info->mems[i].mem))
ce326ac0 9447 /* The modes of the two memory accesses are different. If
9448 this happens, something tricky is going on, and we just
9449 don't optimize accesses to this MEM. */
2ff1269a 9450 loop_info->mems[i].optimize = 0;
ce326ac0 9451
9452 return 0;
9453 }
9454
9455 /* Resize the array, if necessary. */
2ff1269a 9456 if (loop_info->mems_idx == loop_info->mems_allocated)
ce326ac0 9457 {
2ff1269a 9458 if (loop_info->mems_allocated != 0)
9459 loop_info->mems_allocated *= 2;
ce326ac0 9460 else
2ff1269a 9461 loop_info->mems_allocated = 32;
ce326ac0 9462
72ae569f 9463 loop_info->mems = (loop_mem_info *)
2ff1269a 9464 xrealloc (loop_info->mems,
9465 loop_info->mems_allocated * sizeof (loop_mem_info));
ce326ac0 9466 }
9467
9468 /* Actually insert the MEM. */
2ff1269a 9469 loop_info->mems[loop_info->mems_idx].mem = m;
ce326ac0 9470 /* We can't hoist this MEM out of the loop if it's a BLKmode MEM
9471 because we can't put it in a register. We still store it in the
9472 table, though, so that if we see the same address later, but in a
9473 non-BLK mode, we'll not think we can optimize it at that point. */
2ff1269a 9474 loop_info->mems[loop_info->mems_idx].optimize = (GET_MODE (m) != BLKmode);
9475 loop_info->mems[loop_info->mems_idx].reg = NULL_RTX;
9476 ++loop_info->mems_idx;
d1ae477c 9477
9478 return 0;
ce326ac0 9479}
9480
eab144bb 9481
9482/* Allocate REGS->ARRAY or reallocate it if it is too small.
9483
9484 Increment REGS->ARRAY[I].SET_IN_LOOP at the index I of each
 9485 register that is modified by an insn within the loop. If the
9486 value of an element of REGS->array[I].SET_IN_LOOP becomes 127 or
9487 more, stop incrementing it, to avoid overflow.
9488
9489 Store in REGS->ARRAY[I].SINGLE_USAGE the single insn in which
9490 register I is used, if it is only used once. Otherwise, it is set
9491 to 0 (for no uses) or const0_rtx for more than one use. This
9492 parameter may be zero, in which case this processing is not done.
9493
9494 Set REGS->ARRAY[I].MAY_NOT_OPTIMIZE nonzero if we should not
85bd9543 9495 optimize register I. */
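/* For instance (hypothetical outcome): after scanning, a pseudo set
   once and used in exactly one insn has SET_IN_LOOP == 1 and
   SINGLE_USAGE pointing at the using insn; a pseudo used by two or
   more insns has SINGLE_USAGE == const0_rtx; and a pseudo modified by
   127 or more insns saturates with SET_IN_LOOP == 127.  */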
ce326ac0 9496
9497static void
3ad4992f 9498loop_regs_scan (const struct loop *loop, int extra_size)
ce326ac0 9499{
e9b78d43 9500 struct loop_regs *regs = LOOP_REGS (loop);
eab144bb 9501 int old_nregs;
9502 /* last_set[n] is nonzero iff reg n has been set in the current
9503 basic block. In that case, it is the insn that last set reg n. */
9504 rtx *last_set;
9505 rtx insn;
eab144bb 9506 int i;
ce326ac0 9507
eab144bb 9508 old_nregs = regs->num;
9509 regs->num = max_reg_num ();
4a8f0b95 9510
eab144bb 9511 /* Grow the regs array if not allocated or too small. */
9512 if (regs->num >= regs->size)
ce326ac0 9513 {
eab144bb 9514 regs->size = regs->num + extra_size;
4bb30577 9515
eab144bb 9516 regs->array = (struct loop_reg *)
9517 xrealloc (regs->array, regs->size * sizeof (*regs->array));
9518
9519 /* Zero the new elements. */
9520 memset (regs->array + old_nregs, 0,
9521 (regs->size - old_nregs) * sizeof (*regs->array));
9522 }
ce326ac0 9523
eab144bb 9524 /* Clear previously scanned fields but do not clear n_times_set. */
9525 for (i = 0; i < old_nregs; i++)
9526 {
9527 regs->array[i].set_in_loop = 0;
9528 regs->array[i].may_not_optimize = 0;
9529 regs->array[i].single_usage = NULL_RTX;
9530 }
9531
9532 last_set = (rtx *) xcalloc (regs->num, sizeof (rtx));
05cb4e54 9533
eab144bb 9534 /* Scan the loop, recording register usage. */
9535 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9536 insn = NEXT_INSN (insn))
9537 {
9538 if (INSN_P (insn))
05cb4e54 9539 {
eab144bb 9540 /* Record registers that have exactly one use. */
9541 find_single_use_in_loop (regs, insn, PATTERN (insn));
8a4c0d5c 9542
eab144bb 9543 /* Include uses in REG_EQUAL notes. */
9544 if (REG_NOTES (insn))
9545 find_single_use_in_loop (regs, insn, REG_NOTES (insn));
ce326ac0 9546
eab144bb 9547 if (GET_CODE (PATTERN (insn)) == SET
9548 || GET_CODE (PATTERN (insn)) == CLOBBER)
9549 count_one_set (regs, insn, PATTERN (insn), last_set);
9550 else if (GET_CODE (PATTERN (insn)) == PARALLEL)
9551 {
19cb6b50 9552 int i;
eab144bb 9553 for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
9554 count_one_set (regs, insn, XVECEXP (PATTERN (insn), 0, i),
9555 last_set);
9556 }
d1ae477c 9557 }
ce326ac0 9558
eab144bb 9559 if (GET_CODE (insn) == CODE_LABEL || GET_CODE (insn) == JUMP_INSN)
9560 memset (last_set, 0, regs->num * sizeof (rtx));
d483846f 9561
9562 /* Invalidate all registers used for function argument passing.
9563 We check rtx_varies_p for the same reason as below, to allow
9564 optimizing PIC calculations. */
9565 if (GET_CODE (insn) == CALL_INSN)
9566 {
9567 rtx link;
3ad4992f 9568 for (link = CALL_INSN_FUNCTION_USAGE (insn);
9569 link;
d483846f 9570 link = XEXP (link, 1))
9571 {
9572 rtx op, reg;
9573
9574 if (GET_CODE (op = XEXP (link, 0)) == USE
9575 && GET_CODE (reg = XEXP (op, 0)) == REG
9576 && rtx_varies_p (reg, 1))
9577 regs->array[REGNO (reg)].may_not_optimize = 1;
9578 }
9579 }
eab144bb 9580 }
ce326ac0 9581
dea91d92 9582 /* Invalidate all hard registers clobbered by calls. With one exception:
9583 a call-clobbered PIC register is still function-invariant for our
9584 purposes, since we can hoist any PIC calculations out of the loop.
9585 Thus the call to rtx_varies_p. */
9586 if (LOOP_INFO (loop)->has_call)
9587 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
9588 if (TEST_HARD_REG_BIT (regs_invalidated_by_call, i)
9e984a23 9589 && rtx_varies_p (regno_reg_rtx[i], 1))
8851e806 9590 {
9591 regs->array[i].may_not_optimize = 1;
9592 regs->array[i].set_in_loop = 1;
9593 }
4a8f0b95 9594
b9bec878 9595#ifdef AVOID_CCMODE_COPIES
eab144bb 9596 /* Don't try to move insns which set CC registers if we should not
9597 create CCmode register copies. */
9598 for (i = regs->num - 1; i >= FIRST_PSEUDO_REGISTER; i--)
9599 if (GET_MODE_CLASS (GET_MODE (regno_reg_rtx[i])) == MODE_CC)
9600 regs->array[i].may_not_optimize = 1;
b9bec878 9601#endif
4bb30577 9602
eab144bb 9603 /* Set regs->array[I].n_times_set for the new registers. */
9604 for (i = old_nregs; i < regs->num; i++)
9605 regs->array[i].n_times_set = regs->array[i].set_in_loop;
b9bec878 9606
eab144bb 9607 free (last_set);
ce326ac0 9608}
9609
85bd9543 9610/* Returns the number of real INSNs in the LOOP. */
9611
9612static int
3ad4992f 9613count_insns_in_loop (const struct loop *loop)
85bd9543 9614{
9615 int count = 0;
9616 rtx insn;
9617
9618 for (insn = loop->top ? loop->top : loop->start; insn != loop->end;
9619 insn = NEXT_INSN (insn))
9620 if (INSN_P (insn))
9621 ++count;
9622
9623 return count;
9624}
eab144bb 9625
ec7d7ef9 9626/* Move MEMs into registers for the duration of the loop. */
ce326ac0 9627
9628static void
3ad4992f 9629load_mems (const struct loop *loop)
ce326ac0 9630{
2ff1269a 9631 struct loop_info *loop_info = LOOP_INFO (loop);
e9b78d43 9632 struct loop_regs *regs = LOOP_REGS (loop);
ce326ac0 9633 int maybe_never = 0;
9634 int i;
fb0ac82c 9635 rtx p, prev_ebb_head;
ce326ac0 9636 rtx label = NULL_RTX;
76c229ff 9637 rtx end_label;
8bc5499f 9638 /* Nonzero if the next instruction may never be executed. */
9639 int next_maybe_never = 0;
4acc436e 9640 unsigned int last_max_reg = max_reg_num ();
ce326ac0 9641
2ff1269a 9642 if (loop_info->mems_idx == 0)
8bc5499f 9643 return;
ce326ac0 9644
76c229ff 9645 /* We cannot use next_label here because it skips over normal insns. */
9646 end_label = next_nonnote_insn (loop->end);
9647 if (end_label && GET_CODE (end_label) != CODE_LABEL)
9648 end_label = NULL_RTX;
1617c276 9649
76c229ff 9650 /* Check to see if it's possible that some instructions in the loop are
9651 never executed. Also check if there is a goto out of the loop other
9652 than right after the end of the loop. */
4a8f0b95 9653 for (p = next_insn_in_loop (loop, loop->scan_start);
349291aa 9654 p != NULL_RTX;
ec7d7ef9 9655 p = next_insn_in_loop (loop, p))
8bc5499f 9656 {
9657 if (GET_CODE (p) == CODE_LABEL)
9658 maybe_never = 1;
9659 else if (GET_CODE (p) == JUMP_INSN
9660 /* If we enter the loop in the middle, and scan
9661 around to the beginning, don't set maybe_never
9662 for that. This must be an unconditional jump,
9663 otherwise the code at the top of the loop might
9664 never be executed. Unconditional jumps are
9665 followed by a barrier and then the loop end. */
4a8f0b95 9666 && ! (GET_CODE (p) == JUMP_INSN
ec7d7ef9 9667 && JUMP_LABEL (p) == loop->top
9668 && NEXT_INSN (NEXT_INSN (p)) == loop->end
b2816317 9669 && any_uncondjump_p (p)))
ce326ac0 9670 {
76c229ff 9671 /* If this is a jump outside of the loop but not right
9672 after the end of the loop, we would have to emit new fixup
9673 sequences for each such label. */
41441b59 9674 if (/* If we can't tell where control might go when this
9675 JUMP_INSN is executed, we must be conservative. */
9676 !JUMP_LABEL (p)
9677 || (JUMP_LABEL (p) != end_label
9678 && (INSN_UID (JUMP_LABEL (p)) >= max_uid_for_loop
9679 || INSN_LUID (JUMP_LABEL (p)) < INSN_LUID (loop->start)
9680 || INSN_LUID (JUMP_LABEL (p)) > INSN_LUID (loop->end))))
76c229ff 9681 return;
9682
b2816317 9683 if (!any_condjump_p (p))
8bc5499f 9684 /* Something complicated. */
ce326ac0 9685 maybe_never = 1;
8bc5499f 9686 else
9687 /* If there are any more instructions in the loop, they
9688 might not be reached. */
4a8f0b95 9689 next_maybe_never = 1;
9690 }
8bc5499f 9691 else if (next_maybe_never)
9692 maybe_never = 1;
9693 }
9694
76c229ff 9695 /* Find start of the extended basic block that enters the loop. */
9696 for (p = loop->start;
9697 PREV_INSN (p) && GET_CODE (p) != CODE_LABEL;
9698 p = PREV_INSN (p))
9699 ;
fb0ac82c 9700 prev_ebb_head = p;
76c229ff 9701
9702 cselib_init ();
9703
9704 /* Build table of mems that get set to constant values before the
9705 loop. */
9706 for (; p != loop->start; p = NEXT_INSN (p))
9707 cselib_process_insn (p);
9708
8bc5499f 9709 /* Actually move the MEMs. */
2ff1269a 9710 for (i = 0; i < loop_info->mems_idx; ++i)
8bc5499f 9711 {
9a01d2c3 9712 regset_head load_copies;
9713 regset_head store_copies;
8bc5499f 9714 int written = 0;
9715 rtx reg;
2ff1269a 9716 rtx mem = loop_info->mems[i].mem;
8bc5499f 9717 rtx mem_list_entry;
ce326ac0 9718
4a8f0b95 9719 if (MEM_VOLATILE_P (mem)
15fc3eb7 9720 || loop_invariant_p (loop, XEXP (mem, 0)) != 1)
8bc5499f 9721 /* There's no telling whether or not MEM is modified. */
2ff1269a 9722 loop_info->mems[i].optimize = 0;
8bc5499f 9723
9724 /* Go through the MEMs written to in the loop to see if this
9725 one is aliased by one of them. */
2ff1269a 9726 mem_list_entry = loop_info->store_mems;
8bc5499f 9727 while (mem_list_entry)
ce326ac0 9728 {
8bc5499f 9729 if (rtx_equal_p (mem, XEXP (mem_list_entry, 0)))
9730 written = 1;
9731 else if (true_dependence (XEXP (mem_list_entry, 0), VOIDmode,
9732 mem, rtx_varies_p))
ce326ac0 9733 {
8bc5499f 9734 /* MEM is indeed aliased by this store. */
2ff1269a 9735 loop_info->mems[i].optimize = 0;
8bc5499f 9736 break;
ce326ac0 9737 }
8bc5499f 9738 mem_list_entry = XEXP (mem_list_entry, 1);
9739 }
ee261446 9740
9741 if (flag_float_store && written
9742 && GET_MODE_CLASS (GET_MODE (mem)) == MODE_FLOAT)
2ff1269a 9743 loop_info->mems[i].optimize = 0;
4a8f0b95 9744
8bc5499f 9745 /* If this MEM is written to, we must be sure that there
4a8f0b95 9746 are no reads from another MEM that aliases this one. */
2ff1269a 9747 if (loop_info->mems[i].optimize && written)
8bc5499f 9748 {
9749 int j;
ce326ac0 9750
2ff1269a 9751 for (j = 0; j < loop_info->mems_idx; ++j)
8bc5499f 9752 {
9753 if (j == i)
9754 continue;
9755 else if (true_dependence (mem,
9756 VOIDmode,
2ff1269a 9757 loop_info->mems[j].mem,
8bc5499f 9758 rtx_varies_p))
ce326ac0 9759 {
2ff1269a 9760 /* It's not safe to hoist loop_info->mems[i] out of
8bc5499f 9761 the loop because writes to it might not be
2ff1269a 9762 seen by reads from loop_info->mems[j]. */
9763 loop_info->mems[i].optimize = 0;
8bc5499f 9764 break;
ce326ac0 9765 }
9766 }
8bc5499f 9767 }
ce326ac0 9768
8bc5499f 9769 if (maybe_never && may_trap_p (mem))
9770 /* We can't access the MEM outside the loop; it might
9771 cause a trap that wouldn't have happened otherwise. */
2ff1269a 9772 loop_info->mems[i].optimize = 0;
4a8f0b95 9773
2ff1269a 9774 if (!loop_info->mems[i].optimize)
8bc5499f 9775 /* We thought we were going to lift this MEM out of the
9776 loop, but later discovered that we could not. */
9777 continue;
ce326ac0 9778
9a01d2c3 9779 INIT_REG_SET (&load_copies);
9780 INIT_REG_SET (&store_copies);
8955ba91 9781
8bc5499f 9782 /* Allocate a pseudo for this MEM. We set REG_USERVAR_P in
9783 order to keep scan_loop from moving stores to this MEM
9784 out of the loop just because this REG is neither a
9785 user-variable nor used in the loop test. */
9786 reg = gen_reg_rtx (GET_MODE (mem));
9787 REG_USERVAR_P (reg) = 1;
2ff1269a 9788 loop_info->mems[i].reg = reg;
8bc5499f 9789
9790 /* Now, replace all references to the MEM with the
fb0ac82c 9791 corresponding pseudos. */
8955ba91 9792 maybe_never = 0;
ec7d7ef9 9793 for (p = next_insn_in_loop (loop, loop->scan_start);
8bc5499f 9794 p != NULL_RTX;
ec7d7ef9 9795 p = next_insn_in_loop (loop, p))
8bc5499f 9796 {
9204e736 9797 if (INSN_P (p))
8955ba91 9798 {
9a01d2c3 9799 rtx set;
9800
9801 set = single_set (p);
9802
8955ba91 9803 /* See if this copies the mem into a register that isn't
9804 modified afterwards. We'll try to do copy propagation
9805 a little further on. */
8955ba91 9806 if (set
9807 /* @@@ This test is _way_ too conservative. */
9808 && ! maybe_never
9809 && GET_CODE (SET_DEST (set)) == REG
9810 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER
9811 && REGNO (SET_DEST (set)) < last_max_reg
05cb4e54 9812 && regs->array[REGNO (SET_DEST (set))].n_times_set == 1
9a01d2c3 9813 && rtx_equal_p (SET_SRC (set), mem))
9814 SET_REGNO_REG_SET (&load_copies, REGNO (SET_DEST (set)));
9815
72ae569f 9816 /* See if this copies the mem from a register that isn't
9a01d2c3 9817 modified afterwards. We'll try to remove the
9818 redundant copy later on by doing a little register
9819 renaming and copy propagation. This will help
9820 to untangle things for the BIV detection code. */
72ae569f 9821 if (set
9822 && ! maybe_never
9823 && GET_CODE (SET_SRC (set)) == REG
9824 && REGNO (SET_SRC (set)) >= FIRST_PSEUDO_REGISTER
9825 && REGNO (SET_SRC (set)) < last_max_reg
05cb4e54 9826 && regs->array[REGNO (SET_SRC (set))].n_times_set == 1
72ae569f 9827 && rtx_equal_p (SET_DEST (set), mem))
9828 SET_REGNO_REG_SET (&store_copies, REGNO (SET_SRC (set)));
9829
bd54bbc6 9830 /* If this is a call which uses / clobbers this memory
9831 location, we must not change the interface here. */
9832 if (GET_CODE (p) == CALL_INSN
9833 && reg_mentioned_p (loop_info->mems[i].mem,
9834 CALL_INSN_FUNCTION_USAGE (p)))
9835 {
9836 cancel_changes (0);
9837 loop_info->mems[i].optimize = 0;
9838 break;
9839 }
9840 else
9841 /* Replace the memory reference with the shadow register. */
9842 replace_loop_mems (p, loop_info->mems[i].mem,
f6981054 9843 loop_info->mems[i].reg, written);
8955ba91 9844 }
9845
9846 if (GET_CODE (p) == CODE_LABEL
9847 || GET_CODE (p) == JUMP_INSN)
9848 maybe_never = 1;
8bc5499f 9849 }
ce326ac0 9850
bd54bbc6 9851 if (! loop_info->mems[i].optimize)
9852 ; /* We found we couldn't do the replacement, so do nothing. */
9853 else if (! apply_change_group ())
8bc5499f 9854 /* We couldn't replace all occurrences of the MEM. */
2ff1269a 9855 loop_info->mems[i].optimize = 0;
8bc5499f 9856 else
9857 {
15fc3eb7 9858 /* Load the memory immediately before LOOP->START, which is
8bc5499f 9859 the NOTE_LOOP_BEG. */
1617c276 9860 cselib_val *e = cselib_lookup (mem, VOIDmode, 0);
9861 rtx set;
9862 rtx best = mem;
9863 int j;
9864 struct elt_loc_list *const_equiv = 0;
9865
9866 if (e)
9867 {
9868 struct elt_loc_list *equiv;
9869 struct elt_loc_list *best_equiv = 0;
9870 for (equiv = e->locs; equiv; equiv = equiv->next)
9871 {
9872 if (CONSTANT_P (equiv->loc))
9873 const_equiv = equiv;
addcf1fa 9874 else if (GET_CODE (equiv->loc) == REG
fb0ac82c 9875 /* Extending hard register lifetimes causes crashes
addcf1fa 9876 on SRC targets. Doing so on non-SRC targets is
9877 probably not a good idea either, since we most
9878 probably have a pseudoregister equivalence as
9879 well. */
9880 && REGNO (equiv->loc) >= FIRST_PSEUDO_REGISTER)
1617c276 9881 best_equiv = equiv;
9882 }
9883 /* Use the constant equivalence if that is cheap enough. */
9884 if (! best_equiv)
9885 best_equiv = const_equiv;
9886 else if (const_equiv
9887 && (rtx_cost (const_equiv->loc, SET)
9888 <= rtx_cost (best_equiv->loc, SET)))
9889 {
9890 best_equiv = const_equiv;
9891 const_equiv = 0;
9892 }
9893
9894 /* If best_equiv is nonzero, we know that MEM is set to a
9895 constant or register before the loop. We will use this
9896 knowledge to initialize the shadow register with that
9897 constant or reg rather than by loading from MEM. */
9898 if (best_equiv)
9899 best = copy_rtx (best_equiv->loc);
9900 }
fb0ac82c 9901
1617c276 9902 set = gen_move_insn (reg, best);
26d6ff2a 9903 set = loop_insn_hoist (loop, set);
fb0ac82c 9904 if (REG_P (best))
9905 {
9906 for (p = prev_ebb_head; p != loop->start; p = NEXT_INSN (p))
9907 if (REGNO_LAST_UID (REGNO (best)) == INSN_UID (p))
9908 {
9909 REGNO_LAST_UID (REGNO (best)) = INSN_UID (set);
9910 break;
9911 }
9912 }
9913
1617c276 9914 if (const_equiv)
c080d8f0 9915 set_unique_reg_note (set, REG_EQUAL, copy_rtx (const_equiv->loc));
ce326ac0 9916
8bc5499f 9917 if (written)
9918 {
9919 if (label == NULL_RTX)
ce326ac0 9920 {
8bc5499f 9921 label = gen_label_rtx ();
ec7d7ef9 9922 emit_label_after (label, loop->end);
ce326ac0 9923 }
9924
8bc5499f 9925 /* Store the memory immediately after END, which is
9926 the NOTE_LOOP_END. */
4a8f0b95 9927 set = gen_move_insn (copy_rtx (mem), reg);
0ab94f0c 9928 loop_insn_emit_after (loop, 0, label, set);
8bc5499f 9929 }
9930
9931 if (loop_dump_stream)
9932 {
9933 fprintf (loop_dump_stream, "Hoisted regno %d %s from ",
9934 REGNO (reg), (written ? "r/w" : "r/o"));
9935 print_rtl (loop_dump_stream, mem);
9936 fputc ('\n', loop_dump_stream);
ce326ac0 9937 }
8955ba91 9938
9939 /* Attempt a bit of copy propagation. This helps untangle the
9940 data flow, and enables {basic,general}_induction_var to find
9941 more bivs/givs. */
9942 EXECUTE_IF_SET_IN_REG_SET
9a01d2c3 9943 (&load_copies, FIRST_PSEUDO_REGISTER, j,
8955ba91 9944 {
9a01d2c3 9945 try_copy_prop (loop, reg, j);
8955ba91 9946 });
9a01d2c3 9947 CLEAR_REG_SET (&load_copies);
9948
9949 EXECUTE_IF_SET_IN_REG_SET
9950 (&store_copies, FIRST_PSEUDO_REGISTER, j,
9951 {
9952 try_swap_copy_prop (loop, reg, j);
9953 });
9954 CLEAR_REG_SET (&store_copies);
ce326ac0 9955 }
9956 }
9957
76c229ff 9958 if (label != NULL_RTX && end_label != NULL_RTX)
ce326ac0 9959 {
9960 /* Now, we need to replace all references to the previous exit
9961 label with the new one. */
cda612f5 9962 replace_label_data rr;
6be0adba 9963 rr.r1 = end_label;
9964 rr.r2 = label;
cda612f5 9965 rr.update_label_nuses = true;
ce326ac0 9966
ec7d7ef9 9967 for (p = loop->start; p != loop->end; p = NEXT_INSN (p))
236f2840 9968 {
9969 for_each_rtx (&p, replace_label, &rr);
236f2840 9970 }
ce326ac0 9971 }
1617c276 9972
9973 cselib_finish ();
ce326ac0 9974}
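
/* A source-level sketch of the effect of this function, assuming a
   non-volatile MEM whose address P is loop-invariant and which is both
   read and written inside the loop; all names are illustrative:

	while (n--)
	  {
	    s += *p;
	    *p = s;
	  }

   becomes, in effect,

	shadow = *p;
	while (n--)
	  {
	    s += shadow;
	    shadow = s;
	  }
	*p = shadow;

   The load is hoisted into the pre-header (or replaced outright by a
   cselib-provided constant or register equivalence when that is cheap
   enough), every reference inside the loop is rewritten to the shadow
   pseudo, and for a written MEM the store back is emitted after a new
   label past the loop end so that the normal exits reach it.  */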
9975
78918e47 9976/* For communication between note_reg_stored and its caller. */
9977struct note_reg_stored_arg
9978{
9979 int set_seen;
9980 rtx reg;
9981};
9982
9983/* Called via note_stores, record in SET_SEEN whether X, which is written,
9984 is equal to ARG. */
9985static void
3ad4992f 9986note_reg_stored (rtx x, rtx setter ATTRIBUTE_UNUSED, void *arg)
78918e47 9987{
4a8f0b95 9988 struct note_reg_stored_arg *t = (struct note_reg_stored_arg *) arg;
78918e47 9989 if (t->reg == x)
9990 t->set_seen = 1;
9991}
9992
8955ba91 9993/* Try to replace every occurrence of pseudo REGNO with REPLACEMENT.
9994 There must be exactly one insn that sets this pseudo; it will be
9995 deleted if all replacements succeed and we can prove that the register
15fc3eb7 9996 is not used after the loop. */
02e7a332 9997
8955ba91 9998static void
3ad4992f 9999try_copy_prop (const struct loop *loop, rtx replacement, unsigned int regno)
8955ba91 10000{
78918e47 10001 /* This is the reg that we are copying from. */
10002 rtx reg_rtx = regno_reg_rtx[regno];
8955ba91 10003 rtx init_insn = 0;
10004 rtx insn;
78918e47 10005 /* These help keep track of whether we replaced all uses of the reg. */
10006 int replaced_last = 0;
10007 int store_is_first = 0;
10008
ec7d7ef9 10009 for (insn = next_insn_in_loop (loop, loop->scan_start);
8955ba91 10010 insn != NULL_RTX;
ec7d7ef9 10011 insn = next_insn_in_loop (loop, insn))
8955ba91 10012 {
10013 rtx set;
7240b503 10014
78918e47 10015 /* Only substitute within one extended basic block from the initializing
10016 insn. */
10017 if (GET_CODE (insn) == CODE_LABEL && init_insn)
10018 break;
7240b503 10019
9204e736 10020 if (! INSN_P (insn))
8955ba91 10021 continue;
78918e47 10022
10023 /* Is this the initializing insn? */
8955ba91 10024 set = single_set (insn);
10025 if (set
10026 && GET_CODE (SET_DEST (set)) == REG
10027 && REGNO (SET_DEST (set)) == regno)
10028 {
10029 if (init_insn)
10030 abort ();
78918e47 10031
8955ba91 10032 init_insn = insn;
78918e47 10033 if (REGNO_FIRST_UID (regno) == INSN_UID (insn))
10034 store_is_first = 1;
10035 }
10036
10037 /* Only substitute after seeing the initializing insn. */
10038 if (init_insn && insn != init_insn)
4a8f0b95 10039 {
78918e47 10040 struct note_reg_stored_arg arg;
78918e47 10041
2ff1269a 10042 replace_loop_regs (insn, reg_rtx, replacement);
78918e47 10043 if (REGNO_LAST_UID (regno) == INSN_UID (insn))
10044 replaced_last = 1;
10045
10046 /* Stop replacing when REPLACEMENT is modified. */
10047 arg.reg = replacement;
10048 arg.set_seen = 0;
10049 note_stores (PATTERN (insn), note_reg_stored, &arg);
10050 if (arg.set_seen)
2f0bfe72 10051 {
10052 rtx note = find_reg_note (insn, REG_EQUAL, NULL);
10053
10054 /* It is possible that we've turned a previously valid REG_EQUAL note
10055 into an invalid one: we changed REGNO to REPLACEMENT, and since
10056 REPLACEMENT (unlike REGNO) is modified here, the meaning may differ. */
10057 if (note && reg_mentioned_p (replacement, XEXP (note, 0)))
10058 remove_note (insn, note);
10059 break;
10060 }
8955ba91 10061 }
8955ba91 10062 }
10063 if (! init_insn)
10064 abort ();
10065 if (apply_change_group ())
10066 {
78918e47 10067 if (loop_dump_stream)
10068 fprintf (loop_dump_stream, " Replaced reg %d", regno);
10069 if (store_is_first && replaced_last)
8955ba91 10070 {
e9107d97 10071 rtx first;
10072 rtx retval_note;
10073
10074 /* Assume we're just deleting INIT_INSN. */
10075 first = init_insn;
10076 /* Look for REG_RETVAL note. If we're deleting the end of
10077 the libcall sequence, the whole sequence can go. */
10078 retval_note = find_reg_note (init_insn, REG_RETVAL, NULL_RTX);
10079 /* If we found a REG_RETVAL note, find the first instruction
10080 in the sequence. */
10081 if (retval_note)
10082 first = XEXP (retval_note, 0);
10083
10084 /* Delete the instructions. */
10085 loop_delete_insns (first, init_insn);
8955ba91 10086 }
10087 if (loop_dump_stream)
78918e47 10088 fprintf (loop_dump_stream, ".\n");
8955ba91 10089 }
10090}
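
/* An illustrative (made-up) instance of try_copy_prop: after load_mems
   has replaced a MEM with shadow pseudo 150, a loop body such as

	(set (reg 200) (reg 150))	;; the only set of pseudo 200
	... uses of (reg 200) ...

   has every later use of (reg 200) within the extended basic block
   rewritten to (reg 150); if the copy was both the first and the last
   reference to pseudo 200, the dead copy insn (or the whole libcall
   sequence it ends, per its REG_RETVAL note) is deleted.  */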
10091
e9107d97 10092/* Replace all the instructions from FIRST up to and including LAST
10093 with NOTE_INSN_DELETED notes. */
10094
10095static void
3ad4992f 10096loop_delete_insns (rtx first, rtx last)
e9107d97 10097{
10098 while (1)
10099 {
e9107d97 10100 if (loop_dump_stream)
10101 fprintf (loop_dump_stream, ", deleting init_insn (%d)",
10102 INSN_UID (first));
b36d64df 10103 delete_insn (first);
e9107d97 10104
10105 /* If this was the LAST instruction we're supposed to delete,
10106 we're done. */
10107 if (first == last)
10108 break;
10109
10110 first = NEXT_INSN (first);
10111 }
10112}
10113
9a01d2c3 10114/* Try to replace occurrences of pseudo REGNO with REPLACEMENT within
10115 loop LOOP if the order of the sets of these registers can be
10116 swapped. There must be exactly one insn within the loop that sets
10117 this pseudo, followed immediately by a move insn that copies
10118 REGNO into REPLACEMENT. */
10119static void
3ad4992f 10120try_swap_copy_prop (const struct loop *loop, rtx replacement,
10121 unsigned int regno)
9a01d2c3 10122{
10123 rtx insn;
4acc436e 10124 rtx set = NULL_RTX;
9a01d2c3 10125 unsigned int new_regno;
10126
10127 new_regno = REGNO (replacement);
10128
10129 for (insn = next_insn_in_loop (loop, loop->scan_start);
10130 insn != NULL_RTX;
10131 insn = next_insn_in_loop (loop, insn))
10132 {
10133 /* Search for the insn that copies REGNO to NEW_REGNO. */
4acc436e 10134 if (INSN_P (insn)
9a01d2c3 10135 && (set = single_set (insn))
10136 && GET_CODE (SET_DEST (set)) == REG
10137 && REGNO (SET_DEST (set)) == new_regno
10138 && GET_CODE (SET_SRC (set)) == REG
10139 && REGNO (SET_SRC (set)) == regno)
10140 break;
10141 }
10142
28b58870 10143 if (insn != NULL_RTX)
9a01d2c3 10144 {
10145 rtx prev_insn;
10146 rtx prev_set;
72ae569f 10147
9a01d2c3 10148 /* Some DEF-USE info would come in handy here to make this
10149 function more general. For now, just check the previous insn,
10150 which is the most likely candidate for setting REGNO. */
72ae569f 10151
9a01d2c3 10152 prev_insn = PREV_INSN (insn);
72ae569f 10153
4acc436e 10154 if (INSN_P (prev_insn)
9a01d2c3 10155 && (prev_set = single_set (prev_insn))
10156 && GET_CODE (SET_DEST (prev_set)) == REG
10157 && REGNO (SET_DEST (prev_set)) == regno)
10158 {
10159 /* We have:
10160 (set (reg regno) (expr))
10161 (set (reg new_regno) (reg regno))
72ae569f 10162
9a01d2c3 10163 so try converting this to:
10164 (set (reg new_regno) (expr))
10165 (set (reg regno) (reg new_regno))
10166
10167 The former construct is often generated when a global
10168 variable used for an induction variable is shadowed by a
10169 register (NEW_REGNO). The latter construct improves the
10170 chances of GIV replacement and BIV elimination. */
10171
10172 validate_change (prev_insn, &SET_DEST (prev_set),
10173 replacement, 1);
10174 validate_change (insn, &SET_DEST (set),
10175 SET_SRC (set), 1);
10176 validate_change (insn, &SET_SRC (set),
10177 replacement, 1);
10178
10179 if (apply_change_group ())
10180 {
10181 if (loop_dump_stream)
72ae569f 10182 fprintf (loop_dump_stream,
10183 " Swapped set of reg %d at %d with reg %d at %d.\n",
10184 regno, INSN_UID (insn),
9a01d2c3 10185 new_regno, INSN_UID (prev_insn));
10186
10187 /* Update first use of REGNO. */
10188 if (REGNO_FIRST_UID (regno) == INSN_UID (prev_insn))
10189 REGNO_FIRST_UID (regno) = INSN_UID (insn);
10190
10191 /* Now perform copy propagation to hopefully
10192 remove all uses of REGNO within the loop. */
10193 try_copy_prop (loop, replacement, regno);
10194 }
10195 }
10196 }
10197}
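
/* A made-up RTL fragment showing the swap: given

	(set (reg 123) (plus (reg 123) (const_int 1)))
	(set (reg 456) (reg 123))

   where reg 456 is the shadow pseudo of a hoisted MEM, the two
   destinations are exchanged to give

	(set (reg 456) (plus (reg 123) (const_int 1)))
	(set (reg 123) (reg 456))

   after which try_copy_prop can replace the remaining uses of reg 123
   in the loop with reg 456.  */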
10198
4d9dcced 10199/* Worker function for find_mem_in_note, called via for_each_rtx. */
10200
f6981054 10201static int
3ad4992f 10202find_mem_in_note_1 (rtx *x, void *data)
f6981054 10203{
10204 if (*x != NULL_RTX && GET_CODE (*x) == MEM)
10205 {
10206 rtx *res = (rtx *) data;
10207 *res = *x;
10208 return 1;
10209 }
10210 return 0;
10211}
10212
4d9dcced 10213/* Returns the first MEM found in NOTE by depth-first search. */
10214
f6981054 10215static rtx
3ad4992f 10216find_mem_in_note (rtx note)
f6981054 10217{
10218 if (note && for_each_rtx (&note, find_mem_in_note_1, &note))
10219 return note;
10220 return NULL_RTX;
10221}
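
/* For example, replace_loop_mems below calls find_mem_in_note (note)
   on each REG_EQUAL note of an insn and removes the note if the MEM
   found in it may depend on the MEM that has just been hoisted.  */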
3ad4992f 10222
ce326ac0 10223/* Replace MEM with its associated pseudo register. This function is
2ff1269a 10224 called from load_mems via for_each_rtx. DATA is actually a pointer
10225 to a structure describing the instruction currently being scanned
ce326ac0 10226 and the MEM we are currently replacing. */
10227
10228static int
3ad4992f 10229replace_loop_mem (rtx *mem, void *data)
ce326ac0 10230{
72ae569f 10231 loop_replace_args *args = (loop_replace_args *) data;
ce326ac0 10232 rtx m = *mem;
10233
10234 if (m == NULL_RTX)
10235 return 0;
10236
10237 switch (GET_CODE (m))
10238 {
10239 case MEM:
10240 break;
10241
10242 case CONST_DOUBLE:
10243 /* We're not interested in the MEM associated with a
10244 CONST_DOUBLE, so there's no need to traverse into one. */
10245 return -1;
10246
10247 default:
10248 /* This is not a MEM. */
10249 return 0;
10250 }
10251
2ff1269a 10252 if (!rtx_equal_p (args->match, m))
ce326ac0 10253 /* This is not the MEM we are currently replacing. */
10254 return 0;
10255
ce326ac0 10256 /* Actually replace the MEM. */
2ff1269a 10257 validate_change (args->insn, mem, args->replacement, 1);
ce326ac0 10258
10259 return 0;
10260}
10261
2ff1269a 10262static void
3ad4992f 10263replace_loop_mems (rtx insn, rtx mem, rtx reg, int written)
72ae569f 10264{
2ff1269a 10265 loop_replace_args args;
10266
10267 args.insn = insn;
10268 args.match = mem;
10269 args.replacement = reg;
10270
10271 for_each_rtx (&insn, replace_loop_mem, &args);
f6981054 10272
10273 /* If we hoist a mem write out of the loop, then REG_EQUAL
10274 notes referring to the mem are no longer valid. */
10275 if (written)
10276 {
10277 rtx note, sub;
10278 rtx *link;
10279
10280 for (link = &REG_NOTES (insn); (note = *link); link = &XEXP (note, 1))
10281 {
10282 if (REG_NOTE_KIND (note) == REG_EQUAL
10283 && (sub = find_mem_in_note (note))
10284 && true_dependence (mem, VOIDmode, sub, rtx_varies_p))
10285 {
10286 /* Remove the note. */
10287 validate_change (NULL_RTX, link, XEXP (note, 1), 1);
10288 break;
10289 }
10290 }
10291 }
2ff1269a 10292}
10293
8955ba91 10294/* Replace one register with another. Called through for_each_rtx; PX points
72ae569f 10295 to the rtx being scanned. DATA is actually a pointer to
2ff1269a 10296 a structure of arguments. */
8955ba91 10297
10298static int
3ad4992f 10299replace_loop_reg (rtx *px, void *data)
8955ba91 10300{
10301 rtx x = *px;
72ae569f 10302 loop_replace_args *args = (loop_replace_args *) data;
8955ba91 10303
10304 if (x == NULL_RTX)
10305 return 0;
10306
2ff1269a 10307 if (x == args->match)
10308 validate_change (args->insn, px, args->replacement, 1);
8955ba91 10309
10310 return 0;
10311}
10312
2ff1269a 10313static void
3ad4992f 10314replace_loop_regs (rtx insn, rtx reg, rtx replacement)
2ff1269a 10315{
10316 loop_replace_args args;
10317
10318 args.insn = insn;
10319 args.match = reg;
10320 args.replacement = replacement;
10321
10322 for_each_rtx (&insn, replace_loop_reg, &args);
10323}
0437fa92 10324\f
89e8d34f 10325/* Emit insn for PATTERN after WHERE_INSN in basic block WHERE_BB
10326 (ignored in the interim). */
10327
10328static rtx
3ad4992f 10329loop_insn_emit_after (const struct loop *loop ATTRIBUTE_UNUSED,
10330 basic_block where_bb ATTRIBUTE_UNUSED, rtx where_insn,
10331 rtx pattern)
89e8d34f 10332{
10333 return emit_insn_after (pattern, where_insn);
10334}
10335
10336
6ef828f9 10337/* If WHERE_INSN is nonzero emit insn for PATTERN before WHERE_INSN
26d6ff2a 10338 in basic block WHERE_BB (ignored in the interim) within the loop
10339 otherwise hoist PATTERN into the loop pre-header. */
10340
0ab94f0c 10341rtx
3ad4992f 10342loop_insn_emit_before (const struct loop *loop,
10343 basic_block where_bb ATTRIBUTE_UNUSED,
10344 rtx where_insn, rtx pattern)
26d6ff2a 10345{
10346 if (! where_insn)
10347 return loop_insn_hoist (loop, pattern);
10348 return emit_insn_before (pattern, where_insn);
10349}
10350
10351
0ab94f0c 10352/* Emit call insn for PATTERN before WHERE_INSN in basic block
10353 WHERE_BB (ignored in the interim) within the loop. */
10354
10355static rtx
3ad4992f 10356loop_call_insn_emit_before (const struct loop *loop ATTRIBUTE_UNUSED,
10357 basic_block where_bb ATTRIBUTE_UNUSED,
10358 rtx where_insn, rtx pattern)
0ab94f0c 10359{
10360 return emit_call_insn_before (pattern, where_insn);
10361}
10362
10363
26d6ff2a 10364/* Hoist insn for PATTERN into the loop pre-header. */
10365
10366rtx
3ad4992f 10367loop_insn_hoist (const struct loop *loop, rtx pattern)
26d6ff2a 10368{
10369 return loop_insn_emit_before (loop, 0, loop->start, pattern);
10370}
89e8d34f 10371
10372
0ab94f0c 10373/* Hoist call insn for PATTERN into the loop pre-header. */
10374
10375static rtx
3ad4992f 10376loop_call_insn_hoist (const struct loop *loop, rtx pattern)
0ab94f0c 10377{
10378 return loop_call_insn_emit_before (loop, 0, loop->start, pattern);
10379}
10380
10381
89e8d34f 10382/* Sink insn for PATTERN after the loop end. */
10383
10384rtx
3ad4992f 10385loop_insn_sink (const struct loop *loop, rtx pattern)
89e8d34f 10386{
10387 return loop_insn_emit_before (loop, 0, loop->sink, pattern);
10388}
10389
d01481af 10390/* bl->final_value can be either general_operand or PLUS of general_operand
b903337a 10391 and constant. Emit sequence of instructions to load it into REG. */
2ecaad6d 10392static rtx
3ad4992f 10393gen_load_of_final_value (rtx reg, rtx final_value)
2ecaad6d 10394{
10395 rtx seq;
10396 start_sequence ();
10397 final_value = force_operand (final_value, reg);
10398 if (final_value != reg)
10399 emit_move_insn (reg, final_value);
31d3e01c 10400 seq = get_insns ();
2ecaad6d 10401 end_sequence ();
10402 return seq;
10403}
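
/* E.g. for a final value of (plus (reg 100) (const_int 8)) (the
   register number is hypothetical), force_operand emits whatever add
   sequence the target requires, computing into REG where possible,
   and a trailing move is added only if the result landed elsewhere.  */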
89e8d34f 10404
10405/* If the loop has multiple exits, emit insn for PATTERN before the
10406 loop to ensure that it will always be executed no matter how the
10407 loop exits. Otherwise, emit the insn for PATTERN after the loop,
10408 since this is slightly more efficient. */
10409
10410static rtx
3ad4992f 10411loop_insn_sink_or_swim (const struct loop *loop, rtx pattern)
89e8d34f 10412{
10413 if (loop->exit_count)
10414 return loop_insn_hoist (loop, pattern);
10415 else
10416 return loop_insn_sink (loop, pattern);
10417}
26d6ff2a 10418\f
3b853ab8 10419static void
3ad4992f 10420loop_ivs_dump (const struct loop *loop, FILE *file, int verbose)
3b853ab8 10421{
10422 struct iv_class *bl;
10423 int iv_num = 0;
10424
10425 if (! loop || ! file)
10426 return;
10427
10428 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10429 iv_num++;
10430
10431 fprintf (file, "Loop %d: %d IV classes\n", loop->num, iv_num);
10432
10433 for (bl = LOOP_IVS (loop)->list; bl; bl = bl->next)
10434 {
10435 loop_iv_class_dump (bl, file, verbose);
10436 fputc ('\n', file);
10437 }
10438}
10439
10440
10441static void
3ad4992f 10442loop_iv_class_dump (const struct iv_class *bl, FILE *file,
10443 int verbose ATTRIBUTE_UNUSED)
3b853ab8 10444{
10445 struct induction *v;
10446 rtx incr;
10447 int i;
10448
10449 if (! bl || ! file)
10450 return;
10451
10452 fprintf (file, "IV class for reg %d, benefit %d\n",
10453 bl->regno, bl->total_benefit);
10454
10455 fprintf (file, " Init insn %d", INSN_UID (bl->init_insn));
10456 if (bl->initial_value)
10457 {
10458 fprintf (file, ", init val: ");
10459 print_simple_rtl (file, bl->initial_value);
10460 }
10461 if (bl->initial_test)
10462 {
10463 fprintf (file, ", init test: ");
10464 print_simple_rtl (file, bl->initial_test);
10465 }
10466 fputc ('\n', file);
10467
10468 if (bl->final_value)
10469 {
10470 fprintf (file, " Final val: ");
10471 print_simple_rtl (file, bl->final_value);
10472 fputc ('\n', file);
10473 }
10474
10475 if ((incr = biv_total_increment (bl)))
10476 {
10477 fprintf (file, " Total increment: ");
10478 print_simple_rtl (file, incr);
10479 fputc ('\n', file);
10480 }
10481
10482 /* List the increments. */
10483 for (i = 0, v = bl->biv; v; v = v->next_iv, i++)
10484 {
10485 fprintf (file, " Inc%d: insn %d, incr: ", i, INSN_UID (v->insn));
10486 print_simple_rtl (file, v->add_val);
10487 fputc ('\n', file);
10488 }
10489
10490 /* List the givs. */
10491 for (i = 0, v = bl->giv; v; v = v->next_iv, i++)
10492 {
4bb30577 10493 fprintf (file, " Giv%d: insn %d, benefit %d, ",
3b853ab8 10494 i, INSN_UID (v->insn), v->benefit);
10495 if (v->giv_type == DEST_ADDR)
8851e806 10496 print_simple_rtl (file, v->mem);
3b853ab8 10497 else
8851e806 10498 print_simple_rtl (file, single_set (v->insn));
3b853ab8 10499 fputc ('\n', file);
10500 }
10501}
10502
10503
bb6f5cc9 10504static void
3ad4992f 10505loop_biv_dump (const struct induction *v, FILE *file, int verbose)
bb6f5cc9 10506{
10507 if (! v || ! file)
10508 return;
10509
10510 fprintf (file,
10511 "Biv %d: insn %d",
10512 REGNO (v->dest_reg), INSN_UID (v->insn));
10513 fprintf (file, " const ");
10514 print_simple_rtl (file, v->add_val);
10515
10516 if (verbose && v->final_value)
10517 {
4bb30577 10518 fputc ('\n', file);
bb6f5cc9 10519 fprintf (file, " final ");
10520 print_simple_rtl (file, v->final_value);
10521 }
10522
10523 fputc ('\n', file);
10524}
10525
10526
10527static void
3ad4992f 10528loop_giv_dump (const struct induction *v, FILE *file, int verbose)
bb6f5cc9 10529{
10530 if (! v || ! file)
10531 return;
10532
10533 if (v->giv_type == DEST_REG)
10534 fprintf (file, "Giv %d: insn %d",
8851e806 10535 REGNO (v->dest_reg), INSN_UID (v->insn));
bb6f5cc9 10536 else
10537 fprintf (file, "Dest address: insn %d",
10538 INSN_UID (v->insn));
4bb30577 10539
bb6f5cc9 10540 fprintf (file, " src reg %d benefit %d",
10541 REGNO (v->src_reg), v->benefit);
10542 fprintf (file, " lifetime %d",
10543 v->lifetime);
4bb30577 10544
bb6f5cc9 10545 if (v->replaceable)
10546 fprintf (file, " replaceable");
4bb30577 10547
bb6f5cc9 10548 if (v->no_const_addval)
10549 fprintf (file, " ncav");
4bb30577 10550
5f155d21 10551 if (v->ext_dependent)
bb6f5cc9 10552 {
5f155d21 10553 switch (GET_CODE (v->ext_dependent))
bb6f5cc9 10554 {
10555 case SIGN_EXTEND:
10556 fprintf (file, " ext se");
10557 break;
10558 case ZERO_EXTEND:
10559 fprintf (file, " ext ze");
10560 break;
10561 case TRUNCATE:
10562 fprintf (file, " ext tr");
e17f5b23 10563 break;
bb6f5cc9 10564 default:
10565 abort ();
10566 }
10567 }
10568
4bb30577 10569 fputc ('\n', file);
bb6f5cc9 10570 fprintf (file, " mult ");
10571 print_simple_rtl (file, v->mult_val);
10572
4bb30577 10573 fputc ('\n', file);
bb6f5cc9 10574 fprintf (file, " add ");
10575 print_simple_rtl (file, v->add_val);
10576
10577 if (verbose && v->final_value)
10578 {
4bb30577 10579 fputc ('\n', file);
bb6f5cc9 10580 fprintf (file, " final ");
10581 print_simple_rtl (file, v->final_value);
10582 }
10583
4bb30577 10584 fputc ('\n', file);
bb6f5cc9 10585}
10586
10587
3b853ab8 10588void
3ad4992f 10589debug_ivs (const struct loop *loop)
3b853ab8 10590{
10591 loop_ivs_dump (loop, stderr, 1);
10592}
10593
10594
10595void
3ad4992f 10596debug_iv_class (const struct iv_class *bl)
3b853ab8 10597{
10598 loop_iv_class_dump (bl, stderr, 1);
10599}
10600
10601
bb6f5cc9 10602void
3ad4992f 10603debug_biv (const struct induction *v)
bb6f5cc9 10604{
10605 loop_biv_dump (v, stderr, 1);
10606}
10607
10608
10609void
3ad4992f 10610debug_giv (const struct induction *v)
bb6f5cc9 10611{
10612 loop_giv_dump (v, stderr, 1);
10613}
10614
10615
0437fa92 10616#define LOOP_BLOCK_NUM_1(INSN) \
10617((INSN) ? (BLOCK_FOR_INSN (INSN) ? BLOCK_NUM (INSN) : -1) : -1)
10618
10619/* The notes do not have an assigned block, so look at the next insn. */
10620#define LOOP_BLOCK_NUM(INSN) \
10621((INSN) ? (GET_CODE (INSN) == NOTE \
10622 ? LOOP_BLOCK_NUM_1 (next_nonnote_insn (INSN)) \
10623 : LOOP_BLOCK_NUM_1 (INSN)) \
10624 : -1)
10625
10626#define LOOP_INSN_UID(INSN) ((INSN) ? INSN_UID (INSN) : -1)
10627
72ae569f 10628static void
3ad4992f 10629loop_dump_aux (const struct loop *loop, FILE *file,
10630 int verbose ATTRIBUTE_UNUSED)
0437fa92 10631{
10632 rtx label;
10633
10634 if (! loop || ! file)
10635 return;
10636
10637 /* Print diagnostics to compare our concept of a loop with
10638 what the loop notes say. */
10639 if (! PREV_INSN (loop->first->head)
10640 || GET_CODE (PREV_INSN (loop->first->head)) != NOTE
10641 || NOTE_LINE_NUMBER (PREV_INSN (loop->first->head))
10642 != NOTE_INSN_LOOP_BEG)
72ae569f 10643 fprintf (file, ";; No NOTE_INSN_LOOP_BEG at %d\n",
0437fa92 10644 INSN_UID (PREV_INSN (loop->first->head)));
10645 if (! NEXT_INSN (loop->last->end)
10646 || GET_CODE (NEXT_INSN (loop->last->end)) != NOTE
10647 || NOTE_LINE_NUMBER (NEXT_INSN (loop->last->end))
10648 != NOTE_INSN_LOOP_END)
10649 fprintf (file, ";; No NOTE_INSN_LOOP_END at %d\n",
10650 INSN_UID (NEXT_INSN (loop->last->end)));
10651
10652 if (loop->start)
10653 {
10654 fprintf (file,
10655 ";; start %d (%d), cont dom %d (%d), cont %d (%d), vtop %d (%d), end %d (%d)\n",
10656 LOOP_BLOCK_NUM (loop->start),
10657 LOOP_INSN_UID (loop->start),
10658 LOOP_BLOCK_NUM (loop->cont),
10659 LOOP_INSN_UID (loop->cont),
10660 LOOP_BLOCK_NUM (loop->cont),
10661 LOOP_INSN_UID (loop->cont),
10662 LOOP_BLOCK_NUM (loop->vtop),
10663 LOOP_INSN_UID (loop->vtop),
10664 LOOP_BLOCK_NUM (loop->end),
10665 LOOP_INSN_UID (loop->end));
10666 fprintf (file, ";; top %d (%d), scan start %d (%d)\n",
10667 LOOP_BLOCK_NUM (loop->top),
72ae569f 10668 LOOP_INSN_UID (loop->top),
0437fa92 10669 LOOP_BLOCK_NUM (loop->scan_start),
10670 LOOP_INSN_UID (loop->scan_start));
10671 fprintf (file, ";; exit_count %d", loop->exit_count);
10672 if (loop->exit_count)
10673 {
10674 fputs (", labels:", file);
10675 for (label = loop->exit_labels; label; label = LABEL_NEXTREF (label))
10676 {
10677 fprintf (file, " %d ",
10678 LOOP_INSN_UID (XEXP (label, 0)));
10679 }
10680 }
10681 fputs ("\n", file);
72ae569f 10682
0437fa92 10683 /* This can happen when a marked loop appears as two nested loops,
10684 say from while (a || b) {}. The inner loop won't match
10685 the loop markers but the outer one will. */
b3d6de89 10686 if (LOOP_BLOCK_NUM (loop->cont) != loop->latch->index)
0437fa92 10687 fprintf (file, ";; NOTE_INSN_LOOP_CONT not in loop latch\n");
10688 }
10689}
0437fa92 10690
10691/* Call this function from the debugger to dump LOOP. */
10692
10693void
3ad4992f 10694debug_loop (const struct loop *loop)
0437fa92 10695{
10696 flow_loop_dump (loop, stderr, loop_dump_aux, 1);
10697}
1fe6de19 10698
10699/* Call this function from the debugger to dump LOOPS. */
10700
10701void
3ad4992f 10702debug_loops (const struct loops *loops)
1fe6de19 10703{
10704 flow_loops_dump (loops, stderr, loop_dump_aux, 1);
10705}