]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/combine.c
[PATCH] Move RTL printing code from sched-vis.c into print-rtl.c
[thirdparty/gcc.git] / gcc / combine.c
1 /* Optimize by combining instructions for GNU compiler.
2 Copyright (C) 1987-2015 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* This module is essentially the "combiner" phase of the U. of Arizona
21 Portable Optimizer, but redone to work on our list-structured
22 representation for RTL instead of their string representation.
23
24 The LOG_LINKS of each insn identify the most recent assignment
25 to each REG used in the insn. It is a list of previous insns,
26 each of which contains a SET for a REG that is used in this insn
27 and not used or set in between. LOG_LINKs never cross basic blocks.
28 They were set up by the preceding pass (lifetime analysis).
29
30 We try to combine each pair of insns joined by a logical link.
31 We also try to combine triplets of insns A, B and C when C has
32 a link back to B and B has a link back to A. Likewise for a
33 small number of quadruplets of insns A, B, C and D for which
34 there's high likelihood of success.
35
36 LOG_LINKS does not have links for use of the CC0. They don't
37 need to, because the insn that sets the CC0 is always immediately
38 before the insn that tests it. So we always regard a branch
39 insn as having a logical link to the preceding insn. The same is true
40 for an insn explicitly using CC0.
41
42 We check (with use_crosses_set_p) to avoid combining in such a way
43 as to move a computation to a place where its value would be different.
44
45 Combination is done by mathematically substituting the previous
46 insn(s) values for the regs they set into the expressions in
47 the later insns that refer to these regs. If the result is a valid insn
48 for our target machine, according to the machine description,
49 we install it, delete the earlier insns, and update the data flow
50 information (LOG_LINKS and REG_NOTES) for what we did.
51
52 There are a few exceptions where the dataflow information isn't
53 completely updated (however this is only a local issue since it is
54 regenerated before the next pass that uses it):
55
56 - reg_live_length is not updated
57 - reg_n_refs is not adjusted in the rare case when a register is
58 no longer required in a computation
59 - there are extremely rare cases (see distribute_notes) when a
60 REG_DEAD note is lost
61 - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62 removed because there is no way to know which register it was
63 linking
64
65 To simplify substitution, we combine only when the earlier insn(s)
66 consist of only a single assignment. To simplify updating afterward,
67 we never combine when a subroutine call appears in the middle.
68
69 Since we do not represent assignments to CC0 explicitly except when that
70 is all an insn does, there is no LOG_LINKS entry in an insn that uses
71 the condition code for the insn that set the condition code.
72 Fortunately, these two insns must be consecutive.
73 Therefore, every JUMP_INSN is taken to have an implicit logical link
74 to the preceding insn. This is not quite right, since non-jumps can
75 also use the condition code; but in practice such insns would not
76 combine anyway. */
77
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "backend.h"
82 #include "predict.h"
83 #include "tree.h"
84 #include "rtl.h"
85 #include "df.h"
86 #include "alias.h"
87 #include "stor-layout.h"
88 #include "tm_p.h"
89 #include "flags.h"
90 #include "regs.h"
91 #include "cfgrtl.h"
92 #include "cfgcleanup.h"
93 #include "insn-config.h"
94 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
95 #include "expmed.h"
96 #include "dojump.h"
97 #include "explow.h"
98 #include "calls.h"
99 #include "emit-rtl.h"
100 #include "varasm.h"
101 #include "stmt.h"
102 #include "expr.h"
103 #include "insn-attr.h"
104 #include "recog.h"
105 #include "diagnostic-core.h"
106 #include "target.h"
107 #include "insn-codes.h"
108 #include "optabs.h"
109 #include "rtlhooks-def.h"
110 #include "params.h"
111 #include "tree-pass.h"
112 #include "valtrack.h"
113 #include "cgraph.h"
114 #include "rtl-iter.h"
115 #include "print-rtl.h"
116
117 #ifndef LOAD_EXTEND_OP
118 #define LOAD_EXTEND_OP(M) UNKNOWN
119 #endif
120
121 /* Number of attempts to combine instructions in this function. */
122
123 static int combine_attempts;
124
125 /* Number of attempts that got as far as substitution in this function. */
126
127 static int combine_merges;
128
129 /* Number of instructions combined with added SETs in this function. */
130
131 static int combine_extras;
132
133 /* Number of instructions combined in this function. */
134
135 static int combine_successes;
136
137 /* Totals over entire compilation. */
138
139 static int total_attempts, total_merges, total_extras, total_successes;
140
141 /* combine_instructions may try to replace the right hand side of the
142 second instruction with the value of an associated REG_EQUAL note
143 before throwing it at try_combine. That is problematic when there
144 is a REG_DEAD note for a register used in the old right hand side
145 and can cause distribute_notes to do wrong things. This is the
146 second instruction if it has been so modified, null otherwise. */
147
148 static rtx_insn *i2mod;
149
150 /* When I2MOD is nonnull, this is a copy of the old right hand side. */
151
152 static rtx i2mod_old_rhs;
153
154 /* When I2MOD is nonnull, this is a copy of the new right hand side. */
155
156 static rtx i2mod_new_rhs;
157 \f
158 struct reg_stat_type {
159 /* Record last point of death of (hard or pseudo) register n. */
160 rtx_insn *last_death;
161
162 /* Record last point of modification of (hard or pseudo) register n. */
163 rtx_insn *last_set;
164
165 /* The next group of fields allows the recording of the last value assigned
166 to (hard or pseudo) register n. We use this information to see if an
167 operation being processed is redundant given a prior operation performed
168 on the register. For example, an `and' with a constant is redundant if
169 all the zero bits are already known to be turned off.
170
171 We use an approach similar to that used by cse, but change it in the
172 following ways:
173
174 (1) We do not want to reinitialize at each label.
175 (2) It is useful, but not critical, to know the actual value assigned
176 to a register. Often just its form is helpful.
177
178 Therefore, we maintain the following fields:
179
180 last_set_value the last value assigned
181 last_set_label records the value of label_tick when the
182 register was assigned
183 last_set_table_tick records the value of label_tick when a
184 value using the register is assigned
185 last_set_invalid set to nonzero when it is not valid
186 to use the value of this register in some
187 register's value
188
189 To understand the usage of these tables, it is important to understand
190 the distinction between the value in last_set_value being valid and
191 the register being validly contained in some other expression in the
192 table.
193
194 (The next two parameters are out of date).
195
196 reg_stat[i].last_set_value is valid if it is nonzero, and either
197 reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
198
199 Register I may validly appear in any expression returned for the value
200 of another register if reg_n_sets[i] is 1. It may also appear in the
201 value for register J if reg_stat[j].last_set_invalid is zero, or
202 reg_stat[i].last_set_label < reg_stat[j].last_set_label.
203
204 If an expression is found in the table containing a register which may
205 not validly appear in an expression, the register is replaced by
206 something that won't match, (clobber (const_int 0)). */
207
208 /* Record last value assigned to (hard or pseudo) register n. */
209
210 rtx last_set_value;
211
212 /* Record the value of label_tick when an expression involving register n
213 is placed in last_set_value. */
214
215 int last_set_table_tick;
216
217 /* Record the value of label_tick when the value for register n is placed in
218 last_set_value. */
219
220 int last_set_label;
221
222 /* These fields are maintained in parallel with last_set_value and are
223 used to store the mode in which the register was last set, the bits
224 that were known to be zero when it was last set, and the number of
225 sign bits copies it was known to have when it was last set. */
226
227 unsigned HOST_WIDE_INT last_set_nonzero_bits;
228 char last_set_sign_bit_copies;
229 ENUM_BITFIELD(machine_mode) last_set_mode : 8;
230
231 /* Set nonzero if references to register n in expressions should not be
232 used. last_set_invalid is set nonzero when this register is being
233 assigned to and last_set_table_tick == label_tick. */
234
235 char last_set_invalid;
236
237 /* Some registers that are set more than once and used in more than one
238 basic block are nevertheless always set in similar ways. For example,
239 a QImode register may be loaded from memory in two places on a machine
240 where byte loads zero extend.
241
242 We record in the following fields if a register has some leading bits
243 that are always equal to the sign bit, and what we know about the
244 nonzero bits of a register, specifically which bits are known to be
245 zero.
246
247 If an entry is zero, it means that we don't know anything special. */
248
249 unsigned char sign_bit_copies;
250
251 unsigned HOST_WIDE_INT nonzero_bits;
252
253 /* Record the value of the label_tick when the last truncation
254 happened. The field truncated_to_mode is only valid if
255 truncation_label == label_tick. */
256
257 int truncation_label;
258
259 /* Record the last truncation seen for this register. If truncation
260 is not a nop to this mode we might be able to save an explicit
261 truncation if we know that value already contains a truncated
262 value. */
263
264 ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
265 };
266
267
268 static vec<reg_stat_type> reg_stat;
269
270 /* One plus the highest pseudo for which we track REG_N_SETS.
271 regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
272 but during combine_split_insns new pseudos can be created. As we don't have
273 updated DF information in that case, it is hard to initialize the array
274 after growing. The combiner only cares about REG_N_SETS (regno) == 1,
275 so instead of growing the arrays, just assume all newly created pseudos
276 during combine might be set multiple times. */
277
278 static unsigned int reg_n_sets_max;
279
280 /* Record the luid of the last insn that invalidated memory
281 (anything that writes memory, and subroutine calls, but not pushes). */
282
283 static int mem_last_set;
284
285 /* Record the luid of the last CALL_INSN
286 so we can tell whether a potential combination crosses any calls. */
287
288 static int last_call_luid;
289
290 /* When `subst' is called, this is the insn that is being modified
291 (by combining in a previous insn). The PATTERN of this insn
292 is still the old pattern partially modified and it should not be
293 looked at, but this may be used to examine the successors of the insn
294 to judge whether a simplification is valid. */
295
296 static rtx_insn *subst_insn;
297
298 /* This is the lowest LUID that `subst' is currently dealing with.
299 get_last_value will not return a value if the register was set at or
300 after this LUID. If not for this mechanism, we could get confused if
301 I2 or I1 in try_combine were an insn that used the old value of a register
302 to obtain a new value. In that case, we might erroneously get the
303 new value of the register when we wanted the old one. */
304
305 static int subst_low_luid;
306
307 /* This contains any hard registers that are used in newpat; reg_dead_at_p
308 must consider all these registers to be always live. */
309
310 static HARD_REG_SET newpat_used_regs;
311
312 /* This is an insn to which a LOG_LINKS entry has been added. If this
313 insn is the earlier than I2 or I3, combine should rescan starting at
314 that location. */
315
316 static rtx_insn *added_links_insn;
317
318 /* Basic block in which we are performing combines. */
319 static basic_block this_basic_block;
320 static bool optimize_this_for_speed_p;
321
322 \f
323 /* Length of the currently allocated uid_insn_cost array. */
324
325 static int max_uid_known;
326
327 /* The following array records the insn_rtx_cost for every insn
328 in the instruction stream. */
329
330 static int *uid_insn_cost;
331
332 /* The following array records the LOG_LINKS for every insn in the
333 instruction stream as struct insn_link pointers. */
334
335 struct insn_link {
336 rtx_insn *insn;
337 unsigned int regno;
338 struct insn_link *next;
339 };
340
341 static struct insn_link **uid_log_links;
342
343 #define INSN_COST(INSN) (uid_insn_cost[INSN_UID (INSN)])
344 #define LOG_LINKS(INSN) (uid_log_links[INSN_UID (INSN)])
345
346 #define FOR_EACH_LOG_LINK(L, INSN) \
347 for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
348
349 /* Links for LOG_LINKS are allocated from this obstack. */
350
351 static struct obstack insn_link_obstack;
352
353 /* Allocate a link. */
354
355 static inline struct insn_link *
356 alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
357 {
358 struct insn_link *l
359 = (struct insn_link *) obstack_alloc (&insn_link_obstack,
360 sizeof (struct insn_link));
361 l->insn = insn;
362 l->regno = regno;
363 l->next = next;
364 return l;
365 }
366
367 /* Incremented for each basic block. */
368
369 static int label_tick;
370
371 /* Reset to label_tick for each extended basic block in scanning order. */
372
373 static int label_tick_ebb_start;
374
375 /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
376 largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
377
378 static machine_mode nonzero_bits_mode;
379
380 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
381 be safely used. It is zero while computing them and after combine has
382 completed. This former test prevents propagating values based on
383 previously set values, which can be incorrect if a variable is modified
384 in a loop. */
385
386 static int nonzero_sign_valid;
387
388 \f
389 /* Record one modification to rtl structure
390 to be undone by storing old_contents into *where. */
391
392 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
393
394 struct undo
395 {
396 struct undo *next;
397 enum undo_kind kind;
398 union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
399 union { rtx *r; int *i; struct insn_link **l; } where;
400 };
401
402 /* Record a bunch of changes to be undone, up to MAX_UNDO of them.
403 num_undo says how many are currently recorded.
404
405 other_insn is nonzero if we have modified some other insn in the process
406 of working on subst_insn. It must be verified too. */
407
408 struct undobuf
409 {
410 struct undo *undos;
411 struct undo *frees;
412 rtx_insn *other_insn;
413 };
414
415 static struct undobuf undobuf;
416
417 /* Number of times the pseudo being substituted for
418 was found and replaced. */
419
420 static int n_occurrences;
421
422 static rtx reg_nonzero_bits_for_combine (const_rtx, machine_mode, const_rtx,
423 machine_mode,
424 unsigned HOST_WIDE_INT,
425 unsigned HOST_WIDE_INT *);
426 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, machine_mode, const_rtx,
427 machine_mode,
428 unsigned int, unsigned int *);
429 static void do_SUBST (rtx *, rtx);
430 static void do_SUBST_INT (int *, int);
431 static void init_reg_last (void);
432 static void setup_incoming_promotions (rtx_insn *);
433 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
434 static int cant_combine_insn_p (rtx_insn *);
435 static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
436 rtx_insn *, rtx_insn *, rtx *, rtx *);
437 static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
438 static int contains_muldiv (rtx);
439 static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
440 int *, rtx_insn *);
441 static void undo_all (void);
442 static void undo_commit (void);
443 static rtx *find_split_point (rtx *, rtx_insn *, bool);
444 static rtx subst (rtx, rtx, rtx, int, int, int);
445 static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
446 static rtx simplify_if_then_else (rtx);
447 static rtx simplify_set (rtx);
448 static rtx simplify_logical (rtx);
449 static rtx expand_compound_operation (rtx);
450 static const_rtx expand_field_assignment (const_rtx);
451 static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
452 rtx, unsigned HOST_WIDE_INT, int, int, int);
453 static rtx extract_left_shift (rtx, int);
454 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
455 unsigned HOST_WIDE_INT *);
456 static rtx canon_reg_for_combine (rtx, rtx);
457 static rtx force_to_mode (rtx, machine_mode,
458 unsigned HOST_WIDE_INT, int);
459 static rtx if_then_else_cond (rtx, rtx *, rtx *);
460 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
461 static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
462 static rtx make_field_assignment (rtx);
463 static rtx apply_distributive_law (rtx);
464 static rtx distribute_and_simplify_rtx (rtx, int);
465 static rtx simplify_and_const_int_1 (machine_mode, rtx,
466 unsigned HOST_WIDE_INT);
467 static rtx simplify_and_const_int (rtx, machine_mode, rtx,
468 unsigned HOST_WIDE_INT);
469 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
470 HOST_WIDE_INT, machine_mode, int *);
471 static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
472 static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
473 int);
474 static int recog_for_combine (rtx *, rtx_insn *, rtx *);
475 static rtx gen_lowpart_for_combine (machine_mode, rtx);
476 static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
477 rtx, rtx *);
478 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
479 static void update_table_tick (rtx);
480 static void record_value_for_reg (rtx, rtx_insn *, rtx);
481 static void check_promoted_subreg (rtx_insn *, rtx);
482 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
483 static void record_dead_and_set_regs (rtx_insn *);
484 static int get_last_value_validate (rtx *, rtx_insn *, int, int);
485 static rtx get_last_value (const_rtx);
486 static int use_crosses_set_p (const_rtx, int);
487 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
488 static int reg_dead_at_p (rtx, rtx_insn *);
489 static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
490 static int reg_bitfield_target_p (rtx, rtx);
491 static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
492 static void distribute_links (struct insn_link *);
493 static void mark_used_regs_combine (rtx);
494 static void record_promoted_value (rtx_insn *, rtx);
495 static bool unmentioned_reg_p (rtx, rtx);
496 static void record_truncated_values (rtx *, void *);
497 static bool reg_truncated_to_mode (machine_mode, const_rtx);
498 static rtx gen_lowpart_or_truncate (machine_mode, rtx);
499 \f
500
501 /* It is not safe to use ordinary gen_lowpart in combine.
502 See comments in gen_lowpart_for_combine. */
503 #undef RTL_HOOKS_GEN_LOWPART
504 #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
505
506 /* Our implementation of gen_lowpart never emits a new pseudo. */
507 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
508 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
509
510 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
511 #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
512
513 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
514 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
515
516 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
517 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
518
519 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
520
521 \f
522 /* Convenience wrapper for the canonicalize_comparison target hook.
523 Target hooks cannot use enum rtx_code. */
524 static inline void
525 target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
526 bool op0_preserve_value)
527 {
528 int code_int = (int)*code;
529 targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
530 *code = (enum rtx_code)code_int;
531 }
532
533 /* Try to split PATTERN found in INSN. This returns NULL_RTX if
534 PATTERN can not be split. Otherwise, it returns an insn sequence.
535 This is a wrapper around split_insns which ensures that the
536 reg_stat vector is made larger if the splitter creates a new
537 register. */
538
539 static rtx_insn *
540 combine_split_insns (rtx pattern, rtx_insn *insn)
541 {
542 rtx_insn *ret;
543 unsigned int nregs;
544
545 ret = split_insns (pattern, insn);
546 nregs = max_reg_num ();
547 if (nregs > reg_stat.length ())
548 reg_stat.safe_grow_cleared (nregs);
549 return ret;
550 }
551
552 /* This is used by find_single_use to locate an rtx in LOC that
553 contains exactly one use of DEST, which is typically either a REG
554 or CC0. It returns a pointer to the innermost rtx expression
555 containing DEST. Appearances of DEST that are being used to
556 totally replace it are not counted. */
557
558 static rtx *
559 find_single_use_1 (rtx dest, rtx *loc)
560 {
561 rtx x = *loc;
562 enum rtx_code code = GET_CODE (x);
563 rtx *result = NULL;
564 rtx *this_result;
565 int i;
566 const char *fmt;
567
568 switch (code)
569 {
570 case CONST:
571 case LABEL_REF:
572 case SYMBOL_REF:
573 CASE_CONST_ANY:
574 case CLOBBER:
575 return 0;
576
577 case SET:
578 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
579 of a REG that occupies all of the REG, the insn uses DEST if
580 it is mentioned in the destination or the source. Otherwise, we
581 need just check the source. */
582 if (GET_CODE (SET_DEST (x)) != CC0
583 && GET_CODE (SET_DEST (x)) != PC
584 && !REG_P (SET_DEST (x))
585 && ! (GET_CODE (SET_DEST (x)) == SUBREG
586 && REG_P (SUBREG_REG (SET_DEST (x)))
587 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
588 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
589 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
590 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
591 break;
592
593 return find_single_use_1 (dest, &SET_SRC (x));
594
595 case MEM:
596 case SUBREG:
597 return find_single_use_1 (dest, &XEXP (x, 0));
598
599 default:
600 break;
601 }
602
603 /* If it wasn't one of the common cases above, check each expression and
604 vector of this code. Look for a unique usage of DEST. */
605
606 fmt = GET_RTX_FORMAT (code);
607 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
608 {
609 if (fmt[i] == 'e')
610 {
611 if (dest == XEXP (x, i)
612 || (REG_P (dest) && REG_P (XEXP (x, i))
613 && REGNO (dest) == REGNO (XEXP (x, i))))
614 this_result = loc;
615 else
616 this_result = find_single_use_1 (dest, &XEXP (x, i));
617
618 if (result == NULL)
619 result = this_result;
620 else if (this_result)
621 /* Duplicate usage. */
622 return NULL;
623 }
624 else if (fmt[i] == 'E')
625 {
626 int j;
627
628 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
629 {
630 if (XVECEXP (x, i, j) == dest
631 || (REG_P (dest)
632 && REG_P (XVECEXP (x, i, j))
633 && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
634 this_result = loc;
635 else
636 this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
637
638 if (result == NULL)
639 result = this_result;
640 else if (this_result)
641 return NULL;
642 }
643 }
644 }
645
646 return result;
647 }
648
649
650 /* See if DEST, produced in INSN, is used only a single time in the
651 sequel. If so, return a pointer to the innermost rtx expression in which
652 it is used.
653
654 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
655
656 If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
657 care about REG_DEAD notes or LOG_LINKS.
658
659 Otherwise, we find the single use by finding an insn that has a
660 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
661 only referenced once in that insn, we know that it must be the first
662 and last insn referencing DEST. */
663
664 static rtx *
665 find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
666 {
667 basic_block bb;
668 rtx_insn *next;
669 rtx *result;
670 struct insn_link *link;
671
672 if (dest == cc0_rtx)
673 {
674 next = NEXT_INSN (insn);
675 if (next == 0
676 || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
677 return 0;
678
679 result = find_single_use_1 (dest, &PATTERN (next));
680 if (result && ploc)
681 *ploc = next;
682 return result;
683 }
684
685 if (!REG_P (dest))
686 return 0;
687
688 bb = BLOCK_FOR_INSN (insn);
689 for (next = NEXT_INSN (insn);
690 next && BLOCK_FOR_INSN (next) == bb;
691 next = NEXT_INSN (next))
692 if (INSN_P (next) && dead_or_set_p (next, dest))
693 {
694 FOR_EACH_LOG_LINK (link, next)
695 if (link->insn == insn && link->regno == REGNO (dest))
696 break;
697
698 if (link)
699 {
700 result = find_single_use_1 (dest, &PATTERN (next));
701 if (ploc)
702 *ploc = next;
703 return result;
704 }
705 }
706
707 return 0;
708 }
709 \f
710 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
711 insn. The substitution can be undone by undo_all. If INTO is already
712 set to NEWVAL, do not record this change. Because computing NEWVAL might
713 also call SUBST, we have to compute it before we put anything into
714 the undo table. */
715
716 static void
717 do_SUBST (rtx *into, rtx newval)
718 {
719 struct undo *buf;
720 rtx oldval = *into;
721
722 if (oldval == newval)
723 return;
724
725 /* We'd like to catch as many invalid transformations here as
726 possible. Unfortunately, there are way too many mode changes
727 that are perfectly valid, so we'd waste too much effort for
728 little gain doing the checks here. Focus on catching invalid
729 transformations involving integer constants. */
730 if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
731 && CONST_INT_P (newval))
732 {
733 /* Sanity check that we're replacing oldval with a CONST_INT
734 that is a valid sign-extension for the original mode. */
735 gcc_assert (INTVAL (newval)
736 == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
737
738 /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
739 CONST_INT is not valid, because after the replacement, the
740 original mode would be gone. Unfortunately, we can't tell
741 when do_SUBST is called to replace the operand thereof, so we
742 perform this test on oldval instead, checking whether an
743 invalid replacement took place before we got here. */
744 gcc_assert (!(GET_CODE (oldval) == SUBREG
745 && CONST_INT_P (SUBREG_REG (oldval))));
746 gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
747 && CONST_INT_P (XEXP (oldval, 0))));
748 }
749
750 if (undobuf.frees)
751 buf = undobuf.frees, undobuf.frees = buf->next;
752 else
753 buf = XNEW (struct undo);
754
755 buf->kind = UNDO_RTX;
756 buf->where.r = into;
757 buf->old_contents.r = oldval;
758 *into = newval;
759
760 buf->next = undobuf.undos, undobuf.undos = buf;
761 }
762
763 #define SUBST(INTO, NEWVAL) do_SUBST (&(INTO), (NEWVAL))
764
765 /* Similar to SUBST, but NEWVAL is an int expression. Note that substitution
766 for the value of a HOST_WIDE_INT value (including CONST_INT) is
767 not safe. */
768
769 static void
770 do_SUBST_INT (int *into, int newval)
771 {
772 struct undo *buf;
773 int oldval = *into;
774
775 if (oldval == newval)
776 return;
777
778 if (undobuf.frees)
779 buf = undobuf.frees, undobuf.frees = buf->next;
780 else
781 buf = XNEW (struct undo);
782
783 buf->kind = UNDO_INT;
784 buf->where.i = into;
785 buf->old_contents.i = oldval;
786 *into = newval;
787
788 buf->next = undobuf.undos, undobuf.undos = buf;
789 }
790
791 #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT (&(INTO), (NEWVAL))
792
793 /* Similar to SUBST, but just substitute the mode. This is used when
794 changing the mode of a pseudo-register, so that any other
795 references to the entry in the regno_reg_rtx array will change as
796 well. */
797
798 static void
799 do_SUBST_MODE (rtx *into, machine_mode newval)
800 {
801 struct undo *buf;
802 machine_mode oldval = GET_MODE (*into);
803
804 if (oldval == newval)
805 return;
806
807 if (undobuf.frees)
808 buf = undobuf.frees, undobuf.frees = buf->next;
809 else
810 buf = XNEW (struct undo);
811
812 buf->kind = UNDO_MODE;
813 buf->where.r = into;
814 buf->old_contents.m = oldval;
815 adjust_reg_mode (*into, newval);
816
817 buf->next = undobuf.undos, undobuf.undos = buf;
818 }
819
820 #define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE (&(INTO), (NEWVAL))
821
822 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */
823
824 static void
825 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
826 {
827 struct undo *buf;
828 struct insn_link * oldval = *into;
829
830 if (oldval == newval)
831 return;
832
833 if (undobuf.frees)
834 buf = undobuf.frees, undobuf.frees = buf->next;
835 else
836 buf = XNEW (struct undo);
837
838 buf->kind = UNDO_LINKS;
839 buf->where.l = into;
840 buf->old_contents.l = oldval;
841 *into = newval;
842
843 buf->next = undobuf.undos, undobuf.undos = buf;
844 }
845
846 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
847 \f
848 /* Subroutine of try_combine. Determine whether the replacement patterns
849 NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_rtx_cost
850 than the original sequence I0, I1, I2, I3 and undobuf.other_insn. Note
851 that I0, I1 and/or NEWI2PAT may be NULL_RTX. Similarly, NEWOTHERPAT and
852 undobuf.other_insn may also both be NULL_RTX. Return false if the cost
853 of all the instructions can be estimated and the replacements are more
854 expensive than the original sequence. */
855
856 static bool
857 combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
858 rtx newpat, rtx newi2pat, rtx newotherpat)
859 {
860 int i0_cost, i1_cost, i2_cost, i3_cost;
861 int new_i2_cost, new_i3_cost;
862 int old_cost, new_cost;
863
864 /* Lookup the original insn_rtx_costs. */
865 i2_cost = INSN_COST (i2);
866 i3_cost = INSN_COST (i3);
867
868 if (i1)
869 {
870 i1_cost = INSN_COST (i1);
871 if (i0)
872 {
873 i0_cost = INSN_COST (i0);
874 old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
875 ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
876 }
877 else
878 {
879 old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
880 ? i1_cost + i2_cost + i3_cost : 0);
881 i0_cost = 0;
882 }
883 }
884 else
885 {
886 old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
887 i1_cost = i0_cost = 0;
888 }
889
890 /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
891 correct that. */
892 if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
893 old_cost -= i1_cost;
894
895
896 /* Calculate the replacement insn_rtx_costs. */
897 new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
898 if (newi2pat)
899 {
900 new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
901 new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
902 ? new_i2_cost + new_i3_cost : 0;
903 }
904 else
905 {
906 new_cost = new_i3_cost;
907 new_i2_cost = 0;
908 }
909
910 if (undobuf.other_insn)
911 {
912 int old_other_cost, new_other_cost;
913
914 old_other_cost = INSN_COST (undobuf.other_insn);
915 new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
916 if (old_other_cost > 0 && new_other_cost > 0)
917 {
918 old_cost += old_other_cost;
919 new_cost += new_other_cost;
920 }
921 else
922 old_cost = 0;
923 }
924
925 /* Disallow this combination if both new_cost and old_cost are greater than
926 zero, and new_cost is greater than old cost. */
927 int reject = old_cost > 0 && new_cost > old_cost;
928
929 if (dump_file)
930 {
931 fprintf (dump_file, "%s combination of insns ",
932 reject ? "rejecting" : "allowing");
933 if (i0)
934 fprintf (dump_file, "%d, ", INSN_UID (i0));
935 if (i1 && INSN_UID (i1) != INSN_UID (i2))
936 fprintf (dump_file, "%d, ", INSN_UID (i1));
937 fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));
938
939 fprintf (dump_file, "original costs ");
940 if (i0)
941 fprintf (dump_file, "%d + ", i0_cost);
942 if (i1 && INSN_UID (i1) != INSN_UID (i2))
943 fprintf (dump_file, "%d + ", i1_cost);
944 fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);
945
946 if (newi2pat)
947 fprintf (dump_file, "replacement costs %d + %d = %d\n",
948 new_i2_cost, new_i3_cost, new_cost);
949 else
950 fprintf (dump_file, "replacement cost %d\n", new_cost);
951 }
952
953 if (reject)
954 return false;
955
956 /* Update the uid_insn_cost array with the replacement costs. */
957 INSN_COST (i2) = new_i2_cost;
958 INSN_COST (i3) = new_i3_cost;
959 if (i1)
960 {
961 INSN_COST (i1) = 0;
962 if (i0)
963 INSN_COST (i0) = 0;
964 }
965
966 return true;
967 }
968
969
970 /* Delete any insns that copy a register to itself. */
971
972 static void
973 delete_noop_moves (void)
974 {
975 rtx_insn *insn, *next;
976 basic_block bb;
977
978 FOR_EACH_BB_FN (bb, cfun)
979 {
980 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
981 {
982 next = NEXT_INSN (insn);
983 if (INSN_P (insn) && noop_move_p (insn))
984 {
985 if (dump_file)
986 fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
987
988 delete_insn_and_edges (insn);
989 }
990 }
991 }
992 }
993
994 \f
995 /* Return false if we do not want to (or cannot) combine DEF. */
996 static bool
997 can_combine_def_p (df_ref def)
998 {
999 /* Do not consider if it is pre/post modification in MEM. */
1000 if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
1001 return false;
1002
1003 unsigned int regno = DF_REF_REGNO (def);
1004
1005 /* Do not combine frame pointer adjustments. */
1006 if ((regno == FRAME_POINTER_REGNUM
1007 && (!reload_completed || frame_pointer_needed))
1008 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
1009 && regno == HARD_FRAME_POINTER_REGNUM
1010 && (!reload_completed || frame_pointer_needed))
1011 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1012 && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
1013 return false;
1014
1015 return true;
1016 }
1017
1018 /* Return false if we do not want to (or cannot) combine USE. */
1019 static bool
1020 can_combine_use_p (df_ref use)
1021 {
1022 /* Do not consider the usage of the stack pointer by function call. */
1023 if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1024 return false;
1025
1026 return true;
1027 }
1028
1029 /* Fill in log links field for all insns. */
1030
1031 static void
1032 create_log_links (void)
1033 {
1034 basic_block bb;
1035 rtx_insn **next_use;
1036 rtx_insn *insn;
1037 df_ref def, use;
1038
1039 next_use = XCNEWVEC (rtx_insn *, max_reg_num ());
1040
1041 /* Pass through each block from the end, recording the uses of each
1042 register and establishing log links when def is encountered.
1043 Note that we do not clear next_use array in order to save time,
1044 so we have to test whether the use is in the same basic block as def.
1045
1046 There are a few cases below when we do not consider the definition or
1047 usage -- these are taken from original flow.c did. Don't ask me why it is
1048 done this way; I don't know and if it works, I don't want to know. */
1049
1050 FOR_EACH_BB_FN (bb, cfun)
1051 {
1052 FOR_BB_INSNS_REVERSE (bb, insn)
1053 {
1054 if (!NONDEBUG_INSN_P (insn))
1055 continue;
1056
1057 /* Log links are created only once. */
1058 gcc_assert (!LOG_LINKS (insn));
1059
1060 FOR_EACH_INSN_DEF (def, insn)
1061 {
1062 unsigned int regno = DF_REF_REGNO (def);
1063 rtx_insn *use_insn;
1064
1065 if (!next_use[regno])
1066 continue;
1067
1068 if (!can_combine_def_p (def))
1069 continue;
1070
1071 use_insn = next_use[regno];
1072 next_use[regno] = NULL;
1073
1074 if (BLOCK_FOR_INSN (use_insn) != bb)
1075 continue;
1076
1077 /* flow.c claimed:
1078
1079 We don't build a LOG_LINK for hard registers contained
1080 in ASM_OPERANDs. If these registers get replaced,
1081 we might wind up changing the semantics of the insn,
1082 even if reload can make what appear to be valid
1083 assignments later. */
1084 if (regno < FIRST_PSEUDO_REGISTER
1085 && asm_noperands (PATTERN (use_insn)) >= 0)
1086 continue;
1087
1088 /* Don't add duplicate links between instructions. */
1089 struct insn_link *links;
1090 FOR_EACH_LOG_LINK (links, use_insn)
1091 if (insn == links->insn && regno == links->regno)
1092 break;
1093
1094 if (!links)
1095 LOG_LINKS (use_insn)
1096 = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
1097 }
1098
1099 FOR_EACH_INSN_USE (use, insn)
1100 if (can_combine_use_p (use))
1101 next_use[DF_REF_REGNO (use)] = insn;
1102 }
1103 }
1104
1105 free (next_use);
1106 }
1107
1108 /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return
1109 true if we found a LOG_LINK that proves that A feeds B. This only works
1110 if there are no instructions between A and B which could have a link
1111 depending on A, since in that case we would not record a link for B.
1112 We also check the implicit dependency created by a cc0 setter/user
1113 pair. */
1114
1115 static bool
1116 insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
1117 {
1118 struct insn_link *links;
1119 FOR_EACH_LOG_LINK (links, b)
1120 if (links->insn == a)
1121 return true;
1122 if (HAVE_cc0 && sets_cc0_p (a))
1123 return true;
1124 return false;
1125 }
1126 \f
1127 /* Main entry point for combiner. F is the first insn of the function.
1128 NREGS is the first unused pseudo-reg number.
1129
1130 Return nonzero if the combiner has turned an indirect jump
1131 instruction into a direct jump. */
1132 static int
1133 combine_instructions (rtx_insn *f, unsigned int nregs)
1134 {
1135 rtx_insn *insn, *next;
1136 rtx_insn *prev;
1137 struct insn_link *links, *nextlinks;
1138 rtx_insn *first;
1139 basic_block last_bb;
1140
1141 int new_direct_jump_p = 0;
1142
1143 for (first = f; first && !INSN_P (first); )
1144 first = NEXT_INSN (first);
1145 if (!first)
1146 return 0;
1147
1148 combine_attempts = 0;
1149 combine_merges = 0;
1150 combine_extras = 0;
1151 combine_successes = 0;
1152
1153 rtl_hooks = combine_rtl_hooks;
1154
1155 reg_stat.safe_grow_cleared (nregs);
1156
1157 init_recog_no_volatile ();
1158
1159 /* Allocate array for insn info. */
1160 max_uid_known = get_max_uid ();
1161 uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1162 uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1163 gcc_obstack_init (&insn_link_obstack);
1164
1165 nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);
1166
1167 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1168 problems when, for example, we have j <<= 1 in a loop. */
1169
1170 nonzero_sign_valid = 0;
1171 label_tick = label_tick_ebb_start = 1;
1172
1173 /* Scan all SETs and see if we can deduce anything about what
1174 bits are known to be zero for some registers and how many copies
1175 of the sign bit are known to exist for those registers.
1176
1177 Also set any known values so that we can use it while searching
1178 for what bits are known to be set. */
1179
1180 setup_incoming_promotions (first);
1181 /* Allow the entry block and the first block to fall into the same EBB.
1182 Conceptually the incoming promotions are assigned to the entry block. */
1183 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1184
1185 create_log_links ();
1186 FOR_EACH_BB_FN (this_basic_block, cfun)
1187 {
1188 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1189 last_call_luid = 0;
1190 mem_last_set = -1;
1191
1192 label_tick++;
1193 if (!single_pred_p (this_basic_block)
1194 || single_pred (this_basic_block) != last_bb)
1195 label_tick_ebb_start = label_tick;
1196 last_bb = this_basic_block;
1197
1198 FOR_BB_INSNS (this_basic_block, insn)
1199 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1200 {
1201 rtx links;
1202
1203 subst_low_luid = DF_INSN_LUID (insn);
1204 subst_insn = insn;
1205
1206 note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1207 insn);
1208 record_dead_and_set_regs (insn);
1209
1210 if (AUTO_INC_DEC)
1211 for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1212 if (REG_NOTE_KIND (links) == REG_INC)
1213 set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1214 insn);
1215
1216 /* Record the current insn_rtx_cost of this instruction. */
1217 if (NONJUMP_INSN_P (insn))
1218 INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
1219 optimize_this_for_speed_p);
1220 if (dump_file)
1221 fprintf (dump_file, "insn_cost %d: %d\n",
1222 INSN_UID (insn), INSN_COST (insn));
1223 }
1224 }
1225
1226 nonzero_sign_valid = 1;
1227
1228 /* Now scan all the insns in forward order. */
1229 label_tick = label_tick_ebb_start = 1;
1230 init_reg_last ();
1231 setup_incoming_promotions (first);
1232 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1233 int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);
1234
1235 FOR_EACH_BB_FN (this_basic_block, cfun)
1236 {
1237 rtx_insn *last_combined_insn = NULL;
1238 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1239 last_call_luid = 0;
1240 mem_last_set = -1;
1241
1242 label_tick++;
1243 if (!single_pred_p (this_basic_block)
1244 || single_pred (this_basic_block) != last_bb)
1245 label_tick_ebb_start = label_tick;
1246 last_bb = this_basic_block;
1247
1248 rtl_profile_for_bb (this_basic_block);
1249 for (insn = BB_HEAD (this_basic_block);
1250 insn != NEXT_INSN (BB_END (this_basic_block));
1251 insn = next ? next : NEXT_INSN (insn))
1252 {
1253 next = 0;
1254 if (!NONDEBUG_INSN_P (insn))
1255 continue;
1256
1257 while (last_combined_insn
1258 && last_combined_insn->deleted ())
1259 last_combined_insn = PREV_INSN (last_combined_insn);
1260 if (last_combined_insn == NULL_RTX
1261 || BARRIER_P (last_combined_insn)
1262 || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1263 || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1264 last_combined_insn = insn;
1265
1266 /* See if we know about function return values before this
1267 insn based upon SUBREG flags. */
1268 check_promoted_subreg (insn, PATTERN (insn));
1269
1270 /* See if we can find hardregs and subreg of pseudos in
1271 narrower modes. This could help turning TRUNCATEs
1272 into SUBREGs. */
1273 note_uses (&PATTERN (insn), record_truncated_values, NULL);
1274
1275 /* Try this insn with each insn it links back to. */
1276
1277 FOR_EACH_LOG_LINK (links, insn)
1278 if ((next = try_combine (insn, links->insn, NULL,
1279 NULL, &new_direct_jump_p,
1280 last_combined_insn)) != 0)
1281 {
1282 statistics_counter_event (cfun, "two-insn combine", 1);
1283 goto retry;
1284 }
1285
1286 /* Try each sequence of three linked insns ending with this one. */
1287
1288 if (max_combine >= 3)
1289 FOR_EACH_LOG_LINK (links, insn)
1290 {
1291 rtx_insn *link = links->insn;
1292
1293 /* If the linked insn has been replaced by a note, then there
1294 is no point in pursuing this chain any further. */
1295 if (NOTE_P (link))
1296 continue;
1297
1298 FOR_EACH_LOG_LINK (nextlinks, link)
1299 if ((next = try_combine (insn, link, nextlinks->insn,
1300 NULL, &new_direct_jump_p,
1301 last_combined_insn)) != 0)
1302 {
1303 statistics_counter_event (cfun, "three-insn combine", 1);
1304 goto retry;
1305 }
1306 }
1307
1308 /* Try to combine a jump insn that uses CC0
1309 with a preceding insn that sets CC0, and maybe with its
1310 logical predecessor as well.
1311 This is how we make decrement-and-branch insns.
1312 We need this special code because data flow connections
1313 via CC0 do not get entered in LOG_LINKS. */
1314
1315 if (HAVE_cc0
1316 && JUMP_P (insn)
1317 && (prev = prev_nonnote_insn (insn)) != 0
1318 && NONJUMP_INSN_P (prev)
1319 && sets_cc0_p (PATTERN (prev)))
1320 {
1321 if ((next = try_combine (insn, prev, NULL, NULL,
1322 &new_direct_jump_p,
1323 last_combined_insn)) != 0)
1324 goto retry;
1325
1326 FOR_EACH_LOG_LINK (nextlinks, prev)
1327 if ((next = try_combine (insn, prev, nextlinks->insn,
1328 NULL, &new_direct_jump_p,
1329 last_combined_insn)) != 0)
1330 goto retry;
1331 }
1332
1333 /* Do the same for an insn that explicitly references CC0. */
1334 if (HAVE_cc0 && NONJUMP_INSN_P (insn)
1335 && (prev = prev_nonnote_insn (insn)) != 0
1336 && NONJUMP_INSN_P (prev)
1337 && sets_cc0_p (PATTERN (prev))
1338 && GET_CODE (PATTERN (insn)) == SET
1339 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1340 {
1341 if ((next = try_combine (insn, prev, NULL, NULL,
1342 &new_direct_jump_p,
1343 last_combined_insn)) != 0)
1344 goto retry;
1345
1346 FOR_EACH_LOG_LINK (nextlinks, prev)
1347 if ((next = try_combine (insn, prev, nextlinks->insn,
1348 NULL, &new_direct_jump_p,
1349 last_combined_insn)) != 0)
1350 goto retry;
1351 }
1352
1353 /* Finally, see if any of the insns that this insn links to
1354 explicitly references CC0. If so, try this insn, that insn,
1355 and its predecessor if it sets CC0. */
1356 if (HAVE_cc0)
1357 {
1358 FOR_EACH_LOG_LINK (links, insn)
1359 if (NONJUMP_INSN_P (links->insn)
1360 && GET_CODE (PATTERN (links->insn)) == SET
1361 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1362 && (prev = prev_nonnote_insn (links->insn)) != 0
1363 && NONJUMP_INSN_P (prev)
1364 && sets_cc0_p (PATTERN (prev))
1365 && (next = try_combine (insn, links->insn,
1366 prev, NULL, &new_direct_jump_p,
1367 last_combined_insn)) != 0)
1368 goto retry;
1369 }
1370
1371 /* Try combining an insn with two different insns whose results it
1372 uses. */
1373 if (max_combine >= 3)
1374 FOR_EACH_LOG_LINK (links, insn)
1375 for (nextlinks = links->next; nextlinks;
1376 nextlinks = nextlinks->next)
1377 if ((next = try_combine (insn, links->insn,
1378 nextlinks->insn, NULL,
1379 &new_direct_jump_p,
1380 last_combined_insn)) != 0)
1381
1382 {
1383 statistics_counter_event (cfun, "three-insn combine", 1);
1384 goto retry;
1385 }
1386
1387 /* Try four-instruction combinations. */
1388 if (max_combine >= 4)
1389 FOR_EACH_LOG_LINK (links, insn)
1390 {
1391 struct insn_link *next1;
1392 rtx_insn *link = links->insn;
1393
1394 /* If the linked insn has been replaced by a note, then there
1395 is no point in pursuing this chain any further. */
1396 if (NOTE_P (link))
1397 continue;
1398
1399 FOR_EACH_LOG_LINK (next1, link)
1400 {
1401 rtx_insn *link1 = next1->insn;
1402 if (NOTE_P (link1))
1403 continue;
1404 /* I0 -> I1 -> I2 -> I3. */
1405 FOR_EACH_LOG_LINK (nextlinks, link1)
1406 if ((next = try_combine (insn, link, link1,
1407 nextlinks->insn,
1408 &new_direct_jump_p,
1409 last_combined_insn)) != 0)
1410 {
1411 statistics_counter_event (cfun, "four-insn combine", 1);
1412 goto retry;
1413 }
1414 /* I0, I1 -> I2, I2 -> I3. */
1415 for (nextlinks = next1->next; nextlinks;
1416 nextlinks = nextlinks->next)
1417 if ((next = try_combine (insn, link, link1,
1418 nextlinks->insn,
1419 &new_direct_jump_p,
1420 last_combined_insn)) != 0)
1421 {
1422 statistics_counter_event (cfun, "four-insn combine", 1);
1423 goto retry;
1424 }
1425 }
1426
1427 for (next1 = links->next; next1; next1 = next1->next)
1428 {
1429 rtx_insn *link1 = next1->insn;
1430 if (NOTE_P (link1))
1431 continue;
1432 /* I0 -> I2; I1, I2 -> I3. */
1433 FOR_EACH_LOG_LINK (nextlinks, link)
1434 if ((next = try_combine (insn, link, link1,
1435 nextlinks->insn,
1436 &new_direct_jump_p,
1437 last_combined_insn)) != 0)
1438 {
1439 statistics_counter_event (cfun, "four-insn combine", 1);
1440 goto retry;
1441 }
1442 /* I0 -> I1; I1, I2 -> I3. */
1443 FOR_EACH_LOG_LINK (nextlinks, link1)
1444 if ((next = try_combine (insn, link, link1,
1445 nextlinks->insn,
1446 &new_direct_jump_p,
1447 last_combined_insn)) != 0)
1448 {
1449 statistics_counter_event (cfun, "four-insn combine", 1);
1450 goto retry;
1451 }
1452 }
1453 }
1454
1455 /* Try this insn with each REG_EQUAL note it links back to. */
1456 FOR_EACH_LOG_LINK (links, insn)
1457 {
1458 rtx set, note;
1459 rtx_insn *temp = links->insn;
1460 if ((set = single_set (temp)) != 0
1461 && (note = find_reg_equal_equiv_note (temp)) != 0
1462 && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1463 /* Avoid using a register that may already been marked
1464 dead by an earlier instruction. */
1465 && ! unmentioned_reg_p (note, SET_SRC (set))
1466 && (GET_MODE (note) == VOIDmode
1467 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1468 : GET_MODE (SET_DEST (set)) == GET_MODE (note)))
1469 {
1470 /* Temporarily replace the set's source with the
1471 contents of the REG_EQUAL note. The insn will
1472 be deleted or recognized by try_combine. */
1473 rtx orig = SET_SRC (set);
1474 SET_SRC (set) = note;
1475 i2mod = temp;
1476 i2mod_old_rhs = copy_rtx (orig);
1477 i2mod_new_rhs = copy_rtx (note);
1478 next = try_combine (insn, i2mod, NULL, NULL,
1479 &new_direct_jump_p,
1480 last_combined_insn);
1481 i2mod = NULL;
1482 if (next)
1483 {
1484 statistics_counter_event (cfun, "insn-with-note combine", 1);
1485 goto retry;
1486 }
1487 SET_SRC (set) = orig;
1488 }
1489 }
1490
1491 if (!NOTE_P (insn))
1492 record_dead_and_set_regs (insn);
1493
1494 retry:
1495 ;
1496 }
1497 }
1498
1499 default_rtl_profile ();
1500 clear_bb_flags ();
1501 new_direct_jump_p |= purge_all_dead_edges ();
1502 delete_noop_moves ();
1503
1504 /* Clean up. */
1505 obstack_free (&insn_link_obstack, NULL);
1506 free (uid_log_links);
1507 free (uid_insn_cost);
1508 reg_stat.release ();
1509
1510 {
1511 struct undo *undo, *next;
1512 for (undo = undobuf.frees; undo; undo = next)
1513 {
1514 next = undo->next;
1515 free (undo);
1516 }
1517 undobuf.frees = 0;
1518 }
1519
1520 total_attempts += combine_attempts;
1521 total_merges += combine_merges;
1522 total_extras += combine_extras;
1523 total_successes += combine_successes;
1524
1525 nonzero_sign_valid = 0;
1526 rtl_hooks = general_rtl_hooks;
1527
1528 /* Make recognizer allow volatile MEMs again. */
1529 init_recog ();
1530
1531 return new_direct_jump_p;
1532 }
1533
1534 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
1535
1536 static void
1537 init_reg_last (void)
1538 {
1539 unsigned int i;
1540 reg_stat_type *p;
1541
1542 FOR_EACH_VEC_ELT (reg_stat, i, p)
1543 memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1544 }
1545 \f
1546 /* Set up any promoted values for incoming argument registers. */
1547
1548 static void
1549 setup_incoming_promotions (rtx_insn *first)
1550 {
1551 tree arg;
1552 bool strictly_local = false;
1553
1554 for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1555 arg = DECL_CHAIN (arg))
1556 {
1557 rtx x, reg = DECL_INCOMING_RTL (arg);
1558 int uns1, uns3;
1559 machine_mode mode1, mode2, mode3, mode4;
1560
1561 /* Only continue if the incoming argument is in a register. */
1562 if (!REG_P (reg))
1563 continue;
1564
1565 /* Determine, if possible, whether all call sites of the current
1566 function lie within the current compilation unit. (This does
1567 take into account the exporting of a function via taking its
1568 address, and so forth.) */
1569 strictly_local = cgraph_node::local_info (current_function_decl)->local;
1570
1571 /* The mode and signedness of the argument before any promotions happen
1572 (equal to the mode of the pseudo holding it at that stage). */
1573 mode1 = TYPE_MODE (TREE_TYPE (arg));
1574 uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1575
1576 /* The mode and signedness of the argument after any source language and
1577 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1578 mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1579 uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1580
1581 /* The mode and signedness of the argument as it is actually passed,
1582 see assign_parm_setup_reg in function.c. */
1583 mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
1584 TREE_TYPE (cfun->decl), 0);
1585
1586 /* The mode of the register in which the argument is being passed. */
1587 mode4 = GET_MODE (reg);
1588
1589 /* Eliminate sign extensions in the callee when:
1590 (a) A mode promotion has occurred; */
1591 if (mode1 == mode3)
1592 continue;
1593 /* (b) The mode of the register is the same as the mode of
1594 the argument as it is passed; */
1595 if (mode3 != mode4)
1596 continue;
1597 /* (c) There's no language level extension; */
1598 if (mode1 == mode2)
1599 ;
1600 /* (c.1) All callers are from the current compilation unit. If that's
1601 the case we don't have to rely on an ABI, we only have to know
1602 what we're generating right now, and we know that we will do the
1603 mode1 to mode2 promotion with the given sign. */
1604 else if (!strictly_local)
1605 continue;
1606 /* (c.2) The combination of the two promotions is useful. This is
1607 true when the signs match, or if the first promotion is unsigned.
1608 In the later case, (sign_extend (zero_extend x)) is the same as
1609 (zero_extend (zero_extend x)), so make sure to force UNS3 true. */
1610 else if (uns1)
1611 uns3 = true;
1612 else if (uns3)
1613 continue;
1614
1615 /* Record that the value was promoted from mode1 to mode3,
1616 so that any sign extension at the head of the current
1617 function may be eliminated. */
1618 x = gen_rtx_CLOBBER (mode1, const0_rtx);
1619 x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1620 record_value_for_reg (reg, first, x);
1621 }
1622 }
1623
1624 /* If MODE has a precision lower than PREC and SRC is a non-negative constant
1625 that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
1626 because some machines (maybe most) will actually do the sign-extension and
1627 this is the conservative approach.
1628
1629 ??? For 2.5, try to tighten up the MD files in this regard instead of this
1630 kludge. */
1631
1632 static rtx
1633 sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
1634 {
1635 if (GET_MODE_PRECISION (mode) < prec
1636 && CONST_INT_P (src)
1637 && INTVAL (src) > 0
1638 && val_signbit_known_set_p (mode, INTVAL (src)))
1639 src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (mode));
1640
1641 return src;
1642 }
1643
1644 /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
1645 and SET. */
1646
1647 static void
1648 update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
1649 rtx x)
1650 {
1651 rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
1652 unsigned HOST_WIDE_INT bits = 0;
1653 rtx reg_equal = NULL, src = SET_SRC (set);
1654 unsigned int num = 0;
1655
1656 if (reg_equal_note)
1657 reg_equal = XEXP (reg_equal_note, 0);
1658
1659 if (SHORT_IMMEDIATES_SIGN_EXTEND)
1660 {
1661 src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
1662 if (reg_equal)
1663 reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
1664 }
1665
1666 /* Don't call nonzero_bits if it cannot change anything. */
1667 if (rsp->nonzero_bits != ~(unsigned HOST_WIDE_INT) 0)
1668 {
1669 bits = nonzero_bits (src, nonzero_bits_mode);
1670 if (reg_equal && bits)
1671 bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
1672 rsp->nonzero_bits |= bits;
1673 }
1674
1675 /* Don't call num_sign_bit_copies if it cannot change anything. */
1676 if (rsp->sign_bit_copies != 1)
1677 {
1678 num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1679 if (reg_equal && num != GET_MODE_PRECISION (GET_MODE (x)))
1680 {
1681 unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
1682 if (num == 0 || numeq > num)
1683 num = numeq;
1684 }
1685 if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
1686 rsp->sign_bit_copies = num;
1687 }
1688 }
1689
1690 /* Called via note_stores. If X is a pseudo that is narrower than
1691 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1692
1693 If we are setting only a portion of X and we can't figure out what
1694 portion, assume all bits will be used since we don't know what will
1695 be happening.
1696
1697 Similarly, set how many bits of X are known to be copies of the sign bit
1698 at all locations in the function. This is the smallest number implied
1699 by any set of X. */
1700
1701 static void
1702 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1703 {
1704 rtx_insn *insn = (rtx_insn *) data;
1705
1706 if (REG_P (x)
1707 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1708 /* If this register is undefined at the start of the file, we can't
1709 say what its contents were. */
1710 && ! REGNO_REG_SET_P
1711 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
1712 && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
1713 {
1714 reg_stat_type *rsp = &reg_stat[REGNO (x)];
1715
1716 if (set == 0 || GET_CODE (set) == CLOBBER)
1717 {
1718 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1719 rsp->sign_bit_copies = 1;
1720 return;
1721 }
1722
1723 /* If this register is being initialized using itself, and the
1724 register is uninitialized in this basic block, and there are
1725 no LOG_LINKS which set the register, then part of the
1726 register is uninitialized. In that case we can't assume
1727 anything about the number of nonzero bits.
1728
1729 ??? We could do better if we checked this in
1730 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1731 could avoid making assumptions about the insn which initially
1732 sets the register, while still using the information in other
1733 insns. We would have to be careful to check every insn
1734 involved in the combination. */
1735
1736 if (insn
1737 && reg_referenced_p (x, PATTERN (insn))
1738 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1739 REGNO (x)))
1740 {
1741 struct insn_link *link;
1742
1743 FOR_EACH_LOG_LINK (link, insn)
1744 if (dead_or_set_p (link->insn, x))
1745 break;
1746 if (!link)
1747 {
1748 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1749 rsp->sign_bit_copies = 1;
1750 return;
1751 }
1752 }
1753
1754 /* If this is a complex assignment, see if we can convert it into a
1755 simple assignment. */
1756 set = expand_field_assignment (set);
1757
1758 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1759 set what we know about X. */
1760
1761 if (SET_DEST (set) == x
1762 || (paradoxical_subreg_p (SET_DEST (set))
1763 && SUBREG_REG (SET_DEST (set)) == x))
1764 update_rsp_from_reg_equal (rsp, insn, set, x);
1765 else
1766 {
1767 rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
1768 rsp->sign_bit_copies = 1;
1769 }
1770 }
1771 }
1772 \f
1773 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1774 optionally insns that were previously combined into I3 or that will be
1775 combined into the merger of INSN and I3. The order is PRED, PRED2,
1776 INSN, SUCC, SUCC2, I3.
1777
1778 Return 0 if the combination is not allowed for any reason.
1779
1780 If the combination is allowed, *PDEST will be set to the single
1781 destination of INSN and *PSRC to the single source, and this function
1782 will return 1. */
1783
1784 static int
1785 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1786 rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1787 rtx *pdest, rtx *psrc)
1788 {
1789 int i;
1790 const_rtx set = 0;
1791 rtx src, dest;
1792 rtx_insn *p;
1793 rtx link;
1794 bool all_adjacent = true;
1795 int (*is_volatile_p) (const_rtx);
1796
1797 if (succ)
1798 {
1799 if (succ2)
1800 {
1801 if (next_active_insn (succ2) != i3)
1802 all_adjacent = false;
1803 if (next_active_insn (succ) != succ2)
1804 all_adjacent = false;
1805 }
1806 else if (next_active_insn (succ) != i3)
1807 all_adjacent = false;
1808 if (next_active_insn (insn) != succ)
1809 all_adjacent = false;
1810 }
1811 else if (next_active_insn (insn) != i3)
1812 all_adjacent = false;
1813
1814 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0.
1815 or a PARALLEL consisting of such a SET and CLOBBERs.
1816
1817 If INSN has CLOBBER parallel parts, ignore them for our processing.
1818 By definition, these happen during the execution of the insn. When it
1819 is merged with another insn, all bets are off. If they are, in fact,
1820 needed and aren't also supplied in I3, they may be added by
1821 recog_for_combine. Otherwise, it won't match.
1822
1823 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1824 note.
1825
1826 Get the source and destination of INSN. If more than one, can't
1827 combine. */
1828
1829 if (GET_CODE (PATTERN (insn)) == SET)
1830 set = PATTERN (insn);
1831 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1832 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1833 {
1834 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1835 {
1836 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1837
1838 switch (GET_CODE (elt))
1839 {
1840 /* This is important to combine floating point insns
1841 for the SH4 port. */
1842 case USE:
1843 /* Combining an isolated USE doesn't make sense.
1844 We depend here on combinable_i3pat to reject them. */
1845 /* The code below this loop only verifies that the inputs of
1846 the SET in INSN do not change. We call reg_set_between_p
1847 to verify that the REG in the USE does not change between
1848 I3 and INSN.
1849 If the USE in INSN was for a pseudo register, the matching
1850 insn pattern will likely match any register; combining this
1851 with any other USE would only be safe if we knew that the
1852 used registers have identical values, or if there was
1853 something to tell them apart, e.g. different modes. For
1854 now, we forgo such complicated tests and simply disallow
1855 combining of USES of pseudo registers with any other USE. */
1856 if (REG_P (XEXP (elt, 0))
1857 && GET_CODE (PATTERN (i3)) == PARALLEL)
1858 {
1859 rtx i3pat = PATTERN (i3);
1860 int i = XVECLEN (i3pat, 0) - 1;
1861 unsigned int regno = REGNO (XEXP (elt, 0));
1862
1863 do
1864 {
1865 rtx i3elt = XVECEXP (i3pat, 0, i);
1866
1867 if (GET_CODE (i3elt) == USE
1868 && REG_P (XEXP (i3elt, 0))
1869 && (REGNO (XEXP (i3elt, 0)) == regno
1870 ? reg_set_between_p (XEXP (elt, 0),
1871 PREV_INSN (insn), i3)
1872 : regno >= FIRST_PSEUDO_REGISTER))
1873 return 0;
1874 }
1875 while (--i >= 0);
1876 }
1877 break;
1878
1879 /* We can ignore CLOBBERs. */
1880 case CLOBBER:
1881 break;
1882
1883 case SET:
1884 /* Ignore SETs whose result isn't used but not those that
1885 have side-effects. */
1886 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1887 && insn_nothrow_p (insn)
1888 && !side_effects_p (elt))
1889 break;
1890
1891 /* If we have already found a SET, this is a second one and
1892 so we cannot combine with this insn. */
1893 if (set)
1894 return 0;
1895
1896 set = elt;
1897 break;
1898
1899 default:
1900 /* Anything else means we can't combine. */
1901 return 0;
1902 }
1903 }
1904
1905 if (set == 0
1906 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1907 so don't do anything with it. */
1908 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1909 return 0;
1910 }
1911 else
1912 return 0;
1913
1914 if (set == 0)
1915 return 0;
1916
1917 /* The simplification in expand_field_assignment may call back to
1918 get_last_value, so set safe guard here. */
1919 subst_low_luid = DF_INSN_LUID (insn);
1920
1921 set = expand_field_assignment (set);
1922 src = SET_SRC (set), dest = SET_DEST (set);
1923
1924 /* Do not eliminate user-specified register if it is in an
1925 asm input because we may break the register asm usage defined
1926 in GCC manual if allow to do so.
1927 Be aware that this may cover more cases than we expect but this
1928 should be harmless. */
1929 if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1930 && extract_asm_operands (PATTERN (i3)))
1931 return 0;
1932
1933 /* Don't eliminate a store in the stack pointer. */
1934 if (dest == stack_pointer_rtx
1935 /* Don't combine with an insn that sets a register to itself if it has
1936 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1937 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1938 /* Can't merge an ASM_OPERANDS. */
1939 || GET_CODE (src) == ASM_OPERANDS
1940 /* Can't merge a function call. */
1941 || GET_CODE (src) == CALL
1942 /* Don't eliminate a function call argument. */
1943 || (CALL_P (i3)
1944 && (find_reg_fusage (i3, USE, dest)
1945 || (REG_P (dest)
1946 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1947 && global_regs[REGNO (dest)])))
1948 /* Don't substitute into an incremented register. */
1949 || FIND_REG_INC_NOTE (i3, dest)
1950 || (succ && FIND_REG_INC_NOTE (succ, dest))
1951 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1952 /* Don't substitute into a non-local goto, this confuses CFG. */
1953 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1954 /* Make sure that DEST is not used after SUCC but before I3. */
1955 || (!all_adjacent
1956 && ((succ2
1957 && (reg_used_between_p (dest, succ2, i3)
1958 || reg_used_between_p (dest, succ, succ2)))
1959 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))))
1960 /* Make sure that the value that is to be substituted for the register
1961 does not use any registers whose values alter in between. However,
1962 If the insns are adjacent, a use can't cross a set even though we
1963 think it might (this can happen for a sequence of insns each setting
1964 the same destination; last_set of that register might point to
1965 a NOTE). If INSN has a REG_EQUIV note, the register is always
1966 equivalent to the memory so the substitution is valid even if there
1967 are intervening stores. Also, don't move a volatile asm or
1968 UNSPEC_VOLATILE across any other insns. */
1969 || (! all_adjacent
1970 && (((!MEM_P (src)
1971 || ! find_reg_note (insn, REG_EQUIV, src))
1972 && use_crosses_set_p (src, DF_INSN_LUID (insn)))
1973 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
1974 || GET_CODE (src) == UNSPEC_VOLATILE))
1975 /* Don't combine across a CALL_INSN, because that would possibly
1976 change whether the life span of some REGs crosses calls or not,
1977 and it is a pain to update that information.
1978 Exception: if source is a constant, moving it later can't hurt.
1979 Accept that as a special case. */
1980 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
1981 return 0;
1982
1983 /* DEST must either be a REG or CC0. */
1984 if (REG_P (dest))
1985 {
1986 /* If register alignment is being enforced for multi-word items in all
1987 cases except for parameters, it is possible to have a register copy
1988 insn referencing a hard register that is not allowed to contain the
1989 mode being copied and which would not be valid as an operand of most
1990 insns. Eliminate this problem by not combining with such an insn.
1991
1992 Also, on some machines we don't want to extend the life of a hard
1993 register. */
1994
1995 if (REG_P (src)
1996 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
1997 && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
1998 /* Don't extend the life of a hard register unless it is
1999 user variable (if we have few registers) or it can't
2000 fit into the desired register (meaning something special
2001 is going on).
2002 Also avoid substituting a return register into I3, because
2003 reload can't handle a conflict with constraints of other
2004 inputs. */
2005 || (REGNO (src) < FIRST_PSEUDO_REGISTER
2006 && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src)))))
2007 return 0;
2008 }
2009 else if (GET_CODE (dest) != CC0)
2010 return 0;
2011
2012
2013 if (GET_CODE (PATTERN (i3)) == PARALLEL)
2014 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
2015 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
2016 {
2017 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
2018
2019 /* If the clobber represents an earlyclobber operand, we must not
2020 substitute an expression containing the clobbered register.
2021 As we do not analyze the constraint strings here, we have to
2022 make the conservative assumption. However, if the register is
2023 a fixed hard reg, the clobber cannot represent any operand;
2024 we leave it up to the machine description to either accept or
2025 reject use-and-clobber patterns. */
2026 if (!REG_P (reg)
2027 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
2028 || !fixed_regs[REGNO (reg)])
2029 if (reg_overlap_mentioned_p (reg, src))
2030 return 0;
2031 }
2032
2033 /* If INSN contains anything volatile, or is an `asm' (whether volatile
2034 or not), reject, unless nothing volatile comes between it and I3 */
2035
2036 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
2037 {
2038 /* Make sure neither succ nor succ2 contains a volatile reference. */
2039 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
2040 return 0;
2041 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
2042 return 0;
2043 /* We'll check insns between INSN and I3 below. */
2044 }
2045
2046 /* If INSN is an asm, and DEST is a hard register, reject, since it has
2047 to be an explicit register variable, and was chosen for a reason. */
2048
2049 if (GET_CODE (src) == ASM_OPERANDS
2050 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2051 return 0;
2052
2053 /* If INSN contains volatile references (specifically volatile MEMs),
2054 we cannot combine across any other volatile references.
2055 Even if INSN doesn't contain volatile references, any intervening
2056 volatile insn might affect machine state. */
2057
2058 is_volatile_p = volatile_refs_p (PATTERN (insn))
2059 ? volatile_refs_p
2060 : volatile_insn_p;
2061
2062 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2063 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2064 return 0;
2065
2066 /* If INSN contains an autoincrement or autodecrement, make sure that
2067 register is not used between there and I3, and not already used in
2068 I3 either. Neither must it be used in PRED or SUCC, if they exist.
2069 Also insist that I3 not be a jump; if it were one
2070 and the incremented register were spilled, we would lose. */
2071
2072 if (AUTO_INC_DEC)
2073 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2074 if (REG_NOTE_KIND (link) == REG_INC
2075 && (JUMP_P (i3)
2076 || reg_used_between_p (XEXP (link, 0), insn, i3)
2077 || (pred != NULL_RTX
2078 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2079 || (pred2 != NULL_RTX
2080 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2081 || (succ != NULL_RTX
2082 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2083 || (succ2 != NULL_RTX
2084 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2085 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2086 return 0;
2087
2088 /* Don't combine an insn that follows a CC0-setting insn.
2089 An insn that uses CC0 must not be separated from the one that sets it.
2090 We do, however, allow I2 to follow a CC0-setting insn if that insn
2091 is passed as I1; in that case it will be deleted also.
2092 We also allow combining in this case if all the insns are adjacent
2093 because that would leave the two CC0 insns adjacent as well.
2094 It would be more logical to test whether CC0 occurs inside I1 or I2,
2095 but that would be much slower, and this ought to be equivalent. */
2096
2097 if (HAVE_cc0)
2098 {
2099 p = prev_nonnote_insn (insn);
2100 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2101 && ! all_adjacent)
2102 return 0;
2103 }
2104
2105 /* If we get here, we have passed all the tests and the combination is
2106 to be allowed. */
2107
2108 *pdest = dest;
2109 *psrc = src;
2110
2111 return 1;
2112 }
2113 \f
2114 /* LOC is the location within I3 that contains its pattern or the component
2115 of a PARALLEL of the pattern. We validate that it is valid for combining.
2116
2117 One problem is if I3 modifies its output, as opposed to replacing it
2118 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
2119 doing so would produce an insn that is not equivalent to the original insns.
2120
2121 Consider:
2122
2123 (set (reg:DI 101) (reg:DI 100))
2124 (set (subreg:SI (reg:DI 101) 0) <foo>)
2125
2126 This is NOT equivalent to:
2127
2128 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2129 (set (reg:DI 101) (reg:DI 100))])
2130
2131 Not only does this modify 100 (in which case it might still be valid
2132 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2133
2134 We can also run into a problem if I2 sets a register that I1
2135 uses and I1 gets directly substituted into I3 (not via I2). In that
2136 case, we would be getting the wrong value of I2DEST into I3, so we
2137 must reject the combination. This case occurs when I2 and I1 both
2138 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2139 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2140 of a SET must prevent combination from occurring. The same situation
2141 can occur for I0, in which case I0_NOT_IN_SRC is set.
2142
2143 Before doing the above check, we first try to expand a field assignment
2144 into a set of logical operations.
2145
2146 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2147 we place a register that is both set and used within I3. If more than one
2148 such register is detected, we fail.
2149
2150 Return 1 if the combination is valid, zero otherwise. */
2151
2152 static int
2153 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2154 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2155 {
2156 rtx x = *loc;
2157
2158 if (GET_CODE (x) == SET)
2159 {
2160 rtx set = x ;
2161 rtx dest = SET_DEST (set);
2162 rtx src = SET_SRC (set);
2163 rtx inner_dest = dest;
2164 rtx subdest;
2165
2166 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2167 || GET_CODE (inner_dest) == SUBREG
2168 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2169 inner_dest = XEXP (inner_dest, 0);
2170
2171 /* Check for the case where I3 modifies its output, as discussed
2172 above. We don't want to prevent pseudos from being combined
2173 into the address of a MEM, so only prevent the combination if
2174 i1 or i2 set the same MEM. */
2175 if ((inner_dest != dest &&
2176 (!MEM_P (inner_dest)
2177 || rtx_equal_p (i2dest, inner_dest)
2178 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2179 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2180 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2181 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2182 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2183
2184 /* This is the same test done in can_combine_p except we can't test
2185 all_adjacent; we don't have to, since this instruction will stay
2186 in place, thus we are not considering increasing the lifetime of
2187 INNER_DEST.
2188
2189 Also, if this insn sets a function argument, combining it with
2190 something that might need a spill could clobber a previous
2191 function argument; the all_adjacent test in can_combine_p also
2192 checks this; here, we do a more specific test for this case. */
2193
2194 || (REG_P (inner_dest)
2195 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2196 && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
2197 GET_MODE (inner_dest))))
2198 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2199 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2200 return 0;
2201
2202 /* If DEST is used in I3, it is being killed in this insn, so
2203 record that for later. We have to consider paradoxical
2204 subregs here, since they kill the whole register, but we
2205 ignore partial subregs, STRICT_LOW_PART, etc.
2206 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2207 STACK_POINTER_REGNUM, since these are always considered to be
2208 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2209 subdest = dest;
2210 if (GET_CODE (subdest) == SUBREG
2211 && (GET_MODE_SIZE (GET_MODE (subdest))
2212 >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
2213 subdest = SUBREG_REG (subdest);
2214 if (pi3dest_killed
2215 && REG_P (subdest)
2216 && reg_referenced_p (subdest, PATTERN (i3))
2217 && REGNO (subdest) != FRAME_POINTER_REGNUM
2218 && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2219 || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2220 && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2221 || (REGNO (subdest) != ARG_POINTER_REGNUM
2222 || ! fixed_regs [REGNO (subdest)]))
2223 && REGNO (subdest) != STACK_POINTER_REGNUM)
2224 {
2225 if (*pi3dest_killed)
2226 return 0;
2227
2228 *pi3dest_killed = subdest;
2229 }
2230 }
2231
2232 else if (GET_CODE (x) == PARALLEL)
2233 {
2234 int i;
2235
2236 for (i = 0; i < XVECLEN (x, 0); i++)
2237 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2238 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2239 return 0;
2240 }
2241
2242 return 1;
2243 }
2244 \f
2245 /* Return 1 if X is an arithmetic expression that contains a multiplication
2246 and division. We don't count multiplications by powers of two here. */
2247
2248 static int
2249 contains_muldiv (rtx x)
2250 {
2251 switch (GET_CODE (x))
2252 {
2253 case MOD: case DIV: case UMOD: case UDIV:
2254 return 1;
2255
2256 case MULT:
2257 return ! (CONST_INT_P (XEXP (x, 1))
2258 && exact_log2 (UINTVAL (XEXP (x, 1))) >= 0);
2259 default:
2260 if (BINARY_P (x))
2261 return contains_muldiv (XEXP (x, 0))
2262 || contains_muldiv (XEXP (x, 1));
2263
2264 if (UNARY_P (x))
2265 return contains_muldiv (XEXP (x, 0));
2266
2267 return 0;
2268 }
2269 }
2270 \f
2271 /* Determine whether INSN can be used in a combination. Return nonzero if
2272 not. This is used in try_combine to detect early some cases where we
2273 can't perform combinations. */
2274
2275 static int
2276 cant_combine_insn_p (rtx_insn *insn)
2277 {
2278 rtx set;
2279 rtx src, dest;
2280
2281 /* If this isn't really an insn, we can't do anything.
2282 This can occur when flow deletes an insn that it has merged into an
2283 auto-increment address. */
2284 if (! INSN_P (insn))
2285 return 1;
2286
2287 /* Never combine loads and stores involving hard regs that are likely
2288 to be spilled. The register allocator can usually handle such
2289 reg-reg moves by tying. If we allow the combiner to make
2290 substitutions of likely-spilled regs, reload might die.
2291 As an exception, we allow combinations involving fixed regs; these are
2292 not available to the register allocator so there's no risk involved. */
2293
2294 set = single_set (insn);
2295 if (! set)
2296 return 0;
2297 src = SET_SRC (set);
2298 dest = SET_DEST (set);
2299 if (GET_CODE (src) == SUBREG)
2300 src = SUBREG_REG (src);
2301 if (GET_CODE (dest) == SUBREG)
2302 dest = SUBREG_REG (dest);
2303 if (REG_P (src) && REG_P (dest)
2304 && ((HARD_REGISTER_P (src)
2305 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2306 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2307 || (HARD_REGISTER_P (dest)
2308 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2309 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2310 return 1;
2311
2312 return 0;
2313 }
2314
2315 struct likely_spilled_retval_info
2316 {
2317 unsigned regno, nregs;
2318 unsigned mask;
2319 };
2320
2321 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2322 hard registers that are known to be written to / clobbered in full. */
2323 static void
2324 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2325 {
2326 struct likely_spilled_retval_info *const info =
2327 (struct likely_spilled_retval_info *) data;
2328 unsigned regno, nregs;
2329 unsigned new_mask;
2330
2331 if (!REG_P (XEXP (set, 0)))
2332 return;
2333 regno = REGNO (x);
2334 if (regno >= info->regno + info->nregs)
2335 return;
2336 nregs = REG_NREGS (x);
2337 if (regno + nregs <= info->regno)
2338 return;
2339 new_mask = (2U << (nregs - 1)) - 1;
2340 if (regno < info->regno)
2341 new_mask >>= info->regno - regno;
2342 else
2343 new_mask <<= regno - info->regno;
2344 info->mask &= ~new_mask;
2345 }
2346
2347 /* Return nonzero iff part of the return value is live during INSN, and
2348 it is likely spilled. This can happen when more than one insn is needed
2349 to copy the return value, e.g. when we consider to combine into the
2350 second copy insn for a complex value. */
2351
2352 static int
2353 likely_spilled_retval_p (rtx_insn *insn)
2354 {
2355 rtx_insn *use = BB_END (this_basic_block);
2356 rtx reg;
2357 rtx_insn *p;
2358 unsigned regno, nregs;
2359 /* We assume here that no machine mode needs more than
2360 32 hard registers when the value overlaps with a register
2361 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2362 unsigned mask;
2363 struct likely_spilled_retval_info info;
2364
2365 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2366 return 0;
2367 reg = XEXP (PATTERN (use), 0);
2368 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2369 return 0;
2370 regno = REGNO (reg);
2371 nregs = REG_NREGS (reg);
2372 if (nregs == 1)
2373 return 0;
2374 mask = (2U << (nregs - 1)) - 1;
2375
2376 /* Disregard parts of the return value that are set later. */
2377 info.regno = regno;
2378 info.nregs = nregs;
2379 info.mask = mask;
2380 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2381 if (INSN_P (p))
2382 note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2383 mask = info.mask;
2384
2385 /* Check if any of the (probably) live return value registers is
2386 likely spilled. */
2387 nregs --;
2388 do
2389 {
2390 if ((mask & 1 << nregs)
2391 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2392 return 1;
2393 } while (nregs--);
2394 return 0;
2395 }
2396
2397 /* Adjust INSN after we made a change to its destination.
2398
2399 Changing the destination can invalidate notes that say something about
2400 the results of the insn and a LOG_LINK pointing to the insn. */
2401
2402 static void
2403 adjust_for_new_dest (rtx_insn *insn)
2404 {
2405 /* For notes, be conservative and simply remove them. */
2406 remove_reg_equal_equiv_notes (insn);
2407
2408 /* The new insn will have a destination that was previously the destination
2409 of an insn just above it. Call distribute_links to make a LOG_LINK from
2410 the next use of that destination. */
2411
2412 rtx set = single_set (insn);
2413 gcc_assert (set);
2414
2415 rtx reg = SET_DEST (set);
2416
2417 while (GET_CODE (reg) == ZERO_EXTRACT
2418 || GET_CODE (reg) == STRICT_LOW_PART
2419 || GET_CODE (reg) == SUBREG)
2420 reg = XEXP (reg, 0);
2421 gcc_assert (REG_P (reg));
2422
2423 distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2424
2425 df_insn_rescan (insn);
2426 }
2427
2428 /* Return TRUE if combine can reuse reg X in mode MODE.
2429 ADDED_SETS is nonzero if the original set is still required. */
2430 static bool
2431 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2432 {
2433 unsigned int regno;
2434
2435 if (!REG_P (x))
2436 return false;
2437
2438 regno = REGNO (x);
2439 /* Allow hard registers if the new mode is legal, and occupies no more
2440 registers than the old mode. */
2441 if (regno < FIRST_PSEUDO_REGISTER)
2442 return (HARD_REGNO_MODE_OK (regno, mode)
2443 && REG_NREGS (x) >= hard_regno_nregs[regno][mode]);
2444
2445 /* Or a pseudo that is only used once. */
2446 return (regno < reg_n_sets_max
2447 && REG_N_SETS (regno) == 1
2448 && !added_sets
2449 && !REG_USERVAR_P (x));
2450 }
2451
2452
2453 /* Check whether X, the destination of a set, refers to part of
2454 the register specified by REG. */
2455
2456 static bool
2457 reg_subword_p (rtx x, rtx reg)
2458 {
2459 /* Check that reg is an integer mode register. */
2460 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2461 return false;
2462
2463 if (GET_CODE (x) == STRICT_LOW_PART
2464 || GET_CODE (x) == ZERO_EXTRACT)
2465 x = XEXP (x, 0);
2466
2467 return GET_CODE (x) == SUBREG
2468 && SUBREG_REG (x) == reg
2469 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2470 }
2471
2472 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2473 Note that the INSN should be deleted *after* removing dead edges, so
2474 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2475 but not for a (set (pc) (label_ref FOO)). */
2476
2477 static void
2478 update_cfg_for_uncondjump (rtx_insn *insn)
2479 {
2480 basic_block bb = BLOCK_FOR_INSN (insn);
2481 gcc_assert (BB_END (bb) == insn);
2482
2483 purge_dead_edges (bb);
2484
2485 delete_insn (insn);
2486 if (EDGE_COUNT (bb->succs) == 1)
2487 {
2488 rtx_insn *insn;
2489
2490 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2491
2492 /* Remove barriers from the footer if there are any. */
2493 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2494 if (BARRIER_P (insn))
2495 {
2496 if (PREV_INSN (insn))
2497 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2498 else
2499 BB_FOOTER (bb) = NEXT_INSN (insn);
2500 if (NEXT_INSN (insn))
2501 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2502 }
2503 else if (LABEL_P (insn))
2504 break;
2505 }
2506 }
2507
2508 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2509 by an arbitrary number of CLOBBERs. */
2510 static bool
2511 is_parallel_of_n_reg_sets (rtx pat, int n)
2512 {
2513 if (GET_CODE (pat) != PARALLEL)
2514 return false;
2515
2516 int len = XVECLEN (pat, 0);
2517 if (len < n)
2518 return false;
2519
2520 int i;
2521 for (i = 0; i < n; i++)
2522 if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2523 || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2524 return false;
2525 for ( ; i < len; i++)
2526 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
2527 return false;
2528
2529 return true;
2530 }
2531
2532 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2533 CLOBBERs), can be split into individual SETs in that order, without
2534 changing semantics. */
2535 static bool
2536 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2537 {
2538 if (!insn_nothrow_p (insn))
2539 return false;
2540
2541 rtx pat = PATTERN (insn);
2542
2543 int i, j;
2544 for (i = 0; i < n; i++)
2545 {
2546 if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2547 return false;
2548
2549 rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2550
2551 for (j = i + 1; j < n; j++)
2552 if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2553 return false;
2554 }
2555
2556 return true;
2557 }
2558
2559 /* Try to combine the insns I0, I1 and I2 into I3.
2560 Here I0, I1 and I2 appear earlier than I3.
2561 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2562 I3.
2563
2564 If we are combining more than two insns and the resulting insn is not
2565 recognized, try splitting it into two insns. If that happens, I2 and I3
2566 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2567 Otherwise, I0, I1 and I2 are pseudo-deleted.
2568
2569 Return 0 if the combination does not work. Then nothing is changed.
2570 If we did the combination, return the insn at which combine should
2571 resume scanning.
2572
2573 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2574 new direct jump instruction.
2575
2576 LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2577 been I3 passed to an earlier try_combine within the same basic
2578 block. */
2579
2580 static rtx_insn *
2581 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2582 int *new_direct_jump_p, rtx_insn *last_combined_insn)
2583 {
2584 /* New patterns for I3 and I2, respectively. */
2585 rtx newpat, newi2pat = 0;
2586 rtvec newpat_vec_with_clobbers = 0;
2587 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2588 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2589 dead. */
2590 int added_sets_0, added_sets_1, added_sets_2;
2591 /* Total number of SETs to put into I3. */
2592 int total_sets;
2593 /* Nonzero if I2's or I1's body now appears in I3. */
2594 int i2_is_used = 0, i1_is_used = 0;
2595 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2596 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2597 /* Contains I3 if the destination of I3 is used in its source, which means
2598 that the old life of I3 is being killed. If that usage is placed into
2599 I2 and not in I3, a REG_DEAD note must be made. */
2600 rtx i3dest_killed = 0;
2601 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2602 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2603 /* Copy of SET_SRC of I1 and I0, if needed. */
2604 rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2605 /* Set if I2DEST was reused as a scratch register. */
2606 bool i2scratch = false;
2607 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2608 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2609 /* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC. */
2610 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2611 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2612 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2613 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2614 /* Notes that must be added to REG_NOTES in I3 and I2. */
2615 rtx new_i3_notes, new_i2_notes;
2616 /* Notes that we substituted I3 into I2 instead of the normal case. */
2617 int i3_subst_into_i2 = 0;
2618 /* Notes that I1, I2 or I3 is a MULT operation. */
2619 int have_mult = 0;
2620 int swap_i2i3 = 0;
2621 int changed_i3_dest = 0;
2622
2623 int maxreg;
2624 rtx_insn *temp_insn;
2625 rtx temp_expr;
2626 struct insn_link *link;
2627 rtx other_pat = 0;
2628 rtx new_other_notes;
2629 int i;
2630
2631 /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
2632 never be). */
2633 if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2634 return 0;
2635
2636 /* Only try four-insn combinations when there's high likelihood of
2637 success. Look for simple insns, such as loads of constants or
2638 binary operations involving a constant. */
2639 if (i0)
2640 {
2641 int i;
2642 int ngood = 0;
2643 int nshift = 0;
2644 rtx set0, set3;
2645
2646 if (!flag_expensive_optimizations)
2647 return 0;
2648
2649 for (i = 0; i < 4; i++)
2650 {
2651 rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2652 rtx set = single_set (insn);
2653 rtx src;
2654 if (!set)
2655 continue;
2656 src = SET_SRC (set);
2657 if (CONSTANT_P (src))
2658 {
2659 ngood += 2;
2660 break;
2661 }
2662 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2663 ngood++;
2664 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2665 || GET_CODE (src) == LSHIFTRT)
2666 nshift++;
2667 }
2668
2669 /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
2670 are likely manipulating its value. Ideally we'll be able to combine
2671 all four insns into a bitfield insertion of some kind.
2672
2673 Note the source in I0 might be inside a sign/zero extension and the
2674 memory modes in I0 and I3 might be different. So extract the address
2675 from the destination of I3 and search for it in the source of I0.
2676
2677 In the event that there's a match but the source/dest do not actually
2678 refer to the same memory, the worst that happens is we try some
2679 combinations that we wouldn't have otherwise. */
2680 if ((set0 = single_set (i0))
2681 /* Ensure the source of SET0 is a MEM, possibly buried inside
2682 an extension. */
2683 && (GET_CODE (SET_SRC (set0)) == MEM
2684 || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2685 || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2686 && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2687 && (set3 = single_set (i3))
2688 /* Ensure the destination of SET3 is a MEM. */
2689 && GET_CODE (SET_DEST (set3)) == MEM
2690 /* Would it be better to extract the base address for the MEM
2691 in SET3 and look for that? I don't have cases where it matters
2692 but I could envision such cases. */
2693 && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2694 ngood += 2;
2695
2696 if (ngood < 2 && nshift < 2)
2697 return 0;
2698 }
2699
2700 /* Exit early if one of the insns involved can't be used for
2701 combinations. */
2702 if (CALL_P (i2)
2703 || (i1 && CALL_P (i1))
2704 || (i0 && CALL_P (i0))
2705 || cant_combine_insn_p (i3)
2706 || cant_combine_insn_p (i2)
2707 || (i1 && cant_combine_insn_p (i1))
2708 || (i0 && cant_combine_insn_p (i0))
2709 || likely_spilled_retval_p (i3))
2710 return 0;
2711
2712 combine_attempts++;
2713 undobuf.other_insn = 0;
2714
2715 /* Reset the hard register usage information. */
2716 CLEAR_HARD_REG_SET (newpat_used_regs);
2717
2718 if (dump_file && (dump_flags & TDF_DETAILS))
2719 {
2720 if (i0)
2721 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2722 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2723 else if (i1)
2724 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2725 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2726 else
2727 fprintf (dump_file, "\nTrying %d -> %d:\n",
2728 INSN_UID (i2), INSN_UID (i3));
2729 }
2730
2731 /* If multiple insns feed into one of I2 or I3, they can be in any
2732 order. To simplify the code below, reorder them in sequence. */
2733 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2734 std::swap (i0, i2);
2735 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2736 std::swap (i0, i1);
2737 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2738 std::swap (i1, i2);
2739
2740 added_links_insn = 0;
2741
2742 /* First check for one important special case that the code below will
2743 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2744 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2745 we may be able to replace that destination with the destination of I3.
2746 This occurs in the common code where we compute both a quotient and
2747 remainder into a structure, in which case we want to do the computation
2748 directly into the structure to avoid register-register copies.
2749
2750 Note that this case handles both multiple sets in I2 and also cases
2751 where I2 has a number of CLOBBERs inside the PARALLEL.
2752
2753 We make very conservative checks below and only try to handle the
2754 most common cases of this. For example, we only handle the case
2755 where I2 and I3 are adjacent to avoid making difficult register
2756 usage tests. */
2757
2758 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2759 && REG_P (SET_SRC (PATTERN (i3)))
2760 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2761 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2762 && GET_CODE (PATTERN (i2)) == PARALLEL
2763 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2764 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2765 below would need to check what is inside (and reg_overlap_mentioned_p
2766 doesn't support those codes anyway). Don't allow those destinations;
2767 the resulting insn isn't likely to be recognized anyway. */
2768 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2769 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2770 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2771 SET_DEST (PATTERN (i3)))
2772 && next_active_insn (i2) == i3)
2773 {
2774 rtx p2 = PATTERN (i2);
2775
2776 /* Make sure that the destination of I3,
2777 which we are going to substitute into one output of I2,
2778 is not used within another output of I2. We must avoid making this:
2779 (parallel [(set (mem (reg 69)) ...)
2780 (set (reg 69) ...)])
2781 which is not well-defined as to order of actions.
2782 (Besides, reload can't handle output reloads for this.)
2783
2784 The problem can also happen if the dest of I3 is a memory ref,
2785 if another dest in I2 is an indirect memory ref. */
2786 for (i = 0; i < XVECLEN (p2, 0); i++)
2787 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2788 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2789 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2790 SET_DEST (XVECEXP (p2, 0, i))))
2791 break;
2792
2793 /* Make sure this PARALLEL is not an asm. We do not allow combining
2794 that usually (see can_combine_p), so do not here either. */
2795 for (i = 0; i < XVECLEN (p2, 0); i++)
2796 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2797 && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2798 break;
2799
2800 if (i == XVECLEN (p2, 0))
2801 for (i = 0; i < XVECLEN (p2, 0); i++)
2802 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2803 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2804 {
2805 combine_merges++;
2806
2807 subst_insn = i3;
2808 subst_low_luid = DF_INSN_LUID (i2);
2809
2810 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2811 i2src = SET_SRC (XVECEXP (p2, 0, i));
2812 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2813 i2dest_killed = dead_or_set_p (i2, i2dest);
2814
2815 /* Replace the dest in I2 with our dest and make the resulting
2816 insn the new pattern for I3. Then skip to where we validate
2817 the pattern. Everything was set up above. */
2818 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2819 newpat = p2;
2820 i3_subst_into_i2 = 1;
2821 goto validate_replacement;
2822 }
2823 }
2824
2825 /* If I2 is setting a pseudo to a constant and I3 is setting some
2826 sub-part of it to another constant, merge them by making a new
2827 constant. */
2828 if (i1 == 0
2829 && (temp_expr = single_set (i2)) != 0
2830 && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2831 && GET_CODE (PATTERN (i3)) == SET
2832 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2833 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2834 {
2835 rtx dest = SET_DEST (PATTERN (i3));
2836 int offset = -1;
2837 int width = 0;
2838
2839 if (GET_CODE (dest) == ZERO_EXTRACT)
2840 {
2841 if (CONST_INT_P (XEXP (dest, 1))
2842 && CONST_INT_P (XEXP (dest, 2)))
2843 {
2844 width = INTVAL (XEXP (dest, 1));
2845 offset = INTVAL (XEXP (dest, 2));
2846 dest = XEXP (dest, 0);
2847 if (BITS_BIG_ENDIAN)
2848 offset = GET_MODE_PRECISION (GET_MODE (dest)) - width - offset;
2849 }
2850 }
2851 else
2852 {
2853 if (GET_CODE (dest) == STRICT_LOW_PART)
2854 dest = XEXP (dest, 0);
2855 width = GET_MODE_PRECISION (GET_MODE (dest));
2856 offset = 0;
2857 }
2858
2859 if (offset >= 0)
2860 {
2861 /* If this is the low part, we're done. */
2862 if (subreg_lowpart_p (dest))
2863 ;
2864 /* Handle the case where inner is twice the size of outer. */
2865 else if (GET_MODE_PRECISION (GET_MODE (SET_DEST (temp_expr)))
2866 == 2 * GET_MODE_PRECISION (GET_MODE (dest)))
2867 offset += GET_MODE_PRECISION (GET_MODE (dest));
2868 /* Otherwise give up for now. */
2869 else
2870 offset = -1;
2871 }
2872
2873 if (offset >= 0)
2874 {
2875 rtx inner = SET_SRC (PATTERN (i3));
2876 rtx outer = SET_SRC (temp_expr);
2877
2878 wide_int o
2879 = wi::insert (std::make_pair (outer, GET_MODE (SET_DEST (temp_expr))),
2880 std::make_pair (inner, GET_MODE (dest)),
2881 offset, width);
2882
2883 combine_merges++;
2884 subst_insn = i3;
2885 subst_low_luid = DF_INSN_LUID (i2);
2886 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2887 i2dest = SET_DEST (temp_expr);
2888 i2dest_killed = dead_or_set_p (i2, i2dest);
2889
2890 /* Replace the source in I2 with the new constant and make the
2891 resulting insn the new pattern for I3. Then skip to where we
2892 validate the pattern. Everything was set up above. */
2893 SUBST (SET_SRC (temp_expr),
2894 immed_wide_int_const (o, GET_MODE (SET_DEST (temp_expr))));
2895
2896 newpat = PATTERN (i2);
2897
2898 /* The dest of I3 has been replaced with the dest of I2. */
2899 changed_i3_dest = 1;
2900 goto validate_replacement;
2901 }
2902 }
2903
2904 /* If we have no I1 and I2 looks like:
2905 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2906 (set Y OP)])
2907 make up a dummy I1 that is
2908 (set Y OP)
2909 and change I2 to be
2910 (set (reg:CC X) (compare:CC Y (const_int 0)))
2911
2912 (We can ignore any trailing CLOBBERs.)
2913
2914 This undoes a previous combination and allows us to match a branch-and-
2915 decrement insn. */
2916
2917 if (!HAVE_cc0 && i1 == 0
2918 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2919 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2920 == MODE_CC)
2921 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2922 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2923 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2924 SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
2925 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2926 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2927 {
2928 /* We make I1 with the same INSN_UID as I2. This gives it
2929 the same DF_INSN_LUID for value tracking. Our fake I1 will
2930 never appear in the insn stream so giving it the same INSN_UID
2931 as I2 will not cause a problem. */
2932
2933 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2934 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
2935 -1, NULL_RTX);
2936 INSN_UID (i1) = INSN_UID (i2);
2937
2938 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
2939 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
2940 SET_DEST (PATTERN (i1)));
2941 unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
2942 SUBST_LINK (LOG_LINKS (i2),
2943 alloc_insn_link (i1, regno, LOG_LINKS (i2)));
2944 }
2945
2946 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
2947 make those two SETs separate I1 and I2 insns, and make an I0 that is
2948 the original I1. */
2949 if (!HAVE_cc0 && i0 == 0
2950 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2951 && can_split_parallel_of_n_reg_sets (i2, 2)
2952 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2953 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2954 {
2955 /* If there is no I1, there is no I0 either. */
2956 i0 = i1;
2957
2958 /* We make I1 with the same INSN_UID as I2. This gives it
2959 the same DF_INSN_LUID for value tracking. Our fake I1 will
2960 never appear in the insn stream so giving it the same INSN_UID
2961 as I2 will not cause a problem. */
2962
2963 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2964 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
2965 -1, NULL_RTX);
2966 INSN_UID (i1) = INSN_UID (i2);
2967
2968 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
2969 }
2970
2971 /* Verify that I2 and I1 are valid for combining. */
2972 if (! can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src)
2973 || (i1 && ! can_combine_p (i1, i3, i0, NULL, i2, NULL,
2974 &i1dest, &i1src))
2975 || (i0 && ! can_combine_p (i0, i3, NULL, NULL, i1, i2,
2976 &i0dest, &i0src)))
2977 {
2978 undo_all ();
2979 return 0;
2980 }
2981
2982 /* Record whether I2DEST is used in I2SRC and similarly for the other
2983 cases. Knowing this will help in register status updating below. */
2984 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
2985 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
2986 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
2987 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
2988 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
2989 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
2990 i2dest_killed = dead_or_set_p (i2, i2dest);
2991 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
2992 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
2993
2994 /* For the earlier insns, determine which of the subsequent ones they
2995 feed. */
2996 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
2997 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
2998 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
2999 : (!reg_overlap_mentioned_p (i1dest, i0dest)
3000 && reg_overlap_mentioned_p (i0dest, i2src))));
3001
3002 /* Ensure that I3's pattern can be the destination of combines. */
3003 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
3004 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
3005 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
3006 || (i1dest_in_i0src && !i0_feeds_i1_n)),
3007 &i3dest_killed))
3008 {
3009 undo_all ();
3010 return 0;
3011 }
3012
3013 /* See if any of the insns is a MULT operation. Unless one is, we will
3014 reject a combination that is, since it must be slower. Be conservative
3015 here. */
3016 if (GET_CODE (i2src) == MULT
3017 || (i1 != 0 && GET_CODE (i1src) == MULT)
3018 || (i0 != 0 && GET_CODE (i0src) == MULT)
3019 || (GET_CODE (PATTERN (i3)) == SET
3020 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
3021 have_mult = 1;
3022
3023 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3024 We used to do this EXCEPT in one case: I3 has a post-inc in an
3025 output operand. However, that exception can give rise to insns like
3026 mov r3,(r3)+
3027 which is a famous insn on the PDP-11 where the value of r3 used as the
3028 source was model-dependent. Avoid this sort of thing. */
3029
3030 #if 0
3031 if (!(GET_CODE (PATTERN (i3)) == SET
3032 && REG_P (SET_SRC (PATTERN (i3)))
3033 && MEM_P (SET_DEST (PATTERN (i3)))
3034 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3035 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3036 /* It's not the exception. */
3037 #endif
3038 if (AUTO_INC_DEC)
3039 {
3040 rtx link;
3041 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3042 if (REG_NOTE_KIND (link) == REG_INC
3043 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3044 || (i1 != 0
3045 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3046 {
3047 undo_all ();
3048 return 0;
3049 }
3050 }
3051
3052 /* See if the SETs in I1 or I2 need to be kept around in the merged
3053 instruction: whenever the value set there is still needed past I3.
3054 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3055
3056 For the SET in I1, we have two cases: if I1 and I2 independently feed
3057 into I3, the set in I1 needs to be kept around unless I1DEST dies
3058 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
3059 in I1 needs to be kept around unless I1DEST dies or is set in either
3060 I2 or I3. The same considerations apply to I0. */
3061
3062 added_sets_2 = !dead_or_set_p (i3, i2dest);
3063
3064 if (i1)
3065 added_sets_1 = !(dead_or_set_p (i3, i1dest)
3066 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3067 else
3068 added_sets_1 = 0;
3069
3070 if (i0)
3071 added_sets_0 = !(dead_or_set_p (i3, i0dest)
3072 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3073 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3074 && dead_or_set_p (i2, i0dest)));
3075 else
3076 added_sets_0 = 0;
3077
3078 /* We are about to copy insns for the case where they need to be kept
3079 around. Check that they can be copied in the merged instruction. */
3080
3081 if (targetm.cannot_copy_insn_p
3082 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3083 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3084 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3085 {
3086 undo_all ();
3087 return 0;
3088 }
3089
3090 /* If the set in I2 needs to be kept around, we must make a copy of
3091 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3092 PATTERN (I2), we are only substituting for the original I1DEST, not into
3093 an already-substituted copy. This also prevents making self-referential
3094 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3095 I2DEST. */
3096
3097 if (added_sets_2)
3098 {
3099 if (GET_CODE (PATTERN (i2)) == PARALLEL)
3100 i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3101 else
3102 i2pat = copy_rtx (PATTERN (i2));
3103 }
3104
3105 if (added_sets_1)
3106 {
3107 if (GET_CODE (PATTERN (i1)) == PARALLEL)
3108 i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3109 else
3110 i1pat = copy_rtx (PATTERN (i1));
3111 }
3112
3113 if (added_sets_0)
3114 {
3115 if (GET_CODE (PATTERN (i0)) == PARALLEL)
3116 i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3117 else
3118 i0pat = copy_rtx (PATTERN (i0));
3119 }
3120
3121 combine_merges++;
3122
3123 /* Substitute in the latest insn for the regs set by the earlier ones. */
3124
3125 maxreg = max_reg_num ();
3126
3127 subst_insn = i3;
3128
3129 /* Many machines that don't use CC0 have insns that can both perform an
3130 arithmetic operation and set the condition code. These operations will
3131 be represented as a PARALLEL with the first element of the vector
3132 being a COMPARE of an arithmetic operation with the constant zero.
3133 The second element of the vector will set some pseudo to the result
3134 of the same arithmetic operation. If we simplify the COMPARE, we won't
3135 match such a pattern and so will generate an extra insn. Here we test
3136 for this case, where both the comparison and the operation result are
3137 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3138 I2SRC. Later we will make the PARALLEL that contains I2. */
3139
3140 if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3141 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3142 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3143 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3144 {
3145 rtx newpat_dest;
3146 rtx *cc_use_loc = NULL;
3147 rtx_insn *cc_use_insn = NULL;
3148 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3149 machine_mode compare_mode, orig_compare_mode;
3150 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3151
3152 newpat = PATTERN (i3);
3153 newpat_dest = SET_DEST (newpat);
3154 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3155
3156 if (undobuf.other_insn == 0
3157 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3158 &cc_use_insn)))
3159 {
3160 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3161 compare_code = simplify_compare_const (compare_code,
3162 GET_MODE (i2dest), op0, &op1);
3163 target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3164 }
3165
3166 /* Do the rest only if op1 is const0_rtx, which may be the
3167 result of simplification. */
3168 if (op1 == const0_rtx)
3169 {
3170 /* If a single use of the CC is found, prepare to modify it
3171 when SELECT_CC_MODE returns a new CC-class mode, or when
3172 the above simplify_compare_const() returned a new comparison
3173 operator. undobuf.other_insn is assigned the CC use insn
3174 when modifying it. */
3175 if (cc_use_loc)
3176 {
3177 #ifdef SELECT_CC_MODE
3178 machine_mode new_mode
3179 = SELECT_CC_MODE (compare_code, op0, op1);
3180 if (new_mode != orig_compare_mode
3181 && can_change_dest_mode (SET_DEST (newpat),
3182 added_sets_2, new_mode))
3183 {
3184 unsigned int regno = REGNO (newpat_dest);
3185 compare_mode = new_mode;
3186 if (regno < FIRST_PSEUDO_REGISTER)
3187 newpat_dest = gen_rtx_REG (compare_mode, regno);
3188 else
3189 {
3190 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3191 newpat_dest = regno_reg_rtx[regno];
3192 }
3193 }
3194 #endif
3195 /* Cases for modifying the CC-using comparison. */
3196 if (compare_code != orig_compare_code
3197 /* ??? Do we need to verify the zero rtx? */
3198 && XEXP (*cc_use_loc, 1) == const0_rtx)
3199 {
3200 /* Replace cc_use_loc with entire new RTX. */
3201 SUBST (*cc_use_loc,
3202 gen_rtx_fmt_ee (compare_code, compare_mode,
3203 newpat_dest, const0_rtx));
3204 undobuf.other_insn = cc_use_insn;
3205 }
3206 else if (compare_mode != orig_compare_mode)
3207 {
3208 /* Just replace the CC reg with a new mode. */
3209 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3210 undobuf.other_insn = cc_use_insn;
3211 }
3212 }
3213
3214 /* Now we modify the current newpat:
3215 First, SET_DEST(newpat) is updated if the CC mode has been
3216 altered. For targets without SELECT_CC_MODE, this should be
3217 optimized away. */
3218 if (compare_mode != orig_compare_mode)
3219 SUBST (SET_DEST (newpat), newpat_dest);
3220 /* This is always done to propagate i2src into newpat. */
3221 SUBST (SET_SRC (newpat),
3222 gen_rtx_COMPARE (compare_mode, op0, op1));
3223 /* Create new version of i2pat if needed; the below PARALLEL
3224 creation needs this to work correctly. */
3225 if (! rtx_equal_p (i2src, op0))
3226 i2pat = gen_rtx_SET (i2dest, op0);
3227 i2_is_used = 1;
3228 }
3229 }
3230
3231 if (i2_is_used == 0)
3232 {
3233 /* It is possible that the source of I2 or I1 may be performing
3234 an unneeded operation, such as a ZERO_EXTEND of something
3235 that is known to have the high part zero. Handle that case
3236 by letting subst look at the inner insns.
3237
3238 Another way to do this would be to have a function that tries
3239 to simplify a single insn instead of merging two or more
3240 insns. We don't do this because of the potential of infinite
3241 loops and because of the potential extra memory required.
3242 However, doing it the way we are is a bit of a kludge and
3243 doesn't catch all cases.
3244
3245 But only do this if -fexpensive-optimizations since it slows
3246 things down and doesn't usually win.
3247
3248 This is not done in the COMPARE case above because the
3249 unmodified I2PAT is used in the PARALLEL and so a pattern
3250 with a modified I2SRC would not match. */
3251
3252 if (flag_expensive_optimizations)
3253 {
3254 /* Pass pc_rtx so no substitutions are done, just
3255 simplifications. */
3256 if (i1)
3257 {
3258 subst_low_luid = DF_INSN_LUID (i1);
3259 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3260 }
3261
3262 subst_low_luid = DF_INSN_LUID (i2);
3263 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3264 }
3265
3266 n_occurrences = 0; /* `subst' counts here */
3267 subst_low_luid = DF_INSN_LUID (i2);
3268
3269 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3270 copy of I2SRC each time we substitute it, in order to avoid creating
3271 self-referential RTL when we will be substituting I1SRC for I1DEST
3272 later. Likewise if I0 feeds into I2, either directly or indirectly
3273 through I1, and I0DEST is in I0SRC. */
3274 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3275 (i1_feeds_i2_n && i1dest_in_i1src)
3276 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3277 && i0dest_in_i0src));
3278 substed_i2 = 1;
3279
3280 /* Record whether I2's body now appears within I3's body. */
3281 i2_is_used = n_occurrences;
3282 }
3283
3284 /* If we already got a failure, don't try to do more. Otherwise, try to
3285 substitute I1 if we have it. */
3286
3287 if (i1 && GET_CODE (newpat) != CLOBBER)
3288 {
3289 /* Check that an autoincrement side-effect on I1 has not been lost.
3290 This happens if I1DEST is mentioned in I2 and dies there, and
3291 has disappeared from the new pattern. */
3292 if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3293 && i1_feeds_i2_n
3294 && dead_or_set_p (i2, i1dest)
3295 && !reg_overlap_mentioned_p (i1dest, newpat))
3296 /* Before we can do this substitution, we must redo the test done
3297 above (see detailed comments there) that ensures I1DEST isn't
3298 mentioned in any SETs in NEWPAT that are field assignments. */
3299 || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3300 0, 0, 0))
3301 {
3302 undo_all ();
3303 return 0;
3304 }
3305
3306 n_occurrences = 0;
3307 subst_low_luid = DF_INSN_LUID (i1);
3308
3309 /* If the following substitution will modify I1SRC, make a copy of it
3310 for the case where it is substituted for I1DEST in I2PAT later. */
3311 if (added_sets_2 && i1_feeds_i2_n)
3312 i1src_copy = copy_rtx (i1src);
3313
3314 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3315 copy of I1SRC each time we substitute it, in order to avoid creating
3316 self-referential RTL when we will be substituting I0SRC for I0DEST
3317 later. */
3318 newpat = subst (newpat, i1dest, i1src, 0, 0,
3319 i0_feeds_i1_n && i0dest_in_i0src);
3320 substed_i1 = 1;
3321
3322 /* Record whether I1's body now appears within I3's body. */
3323 i1_is_used = n_occurrences;
3324 }
3325
3326 /* Likewise for I0 if we have it. */
3327
3328 if (i0 && GET_CODE (newpat) != CLOBBER)
3329 {
3330 if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3331 && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3332 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3333 && !reg_overlap_mentioned_p (i0dest, newpat))
3334 || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3335 0, 0, 0))
3336 {
3337 undo_all ();
3338 return 0;
3339 }
3340
3341 /* If the following substitution will modify I0SRC, make a copy of it
3342 for the case where it is substituted for I0DEST in I1PAT later. */
3343 if (added_sets_1 && i0_feeds_i1_n)
3344 i0src_copy = copy_rtx (i0src);
3345 /* And a copy for I0DEST in I2PAT substitution. */
3346 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3347 || (i0_feeds_i2_n)))
3348 i0src_copy2 = copy_rtx (i0src);
3349
3350 n_occurrences = 0;
3351 subst_low_luid = DF_INSN_LUID (i0);
3352 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3353 substed_i0 = 1;
3354 }
3355
3356 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3357 to count all the ways that I2SRC and I1SRC can be used. */
3358 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3359 && i2_is_used + added_sets_2 > 1)
3360 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3361 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3362 > 1))
3363 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3364 && (n_occurrences + added_sets_0
3365 + (added_sets_1 && i0_feeds_i1_n)
3366 + (added_sets_2 && i0_feeds_i2_n)
3367 > 1))
3368 /* Fail if we tried to make a new register. */
3369 || max_reg_num () != maxreg
3370 /* Fail if we couldn't do something and have a CLOBBER. */
3371 || GET_CODE (newpat) == CLOBBER
3372 /* Fail if this new pattern is a MULT and we didn't have one before
3373 at the outer level. */
3374 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3375 && ! have_mult))
3376 {
3377 undo_all ();
3378 return 0;
3379 }
3380
3381 /* If the actions of the earlier insns must be kept
3382 in addition to substituting them into the latest one,
3383 we must make a new PARALLEL for the latest insn
3384 to hold additional the SETs. */
3385
3386 if (added_sets_0 || added_sets_1 || added_sets_2)
3387 {
3388 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3389 combine_extras++;
3390
3391 if (GET_CODE (newpat) == PARALLEL)
3392 {
3393 rtvec old = XVEC (newpat, 0);
3394 total_sets = XVECLEN (newpat, 0) + extra_sets;
3395 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3396 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3397 sizeof (old->elem[0]) * old->num_elem);
3398 }
3399 else
3400 {
3401 rtx old = newpat;
3402 total_sets = 1 + extra_sets;
3403 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3404 XVECEXP (newpat, 0, 0) = old;
3405 }
3406
3407 if (added_sets_0)
3408 XVECEXP (newpat, 0, --total_sets) = i0pat;
3409
3410 if (added_sets_1)
3411 {
3412 rtx t = i1pat;
3413 if (i0_feeds_i1_n)
3414 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3415
3416 XVECEXP (newpat, 0, --total_sets) = t;
3417 }
3418 if (added_sets_2)
3419 {
3420 rtx t = i2pat;
3421 if (i1_feeds_i2_n)
3422 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3423 i0_feeds_i1_n && i0dest_in_i0src);
3424 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3425 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3426
3427 XVECEXP (newpat, 0, --total_sets) = t;
3428 }
3429 }
3430
3431 validate_replacement:
3432
3433 /* Note which hard regs this insn has as inputs. */
3434 mark_used_regs_combine (newpat);
3435
3436 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3437 consider splitting this pattern, we might need these clobbers. */
3438 if (i1 && GET_CODE (newpat) == PARALLEL
3439 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3440 {
3441 int len = XVECLEN (newpat, 0);
3442
3443 newpat_vec_with_clobbers = rtvec_alloc (len);
3444 for (i = 0; i < len; i++)
3445 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3446 }
3447
3448 /* We have recognized nothing yet. */
3449 insn_code_number = -1;
3450
3451 /* See if this is a PARALLEL of two SETs where one SET's destination is
3452 a register that is unused and this isn't marked as an instruction that
3453 might trap in an EH region. In that case, we just need the other SET.
3454 We prefer this over the PARALLEL.
3455
3456 This can occur when simplifying a divmod insn. We *must* test for this
3457 case here because the code below that splits two independent SETs doesn't
3458 handle this case correctly when it updates the register status.
3459
3460 It's pointless doing this if we originally had two sets, one from
3461 i3, and one from i2. Combining then splitting the parallel results
3462 in the original i2 again plus an invalid insn (which we delete).
3463 The net effect is only to move instructions around, which makes
3464 debug info less accurate. */
3465
3466 if (!(added_sets_2 && i1 == 0)
3467 && is_parallel_of_n_reg_sets (newpat, 2)
3468 && asm_noperands (newpat) < 0)
3469 {
3470 rtx set0 = XVECEXP (newpat, 0, 0);
3471 rtx set1 = XVECEXP (newpat, 0, 1);
3472 rtx oldpat = newpat;
3473
3474 if (((REG_P (SET_DEST (set1))
3475 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3476 || (GET_CODE (SET_DEST (set1)) == SUBREG
3477 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3478 && insn_nothrow_p (i3)
3479 && !side_effects_p (SET_SRC (set1)))
3480 {
3481 newpat = set0;
3482 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3483 }
3484
3485 else if (((REG_P (SET_DEST (set0))
3486 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3487 || (GET_CODE (SET_DEST (set0)) == SUBREG
3488 && find_reg_note (i3, REG_UNUSED,
3489 SUBREG_REG (SET_DEST (set0)))))
3490 && insn_nothrow_p (i3)
3491 && !side_effects_p (SET_SRC (set0)))
3492 {
3493 newpat = set1;
3494 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3495
3496 if (insn_code_number >= 0)
3497 changed_i3_dest = 1;
3498 }
3499
3500 if (insn_code_number < 0)
3501 newpat = oldpat;
3502 }
3503
3504 /* Is the result of combination a valid instruction? */
3505 if (insn_code_number < 0)
3506 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3507
3508 /* If we were combining three insns and the result is a simple SET
3509 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3510 insns. There are two ways to do this. It can be split using a
3511 machine-specific method (like when you have an addition of a large
3512 constant) or by combine in the function find_split_point. */
3513
3514 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3515 && asm_noperands (newpat) < 0)
3516 {
3517 rtx parallel, *split;
3518 rtx_insn *m_split_insn;
3519
3520 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3521 use I2DEST as a scratch register will help. In the latter case,
3522 convert I2DEST to the mode of the source of NEWPAT if we can. */
3523
3524 m_split_insn = combine_split_insns (newpat, i3);
3525
3526 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3527 inputs of NEWPAT. */
3528
3529 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3530 possible to try that as a scratch reg. This would require adding
3531 more code to make it work though. */
3532
3533 if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3534 {
3535 machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3536
3537 /* First try to split using the original register as a
3538 scratch register. */
3539 parallel = gen_rtx_PARALLEL (VOIDmode,
3540 gen_rtvec (2, newpat,
3541 gen_rtx_CLOBBER (VOIDmode,
3542 i2dest)));
3543 m_split_insn = combine_split_insns (parallel, i3);
3544
3545 /* If that didn't work, try changing the mode of I2DEST if
3546 we can. */
3547 if (m_split_insn == 0
3548 && new_mode != GET_MODE (i2dest)
3549 && new_mode != VOIDmode
3550 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3551 {
3552 machine_mode old_mode = GET_MODE (i2dest);
3553 rtx ni2dest;
3554
3555 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3556 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3557 else
3558 {
3559 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3560 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3561 }
3562
3563 parallel = (gen_rtx_PARALLEL
3564 (VOIDmode,
3565 gen_rtvec (2, newpat,
3566 gen_rtx_CLOBBER (VOIDmode,
3567 ni2dest))));
3568 m_split_insn = combine_split_insns (parallel, i3);
3569
3570 if (m_split_insn == 0
3571 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3572 {
3573 struct undo *buf;
3574
3575 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3576 buf = undobuf.undos;
3577 undobuf.undos = buf->next;
3578 buf->next = undobuf.frees;
3579 undobuf.frees = buf;
3580 }
3581 }
3582
3583 i2scratch = m_split_insn != 0;
3584 }
3585
3586 /* If recog_for_combine has discarded clobbers, try to use them
3587 again for the split. */
3588 if (m_split_insn == 0 && newpat_vec_with_clobbers)
3589 {
3590 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3591 m_split_insn = combine_split_insns (parallel, i3);
3592 }
3593
3594 if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3595 {
3596 rtx m_split_pat = PATTERN (m_split_insn);
3597 insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3598 if (insn_code_number >= 0)
3599 newpat = m_split_pat;
3600 }
3601 else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3602 && (next_nonnote_nondebug_insn (i2) == i3
3603 || ! use_crosses_set_p (PATTERN (m_split_insn), DF_INSN_LUID (i2))))
3604 {
3605 rtx i2set, i3set;
3606 rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3607 newi2pat = PATTERN (m_split_insn);
3608
3609 i3set = single_set (NEXT_INSN (m_split_insn));
3610 i2set = single_set (m_split_insn);
3611
3612 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3613
3614 /* If I2 or I3 has multiple SETs, we won't know how to track
3615 register status, so don't use these insns. If I2's destination
3616 is used between I2 and I3, we also can't use these insns. */
3617
3618 if (i2_code_number >= 0 && i2set && i3set
3619 && (next_nonnote_nondebug_insn (i2) == i3
3620 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3621 insn_code_number = recog_for_combine (&newi3pat, i3,
3622 &new_i3_notes);
3623 if (insn_code_number >= 0)
3624 newpat = newi3pat;
3625
3626 /* It is possible that both insns now set the destination of I3.
3627 If so, we must show an extra use of it. */
3628
3629 if (insn_code_number >= 0)
3630 {
3631 rtx new_i3_dest = SET_DEST (i3set);
3632 rtx new_i2_dest = SET_DEST (i2set);
3633
3634 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3635 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3636 || GET_CODE (new_i3_dest) == SUBREG)
3637 new_i3_dest = XEXP (new_i3_dest, 0);
3638
3639 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3640 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3641 || GET_CODE (new_i2_dest) == SUBREG)
3642 new_i2_dest = XEXP (new_i2_dest, 0);
3643
3644 if (REG_P (new_i3_dest)
3645 && REG_P (new_i2_dest)
3646 && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3647 && REGNO (new_i2_dest) < reg_n_sets_max)
3648 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3649 }
3650 }
3651
3652 /* If we can split it and use I2DEST, go ahead and see if that
3653 helps things be recognized. Verify that none of the registers
3654 are set between I2 and I3. */
3655 if (insn_code_number < 0
3656 && (split = find_split_point (&newpat, i3, false)) != 0
3657 && (!HAVE_cc0 || REG_P (i2dest))
3658 /* We need I2DEST in the proper mode. If it is a hard register
3659 or the only use of a pseudo, we can change its mode.
3660 Make sure we don't change a hard register to have a mode that
3661 isn't valid for it, or change the number of registers. */
3662 && (GET_MODE (*split) == GET_MODE (i2dest)
3663 || GET_MODE (*split) == VOIDmode
3664 || can_change_dest_mode (i2dest, added_sets_2,
3665 GET_MODE (*split)))
3666 && (next_nonnote_nondebug_insn (i2) == i3
3667 || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
3668 /* We can't overwrite I2DEST if its value is still used by
3669 NEWPAT. */
3670 && ! reg_referenced_p (i2dest, newpat))
3671 {
3672 rtx newdest = i2dest;
3673 enum rtx_code split_code = GET_CODE (*split);
3674 machine_mode split_mode = GET_MODE (*split);
3675 bool subst_done = false;
3676 newi2pat = NULL_RTX;
3677
3678 i2scratch = true;
3679
3680 /* *SPLIT may be part of I2SRC, so make sure we have the
3681 original expression around for later debug processing.
3682 We should not need I2SRC any more in other cases. */
3683 if (MAY_HAVE_DEBUG_INSNS)
3684 i2src = copy_rtx (i2src);
3685 else
3686 i2src = NULL;
3687
3688 /* Get NEWDEST as a register in the proper mode. We have already
3689 validated that we can do this. */
3690 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3691 {
3692 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3693 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3694 else
3695 {
3696 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3697 newdest = regno_reg_rtx[REGNO (i2dest)];
3698 }
3699 }
3700
3701 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3702 an ASHIFT. This can occur if it was inside a PLUS and hence
3703 appeared to be a memory address. This is a kludge. */
3704 if (split_code == MULT
3705 && CONST_INT_P (XEXP (*split, 1))
3706 && INTVAL (XEXP (*split, 1)) > 0
3707 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3708 {
3709 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3710 XEXP (*split, 0), GEN_INT (i)));
3711 /* Update split_code because we may not have a multiply
3712 anymore. */
3713 split_code = GET_CODE (*split);
3714 }
3715
3716 /* Similarly for (plus (mult FOO (const_int pow2))). */
3717 if (split_code == PLUS
3718 && GET_CODE (XEXP (*split, 0)) == MULT
3719 && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3720 && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3721 && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3722 {
3723 rtx nsplit = XEXP (*split, 0);
3724 SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3725 XEXP (nsplit, 0), GEN_INT (i)));
3726 /* Update split_code because we may not have a multiply
3727 anymore. */
3728 split_code = GET_CODE (*split);
3729 }
3730
3731 #ifdef INSN_SCHEDULING
3732 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3733 be written as a ZERO_EXTEND. */
3734 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3735 {
3736 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3737 what it really is. */
3738 if (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (*split)))
3739 == SIGN_EXTEND)
3740 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3741 SUBREG_REG (*split)));
3742 else
3743 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3744 SUBREG_REG (*split)));
3745 }
3746 #endif
3747
3748 /* Attempt to split binary operators using arithmetic identities. */
3749 if (BINARY_P (SET_SRC (newpat))
3750 && split_mode == GET_MODE (SET_SRC (newpat))
3751 && ! side_effects_p (SET_SRC (newpat)))
3752 {
3753 rtx setsrc = SET_SRC (newpat);
3754 machine_mode mode = GET_MODE (setsrc);
3755 enum rtx_code code = GET_CODE (setsrc);
3756 rtx src_op0 = XEXP (setsrc, 0);
3757 rtx src_op1 = XEXP (setsrc, 1);
3758
3759 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3760 if (rtx_equal_p (src_op0, src_op1))
3761 {
3762 newi2pat = gen_rtx_SET (newdest, src_op0);
3763 SUBST (XEXP (setsrc, 0), newdest);
3764 SUBST (XEXP (setsrc, 1), newdest);
3765 subst_done = true;
3766 }
3767 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3768 else if ((code == PLUS || code == MULT)
3769 && GET_CODE (src_op0) == code
3770 && GET_CODE (XEXP (src_op0, 0)) == code
3771 && (INTEGRAL_MODE_P (mode)
3772 || (FLOAT_MODE_P (mode)
3773 && flag_unsafe_math_optimizations)))
3774 {
3775 rtx p = XEXP (XEXP (src_op0, 0), 0);
3776 rtx q = XEXP (XEXP (src_op0, 0), 1);
3777 rtx r = XEXP (src_op0, 1);
3778 rtx s = src_op1;
3779
3780 /* Split both "((X op Y) op X) op Y" and
3781 "((X op Y) op Y) op X" as "T op T" where T is
3782 "X op Y". */
3783 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3784 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3785 {
3786 newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3787 SUBST (XEXP (setsrc, 0), newdest);
3788 SUBST (XEXP (setsrc, 1), newdest);
3789 subst_done = true;
3790 }
3791 /* Split "((X op X) op Y) op Y)" as "T op T" where
3792 T is "X op Y". */
3793 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3794 {
3795 rtx tmp = simplify_gen_binary (code, mode, p, r);
3796 newi2pat = gen_rtx_SET (newdest, tmp);
3797 SUBST (XEXP (setsrc, 0), newdest);
3798 SUBST (XEXP (setsrc, 1), newdest);
3799 subst_done = true;
3800 }
3801 }
3802 }
3803
3804 if (!subst_done)
3805 {
3806 newi2pat = gen_rtx_SET (newdest, *split);
3807 SUBST (*split, newdest);
3808 }
3809
3810 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3811
3812 /* recog_for_combine might have added CLOBBERs to newi2pat.
3813 Make sure NEWPAT does not depend on the clobbered regs. */
3814 if (GET_CODE (newi2pat) == PARALLEL)
3815 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3816 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3817 {
3818 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3819 if (reg_overlap_mentioned_p (reg, newpat))
3820 {
3821 undo_all ();
3822 return 0;
3823 }
3824 }
3825
3826 /* If the split point was a MULT and we didn't have one before,
3827 don't use one now. */
3828 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3829 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3830 }
3831 }
3832
3833 /* Check for a case where we loaded from memory in a narrow mode and
3834 then sign extended it, but we need both registers. In that case,
3835 we have a PARALLEL with both loads from the same memory location.
3836 We can split this into a load from memory followed by a register-register
3837 copy. This saves at least one insn, more if register allocation can
3838 eliminate the copy.
3839
3840 We cannot do this if the destination of the first assignment is a
3841 condition code register or cc0. We eliminate this case by making sure
3842 the SET_DEST and SET_SRC have the same mode.
3843
3844 We cannot do this if the destination of the second assignment is
3845 a register that we have already assumed is zero-extended. Similarly
3846 for a SUBREG of such a register. */
3847
3848 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3849 && GET_CODE (newpat) == PARALLEL
3850 && XVECLEN (newpat, 0) == 2
3851 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3852 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3853 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3854 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3855 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3856 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3857 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3858 && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3859 DF_INSN_LUID (i2))
3860 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3861 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3862 && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
3863 (REG_P (temp_expr)
3864 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3865 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3866 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3867 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3868 != GET_MODE_MASK (word_mode))))
3869 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3870 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3871 (REG_P (temp_expr)
3872 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3873 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3874 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3875 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3876 != GET_MODE_MASK (word_mode)))))
3877 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3878 SET_SRC (XVECEXP (newpat, 0, 1)))
3879 && ! find_reg_note (i3, REG_UNUSED,
3880 SET_DEST (XVECEXP (newpat, 0, 0))))
3881 {
3882 rtx ni2dest;
3883
3884 newi2pat = XVECEXP (newpat, 0, 0);
3885 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3886 newpat = XVECEXP (newpat, 0, 1);
3887 SUBST (SET_SRC (newpat),
3888 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3889 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3890
3891 if (i2_code_number >= 0)
3892 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3893
3894 if (insn_code_number >= 0)
3895 swap_i2i3 = 1;
3896 }
3897
3898 /* Similarly, check for a case where we have a PARALLEL of two independent
3899 SETs but we started with three insns. In this case, we can do the sets
3900 as two separate insns. This case occurs when some SET allows two
3901 other insns to combine, but the destination of that SET is still live.
3902
3903 Also do this if we started with two insns and (at least) one of the
3904 resulting sets is a noop; this noop will be deleted later. */
3905
3906 else if (insn_code_number < 0 && asm_noperands (newpat) < 0
3907 && GET_CODE (newpat) == PARALLEL
3908 && XVECLEN (newpat, 0) == 2
3909 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3910 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3911 && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
3912 || set_noop_p (XVECEXP (newpat, 0, 1)))
3913 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
3914 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
3915 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3916 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3917 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3918 XVECEXP (newpat, 0, 0))
3919 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
3920 XVECEXP (newpat, 0, 1))
3921 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
3922 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
3923 {
3924 rtx set0 = XVECEXP (newpat, 0, 0);
3925 rtx set1 = XVECEXP (newpat, 0, 1);
3926
3927 /* Normally, it doesn't matter which of the two is done first,
3928 but the one that references cc0 can't be the second, and
3929 one which uses any regs/memory set in between i2 and i3 can't
3930 be first. The PARALLEL might also have been pre-existing in i3,
3931 so we need to make sure that we won't wrongly hoist a SET to i2
3932 that would conflict with a death note present in there. */
3933 if (!use_crosses_set_p (SET_SRC (set1), DF_INSN_LUID (i2))
3934 && !(REG_P (SET_DEST (set1))
3935 && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
3936 && !(GET_CODE (SET_DEST (set1)) == SUBREG
3937 && find_reg_note (i2, REG_DEAD,
3938 SUBREG_REG (SET_DEST (set1))))
3939 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
3940 /* If I3 is a jump, ensure that set0 is a jump so that
3941 we do not create invalid RTL. */
3942 && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
3943 )
3944 {
3945 newi2pat = set1;
3946 newpat = set0;
3947 }
3948 else if (!use_crosses_set_p (SET_SRC (set0), DF_INSN_LUID (i2))
3949 && !(REG_P (SET_DEST (set0))
3950 && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
3951 && !(GET_CODE (SET_DEST (set0)) == SUBREG
3952 && find_reg_note (i2, REG_DEAD,
3953 SUBREG_REG (SET_DEST (set0))))
3954 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
3955 /* If I3 is a jump, ensure that set1 is a jump so that
3956 we do not create invalid RTL. */
3957 && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
3958 )
3959 {
3960 newi2pat = set0;
3961 newpat = set1;
3962 }
3963 else
3964 {
3965 undo_all ();
3966 return 0;
3967 }
3968
3969 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3970
3971 if (i2_code_number >= 0)
3972 {
3973 /* recog_for_combine might have added CLOBBERs to newi2pat.
3974 Make sure NEWPAT does not depend on the clobbered regs. */
3975 if (GET_CODE (newi2pat) == PARALLEL)
3976 {
3977 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3978 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3979 {
3980 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3981 if (reg_overlap_mentioned_p (reg, newpat))
3982 {
3983 undo_all ();
3984 return 0;
3985 }
3986 }
3987 }
3988
3989 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3990 }
3991 }
3992
3993 /* If it still isn't recognized, fail and change things back the way they
3994 were. */
3995 if ((insn_code_number < 0
3996 /* Is the result a reasonable ASM_OPERANDS? */
3997 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
3998 {
3999 undo_all ();
4000 return 0;
4001 }
4002
4003 /* If we had to change another insn, make sure it is valid also. */
4004 if (undobuf.other_insn)
4005 {
4006 CLEAR_HARD_REG_SET (newpat_used_regs);
4007
4008 other_pat = PATTERN (undobuf.other_insn);
4009 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4010 &new_other_notes);
4011
4012 if (other_code_number < 0 && ! check_asm_operands (other_pat))
4013 {
4014 undo_all ();
4015 return 0;
4016 }
4017 }
4018
4019 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
4020 they are adjacent to each other or not. */
4021 if (HAVE_cc0)
4022 {
4023 rtx_insn *p = prev_nonnote_insn (i3);
4024 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4025 && sets_cc0_p (newi2pat))
4026 {
4027 undo_all ();
4028 return 0;
4029 }
4030 }
4031
4032 /* Only allow this combination if insn_rtx_costs reports that the
4033 replacement instructions are cheaper than the originals. */
4034 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4035 {
4036 undo_all ();
4037 return 0;
4038 }
4039
4040 if (MAY_HAVE_DEBUG_INSNS)
4041 {
4042 struct undo *undo;
4043
4044 for (undo = undobuf.undos; undo; undo = undo->next)
4045 if (undo->kind == UNDO_MODE)
4046 {
4047 rtx reg = *undo->where.r;
4048 machine_mode new_mode = GET_MODE (reg);
4049 machine_mode old_mode = undo->old_contents.m;
4050
4051 /* Temporarily revert mode back. */
4052 adjust_reg_mode (reg, old_mode);
4053
4054 if (reg == i2dest && i2scratch)
4055 {
4056 /* If we used i2dest as a scratch register with a
4057 different mode, substitute it for the original
4058 i2src while its original mode is temporarily
4059 restored, and then clear i2scratch so that we don't
4060 do it again later. */
4061 propagate_for_debug (i2, last_combined_insn, reg, i2src,
4062 this_basic_block);
4063 i2scratch = false;
4064 /* Put back the new mode. */
4065 adjust_reg_mode (reg, new_mode);
4066 }
4067 else
4068 {
4069 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4070 rtx_insn *first, *last;
4071
4072 if (reg == i2dest)
4073 {
4074 first = i2;
4075 last = last_combined_insn;
4076 }
4077 else
4078 {
4079 first = i3;
4080 last = undobuf.other_insn;
4081 gcc_assert (last);
4082 if (DF_INSN_LUID (last)
4083 < DF_INSN_LUID (last_combined_insn))
4084 last = last_combined_insn;
4085 }
4086
4087 /* We're dealing with a reg that changed mode but not
4088 meaning, so we want to turn it into a subreg for
4089 the new mode. However, because of REG sharing and
4090 because its mode had already changed, we have to do
4091 it in two steps. First, replace any debug uses of
4092 reg, with its original mode temporarily restored,
4093 with this copy we have created; then, replace the
4094 copy with the SUBREG of the original shared reg,
4095 once again changed to the new mode. */
4096 propagate_for_debug (first, last, reg, tempreg,
4097 this_basic_block);
4098 adjust_reg_mode (reg, new_mode);
4099 propagate_for_debug (first, last, tempreg,
4100 lowpart_subreg (old_mode, reg, new_mode),
4101 this_basic_block);
4102 }
4103 }
4104 }
4105
4106 /* If we will be able to accept this, we have made a
4107 change to the destination of I3. This requires us to
4108 do a few adjustments. */
4109
4110 if (changed_i3_dest)
4111 {
4112 PATTERN (i3) = newpat;
4113 adjust_for_new_dest (i3);
4114 }
4115
4116 /* We now know that we can do this combination. Merge the insns and
4117 update the status of registers and LOG_LINKS. */
4118
4119 if (undobuf.other_insn)
4120 {
4121 rtx note, next;
4122
4123 PATTERN (undobuf.other_insn) = other_pat;
4124
4125 /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4126 ensure that they are still valid. Then add any non-duplicate
4127 notes added by recog_for_combine. */
4128 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4129 {
4130 next = XEXP (note, 1);
4131
4132 if ((REG_NOTE_KIND (note) == REG_DEAD
4133 && !reg_referenced_p (XEXP (note, 0),
4134 PATTERN (undobuf.other_insn)))
4135 ||(REG_NOTE_KIND (note) == REG_UNUSED
4136 && !reg_set_p (XEXP (note, 0),
4137 PATTERN (undobuf.other_insn))))
4138 remove_note (undobuf.other_insn, note);
4139 }
4140
4141 distribute_notes (new_other_notes, undobuf.other_insn,
4142 undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4143 NULL_RTX);
4144 }
4145
4146 if (swap_i2i3)
4147 {
4148 rtx_insn *insn;
4149 struct insn_link *link;
4150 rtx ni2dest;
4151
4152 /* I3 now uses what used to be its destination and which is now
4153 I2's destination. This requires us to do a few adjustments. */
4154 PATTERN (i3) = newpat;
4155 adjust_for_new_dest (i3);
4156
4157 /* We need a LOG_LINK from I3 to I2. But we used to have one,
4158 so we still will.
4159
4160 However, some later insn might be using I2's dest and have
4161 a LOG_LINK pointing at I3. We must remove this link.
4162 The simplest way to remove the link is to point it at I1,
4163 which we know will be a NOTE. */
4164
4165 /* newi2pat is usually a SET here; however, recog_for_combine might
4166 have added some clobbers. */
4167 if (GET_CODE (newi2pat) == PARALLEL)
4168 ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
4169 else
4170 ni2dest = SET_DEST (newi2pat);
4171
4172 for (insn = NEXT_INSN (i3);
4173 insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4174 || insn != BB_HEAD (this_basic_block->next_bb));
4175 insn = NEXT_INSN (insn))
4176 {
4177 if (INSN_P (insn) && reg_referenced_p (ni2dest, PATTERN (insn)))
4178 {
4179 FOR_EACH_LOG_LINK (link, insn)
4180 if (link->insn == i3)
4181 link->insn = i1;
4182
4183 break;
4184 }
4185 }
4186 }
4187
4188 {
4189 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4190 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4191 rtx midnotes = 0;
4192 int from_luid;
4193 /* Compute which registers we expect to eliminate. newi2pat may be setting
4194 either i3dest or i2dest, so we must check it. */
4195 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4196 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4197 || !i2dest_killed
4198 ? 0 : i2dest);
4199 /* For i1, we need to compute both local elimination and global
4200 elimination information with respect to newi2pat because i1dest
4201 may be the same as i3dest, in which case newi2pat may be setting
4202 i1dest. Global information is used when distributing REG_DEAD
4203 note for i2 and i3, in which case it does matter if newi2pat sets
4204 i1dest or not.
4205
4206 Local information is used when distributing REG_DEAD note for i1,
4207 in which case it doesn't matter if newi2pat sets i1dest or not.
4208 See PR62151, if we have four insns combination:
4209 i0: r0 <- i0src
4210 i1: r1 <- i1src (using r0)
4211 REG_DEAD (r0)
4212 i2: r0 <- i2src (using r1)
4213 i3: r3 <- i3src (using r0)
4214 ix: using r0
4215 From i1's point of view, r0 is eliminated, no matter if it is set
4216 by newi2pat or not. In other words, REG_DEAD info for r0 in i1
4217 should be discarded.
4218
4219 Note local information only affects cases in forms like "I1->I2->I3",
4220 "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like
4221 "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4222 i0dest anyway. */
4223 rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4224 || !i1dest_killed
4225 ? 0 : i1dest);
4226 rtx elim_i1 = (local_elim_i1 == 0
4227 || (newi2pat && reg_set_p (i1dest, newi2pat))
4228 ? 0 : i1dest);
4229 /* Same case as i1. */
4230 rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4231 ? 0 : i0dest);
4232 rtx elim_i0 = (local_elim_i0 == 0
4233 || (newi2pat && reg_set_p (i0dest, newi2pat))
4234 ? 0 : i0dest);
4235
4236 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4237 clear them. */
4238 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4239 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4240 if (i1)
4241 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4242 if (i0)
4243 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4244
4245 /* Ensure that we do not have something that should not be shared but
4246 occurs multiple times in the new insns. Check this by first
4247 resetting all the `used' flags and then copying anything is shared. */
4248
4249 reset_used_flags (i3notes);
4250 reset_used_flags (i2notes);
4251 reset_used_flags (i1notes);
4252 reset_used_flags (i0notes);
4253 reset_used_flags (newpat);
4254 reset_used_flags (newi2pat);
4255 if (undobuf.other_insn)
4256 reset_used_flags (PATTERN (undobuf.other_insn));
4257
4258 i3notes = copy_rtx_if_shared (i3notes);
4259 i2notes = copy_rtx_if_shared (i2notes);
4260 i1notes = copy_rtx_if_shared (i1notes);
4261 i0notes = copy_rtx_if_shared (i0notes);
4262 newpat = copy_rtx_if_shared (newpat);
4263 newi2pat = copy_rtx_if_shared (newi2pat);
4264 if (undobuf.other_insn)
4265 reset_used_flags (PATTERN (undobuf.other_insn));
4266
4267 INSN_CODE (i3) = insn_code_number;
4268 PATTERN (i3) = newpat;
4269
4270 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4271 {
4272 rtx call_usage = CALL_INSN_FUNCTION_USAGE (i3);
4273
4274 reset_used_flags (call_usage);
4275 call_usage = copy_rtx (call_usage);
4276
4277 if (substed_i2)
4278 {
4279 /* I2SRC must still be meaningful at this point. Some splitting
4280 operations can invalidate I2SRC, but those operations do not
4281 apply to calls. */
4282 gcc_assert (i2src);
4283 replace_rtx (call_usage, i2dest, i2src);
4284 }
4285
4286 if (substed_i1)
4287 replace_rtx (call_usage, i1dest, i1src);
4288 if (substed_i0)
4289 replace_rtx (call_usage, i0dest, i0src);
4290
4291 CALL_INSN_FUNCTION_USAGE (i3) = call_usage;
4292 }
4293
4294 if (undobuf.other_insn)
4295 INSN_CODE (undobuf.other_insn) = other_code_number;
4296
4297 /* We had one special case above where I2 had more than one set and
4298 we replaced a destination of one of those sets with the destination
4299 of I3. In that case, we have to update LOG_LINKS of insns later
4300 in this basic block. Note that this (expensive) case is rare.
4301
4302 Also, in this case, we must pretend that all REG_NOTEs for I2
4303 actually came from I3, so that REG_UNUSED notes from I2 will be
4304 properly handled. */
4305
4306 if (i3_subst_into_i2)
4307 {
4308 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4309 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4310 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4311 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4312 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4313 && ! find_reg_note (i2, REG_UNUSED,
4314 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4315 for (temp_insn = NEXT_INSN (i2);
4316 temp_insn
4317 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4318 || BB_HEAD (this_basic_block) != temp_insn);
4319 temp_insn = NEXT_INSN (temp_insn))
4320 if (temp_insn != i3 && INSN_P (temp_insn))
4321 FOR_EACH_LOG_LINK (link, temp_insn)
4322 if (link->insn == i2)
4323 link->insn = i3;
4324
4325 if (i3notes)
4326 {
4327 rtx link = i3notes;
4328 while (XEXP (link, 1))
4329 link = XEXP (link, 1);
4330 XEXP (link, 1) = i2notes;
4331 }
4332 else
4333 i3notes = i2notes;
4334 i2notes = 0;
4335 }
4336
4337 LOG_LINKS (i3) = NULL;
4338 REG_NOTES (i3) = 0;
4339 LOG_LINKS (i2) = NULL;
4340 REG_NOTES (i2) = 0;
4341
4342 if (newi2pat)
4343 {
4344 if (MAY_HAVE_DEBUG_INSNS && i2scratch)
4345 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4346 this_basic_block);
4347 INSN_CODE (i2) = i2_code_number;
4348 PATTERN (i2) = newi2pat;
4349 }
4350 else
4351 {
4352 if (MAY_HAVE_DEBUG_INSNS && i2src)
4353 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4354 this_basic_block);
4355 SET_INSN_DELETED (i2);
4356 }
4357
4358 if (i1)
4359 {
4360 LOG_LINKS (i1) = NULL;
4361 REG_NOTES (i1) = 0;
4362 if (MAY_HAVE_DEBUG_INSNS)
4363 propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4364 this_basic_block);
4365 SET_INSN_DELETED (i1);
4366 }
4367
4368 if (i0)
4369 {
4370 LOG_LINKS (i0) = NULL;
4371 REG_NOTES (i0) = 0;
4372 if (MAY_HAVE_DEBUG_INSNS)
4373 propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4374 this_basic_block);
4375 SET_INSN_DELETED (i0);
4376 }
4377
4378 /* Get death notes for everything that is now used in either I3 or
4379 I2 and used to die in a previous insn. If we built two new
4380 patterns, move from I1 to I2 then I2 to I3 so that we get the
4381 proper movement on registers that I2 modifies. */
4382
4383 if (i0)
4384 from_luid = DF_INSN_LUID (i0);
4385 else if (i1)
4386 from_luid = DF_INSN_LUID (i1);
4387 else
4388 from_luid = DF_INSN_LUID (i2);
4389 if (newi2pat)
4390 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4391 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4392
4393 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4394 if (i3notes)
4395 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4396 elim_i2, elim_i1, elim_i0);
4397 if (i2notes)
4398 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4399 elim_i2, elim_i1, elim_i0);
4400 if (i1notes)
4401 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4402 elim_i2, local_elim_i1, local_elim_i0);
4403 if (i0notes)
4404 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4405 elim_i2, elim_i1, local_elim_i0);
4406 if (midnotes)
4407 distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4408 elim_i2, elim_i1, elim_i0);
4409
4410 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4411 know these are REG_UNUSED and want them to go to the desired insn,
4412 so we always pass it as i3. */
4413
4414 if (newi2pat && new_i2_notes)
4415 distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4416 NULL_RTX);
4417
4418 if (new_i3_notes)
4419 distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4420 NULL_RTX);
4421
4422 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4423 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4424 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4425 in that case, it might delete I2. Similarly for I2 and I1.
4426 Show an additional death due to the REG_DEAD note we make here. If
4427 we discard it in distribute_notes, we will decrement it again. */
4428
4429 if (i3dest_killed)
4430 {
4431 rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4432 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4433 distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4434 elim_i1, elim_i0);
4435 else
4436 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4437 elim_i2, elim_i1, elim_i0);
4438 }
4439
4440 if (i2dest_in_i2src)
4441 {
4442 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4443 if (newi2pat && reg_set_p (i2dest, newi2pat))
4444 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4445 NULL_RTX, NULL_RTX);
4446 else
4447 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4448 NULL_RTX, NULL_RTX, NULL_RTX);
4449 }
4450
4451 if (i1dest_in_i1src)
4452 {
4453 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4454 if (newi2pat && reg_set_p (i1dest, newi2pat))
4455 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4456 NULL_RTX, NULL_RTX);
4457 else
4458 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4459 NULL_RTX, NULL_RTX, NULL_RTX);
4460 }
4461
4462 if (i0dest_in_i0src)
4463 {
4464 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4465 if (newi2pat && reg_set_p (i0dest, newi2pat))
4466 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4467 NULL_RTX, NULL_RTX);
4468 else
4469 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4470 NULL_RTX, NULL_RTX, NULL_RTX);
4471 }
4472
4473 distribute_links (i3links);
4474 distribute_links (i2links);
4475 distribute_links (i1links);
4476 distribute_links (i0links);
4477
4478 if (REG_P (i2dest))
4479 {
4480 struct insn_link *link;
4481 rtx_insn *i2_insn = 0;
4482 rtx i2_val = 0, set;
4483
4484 /* The insn that used to set this register doesn't exist, and
4485 this life of the register may not exist either. See if one of
4486 I3's links points to an insn that sets I2DEST. If it does,
4487 that is now the last known value for I2DEST. If we don't update
4488 this and I2 set the register to a value that depended on its old
4489 contents, we will get confused. If this insn is used, thing
4490 will be set correctly in combine_instructions. */
4491 FOR_EACH_LOG_LINK (link, i3)
4492 if ((set = single_set (link->insn)) != 0
4493 && rtx_equal_p (i2dest, SET_DEST (set)))
4494 i2_insn = link->insn, i2_val = SET_SRC (set);
4495
4496 record_value_for_reg (i2dest, i2_insn, i2_val);
4497
4498 /* If the reg formerly set in I2 died only once and that was in I3,
4499 zero its use count so it won't make `reload' do any work. */
4500 if (! added_sets_2
4501 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4502 && ! i2dest_in_i2src
4503 && REGNO (i2dest) < reg_n_sets_max)
4504 INC_REG_N_SETS (REGNO (i2dest), -1);
4505 }
4506
4507 if (i1 && REG_P (i1dest))
4508 {
4509 struct insn_link *link;
4510 rtx_insn *i1_insn = 0;
4511 rtx i1_val = 0, set;
4512
4513 FOR_EACH_LOG_LINK (link, i3)
4514 if ((set = single_set (link->insn)) != 0
4515 && rtx_equal_p (i1dest, SET_DEST (set)))
4516 i1_insn = link->insn, i1_val = SET_SRC (set);
4517
4518 record_value_for_reg (i1dest, i1_insn, i1_val);
4519
4520 if (! added_sets_1
4521 && ! i1dest_in_i1src
4522 && REGNO (i1dest) < reg_n_sets_max)
4523 INC_REG_N_SETS (REGNO (i1dest), -1);
4524 }
4525
4526 if (i0 && REG_P (i0dest))
4527 {
4528 struct insn_link *link;
4529 rtx_insn *i0_insn = 0;
4530 rtx i0_val = 0, set;
4531
4532 FOR_EACH_LOG_LINK (link, i3)
4533 if ((set = single_set (link->insn)) != 0
4534 && rtx_equal_p (i0dest, SET_DEST (set)))
4535 i0_insn = link->insn, i0_val = SET_SRC (set);
4536
4537 record_value_for_reg (i0dest, i0_insn, i0_val);
4538
4539 if (! added_sets_0
4540 && ! i0dest_in_i0src
4541 && REGNO (i0dest) < reg_n_sets_max)
4542 INC_REG_N_SETS (REGNO (i0dest), -1);
4543 }
4544
4545 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4546 been made to this insn. The order is important, because newi2pat
4547 can affect nonzero_bits of newpat. */
4548 if (newi2pat)
4549 note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4550 note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4551 }
4552
4553 if (undobuf.other_insn != NULL_RTX)
4554 {
4555 if (dump_file)
4556 {
4557 fprintf (dump_file, "modifying other_insn ");
4558 dump_insn_slim (dump_file, undobuf.other_insn);
4559 }
4560 df_insn_rescan (undobuf.other_insn);
4561 }
4562
4563 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4564 {
4565 if (dump_file)
4566 {
4567 fprintf (dump_file, "modifying insn i0 ");
4568 dump_insn_slim (dump_file, i0);
4569 }
4570 df_insn_rescan (i0);
4571 }
4572
4573 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4574 {
4575 if (dump_file)
4576 {
4577 fprintf (dump_file, "modifying insn i1 ");
4578 dump_insn_slim (dump_file, i1);
4579 }
4580 df_insn_rescan (i1);
4581 }
4582
4583 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4584 {
4585 if (dump_file)
4586 {
4587 fprintf (dump_file, "modifying insn i2 ");
4588 dump_insn_slim (dump_file, i2);
4589 }
4590 df_insn_rescan (i2);
4591 }
4592
4593 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4594 {
4595 if (dump_file)
4596 {
4597 fprintf (dump_file, "modifying insn i3 ");
4598 dump_insn_slim (dump_file, i3);
4599 }
4600 df_insn_rescan (i3);
4601 }
4602
4603 /* Set new_direct_jump_p if a new return or simple jump instruction
4604 has been created. Adjust the CFG accordingly. */
4605 if (returnjump_p (i3) || any_uncondjump_p (i3))
4606 {
4607 *new_direct_jump_p = 1;
4608 mark_jump_label (PATTERN (i3), i3, 0);
4609 update_cfg_for_uncondjump (i3);
4610 }
4611
4612 if (undobuf.other_insn != NULL_RTX
4613 && (returnjump_p (undobuf.other_insn)
4614 || any_uncondjump_p (undobuf.other_insn)))
4615 {
4616 *new_direct_jump_p = 1;
4617 update_cfg_for_uncondjump (undobuf.other_insn);
4618 }
4619
4620 /* A noop might also need cleaning up of CFG, if it comes from the
4621 simplification of a jump. */
4622 if (JUMP_P (i3)
4623 && GET_CODE (newpat) == SET
4624 && SET_SRC (newpat) == pc_rtx
4625 && SET_DEST (newpat) == pc_rtx)
4626 {
4627 *new_direct_jump_p = 1;
4628 update_cfg_for_uncondjump (i3);
4629 }
4630
4631 if (undobuf.other_insn != NULL_RTX
4632 && JUMP_P (undobuf.other_insn)
4633 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4634 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4635 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4636 {
4637 *new_direct_jump_p = 1;
4638 update_cfg_for_uncondjump (undobuf.other_insn);
4639 }
4640
4641 combine_successes++;
4642 undo_commit ();
4643
4644 if (added_links_insn
4645 && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
4646 && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
4647 return added_links_insn;
4648 else
4649 return newi2pat ? i2 : i3;
4650 }
4651 \f
4652 /* Get a marker for undoing to the current state. */
4653
4654 static void *
4655 get_undo_marker (void)
4656 {
4657 return undobuf.undos;
4658 }
4659
4660 /* Undo the modifications up to the marker. */
4661
4662 static void
4663 undo_to_marker (void *marker)
4664 {
4665 struct undo *undo, *next;
4666
4667 for (undo = undobuf.undos; undo != marker; undo = next)
4668 {
4669 gcc_assert (undo);
4670
4671 next = undo->next;
4672 switch (undo->kind)
4673 {
4674 case UNDO_RTX:
4675 *undo->where.r = undo->old_contents.r;
4676 break;
4677 case UNDO_INT:
4678 *undo->where.i = undo->old_contents.i;
4679 break;
4680 case UNDO_MODE:
4681 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4682 break;
4683 case UNDO_LINKS:
4684 *undo->where.l = undo->old_contents.l;
4685 break;
4686 default:
4687 gcc_unreachable ();
4688 }
4689
4690 undo->next = undobuf.frees;
4691 undobuf.frees = undo;
4692 }
4693
4694 undobuf.undos = (struct undo *) marker;
4695 }
4696
4697 /* Undo all the modifications recorded in undobuf. */
4698
4699 static void
4700 undo_all (void)
4701 {
4702 undo_to_marker (0);
4703 }
4704
4705 /* We've committed to accepting the changes we made. Move all
4706 of the undos to the free list. */
4707
4708 static void
4709 undo_commit (void)
4710 {
4711 struct undo *undo, *next;
4712
4713 for (undo = undobuf.undos; undo; undo = next)
4714 {
4715 next = undo->next;
4716 undo->next = undobuf.frees;
4717 undobuf.frees = undo;
4718 }
4719 undobuf.undos = 0;
4720 }
4721 \f
4722 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4723 where we have an arithmetic expression and return that point. LOC will
4724 be inside INSN.
4725
4726 try_combine will call this function to see if an insn can be split into
4727 two insns. */
4728
4729 static rtx *
4730 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4731 {
4732 rtx x = *loc;
4733 enum rtx_code code = GET_CODE (x);
4734 rtx *split;
4735 unsigned HOST_WIDE_INT len = 0;
4736 HOST_WIDE_INT pos = 0;
4737 int unsignedp = 0;
4738 rtx inner = NULL_RTX;
4739
4740 /* First special-case some codes. */
4741 switch (code)
4742 {
4743 case SUBREG:
4744 #ifdef INSN_SCHEDULING
4745 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4746 point. */
4747 if (MEM_P (SUBREG_REG (x)))
4748 return loc;
4749 #endif
4750 return find_split_point (&SUBREG_REG (x), insn, false);
4751
4752 case MEM:
4753 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4754 using LO_SUM and HIGH. */
4755 if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4756 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4757 {
4758 machine_mode address_mode = get_address_mode (x);
4759
4760 SUBST (XEXP (x, 0),
4761 gen_rtx_LO_SUM (address_mode,
4762 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4763 XEXP (x, 0)));
4764 return &XEXP (XEXP (x, 0), 0);
4765 }
4766
4767 /* If we have a PLUS whose second operand is a constant and the
4768 address is not valid, perhaps will can split it up using
4769 the machine-specific way to split large constants. We use
4770 the first pseudo-reg (one of the virtual regs) as a placeholder;
4771 it will not remain in the result. */
4772 if (GET_CODE (XEXP (x, 0)) == PLUS
4773 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4774 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4775 MEM_ADDR_SPACE (x)))
4776 {
4777 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4778 rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4779 subst_insn);
4780
4781 /* This should have produced two insns, each of which sets our
4782 placeholder. If the source of the second is a valid address,
4783 we can make put both sources together and make a split point
4784 in the middle. */
4785
4786 if (seq
4787 && NEXT_INSN (seq) != NULL_RTX
4788 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4789 && NONJUMP_INSN_P (seq)
4790 && GET_CODE (PATTERN (seq)) == SET
4791 && SET_DEST (PATTERN (seq)) == reg
4792 && ! reg_mentioned_p (reg,
4793 SET_SRC (PATTERN (seq)))
4794 && NONJUMP_INSN_P (NEXT_INSN (seq))
4795 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4796 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4797 && memory_address_addr_space_p
4798 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4799 MEM_ADDR_SPACE (x)))
4800 {
4801 rtx src1 = SET_SRC (PATTERN (seq));
4802 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4803
4804 /* Replace the placeholder in SRC2 with SRC1. If we can
4805 find where in SRC2 it was placed, that can become our
4806 split point and we can replace this address with SRC2.
4807 Just try two obvious places. */
4808
4809 src2 = replace_rtx (src2, reg, src1);
4810 split = 0;
4811 if (XEXP (src2, 0) == src1)
4812 split = &XEXP (src2, 0);
4813 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4814 && XEXP (XEXP (src2, 0), 0) == src1)
4815 split = &XEXP (XEXP (src2, 0), 0);
4816
4817 if (split)
4818 {
4819 SUBST (XEXP (x, 0), src2);
4820 return split;
4821 }
4822 }
4823
4824 /* If that didn't work, perhaps the first operand is complex and
4825 needs to be computed separately, so make a split point there.
4826 This will occur on machines that just support REG + CONST
4827 and have a constant moved through some previous computation. */
4828
4829 else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4830 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4831 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4832 return &XEXP (XEXP (x, 0), 0);
4833 }
4834
4835 /* If we have a PLUS whose first operand is complex, try computing it
4836 separately by making a split there. */
4837 if (GET_CODE (XEXP (x, 0)) == PLUS
4838 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4839 MEM_ADDR_SPACE (x))
4840 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4841 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4842 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4843 return &XEXP (XEXP (x, 0), 0);
4844 break;
4845
4846 case SET:
4847 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
4848 ZERO_EXTRACT, the most likely reason why this doesn't match is that
4849 we need to put the operand into a register. So split at that
4850 point. */
4851
4852 if (SET_DEST (x) == cc0_rtx
4853 && GET_CODE (SET_SRC (x)) != COMPARE
4854 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
4855 && !OBJECT_P (SET_SRC (x))
4856 && ! (GET_CODE (SET_SRC (x)) == SUBREG
4857 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
4858 return &SET_SRC (x);
4859
4860 /* See if we can split SET_SRC as it stands. */
4861 split = find_split_point (&SET_SRC (x), insn, true);
4862 if (split && split != &SET_SRC (x))
4863 return split;
4864
4865 /* See if we can split SET_DEST as it stands. */
4866 split = find_split_point (&SET_DEST (x), insn, false);
4867 if (split && split != &SET_DEST (x))
4868 return split;
4869
4870 /* See if this is a bitfield assignment with everything constant. If
4871 so, this is an IOR of an AND, so split it into that. */
4872 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
4873 && HWI_COMPUTABLE_MODE_P (GET_MODE (XEXP (SET_DEST (x), 0)))
4874 && CONST_INT_P (XEXP (SET_DEST (x), 1))
4875 && CONST_INT_P (XEXP (SET_DEST (x), 2))
4876 && CONST_INT_P (SET_SRC (x))
4877 && ((INTVAL (XEXP (SET_DEST (x), 1))
4878 + INTVAL (XEXP (SET_DEST (x), 2)))
4879 <= GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0))))
4880 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
4881 {
4882 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
4883 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
4884 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
4885 rtx dest = XEXP (SET_DEST (x), 0);
4886 machine_mode mode = GET_MODE (dest);
4887 unsigned HOST_WIDE_INT mask
4888 = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
4889 rtx or_mask;
4890
4891 if (BITS_BIG_ENDIAN)
4892 pos = GET_MODE_PRECISION (mode) - len - pos;
4893
4894 or_mask = gen_int_mode (src << pos, mode);
4895 if (src == mask)
4896 SUBST (SET_SRC (x),
4897 simplify_gen_binary (IOR, mode, dest, or_mask));
4898 else
4899 {
4900 rtx negmask = gen_int_mode (~(mask << pos), mode);
4901 SUBST (SET_SRC (x),
4902 simplify_gen_binary (IOR, mode,
4903 simplify_gen_binary (AND, mode,
4904 dest, negmask),
4905 or_mask));
4906 }
4907
4908 SUBST (SET_DEST (x), dest);
4909
4910 split = find_split_point (&SET_SRC (x), insn, true);
4911 if (split && split != &SET_SRC (x))
4912 return split;
4913 }
4914
4915 /* Otherwise, see if this is an operation that we can split into two.
4916 If so, try to split that. */
4917 code = GET_CODE (SET_SRC (x));
4918
4919 switch (code)
4920 {
4921 case AND:
4922 /* If we are AND'ing with a large constant that is only a single
4923 bit and the result is only being used in a context where we
4924 need to know if it is zero or nonzero, replace it with a bit
4925 extraction. This will avoid the large constant, which might
4926 have taken more than one insn to make. If the constant were
4927 not a valid argument to the AND but took only one insn to make,
4928 this is no worse, but if it took more than one insn, it will
4929 be better. */
4930
4931 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4932 && REG_P (XEXP (SET_SRC (x), 0))
4933 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
4934 && REG_P (SET_DEST (x))
4935 && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
4936 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
4937 && XEXP (*split, 0) == SET_DEST (x)
4938 && XEXP (*split, 1) == const0_rtx)
4939 {
4940 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
4941 XEXP (SET_SRC (x), 0),
4942 pos, NULL_RTX, 1, 1, 0, 0);
4943 if (extraction != 0)
4944 {
4945 SUBST (SET_SRC (x), extraction);
4946 return find_split_point (loc, insn, false);
4947 }
4948 }
4949 break;
4950
4951 case NE:
4952 /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
4953 is known to be on, this can be converted into a NEG of a shift. */
4954 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
4955 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
4956 && 1 <= (pos = exact_log2
4957 (nonzero_bits (XEXP (SET_SRC (x), 0),
4958 GET_MODE (XEXP (SET_SRC (x), 0))))))
4959 {
4960 machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
4961
4962 SUBST (SET_SRC (x),
4963 gen_rtx_NEG (mode,
4964 gen_rtx_LSHIFTRT (mode,
4965 XEXP (SET_SRC (x), 0),
4966 GEN_INT (pos))));
4967
4968 split = find_split_point (&SET_SRC (x), insn, true);
4969 if (split && split != &SET_SRC (x))
4970 return split;
4971 }
4972 break;
4973
4974 case SIGN_EXTEND:
4975 inner = XEXP (SET_SRC (x), 0);
4976
4977 /* We can't optimize if either mode is a partial integer
4978 mode as we don't know how many bits are significant
4979 in those modes. */
4980 if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT
4981 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
4982 break;
4983
4984 pos = 0;
4985 len = GET_MODE_PRECISION (GET_MODE (inner));
4986 unsignedp = 0;
4987 break;
4988
4989 case SIGN_EXTRACT:
4990 case ZERO_EXTRACT:
4991 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
4992 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
4993 {
4994 inner = XEXP (SET_SRC (x), 0);
4995 len = INTVAL (XEXP (SET_SRC (x), 1));
4996 pos = INTVAL (XEXP (SET_SRC (x), 2));
4997
4998 if (BITS_BIG_ENDIAN)
4999 pos = GET_MODE_PRECISION (GET_MODE (inner)) - len - pos;
5000 unsignedp = (code == ZERO_EXTRACT);
5001 }
5002 break;
5003
5004 default:
5005 break;
5006 }
5007
5008 if (len && pos >= 0
5009 && pos + len <= GET_MODE_PRECISION (GET_MODE (inner)))
5010 {
5011 machine_mode mode = GET_MODE (SET_SRC (x));
5012
5013 /* For unsigned, we have a choice of a shift followed by an
5014 AND or two shifts. Use two shifts for field sizes where the
5015 constant might be too large. We assume here that we can
5016 always at least get 8-bit constants in an AND insn, which is
5017 true for every current RISC. */
5018
5019 if (unsignedp && len <= 8)
5020 {
5021 unsigned HOST_WIDE_INT mask
5022 = ((unsigned HOST_WIDE_INT) 1 << len) - 1;
5023 SUBST (SET_SRC (x),
5024 gen_rtx_AND (mode,
5025 gen_rtx_LSHIFTRT
5026 (mode, gen_lowpart (mode, inner),
5027 GEN_INT (pos)),
5028 gen_int_mode (mask, mode)));
5029
5030 split = find_split_point (&SET_SRC (x), insn, true);
5031 if (split && split != &SET_SRC (x))
5032 return split;
5033 }
5034 else
5035 {
5036 SUBST (SET_SRC (x),
5037 gen_rtx_fmt_ee
5038 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5039 gen_rtx_ASHIFT (mode,
5040 gen_lowpart (mode, inner),
5041 GEN_INT (GET_MODE_PRECISION (mode)
5042 - len - pos)),
5043 GEN_INT (GET_MODE_PRECISION (mode) - len)));
5044
5045 split = find_split_point (&SET_SRC (x), insn, true);
5046 if (split && split != &SET_SRC (x))
5047 return split;
5048 }
5049 }
5050
5051 /* See if this is a simple operation with a constant as the second
5052 operand. It might be that this constant is out of range and hence
5053 could be used as a split point. */
5054 if (BINARY_P (SET_SRC (x))
5055 && CONSTANT_P (XEXP (SET_SRC (x), 1))
5056 && (OBJECT_P (XEXP (SET_SRC (x), 0))
5057 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5058 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5059 return &XEXP (SET_SRC (x), 1);
5060
5061 /* Finally, see if this is a simple operation with its first operand
5062 not in a register. The operation might require this operand in a
5063 register, so return it as a split point. We can always do this
5064 because if the first operand were another operation, we would have
5065 already found it as a split point. */
5066 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5067 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5068 return &XEXP (SET_SRC (x), 0);
5069
5070 return 0;
5071
5072 case AND:
5073 case IOR:
5074 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5075 it is better to write this as (not (ior A B)) so we can split it.
5076 Similarly for IOR. */
5077 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5078 {
5079 SUBST (*loc,
5080 gen_rtx_NOT (GET_MODE (x),
5081 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5082 GET_MODE (x),
5083 XEXP (XEXP (x, 0), 0),
5084 XEXP (XEXP (x, 1), 0))));
5085 return find_split_point (loc, insn, set_src);
5086 }
5087
5088 /* Many RISC machines have a large set of logical insns. If the
5089 second operand is a NOT, put it first so we will try to split the
5090 other operand first. */
5091 if (GET_CODE (XEXP (x, 1)) == NOT)
5092 {
5093 rtx tem = XEXP (x, 0);
5094 SUBST (XEXP (x, 0), XEXP (x, 1));
5095 SUBST (XEXP (x, 1), tem);
5096 }
5097 break;
5098
5099 case PLUS:
5100 case MINUS:
5101 /* Canonicalization can produce (minus A (mult B C)), where C is a
5102 constant. It may be better to try splitting (plus (mult B -C) A)
5103 instead if this isn't a multiply by a power of two. */
5104 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5105 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5106 && exact_log2 (INTVAL (XEXP (XEXP (x, 1), 1))) < 0)
5107 {
5108 machine_mode mode = GET_MODE (x);
5109 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5110 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5111 SUBST (*loc, gen_rtx_PLUS (mode,
5112 gen_rtx_MULT (mode,
5113 XEXP (XEXP (x, 1), 0),
5114 gen_int_mode (other_int,
5115 mode)),
5116 XEXP (x, 0)));
5117 return find_split_point (loc, insn, set_src);
5118 }
5119
5120 /* Split at a multiply-accumulate instruction. However if this is
5121 the SET_SRC, we likely do not have such an instruction and it's
5122 worthless to try this split. */
5123 if (!set_src
5124 && (GET_CODE (XEXP (x, 0)) == MULT
5125 || (GET_CODE (XEXP (x, 0)) == ASHIFT
5126 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5127 return loc;
5128
5129 default:
5130 break;
5131 }
5132
5133 /* Otherwise, select our actions depending on our rtx class. */
5134 switch (GET_RTX_CLASS (code))
5135 {
5136 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
5137 case RTX_TERNARY:
5138 split = find_split_point (&XEXP (x, 2), insn, false);
5139 if (split)
5140 return split;
5141 /* ... fall through ... */
5142 case RTX_BIN_ARITH:
5143 case RTX_COMM_ARITH:
5144 case RTX_COMPARE:
5145 case RTX_COMM_COMPARE:
5146 split = find_split_point (&XEXP (x, 1), insn, false);
5147 if (split)
5148 return split;
5149 /* ... fall through ... */
5150 case RTX_UNARY:
5151 /* Some machines have (and (shift ...) ...) insns. If X is not
5152 an AND, but XEXP (X, 0) is, use it as our split point. */
5153 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5154 return &XEXP (x, 0);
5155
5156 split = find_split_point (&XEXP (x, 0), insn, false);
5157 if (split)
5158 return split;
5159 return loc;
5160
5161 default:
5162 /* Otherwise, we don't have a split point. */
5163 return 0;
5164 }
5165 }
5166 \f
5167 /* Throughout X, replace FROM with TO, and return the result.
5168 The result is TO if X is FROM;
5169 otherwise the result is X, but its contents may have been modified.
5170 If they were modified, a record was made in undobuf so that
5171 undo_all will (among other things) return X to its original state.
5172
5173 If the number of changes necessary is too much to record to undo,
5174 the excess changes are not made, so the result is invalid.
5175 The changes already made can still be undone.
5176 undobuf.num_undo is incremented for such changes, so by testing that
5177 the caller can tell whether the result is valid.
5178
5179 `n_occurrences' is incremented each time FROM is replaced.
5180
5181 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5182
5183 IN_COND is nonzero if we are at the top level of a condition.
5184
5185 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5186 by copying if `n_occurrences' is nonzero. */
5187
5188 static rtx
5189 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5190 {
5191 enum rtx_code code = GET_CODE (x);
5192 machine_mode op0_mode = VOIDmode;
5193 const char *fmt;
5194 int len, i;
5195 rtx new_rtx;
5196
5197 /* Two expressions are equal if they are identical copies of a shared
5198 RTX or if they are both registers with the same register number
5199 and mode. */
5200
5201 #define COMBINE_RTX_EQUAL_P(X,Y) \
5202 ((X) == (Y) \
5203 || (REG_P (X) && REG_P (Y) \
5204 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5205
5206 /* Do not substitute into clobbers of regs -- this will never result in
5207 valid RTL. */
5208 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5209 return x;
5210
5211 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5212 {
5213 n_occurrences++;
5214 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5215 }
5216
5217 /* If X and FROM are the same register but different modes, they
5218 will not have been seen as equal above. However, the log links code
5219 will make a LOG_LINKS entry for that case. If we do nothing, we
5220 will try to rerecognize our original insn and, when it succeeds,
5221 we will delete the feeding insn, which is incorrect.
5222
5223 So force this insn not to match in this (rare) case. */
5224 if (! in_dest && code == REG && REG_P (from)
5225 && reg_overlap_mentioned_p (x, from))
5226 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5227
5228 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5229 of which may contain things that can be combined. */
5230 if (code != MEM && code != LO_SUM && OBJECT_P (x))
5231 return x;
5232
5233 /* It is possible to have a subexpression appear twice in the insn.
5234 Suppose that FROM is a register that appears within TO.
5235 Then, after that subexpression has been scanned once by `subst',
5236 the second time it is scanned, TO may be found. If we were
5237 to scan TO here, we would find FROM within it and create a
5238 self-referent rtl structure which is completely wrong. */
5239 if (COMBINE_RTX_EQUAL_P (x, to))
5240 return to;
5241
5242 /* Parallel asm_operands need special attention because all of the
5243 inputs are shared across the arms. Furthermore, unsharing the
5244 rtl results in recognition failures. Failure to handle this case
5245 specially can result in circular rtl.
5246
5247 Solve this by doing a normal pass across the first entry of the
5248 parallel, and only processing the SET_DESTs of the subsequent
5249 entries. Ug. */
5250
5251 if (code == PARALLEL
5252 && GET_CODE (XVECEXP (x, 0, 0)) == SET
5253 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5254 {
5255 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5256
5257 /* If this substitution failed, this whole thing fails. */
5258 if (GET_CODE (new_rtx) == CLOBBER
5259 && XEXP (new_rtx, 0) == const0_rtx)
5260 return new_rtx;
5261
5262 SUBST (XVECEXP (x, 0, 0), new_rtx);
5263
5264 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5265 {
5266 rtx dest = SET_DEST (XVECEXP (x, 0, i));
5267
5268 if (!REG_P (dest)
5269 && GET_CODE (dest) != CC0
5270 && GET_CODE (dest) != PC)
5271 {
5272 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5273
5274 /* If this substitution failed, this whole thing fails. */
5275 if (GET_CODE (new_rtx) == CLOBBER
5276 && XEXP (new_rtx, 0) == const0_rtx)
5277 return new_rtx;
5278
5279 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5280 }
5281 }
5282 }
5283 else
5284 {
5285 len = GET_RTX_LENGTH (code);
5286 fmt = GET_RTX_FORMAT (code);
5287
5288 /* We don't need to process a SET_DEST that is a register, CC0,
5289 or PC, so set up to skip this common case. All other cases
5290 where we want to suppress replacing something inside a
5291 SET_SRC are handled via the IN_DEST operand. */
5292 if (code == SET
5293 && (REG_P (SET_DEST (x))
5294 || GET_CODE (SET_DEST (x)) == CC0
5295 || GET_CODE (SET_DEST (x)) == PC))
5296 fmt = "ie";
5297
5298 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5299 constant. */
5300 if (fmt[0] == 'e')
5301 op0_mode = GET_MODE (XEXP (x, 0));
5302
5303 for (i = 0; i < len; i++)
5304 {
5305 if (fmt[i] == 'E')
5306 {
5307 int j;
5308 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5309 {
5310 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5311 {
5312 new_rtx = (unique_copy && n_occurrences
5313 ? copy_rtx (to) : to);
5314 n_occurrences++;
5315 }
5316 else
5317 {
5318 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5319 unique_copy);
5320
5321 /* If this substitution failed, this whole thing
5322 fails. */
5323 if (GET_CODE (new_rtx) == CLOBBER
5324 && XEXP (new_rtx, 0) == const0_rtx)
5325 return new_rtx;
5326 }
5327
5328 SUBST (XVECEXP (x, i, j), new_rtx);
5329 }
5330 }
5331 else if (fmt[i] == 'e')
5332 {
5333 /* If this is a register being set, ignore it. */
5334 new_rtx = XEXP (x, i);
5335 if (in_dest
5336 && i == 0
5337 && (((code == SUBREG || code == ZERO_EXTRACT)
5338 && REG_P (new_rtx))
5339 || code == STRICT_LOW_PART))
5340 ;
5341
5342 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5343 {
5344 /* In general, don't install a subreg involving two
5345 modes not tieable. It can worsen register
5346 allocation, and can even make invalid reload
5347 insns, since the reg inside may need to be copied
5348 from in the outside mode, and that may be invalid
5349 if it is an fp reg copied in integer mode.
5350
5351 We allow two exceptions to this: It is valid if
5352 it is inside another SUBREG and the mode of that
5353 SUBREG and the mode of the inside of TO is
5354 tieable and it is valid if X is a SET that copies
5355 FROM to CC0. */
5356
5357 if (GET_CODE (to) == SUBREG
5358 && ! MODES_TIEABLE_P (GET_MODE (to),
5359 GET_MODE (SUBREG_REG (to)))
5360 && ! (code == SUBREG
5361 && MODES_TIEABLE_P (GET_MODE (x),
5362 GET_MODE (SUBREG_REG (to))))
5363 && (!HAVE_cc0
5364 || (! (code == SET
5365 && i == 1
5366 && XEXP (x, 0) == cc0_rtx))))
5367 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5368
5369 if (code == SUBREG
5370 && REG_P (to)
5371 && REGNO (to) < FIRST_PSEUDO_REGISTER
5372 && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5373 SUBREG_BYTE (x),
5374 GET_MODE (x)) < 0)
5375 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5376
5377 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5378 n_occurrences++;
5379 }
5380 else
5381 /* If we are in a SET_DEST, suppress most cases unless we
5382 have gone inside a MEM, in which case we want to
5383 simplify the address. We assume here that things that
5384 are actually part of the destination have their inner
5385 parts in the first expression. This is true for SUBREG,
5386 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5387 things aside from REG and MEM that should appear in a
5388 SET_DEST. */
5389 new_rtx = subst (XEXP (x, i), from, to,
5390 (((in_dest
5391 && (code == SUBREG || code == STRICT_LOW_PART
5392 || code == ZERO_EXTRACT))
5393 || code == SET)
5394 && i == 0),
5395 code == IF_THEN_ELSE && i == 0,
5396 unique_copy);
5397
5398 /* If we found that we will have to reject this combination,
5399 indicate that by returning the CLOBBER ourselves, rather than
5400 an expression containing it. This will speed things up as
5401 well as prevent accidents where two CLOBBERs are considered
5402 to be equal, thus producing an incorrect simplification. */
5403
5404 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5405 return new_rtx;
5406
5407 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5408 {
5409 machine_mode mode = GET_MODE (x);
5410
5411 x = simplify_subreg (GET_MODE (x), new_rtx,
5412 GET_MODE (SUBREG_REG (x)),
5413 SUBREG_BYTE (x));
5414 if (! x)
5415 x = gen_rtx_CLOBBER (mode, const0_rtx);
5416 }
5417 else if (CONST_SCALAR_INT_P (new_rtx)
5418 && GET_CODE (x) == ZERO_EXTEND)
5419 {
5420 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
5421 new_rtx, GET_MODE (XEXP (x, 0)));
5422 gcc_assert (x);
5423 }
5424 else
5425 SUBST (XEXP (x, i), new_rtx);
5426 }
5427 }
5428 }
5429
5430 /* Check if we are loading something from the constant pool via float
5431 extension; in this case we would undo compress_float_constant
5432 optimization and degenerate constant load to an immediate value. */
5433 if (GET_CODE (x) == FLOAT_EXTEND
5434 && MEM_P (XEXP (x, 0))
5435 && MEM_READONLY_P (XEXP (x, 0)))
5436 {
5437 rtx tmp = avoid_constant_pool_reference (x);
5438 if (x != tmp)
5439 return x;
5440 }
5441
5442 /* Try to simplify X. If the simplification changed the code, it is likely
5443 that further simplification will help, so loop, but limit the number
5444 of repetitions that will be performed. */
5445
5446 for (i = 0; i < 4; i++)
5447 {
5448 /* If X is sufficiently simple, don't bother trying to do anything
5449 with it. */
5450 if (code != CONST_INT && code != REG && code != CLOBBER)
5451 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5452
5453 if (GET_CODE (x) == code)
5454 break;
5455
5456 code = GET_CODE (x);
5457
5458 /* We no longer know the original mode of operand 0 since we
5459 have changed the form of X) */
5460 op0_mode = VOIDmode;
5461 }
5462
5463 return x;
5464 }
5465 \f
5466 /* Simplify X, a piece of RTL. We just operate on the expression at the
5467 outer level; call `subst' to simplify recursively. Return the new
5468 expression.
5469
5470 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5471 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5472 of a condition. */
5473
5474 static rtx
5475 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5476 int in_cond)
5477 {
5478 enum rtx_code code = GET_CODE (x);
5479 machine_mode mode = GET_MODE (x);
5480 rtx temp;
5481 int i;
5482
5483 /* If this is a commutative operation, put a constant last and a complex
5484 expression first. We don't need to do this for comparisons here. */
5485 if (COMMUTATIVE_ARITH_P (x)
5486 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5487 {
5488 temp = XEXP (x, 0);
5489 SUBST (XEXP (x, 0), XEXP (x, 1));
5490 SUBST (XEXP (x, 1), temp);
5491 }
5492
5493 /* Try to fold this expression in case we have constants that weren't
5494 present before. */
5495 temp = 0;
5496 switch (GET_RTX_CLASS (code))
5497 {
5498 case RTX_UNARY:
5499 if (op0_mode == VOIDmode)
5500 op0_mode = GET_MODE (XEXP (x, 0));
5501 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5502 break;
5503 case RTX_COMPARE:
5504 case RTX_COMM_COMPARE:
5505 {
5506 machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5507 if (cmp_mode == VOIDmode)
5508 {
5509 cmp_mode = GET_MODE (XEXP (x, 1));
5510 if (cmp_mode == VOIDmode)
5511 cmp_mode = op0_mode;
5512 }
5513 temp = simplify_relational_operation (code, mode, cmp_mode,
5514 XEXP (x, 0), XEXP (x, 1));
5515 }
5516 break;
5517 case RTX_COMM_ARITH:
5518 case RTX_BIN_ARITH:
5519 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5520 break;
5521 case RTX_BITFIELD_OPS:
5522 case RTX_TERNARY:
5523 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5524 XEXP (x, 1), XEXP (x, 2));
5525 break;
5526 default:
5527 break;
5528 }
5529
5530 if (temp)
5531 {
5532 x = temp;
5533 code = GET_CODE (temp);
5534 op0_mode = VOIDmode;
5535 mode = GET_MODE (temp);
5536 }
5537
5538 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5539 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5540 things. Check for cases where both arms are testing the same
5541 condition.
5542
5543 Don't do anything if all operands are very simple. */
5544
5545 if ((BINARY_P (x)
5546 && ((!OBJECT_P (XEXP (x, 0))
5547 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5548 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5549 || (!OBJECT_P (XEXP (x, 1))
5550 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5551 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5552 || (UNARY_P (x)
5553 && (!OBJECT_P (XEXP (x, 0))
5554 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5555 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5556 {
5557 rtx cond, true_rtx, false_rtx;
5558
5559 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5560 if (cond != 0
5561 /* If everything is a comparison, what we have is highly unlikely
5562 to be simpler, so don't use it. */
5563 && ! (COMPARISON_P (x)
5564 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
5565 {
5566 rtx cop1 = const0_rtx;
5567 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5568
5569 if (cond_code == NE && COMPARISON_P (cond))
5570 return x;
5571
5572 /* Simplify the alternative arms; this may collapse the true and
5573 false arms to store-flag values. Be careful to use copy_rtx
5574 here since true_rtx or false_rtx might share RTL with x as a
5575 result of the if_then_else_cond call above. */
5576 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5577 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5578
5579 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5580 is unlikely to be simpler. */
5581 if (general_operand (true_rtx, VOIDmode)
5582 && general_operand (false_rtx, VOIDmode))
5583 {
5584 enum rtx_code reversed;
5585
5586 /* Restarting if we generate a store-flag expression will cause
5587 us to loop. Just drop through in this case. */
5588
5589 /* If the result values are STORE_FLAG_VALUE and zero, we can
5590 just make the comparison operation. */
5591 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5592 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5593 cond, cop1);
5594 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5595 && ((reversed = reversed_comparison_code_parts
5596 (cond_code, cond, cop1, NULL))
5597 != UNKNOWN))
5598 x = simplify_gen_relational (reversed, mode, VOIDmode,
5599 cond, cop1);
5600
5601 /* Likewise, we can make the negate of a comparison operation
5602 if the result values are - STORE_FLAG_VALUE and zero. */
5603 else if (CONST_INT_P (true_rtx)
5604 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5605 && false_rtx == const0_rtx)
5606 x = simplify_gen_unary (NEG, mode,
5607 simplify_gen_relational (cond_code,
5608 mode, VOIDmode,
5609 cond, cop1),
5610 mode);
5611 else if (CONST_INT_P (false_rtx)
5612 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5613 && true_rtx == const0_rtx
5614 && ((reversed = reversed_comparison_code_parts
5615 (cond_code, cond, cop1, NULL))
5616 != UNKNOWN))
5617 x = simplify_gen_unary (NEG, mode,
5618 simplify_gen_relational (reversed,
5619 mode, VOIDmode,
5620 cond, cop1),
5621 mode);
5622 else
5623 return gen_rtx_IF_THEN_ELSE (mode,
5624 simplify_gen_relational (cond_code,
5625 mode,
5626 VOIDmode,
5627 cond,
5628 cop1),
5629 true_rtx, false_rtx);
5630
5631 code = GET_CODE (x);
5632 op0_mode = VOIDmode;
5633 }
5634 }
5635 }
5636
5637 /* First see if we can apply the inverse distributive law. */
5638 if (code == PLUS || code == MINUS
5639 || code == AND || code == IOR || code == XOR)
5640 {
5641 x = apply_distributive_law (x);
5642 code = GET_CODE (x);
5643 op0_mode = VOIDmode;
5644 }
5645
5646 /* If CODE is an associative operation not otherwise handled, see if we
5647 can associate some operands. This can win if they are constants or
5648 if they are logically related (i.e. (a & b) & a). */
5649 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5650 || code == AND || code == IOR || code == XOR
5651 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5652 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5653 || (flag_associative_math && FLOAT_MODE_P (mode))))
5654 {
5655 if (GET_CODE (XEXP (x, 0)) == code)
5656 {
5657 rtx other = XEXP (XEXP (x, 0), 0);
5658 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5659 rtx inner_op1 = XEXP (x, 1);
5660 rtx inner;
5661
5662 /* Make sure we pass the constant operand if any as the second
5663 one if this is a commutative operation. */
5664 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5665 std::swap (inner_op0, inner_op1);
5666 inner = simplify_binary_operation (code == MINUS ? PLUS
5667 : code == DIV ? MULT
5668 : code,
5669 mode, inner_op0, inner_op1);
5670
5671 /* For commutative operations, try the other pair if that one
5672 didn't simplify. */
5673 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5674 {
5675 other = XEXP (XEXP (x, 0), 1);
5676 inner = simplify_binary_operation (code, mode,
5677 XEXP (XEXP (x, 0), 0),
5678 XEXP (x, 1));
5679 }
5680
5681 if (inner)
5682 return simplify_gen_binary (code, mode, other, inner);
5683 }
5684 }
5685
5686 /* A little bit of algebraic simplification here. */
5687 switch (code)
5688 {
5689 case MEM:
5690 /* Ensure that our address has any ASHIFTs converted to MULT in case
5691 address-recognizing predicates are called later. */
5692 temp = make_compound_operation (XEXP (x, 0), MEM);
5693 SUBST (XEXP (x, 0), temp);
5694 break;
5695
5696 case SUBREG:
5697 if (op0_mode == VOIDmode)
5698 op0_mode = GET_MODE (SUBREG_REG (x));
5699
5700 /* See if this can be moved to simplify_subreg. */
5701 if (CONSTANT_P (SUBREG_REG (x))
5702 && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5703 /* Don't call gen_lowpart if the inner mode
5704 is VOIDmode and we cannot simplify it, as SUBREG without
5705 inner mode is invalid. */
5706 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5707 || gen_lowpart_common (mode, SUBREG_REG (x))))
5708 return gen_lowpart (mode, SUBREG_REG (x));
5709
5710 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5711 break;
5712 {
5713 rtx temp;
5714 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5715 SUBREG_BYTE (x));
5716 if (temp)
5717 return temp;
5718
5719 /* If op is known to have all lower bits zero, the result is zero. */
5720 if (!in_dest
5721 && SCALAR_INT_MODE_P (mode)
5722 && SCALAR_INT_MODE_P (op0_mode)
5723 && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (op0_mode)
5724 && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5725 && HWI_COMPUTABLE_MODE_P (op0_mode)
5726 && (nonzero_bits (SUBREG_REG (x), op0_mode)
5727 & GET_MODE_MASK (mode)) == 0)
5728 return CONST0_RTX (mode);
5729 }
5730
5731 /* Don't change the mode of the MEM if that would change the meaning
5732 of the address. */
5733 if (MEM_P (SUBREG_REG (x))
5734 && (MEM_VOLATILE_P (SUBREG_REG (x))
5735 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
5736 MEM_ADDR_SPACE (SUBREG_REG (x)))))
5737 return gen_rtx_CLOBBER (mode, const0_rtx);
5738
5739 /* Note that we cannot do any narrowing for non-constants since
5740 we might have been counting on using the fact that some bits were
5741 zero. We now do this in the SET. */
5742
5743 break;
5744
5745 case NEG:
5746 temp = expand_compound_operation (XEXP (x, 0));
5747
5748 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5749 replaced by (lshiftrt X C). This will convert
5750 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
5751
5752 if (GET_CODE (temp) == ASHIFTRT
5753 && CONST_INT_P (XEXP (temp, 1))
5754 && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
5755 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5756 INTVAL (XEXP (temp, 1)));
5757
5758 /* If X has only a single bit that might be nonzero, say, bit I, convert
5759 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5760 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
5761 (sign_extract X 1 Y). But only do this if TEMP isn't a register
5762 or a SUBREG of one since we'd be making the expression more
5763 complex if it was just a register. */
5764
5765 if (!REG_P (temp)
5766 && ! (GET_CODE (temp) == SUBREG
5767 && REG_P (SUBREG_REG (temp)))
5768 && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0)
5769 {
5770 rtx temp1 = simplify_shift_const
5771 (NULL_RTX, ASHIFTRT, mode,
5772 simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
5773 GET_MODE_PRECISION (mode) - 1 - i),
5774 GET_MODE_PRECISION (mode) - 1 - i);
5775
5776 /* If all we did was surround TEMP with the two shifts, we
5777 haven't improved anything, so don't use it. Otherwise,
5778 we are better off with TEMP1. */
5779 if (GET_CODE (temp1) != ASHIFTRT
5780 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5781 || XEXP (XEXP (temp1, 0), 0) != temp)
5782 return temp1;
5783 }
5784 break;
5785
5786 case TRUNCATE:
5787 /* We can't handle truncation to a partial integer mode here
5788 because we don't know the real bitsize of the partial
5789 integer mode. */
5790 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5791 break;
5792
5793 if (HWI_COMPUTABLE_MODE_P (mode))
5794 SUBST (XEXP (x, 0),
5795 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5796 GET_MODE_MASK (mode), 0));
5797
5798 /* We can truncate a constant value and return it. */
5799 if (CONST_INT_P (XEXP (x, 0)))
5800 return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
5801
5802 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5803 whose value is a comparison can be replaced with a subreg if
5804 STORE_FLAG_VALUE permits. */
5805 if (HWI_COMPUTABLE_MODE_P (mode)
5806 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
5807 && (temp = get_last_value (XEXP (x, 0)))
5808 && COMPARISON_P (temp))
5809 return gen_lowpart (mode, XEXP (x, 0));
5810 break;
5811
5812 case CONST:
5813 /* (const (const X)) can become (const X). Do it this way rather than
5814 returning the inner CONST since CONST can be shared with a
5815 REG_EQUAL note. */
5816 if (GET_CODE (XEXP (x, 0)) == CONST)
5817 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
5818 break;
5819
5820 case LO_SUM:
5821 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
5822 can add in an offset. find_split_point will split this address up
5823 again if it doesn't match. */
5824 if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
5825 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5826 return XEXP (x, 1);
5827 break;
5828
5829 case PLUS:
5830 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
5831 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
5832 bit-field and can be replaced by either a sign_extend or a
5833 sign_extract. The `and' may be a zero_extend and the two
5834 <c>, -<c> constants may be reversed. */
5835 if (GET_CODE (XEXP (x, 0)) == XOR
5836 && CONST_INT_P (XEXP (x, 1))
5837 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
5838 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
5839 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
5840 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
5841 && HWI_COMPUTABLE_MODE_P (mode)
5842 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
5843 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5844 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
5845 == ((unsigned HOST_WIDE_INT) 1 << (i + 1)) - 1))
5846 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
5847 && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
5848 == (unsigned int) i + 1))))
5849 return simplify_shift_const
5850 (NULL_RTX, ASHIFTRT, mode,
5851 simplify_shift_const (NULL_RTX, ASHIFT, mode,
5852 XEXP (XEXP (XEXP (x, 0), 0), 0),
5853 GET_MODE_PRECISION (mode) - (i + 1)),
5854 GET_MODE_PRECISION (mode) - (i + 1));
5855
5856 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
5857 can become (ashiftrt (ashift (xor x 1) C) C) where C is
5858 the bitsize of the mode - 1. This allows simplification of
5859 "a = (b & 8) == 0;" */
5860 if (XEXP (x, 1) == constm1_rtx
5861 && !REG_P (XEXP (x, 0))
5862 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5863 && REG_P (SUBREG_REG (XEXP (x, 0))))
5864 && nonzero_bits (XEXP (x, 0), mode) == 1)
5865 return simplify_shift_const (NULL_RTX, ASHIFTRT, mode,
5866 simplify_shift_const (NULL_RTX, ASHIFT, mode,
5867 gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
5868 GET_MODE_PRECISION (mode) - 1),
5869 GET_MODE_PRECISION (mode) - 1);
5870
5871 /* If we are adding two things that have no bits in common, convert
5872 the addition into an IOR. This will often be further simplified,
5873 for example in cases like ((a & 1) + (a & 2)), which can
5874 become a & 3. */
5875
5876 if (HWI_COMPUTABLE_MODE_P (mode)
5877 && (nonzero_bits (XEXP (x, 0), mode)
5878 & nonzero_bits (XEXP (x, 1), mode)) == 0)
5879 {
5880 /* Try to simplify the expression further. */
5881 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
5882 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
5883
5884 /* If we could, great. If not, do not go ahead with the IOR
5885 replacement, since PLUS appears in many special purpose
5886 address arithmetic instructions. */
5887 if (GET_CODE (temp) != CLOBBER
5888 && (GET_CODE (temp) != IOR
5889 || ((XEXP (temp, 0) != XEXP (x, 0)
5890 || XEXP (temp, 1) != XEXP (x, 1))
5891 && (XEXP (temp, 0) != XEXP (x, 1)
5892 || XEXP (temp, 1) != XEXP (x, 0)))))
5893 return temp;
5894 }
5895 break;
5896
5897 case MINUS:
5898 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
5899 (and <foo> (const_int pow2-1)) */
5900 if (GET_CODE (XEXP (x, 1)) == AND
5901 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
5902 && exact_log2 (-UINTVAL (XEXP (XEXP (x, 1), 1))) >= 0
5903 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
5904 return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
5905 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
5906 break;
5907
5908 case MULT:
5909 /* If we have (mult (plus A B) C), apply the distributive law and then
5910 the inverse distributive law to see if things simplify. This
5911 occurs mostly in addresses, often when unrolling loops. */
5912
5913 if (GET_CODE (XEXP (x, 0)) == PLUS)
5914 {
5915 rtx result = distribute_and_simplify_rtx (x, 0);
5916 if (result)
5917 return result;
5918 }
5919
5920 /* Try simplify a*(b/c) as (a*b)/c. */
5921 if (FLOAT_MODE_P (mode) && flag_associative_math
5922 && GET_CODE (XEXP (x, 0)) == DIV)
5923 {
5924 rtx tem = simplify_binary_operation (MULT, mode,
5925 XEXP (XEXP (x, 0), 0),
5926 XEXP (x, 1));
5927 if (tem)
5928 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
5929 }
5930 break;
5931
5932 case UDIV:
5933 /* If this is a divide by a power of two, treat it as a shift if
5934 its first operand is a shift. */
5935 if (CONST_INT_P (XEXP (x, 1))
5936 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
5937 && (GET_CODE (XEXP (x, 0)) == ASHIFT
5938 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
5939 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
5940 || GET_CODE (XEXP (x, 0)) == ROTATE
5941 || GET_CODE (XEXP (x, 0)) == ROTATERT))
5942 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (x, 0), i);
5943 break;
5944
5945 case EQ: case NE:
5946 case GT: case GTU: case GE: case GEU:
5947 case LT: case LTU: case LE: case LEU:
5948 case UNEQ: case LTGT:
5949 case UNGT: case UNGE:
5950 case UNLT: case UNLE:
5951 case UNORDERED: case ORDERED:
5952 /* If the first operand is a condition code, we can't do anything
5953 with it. */
5954 if (GET_CODE (XEXP (x, 0)) == COMPARE
5955 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
5956 && ! CC0_P (XEXP (x, 0))))
5957 {
5958 rtx op0 = XEXP (x, 0);
5959 rtx op1 = XEXP (x, 1);
5960 enum rtx_code new_code;
5961
5962 if (GET_CODE (op0) == COMPARE)
5963 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
5964
5965 /* Simplify our comparison, if possible. */
5966 new_code = simplify_comparison (code, &op0, &op1);
5967
5968 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
5969 if only the low-order bit is possibly nonzero in X (such as when
5970 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
5971 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
5972 known to be either 0 or -1, NE becomes a NEG and EQ becomes
5973 (plus X 1).
5974
5975 Remove any ZERO_EXTRACT we made when thinking this was a
5976 comparison. It may now be simpler to use, e.g., an AND. If a
5977 ZERO_EXTRACT is indeed appropriate, it will be placed back by
5978 the call to make_compound_operation in the SET case.
5979
5980 Don't apply these optimizations if the caller would
5981 prefer a comparison rather than a value.
5982 E.g., for the condition in an IF_THEN_ELSE most targets need
5983 an explicit comparison. */
5984
5985 if (in_cond)
5986 ;
5987
5988 else if (STORE_FLAG_VALUE == 1
5989 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5990 && op1 == const0_rtx
5991 && mode == GET_MODE (op0)
5992 && nonzero_bits (op0, mode) == 1)
5993 return gen_lowpart (mode,
5994 expand_compound_operation (op0));
5995
5996 else if (STORE_FLAG_VALUE == 1
5997 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
5998 && op1 == const0_rtx
5999 && mode == GET_MODE (op0)
6000 && (num_sign_bit_copies (op0, mode)
6001 == GET_MODE_PRECISION (mode)))
6002 {
6003 op0 = expand_compound_operation (op0);
6004 return simplify_gen_unary (NEG, mode,
6005 gen_lowpart (mode, op0),
6006 mode);
6007 }
6008
6009 else if (STORE_FLAG_VALUE == 1
6010 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
6011 && op1 == const0_rtx
6012 && mode == GET_MODE (op0)
6013 && nonzero_bits (op0, mode) == 1)
6014 {
6015 op0 = expand_compound_operation (op0);
6016 return simplify_gen_binary (XOR, mode,
6017 gen_lowpart (mode, op0),
6018 const1_rtx);
6019 }
6020
6021 else if (STORE_FLAG_VALUE == 1
6022 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
6023 && op1 == const0_rtx
6024 && mode == GET_MODE (op0)
6025 && (num_sign_bit_copies (op0, mode)
6026 == GET_MODE_PRECISION (mode)))
6027 {
6028 op0 = expand_compound_operation (op0);
6029 return plus_constant (mode, gen_lowpart (mode, op0), 1);
6030 }
6031
6032 /* If STORE_FLAG_VALUE is -1, we have cases similar to
6033 those above. */
6034 if (in_cond)
6035 ;
6036
6037 else if (STORE_FLAG_VALUE == -1
6038 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
6039 && op1 == const0_rtx
6040 && mode == GET_MODE (op0)
6041 && (num_sign_bit_copies (op0, mode)
6042 == GET_MODE_PRECISION (mode)))
6043 return gen_lowpart (mode,
6044 expand_compound_operation (op0));
6045
6046 else if (STORE_FLAG_VALUE == -1
6047 && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
6048 && op1 == const0_rtx
6049 && mode == GET_MODE (op0)
6050 && nonzero_bits (op0, mode) == 1)
6051 {
6052 op0 = expand_compound_operation (op0);
6053 return simplify_gen_unary (NEG, mode,
6054 gen_lowpart (mode, op0),
6055 mode);
6056 }
6057
6058 else if (STORE_FLAG_VALUE == -1
6059 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
6060 && op1 == const0_rtx
6061 && mode == GET_MODE (op0)
6062 && (num_sign_bit_copies (op0, mode)
6063 == GET_MODE_PRECISION (mode)))
6064 {
6065 op0 = expand_compound_operation (op0);
6066 return simplify_gen_unary (NOT, mode,
6067 gen_lowpart (mode, op0),
6068 mode);
6069 }
6070
6071 /* If X is 0/1, (eq X 0) is X-1. */
6072 else if (STORE_FLAG_VALUE == -1
6073 && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
6074 && op1 == const0_rtx
6075 && mode == GET_MODE (op0)
6076 && nonzero_bits (op0, mode) == 1)
6077 {
6078 op0 = expand_compound_operation (op0);
6079 return plus_constant (mode, gen_lowpart (mode, op0), -1);
6080 }
6081
6082 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6083 one bit that might be nonzero, we can convert (ne x 0) to
6084 (ashift x c) where C puts the bit in the sign bit. Remove any
6085 AND with STORE_FLAG_VALUE when we are done, since we are only
6086 going to test the sign bit. */
6087 if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
6088 && HWI_COMPUTABLE_MODE_P (mode)
6089 && val_signbit_p (mode, STORE_FLAG_VALUE)
6090 && op1 == const0_rtx
6091 && mode == GET_MODE (op0)
6092 && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
6093 {
6094 x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
6095 expand_compound_operation (op0),
6096 GET_MODE_PRECISION (mode) - 1 - i);
6097 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6098 return XEXP (x, 0);
6099 else
6100 return x;
6101 }
6102
6103 /* If the code changed, return a whole new comparison.
6104 We also need to avoid using SUBST in cases where
6105 simplify_comparison has widened a comparison with a CONST_INT,
6106 since in that case the wider CONST_INT may fail the sanity
6107 checks in do_SUBST. */
6108 if (new_code != code
6109 || (CONST_INT_P (op1)
6110 && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6111 && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6112 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6113
6114 /* Otherwise, keep this operation, but maybe change its operands.
6115 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
6116 SUBST (XEXP (x, 0), op0);
6117 SUBST (XEXP (x, 1), op1);
6118 }
6119 break;
6120
6121 case IF_THEN_ELSE:
6122 return simplify_if_then_else (x);
6123
6124 case ZERO_EXTRACT:
6125 case SIGN_EXTRACT:
6126 case ZERO_EXTEND:
6127 case SIGN_EXTEND:
6128 /* If we are processing SET_DEST, we are done. */
6129 if (in_dest)
6130 return x;
6131
6132 return expand_compound_operation (x);
6133
6134 case SET:
6135 return simplify_set (x);
6136
6137 case AND:
6138 case IOR:
6139 return simplify_logical (x);
6140
6141 case ASHIFT:
6142 case LSHIFTRT:
6143 case ASHIFTRT:
6144 case ROTATE:
6145 case ROTATERT:
6146 /* If this is a shift by a constant amount, simplify it. */
6147 if (CONST_INT_P (XEXP (x, 1)))
6148 return simplify_shift_const (x, code, mode, XEXP (x, 0),
6149 INTVAL (XEXP (x, 1)));
6150
6151 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6152 SUBST (XEXP (x, 1),
6153 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6154 ((unsigned HOST_WIDE_INT) 1
6155 << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
6156 - 1,
6157 0));
6158 break;
6159
6160 default:
6161 break;
6162 }
6163
6164 return x;
6165 }
6166 \f
6167 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
6168
6169 static rtx
6170 simplify_if_then_else (rtx x)
6171 {
6172 machine_mode mode = GET_MODE (x);
6173 rtx cond = XEXP (x, 0);
6174 rtx true_rtx = XEXP (x, 1);
6175 rtx false_rtx = XEXP (x, 2);
6176 enum rtx_code true_code = GET_CODE (cond);
6177 int comparison_p = COMPARISON_P (cond);
6178 rtx temp;
6179 int i;
6180 enum rtx_code false_code;
6181 rtx reversed;
6182
6183 /* Simplify storing of the truth value. */
6184 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6185 return simplify_gen_relational (true_code, mode, VOIDmode,
6186 XEXP (cond, 0), XEXP (cond, 1));
6187
6188 /* Also when the truth value has to be reversed. */
6189 if (comparison_p
6190 && true_rtx == const0_rtx && false_rtx == const_true_rtx
6191 && (reversed = reversed_comparison (cond, mode)))
6192 return reversed;
6193
6194 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6195 in it is being compared against certain values. Get the true and false
6196 comparisons and see if that says anything about the value of each arm. */
6197
6198 if (comparison_p
6199 && ((false_code = reversed_comparison_code (cond, NULL))
6200 != UNKNOWN)
6201 && REG_P (XEXP (cond, 0)))
6202 {
6203 HOST_WIDE_INT nzb;
6204 rtx from = XEXP (cond, 0);
6205 rtx true_val = XEXP (cond, 1);
6206 rtx false_val = true_val;
6207 int swapped = 0;
6208
6209 /* If FALSE_CODE is EQ, swap the codes and arms. */
6210
6211 if (false_code == EQ)
6212 {
6213 swapped = 1, true_code = EQ, false_code = NE;
6214 std::swap (true_rtx, false_rtx);
6215 }
6216
6217 /* If we are comparing against zero and the expression being tested has
6218 only a single bit that might be nonzero, that is its value when it is
6219 not equal to zero. Similarly if it is known to be -1 or 0. */
6220
6221 if (true_code == EQ && true_val == const0_rtx
6222 && exact_log2 (nzb = nonzero_bits (from, GET_MODE (from))) >= 0)
6223 {
6224 false_code = EQ;
6225 false_val = gen_int_mode (nzb, GET_MODE (from));
6226 }
6227 else if (true_code == EQ && true_val == const0_rtx
6228 && (num_sign_bit_copies (from, GET_MODE (from))
6229 == GET_MODE_PRECISION (GET_MODE (from))))
6230 {
6231 false_code = EQ;
6232 false_val = constm1_rtx;
6233 }
6234
6235 /* Now simplify an arm if we know the value of the register in the
6236 branch and it is used in the arm. Be careful due to the potential
6237 of locally-shared RTL. */
6238
6239 if (reg_mentioned_p (from, true_rtx))
6240 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6241 from, true_val),
6242 pc_rtx, pc_rtx, 0, 0, 0);
6243 if (reg_mentioned_p (from, false_rtx))
6244 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6245 from, false_val),
6246 pc_rtx, pc_rtx, 0, 0, 0);
6247
6248 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6249 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6250
6251 true_rtx = XEXP (x, 1);
6252 false_rtx = XEXP (x, 2);
6253 true_code = GET_CODE (cond);
6254 }
6255
6256 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6257 reversed, do so to avoid needing two sets of patterns for
6258 subtract-and-branch insns. Similarly if we have a constant in the true
6259 arm, the false arm is the same as the first operand of the comparison, or
6260 the false arm is more complicated than the true arm. */
6261
6262 if (comparison_p
6263 && reversed_comparison_code (cond, NULL) != UNKNOWN
6264 && (true_rtx == pc_rtx
6265 || (CONSTANT_P (true_rtx)
6266 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6267 || true_rtx == const0_rtx
6268 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6269 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6270 && !OBJECT_P (false_rtx))
6271 || reg_mentioned_p (true_rtx, false_rtx)
6272 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6273 {
6274 true_code = reversed_comparison_code (cond, NULL);
6275 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6276 SUBST (XEXP (x, 1), false_rtx);
6277 SUBST (XEXP (x, 2), true_rtx);
6278
6279 std::swap (true_rtx, false_rtx);
6280 cond = XEXP (x, 0);
6281
6282 /* It is possible that the conditional has been simplified out. */
6283 true_code = GET_CODE (cond);
6284 comparison_p = COMPARISON_P (cond);
6285 }
6286
6287 /* If the two arms are identical, we don't need the comparison. */
6288
6289 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6290 return true_rtx;
6291
6292 /* Convert a == b ? b : a to "a". */
6293 if (true_code == EQ && ! side_effects_p (cond)
6294 && !HONOR_NANS (mode)
6295 && rtx_equal_p (XEXP (cond, 0), false_rtx)
6296 && rtx_equal_p (XEXP (cond, 1), true_rtx))
6297 return false_rtx;
6298 else if (true_code == NE && ! side_effects_p (cond)
6299 && !HONOR_NANS (mode)
6300 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6301 && rtx_equal_p (XEXP (cond, 1), false_rtx))
6302 return true_rtx;
6303
6304 /* Look for cases where we have (abs x) or (neg (abs X)). */
6305
6306 if (GET_MODE_CLASS (mode) == MODE_INT
6307 && comparison_p
6308 && XEXP (cond, 1) == const0_rtx
6309 && GET_CODE (false_rtx) == NEG
6310 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6311 && rtx_equal_p (true_rtx, XEXP (cond, 0))
6312 && ! side_effects_p (true_rtx))
6313 switch (true_code)
6314 {
6315 case GT:
6316 case GE:
6317 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6318 case LT:
6319 case LE:
6320 return
6321 simplify_gen_unary (NEG, mode,
6322 simplify_gen_unary (ABS, mode, true_rtx, mode),
6323 mode);
6324 default:
6325 break;
6326 }
6327
6328 /* Look for MIN or MAX. */
6329
6330 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6331 && comparison_p
6332 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6333 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6334 && ! side_effects_p (cond))
6335 switch (true_code)
6336 {
6337 case GE:
6338 case GT:
6339 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6340 case LE:
6341 case LT:
6342 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6343 case GEU:
6344 case GTU:
6345 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6346 case LEU:
6347 case LTU:
6348 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6349 default:
6350 break;
6351 }
6352
6353 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6354 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6355 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6356 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6357 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6358 neither 1 or -1, but it isn't worth checking for. */
6359
6360 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6361 && comparison_p
6362 && GET_MODE_CLASS (mode) == MODE_INT
6363 && ! side_effects_p (x))
6364 {
6365 rtx t = make_compound_operation (true_rtx, SET);
6366 rtx f = make_compound_operation (false_rtx, SET);
6367 rtx cond_op0 = XEXP (cond, 0);
6368 rtx cond_op1 = XEXP (cond, 1);
6369 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6370 machine_mode m = mode;
6371 rtx z = 0, c1 = NULL_RTX;
6372
6373 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6374 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6375 || GET_CODE (t) == ASHIFT
6376 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6377 && rtx_equal_p (XEXP (t, 0), f))
6378 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6379
6380 /* If an identity-zero op is commutative, check whether there
6381 would be a match if we swapped the operands. */
6382 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6383 || GET_CODE (t) == XOR)
6384 && rtx_equal_p (XEXP (t, 1), f))
6385 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6386 else if (GET_CODE (t) == SIGN_EXTEND
6387 && (GET_CODE (XEXP (t, 0)) == PLUS
6388 || GET_CODE (XEXP (t, 0)) == MINUS
6389 || GET_CODE (XEXP (t, 0)) == IOR
6390 || GET_CODE (XEXP (t, 0)) == XOR
6391 || GET_CODE (XEXP (t, 0)) == ASHIFT
6392 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6393 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6394 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6395 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6396 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6397 && (num_sign_bit_copies (f, GET_MODE (f))
6398 > (unsigned int)
6399 (GET_MODE_PRECISION (mode)
6400 - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 0))))))
6401 {
6402 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6403 extend_op = SIGN_EXTEND;
6404 m = GET_MODE (XEXP (t, 0));
6405 }
6406 else if (GET_CODE (t) == SIGN_EXTEND
6407 && (GET_CODE (XEXP (t, 0)) == PLUS
6408 || GET_CODE (XEXP (t, 0)) == IOR
6409 || GET_CODE (XEXP (t, 0)) == XOR)
6410 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6411 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6412 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6413 && (num_sign_bit_copies (f, GET_MODE (f))
6414 > (unsigned int)
6415 (GET_MODE_PRECISION (mode)
6416 - GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (t, 0), 1))))))
6417 {
6418 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6419 extend_op = SIGN_EXTEND;
6420 m = GET_MODE (XEXP (t, 0));
6421 }
6422 else if (GET_CODE (t) == ZERO_EXTEND
6423 && (GET_CODE (XEXP (t, 0)) == PLUS
6424 || GET_CODE (XEXP (t, 0)) == MINUS
6425 || GET_CODE (XEXP (t, 0)) == IOR
6426 || GET_CODE (XEXP (t, 0)) == XOR
6427 || GET_CODE (XEXP (t, 0)) == ASHIFT
6428 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6429 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6430 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6431 && HWI_COMPUTABLE_MODE_P (mode)
6432 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6433 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6434 && ((nonzero_bits (f, GET_MODE (f))
6435 & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
6436 == 0))
6437 {
6438 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6439 extend_op = ZERO_EXTEND;
6440 m = GET_MODE (XEXP (t, 0));
6441 }
6442 else if (GET_CODE (t) == ZERO_EXTEND
6443 && (GET_CODE (XEXP (t, 0)) == PLUS
6444 || GET_CODE (XEXP (t, 0)) == IOR
6445 || GET_CODE (XEXP (t, 0)) == XOR)
6446 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6447 && HWI_COMPUTABLE_MODE_P (mode)
6448 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6449 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6450 && ((nonzero_bits (f, GET_MODE (f))
6451 & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
6452 == 0))
6453 {
6454 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6455 extend_op = ZERO_EXTEND;
6456 m = GET_MODE (XEXP (t, 0));
6457 }
6458
6459 if (z)
6460 {
6461 temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
6462 cond_op0, cond_op1),
6463 pc_rtx, pc_rtx, 0, 0, 0);
6464 temp = simplify_gen_binary (MULT, m, temp,
6465 simplify_gen_binary (MULT, m, c1,
6466 const_true_rtx));
6467 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6468 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6469
6470 if (extend_op != UNKNOWN)
6471 temp = simplify_gen_unary (extend_op, mode, temp, m);
6472
6473 return temp;
6474 }
6475 }
6476
6477 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6478 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6479 negation of a single bit, we can convert this operation to a shift. We
6480 can actually do this more generally, but it doesn't seem worth it. */
6481
6482 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6483 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6484 && ((1 == nonzero_bits (XEXP (cond, 0), mode)
6485 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6486 || ((num_sign_bit_copies (XEXP (cond, 0), mode)
6487 == GET_MODE_PRECISION (mode))
6488 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6489 return
6490 simplify_shift_const (NULL_RTX, ASHIFT, mode,
6491 gen_lowpart (mode, XEXP (cond, 0)), i);
6492
6493 /* (IF_THEN_ELSE (NE REG 0) (0) (8)) is REG for nonzero_bits (REG) == 8. */
6494 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6495 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6496 && GET_MODE (XEXP (cond, 0)) == mode
6497 && (UINTVAL (true_rtx) & GET_MODE_MASK (mode))
6498 == nonzero_bits (XEXP (cond, 0), mode)
6499 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (mode))) >= 0)
6500 return XEXP (cond, 0);
6501
6502 return x;
6503 }
6504 \f
6505 /* Simplify X, a SET expression. Return the new expression. */
6506
6507 static rtx
6508 simplify_set (rtx x)
6509 {
6510 rtx src = SET_SRC (x);
6511 rtx dest = SET_DEST (x);
6512 machine_mode mode
6513 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6514 rtx_insn *other_insn;
6515 rtx *cc_use;
6516
6517 /* (set (pc) (return)) gets written as (return). */
6518 if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6519 return src;
6520
6521 /* Now that we know for sure which bits of SRC we are using, see if we can
6522 simplify the expression for the object knowing that we only need the
6523 low-order bits. */
6524
6525 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6526 {
6527 src = force_to_mode (src, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
6528 SUBST (SET_SRC (x), src);
6529 }
6530
6531 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6532 the comparison result and try to simplify it unless we already have used
6533 undobuf.other_insn. */
6534 if ((GET_MODE_CLASS (mode) == MODE_CC
6535 || GET_CODE (src) == COMPARE
6536 || CC0_P (dest))
6537 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6538 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6539 && COMPARISON_P (*cc_use)
6540 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6541 {
6542 enum rtx_code old_code = GET_CODE (*cc_use);
6543 enum rtx_code new_code;
6544 rtx op0, op1, tmp;
6545 int other_changed = 0;
6546 rtx inner_compare = NULL_RTX;
6547 machine_mode compare_mode = GET_MODE (dest);
6548
6549 if (GET_CODE (src) == COMPARE)
6550 {
6551 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6552 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6553 {
6554 inner_compare = op0;
6555 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6556 }
6557 }
6558 else
6559 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6560
6561 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6562 op0, op1);
6563 if (!tmp)
6564 new_code = old_code;
6565 else if (!CONSTANT_P (tmp))
6566 {
6567 new_code = GET_CODE (tmp);
6568 op0 = XEXP (tmp, 0);
6569 op1 = XEXP (tmp, 1);
6570 }
6571 else
6572 {
6573 rtx pat = PATTERN (other_insn);
6574 undobuf.other_insn = other_insn;
6575 SUBST (*cc_use, tmp);
6576
6577 /* Attempt to simplify CC user. */
6578 if (GET_CODE (pat) == SET)
6579 {
6580 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6581 if (new_rtx != NULL_RTX)
6582 SUBST (SET_SRC (pat), new_rtx);
6583 }
6584
6585 /* Convert X into a no-op move. */
6586 SUBST (SET_DEST (x), pc_rtx);
6587 SUBST (SET_SRC (x), pc_rtx);
6588 return x;
6589 }
6590
6591 /* Simplify our comparison, if possible. */
6592 new_code = simplify_comparison (new_code, &op0, &op1);
6593
6594 #ifdef SELECT_CC_MODE
6595 /* If this machine has CC modes other than CCmode, check to see if we
6596 need to use a different CC mode here. */
6597 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6598 compare_mode = GET_MODE (op0);
6599 else if (inner_compare
6600 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6601 && new_code == old_code
6602 && op0 == XEXP (inner_compare, 0)
6603 && op1 == XEXP (inner_compare, 1))
6604 compare_mode = GET_MODE (inner_compare);
6605 else
6606 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6607
6608 /* If the mode changed, we have to change SET_DEST, the mode in the
6609 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6610 a hard register, just build new versions with the proper mode. If it
6611 is a pseudo, we lose unless it is only time we set the pseudo, in
6612 which case we can safely change its mode. */
6613 if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
6614 {
6615 if (can_change_dest_mode (dest, 0, compare_mode))
6616 {
6617 unsigned int regno = REGNO (dest);
6618 rtx new_dest;
6619
6620 if (regno < FIRST_PSEUDO_REGISTER)
6621 new_dest = gen_rtx_REG (compare_mode, regno);
6622 else
6623 {
6624 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6625 new_dest = regno_reg_rtx[regno];
6626 }
6627
6628 SUBST (SET_DEST (x), new_dest);
6629 SUBST (XEXP (*cc_use, 0), new_dest);
6630 other_changed = 1;
6631
6632 dest = new_dest;
6633 }
6634 }
6635 #endif /* SELECT_CC_MODE */
6636
6637 /* If the code changed, we have to build a new comparison in
6638 undobuf.other_insn. */
6639 if (new_code != old_code)
6640 {
6641 int other_changed_previously = other_changed;
6642 unsigned HOST_WIDE_INT mask;
6643 rtx old_cc_use = *cc_use;
6644
6645 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6646 dest, const0_rtx));
6647 other_changed = 1;
6648
6649 /* If the only change we made was to change an EQ into an NE or
6650 vice versa, OP0 has only one bit that might be nonzero, and OP1
6651 is zero, check if changing the user of the condition code will
6652 produce a valid insn. If it won't, we can keep the original code
6653 in that insn by surrounding our operation with an XOR. */
6654
6655 if (((old_code == NE && new_code == EQ)
6656 || (old_code == EQ && new_code == NE))
6657 && ! other_changed_previously && op1 == const0_rtx
6658 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6659 && exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0)
6660 {
6661 rtx pat = PATTERN (other_insn), note = 0;
6662
6663 if ((recog_for_combine (&pat, other_insn, &note) < 0
6664 && ! check_asm_operands (pat)))
6665 {
6666 *cc_use = old_cc_use;
6667 other_changed = 0;
6668
6669 op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
6670 gen_int_mode (mask,
6671 GET_MODE (op0)));
6672 }
6673 }
6674 }
6675
6676 if (other_changed)
6677 undobuf.other_insn = other_insn;
6678
6679 /* Don't generate a compare of a CC with 0, just use that CC. */
6680 if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6681 {
6682 SUBST (SET_SRC (x), op0);
6683 src = SET_SRC (x);
6684 }
6685 /* Otherwise, if we didn't previously have the same COMPARE we
6686 want, create it from scratch. */
6687 else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
6688 || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6689 {
6690 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6691 src = SET_SRC (x);
6692 }
6693 }
6694 else
6695 {
6696 /* Get SET_SRC in a form where we have placed back any
6697 compound expressions. Then do the checks below. */
6698 src = make_compound_operation (src, SET);
6699 SUBST (SET_SRC (x), src);
6700 }
6701
6702 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6703 and X being a REG or (subreg (reg)), we may be able to convert this to
6704 (set (subreg:m2 x) (op)).
6705
6706 We can always do this if M1 is narrower than M2 because that means that
6707 we only care about the low bits of the result.
6708
6709 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6710 perform a narrower operation than requested since the high-order bits will
6711 be undefined. On machine where it is defined, this transformation is safe
6712 as long as M1 and M2 have the same number of words. */
6713
6714 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6715 && !OBJECT_P (SUBREG_REG (src))
6716 && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
6717 / UNITS_PER_WORD)
6718 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
6719 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
6720 && (WORD_REGISTER_OPERATIONS
6721 || (GET_MODE_SIZE (GET_MODE (src))
6722 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
6723 #ifdef CANNOT_CHANGE_MODE_CLASS
6724 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6725 && REG_CANNOT_CHANGE_MODE_P (REGNO (dest),
6726 GET_MODE (SUBREG_REG (src)),
6727 GET_MODE (src)))
6728 #endif
6729 && (REG_P (dest)
6730 || (GET_CODE (dest) == SUBREG
6731 && REG_P (SUBREG_REG (dest)))))
6732 {
6733 SUBST (SET_DEST (x),
6734 gen_lowpart (GET_MODE (SUBREG_REG (src)),
6735 dest));
6736 SUBST (SET_SRC (x), SUBREG_REG (src));
6737
6738 src = SET_SRC (x), dest = SET_DEST (x);
6739 }
6740
6741 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6742 in SRC. */
6743 if (dest == cc0_rtx
6744 && GET_CODE (src) == SUBREG
6745 && subreg_lowpart_p (src)
6746 && (GET_MODE_PRECISION (GET_MODE (src))
6747 < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (src)))))
6748 {
6749 rtx inner = SUBREG_REG (src);
6750 machine_mode inner_mode = GET_MODE (inner);
6751
6752 /* Here we make sure that we don't have a sign bit on. */
6753 if (val_signbit_known_clear_p (GET_MODE (src),
6754 nonzero_bits (inner, inner_mode)))
6755 {
6756 SUBST (SET_SRC (x), inner);
6757 src = SET_SRC (x);
6758 }
6759 }
6760
6761 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6762 would require a paradoxical subreg. Replace the subreg with a
6763 zero_extend to avoid the reload that would otherwise be required. */
6764
6765 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6766 && INTEGRAL_MODE_P (GET_MODE (SUBREG_REG (src)))
6767 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != UNKNOWN
6768 && SUBREG_BYTE (src) == 0
6769 && paradoxical_subreg_p (src)
6770 && MEM_P (SUBREG_REG (src)))
6771 {
6772 SUBST (SET_SRC (x),
6773 gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))),
6774 GET_MODE (src), SUBREG_REG (src)));
6775
6776 src = SET_SRC (x);
6777 }
6778
6779 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6780 are comparing an item known to be 0 or -1 against 0, use a logical
6781 operation instead. Check for one of the arms being an IOR of the other
6782 arm with some value. We compute three terms to be IOR'ed together. In
6783 practice, at most two will be nonzero. Then we do the IOR's. */
6784
6785 if (GET_CODE (dest) != PC
6786 && GET_CODE (src) == IF_THEN_ELSE
6787 && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT
6788 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
6789 && XEXP (XEXP (src, 0), 1) == const0_rtx
6790 && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0))
6791 && (!HAVE_conditional_move
6792 || ! can_conditionally_move_p (GET_MODE (src)))
6793 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
6794 GET_MODE (XEXP (XEXP (src, 0), 0)))
6795 == GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (src, 0), 0))))
6796 && ! side_effects_p (src))
6797 {
6798 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
6799 ? XEXP (src, 1) : XEXP (src, 2));
6800 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
6801 ? XEXP (src, 2) : XEXP (src, 1));
6802 rtx term1 = const0_rtx, term2, term3;
6803
6804 if (GET_CODE (true_rtx) == IOR
6805 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
6806 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
6807 else if (GET_CODE (true_rtx) == IOR
6808 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
6809 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
6810 else if (GET_CODE (false_rtx) == IOR
6811 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
6812 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
6813 else if (GET_CODE (false_rtx) == IOR
6814 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
6815 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
6816
6817 term2 = simplify_gen_binary (AND, GET_MODE (src),
6818 XEXP (XEXP (src, 0), 0), true_rtx);
6819 term3 = simplify_gen_binary (AND, GET_MODE (src),
6820 simplify_gen_unary (NOT, GET_MODE (src),
6821 XEXP (XEXP (src, 0), 0),
6822 GET_MODE (src)),
6823 false_rtx);
6824
6825 SUBST (SET_SRC (x),
6826 simplify_gen_binary (IOR, GET_MODE (src),
6827 simplify_gen_binary (IOR, GET_MODE (src),
6828 term1, term2),
6829 term3));
6830
6831 src = SET_SRC (x);
6832 }
6833
6834 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
6835 whole thing fail. */
6836 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
6837 return src;
6838 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
6839 return dest;
6840 else
6841 /* Convert this into a field assignment operation, if possible. */
6842 return make_field_assignment (x);
6843 }
6844 \f
6845 /* Simplify, X, and AND, IOR, or XOR operation, and return the simplified
6846 result. */
6847
6848 static rtx
6849 simplify_logical (rtx x)
6850 {
6851 machine_mode mode = GET_MODE (x);
6852 rtx op0 = XEXP (x, 0);
6853 rtx op1 = XEXP (x, 1);
6854
6855 switch (GET_CODE (x))
6856 {
6857 case AND:
6858 /* We can call simplify_and_const_int only if we don't lose
6859 any (sign) bits when converting INTVAL (op1) to
6860 "unsigned HOST_WIDE_INT". */
6861 if (CONST_INT_P (op1)
6862 && (HWI_COMPUTABLE_MODE_P (mode)
6863 || INTVAL (op1) > 0))
6864 {
6865 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
6866 if (GET_CODE (x) != AND)
6867 return x;
6868
6869 op0 = XEXP (x, 0);
6870 op1 = XEXP (x, 1);
6871 }
6872
6873 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
6874 apply the distributive law and then the inverse distributive
6875 law to see if things simplify. */
6876 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
6877 {
6878 rtx result = distribute_and_simplify_rtx (x, 0);
6879 if (result)
6880 return result;
6881 }
6882 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
6883 {
6884 rtx result = distribute_and_simplify_rtx (x, 1);
6885 if (result)
6886 return result;
6887 }
6888 break;
6889
6890 case IOR:
6891 /* If we have (ior (and A B) C), apply the distributive law and then
6892 the inverse distributive law to see if things simplify. */
6893
6894 if (GET_CODE (op0) == AND)
6895 {
6896 rtx result = distribute_and_simplify_rtx (x, 0);
6897 if (result)
6898 return result;
6899 }
6900
6901 if (GET_CODE (op1) == AND)
6902 {
6903 rtx result = distribute_and_simplify_rtx (x, 1);
6904 if (result)
6905 return result;
6906 }
6907 break;
6908
6909 default:
6910 gcc_unreachable ();
6911 }
6912
6913 return x;
6914 }
6915 \f
6916 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
6917 operations" because they can be replaced with two more basic operations.
6918 ZERO_EXTEND is also considered "compound" because it can be replaced with
6919 an AND operation, which is simpler, though only one operation.
6920
6921 The function expand_compound_operation is called with an rtx expression
6922 and will convert it to the appropriate shifts and AND operations,
6923 simplifying at each stage.
6924
6925 The function make_compound_operation is called to convert an expression
6926 consisting of shifts and ANDs into the equivalent compound expression.
6927 It is the inverse of this function, loosely speaking. */
6928
6929 static rtx
6930 expand_compound_operation (rtx x)
6931 {
6932 unsigned HOST_WIDE_INT pos = 0, len;
6933 int unsignedp = 0;
6934 unsigned int modewidth;
6935 rtx tem;
6936
6937 switch (GET_CODE (x))
6938 {
6939 case ZERO_EXTEND:
6940 unsignedp = 1;
6941 case SIGN_EXTEND:
6942 /* We can't necessarily use a const_int for a multiword mode;
6943 it depends on implicitly extending the value.
6944 Since we don't know the right way to extend it,
6945 we can't tell whether the implicit way is right.
6946
6947 Even for a mode that is no wider than a const_int,
6948 we can't win, because we need to sign extend one of its bits through
6949 the rest of it, and we don't know which bit. */
6950 if (CONST_INT_P (XEXP (x, 0)))
6951 return x;
6952
6953 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
6954 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
6955 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
6956 reloaded. If not for that, MEM's would very rarely be safe.
6957
6958 Reject MODEs bigger than a word, because we might not be able
6959 to reference a two-register group starting with an arbitrary register
6960 (and currently gen_lowpart might crash for a SUBREG). */
6961
6962 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) > UNITS_PER_WORD)
6963 return x;
6964
6965 /* Reject MODEs that aren't scalar integers because turning vector
6966 or complex modes into shifts causes problems. */
6967
6968 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
6969 return x;
6970
6971 len = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
6972 /* If the inner object has VOIDmode (the only way this can happen
6973 is if it is an ASM_OPERANDS), we can't do anything since we don't
6974 know how much masking to do. */
6975 if (len == 0)
6976 return x;
6977
6978 break;
6979
6980 case ZERO_EXTRACT:
6981 unsignedp = 1;
6982
6983 /* ... fall through ... */
6984
6985 case SIGN_EXTRACT:
6986 /* If the operand is a CLOBBER, just return it. */
6987 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
6988 return XEXP (x, 0);
6989
6990 if (!CONST_INT_P (XEXP (x, 1))
6991 || !CONST_INT_P (XEXP (x, 2))
6992 || GET_MODE (XEXP (x, 0)) == VOIDmode)
6993 return x;
6994
6995 /* Reject MODEs that aren't scalar integers because turning vector
6996 or complex modes into shifts causes problems. */
6997
6998 if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
6999 return x;
7000
7001 len = INTVAL (XEXP (x, 1));
7002 pos = INTVAL (XEXP (x, 2));
7003
7004 /* This should stay within the object being extracted, fail otherwise. */
7005 if (len + pos > GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))))
7006 return x;
7007
7008 if (BITS_BIG_ENDIAN)
7009 pos = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0))) - len - pos;
7010
7011 break;
7012
7013 default:
7014 return x;
7015 }
7016 /* Convert sign extension to zero extension, if we know that the high
7017 bit is not set, as this is easier to optimize. It will be converted
7018 back to cheaper alternative in make_extraction. */
7019 if (GET_CODE (x) == SIGN_EXTEND
7020 && (HWI_COMPUTABLE_MODE_P (GET_MODE (x))
7021 && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
7022 & ~(((unsigned HOST_WIDE_INT)
7023 GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
7024 >> 1))
7025 == 0)))
7026 {
7027 machine_mode mode = GET_MODE (x);
7028 rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7029 rtx temp2 = expand_compound_operation (temp);
7030
7031 /* Make sure this is a profitable operation. */
7032 if (set_src_cost (x, mode, optimize_this_for_speed_p)
7033 > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7034 return temp2;
7035 else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7036 > set_src_cost (temp, mode, optimize_this_for_speed_p))
7037 return temp;
7038 else
7039 return x;
7040 }
7041
7042 /* We can optimize some special cases of ZERO_EXTEND. */
7043 if (GET_CODE (x) == ZERO_EXTEND)
7044 {
7045 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7046 know that the last value didn't have any inappropriate bits
7047 set. */
7048 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7049 && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
7050 && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
7051 && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
7052 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
7053 return XEXP (XEXP (x, 0), 0);
7054
7055 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7056 if (GET_CODE (XEXP (x, 0)) == SUBREG
7057 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
7058 && subreg_lowpart_p (XEXP (x, 0))
7059 && HWI_COMPUTABLE_MODE_P (GET_MODE (x))
7060 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
7061 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
7062 return SUBREG_REG (XEXP (x, 0));
7063
7064 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7065 is a comparison and STORE_FLAG_VALUE permits. This is like
7066 the first case, but it works even when GET_MODE (x) is larger
7067 than HOST_WIDE_INT. */
7068 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7069 && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
7070 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7071 && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
7072 <= HOST_BITS_PER_WIDE_INT)
7073 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
7074 return XEXP (XEXP (x, 0), 0);
7075
7076 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7077 if (GET_CODE (XEXP (x, 0)) == SUBREG
7078 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
7079 && subreg_lowpart_p (XEXP (x, 0))
7080 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7081 && (GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)))
7082 <= HOST_BITS_PER_WIDE_INT)
7083 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
7084 return SUBREG_REG (XEXP (x, 0));
7085
7086 }
7087
7088 /* If we reach here, we want to return a pair of shifts. The inner
7089 shift is a left shift of BITSIZE - POS - LEN bits. The outer
7090 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
7091 logical depending on the value of UNSIGNEDP.
7092
7093 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7094 converted into an AND of a shift.
7095
7096 We must check for the case where the left shift would have a negative
7097 count. This can happen in a case like (x >> 31) & 255 on machines
7098 that can't shift by a constant. On those machines, we would first
7099 combine the shift with the AND to produce a variable-position
7100 extraction. Then the constant of 31 would be substituted in
7101 to produce such a position. */
7102
7103 modewidth = GET_MODE_PRECISION (GET_MODE (x));
7104 if (modewidth >= pos + len)
7105 {
7106 machine_mode mode = GET_MODE (x);
7107 tem = gen_lowpart (mode, XEXP (x, 0));
7108 if (!tem || GET_CODE (tem) == CLOBBER)
7109 return x;
7110 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7111 tem, modewidth - pos - len);
7112 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7113 mode, tem, modewidth - len);
7114 }
7115 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7116 tem = simplify_and_const_int (NULL_RTX, GET_MODE (x),
7117 simplify_shift_const (NULL_RTX, LSHIFTRT,
7118 GET_MODE (x),
7119 XEXP (x, 0), pos),
7120 ((unsigned HOST_WIDE_INT) 1 << len) - 1);
7121 else
7122 /* Any other cases we can't handle. */
7123 return x;
7124
7125 /* If we couldn't do this for some reason, return the original
7126 expression. */
7127 if (GET_CODE (tem) == CLOBBER)
7128 return x;
7129
7130 return tem;
7131 }
7132 \f
7133 /* X is a SET which contains an assignment of one object into
7134 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7135 or certain SUBREGS). If possible, convert it into a series of
7136 logical operations.
7137
7138 We half-heartedly support variable positions, but do not at all
7139 support variable lengths. */
7140
7141 static const_rtx
7142 expand_field_assignment (const_rtx x)
7143 {
7144 rtx inner;
7145 rtx pos; /* Always counts from low bit. */
7146 int len;
7147 rtx mask, cleared, masked;
7148 machine_mode compute_mode;
7149
7150 /* Loop until we find something we can't simplify. */
7151 while (1)
7152 {
7153 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7154 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7155 {
7156 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7157 len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
7158 pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
7159 }
7160 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7161 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7162 {
7163 inner = XEXP (SET_DEST (x), 0);
7164 len = INTVAL (XEXP (SET_DEST (x), 1));
7165 pos = XEXP (SET_DEST (x), 2);
7166
7167 /* A constant position should stay within the width of INNER. */
7168 if (CONST_INT_P (pos)
7169 && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
7170 break;
7171
7172 if (BITS_BIG_ENDIAN)
7173 {
7174 if (CONST_INT_P (pos))
7175 pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
7176 - INTVAL (pos));
7177 else if (GET_CODE (pos) == MINUS
7178 && CONST_INT_P (XEXP (pos, 1))
7179 && (INTVAL (XEXP (pos, 1))
7180 == GET_MODE_PRECISION (GET_MODE (inner)) - len))
7181 /* If position is ADJUST - X, new position is X. */
7182 pos = XEXP (pos, 0);
7183 else
7184 {
7185 HOST_WIDE_INT prec = GET_MODE_PRECISION (GET_MODE (inner));
7186 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7187 gen_int_mode (prec - len,
7188 GET_MODE (pos)),
7189 pos);
7190 }
7191 }
7192 }
7193
7194 /* A SUBREG between two modes that occupy the same numbers of words
7195 can be done by moving the SUBREG to the source. */
7196 else if (GET_CODE (SET_DEST (x)) == SUBREG
7197 /* We need SUBREGs to compute nonzero_bits properly. */
7198 && nonzero_sign_valid
7199 && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
7200 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
7201 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
7202 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
7203 {
7204 x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7205 gen_lowpart
7206 (GET_MODE (SUBREG_REG (SET_DEST (x))),
7207 SET_SRC (x)));
7208 continue;
7209 }
7210 else
7211 break;
7212
7213 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7214 inner = SUBREG_REG (inner);
7215
7216 compute_mode = GET_MODE (inner);
7217
7218 /* Don't attempt bitwise arithmetic on non scalar integer modes. */
7219 if (! SCALAR_INT_MODE_P (compute_mode))
7220 {
7221 machine_mode imode;
7222
7223 /* Don't do anything for vector or complex integral types. */
7224 if (! FLOAT_MODE_P (compute_mode))
7225 break;
7226
7227 /* Try to find an integral mode to pun with. */
7228 imode = mode_for_size (GET_MODE_BITSIZE (compute_mode), MODE_INT, 0);
7229 if (imode == BLKmode)
7230 break;
7231
7232 compute_mode = imode;
7233 inner = gen_lowpart (imode, inner);
7234 }
7235
7236 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7237 if (len >= HOST_BITS_PER_WIDE_INT)
7238 break;
7239
7240 /* Now compute the equivalent expression. Make a copy of INNER
7241 for the SET_DEST in case it is a MEM into which we will substitute;
7242 we don't want shared RTL in that case. */
7243 mask = gen_int_mode (((unsigned HOST_WIDE_INT) 1 << len) - 1,
7244 compute_mode);
7245 cleared = simplify_gen_binary (AND, compute_mode,
7246 simplify_gen_unary (NOT, compute_mode,
7247 simplify_gen_binary (ASHIFT,
7248 compute_mode,
7249 mask, pos),
7250 compute_mode),
7251 inner);
7252 masked = simplify_gen_binary (ASHIFT, compute_mode,
7253 simplify_gen_binary (
7254 AND, compute_mode,
7255 gen_lowpart (compute_mode, SET_SRC (x)),
7256 mask),
7257 pos);
7258
7259 x = gen_rtx_SET (copy_rtx (inner),
7260 simplify_gen_binary (IOR, compute_mode,
7261 cleared, masked));
7262 }
7263
7264 return x;
7265 }
7266 \f
7267 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7268 it is an RTX that represents the (variable) starting position; otherwise,
7269 POS is the (constant) starting bit position. Both are counted from the LSB.
7270
7271 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7272
7273 IN_DEST is nonzero if this is a reference in the destination of a SET.
7274 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7275 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7276 be used.
7277
7278 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7279 ZERO_EXTRACT should be built even for bits starting at bit 0.
7280
7281 MODE is the desired mode of the result (if IN_DEST == 0).
7282
7283 The result is an RTX for the extraction or NULL_RTX if the target
7284 can't handle it. */
7285
7286 static rtx
7287 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7288 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7289 int in_dest, int in_compare)
7290 {
7291 /* This mode describes the size of the storage area
7292 to fetch the overall value from. Within that, we
7293 ignore the POS lowest bits, etc. */
7294 machine_mode is_mode = GET_MODE (inner);
7295 machine_mode inner_mode;
7296 machine_mode wanted_inner_mode;
7297 machine_mode wanted_inner_reg_mode = word_mode;
7298 machine_mode pos_mode = word_mode;
7299 machine_mode extraction_mode = word_mode;
7300 machine_mode tmode = mode_for_size (len, MODE_INT, 1);
7301 rtx new_rtx = 0;
7302 rtx orig_pos_rtx = pos_rtx;
7303 HOST_WIDE_INT orig_pos;
7304
7305 if (pos_rtx && CONST_INT_P (pos_rtx))
7306 pos = INTVAL (pos_rtx), pos_rtx = 0;
7307
7308 if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7309 {
7310 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7311 consider just the QI as the memory to extract from.
7312 The subreg adds or removes high bits; its mode is
7313 irrelevant to the meaning of this extraction,
7314 since POS and LEN count from the lsb. */
7315 if (MEM_P (SUBREG_REG (inner)))
7316 is_mode = GET_MODE (SUBREG_REG (inner));
7317 inner = SUBREG_REG (inner);
7318 }
7319 else if (GET_CODE (inner) == ASHIFT
7320 && CONST_INT_P (XEXP (inner, 1))
7321 && pos_rtx == 0 && pos == 0
7322 && len > UINTVAL (XEXP (inner, 1)))
7323 {
7324 /* We're extracting the least significant bits of an rtx
7325 (ashift X (const_int C)), where LEN > C. Extract the
7326 least significant (LEN - C) bits of X, giving an rtx
7327 whose mode is MODE, then shift it left C times. */
7328 new_rtx = make_extraction (mode, XEXP (inner, 0),
7329 0, 0, len - INTVAL (XEXP (inner, 1)),
7330 unsignedp, in_dest, in_compare);
7331 if (new_rtx != 0)
7332 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7333 }
7334 else if (GET_CODE (inner) == TRUNCATE)
7335 inner = XEXP (inner, 0);
7336
7337 inner_mode = GET_MODE (inner);
7338
7339 /* See if this can be done without an extraction. We never can if the
7340 width of the field is not the same as that of some integer mode. For
7341 registers, we can only avoid the extraction if the position is at the
7342 low-order bit and this is either not in the destination or we have the
7343 appropriate STRICT_LOW_PART operation available.
7344
7345 For MEM, we can avoid an extract if the field starts on an appropriate
7346 boundary and we can change the mode of the memory reference. */
7347
7348 if (tmode != BLKmode
7349 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7350 && !MEM_P (inner)
7351 && (inner_mode == tmode
7352 || !REG_P (inner)
7353 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7354 || reg_truncated_to_mode (tmode, inner))
7355 && (! in_dest
7356 || (REG_P (inner)
7357 && have_insn_for (STRICT_LOW_PART, tmode))))
7358 || (MEM_P (inner) && pos_rtx == 0
7359 && (pos
7360 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7361 : BITS_PER_UNIT)) == 0
7362 /* We can't do this if we are widening INNER_MODE (it
7363 may not be aligned, for one thing). */
7364 && GET_MODE_PRECISION (inner_mode) >= GET_MODE_PRECISION (tmode)
7365 && (inner_mode == tmode
7366 || (! mode_dependent_address_p (XEXP (inner, 0),
7367 MEM_ADDR_SPACE (inner))
7368 && ! MEM_VOLATILE_P (inner))))))
7369 {
7370 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7371 field. If the original and current mode are the same, we need not
7372 adjust the offset. Otherwise, we do if bytes big endian.
7373
7374 If INNER is not a MEM, get a piece consisting of just the field
7375 of interest (in this case POS % BITS_PER_WORD must be 0). */
7376
7377 if (MEM_P (inner))
7378 {
7379 HOST_WIDE_INT offset;
7380
7381 /* POS counts from lsb, but make OFFSET count in memory order. */
7382 if (BYTES_BIG_ENDIAN)
7383 offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
7384 else
7385 offset = pos / BITS_PER_UNIT;
7386
7387 new_rtx = adjust_address_nv (inner, tmode, offset);
7388 }
7389 else if (REG_P (inner))
7390 {
7391 if (tmode != inner_mode)
7392 {
7393 /* We can't call gen_lowpart in a DEST since we
7394 always want a SUBREG (see below) and it would sometimes
7395 return a new hard register. */
7396 if (pos || in_dest)
7397 {
7398 HOST_WIDE_INT final_word = pos / BITS_PER_WORD;
7399
7400 if (WORDS_BIG_ENDIAN
7401 && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7402 final_word = ((GET_MODE_SIZE (inner_mode)
7403 - GET_MODE_SIZE (tmode))
7404 / UNITS_PER_WORD) - final_word;
7405
7406 final_word *= UNITS_PER_WORD;
7407 if (BYTES_BIG_ENDIAN &&
7408 GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (tmode))
7409 final_word += (GET_MODE_SIZE (inner_mode)
7410 - GET_MODE_SIZE (tmode)) % UNITS_PER_WORD;
7411
7412 /* Avoid creating invalid subregs, for example when
7413 simplifying (x>>32)&255. */
7414 if (!validate_subreg (tmode, inner_mode, inner, final_word))
7415 return NULL_RTX;
7416
7417 new_rtx = gen_rtx_SUBREG (tmode, inner, final_word);
7418 }
7419 else
7420 new_rtx = gen_lowpart (tmode, inner);
7421 }
7422 else
7423 new_rtx = inner;
7424 }
7425 else
7426 new_rtx = force_to_mode (inner, tmode,
7427 len >= HOST_BITS_PER_WIDE_INT
7428 ? ~(unsigned HOST_WIDE_INT) 0
7429 : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
7430 0);
7431
7432 /* If this extraction is going into the destination of a SET,
7433 make a STRICT_LOW_PART unless we made a MEM. */
7434
7435 if (in_dest)
7436 return (MEM_P (new_rtx) ? new_rtx
7437 : (GET_CODE (new_rtx) != SUBREG
7438 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7439 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7440
7441 if (mode == tmode)
7442 return new_rtx;
7443
7444 if (CONST_SCALAR_INT_P (new_rtx))
7445 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7446 mode, new_rtx, tmode);
7447
7448 /* If we know that no extraneous bits are set, and that the high
7449 bit is not set, convert the extraction to the cheaper of
7450 sign and zero extension, that are equivalent in these cases. */
7451 if (flag_expensive_optimizations
7452 && (HWI_COMPUTABLE_MODE_P (tmode)
7453 && ((nonzero_bits (new_rtx, tmode)
7454 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7455 == 0)))
7456 {
7457 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7458 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7459
7460 /* Prefer ZERO_EXTENSION, since it gives more information to
7461 backends. */
7462 if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7463 <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7464 return temp;
7465 return temp1;
7466 }
7467
7468 /* Otherwise, sign- or zero-extend unless we already are in the
7469 proper mode. */
7470
7471 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7472 mode, new_rtx));
7473 }
7474
7475 /* Unless this is a COMPARE or we have a funny memory reference,
7476 don't do anything with zero-extending field extracts starting at
7477 the low-order bit since they are simple AND operations. */
7478 if (pos_rtx == 0 && pos == 0 && ! in_dest
7479 && ! in_compare && unsignedp)
7480 return 0;
7481
7482 /* Unless INNER is not MEM, reject this if we would be spanning bytes or
7483 if the position is not a constant and the length is not 1. In all
7484 other cases, we would only be going outside our object in cases when
7485 an original shift would have been undefined. */
7486 if (MEM_P (inner)
7487 && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
7488 || (pos_rtx != 0 && len != 1)))
7489 return 0;
7490
7491 enum extraction_pattern pattern = (in_dest ? EP_insv
7492 : unsignedp ? EP_extzv : EP_extv);
7493
7494 /* If INNER is not from memory, we want it to have the mode of a register
7495 extraction pattern's structure operand, or word_mode if there is no
7496 such pattern. The same applies to extraction_mode and pos_mode
7497 and their respective operands.
7498
7499 For memory, assume that the desired extraction_mode and pos_mode
7500 are the same as for a register operation, since at present we don't
7501 have named patterns for aligned memory structures. */
7502 struct extraction_insn insn;
7503 if (get_best_reg_extraction_insn (&insn, pattern,
7504 GET_MODE_BITSIZE (inner_mode), mode))
7505 {
7506 wanted_inner_reg_mode = insn.struct_mode;
7507 pos_mode = insn.pos_mode;
7508 extraction_mode = insn.field_mode;
7509 }
7510
7511 /* Never narrow an object, since that might not be safe. */
7512
7513 if (mode != VOIDmode
7514 && GET_MODE_SIZE (extraction_mode) < GET_MODE_SIZE (mode))
7515 extraction_mode = mode;
7516
7517 if (!MEM_P (inner))
7518 wanted_inner_mode = wanted_inner_reg_mode;
7519 else
7520 {
7521 /* Be careful not to go beyond the extracted object and maintain the
7522 natural alignment of the memory. */
7523 wanted_inner_mode = smallest_mode_for_size (len, MODE_INT);
7524 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7525 > GET_MODE_BITSIZE (wanted_inner_mode))
7526 {
7527 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode);
7528 gcc_assert (wanted_inner_mode != VOIDmode);
7529 }
7530 }
7531
7532 orig_pos = pos;
7533
7534 if (BITS_BIG_ENDIAN)
7535 {
7536 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7537 BITS_BIG_ENDIAN style. If position is constant, compute new
7538 position. Otherwise, build subtraction.
7539 Note that POS is relative to the mode of the original argument.
7540 If it's a MEM we need to recompute POS relative to that.
7541 However, if we're extracting from (or inserting into) a register,
7542 we want to recompute POS relative to wanted_inner_mode. */
7543 int width = (MEM_P (inner)
7544 ? GET_MODE_BITSIZE (is_mode)
7545 : GET_MODE_BITSIZE (wanted_inner_mode));
7546
7547 if (pos_rtx == 0)
7548 pos = width - len - pos;
7549 else
7550 pos_rtx
7551 = gen_rtx_MINUS (GET_MODE (pos_rtx),
7552 gen_int_mode (width - len, GET_MODE (pos_rtx)),
7553 pos_rtx);
7554 /* POS may be less than 0 now, but we check for that below.
7555 Note that it can only be less than 0 if !MEM_P (inner). */
7556 }
7557
7558 /* If INNER has a wider mode, and this is a constant extraction, try to
7559 make it smaller and adjust the byte to point to the byte containing
7560 the value. */
7561 if (wanted_inner_mode != VOIDmode
7562 && inner_mode != wanted_inner_mode
7563 && ! pos_rtx
7564 && GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
7565 && MEM_P (inner)
7566 && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7567 && ! MEM_VOLATILE_P (inner))
7568 {
7569 int offset = 0;
7570
7571 /* The computations below will be correct if the machine is big
7572 endian in both bits and bytes or little endian in bits and bytes.
7573 If it is mixed, we must adjust. */
7574
7575 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7576 adjust OFFSET to compensate. */
7577 if (BYTES_BIG_ENDIAN
7578 && GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode))
7579 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7580
7581 /* We can now move to the desired byte. */
7582 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7583 * GET_MODE_SIZE (wanted_inner_mode);
7584 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7585
7586 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7587 && is_mode != wanted_inner_mode)
7588 offset = (GET_MODE_SIZE (is_mode)
7589 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7590
7591 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7592 }
7593
7594 /* If INNER is not memory, get it into the proper mode. If we are changing
7595 its mode, POS must be a constant and smaller than the size of the new
7596 mode. */
7597 else if (!MEM_P (inner))
7598 {
7599 /* On the LHS, don't create paradoxical subregs implicitely truncating
7600 the register unless TRULY_NOOP_TRUNCATION. */
7601 if (in_dest
7602 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7603 wanted_inner_mode))
7604 return NULL_RTX;
7605
7606 if (GET_MODE (inner) != wanted_inner_mode
7607 && (pos_rtx != 0
7608 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7609 return NULL_RTX;
7610
7611 if (orig_pos < 0)
7612 return NULL_RTX;
7613
7614 inner = force_to_mode (inner, wanted_inner_mode,
7615 pos_rtx
7616 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7617 ? ~(unsigned HOST_WIDE_INT) 0
7618 : ((((unsigned HOST_WIDE_INT) 1 << len) - 1)
7619 << orig_pos),
7620 0);
7621 }
7622
7623 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7624 have to zero extend. Otherwise, we can just use a SUBREG. */
7625 if (pos_rtx != 0
7626 && GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx)))
7627 {
7628 rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7629 GET_MODE (pos_rtx));
7630
7631 /* If we know that no extraneous bits are set, and that the high
7632 bit is not set, convert extraction to cheaper one - either
7633 SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
7634 cases. */
7635 if (flag_expensive_optimizations
7636 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7637 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7638 & ~(((unsigned HOST_WIDE_INT)
7639 GET_MODE_MASK (GET_MODE (pos_rtx)))
7640 >> 1))
7641 == 0)))
7642 {
7643 rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7644 GET_MODE (pos_rtx));
7645
7646 /* Prefer ZERO_EXTENSION, since it gives more information to
7647 backends. */
7648 if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7649 < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7650 temp = temp1;
7651 }
7652 pos_rtx = temp;
7653 }
7654
7655 /* Make POS_RTX unless we already have it and it is correct. If we don't
7656 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7657 be a CONST_INT. */
7658 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7659 pos_rtx = orig_pos_rtx;
7660
7661 else if (pos_rtx == 0)
7662 pos_rtx = GEN_INT (pos);
7663
7664 /* Make the required operation. See if we can use existing rtx. */
7665 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7666 extraction_mode, inner, GEN_INT (len), pos_rtx);
7667 if (! in_dest)
7668 new_rtx = gen_lowpart (mode, new_rtx);
7669
7670 return new_rtx;
7671 }
7672 \f
7673 /* See if X contains an ASHIFT of COUNT or more bits that can be commuted
7674 with any other operations in X. Return X without that shift if so. */
7675
7676 static rtx
7677 extract_left_shift (rtx x, int count)
7678 {
7679 enum rtx_code code = GET_CODE (x);
7680 machine_mode mode = GET_MODE (x);
7681 rtx tem;
7682
7683 switch (code)
7684 {
7685 case ASHIFT:
7686 /* This is the shift itself. If it is wide enough, we will return
7687 either the value being shifted if the shift count is equal to
7688 COUNT or a shift for the difference. */
7689 if (CONST_INT_P (XEXP (x, 1))
7690 && INTVAL (XEXP (x, 1)) >= count)
7691 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7692 INTVAL (XEXP (x, 1)) - count);
7693 break;
7694
7695 case NEG: case NOT:
7696 if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7697 return simplify_gen_unary (code, mode, tem, mode);
7698
7699 break;
7700
7701 case PLUS: case IOR: case XOR: case AND:
7702 /* If we can safely shift this constant and we find the inner shift,
7703 make a new operation. */
7704 if (CONST_INT_P (XEXP (x, 1))
7705 && (UINTVAL (XEXP (x, 1))
7706 & ((((unsigned HOST_WIDE_INT) 1 << count)) - 1)) == 0
7707 && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
7708 {
7709 HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
7710 return simplify_gen_binary (code, mode, tem,
7711 gen_int_mode (val, mode));
7712 }
7713 break;
7714
7715 default:
7716 break;
7717 }
7718
7719 return 0;
7720 }
7721 \f
7722 /* Look at the expression rooted at X. Look for expressions
7723 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
7724 Form these expressions.
7725
7726 Return the new rtx, usually just X.
7727
7728 Also, for machines like the VAX that don't have logical shift insns,
7729 try to convert logical to arithmetic shift operations in cases where
7730 they are equivalent. This undoes the canonicalizations to logical
7731 shifts done elsewhere.
7732
7733 We try, as much as possible, to re-use rtl expressions to save memory.
7734
7735 IN_CODE says what kind of expression we are processing. Normally, it is
7736 SET. In a memory address it is MEM. When processing the arguments of
7737 a comparison or a COMPARE against zero, it is COMPARE. */
7738
7739 rtx
7740 make_compound_operation (rtx x, enum rtx_code in_code)
7741 {
7742 enum rtx_code code = GET_CODE (x);
7743 machine_mode mode = GET_MODE (x);
7744 int mode_width = GET_MODE_PRECISION (mode);
7745 rtx rhs, lhs;
7746 enum rtx_code next_code;
7747 int i, j;
7748 rtx new_rtx = 0;
7749 rtx tem;
7750 const char *fmt;
7751
7752 /* Select the code to be used in recursive calls. Once we are inside an
7753 address, we stay there. If we have a comparison, set to COMPARE,
7754 but once inside, go back to our default of SET. */
7755
7756 next_code = (code == MEM ? MEM
7757 : ((code == COMPARE || COMPARISON_P (x))
7758 && XEXP (x, 1) == const0_rtx) ? COMPARE
7759 : in_code == COMPARE ? SET : in_code);
7760
7761 /* Process depending on the code of this operation. If NEW is set
7762 nonzero, it will be returned. */
7763
7764 switch (code)
7765 {
7766 case ASHIFT:
7767 /* Convert shifts by constants into multiplications if inside
7768 an address. */
7769 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
7770 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
7771 && INTVAL (XEXP (x, 1)) >= 0
7772 && SCALAR_INT_MODE_P (mode))
7773 {
7774 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
7775 HOST_WIDE_INT multval = (HOST_WIDE_INT) 1 << count;
7776
7777 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7778 if (GET_CODE (new_rtx) == NEG)
7779 {
7780 new_rtx = XEXP (new_rtx, 0);
7781 multval = -multval;
7782 }
7783 multval = trunc_int_for_mode (multval, mode);
7784 new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
7785 }
7786 break;
7787
7788 case PLUS:
7789 lhs = XEXP (x, 0);
7790 rhs = XEXP (x, 1);
7791 lhs = make_compound_operation (lhs, next_code);
7792 rhs = make_compound_operation (rhs, next_code);
7793 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG
7794 && SCALAR_INT_MODE_P (mode))
7795 {
7796 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
7797 XEXP (lhs, 1));
7798 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7799 }
7800 else if (GET_CODE (lhs) == MULT
7801 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
7802 {
7803 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
7804 simplify_gen_unary (NEG, mode,
7805 XEXP (lhs, 1),
7806 mode));
7807 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7808 }
7809 else
7810 {
7811 SUBST (XEXP (x, 0), lhs);
7812 SUBST (XEXP (x, 1), rhs);
7813 goto maybe_swap;
7814 }
7815 x = gen_lowpart (mode, new_rtx);
7816 goto maybe_swap;
7817
7818 case MINUS:
7819 lhs = XEXP (x, 0);
7820 rhs = XEXP (x, 1);
7821 lhs = make_compound_operation (lhs, next_code);
7822 rhs = make_compound_operation (rhs, next_code);
7823 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG
7824 && SCALAR_INT_MODE_P (mode))
7825 {
7826 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
7827 XEXP (rhs, 1));
7828 new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
7829 }
7830 else if (GET_CODE (rhs) == MULT
7831 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
7832 {
7833 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
7834 simplify_gen_unary (NEG, mode,
7835 XEXP (rhs, 1),
7836 mode));
7837 new_rtx = simplify_gen_binary (PLUS, mode, tem, lhs);
7838 }
7839 else
7840 {
7841 SUBST (XEXP (x, 0), lhs);
7842 SUBST (XEXP (x, 1), rhs);
7843 return x;
7844 }
7845 return gen_lowpart (mode, new_rtx);
7846
7847 case AND:
7848 /* If the second operand is not a constant, we can't do anything
7849 with it. */
7850 if (!CONST_INT_P (XEXP (x, 1)))
7851 break;
7852
7853 /* If the constant is a power of two minus one and the first operand
7854 is a logical right shift, make an extraction. */
7855 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7856 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7857 {
7858 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7859 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1), i, 1,
7860 0, in_code == COMPARE);
7861 }
7862
7863 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
7864 else if (GET_CODE (XEXP (x, 0)) == SUBREG
7865 && subreg_lowpart_p (XEXP (x, 0))
7866 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
7867 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7868 {
7869 new_rtx = make_compound_operation (XEXP (SUBREG_REG (XEXP (x, 0)), 0),
7870 next_code);
7871 new_rtx = make_extraction (GET_MODE (SUBREG_REG (XEXP (x, 0))), new_rtx, 0,
7872 XEXP (SUBREG_REG (XEXP (x, 0)), 1), i, 1,
7873 0, in_code == COMPARE);
7874
7875 /* If that didn't give anything, see if the AND simplifies on
7876 its own. */
7877 if (!new_rtx && i >= 0)
7878 {
7879 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7880 new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
7881 0, in_code == COMPARE);
7882 }
7883 }
7884 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
7885 else if ((GET_CODE (XEXP (x, 0)) == XOR
7886 || GET_CODE (XEXP (x, 0)) == IOR)
7887 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
7888 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
7889 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7890 {
7891 /* Apply the distributive law, and then try to make extractions. */
7892 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
7893 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
7894 XEXP (x, 1)),
7895 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
7896 XEXP (x, 1)));
7897 new_rtx = make_compound_operation (new_rtx, in_code);
7898 }
7899
7900 /* If we are have (and (rotate X C) M) and C is larger than the number
7901 of bits in M, this is an extraction. */
7902
7903 else if (GET_CODE (XEXP (x, 0)) == ROTATE
7904 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7905 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
7906 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
7907 {
7908 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7909 new_rtx = make_extraction (mode, new_rtx,
7910 (GET_MODE_PRECISION (mode)
7911 - INTVAL (XEXP (XEXP (x, 0), 1))),
7912 NULL_RTX, i, 1, 0, in_code == COMPARE);
7913 }
7914
7915 /* On machines without logical shifts, if the operand of the AND is
7916 a logical shift and our mask turns off all the propagated sign
7917 bits, we can replace the logical shift with an arithmetic shift. */
7918 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7919 && !have_insn_for (LSHIFTRT, mode)
7920 && have_insn_for (ASHIFTRT, mode)
7921 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
7922 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
7923 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
7924 && mode_width <= HOST_BITS_PER_WIDE_INT)
7925 {
7926 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
7927
7928 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
7929 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
7930 SUBST (XEXP (x, 0),
7931 gen_rtx_ASHIFTRT (mode,
7932 make_compound_operation
7933 (XEXP (XEXP (x, 0), 0), next_code),
7934 XEXP (XEXP (x, 0), 1)));
7935 }
7936
7937 /* If the constant is one less than a power of two, this might be
7938 representable by an extraction even if no shift is present.
7939 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
7940 we are in a COMPARE. */
7941 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7942 new_rtx = make_extraction (mode,
7943 make_compound_operation (XEXP (x, 0),
7944 next_code),
7945 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
7946
7947 /* If we are in a comparison and this is an AND with a power of two,
7948 convert this into the appropriate bit extract. */
7949 else if (in_code == COMPARE
7950 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
7951 new_rtx = make_extraction (mode,
7952 make_compound_operation (XEXP (x, 0),
7953 next_code),
7954 i, NULL_RTX, 1, 1, 0, 1);
7955
7956 break;
7957
7958 case LSHIFTRT:
7959 /* If the sign bit is known to be zero, replace this with an
7960 arithmetic shift. */
7961 if (have_insn_for (ASHIFTRT, mode)
7962 && ! have_insn_for (LSHIFTRT, mode)
7963 && mode_width <= HOST_BITS_PER_WIDE_INT
7964 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
7965 {
7966 new_rtx = gen_rtx_ASHIFTRT (mode,
7967 make_compound_operation (XEXP (x, 0),
7968 next_code),
7969 XEXP (x, 1));
7970 break;
7971 }
7972
7973 /* ... fall through ... */
7974
7975 case ASHIFTRT:
7976 lhs = XEXP (x, 0);
7977 rhs = XEXP (x, 1);
7978
7979 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
7980 this is a SIGN_EXTRACT. */
7981 if (CONST_INT_P (rhs)
7982 && GET_CODE (lhs) == ASHIFT
7983 && CONST_INT_P (XEXP (lhs, 1))
7984 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
7985 && INTVAL (XEXP (lhs, 1)) >= 0
7986 && INTVAL (rhs) < mode_width)
7987 {
7988 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
7989 new_rtx = make_extraction (mode, new_rtx,
7990 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
7991 NULL_RTX, mode_width - INTVAL (rhs),
7992 code == LSHIFTRT, 0, in_code == COMPARE);
7993 break;
7994 }
7995
7996 /* See if we have operations between an ASHIFTRT and an ASHIFT.
7997 If so, try to merge the shifts into a SIGN_EXTEND. We could
7998 also do this for some cases of SIGN_EXTRACT, but it doesn't
7999 seem worth the effort; the case checked for occurs on Alpha. */
8000
8001 if (!OBJECT_P (lhs)
8002 && ! (GET_CODE (lhs) == SUBREG
8003 && (OBJECT_P (SUBREG_REG (lhs))))
8004 && CONST_INT_P (rhs)
8005 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8006 && INTVAL (rhs) < mode_width
8007 && (new_rtx = extract_left_shift (lhs, INTVAL (rhs))) != 0)
8008 new_rtx = make_extraction (mode, make_compound_operation (new_rtx, next_code),
8009 0, NULL_RTX, mode_width - INTVAL (rhs),
8010 code == LSHIFTRT, 0, in_code == COMPARE);
8011
8012 break;
8013
8014 case SUBREG:
8015 /* Call ourselves recursively on the inner expression. If we are
8016 narrowing the object and it has a different RTL code from
8017 what it originally did, do this SUBREG as a force_to_mode. */
8018 {
8019 rtx inner = SUBREG_REG (x), simplified;
8020 enum rtx_code subreg_code = in_code;
8021
8022 /* If in_code is COMPARE, it isn't always safe to pass it through
8023 to the recursive make_compound_operation call. */
8024 if (subreg_code == COMPARE
8025 && (!subreg_lowpart_p (x)
8026 || GET_CODE (inner) == SUBREG
8027 /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
8028 is (const_int 0), rather than
8029 (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0). */
8030 || (GET_CODE (inner) == AND
8031 && CONST_INT_P (XEXP (inner, 1))
8032 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
8033 && exact_log2 (UINTVAL (XEXP (inner, 1)))
8034 >= GET_MODE_BITSIZE (mode))))
8035 subreg_code = SET;
8036
8037 tem = make_compound_operation (inner, subreg_code);
8038
8039 simplified
8040 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8041 if (simplified)
8042 tem = simplified;
8043
8044 if (GET_CODE (tem) != GET_CODE (inner)
8045 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (inner))
8046 && subreg_lowpart_p (x))
8047 {
8048 rtx newer
8049 = force_to_mode (tem, mode, ~(unsigned HOST_WIDE_INT) 0, 0);
8050
8051 /* If we have something other than a SUBREG, we might have
8052 done an expansion, so rerun ourselves. */
8053 if (GET_CODE (newer) != SUBREG)
8054 newer = make_compound_operation (newer, in_code);
8055
8056 /* force_to_mode can expand compounds. If it just re-expanded the
8057 compound, use gen_lowpart to convert to the desired mode. */
8058 if (rtx_equal_p (newer, x)
8059 /* Likewise if it re-expanded the compound only partially.
8060 This happens for SUBREG of ZERO_EXTRACT if they extract
8061 the same number of bits. */
8062 || (GET_CODE (newer) == SUBREG
8063 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
8064 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
8065 && GET_CODE (inner) == AND
8066 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8067 return gen_lowpart (GET_MODE (x), tem);
8068
8069 return newer;
8070 }
8071
8072 if (simplified)
8073 return tem;
8074 }
8075 break;
8076
8077 default:
8078 break;
8079 }
8080
8081 if (new_rtx)
8082 {
8083 x = gen_lowpart (mode, new_rtx);
8084 code = GET_CODE (x);
8085 }
8086
8087 /* Now recursively process each operand of this operation. We need to
8088 handle ZERO_EXTEND specially so that we don't lose track of the
8089 inner mode. */
8090 if (GET_CODE (x) == ZERO_EXTEND)
8091 {
8092 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8093 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8094 new_rtx, GET_MODE (XEXP (x, 0)));
8095 if (tem)
8096 return tem;
8097 SUBST (XEXP (x, 0), new_rtx);
8098 return x;
8099 }
8100
8101 fmt = GET_RTX_FORMAT (code);
8102 for (i = 0; i < GET_RTX_LENGTH (code); i++)
8103 if (fmt[i] == 'e')
8104 {
8105 new_rtx = make_compound_operation (XEXP (x, i), next_code);
8106 SUBST (XEXP (x, i), new_rtx);
8107 }
8108 else if (fmt[i] == 'E')
8109 for (j = 0; j < XVECLEN (x, i); j++)
8110 {
8111 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8112 SUBST (XVECEXP (x, i, j), new_rtx);
8113 }
8114
8115 maybe_swap:
8116 /* If this is a commutative operation, the changes to the operands
8117 may have made it noncanonical. */
8118 if (COMMUTATIVE_ARITH_P (x)
8119 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
8120 {
8121 tem = XEXP (x, 0);
8122 SUBST (XEXP (x, 0), XEXP (x, 1));
8123 SUBST (XEXP (x, 1), tem);
8124 }
8125
8126 return x;
8127 }
8128 \f
8129 /* Given M see if it is a value that would select a field of bits
8130 within an item, but not the entire word. Return -1 if not.
8131 Otherwise, return the starting position of the field, where 0 is the
8132 low-order bit.
8133
8134 *PLEN is set to the length of the field. */
8135
8136 static int
8137 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8138 {
8139 /* Get the bit number of the first 1 bit from the right, -1 if none. */
8140 int pos = m ? ctz_hwi (m) : -1;
8141 int len = 0;
8142
8143 if (pos >= 0)
8144 /* Now shift off the low-order zero bits and see if we have a
8145 power of two minus 1. */
8146 len = exact_log2 ((m >> pos) + 1);
8147
8148 if (len <= 0)
8149 pos = -1;
8150
8151 *plen = len;
8152 return pos;
8153 }
8154 \f
8155 /* If X refers to a register that equals REG in value, replace these
8156 references with REG. */
8157 static rtx
8158 canon_reg_for_combine (rtx x, rtx reg)
8159 {
8160 rtx op0, op1, op2;
8161 const char *fmt;
8162 int i;
8163 bool copied;
8164
8165 enum rtx_code code = GET_CODE (x);
8166 switch (GET_RTX_CLASS (code))
8167 {
8168 case RTX_UNARY:
8169 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8170 if (op0 != XEXP (x, 0))
8171 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8172 GET_MODE (reg));
8173 break;
8174
8175 case RTX_BIN_ARITH:
8176 case RTX_COMM_ARITH:
8177 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8178 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8179 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8180 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8181 break;
8182
8183 case RTX_COMPARE:
8184 case RTX_COMM_COMPARE:
8185 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8186 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8187 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8188 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8189 GET_MODE (op0), op0, op1);
8190 break;
8191
8192 case RTX_TERNARY:
8193 case RTX_BITFIELD_OPS:
8194 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8195 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8196 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8197 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8198 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8199 GET_MODE (op0), op0, op1, op2);
8200
8201 case RTX_OBJ:
8202 if (REG_P (x))
8203 {
8204 if (rtx_equal_p (get_last_value (reg), x)
8205 || rtx_equal_p (reg, get_last_value (x)))
8206 return reg;
8207 else
8208 break;
8209 }
8210
8211 /* fall through */
8212
8213 default:
8214 fmt = GET_RTX_FORMAT (code);
8215 copied = false;
8216 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8217 if (fmt[i] == 'e')
8218 {
8219 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8220 if (op != XEXP (x, i))
8221 {
8222 if (!copied)
8223 {
8224 copied = true;
8225 x = copy_rtx (x);
8226 }
8227 XEXP (x, i) = op;
8228 }
8229 }
8230 else if (fmt[i] == 'E')
8231 {
8232 int j;
8233 for (j = 0; j < XVECLEN (x, i); j++)
8234 {
8235 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8236 if (op != XVECEXP (x, i, j))
8237 {
8238 if (!copied)
8239 {
8240 copied = true;
8241 x = copy_rtx (x);
8242 }
8243 XVECEXP (x, i, j) = op;
8244 }
8245 }
8246 }
8247
8248 break;
8249 }
8250
8251 return x;
8252 }
8253
8254 /* Return X converted to MODE. If the value is already truncated to
8255 MODE we can just return a subreg even though in the general case we
8256 would need an explicit truncation. */
8257
8258 static rtx
8259 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8260 {
8261 if (!CONST_INT_P (x)
8262 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (x))
8263 && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8264 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8265 {
8266 /* Bit-cast X into an integer mode. */
8267 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8268 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)), x);
8269 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode),
8270 x, GET_MODE (x));
8271 }
8272
8273 return gen_lowpart (mode, x);
8274 }
8275
8276 /* See if X can be simplified knowing that we will only refer to it in
8277 MODE and will only refer to those bits that are nonzero in MASK.
8278 If other bits are being computed or if masking operations are done
8279 that select a superset of the bits in MASK, they can sometimes be
8280 ignored.
8281
8282 Return a possibly simplified expression, but always convert X to
8283 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
8284
8285 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8286 are all off in X. This is used when X will be complemented, by either
8287 NOT, NEG, or XOR. */
8288
8289 static rtx
8290 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8291 int just_select)
8292 {
8293 enum rtx_code code = GET_CODE (x);
8294 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8295 machine_mode op_mode;
8296 unsigned HOST_WIDE_INT fuller_mask, nonzero;
8297 rtx op0, op1, temp;
8298
8299 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
8300 code below will do the wrong thing since the mode of such an
8301 expression is VOIDmode.
8302
8303 Also do nothing if X is a CLOBBER; this can happen if X was
8304 the return value from a call to gen_lowpart. */
8305 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8306 return x;
8307
8308 /* We want to perform the operation in its present mode unless we know
8309 that the operation is valid in MODE, in which case we do the operation
8310 in MODE. */
8311 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8312 && have_insn_for (code, mode))
8313 ? mode : GET_MODE (x));
8314
8315 /* It is not valid to do a right-shift in a narrower mode
8316 than the one it came in with. */
8317 if ((code == LSHIFTRT || code == ASHIFTRT)
8318 && GET_MODE_PRECISION (mode) < GET_MODE_PRECISION (GET_MODE (x)))
8319 op_mode = GET_MODE (x);
8320
8321 /* Truncate MASK to fit OP_MODE. */
8322 if (op_mode)
8323 mask &= GET_MODE_MASK (op_mode);
8324
8325 /* When we have an arithmetic operation, or a shift whose count we
8326 do not know, we need to assume that all bits up to the highest-order
8327 bit in MASK will be needed. This is how we form such a mask. */
8328 if (mask & ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
8329 fuller_mask = ~(unsigned HOST_WIDE_INT) 0;
8330 else
8331 fuller_mask = (((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1))
8332 - 1);
8333
8334 /* Determine what bits of X are guaranteed to be (non)zero. */
8335 nonzero = nonzero_bits (x, mode);
8336
8337 /* If none of the bits in X are needed, return a zero. */
8338 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8339 x = const0_rtx;
8340
8341 /* If X is a CONST_INT, return a new one. Do this here since the
8342 test below will fail. */
8343 if (CONST_INT_P (x))
8344 {
8345 if (SCALAR_INT_MODE_P (mode))
8346 return gen_int_mode (INTVAL (x) & mask, mode);
8347 else
8348 {
8349 x = GEN_INT (INTVAL (x) & mask);
8350 return gen_lowpart_common (mode, x);
8351 }
8352 }
8353
8354 /* If X is narrower than MODE and we want all the bits in X's mode, just
8355 get X in the proper mode. */
8356 if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode)
8357 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8358 return gen_lowpart (mode, x);
8359
8360 /* We can ignore the effect of a SUBREG if it narrows the mode or
8361 if the constant masks to zero all the bits the mode doesn't have. */
8362 if (GET_CODE (x) == SUBREG
8363 && subreg_lowpart_p (x)
8364 && ((GET_MODE_SIZE (GET_MODE (x))
8365 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
8366 || (0 == (mask
8367 & GET_MODE_MASK (GET_MODE (x))
8368 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
8369 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8370
8371 /* The arithmetic simplifications here only work for scalar integer modes. */
8372 if (!SCALAR_INT_MODE_P (mode) || !SCALAR_INT_MODE_P (GET_MODE (x)))
8373 return gen_lowpart_or_truncate (mode, x);
8374
8375 switch (code)
8376 {
8377 case CLOBBER:
8378 /* If X is a (clobber (const_int)), return it since we know we are
8379 generating something that won't match. */
8380 return x;
8381
8382 case SIGN_EXTEND:
8383 case ZERO_EXTEND:
8384 case ZERO_EXTRACT:
8385 case SIGN_EXTRACT:
8386 x = expand_compound_operation (x);
8387 if (GET_CODE (x) != code)
8388 return force_to_mode (x, mode, mask, next_select);
8389 break;
8390
8391 case TRUNCATE:
8392 /* Similarly for a truncate. */
8393 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8394
8395 case AND:
8396 /* If this is an AND with a constant, convert it into an AND
8397 whose constant is the AND of that constant with MASK. If it
8398 remains an AND of MASK, delete it since it is redundant. */
8399
8400 if (CONST_INT_P (XEXP (x, 1)))
8401 {
8402 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8403 mask & INTVAL (XEXP (x, 1)));
8404
8405 /* If X is still an AND, see if it is an AND with a mask that
8406 is just some low-order bits. If so, and it is MASK, we don't
8407 need it. */
8408
8409 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8410 && ((INTVAL (XEXP (x, 1)) & GET_MODE_MASK (GET_MODE (x)))
8411 == mask))
8412 x = XEXP (x, 0);
8413
8414 /* If it remains an AND, try making another AND with the bits
8415 in the mode mask that aren't in MASK turned on. If the
8416 constant in the AND is wide enough, this might make a
8417 cheaper constant. */
8418
8419 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8420 && GET_MODE_MASK (GET_MODE (x)) != mask
8421 && HWI_COMPUTABLE_MODE_P (GET_MODE (x)))
8422 {
8423 unsigned HOST_WIDE_INT cval
8424 = UINTVAL (XEXP (x, 1))
8425 | (GET_MODE_MASK (GET_MODE (x)) & ~mask);
8426 rtx y;
8427
8428 y = simplify_gen_binary (AND, GET_MODE (x), XEXP (x, 0),
8429 gen_int_mode (cval, GET_MODE (x)));
8430 if (set_src_cost (y, GET_MODE (x), optimize_this_for_speed_p)
8431 < set_src_cost (x, GET_MODE (x), optimize_this_for_speed_p))
8432 x = y;
8433 }
8434
8435 break;
8436 }
8437
8438 goto binop;
8439
8440 case PLUS:
8441 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8442 low-order bits (as in an alignment operation) and FOO is already
8443 aligned to that boundary, mask C1 to that boundary as well.
8444 This may eliminate that PLUS and, later, the AND. */
8445
8446 {
8447 unsigned int width = GET_MODE_PRECISION (mode);
8448 unsigned HOST_WIDE_INT smask = mask;
8449
8450 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8451 number, sign extend it. */
8452
8453 if (width < HOST_BITS_PER_WIDE_INT
8454 && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8455 smask |= HOST_WIDE_INT_M1U << width;
8456
8457 if (CONST_INT_P (XEXP (x, 1))
8458 && exact_log2 (- smask) >= 0
8459 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8460 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8461 return force_to_mode (plus_constant (GET_MODE (x), XEXP (x, 0),
8462 (INTVAL (XEXP (x, 1)) & smask)),
8463 mode, smask, next_select);
8464 }
8465
8466 /* ... fall through ... */
8467
8468 case MULT:
8469 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8470 most significant bit in MASK since carries from those bits will
8471 affect the bits we are interested in. */
8472 mask = fuller_mask;
8473 goto binop;
8474
8475 case MINUS:
8476 /* If X is (minus C Y) where C's least set bit is larger than any bit
8477 in the mask, then we may replace with (neg Y). */
8478 if (CONST_INT_P (XEXP (x, 0))
8479 && ((UINTVAL (XEXP (x, 0)) & -UINTVAL (XEXP (x, 0))) > mask))
8480 {
8481 x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
8482 GET_MODE (x));
8483 return force_to_mode (x, mode, mask, next_select);
8484 }
8485
8486 /* Similarly, if C contains every bit in the fuller_mask, then we may
8487 replace with (not Y). */
8488 if (CONST_INT_P (XEXP (x, 0))
8489 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8490 {
8491 x = simplify_gen_unary (NOT, GET_MODE (x),
8492 XEXP (x, 1), GET_MODE (x));
8493 return force_to_mode (x, mode, mask, next_select);
8494 }
8495
8496 mask = fuller_mask;
8497 goto binop;
8498
8499 case IOR:
8500 case XOR:
8501 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8502 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8503 operation which may be a bitfield extraction. Ensure that the
8504 constant we form is not wider than the mode of X. */
8505
8506 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8507 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8508 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8509 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8510 && CONST_INT_P (XEXP (x, 1))
8511 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8512 + floor_log2 (INTVAL (XEXP (x, 1))))
8513 < GET_MODE_PRECISION (GET_MODE (x)))
8514 && (UINTVAL (XEXP (x, 1))
8515 & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
8516 {
8517 temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8518 << INTVAL (XEXP (XEXP (x, 0), 1)),
8519 GET_MODE (x));
8520 temp = simplify_gen_binary (GET_CODE (x), GET_MODE (x),
8521 XEXP (XEXP (x, 0), 0), temp);
8522 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), temp,
8523 XEXP (XEXP (x, 0), 1));
8524 return force_to_mode (x, mode, mask, next_select);
8525 }
8526
8527 binop:
8528 /* For most binary operations, just propagate into the operation and
8529 change the mode if we have an operation of that mode. */
8530
8531 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8532 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8533
8534 /* If we ended up truncating both operands, truncate the result of the
8535 operation instead. */
8536 if (GET_CODE (op0) == TRUNCATE
8537 && GET_CODE (op1) == TRUNCATE)
8538 {
8539 op0 = XEXP (op0, 0);
8540 op1 = XEXP (op1, 0);
8541 }
8542
8543 op0 = gen_lowpart_or_truncate (op_mode, op0);
8544 op1 = gen_lowpart_or_truncate (op_mode, op1);
8545
8546 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8547 x = simplify_gen_binary (code, op_mode, op0, op1);
8548 break;
8549
8550 case ASHIFT:
8551 /* For left shifts, do the same, but just for the first operand.
8552 However, we cannot do anything with shifts where we cannot
8553 guarantee that the counts are smaller than the size of the mode
8554 because such a count will have a different meaning in a
8555 wider mode. */
8556
8557 if (! (CONST_INT_P (XEXP (x, 1))
8558 && INTVAL (XEXP (x, 1)) >= 0
8559 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
8560 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8561 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8562 < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
8563 break;
8564
8565 /* If the shift count is a constant and we can do arithmetic in
8566 the mode of the shift, refine which bits we need. Otherwise, use the
8567 conservative form of the mask. */
8568 if (CONST_INT_P (XEXP (x, 1))
8569 && INTVAL (XEXP (x, 1)) >= 0
8570 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
8571 && HWI_COMPUTABLE_MODE_P (op_mode))
8572 mask >>= INTVAL (XEXP (x, 1));
8573 else
8574 mask = fuller_mask;
8575
8576 op0 = gen_lowpart_or_truncate (op_mode,
8577 force_to_mode (XEXP (x, 0), op_mode,
8578 mask, next_select));
8579
8580 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8581 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8582 break;
8583
8584 case LSHIFTRT:
8585 /* Here we can only do something if the shift count is a constant,
8586 this shift constant is valid for the host, and we can do arithmetic
8587 in OP_MODE. */
8588
8589 if (CONST_INT_P (XEXP (x, 1))
8590 && INTVAL (XEXP (x, 1)) >= 0
8591 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8592 && HWI_COMPUTABLE_MODE_P (op_mode))
8593 {
8594 rtx inner = XEXP (x, 0);
8595 unsigned HOST_WIDE_INT inner_mask;
8596
8597 /* Select the mask of the bits we need for the shift operand. */
8598 inner_mask = mask << INTVAL (XEXP (x, 1));
8599
8600 /* We can only change the mode of the shift if we can do arithmetic
8601 in the mode of the shift and INNER_MASK is no wider than the
8602 width of X's mode. */
8603 if ((inner_mask & ~GET_MODE_MASK (GET_MODE (x))) != 0)
8604 op_mode = GET_MODE (x);
8605
8606 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8607
8608 if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
8609 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8610 }
8611
8612 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8613 shift and AND produces only copies of the sign bit (C2 is one less
8614 than a power of two), we can do this with just a shift. */
8615
8616 if (GET_CODE (x) == LSHIFTRT
8617 && CONST_INT_P (XEXP (x, 1))
8618 /* The shift puts one of the sign bit copies in the least significant
8619 bit. */
8620 && ((INTVAL (XEXP (x, 1))
8621 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8622 >= GET_MODE_PRECISION (GET_MODE (x)))
8623 && exact_log2 (mask + 1) >= 0
8624 /* Number of bits left after the shift must be more than the mask
8625 needs. */
8626 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8627 <= GET_MODE_PRECISION (GET_MODE (x)))
8628 /* Must be more sign bit copies than the mask needs. */
8629 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
8630 >= exact_log2 (mask + 1)))
8631 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8632 GEN_INT (GET_MODE_PRECISION (GET_MODE (x))
8633 - exact_log2 (mask + 1)));
8634
8635 goto shiftrt;
8636
8637 case ASHIFTRT:
8638 /* If we are just looking for the sign bit, we don't need this shift at
8639 all, even if it has a variable count. */
8640 if (val_signbit_p (GET_MODE (x), mask))
8641 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8642
8643 /* If this is a shift by a constant, get a mask that contains those bits
8644 that are not copies of the sign bit. We then have two cases: If
8645 MASK only includes those bits, this can be a logical shift, which may
8646 allow simplifications. If MASK is a single-bit field not within
8647 those bits, we are requesting a copy of the sign bit and hence can
8648 shift the sign bit to the appropriate location. */
8649
8650 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
8651 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
8652 {
8653 int i;
8654
8655 /* If the considered data is wider than HOST_WIDE_INT, we can't
8656 represent a mask for all its bits in a single scalar.
8657 But we only care about the lower bits, so calculate these. */
8658
8659 if (GET_MODE_PRECISION (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
8660 {
8661 nonzero = ~(unsigned HOST_WIDE_INT) 0;
8662
8663 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8664 is the number of bits a full-width mask would have set.
8665 We need only shift if these are fewer than nonzero can
8666 hold. If not, we must keep all bits set in nonzero. */
8667
8668 if (GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8669 < HOST_BITS_PER_WIDE_INT)
8670 nonzero >>= INTVAL (XEXP (x, 1))
8671 + HOST_BITS_PER_WIDE_INT
8672 - GET_MODE_PRECISION (GET_MODE (x)) ;
8673 }
8674 else
8675 {
8676 nonzero = GET_MODE_MASK (GET_MODE (x));
8677 nonzero >>= INTVAL (XEXP (x, 1));
8678 }
8679
8680 if ((mask & ~nonzero) == 0)
8681 {
8682 x = simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x),
8683 XEXP (x, 0), INTVAL (XEXP (x, 1)));
8684 if (GET_CODE (x) != ASHIFTRT)
8685 return force_to_mode (x, mode, mask, next_select);
8686 }
8687
8688 else if ((i = exact_log2 (mask)) >= 0)
8689 {
8690 x = simplify_shift_const
8691 (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
8692 GET_MODE_PRECISION (GET_MODE (x)) - 1 - i);
8693
8694 if (GET_CODE (x) != ASHIFTRT)
8695 return force_to_mode (x, mode, mask, next_select);
8696 }
8697 }
8698
8699 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
8700 even if the shift count isn't a constant. */
8701 if (mask == 1)
8702 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8703 XEXP (x, 0), XEXP (x, 1));
8704
8705 shiftrt:
8706
8707 /* If this is a zero- or sign-extension operation that just affects bits
8708 we don't care about, remove it. Be sure the call above returned
8709 something that is still a shift. */
8710
8711 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
8712 && CONST_INT_P (XEXP (x, 1))
8713 && INTVAL (XEXP (x, 1)) >= 0
8714 && (INTVAL (XEXP (x, 1))
8715 <= GET_MODE_PRECISION (GET_MODE (x)) - (floor_log2 (mask) + 1))
8716 && GET_CODE (XEXP (x, 0)) == ASHIFT
8717 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
8718 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
8719 next_select);
8720
8721 break;
8722
8723 case ROTATE:
8724 case ROTATERT:
8725 /* If the shift count is constant and we can do computations
8726 in the mode of X, compute where the bits we care about are.
8727 Otherwise, we can't do anything. Don't change the mode of
8728 the shift or propagate MODE into the shift, though. */
8729 if (CONST_INT_P (XEXP (x, 1))
8730 && INTVAL (XEXP (x, 1)) >= 0)
8731 {
8732 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
8733 GET_MODE (x),
8734 gen_int_mode (mask, GET_MODE (x)),
8735 XEXP (x, 1));
8736 if (temp && CONST_INT_P (temp))
8737 x = simplify_gen_binary (code, GET_MODE (x),
8738 force_to_mode (XEXP (x, 0), GET_MODE (x),
8739 INTVAL (temp), next_select),
8740 XEXP (x, 1));
8741 }
8742 break;
8743
8744 case NEG:
8745 /* If we just want the low-order bit, the NEG isn't needed since it
8746 won't change the low-order bit. */
8747 if (mask == 1)
8748 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
8749
8750 /* We need any bits less significant than the most significant bit in
8751 MASK since carries from those bits will affect the bits we are
8752 interested in. */
8753 mask = fuller_mask;
8754 goto unop;
8755
8756 case NOT:
8757 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
8758 same as the XOR case above. Ensure that the constant we form is not
8759 wider than the mode of X. */
8760
8761 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8762 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8763 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8764 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
8765 < GET_MODE_PRECISION (GET_MODE (x)))
8766 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
8767 {
8768 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
8769 GET_MODE (x));
8770 temp = simplify_gen_binary (XOR, GET_MODE (x),
8771 XEXP (XEXP (x, 0), 0), temp);
8772 x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
8773 temp, XEXP (XEXP (x, 0), 1));
8774
8775 return force_to_mode (x, mode, mask, next_select);
8776 }
8777
8778 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
8779 use the full mask inside the NOT. */
8780 mask = fuller_mask;
8781
8782 unop:
8783 op0 = gen_lowpart_or_truncate (op_mode,
8784 force_to_mode (XEXP (x, 0), mode, mask,
8785 next_select));
8786 if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
8787 x = simplify_gen_unary (code, op_mode, op0, op_mode);
8788 break;
8789
8790 case NE:
8791 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
8792 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
8793 which is equal to STORE_FLAG_VALUE. */
8794 if ((mask & ~STORE_FLAG_VALUE) == 0
8795 && XEXP (x, 1) == const0_rtx
8796 && GET_MODE (XEXP (x, 0)) == mode
8797 && exact_log2 (nonzero_bits (XEXP (x, 0), mode)) >= 0
8798 && (nonzero_bits (XEXP (x, 0), mode)
8799 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
8800 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8801
8802 break;
8803
8804 case IF_THEN_ELSE:
8805 /* We have no way of knowing if the IF_THEN_ELSE can itself be
8806 written in a narrower mode. We play it safe and do not do so. */
8807
8808 op0 = gen_lowpart_or_truncate (GET_MODE (x),
8809 force_to_mode (XEXP (x, 1), mode,
8810 mask, next_select));
8811 op1 = gen_lowpart_or_truncate (GET_MODE (x),
8812 force_to_mode (XEXP (x, 2), mode,
8813 mask, next_select));
8814 if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
8815 x = simplify_gen_ternary (IF_THEN_ELSE, GET_MODE (x),
8816 GET_MODE (XEXP (x, 0)), XEXP (x, 0),
8817 op0, op1);
8818 break;
8819
8820 default:
8821 break;
8822 }
8823
8824 /* Ensure we return a value of the proper mode. */
8825 return gen_lowpart_or_truncate (mode, x);
8826 }
8827 \f
8828 /* Return nonzero if X is an expression that has one of two values depending on
8829 whether some other value is zero or nonzero. In that case, we return the
8830 value that is being tested, *PTRUE is set to the value if the rtx being
8831 returned has a nonzero value, and *PFALSE is set to the other alternative.
8832
8833 If we return zero, we set *PTRUE and *PFALSE to X. */
8834
8835 static rtx
8836 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
8837 {
8838 machine_mode mode = GET_MODE (x);
8839 enum rtx_code code = GET_CODE (x);
8840 rtx cond0, cond1, true0, true1, false0, false1;
8841 unsigned HOST_WIDE_INT nz;
8842
8843 /* If we are comparing a value against zero, we are done. */
8844 if ((code == NE || code == EQ)
8845 && XEXP (x, 1) == const0_rtx)
8846 {
8847 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
8848 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
8849 return XEXP (x, 0);
8850 }
8851
8852 /* If this is a unary operation whose operand has one of two values, apply
8853 our opcode to compute those values. */
8854 else if (UNARY_P (x)
8855 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
8856 {
8857 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
8858 *pfalse = simplify_gen_unary (code, mode, false0,
8859 GET_MODE (XEXP (x, 0)));
8860 return cond0;
8861 }
8862
8863 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
8864 make can't possibly match and would suppress other optimizations. */
8865 else if (code == COMPARE)
8866 ;
8867
8868 /* If this is a binary operation, see if either side has only one of two
8869 values. If either one does or if both do and they are conditional on
8870 the same value, compute the new true and false values. */
8871 else if (BINARY_P (x))
8872 {
8873 cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0);
8874 cond1 = if_then_else_cond (XEXP (x, 1), &true1, &false1);
8875
8876 if ((cond0 != 0 || cond1 != 0)
8877 && ! (cond0 != 0 && cond1 != 0 && ! rtx_equal_p (cond0, cond1)))
8878 {
8879 /* If if_then_else_cond returned zero, then true/false are the
8880 same rtl. We must copy one of them to prevent invalid rtl
8881 sharing. */
8882 if (cond0 == 0)
8883 true0 = copy_rtx (true0);
8884 else if (cond1 == 0)
8885 true1 = copy_rtx (true1);
8886
8887 if (COMPARISON_P (x))
8888 {
8889 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
8890 true0, true1);
8891 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
8892 false0, false1);
8893 }
8894 else
8895 {
8896 *ptrue = simplify_gen_binary (code, mode, true0, true1);
8897 *pfalse = simplify_gen_binary (code, mode, false0, false1);
8898 }
8899
8900 return cond0 ? cond0 : cond1;
8901 }
8902
8903 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
8904 operands is zero when the other is nonzero, and vice-versa,
8905 and STORE_FLAG_VALUE is 1 or -1. */
8906
8907 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
8908 && (code == PLUS || code == IOR || code == XOR || code == MINUS
8909 || code == UMAX)
8910 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
8911 {
8912 rtx op0 = XEXP (XEXP (x, 0), 1);
8913 rtx op1 = XEXP (XEXP (x, 1), 1);
8914
8915 cond0 = XEXP (XEXP (x, 0), 0);
8916 cond1 = XEXP (XEXP (x, 1), 0);
8917
8918 if (COMPARISON_P (cond0)
8919 && COMPARISON_P (cond1)
8920 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
8921 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
8922 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
8923 || ((swap_condition (GET_CODE (cond0))
8924 == reversed_comparison_code (cond1, NULL))
8925 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
8926 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
8927 && ! side_effects_p (x))
8928 {
8929 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
8930 *pfalse = simplify_gen_binary (MULT, mode,
8931 (code == MINUS
8932 ? simplify_gen_unary (NEG, mode,
8933 op1, mode)
8934 : op1),
8935 const_true_rtx);
8936 return cond0;
8937 }
8938 }
8939
8940 /* Similarly for MULT, AND and UMIN, except that for these the result
8941 is always zero. */
8942 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
8943 && (code == MULT || code == AND || code == UMIN)
8944 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
8945 {
8946 cond0 = XEXP (XEXP (x, 0), 0);
8947 cond1 = XEXP (XEXP (x, 1), 0);
8948
8949 if (COMPARISON_P (cond0)
8950 && COMPARISON_P (cond1)
8951 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
8952 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
8953 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
8954 || ((swap_condition (GET_CODE (cond0))
8955 == reversed_comparison_code (cond1, NULL))
8956 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
8957 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
8958 && ! side_effects_p (x))
8959 {
8960 *ptrue = *pfalse = const0_rtx;
8961 return cond0;
8962 }
8963 }
8964 }
8965
8966 else if (code == IF_THEN_ELSE)
8967 {
8968 /* If we have IF_THEN_ELSE already, extract the condition and
8969 canonicalize it if it is NE or EQ. */
8970 cond0 = XEXP (x, 0);
8971 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
8972 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
8973 return XEXP (cond0, 0);
8974 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
8975 {
8976 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
8977 return XEXP (cond0, 0);
8978 }
8979 else
8980 return cond0;
8981 }
8982
8983 /* If X is a SUBREG, we can narrow both the true and false values
8984 if the inner expression, if there is a condition. */
8985 else if (code == SUBREG
8986 && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
8987 &true0, &false0)))
8988 {
8989 true0 = simplify_gen_subreg (mode, true0,
8990 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
8991 false0 = simplify_gen_subreg (mode, false0,
8992 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
8993 if (true0 && false0)
8994 {
8995 *ptrue = true0;
8996 *pfalse = false0;
8997 return cond0;
8998 }
8999 }
9000
9001 /* If X is a constant, this isn't special and will cause confusions
9002 if we treat it as such. Likewise if it is equivalent to a constant. */
9003 else if (CONSTANT_P (x)
9004 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
9005 ;
9006
9007 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9008 will be least confusing to the rest of the compiler. */
9009 else if (mode == BImode)
9010 {
9011 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
9012 return x;
9013 }
9014
9015 /* If X is known to be either 0 or -1, those are the true and
9016 false values when testing X. */
9017 else if (x == constm1_rtx || x == const0_rtx
9018 || (mode != VOIDmode
9019 && num_sign_bit_copies (x, mode) == GET_MODE_PRECISION (mode)))
9020 {
9021 *ptrue = constm1_rtx, *pfalse = const0_rtx;
9022 return x;
9023 }
9024
9025 /* Likewise for 0 or a single bit. */
9026 else if (HWI_COMPUTABLE_MODE_P (mode)
9027 && exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
9028 {
9029 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
9030 return x;
9031 }
9032
9033 /* Otherwise fail; show no condition with true and false values the same. */
9034 *ptrue = *pfalse = x;
9035 return 0;
9036 }
9037 \f
9038 /* Return the value of expression X given the fact that condition COND
9039 is known to be true when applied to REG as its first operand and VAL
9040 as its second. X is known to not be shared and so can be modified in
9041 place.
9042
9043 We only handle the simplest cases, and specifically those cases that
9044 arise with IF_THEN_ELSE expressions. */
9045
9046 static rtx
9047 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9048 {
9049 enum rtx_code code = GET_CODE (x);
9050 const char *fmt;
9051 int i, j;
9052
9053 if (side_effects_p (x))
9054 return x;
9055
9056 /* If either operand of the condition is a floating point value,
9057 then we have to avoid collapsing an EQ comparison. */
9058 if (cond == EQ
9059 && rtx_equal_p (x, reg)
9060 && ! FLOAT_MODE_P (GET_MODE (x))
9061 && ! FLOAT_MODE_P (GET_MODE (val)))
9062 return val;
9063
9064 if (cond == UNEQ && rtx_equal_p (x, reg))
9065 return val;
9066
9067 /* If X is (abs REG) and we know something about REG's relationship
9068 with zero, we may be able to simplify this. */
9069
9070 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9071 switch (cond)
9072 {
9073 case GE: case GT: case EQ:
9074 return XEXP (x, 0);
9075 case LT: case LE:
9076 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9077 XEXP (x, 0),
9078 GET_MODE (XEXP (x, 0)));
9079 default:
9080 break;
9081 }
9082
9083 /* The only other cases we handle are MIN, MAX, and comparisons if the
9084 operands are the same as REG and VAL. */
9085
9086 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9087 {
9088 if (rtx_equal_p (XEXP (x, 0), val))
9089 {
9090 std::swap (val, reg);
9091 cond = swap_condition (cond);
9092 }
9093
9094 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9095 {
9096 if (COMPARISON_P (x))
9097 {
9098 if (comparison_dominates_p (cond, code))
9099 return const_true_rtx;
9100
9101 code = reversed_comparison_code (x, NULL);
9102 if (code != UNKNOWN
9103 && comparison_dominates_p (cond, code))
9104 return const0_rtx;
9105 else
9106 return x;
9107 }
9108 else if (code == SMAX || code == SMIN
9109 || code == UMIN || code == UMAX)
9110 {
9111 int unsignedp = (code == UMIN || code == UMAX);
9112
9113 /* Do not reverse the condition when it is NE or EQ.
9114 This is because we cannot conclude anything about
9115 the value of 'SMAX (x, y)' when x is not equal to y,
9116 but we can when x equals y. */
9117 if ((code == SMAX || code == UMAX)
9118 && ! (cond == EQ || cond == NE))
9119 cond = reverse_condition (cond);
9120
9121 switch (cond)
9122 {
9123 case GE: case GT:
9124 return unsignedp ? x : XEXP (x, 1);
9125 case LE: case LT:
9126 return unsignedp ? x : XEXP (x, 0);
9127 case GEU: case GTU:
9128 return unsignedp ? XEXP (x, 1) : x;
9129 case LEU: case LTU:
9130 return unsignedp ? XEXP (x, 0) : x;
9131 default:
9132 break;
9133 }
9134 }
9135 }
9136 }
9137 else if (code == SUBREG)
9138 {
9139 machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9140 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9141
9142 if (SUBREG_REG (x) != r)
9143 {
9144 /* We must simplify subreg here, before we lose track of the
9145 original inner_mode. */
9146 new_rtx = simplify_subreg (GET_MODE (x), r,
9147 inner_mode, SUBREG_BYTE (x));
9148 if (new_rtx)
9149 return new_rtx;
9150 else
9151 SUBST (SUBREG_REG (x), r);
9152 }
9153
9154 return x;
9155 }
9156 /* We don't have to handle SIGN_EXTEND here, because even in the
9157 case of replacing something with a modeless CONST_INT, a
9158 CONST_INT is already (supposed to be) a valid sign extension for
9159 its narrower mode, which implies it's already properly
9160 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9161 story is different. */
9162 else if (code == ZERO_EXTEND)
9163 {
9164 machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9165 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9166
9167 if (XEXP (x, 0) != r)
9168 {
9169 /* We must simplify the zero_extend here, before we lose
9170 track of the original inner_mode. */
9171 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9172 r, inner_mode);
9173 if (new_rtx)
9174 return new_rtx;
9175 else
9176 SUBST (XEXP (x, 0), r);
9177 }
9178
9179 return x;
9180 }
9181
9182 fmt = GET_RTX_FORMAT (code);
9183 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9184 {
9185 if (fmt[i] == 'e')
9186 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9187 else if (fmt[i] == 'E')
9188 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9189 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9190 cond, reg, val));
9191 }
9192
9193 return x;
9194 }
9195 \f
9196 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9197 assignment as a field assignment. */
9198
9199 static int
9200 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9201 {
9202 if (widen_x && GET_MODE (x) != GET_MODE (y))
9203 {
9204 if (GET_MODE_SIZE (GET_MODE (x)) > GET_MODE_SIZE (GET_MODE (y)))
9205 return 0;
9206 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9207 return 0;
9208 /* For big endian, adjust the memory offset. */
9209 if (BYTES_BIG_ENDIAN)
9210 x = adjust_address_nv (x, GET_MODE (y),
9211 -subreg_lowpart_offset (GET_MODE (x),
9212 GET_MODE (y)));
9213 else
9214 x = adjust_address_nv (x, GET_MODE (y), 0);
9215 }
9216
9217 if (x == y || rtx_equal_p (x, y))
9218 return 1;
9219
9220 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9221 return 0;
9222
9223 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9224 Note that all SUBREGs of MEM are paradoxical; otherwise they
9225 would have been rewritten. */
9226 if (MEM_P (x) && GET_CODE (y) == SUBREG
9227 && MEM_P (SUBREG_REG (y))
9228 && rtx_equal_p (SUBREG_REG (y),
9229 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9230 return 1;
9231
9232 if (MEM_P (y) && GET_CODE (x) == SUBREG
9233 && MEM_P (SUBREG_REG (x))
9234 && rtx_equal_p (SUBREG_REG (x),
9235 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9236 return 1;
9237
9238 /* We used to see if get_last_value of X and Y were the same but that's
9239 not correct. In one direction, we'll cause the assignment to have
9240 the wrong destination and in the case, we'll import a register into this
9241 insn that might have already have been dead. So fail if none of the
9242 above cases are true. */
9243 return 0;
9244 }
9245 \f
9246 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9247 Return that assignment if so.
9248
9249 We only handle the most common cases. */
9250
9251 static rtx
9252 make_field_assignment (rtx x)
9253 {
9254 rtx dest = SET_DEST (x);
9255 rtx src = SET_SRC (x);
9256 rtx assign;
9257 rtx rhs, lhs;
9258 HOST_WIDE_INT c1;
9259 HOST_WIDE_INT pos;
9260 unsigned HOST_WIDE_INT len;
9261 rtx other;
9262 machine_mode mode;
9263
9264 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9265 a clear of a one-bit field. We will have changed it to
9266 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9267 for a SUBREG. */
9268
9269 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9270 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9271 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9272 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9273 {
9274 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9275 1, 1, 1, 0);
9276 if (assign != 0)
9277 return gen_rtx_SET (assign, const0_rtx);
9278 return x;
9279 }
9280
9281 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9282 && subreg_lowpart_p (XEXP (src, 0))
9283 && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
9284 < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
9285 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9286 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9287 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9288 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9289 {
9290 assign = make_extraction (VOIDmode, dest, 0,
9291 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9292 1, 1, 1, 0);
9293 if (assign != 0)
9294 return gen_rtx_SET (assign, const0_rtx);
9295 return x;
9296 }
9297
9298 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9299 one-bit field. */
9300 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9301 && XEXP (XEXP (src, 0), 0) == const1_rtx
9302 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9303 {
9304 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9305 1, 1, 1, 0);
9306 if (assign != 0)
9307 return gen_rtx_SET (assign, const1_rtx);
9308 return x;
9309 }
9310
9311 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9312 SRC is an AND with all bits of that field set, then we can discard
9313 the AND. */
9314 if (GET_CODE (dest) == ZERO_EXTRACT
9315 && CONST_INT_P (XEXP (dest, 1))
9316 && GET_CODE (src) == AND
9317 && CONST_INT_P (XEXP (src, 1)))
9318 {
9319 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9320 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9321 unsigned HOST_WIDE_INT ze_mask;
9322
9323 if (width >= HOST_BITS_PER_WIDE_INT)
9324 ze_mask = -1;
9325 else
9326 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9327
9328 /* Complete overlap. We can remove the source AND. */
9329 if ((and_mask & ze_mask) == ze_mask)
9330 return gen_rtx_SET (dest, XEXP (src, 0));
9331
9332 /* Partial overlap. We can reduce the source AND. */
9333 if ((and_mask & ze_mask) != and_mask)
9334 {
9335 mode = GET_MODE (src);
9336 src = gen_rtx_AND (mode, XEXP (src, 0),
9337 gen_int_mode (and_mask & ze_mask, mode));
9338 return gen_rtx_SET (dest, src);
9339 }
9340 }
9341
9342 /* The other case we handle is assignments into a constant-position
9343 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9344 a mask that has all one bits except for a group of zero bits and
9345 OTHER is known to have zeros where C1 has ones, this is such an
9346 assignment. Compute the position and length from C1. Shift OTHER
9347 to the appropriate position, force it to the required mode, and
9348 make the extraction. Check for the AND in both operands. */
9349
9350 /* One or more SUBREGs might obscure the constant-position field
9351 assignment. The first one we are likely to encounter is an outer
9352 narrowing SUBREG, which we can just strip for the purposes of
9353 identifying the constant-field assignment. */
9354 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src))
9355 src = SUBREG_REG (src);
9356
9357 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9358 return x;
9359
9360 rhs = expand_compound_operation (XEXP (src, 0));
9361 lhs = expand_compound_operation (XEXP (src, 1));
9362
9363 if (GET_CODE (rhs) == AND
9364 && CONST_INT_P (XEXP (rhs, 1))
9365 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9366 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9367 /* The second SUBREG that might get in the way is a paradoxical
9368 SUBREG around the first operand of the AND. We want to
9369 pretend the operand is as wide as the destination here. We
9370 do this by adjusting the MEM to wider mode for the sole
9371 purpose of the call to rtx_equal_for_field_assignment_p. Also
9372 note this trick only works for MEMs. */
9373 else if (GET_CODE (rhs) == AND
9374 && paradoxical_subreg_p (XEXP (rhs, 0))
9375 && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9376 && CONST_INT_P (XEXP (rhs, 1))
9377 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9378 dest, true))
9379 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9380 else if (GET_CODE (lhs) == AND
9381 && CONST_INT_P (XEXP (lhs, 1))
9382 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9383 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9384 /* The second SUBREG that might get in the way is a paradoxical
9385 SUBREG around the first operand of the AND. We want to
9386 pretend the operand is as wide as the destination here. We
9387 do this by adjusting the MEM to wider mode for the sole
9388 purpose of the call to rtx_equal_for_field_assignment_p. Also
9389 note this trick only works for MEMs. */
9390 else if (GET_CODE (lhs) == AND
9391 && paradoxical_subreg_p (XEXP (lhs, 0))
9392 && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9393 && CONST_INT_P (XEXP (lhs, 1))
9394 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9395 dest, true))
9396 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9397 else
9398 return x;
9399
9400 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
9401 if (pos < 0 || pos + len > GET_MODE_PRECISION (GET_MODE (dest))
9402 || GET_MODE_PRECISION (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
9403 || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
9404 return x;
9405
9406 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9407 if (assign == 0)
9408 return x;
9409
9410 /* The mode to use for the source is the mode of the assignment, or of
9411 what is inside a possible STRICT_LOW_PART. */
9412 mode = (GET_CODE (assign) == STRICT_LOW_PART
9413 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9414
9415 /* Shift OTHER right POS places and make it the source, restricting it
9416 to the proper length and mode. */
9417
9418 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9419 GET_MODE (src),
9420 other, pos),
9421 dest);
9422 src = force_to_mode (src, mode,
9423 GET_MODE_PRECISION (mode) >= HOST_BITS_PER_WIDE_INT
9424 ? ~(unsigned HOST_WIDE_INT) 0
9425 : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
9426 0);
9427
9428 /* If SRC is masked by an AND that does not make a difference in
9429 the value being stored, strip it. */
9430 if (GET_CODE (assign) == ZERO_EXTRACT
9431 && CONST_INT_P (XEXP (assign, 1))
9432 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9433 && GET_CODE (src) == AND
9434 && CONST_INT_P (XEXP (src, 1))
9435 && UINTVAL (XEXP (src, 1))
9436 == ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (assign, 1))) - 1)
9437 src = XEXP (src, 0);
9438
9439 return gen_rtx_SET (assign, src);
9440 }
9441 \f
9442 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9443 if so. */
9444
9445 static rtx
9446 apply_distributive_law (rtx x)
9447 {
9448 enum rtx_code code = GET_CODE (x);
9449 enum rtx_code inner_code;
9450 rtx lhs, rhs, other;
9451 rtx tem;
9452
9453 /* Distributivity is not true for floating point as it can change the
9454 value. So we don't do it unless -funsafe-math-optimizations. */
9455 if (FLOAT_MODE_P (GET_MODE (x))
9456 && ! flag_unsafe_math_optimizations)
9457 return x;
9458
9459 /* The outer operation can only be one of the following: */
9460 if (code != IOR && code != AND && code != XOR
9461 && code != PLUS && code != MINUS)
9462 return x;
9463
9464 lhs = XEXP (x, 0);
9465 rhs = XEXP (x, 1);
9466
9467 /* If either operand is a primitive we can't do anything, so get out
9468 fast. */
9469 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9470 return x;
9471
9472 lhs = expand_compound_operation (lhs);
9473 rhs = expand_compound_operation (rhs);
9474 inner_code = GET_CODE (lhs);
9475 if (inner_code != GET_CODE (rhs))
9476 return x;
9477
9478 /* See if the inner and outer operations distribute. */
9479 switch (inner_code)
9480 {
9481 case LSHIFTRT:
9482 case ASHIFTRT:
9483 case AND:
9484 case IOR:
9485 /* These all distribute except over PLUS. */
9486 if (code == PLUS || code == MINUS)
9487 return x;
9488 break;
9489
9490 case MULT:
9491 if (code != PLUS && code != MINUS)
9492 return x;
9493 break;
9494
9495 case ASHIFT:
9496 /* This is also a multiply, so it distributes over everything. */
9497 break;
9498
9499 /* This used to handle SUBREG, but this turned out to be counter-
9500 productive, since (subreg (op ...)) usually is not handled by
9501 insn patterns, and this "optimization" therefore transformed
9502 recognizable patterns into unrecognizable ones. Therefore the
9503 SUBREG case was removed from here.
9504
9505 It is possible that distributing SUBREG over arithmetic operations
9506 leads to an intermediate result than can then be optimized further,
9507 e.g. by moving the outer SUBREG to the other side of a SET as done
9508 in simplify_set. This seems to have been the original intent of
9509 handling SUBREGs here.
9510
9511 However, with current GCC this does not appear to actually happen,
9512 at least on major platforms. If some case is found where removing
9513 the SUBREG case here prevents follow-on optimizations, distributing
9514 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
9515
9516 default:
9517 return x;
9518 }
9519
9520 /* Set LHS and RHS to the inner operands (A and B in the example
9521 above) and set OTHER to the common operand (C in the example).
9522 There is only one way to do this unless the inner operation is
9523 commutative. */
9524 if (COMMUTATIVE_ARITH_P (lhs)
9525 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9526 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9527 else if (COMMUTATIVE_ARITH_P (lhs)
9528 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9529 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9530 else if (COMMUTATIVE_ARITH_P (lhs)
9531 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9532 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9533 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9534 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9535 else
9536 return x;
9537
9538 /* Form the new inner operation, seeing if it simplifies first. */
9539 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9540
9541 /* There is one exception to the general way of distributing:
9542 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9543 if (code == XOR && inner_code == IOR)
9544 {
9545 inner_code = AND;
9546 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9547 }
9548
9549 /* We may be able to continuing distributing the result, so call
9550 ourselves recursively on the inner operation before forming the
9551 outer operation, which we return. */
9552 return simplify_gen_binary (inner_code, GET_MODE (x),
9553 apply_distributive_law (tem), other);
9554 }
9555
9556 /* See if X is of the form (* (+ A B) C), and if so convert to
9557 (+ (* A C) (* B C)) and try to simplify.
9558
9559 Most of the time, this results in no change. However, if some of
9560 the operands are the same or inverses of each other, simplifications
9561 will result.
9562
9563 For example, (and (ior A B) (not B)) can occur as the result of
9564 expanding a bit field assignment. When we apply the distributive
9565 law to this, we get (ior (and (A (not B))) (and (B (not B)))),
9566 which then simplifies to (and (A (not B))).
9567
9568 Note that no checks happen on the validity of applying the inverse
9569 distributive law. This is pointless since we can do it in the
9570 few places where this routine is called.
9571
9572 N is the index of the term that is decomposed (the arithmetic operation,
9573 i.e. (+ A B) in the first example above). !N is the index of the term that
9574 is distributed, i.e. of C in the first example above. */
9575 static rtx
9576 distribute_and_simplify_rtx (rtx x, int n)
9577 {
9578 machine_mode mode;
9579 enum rtx_code outer_code, inner_code;
9580 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9581
9582 /* Distributivity is not true for floating point as it can change the
9583 value. So we don't do it unless -funsafe-math-optimizations. */
9584 if (FLOAT_MODE_P (GET_MODE (x))
9585 && ! flag_unsafe_math_optimizations)
9586 return NULL_RTX;
9587
9588 decomposed = XEXP (x, n);
9589 if (!ARITHMETIC_P (decomposed))
9590 return NULL_RTX;
9591
9592 mode = GET_MODE (x);
9593 outer_code = GET_CODE (x);
9594 distributed = XEXP (x, !n);
9595
9596 inner_code = GET_CODE (decomposed);
9597 inner_op0 = XEXP (decomposed, 0);
9598 inner_op1 = XEXP (decomposed, 1);
9599
9600 /* Special case (and (xor B C) (not A)), which is equivalent to
9601 (xor (ior A B) (ior A C)) */
9602 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9603 {
9604 distributed = XEXP (distributed, 0);
9605 outer_code = IOR;
9606 }
9607
9608 if (n == 0)
9609 {
9610 /* Distribute the second term. */
9611 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
9612 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
9613 }
9614 else
9615 {
9616 /* Distribute the first term. */
9617 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
9618 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
9619 }
9620
9621 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
9622 new_op0, new_op1));
9623 if (GET_CODE (tmp) != outer_code
9624 && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
9625 < set_src_cost (x, mode, optimize_this_for_speed_p)))
9626 return tmp;
9627
9628 return NULL_RTX;
9629 }
9630 \f
9631 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9632 in MODE. Return an equivalent form, if different from (and VAROP
9633 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
9634
9635 static rtx
9636 simplify_and_const_int_1 (machine_mode mode, rtx varop,
9637 unsigned HOST_WIDE_INT constop)
9638 {
9639 unsigned HOST_WIDE_INT nonzero;
9640 unsigned HOST_WIDE_INT orig_constop;
9641 rtx orig_varop;
9642 int i;
9643
9644 orig_varop = varop;
9645 orig_constop = constop;
9646 if (GET_CODE (varop) == CLOBBER)
9647 return NULL_RTX;
9648
9649 /* Simplify VAROP knowing that we will be only looking at some of the
9650 bits in it.
9651
9652 Note by passing in CONSTOP, we guarantee that the bits not set in
9653 CONSTOP are not significant and will never be examined. We must
9654 ensure that is the case by explicitly masking out those bits
9655 before returning. */
9656 varop = force_to_mode (varop, mode, constop, 0);
9657
9658 /* If VAROP is a CLOBBER, we will fail so return it. */
9659 if (GET_CODE (varop) == CLOBBER)
9660 return varop;
9661
9662 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
9663 to VAROP and return the new constant. */
9664 if (CONST_INT_P (varop))
9665 return gen_int_mode (INTVAL (varop) & constop, mode);
9666
9667 /* See what bits may be nonzero in VAROP. Unlike the general case of
9668 a call to nonzero_bits, here we don't care about bits outside
9669 MODE. */
9670
9671 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
9672
9673 /* Turn off all bits in the constant that are known to already be zero.
9674 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
9675 which is tested below. */
9676
9677 constop &= nonzero;
9678
9679 /* If we don't have any bits left, return zero. */
9680 if (constop == 0)
9681 return const0_rtx;
9682
9683 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
9684 a power of two, we can replace this with an ASHIFT. */
9685 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
9686 && (i = exact_log2 (constop)) >= 0)
9687 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
9688
9689 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
9690 or XOR, then try to apply the distributive law. This may eliminate
9691 operations if either branch can be simplified because of the AND.
9692 It may also make some cases more complex, but those cases probably
9693 won't match a pattern either with or without this. */
9694
9695 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
9696 return
9697 gen_lowpart
9698 (mode,
9699 apply_distributive_law
9700 (simplify_gen_binary (GET_CODE (varop), GET_MODE (varop),
9701 simplify_and_const_int (NULL_RTX,
9702 GET_MODE (varop),
9703 XEXP (varop, 0),
9704 constop),
9705 simplify_and_const_int (NULL_RTX,
9706 GET_MODE (varop),
9707 XEXP (varop, 1),
9708 constop))));
9709
9710 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
9711 the AND and see if one of the operands simplifies to zero. If so, we
9712 may eliminate it. */
9713
9714 if (GET_CODE (varop) == PLUS
9715 && exact_log2 (constop + 1) >= 0)
9716 {
9717 rtx o0, o1;
9718
9719 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
9720 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
9721 if (o0 == const0_rtx)
9722 return o1;
9723 if (o1 == const0_rtx)
9724 return o0;
9725 }
9726
9727 /* Make a SUBREG if necessary. If we can't make it, fail. */
9728 varop = gen_lowpart (mode, varop);
9729 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
9730 return NULL_RTX;
9731
9732 /* If we are only masking insignificant bits, return VAROP. */
9733 if (constop == nonzero)
9734 return varop;
9735
9736 if (varop == orig_varop && constop == orig_constop)
9737 return NULL_RTX;
9738
9739 /* Otherwise, return an AND. */
9740 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
9741 }
9742
9743
9744 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
9745 in MODE.
9746
9747 Return an equivalent form, if different from X. Otherwise, return X. If
9748 X is zero, we are to always construct the equivalent form. */
9749
9750 static rtx
9751 simplify_and_const_int (rtx x, machine_mode mode, rtx varop,
9752 unsigned HOST_WIDE_INT constop)
9753 {
9754 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
9755 if (tem)
9756 return tem;
9757
9758 if (!x)
9759 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
9760 gen_int_mode (constop, mode));
9761 if (GET_MODE (x) != mode)
9762 x = gen_lowpart (mode, x);
9763 return x;
9764 }
9765 \f
9766 /* Given a REG, X, compute which bits in X can be nonzero.
9767 We don't care about bits outside of those defined in MODE.
9768
9769 For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is
9770 a shift, AND, or zero_extract, we can do better. */
9771
9772 static rtx
9773 reg_nonzero_bits_for_combine (const_rtx x, machine_mode mode,
9774 const_rtx known_x ATTRIBUTE_UNUSED,
9775 machine_mode known_mode ATTRIBUTE_UNUSED,
9776 unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED,
9777 unsigned HOST_WIDE_INT *nonzero)
9778 {
9779 rtx tem;
9780 reg_stat_type *rsp;
9781
9782 /* If X is a register whose nonzero bits value is current, use it.
9783 Otherwise, if X is a register whose value we can find, use that
9784 value. Otherwise, use the previously-computed global nonzero bits
9785 for this register. */
9786
9787 rsp = &reg_stat[REGNO (x)];
9788 if (rsp->last_set_value != 0
9789 && (rsp->last_set_mode == mode
9790 || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
9791 && GET_MODE_CLASS (mode) == MODE_INT))
9792 && ((rsp->last_set_label >= label_tick_ebb_start
9793 && rsp->last_set_label < label_tick)
9794 || (rsp->last_set_label == label_tick
9795 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9796 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9797 && REGNO (x) < reg_n_sets_max
9798 && REG_N_SETS (REGNO (x)) == 1
9799 && !REGNO_REG_SET_P
9800 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
9801 REGNO (x)))))
9802 {
9803 unsigned HOST_WIDE_INT mask = rsp->last_set_nonzero_bits;
9804
9805 if (GET_MODE_PRECISION (rsp->last_set_mode) < GET_MODE_PRECISION (mode))
9806 /* We don't know anything about the upper bits. */
9807 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (rsp->last_set_mode);
9808
9809 *nonzero &= mask;
9810 return NULL;
9811 }
9812
9813 tem = get_last_value (x);
9814
9815 if (tem)
9816 {
9817 if (SHORT_IMMEDIATES_SIGN_EXTEND)
9818 tem = sign_extend_short_imm (tem, GET_MODE (x),
9819 GET_MODE_PRECISION (mode));
9820
9821 return tem;
9822 }
9823 else if (nonzero_sign_valid && rsp->nonzero_bits)
9824 {
9825 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
9826
9827 if (GET_MODE_PRECISION (GET_MODE (x)) < GET_MODE_PRECISION (mode))
9828 /* We don't know anything about the upper bits. */
9829 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
9830
9831 *nonzero &= mask;
9832 }
9833
9834 return NULL;
9835 }
9836
9837 /* Return the number of bits at the high-order end of X that are known to
9838 be equal to the sign bit. X will be used in mode MODE; if MODE is
9839 VOIDmode, X will be used in its own mode. The returned value will always
9840 be between 1 and the number of bits in MODE. */
9841
9842 static rtx
9843 reg_num_sign_bit_copies_for_combine (const_rtx x, machine_mode mode,
9844 const_rtx known_x ATTRIBUTE_UNUSED,
9845 machine_mode known_mode
9846 ATTRIBUTE_UNUSED,
9847 unsigned int known_ret ATTRIBUTE_UNUSED,
9848 unsigned int *result)
9849 {
9850 rtx tem;
9851 reg_stat_type *rsp;
9852
9853 rsp = &reg_stat[REGNO (x)];
9854 if (rsp->last_set_value != 0
9855 && rsp->last_set_mode == mode
9856 && ((rsp->last_set_label >= label_tick_ebb_start
9857 && rsp->last_set_label < label_tick)
9858 || (rsp->last_set_label == label_tick
9859 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
9860 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
9861 && REGNO (x) < reg_n_sets_max
9862 && REG_N_SETS (REGNO (x)) == 1
9863 && !REGNO_REG_SET_P
9864 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
9865 REGNO (x)))))
9866 {
9867 *result = rsp->last_set_sign_bit_copies;
9868 return NULL;
9869 }
9870
9871 tem = get_last_value (x);
9872 if (tem != 0)
9873 return tem;
9874
9875 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
9876 && GET_MODE_PRECISION (GET_MODE (x)) == GET_MODE_PRECISION (mode))
9877 *result = rsp->sign_bit_copies;
9878
9879 return NULL;
9880 }
9881 \f
9882 /* Return the number of "extended" bits there are in X, when interpreted
9883 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
9884 unsigned quantities, this is the number of high-order zero bits.
9885 For signed quantities, this is the number of copies of the sign bit
9886 minus 1. In both case, this function returns the number of "spare"
9887 bits. For example, if two quantities for which this function returns
9888 at least 1 are added, the addition is known not to overflow.
9889
9890 This function will always return 0 unless called during combine, which
9891 implies that it must be called from a define_split. */
9892
9893 unsigned int
9894 extended_count (const_rtx x, machine_mode mode, int unsignedp)
9895 {
9896 if (nonzero_sign_valid == 0)
9897 return 0;
9898
9899 return (unsignedp
9900 ? (HWI_COMPUTABLE_MODE_P (mode)
9901 ? (unsigned int) (GET_MODE_PRECISION (mode) - 1
9902 - floor_log2 (nonzero_bits (x, mode)))
9903 : 0)
9904 : num_sign_bit_copies (x, mode) - 1);
9905 }
9906
9907 /* This function is called from `simplify_shift_const' to merge two
9908 outer operations. Specifically, we have already found that we need
9909 to perform operation *POP0 with constant *PCONST0 at the outermost
9910 position. We would now like to also perform OP1 with constant CONST1
9911 (with *POP0 being done last).
9912
9913 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
9914 the resulting operation. *PCOMP_P is set to 1 if we would need to
9915 complement the innermost operand, otherwise it is unchanged.
9916
9917 MODE is the mode in which the operation will be done. No bits outside
9918 the width of this mode matter. It is assumed that the width of this mode
9919 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
9920
9921 If *POP0 or OP1 are UNKNOWN, it means no operation is required. Only NEG, PLUS,
9922 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
9923 result is simply *PCONST0.
9924
9925 If the resulting operation cannot be expressed as one operation, we
9926 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
9927
9928 static int
9929 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1, HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
9930 {
9931 enum rtx_code op0 = *pop0;
9932 HOST_WIDE_INT const0 = *pconst0;
9933
9934 const0 &= GET_MODE_MASK (mode);
9935 const1 &= GET_MODE_MASK (mode);
9936
9937 /* If OP0 is an AND, clear unimportant bits in CONST1. */
9938 if (op0 == AND)
9939 const1 &= const0;
9940
9941 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
9942 if OP0 is SET. */
9943
9944 if (op1 == UNKNOWN || op0 == SET)
9945 return 1;
9946
9947 else if (op0 == UNKNOWN)
9948 op0 = op1, const0 = const1;
9949
9950 else if (op0 == op1)
9951 {
9952 switch (op0)
9953 {
9954 case AND:
9955 const0 &= const1;
9956 break;
9957 case IOR:
9958 const0 |= const1;
9959 break;
9960 case XOR:
9961 const0 ^= const1;
9962 break;
9963 case PLUS:
9964 const0 += const1;
9965 break;
9966 case NEG:
9967 op0 = UNKNOWN;
9968 break;
9969 default:
9970 break;
9971 }
9972 }
9973
9974 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
9975 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
9976 return 0;
9977
9978 /* If the two constants aren't the same, we can't do anything. The
9979 remaining six cases can all be done. */
9980 else if (const0 != const1)
9981 return 0;
9982
9983 else
9984 switch (op0)
9985 {
9986 case IOR:
9987 if (op1 == AND)
9988 /* (a & b) | b == b */
9989 op0 = SET;
9990 else /* op1 == XOR */
9991 /* (a ^ b) | b == a | b */
9992 {;}
9993 break;
9994
9995 case XOR:
9996 if (op1 == AND)
9997 /* (a & b) ^ b == (~a) & b */
9998 op0 = AND, *pcomp_p = 1;
9999 else /* op1 == IOR */
10000 /* (a | b) ^ b == a & ~b */
10001 op0 = AND, const0 = ~const0;
10002 break;
10003
10004 case AND:
10005 if (op1 == IOR)
10006 /* (a | b) & b == b */
10007 op0 = SET;
10008 else /* op1 == XOR */
10009 /* (a ^ b) & b) == (~a) & b */
10010 *pcomp_p = 1;
10011 break;
10012 default:
10013 break;
10014 }
10015
10016 /* Check for NO-OP cases. */
10017 const0 &= GET_MODE_MASK (mode);
10018 if (const0 == 0
10019 && (op0 == IOR || op0 == XOR || op0 == PLUS))
10020 op0 = UNKNOWN;
10021 else if (const0 == 0 && op0 == AND)
10022 op0 = SET;
10023 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10024 && op0 == AND)
10025 op0 = UNKNOWN;
10026
10027 *pop0 = op0;
10028
10029 /* ??? Slightly redundant with the above mask, but not entirely.
10030 Moving this above means we'd have to sign-extend the mode mask
10031 for the final test. */
10032 if (op0 != UNKNOWN && op0 != NEG)
10033 *pconst0 = trunc_int_for_mode (const0, mode);
10034
10035 return 1;
10036 }
10037 \f
10038 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10039 the shift in. The original shift operation CODE is performed on OP in
10040 ORIG_MODE. Return the wider mode MODE if we can perform the operation
10041 in that mode. Return ORIG_MODE otherwise. We can also assume that the
10042 result of the shift is subject to operation OUTER_CODE with operand
10043 OUTER_CONST. */
10044
10045 static machine_mode
10046 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10047 machine_mode orig_mode, machine_mode mode,
10048 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10049 {
10050 if (orig_mode == mode)
10051 return mode;
10052 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10053
10054 /* In general we can't perform in wider mode for right shift and rotate. */
10055 switch (code)
10056 {
10057 case ASHIFTRT:
10058 /* We can still widen if the bits brought in from the left are identical
10059 to the sign bit of ORIG_MODE. */
10060 if (num_sign_bit_copies (op, mode)
10061 > (unsigned) (GET_MODE_PRECISION (mode)
10062 - GET_MODE_PRECISION (orig_mode)))
10063 return mode;
10064 return orig_mode;
10065
10066 case LSHIFTRT:
10067 /* Similarly here but with zero bits. */
10068 if (HWI_COMPUTABLE_MODE_P (mode)
10069 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10070 return mode;
10071
10072 /* We can also widen if the bits brought in will be masked off. This
10073 operation is performed in ORIG_MODE. */
10074 if (outer_code == AND)
10075 {
10076 int care_bits = low_bitmask_len (orig_mode, outer_const);
10077
10078 if (care_bits >= 0
10079 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10080 return mode;
10081 }
10082 /* fall through */
10083
10084 case ROTATE:
10085 return orig_mode;
10086
10087 case ROTATERT:
10088 gcc_unreachable ();
10089
10090 default:
10091 return mode;
10092 }
10093 }
10094
10095 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
10096 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
10097 if we cannot simplify it. Otherwise, return a simplified value.
10098
10099 The shift is normally computed in the widest mode we find in VAROP, as
10100 long as it isn't a different number of words than RESULT_MODE. Exceptions
10101 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10102
10103 static rtx
10104 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10105 rtx varop, int orig_count)
10106 {
10107 enum rtx_code orig_code = code;
10108 rtx orig_varop = varop;
10109 int count;
10110 machine_mode mode = result_mode;
10111 machine_mode shift_mode, tmode;
10112 unsigned int mode_words
10113 = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
10114 /* We form (outer_op (code varop count) (outer_const)). */
10115 enum rtx_code outer_op = UNKNOWN;
10116 HOST_WIDE_INT outer_const = 0;
10117 int complement_p = 0;
10118 rtx new_rtx, x;
10119
10120 /* Make sure and truncate the "natural" shift on the way in. We don't
10121 want to do this inside the loop as it makes it more difficult to
10122 combine shifts. */
10123 if (SHIFT_COUNT_TRUNCATED)
10124 orig_count &= GET_MODE_BITSIZE (mode) - 1;
10125
10126 /* If we were given an invalid count, don't do anything except exactly
10127 what was requested. */
10128
10129 if (orig_count < 0 || orig_count >= (int) GET_MODE_PRECISION (mode))
10130 return NULL_RTX;
10131
10132 count = orig_count;
10133
10134 /* Unless one of the branches of the `if' in this loop does a `continue',
10135 we will `break' the loop after the `if'. */
10136
10137 while (count != 0)
10138 {
10139 /* If we have an operand of (clobber (const_int 0)), fail. */
10140 if (GET_CODE (varop) == CLOBBER)
10141 return NULL_RTX;
10142
10143 /* Convert ROTATERT to ROTATE. */
10144 if (code == ROTATERT)
10145 {
10146 unsigned int bitsize = GET_MODE_PRECISION (result_mode);
10147 code = ROTATE;
10148 if (VECTOR_MODE_P (result_mode))
10149 count = bitsize / GET_MODE_NUNITS (result_mode) - count;
10150 else
10151 count = bitsize - count;
10152 }
10153
10154 shift_mode = try_widen_shift_mode (code, varop, count, result_mode,
10155 mode, outer_op, outer_const);
10156
10157 /* Handle cases where the count is greater than the size of the mode
10158 minus 1. For ASHIFT, use the size minus one as the count (this can
10159 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10160 take the count modulo the size. For other shifts, the result is
10161 zero.
10162
10163 Since these shifts are being produced by the compiler by combining
10164 multiple operations, each of which are defined, we know what the
10165 result is supposed to be. */
10166
10167 if (count > (GET_MODE_PRECISION (shift_mode) - 1))
10168 {
10169 if (code == ASHIFTRT)
10170 count = GET_MODE_PRECISION (shift_mode) - 1;
10171 else if (code == ROTATE || code == ROTATERT)
10172 count %= GET_MODE_PRECISION (shift_mode);
10173 else
10174 {
10175 /* We can't simply return zero because there may be an
10176 outer op. */
10177 varop = const0_rtx;
10178 count = 0;
10179 break;
10180 }
10181 }
10182
10183 /* If we discovered we had to complement VAROP, leave. Making a NOT
10184 here would cause an infinite loop. */
10185 if (complement_p)
10186 break;
10187
10188 /* An arithmetic right shift of a quantity known to be -1 or 0
10189 is a no-op. */
10190 if (code == ASHIFTRT
10191 && (num_sign_bit_copies (varop, shift_mode)
10192 == GET_MODE_PRECISION (shift_mode)))
10193 {
10194 count = 0;
10195 break;
10196 }
10197
10198 /* If we are doing an arithmetic right shift and discarding all but
10199 the sign bit copies, this is equivalent to doing a shift by the
10200 bitsize minus one. Convert it into that shift because it will often
10201 allow other simplifications. */
10202
10203 if (code == ASHIFTRT
10204 && (count + num_sign_bit_copies (varop, shift_mode)
10205 >= GET_MODE_PRECISION (shift_mode)))
10206 count = GET_MODE_PRECISION (shift_mode) - 1;
10207
10208 /* We simplify the tests below and elsewhere by converting
10209 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10210 `make_compound_operation' will convert it to an ASHIFTRT for
10211 those machines (such as VAX) that don't have an LSHIFTRT. */
10212 if (code == ASHIFTRT
10213 && val_signbit_known_clear_p (shift_mode,
10214 nonzero_bits (varop, shift_mode)))
10215 code = LSHIFTRT;
10216
10217 if (((code == LSHIFTRT
10218 && HWI_COMPUTABLE_MODE_P (shift_mode)
10219 && !(nonzero_bits (varop, shift_mode) >> count))
10220 || (code == ASHIFT
10221 && HWI_COMPUTABLE_MODE_P (shift_mode)
10222 && !((nonzero_bits (varop, shift_mode) << count)
10223 & GET_MODE_MASK (shift_mode))))
10224 && !side_effects_p (varop))
10225 varop = const0_rtx;
10226
10227 switch (GET_CODE (varop))
10228 {
10229 case SIGN_EXTEND:
10230 case ZERO_EXTEND:
10231 case SIGN_EXTRACT:
10232 case ZERO_EXTRACT:
10233 new_rtx = expand_compound_operation (varop);
10234 if (new_rtx != varop)
10235 {
10236 varop = new_rtx;
10237 continue;
10238 }
10239 break;
10240
10241 case MEM:
10242 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10243 minus the width of a smaller mode, we can do this with a
10244 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10245 if ((code == ASHIFTRT || code == LSHIFTRT)
10246 && ! mode_dependent_address_p (XEXP (varop, 0),
10247 MEM_ADDR_SPACE (varop))
10248 && ! MEM_VOLATILE_P (varop)
10249 && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
10250 MODE_INT, 1)) != BLKmode)
10251 {
10252 new_rtx = adjust_address_nv (varop, tmode,
10253 BYTES_BIG_ENDIAN ? 0
10254 : count / BITS_PER_UNIT);
10255
10256 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10257 : ZERO_EXTEND, mode, new_rtx);
10258 count = 0;
10259 continue;
10260 }
10261 break;
10262
10263 case SUBREG:
10264 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10265 the same number of words as what we've seen so far. Then store
10266 the widest mode in MODE. */
10267 if (subreg_lowpart_p (varop)
10268 && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
10269 > GET_MODE_SIZE (GET_MODE (varop)))
10270 && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
10271 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
10272 == mode_words
10273 && GET_MODE_CLASS (GET_MODE (varop)) == MODE_INT
10274 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (varop))) == MODE_INT)
10275 {
10276 varop = SUBREG_REG (varop);
10277 if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode))
10278 mode = GET_MODE (varop);
10279 continue;
10280 }
10281 break;
10282
10283 case MULT:
10284 /* Some machines use MULT instead of ASHIFT because MULT
10285 is cheaper. But it is still better on those machines to
10286 merge two shifts into one. */
10287 if (CONST_INT_P (XEXP (varop, 1))
10288 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10289 {
10290 varop
10291 = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10292 XEXP (varop, 0),
10293 GEN_INT (exact_log2 (
10294 UINTVAL (XEXP (varop, 1)))));
10295 continue;
10296 }
10297 break;
10298
10299 case UDIV:
10300 /* Similar, for when divides are cheaper. */
10301 if (CONST_INT_P (XEXP (varop, 1))
10302 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10303 {
10304 varop
10305 = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10306 XEXP (varop, 0),
10307 GEN_INT (exact_log2 (
10308 UINTVAL (XEXP (varop, 1)))));
10309 continue;
10310 }
10311 break;
10312
10313 case ASHIFTRT:
10314 /* If we are extracting just the sign bit of an arithmetic
10315 right shift, that shift is not needed. However, the sign
10316 bit of a wider mode may be different from what would be
10317 interpreted as the sign bit in a narrower mode, so, if
10318 the result is narrower, don't discard the shift. */
10319 if (code == LSHIFTRT
10320 && count == (GET_MODE_BITSIZE (result_mode) - 1)
10321 && (GET_MODE_BITSIZE (result_mode)
10322 >= GET_MODE_BITSIZE (GET_MODE (varop))))
10323 {
10324 varop = XEXP (varop, 0);
10325 continue;
10326 }
10327
10328 /* ... fall through ... */
10329
10330 case LSHIFTRT:
10331 case ASHIFT:
10332 case ROTATE:
10333 /* Here we have two nested shifts. The result is usually the
10334 AND of a new shift with a mask. We compute the result below. */
10335 if (CONST_INT_P (XEXP (varop, 1))
10336 && INTVAL (XEXP (varop, 1)) >= 0
10337 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (GET_MODE (varop))
10338 && HWI_COMPUTABLE_MODE_P (result_mode)
10339 && HWI_COMPUTABLE_MODE_P (mode)
10340 && !VECTOR_MODE_P (result_mode))
10341 {
10342 enum rtx_code first_code = GET_CODE (varop);
10343 unsigned int first_count = INTVAL (XEXP (varop, 1));
10344 unsigned HOST_WIDE_INT mask;
10345 rtx mask_rtx;
10346
10347 /* We have one common special case. We can't do any merging if
10348 the inner code is an ASHIFTRT of a smaller mode. However, if
10349 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10350 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10351 we can convert it to
10352 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10353 This simplifies certain SIGN_EXTEND operations. */
10354 if (code == ASHIFT && first_code == ASHIFTRT
10355 && count == (GET_MODE_PRECISION (result_mode)
10356 - GET_MODE_PRECISION (GET_MODE (varop))))
10357 {
10358 /* C3 has the low-order C1 bits zero. */
10359
10360 mask = GET_MODE_MASK (mode)
10361 & ~(((unsigned HOST_WIDE_INT) 1 << first_count) - 1);
10362
10363 varop = simplify_and_const_int (NULL_RTX, result_mode,
10364 XEXP (varop, 0), mask);
10365 varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
10366 varop, count);
10367 count = first_count;
10368 code = ASHIFTRT;
10369 continue;
10370 }
10371
10372 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10373 than C1 high-order bits equal to the sign bit, we can convert
10374 this to either an ASHIFT or an ASHIFTRT depending on the
10375 two counts.
10376
10377 We cannot do this if VAROP's mode is not SHIFT_MODE. */
10378
10379 if (code == ASHIFTRT && first_code == ASHIFT
10380 && GET_MODE (varop) == shift_mode
10381 && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
10382 > first_count))
10383 {
10384 varop = XEXP (varop, 0);
10385 count -= first_count;
10386 if (count < 0)
10387 {
10388 count = -count;
10389 code = ASHIFT;
10390 }
10391
10392 continue;
10393 }
10394
10395 /* There are some cases we can't do. If CODE is ASHIFTRT,
10396 we can only do this if FIRST_CODE is also ASHIFTRT.
10397
10398 We can't do the case when CODE is ROTATE and FIRST_CODE is
10399 ASHIFTRT.
10400
10401 If the mode of this shift is not the mode of the outer shift,
10402 we can't do this if either shift is a right shift or ROTATE.
10403
10404 Finally, we can't do any of these if the mode is too wide
10405 unless the codes are the same.
10406
10407 Handle the case where the shift codes are the same
10408 first. */
10409
10410 if (code == first_code)
10411 {
10412 if (GET_MODE (varop) != result_mode
10413 && (code == ASHIFTRT || code == LSHIFTRT
10414 || code == ROTATE))
10415 break;
10416
10417 count += first_count;
10418 varop = XEXP (varop, 0);
10419 continue;
10420 }
10421
10422 if (code == ASHIFTRT
10423 || (code == ROTATE && first_code == ASHIFTRT)
10424 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
10425 || (GET_MODE (varop) != result_mode
10426 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10427 || first_code == ROTATE
10428 || code == ROTATE)))
10429 break;
10430
10431 /* To compute the mask to apply after the shift, shift the
10432 nonzero bits of the inner shift the same way the
10433 outer shift will. */
10434
10435 mask_rtx = gen_int_mode (nonzero_bits (varop, GET_MODE (varop)),
10436 result_mode);
10437
10438 mask_rtx
10439 = simplify_const_binary_operation (code, result_mode, mask_rtx,
10440 GEN_INT (count));
10441
10442 /* Give up if we can't compute an outer operation to use. */
10443 if (mask_rtx == 0
10444 || !CONST_INT_P (mask_rtx)
10445 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10446 INTVAL (mask_rtx),
10447 result_mode, &complement_p))
10448 break;
10449
10450 /* If the shifts are in the same direction, we add the
10451 counts. Otherwise, we subtract them. */
10452 if ((code == ASHIFTRT || code == LSHIFTRT)
10453 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10454 count += first_count;
10455 else
10456 count -= first_count;
10457
10458 /* If COUNT is positive, the new shift is usually CODE,
10459 except for the two exceptions below, in which case it is
10460 FIRST_CODE. If the count is negative, FIRST_CODE should
10461 always be used */
10462 if (count > 0
10463 && ((first_code == ROTATE && code == ASHIFT)
10464 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10465 code = first_code;
10466 else if (count < 0)
10467 code = first_code, count = -count;
10468
10469 varop = XEXP (varop, 0);
10470 continue;
10471 }
10472
10473 /* If we have (A << B << C) for any shift, we can convert this to
10474 (A << C << B). This wins if A is a constant. Only try this if
10475 B is not a constant. */
10476
10477 else if (GET_CODE (varop) == code
10478 && CONST_INT_P (XEXP (varop, 0))
10479 && !CONST_INT_P (XEXP (varop, 1)))
10480 {
10481 rtx new_rtx = simplify_const_binary_operation (code, mode,
10482 XEXP (varop, 0),
10483 GEN_INT (count));
10484 varop = gen_rtx_fmt_ee (code, mode, new_rtx, XEXP (varop, 1));
10485 count = 0;
10486 continue;
10487 }
10488 break;
10489
10490 case NOT:
10491 if (VECTOR_MODE_P (mode))
10492 break;
10493
10494 /* Make this fit the case below. */
10495 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
10496 continue;
10497
10498 case IOR:
10499 case AND:
10500 case XOR:
10501 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10502 with C the size of VAROP - 1 and the shift is logical if
10503 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10504 we have an (le X 0) operation. If we have an arithmetic shift
10505 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10506 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
10507
10508 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10509 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10510 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10511 && (code == LSHIFTRT || code == ASHIFTRT)
10512 && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
10513 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10514 {
10515 count = 0;
10516 varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1),
10517 const0_rtx);
10518
10519 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10520 varop = gen_rtx_NEG (GET_MODE (varop), varop);
10521
10522 continue;
10523 }
10524
10525 /* If we have (shift (logical)), move the logical to the outside
10526 to allow it to possibly combine with another logical and the
10527 shift to combine with another shift. This also canonicalizes to
10528 what a ZERO_EXTRACT looks like. Also, some machines have
10529 (and (shift)) insns. */
10530
10531 if (CONST_INT_P (XEXP (varop, 1))
10532 /* We can't do this if we have (ashiftrt (xor)) and the
10533 constant has its sign bit set in shift_mode with shift_mode
10534 wider than result_mode. */
10535 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10536 && result_mode != shift_mode
10537 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10538 shift_mode))
10539 && (new_rtx = simplify_const_binary_operation
10540 (code, result_mode,
10541 gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
10542 GEN_INT (count))) != 0
10543 && CONST_INT_P (new_rtx)
10544 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10545 INTVAL (new_rtx), result_mode, &complement_p))
10546 {
10547 varop = XEXP (varop, 0);
10548 continue;
10549 }
10550
10551 /* If we can't do that, try to simplify the shift in each arm of the
10552 logical expression, make a new logical expression, and apply
10553 the inverse distributive law. This also can't be done for
10554 (ashiftrt (xor)) where we've widened the shift and the constant
10555 changes the sign bit. */
10556 if (CONST_INT_P (XEXP (varop, 1))
10557 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10558 && result_mode != shift_mode
10559 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10560 shift_mode)))
10561 {
10562 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10563 XEXP (varop, 0), count);
10564 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
10565 XEXP (varop, 1), count);
10566
10567 varop = simplify_gen_binary (GET_CODE (varop), shift_mode,
10568 lhs, rhs);
10569 varop = apply_distributive_law (varop);
10570
10571 count = 0;
10572 continue;
10573 }
10574 break;
10575
10576 case EQ:
10577 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10578 says that the sign bit can be tested, FOO has mode MODE, C is
10579 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
10580 that may be nonzero. */
10581 if (code == LSHIFTRT
10582 && XEXP (varop, 1) == const0_rtx
10583 && GET_MODE (XEXP (varop, 0)) == result_mode
10584 && count == (GET_MODE_PRECISION (result_mode) - 1)
10585 && HWI_COMPUTABLE_MODE_P (result_mode)
10586 && STORE_FLAG_VALUE == -1
10587 && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10588 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10589 &complement_p))
10590 {
10591 varop = XEXP (varop, 0);
10592 count = 0;
10593 continue;
10594 }
10595 break;
10596
10597 case NEG:
10598 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
10599 than the number of bits in the mode is equivalent to A. */
10600 if (code == LSHIFTRT
10601 && count == (GET_MODE_PRECISION (result_mode) - 1)
10602 && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
10603 {
10604 varop = XEXP (varop, 0);
10605 count = 0;
10606 continue;
10607 }
10608
10609 /* NEG commutes with ASHIFT since it is multiplication. Move the
10610 NEG outside to allow shifts to combine. */
10611 if (code == ASHIFT
10612 && merge_outer_ops (&outer_op, &outer_const, NEG, 0, result_mode,
10613 &complement_p))
10614 {
10615 varop = XEXP (varop, 0);
10616 continue;
10617 }
10618 break;
10619
10620 case PLUS:
10621 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
10622 is one less than the number of bits in the mode is
10623 equivalent to (xor A 1). */
10624 if (code == LSHIFTRT
10625 && count == (GET_MODE_PRECISION (result_mode) - 1)
10626 && XEXP (varop, 1) == constm1_rtx
10627 && nonzero_bits (XEXP (varop, 0), result_mode) == 1
10628 && merge_outer_ops (&outer_op, &outer_const, XOR, 1, result_mode,
10629 &complement_p))
10630 {
10631 count = 0;
10632 varop = XEXP (varop, 0);
10633 continue;
10634 }
10635
10636 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
10637 that might be nonzero in BAR are those being shifted out and those
10638 bits are known zero in FOO, we can replace the PLUS with FOO.
10639 Similarly in the other operand order. This code occurs when
10640 we are computing the size of a variable-size array. */
10641
10642 if ((code == ASHIFTRT || code == LSHIFTRT)
10643 && count < HOST_BITS_PER_WIDE_INT
10644 && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
10645 && (nonzero_bits (XEXP (varop, 1), result_mode)
10646 & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
10647 {
10648 varop = XEXP (varop, 0);
10649 continue;
10650 }
10651 else if ((code == ASHIFTRT || code == LSHIFTRT)
10652 && count < HOST_BITS_PER_WIDE_INT
10653 && HWI_COMPUTABLE_MODE_P (result_mode)
10654 && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10655 >> count)
10656 && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
10657 & nonzero_bits (XEXP (varop, 1),
10658 result_mode)))
10659 {
10660 varop = XEXP (varop, 1);
10661 continue;
10662 }
10663
10664 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
10665 if (code == ASHIFT
10666 && CONST_INT_P (XEXP (varop, 1))
10667 && (new_rtx = simplify_const_binary_operation
10668 (ASHIFT, result_mode,
10669 gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
10670 GEN_INT (count))) != 0
10671 && CONST_INT_P (new_rtx)
10672 && merge_outer_ops (&outer_op, &outer_const, PLUS,
10673 INTVAL (new_rtx), result_mode, &complement_p))
10674 {
10675 varop = XEXP (varop, 0);
10676 continue;
10677 }
10678
10679 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
10680 signbit', and attempt to change the PLUS to an XOR and move it to
10681 the outer operation as is done above in the AND/IOR/XOR case
10682 leg for shift(logical). See details in logical handling above
10683 for reasoning in doing so. */
10684 if (code == LSHIFTRT
10685 && CONST_INT_P (XEXP (varop, 1))
10686 && mode_signbit_p (result_mode, XEXP (varop, 1))
10687 && (new_rtx = simplify_const_binary_operation
10688 (code, result_mode,
10689 gen_int_mode (INTVAL (XEXP (varop, 1)), result_mode),
10690 GEN_INT (count))) != 0
10691 && CONST_INT_P (new_rtx)
10692 && merge_outer_ops (&outer_op, &outer_const, XOR,
10693 INTVAL (new_rtx), result_mode, &complement_p))
10694 {
10695 varop = XEXP (varop, 0);
10696 continue;
10697 }
10698
10699 break;
10700
10701 case MINUS:
10702 /* If we have (xshiftrt (minus (ashiftrt X C)) X) C)
10703 with C the size of VAROP - 1 and the shift is logical if
10704 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10705 we have a (gt X 0) operation. If the shift is arithmetic with
10706 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
10707 we have a (neg (gt X 0)) operation. */
10708
10709 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10710 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
10711 && count == (GET_MODE_PRECISION (GET_MODE (varop)) - 1)
10712 && (code == LSHIFTRT || code == ASHIFTRT)
10713 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10714 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
10715 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10716 {
10717 count = 0;
10718 varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1),
10719 const0_rtx);
10720
10721 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10722 varop = gen_rtx_NEG (GET_MODE (varop), varop);
10723
10724 continue;
10725 }
10726 break;
10727
10728 case TRUNCATE:
10729 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
10730 if the truncate does not affect the value. */
10731 if (code == LSHIFTRT
10732 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
10733 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
10734 && (INTVAL (XEXP (XEXP (varop, 0), 1))
10735 >= (GET_MODE_PRECISION (GET_MODE (XEXP (varop, 0)))
10736 - GET_MODE_PRECISION (GET_MODE (varop)))))
10737 {
10738 rtx varop_inner = XEXP (varop, 0);
10739
10740 varop_inner
10741 = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
10742 XEXP (varop_inner, 0),
10743 GEN_INT
10744 (count + INTVAL (XEXP (varop_inner, 1))));
10745 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
10746 count = 0;
10747 continue;
10748 }
10749 break;
10750
10751 default:
10752 break;
10753 }
10754
10755 break;
10756 }
10757
10758 shift_mode = try_widen_shift_mode (code, varop, count, result_mode, mode,
10759 outer_op, outer_const);
10760
10761 /* We have now finished analyzing the shift. The result should be
10762 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
10763 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
10764 to the result of the shift. OUTER_CONST is the relevant constant,
10765 but we must turn off all bits turned off in the shift. */
10766
10767 if (outer_op == UNKNOWN
10768 && orig_code == code && orig_count == count
10769 && varop == orig_varop
10770 && shift_mode == GET_MODE (varop))
10771 return NULL_RTX;
10772
10773 /* Make a SUBREG if necessary. If we can't make it, fail. */
10774 varop = gen_lowpart (shift_mode, varop);
10775 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10776 return NULL_RTX;
10777
10778 /* If we have an outer operation and we just made a shift, it is
10779 possible that we could have simplified the shift were it not
10780 for the outer operation. So try to do the simplification
10781 recursively. */
10782
10783 if (outer_op != UNKNOWN)
10784 x = simplify_shift_const_1 (code, shift_mode, varop, count);
10785 else
10786 x = NULL_RTX;
10787
10788 if (x == NULL_RTX)
10789 x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
10790
10791 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
10792 turn off all the bits that the shift would have turned off. */
10793 if (orig_code == LSHIFTRT && result_mode != shift_mode)
10794 x = simplify_and_const_int (NULL_RTX, shift_mode, x,
10795 GET_MODE_MASK (result_mode) >> orig_count);
10796
10797 /* Do the remainder of the processing in RESULT_MODE. */
10798 x = gen_lowpart_or_truncate (result_mode, x);
10799
10800 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
10801 operation. */
10802 if (complement_p)
10803 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
10804
10805 if (outer_op != UNKNOWN)
10806 {
10807 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
10808 && GET_MODE_PRECISION (result_mode) < HOST_BITS_PER_WIDE_INT)
10809 outer_const = trunc_int_for_mode (outer_const, result_mode);
10810
10811 if (outer_op == AND)
10812 x = simplify_and_const_int (NULL_RTX, result_mode, x, outer_const);
10813 else if (outer_op == SET)
10814 {
10815 /* This means that we have determined that the result is
10816 equivalent to a constant. This should be rare. */
10817 if (!side_effects_p (x))
10818 x = GEN_INT (outer_const);
10819 }
10820 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
10821 x = simplify_gen_unary (outer_op, result_mode, x, result_mode);
10822 else
10823 x = simplify_gen_binary (outer_op, result_mode, x,
10824 GEN_INT (outer_const));
10825 }
10826
10827 return x;
10828 }
10829
10830 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
10831 The result of the shift is RESULT_MODE. If we cannot simplify it,
10832 return X or, if it is NULL, synthesize the expression with
10833 simplify_gen_binary. Otherwise, return a simplified value.
10834
10835 The shift is normally computed in the widest mode we find in VAROP, as
10836 long as it isn't a different number of words than RESULT_MODE. Exceptions
10837 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10838
10839 static rtx
10840 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
10841 rtx varop, int count)
10842 {
10843 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
10844 if (tem)
10845 return tem;
10846
10847 if (!x)
10848 x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
10849 if (GET_MODE (x) != result_mode)
10850 x = gen_lowpart (result_mode, x);
10851 return x;
10852 }
10853
10854 \f
10855 /* A subroutine of recog_for_combine. See there for arguments and
10856 return value. */
10857
10858 static int
10859 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
10860 {
10861 rtx pat = *pnewpat;
10862 rtx pat_without_clobbers;
10863 int insn_code_number;
10864 int num_clobbers_to_add = 0;
10865 int i;
10866 rtx notes = NULL_RTX;
10867 rtx old_notes, old_pat;
10868 int old_icode;
10869
10870 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
10871 we use to indicate that something didn't match. If we find such a
10872 thing, force rejection. */
10873 if (GET_CODE (pat) == PARALLEL)
10874 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
10875 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
10876 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
10877 return -1;
10878
10879 old_pat = PATTERN (insn);
10880 old_notes = REG_NOTES (insn);
10881 PATTERN (insn) = pat;
10882 REG_NOTES (insn) = NULL_RTX;
10883
10884 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
10885 if (dump_file && (dump_flags & TDF_DETAILS))
10886 {
10887 if (insn_code_number < 0)
10888 fputs ("Failed to match this instruction:\n", dump_file);
10889 else
10890 fputs ("Successfully matched this instruction:\n", dump_file);
10891 print_rtl_single (dump_file, pat);
10892 }
10893
10894 /* If it isn't, there is the possibility that we previously had an insn
10895 that clobbered some register as a side effect, but the combined
10896 insn doesn't need to do that. So try once more without the clobbers
10897 unless this represents an ASM insn. */
10898
10899 if (insn_code_number < 0 && ! check_asm_operands (pat)
10900 && GET_CODE (pat) == PARALLEL)
10901 {
10902 int pos;
10903
10904 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
10905 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
10906 {
10907 if (i != pos)
10908 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
10909 pos++;
10910 }
10911
10912 SUBST_INT (XVECLEN (pat, 0), pos);
10913
10914 if (pos == 1)
10915 pat = XVECEXP (pat, 0, 0);
10916
10917 PATTERN (insn) = pat;
10918 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
10919 if (dump_file && (dump_flags & TDF_DETAILS))
10920 {
10921 if (insn_code_number < 0)
10922 fputs ("Failed to match this instruction:\n", dump_file);
10923 else
10924 fputs ("Successfully matched this instruction:\n", dump_file);
10925 print_rtl_single (dump_file, pat);
10926 }
10927 }
10928
10929 pat_without_clobbers = pat;
10930
10931 PATTERN (insn) = old_pat;
10932 REG_NOTES (insn) = old_notes;
10933
10934 /* Recognize all noop sets, these will be killed by followup pass. */
10935 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
10936 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
10937
10938 /* If we had any clobbers to add, make a new pattern than contains
10939 them. Then check to make sure that all of them are dead. */
10940 if (num_clobbers_to_add)
10941 {
10942 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
10943 rtvec_alloc (GET_CODE (pat) == PARALLEL
10944 ? (XVECLEN (pat, 0)
10945 + num_clobbers_to_add)
10946 : num_clobbers_to_add + 1));
10947
10948 if (GET_CODE (pat) == PARALLEL)
10949 for (i = 0; i < XVECLEN (pat, 0); i++)
10950 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
10951 else
10952 XVECEXP (newpat, 0, 0) = pat;
10953
10954 add_clobbers (newpat, insn_code_number);
10955
10956 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
10957 i < XVECLEN (newpat, 0); i++)
10958 {
10959 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
10960 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
10961 return -1;
10962 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
10963 {
10964 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
10965 notes = alloc_reg_note (REG_UNUSED,
10966 XEXP (XVECEXP (newpat, 0, i), 0), notes);
10967 }
10968 }
10969 pat = newpat;
10970 }
10971
10972 if (insn_code_number >= 0
10973 && insn_code_number != NOOP_MOVE_INSN_CODE)
10974 {
10975 old_pat = PATTERN (insn);
10976 old_notes = REG_NOTES (insn);
10977 old_icode = INSN_CODE (insn);
10978 PATTERN (insn) = pat;
10979 REG_NOTES (insn) = notes;
10980
10981 /* Allow targets to reject combined insn. */
10982 if (!targetm.legitimate_combined_insn (insn))
10983 {
10984 if (dump_file && (dump_flags & TDF_DETAILS))
10985 fputs ("Instruction not appropriate for target.",
10986 dump_file);
10987
10988 /* Callers expect recog_for_combine to strip
10989 clobbers from the pattern on failure. */
10990 pat = pat_without_clobbers;
10991 notes = NULL_RTX;
10992
10993 insn_code_number = -1;
10994 }
10995
10996 PATTERN (insn) = old_pat;
10997 REG_NOTES (insn) = old_notes;
10998 INSN_CODE (insn) = old_icode;
10999 }
11000
11001 *pnewpat = pat;
11002 *pnotes = notes;
11003
11004 return insn_code_number;
11005 }
11006
11007 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11008 expressed as an AND and maybe an LSHIFTRT, to that formulation.
11009 Return whether anything was so changed. */
11010
11011 static bool
11012 change_zero_ext (rtx *src)
11013 {
11014 bool changed = false;
11015
11016 subrtx_ptr_iterator::array_type array;
11017 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11018 {
11019 rtx x = **iter;
11020 machine_mode mode = GET_MODE (x);
11021 int size;
11022
11023 if (GET_CODE (x) == ZERO_EXTRACT
11024 && CONST_INT_P (XEXP (x, 1))
11025 && CONST_INT_P (XEXP (x, 2))
11026 && GET_MODE (XEXP (x, 0)) == mode)
11027 {
11028 size = INTVAL (XEXP (x, 1));
11029
11030 int start = INTVAL (XEXP (x, 2));
11031 if (BITS_BIG_ENDIAN)
11032 start = GET_MODE_PRECISION (mode) - size - start;
11033
11034 x = gen_rtx_LSHIFTRT (mode, XEXP (x, 0), GEN_INT (start));
11035 }
11036 else if (GET_CODE (x) == ZERO_EXTEND
11037 && GET_CODE (XEXP (x, 0)) == SUBREG
11038 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
11039 && subreg_lowpart_p (XEXP (x, 0)))
11040 {
11041 size = GET_MODE_PRECISION (GET_MODE (XEXP (x, 0)));
11042 x = SUBREG_REG (XEXP (x, 0));
11043 }
11044 else
11045 continue;
11046
11047 unsigned HOST_WIDE_INT mask = 1;
11048 mask <<= size;
11049 mask--;
11050
11051 x = gen_rtx_AND (mode, x, GEN_INT (mask));
11052
11053 SUBST (**iter, x);
11054 changed = true;
11055 }
11056
11057 return changed;
11058 }
11059
11060 /* Like recog, but we receive the address of a pointer to a new pattern.
11061 We try to match the rtx that the pointer points to.
11062 If that fails, we may try to modify or replace the pattern,
11063 storing the replacement into the same pointer object.
11064
11065 Modifications include deletion or addition of CLOBBERs. If the
11066 instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11067 to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11068 (and undo if that fails).
11069
11070 PNOTES is a pointer to a location where any REG_UNUSED notes added for
11071 the CLOBBERs are placed.
11072
11073 The value is the final insn code from the pattern ultimately matched,
11074 or -1. */
11075
11076 static int
11077 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11078 {
11079 rtx pat = PATTERN (insn);
11080 int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11081 if (insn_code_number >= 0 || check_asm_operands (pat))
11082 return insn_code_number;
11083
11084 void *marker = get_undo_marker ();
11085 bool changed = false;
11086
11087 if (GET_CODE (pat) == SET)
11088 changed = change_zero_ext (&SET_SRC (pat));
11089 else if (GET_CODE (pat) == PARALLEL)
11090 {
11091 int i;
11092 for (i = 0; i < XVECLEN (pat, 0); i++)
11093 {
11094 rtx set = XVECEXP (pat, 0, i);
11095 if (GET_CODE (set) == SET)
11096 changed |= change_zero_ext (&SET_SRC (set));
11097 }
11098 }
11099
11100 if (changed)
11101 {
11102 insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11103
11104 if (insn_code_number < 0)
11105 undo_to_marker (marker);
11106 }
11107
11108 return insn_code_number;
11109 }
11110 \f
11111 /* Like gen_lowpart_general but for use by combine. In combine it
11112 is not possible to create any new pseudoregs. However, it is
11113 safe to create invalid memory addresses, because combine will
11114 try to recognize them and all they will do is make the combine
11115 attempt fail.
11116
11117 If for some reason this cannot do its job, an rtx
11118 (clobber (const_int 0)) is returned.
11119 An insn containing that will not be recognized. */
11120
11121 static rtx
11122 gen_lowpart_for_combine (machine_mode omode, rtx x)
11123 {
11124 machine_mode imode = GET_MODE (x);
11125 unsigned int osize = GET_MODE_SIZE (omode);
11126 unsigned int isize = GET_MODE_SIZE (imode);
11127 rtx result;
11128
11129 if (omode == imode)
11130 return x;
11131
11132 /* We can only support MODE being wider than a word if X is a
11133 constant integer or has a mode the same size. */
11134 if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
11135 && ! (CONST_SCALAR_INT_P (x) || isize == osize))
11136 goto fail;
11137
11138 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
11139 won't know what to do. So we will strip off the SUBREG here and
11140 process normally. */
11141 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11142 {
11143 x = SUBREG_REG (x);
11144
11145 /* For use in case we fall down into the address adjustments
11146 further below, we need to adjust the known mode and size of
11147 x; imode and isize, since we just adjusted x. */
11148 imode = GET_MODE (x);
11149
11150 if (imode == omode)
11151 return x;
11152
11153 isize = GET_MODE_SIZE (imode);
11154 }
11155
11156 result = gen_lowpart_common (omode, x);
11157
11158 if (result)
11159 return result;
11160
11161 if (MEM_P (x))
11162 {
11163 int offset = 0;
11164
11165 /* Refuse to work on a volatile memory ref or one with a mode-dependent
11166 address. */
11167 if (MEM_VOLATILE_P (x)
11168 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11169 goto fail;
11170
11171 /* If we want to refer to something bigger than the original memref,
11172 generate a paradoxical subreg instead. That will force a reload
11173 of the original memref X. */
11174 if (isize < osize)
11175 return gen_rtx_SUBREG (omode, x, 0);
11176
11177 if (WORDS_BIG_ENDIAN)
11178 offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);
11179
11180 /* Adjust the address so that the address-after-the-data is
11181 unchanged. */
11182 if (BYTES_BIG_ENDIAN)
11183 offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);
11184
11185 return adjust_address_nv (x, omode, offset);
11186 }
11187
11188 /* If X is a comparison operator, rewrite it in a new mode. This
11189 probably won't match, but may allow further simplifications. */
11190 else if (COMPARISON_P (x))
11191 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11192
11193 /* If we couldn't simplify X any other way, just enclose it in a
11194 SUBREG. Normally, this SUBREG won't match, but some patterns may
11195 include an explicit SUBREG or we may simplify it further in combine. */
11196 else
11197 {
11198 rtx res;
11199
11200 if (imode == VOIDmode)
11201 {
11202 imode = int_mode_for_mode (omode);
11203 x = gen_lowpart_common (imode, x);
11204 if (x == NULL)
11205 goto fail;
11206 }
11207 res = lowpart_subreg (omode, x, imode);
11208 if (res)
11209 return res;
11210 }
11211
11212 fail:
11213 return gen_rtx_CLOBBER (omode, const0_rtx);
11214 }
11215 \f
11216 /* Try to simplify a comparison between OP0 and a constant OP1,
11217 where CODE is the comparison code that will be tested, into a
11218 (CODE OP0 const0_rtx) form.
11219
11220 The result is a possibly different comparison code to use.
11221 *POP1 may be updated. */
11222
11223 static enum rtx_code
11224 simplify_compare_const (enum rtx_code code, machine_mode mode,
11225 rtx op0, rtx *pop1)
11226 {
11227 unsigned int mode_width = GET_MODE_PRECISION (mode);
11228 HOST_WIDE_INT const_op = INTVAL (*pop1);
11229
11230 /* Get the constant we are comparing against and turn off all bits
11231 not on in our mode. */
11232 if (mode != VOIDmode)
11233 const_op = trunc_int_for_mode (const_op, mode);
11234
11235 /* If we are comparing against a constant power of two and the value
11236 being compared can only have that single bit nonzero (e.g., it was
11237 `and'ed with that bit), we can replace this with a comparison
11238 with zero. */
11239 if (const_op
11240 && (code == EQ || code == NE || code == GE || code == GEU
11241 || code == LT || code == LTU)
11242 && mode_width - 1 < HOST_BITS_PER_WIDE_INT
11243 && exact_log2 (const_op & GET_MODE_MASK (mode)) >= 0
11244 && (nonzero_bits (op0, mode)
11245 == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (mode))))
11246 {
11247 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11248 const_op = 0;
11249 }
11250
11251 /* Similarly, if we are comparing a value known to be either -1 or
11252 0 with -1, change it to the opposite comparison against zero. */
11253 if (const_op == -1
11254 && (code == EQ || code == NE || code == GT || code == LE
11255 || code == GEU || code == LTU)
11256 && num_sign_bit_copies (op0, mode) == mode_width)
11257 {
11258 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11259 const_op = 0;
11260 }
11261
11262 /* Do some canonicalizations based on the comparison code. We prefer
11263 comparisons against zero and then prefer equality comparisons.
11264 If we can reduce the size of a constant, we will do that too. */
11265 switch (code)
11266 {
11267 case LT:
11268 /* < C is equivalent to <= (C - 1) */
11269 if (const_op > 0)
11270 {
11271 const_op -= 1;
11272 code = LE;
11273 /* ... fall through to LE case below. */
11274 }
11275 else
11276 break;
11277
11278 case LE:
11279 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
11280 if (const_op < 0)
11281 {
11282 const_op += 1;
11283 code = LT;
11284 }
11285
11286 /* If we are doing a <= 0 comparison on a value known to have
11287 a zero sign bit, we can replace this with == 0. */
11288 else if (const_op == 0
11289 && mode_width - 1 < HOST_BITS_PER_WIDE_INT
11290 && (nonzero_bits (op0, mode)
11291 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11292 == 0)
11293 code = EQ;
11294 break;
11295
11296 case GE:
11297 /* >= C is equivalent to > (C - 1). */
11298 if (const_op > 0)
11299 {
11300 const_op -= 1;
11301 code = GT;
11302 /* ... fall through to GT below. */
11303 }
11304 else
11305 break;
11306
11307 case GT:
11308 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11309 if (const_op < 0)
11310 {
11311 const_op += 1;
11312 code = GE;
11313 }
11314
11315 /* If we are doing a > 0 comparison on a value known to have
11316 a zero sign bit, we can replace this with != 0. */
11317 else if (const_op == 0
11318 && mode_width - 1 < HOST_BITS_PER_WIDE_INT
11319 && (nonzero_bits (op0, mode)
11320 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11321 == 0)
11322 code = NE;
11323 break;
11324
11325 case LTU:
11326 /* < C is equivalent to <= (C - 1). */
11327 if (const_op > 0)
11328 {
11329 const_op -= 1;
11330 code = LEU;
11331 /* ... fall through ... */
11332 }
11333 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11334 else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11335 && (unsigned HOST_WIDE_INT) const_op
11336 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
11337 {
11338 const_op = 0;
11339 code = GE;
11340 break;
11341 }
11342 else
11343 break;
11344
11345 case LEU:
11346 /* unsigned <= 0 is equivalent to == 0 */
11347 if (const_op == 0)
11348 code = EQ;
11349 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11350 else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11351 && (unsigned HOST_WIDE_INT) const_op
11352 == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
11353 {
11354 const_op = 0;
11355 code = GE;
11356 }
11357 break;
11358
11359 case GEU:
11360 /* >= C is equivalent to > (C - 1). */
11361 if (const_op > 1)
11362 {
11363 const_op -= 1;
11364 code = GTU;
11365 /* ... fall through ... */
11366 }
11367
11368 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
11369 else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11370 && (unsigned HOST_WIDE_INT) const_op
11371 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1))
11372 {
11373 const_op = 0;
11374 code = LT;
11375 break;
11376 }
11377 else
11378 break;
11379
11380 case GTU:
11381 /* unsigned > 0 is equivalent to != 0 */
11382 if (const_op == 0)
11383 code = NE;
11384 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
11385 else if (mode_width - 1 < HOST_BITS_PER_WIDE_INT
11386 && (unsigned HOST_WIDE_INT) const_op
11387 == ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)) - 1)
11388 {
11389 const_op = 0;
11390 code = LT;
11391 }
11392 break;
11393
11394 default:
11395 break;
11396 }
11397
11398 *pop1 = GEN_INT (const_op);
11399 return code;
11400 }
11401 \f
11402 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
11403 comparison code that will be tested.
11404
11405 The result is a possibly different comparison code to use. *POP0 and
11406 *POP1 may be updated.
11407
11408 It is possible that we might detect that a comparison is either always
11409 true or always false. However, we do not perform general constant
11410 folding in combine, so this knowledge isn't useful. Such tautologies
11411 should have been detected earlier. Hence we ignore all such cases. */
11412
11413 static enum rtx_code
11414 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
11415 {
11416 rtx op0 = *pop0;
11417 rtx op1 = *pop1;
11418 rtx tem, tem1;
11419 int i;
11420 machine_mode mode, tmode;
11421
11422 /* Try a few ways of applying the same transformation to both operands. */
11423 while (1)
11424 {
11425 #if !WORD_REGISTER_OPERATIONS
11426 /* The test below this one won't handle SIGN_EXTENDs on these machines,
11427 so check specially. */
11428 if (code != GTU && code != GEU && code != LTU && code != LEU
11429 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
11430 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11431 && GET_CODE (XEXP (op1, 0)) == ASHIFT
11432 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
11433 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
11434 && (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0)))
11435 == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0))))
11436 && CONST_INT_P (XEXP (op0, 1))
11437 && XEXP (op0, 1) == XEXP (op1, 1)
11438 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11439 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
11440 && (INTVAL (XEXP (op0, 1))
11441 == (GET_MODE_PRECISION (GET_MODE (op0))
11442 - (GET_MODE_PRECISION
11443 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))))))))
11444 {
11445 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
11446 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
11447 }
11448 #endif
11449
11450 /* If both operands are the same constant shift, see if we can ignore the
11451 shift. We can if the shift is a rotate or if the bits shifted out of
11452 this shift are known to be zero for both inputs and if the type of
11453 comparison is compatible with the shift. */
11454 if (GET_CODE (op0) == GET_CODE (op1)
11455 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
11456 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
11457 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
11458 && (code != GT && code != LT && code != GE && code != LE))
11459 || (GET_CODE (op0) == ASHIFTRT
11460 && (code != GTU && code != LTU
11461 && code != GEU && code != LEU)))
11462 && CONST_INT_P (XEXP (op0, 1))
11463 && INTVAL (XEXP (op0, 1)) >= 0
11464 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11465 && XEXP (op0, 1) == XEXP (op1, 1))
11466 {
11467 machine_mode mode = GET_MODE (op0);
11468 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11469 int shift_count = INTVAL (XEXP (op0, 1));
11470
11471 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
11472 mask &= (mask >> shift_count) << shift_count;
11473 else if (GET_CODE (op0) == ASHIFT)
11474 mask = (mask & (mask << shift_count)) >> shift_count;
11475
11476 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
11477 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
11478 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
11479 else
11480 break;
11481 }
11482
11483 /* If both operands are AND's of a paradoxical SUBREG by constant, the
11484 SUBREGs are of the same mode, and, in both cases, the AND would
11485 be redundant if the comparison was done in the narrower mode,
11486 do the comparison in the narrower mode (e.g., we are AND'ing with 1
11487 and the operand's possibly nonzero bits are 0xffffff01; in that case
11488 if we only care about QImode, we don't need the AND). This case
11489 occurs if the output mode of an scc insn is not SImode and
11490 STORE_FLAG_VALUE == 1 (e.g., the 386).
11491
11492 Similarly, check for a case where the AND's are ZERO_EXTEND
11493 operations from some narrower mode even though a SUBREG is not
11494 present. */
11495
11496 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
11497 && CONST_INT_P (XEXP (op0, 1))
11498 && CONST_INT_P (XEXP (op1, 1)))
11499 {
11500 rtx inner_op0 = XEXP (op0, 0);
11501 rtx inner_op1 = XEXP (op1, 0);
11502 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
11503 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
11504 int changed = 0;
11505
11506 if (paradoxical_subreg_p (inner_op0)
11507 && GET_CODE (inner_op1) == SUBREG
11508 && (GET_MODE (SUBREG_REG (inner_op0))
11509 == GET_MODE (SUBREG_REG (inner_op1)))
11510 && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
11511 <= HOST_BITS_PER_WIDE_INT)
11512 && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
11513 GET_MODE (SUBREG_REG (inner_op0)))))
11514 && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
11515 GET_MODE (SUBREG_REG (inner_op1))))))
11516 {
11517 op0 = SUBREG_REG (inner_op0);
11518 op1 = SUBREG_REG (inner_op1);
11519
11520 /* The resulting comparison is always unsigned since we masked
11521 off the original sign bit. */
11522 code = unsigned_condition (code);
11523
11524 changed = 1;
11525 }
11526
11527 else if (c0 == c1)
11528 for (tmode = GET_CLASS_NARROWEST_MODE
11529 (GET_MODE_CLASS (GET_MODE (op0)));
11530 tmode != GET_MODE (op0); tmode = GET_MODE_WIDER_MODE (tmode))
11531 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
11532 {
11533 op0 = gen_lowpart (tmode, inner_op0);
11534 op1 = gen_lowpart (tmode, inner_op1);
11535 code = unsigned_condition (code);
11536 changed = 1;
11537 break;
11538 }
11539
11540 if (! changed)
11541 break;
11542 }
11543
11544 /* If both operands are NOT, we can strip off the outer operation
11545 and adjust the comparison code for swapped operands; similarly for
11546 NEG, except that this must be an equality comparison. */
11547 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
11548 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
11549 && (code == EQ || code == NE)))
11550 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
11551
11552 else
11553 break;
11554 }
11555
11556 /* If the first operand is a constant, swap the operands and adjust the
11557 comparison code appropriately, but don't do this if the second operand
11558 is already a constant integer. */
11559 if (swap_commutative_operands_p (op0, op1))
11560 {
11561 std::swap (op0, op1);
11562 code = swap_condition (code);
11563 }
11564
11565 /* We now enter a loop during which we will try to simplify the comparison.
11566 For the most part, we only are concerned with comparisons with zero,
11567 but some things may really be comparisons with zero but not start
11568 out looking that way. */
11569
11570 while (CONST_INT_P (op1))
11571 {
11572 machine_mode mode = GET_MODE (op0);
11573 unsigned int mode_width = GET_MODE_PRECISION (mode);
11574 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11575 int equality_comparison_p;
11576 int sign_bit_comparison_p;
11577 int unsigned_comparison_p;
11578 HOST_WIDE_INT const_op;
11579
11580 /* We only want to handle integral modes. This catches VOIDmode,
11581 CCmode, and the floating-point modes. An exception is that we
11582 can handle VOIDmode if OP0 is a COMPARE or a comparison
11583 operation. */
11584
11585 if (GET_MODE_CLASS (mode) != MODE_INT
11586 && ! (mode == VOIDmode
11587 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
11588 break;
11589
11590 /* Try to simplify the compare to constant, possibly changing the
11591 comparison op, and/or changing op1 to zero. */
11592 code = simplify_compare_const (code, mode, op0, &op1);
11593 const_op = INTVAL (op1);
11594
11595 /* Compute some predicates to simplify code below. */
11596
11597 equality_comparison_p = (code == EQ || code == NE);
11598 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
11599 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
11600 || code == GEU);
11601
11602 /* If this is a sign bit comparison and we can do arithmetic in
11603 MODE, say that we will only be needing the sign bit of OP0. */
11604 if (sign_bit_comparison_p && HWI_COMPUTABLE_MODE_P (mode))
11605 op0 = force_to_mode (op0, mode,
11606 (unsigned HOST_WIDE_INT) 1
11607 << (GET_MODE_PRECISION (mode) - 1),
11608 0);
11609
11610 /* Now try cases based on the opcode of OP0. If none of the cases
11611 does a "continue", we exit this loop immediately after the
11612 switch. */
11613
11614 switch (GET_CODE (op0))
11615 {
11616 case ZERO_EXTRACT:
11617 /* If we are extracting a single bit from a variable position in
11618 a constant that has only a single bit set and are comparing it
11619 with zero, we can convert this into an equality comparison
11620 between the position and the location of the single bit. */
11621 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
11622 have already reduced the shift count modulo the word size. */
11623 if (!SHIFT_COUNT_TRUNCATED
11624 && CONST_INT_P (XEXP (op0, 0))
11625 && XEXP (op0, 1) == const1_rtx
11626 && equality_comparison_p && const_op == 0
11627 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
11628 {
11629 if (BITS_BIG_ENDIAN)
11630 i = BITS_PER_WORD - 1 - i;
11631
11632 op0 = XEXP (op0, 2);
11633 op1 = GEN_INT (i);
11634 const_op = i;
11635
11636 /* Result is nonzero iff shift count is equal to I. */
11637 code = reverse_condition (code);
11638 continue;
11639 }
11640
11641 /* ... fall through ... */
11642
11643 case SIGN_EXTRACT:
11644 tem = expand_compound_operation (op0);
11645 if (tem != op0)
11646 {
11647 op0 = tem;
11648 continue;
11649 }
11650 break;
11651
11652 case NOT:
11653 /* If testing for equality, we can take the NOT of the constant. */
11654 if (equality_comparison_p
11655 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
11656 {
11657 op0 = XEXP (op0, 0);
11658 op1 = tem;
11659 continue;
11660 }
11661
11662 /* If just looking at the sign bit, reverse the sense of the
11663 comparison. */
11664 if (sign_bit_comparison_p)
11665 {
11666 op0 = XEXP (op0, 0);
11667 code = (code == GE ? LT : GE);
11668 continue;
11669 }
11670 break;
11671
11672 case NEG:
11673 /* If testing for equality, we can take the NEG of the constant. */
11674 if (equality_comparison_p
11675 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
11676 {
11677 op0 = XEXP (op0, 0);
11678 op1 = tem;
11679 continue;
11680 }
11681
11682 /* The remaining cases only apply to comparisons with zero. */
11683 if (const_op != 0)
11684 break;
11685
11686 /* When X is ABS or is known positive,
11687 (neg X) is < 0 if and only if X != 0. */
11688
11689 if (sign_bit_comparison_p
11690 && (GET_CODE (XEXP (op0, 0)) == ABS
11691 || (mode_width <= HOST_BITS_PER_WIDE_INT
11692 && (nonzero_bits (XEXP (op0, 0), mode)
11693 & ((unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
11694 == 0)))
11695 {
11696 op0 = XEXP (op0, 0);
11697 code = (code == LT ? NE : EQ);
11698 continue;
11699 }
11700
11701 /* If we have NEG of something whose two high-order bits are the
11702 same, we know that "(-a) < 0" is equivalent to "a > 0". */
11703 if (num_sign_bit_copies (op0, mode) >= 2)
11704 {
11705 op0 = XEXP (op0, 0);
11706 code = swap_condition (code);
11707 continue;
11708 }
11709 break;
11710
11711 case ROTATE:
11712 /* If we are testing equality and our count is a constant, we
11713 can perform the inverse operation on our RHS. */
11714 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
11715 && (tem = simplify_binary_operation (ROTATERT, mode,
11716 op1, XEXP (op0, 1))) != 0)
11717 {
11718 op0 = XEXP (op0, 0);
11719 op1 = tem;
11720 continue;
11721 }
11722
11723 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
11724 a particular bit. Convert it to an AND of a constant of that
11725 bit. This will be converted into a ZERO_EXTRACT. */
11726 if (const_op == 0 && sign_bit_comparison_p
11727 && CONST_INT_P (XEXP (op0, 1))
11728 && mode_width <= HOST_BITS_PER_WIDE_INT)
11729 {
11730 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
11731 ((unsigned HOST_WIDE_INT) 1
11732 << (mode_width - 1
11733 - INTVAL (XEXP (op0, 1)))));
11734 code = (code == LT ? NE : EQ);
11735 continue;
11736 }
11737
11738 /* Fall through. */
11739
11740 case ABS:
11741 /* ABS is ignorable inside an equality comparison with zero. */
11742 if (const_op == 0 && equality_comparison_p)
11743 {
11744 op0 = XEXP (op0, 0);
11745 continue;
11746 }
11747 break;
11748
11749 case SIGN_EXTEND:
11750 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
11751 (compare FOO CONST) if CONST fits in FOO's mode and we
11752 are either testing inequality or have an unsigned
11753 comparison with ZERO_EXTEND or a signed comparison with
11754 SIGN_EXTEND. But don't do it if we don't have a compare
11755 insn of the given mode, since we'd have to revert it
11756 later on, and then we wouldn't know whether to sign- or
11757 zero-extend. */
11758 mode = GET_MODE (XEXP (op0, 0));
11759 if (GET_MODE_CLASS (mode) == MODE_INT
11760 && ! unsigned_comparison_p
11761 && HWI_COMPUTABLE_MODE_P (mode)
11762 && trunc_int_for_mode (const_op, mode) == const_op
11763 && have_insn_for (COMPARE, mode))
11764 {
11765 op0 = XEXP (op0, 0);
11766 continue;
11767 }
11768 break;
11769
11770 case SUBREG:
11771 /* Check for the case where we are comparing A - C1 with C2, that is
11772
11773 (subreg:MODE (plus (A) (-C1))) op (C2)
11774
11775 with C1 a constant, and try to lift the SUBREG, i.e. to do the
11776 comparison in the wider mode. One of the following two conditions
11777 must be true in order for this to be valid:
11778
11779 1. The mode extension results in the same bit pattern being added
11780 on both sides and the comparison is equality or unsigned. As
11781 C2 has been truncated to fit in MODE, the pattern can only be
11782 all 0s or all 1s.
11783
11784 2. The mode extension results in the sign bit being copied on
11785 each side.
11786
11787 The difficulty here is that we have predicates for A but not for
11788 (A - C1) so we need to check that C1 is within proper bounds so
11789 as to perturbate A as little as possible. */
11790
11791 if (mode_width <= HOST_BITS_PER_WIDE_INT
11792 && subreg_lowpart_p (op0)
11793 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) > mode_width
11794 && GET_CODE (SUBREG_REG (op0)) == PLUS
11795 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
11796 {
11797 machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
11798 rtx a = XEXP (SUBREG_REG (op0), 0);
11799 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
11800
11801 if ((c1 > 0
11802 && (unsigned HOST_WIDE_INT) c1
11803 < (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)
11804 && (equality_comparison_p || unsigned_comparison_p)
11805 /* (A - C1) zero-extends if it is positive and sign-extends
11806 if it is negative, C2 both zero- and sign-extends. */
11807 && ((0 == (nonzero_bits (a, inner_mode)
11808 & ~GET_MODE_MASK (mode))
11809 && const_op >= 0)
11810 /* (A - C1) sign-extends if it is positive and 1-extends
11811 if it is negative, C2 both sign- and 1-extends. */
11812 || (num_sign_bit_copies (a, inner_mode)
11813 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
11814 - mode_width)
11815 && const_op < 0)))
11816 || ((unsigned HOST_WIDE_INT) c1
11817 < (unsigned HOST_WIDE_INT) 1 << (mode_width - 2)
11818 /* (A - C1) always sign-extends, like C2. */
11819 && num_sign_bit_copies (a, inner_mode)
11820 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
11821 - (mode_width - 1))))
11822 {
11823 op0 = SUBREG_REG (op0);
11824 continue;
11825 }
11826 }
11827
11828 /* If the inner mode is narrower and we are extracting the low part,
11829 we can treat the SUBREG as if it were a ZERO_EXTEND. */
11830 if (subreg_lowpart_p (op0)
11831 && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0))) < mode_width)
11832 /* Fall through */ ;
11833 else
11834 break;
11835
11836 /* ... fall through ... */
11837
11838 case ZERO_EXTEND:
11839 mode = GET_MODE (XEXP (op0, 0));
11840 if (GET_MODE_CLASS (mode) == MODE_INT
11841 && (unsigned_comparison_p || equality_comparison_p)
11842 && HWI_COMPUTABLE_MODE_P (mode)
11843 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
11844 && const_op >= 0
11845 && have_insn_for (COMPARE, mode))
11846 {
11847 op0 = XEXP (op0, 0);
11848 continue;
11849 }
11850 break;
11851
11852 case PLUS:
11853 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
11854 this for equality comparisons due to pathological cases involving
11855 overflows. */
11856 if (equality_comparison_p
11857 && 0 != (tem = simplify_binary_operation (MINUS, mode,
11858 op1, XEXP (op0, 1))))
11859 {
11860 op0 = XEXP (op0, 0);
11861 op1 = tem;
11862 continue;
11863 }
11864
11865 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
11866 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
11867 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
11868 {
11869 op0 = XEXP (XEXP (op0, 0), 0);
11870 code = (code == LT ? EQ : NE);
11871 continue;
11872 }
11873 break;
11874
11875 case MINUS:
11876 /* We used to optimize signed comparisons against zero, but that
11877 was incorrect. Unsigned comparisons against zero (GTU, LEU)
11878 arrive here as equality comparisons, or (GEU, LTU) are
11879 optimized away. No need to special-case them. */
11880
11881 /* (eq (minus A B) C) -> (eq A (plus B C)) or
11882 (eq B (minus A C)), whichever simplifies. We can only do
11883 this for equality comparisons due to pathological cases involving
11884 overflows. */
11885 if (equality_comparison_p
11886 && 0 != (tem = simplify_binary_operation (PLUS, mode,
11887 XEXP (op0, 1), op1)))
11888 {
11889 op0 = XEXP (op0, 0);
11890 op1 = tem;
11891 continue;
11892 }
11893
11894 if (equality_comparison_p
11895 && 0 != (tem = simplify_binary_operation (MINUS, mode,
11896 XEXP (op0, 0), op1)))
11897 {
11898 op0 = XEXP (op0, 1);
11899 op1 = tem;
11900 continue;
11901 }
11902
11903 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
11904 of bits in X minus 1, is one iff X > 0. */
11905 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
11906 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
11907 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
11908 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
11909 {
11910 op0 = XEXP (op0, 1);
11911 code = (code == GE ? LE : GT);
11912 continue;
11913 }
11914 break;
11915
11916 case XOR:
11917 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
11918 if C is zero or B is a constant. */
11919 if (equality_comparison_p
11920 && 0 != (tem = simplify_binary_operation (XOR, mode,
11921 XEXP (op0, 1), op1)))
11922 {
11923 op0 = XEXP (op0, 0);
11924 op1 = tem;
11925 continue;
11926 }
11927 break;
11928
11929 case EQ: case NE:
11930 case UNEQ: case LTGT:
11931 case LT: case LTU: case UNLT: case LE: case LEU: case UNLE:
11932 case GT: case GTU: case UNGT: case GE: case GEU: case UNGE:
11933 case UNORDERED: case ORDERED:
11934 /* We can't do anything if OP0 is a condition code value, rather
11935 than an actual data value. */
11936 if (const_op != 0
11937 || CC0_P (XEXP (op0, 0))
11938 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
11939 break;
11940
11941 /* Get the two operands being compared. */
11942 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
11943 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
11944 else
11945 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
11946
11947 /* Check for the cases where we simply want the result of the
11948 earlier test or the opposite of that result. */
11949 if (code == NE || code == EQ
11950 || (val_signbit_known_set_p (GET_MODE (op0), STORE_FLAG_VALUE)
11951 && (code == LT || code == GE)))
11952 {
11953 enum rtx_code new_code;
11954 if (code == LT || code == NE)
11955 new_code = GET_CODE (op0);
11956 else
11957 new_code = reversed_comparison_code (op0, NULL);
11958
11959 if (new_code != UNKNOWN)
11960 {
11961 code = new_code;
11962 op0 = tem;
11963 op1 = tem1;
11964 continue;
11965 }
11966 }
11967 break;
11968
11969 case IOR:
11970 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
11971 iff X <= 0. */
11972 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
11973 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
11974 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
11975 {
11976 op0 = XEXP (op0, 1);
11977 code = (code == GE ? GT : LE);
11978 continue;
11979 }
11980 break;
11981
11982 case AND:
11983 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
11984 will be converted to a ZERO_EXTRACT later. */
11985 if (const_op == 0 && equality_comparison_p
11986 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11987 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
11988 {
11989 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
11990 XEXP (XEXP (op0, 0), 1));
11991 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
11992 continue;
11993 }
11994
11995 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
11996 zero and X is a comparison and C1 and C2 describe only bits set
11997 in STORE_FLAG_VALUE, we can compare with X. */
11998 if (const_op == 0 && equality_comparison_p
11999 && mode_width <= HOST_BITS_PER_WIDE_INT
12000 && CONST_INT_P (XEXP (op0, 1))
12001 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12002 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12003 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12004 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12005 {
12006 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12007 << INTVAL (XEXP (XEXP (op0, 0), 1)));
12008 if ((~STORE_FLAG_VALUE & mask) == 0
12009 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12010 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12011 && COMPARISON_P (tem))))
12012 {
12013 op0 = XEXP (XEXP (op0, 0), 0);
12014 continue;
12015 }
12016 }
12017
12018 /* If we are doing an equality comparison of an AND of a bit equal
12019 to the sign bit, replace this with a LT or GE comparison of
12020 the underlying value. */
12021 if (equality_comparison_p
12022 && const_op == 0
12023 && CONST_INT_P (XEXP (op0, 1))
12024 && mode_width <= HOST_BITS_PER_WIDE_INT
12025 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12026 == (unsigned HOST_WIDE_INT) 1 << (mode_width - 1)))
12027 {
12028 op0 = XEXP (op0, 0);
12029 code = (code == EQ ? GE : LT);
12030 continue;
12031 }
12032
12033 /* If this AND operation is really a ZERO_EXTEND from a narrower
12034 mode, the constant fits within that mode, and this is either an
12035 equality or unsigned comparison, try to do this comparison in
12036 the narrower mode.
12037
12038 Note that in:
12039
12040 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12041 -> (ne:DI (reg:SI 4) (const_int 0))
12042
12043 unless TRULY_NOOP_TRUNCATION allows it or the register is
12044 known to hold a value of the required mode the
12045 transformation is invalid. */
12046 if ((equality_comparison_p || unsigned_comparison_p)
12047 && CONST_INT_P (XEXP (op0, 1))
12048 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12049 & GET_MODE_MASK (mode))
12050 + 1)) >= 0
12051 && const_op >> i == 0
12052 && (tmode = mode_for_size (i, MODE_INT, 1)) != BLKmode
12053 && (TRULY_NOOP_TRUNCATION_MODES_P (tmode, GET_MODE (op0))
12054 || (REG_P (XEXP (op0, 0))
12055 && reg_truncated_to_mode (tmode, XEXP (op0, 0)))))
12056 {
12057 op0 = gen_lowpart (tmode, XEXP (op0, 0));
12058 continue;
12059 }
12060
12061 /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12062 fits in both M1 and M2 and the SUBREG is either paradoxical
12063 or represents the low part, permute the SUBREG and the AND
12064 and try again. */
12065 if (GET_CODE (XEXP (op0, 0)) == SUBREG
12066 && CONST_INT_P (XEXP (op0, 1)))
12067 {
12068 tmode = GET_MODE (SUBREG_REG (XEXP (op0, 0)));
12069 unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12070 /* Require an integral mode, to avoid creating something like
12071 (AND:SF ...). */
12072 if (SCALAR_INT_MODE_P (tmode)
12073 /* It is unsafe to commute the AND into the SUBREG if the
12074 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12075 not defined. As originally written the upper bits
12076 have a defined value due to the AND operation.
12077 However, if we commute the AND inside the SUBREG then
12078 they no longer have defined values and the meaning of
12079 the code has been changed.
12080 Also C1 should not change value in the smaller mode,
12081 see PR67028 (a positive C1 can become negative in the
12082 smaller mode, so that the AND does no longer mask the
12083 upper bits). */
12084 && ((WORD_REGISTER_OPERATIONS
12085 && mode_width > GET_MODE_PRECISION (tmode)
12086 && mode_width <= BITS_PER_WORD
12087 && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12088 || (mode_width <= GET_MODE_PRECISION (tmode)
12089 && subreg_lowpart_p (XEXP (op0, 0))))
12090 && mode_width <= HOST_BITS_PER_WIDE_INT
12091 && HWI_COMPUTABLE_MODE_P (tmode)
12092 && (c1 & ~mask) == 0
12093 && (c1 & ~GET_MODE_MASK (tmode)) == 0
12094 && c1 != mask
12095 && c1 != GET_MODE_MASK (tmode))
12096 {
12097 op0 = simplify_gen_binary (AND, tmode,
12098 SUBREG_REG (XEXP (op0, 0)),
12099 gen_int_mode (c1, tmode));
12100 op0 = gen_lowpart (mode, op0);
12101 continue;
12102 }
12103 }
12104
12105 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
12106 if (const_op == 0 && equality_comparison_p
12107 && XEXP (op0, 1) == const1_rtx
12108 && GET_CODE (XEXP (op0, 0)) == NOT)
12109 {
12110 op0 = simplify_and_const_int (NULL_RTX, mode,
12111 XEXP (XEXP (op0, 0), 0), 1);
12112 code = (code == NE ? EQ : NE);
12113 continue;
12114 }
12115
12116 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12117 (eq (and (lshiftrt X) 1) 0).
12118 Also handle the case where (not X) is expressed using xor. */
12119 if (const_op == 0 && equality_comparison_p
12120 && XEXP (op0, 1) == const1_rtx
12121 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12122 {
12123 rtx shift_op = XEXP (XEXP (op0, 0), 0);
12124 rtx shift_count = XEXP (XEXP (op0, 0), 1);
12125
12126 if (GET_CODE (shift_op) == NOT
12127 || (GET_CODE (shift_op) == XOR
12128 && CONST_INT_P (XEXP (shift_op, 1))
12129 && CONST_INT_P (shift_count)
12130 && HWI_COMPUTABLE_MODE_P (mode)
12131 && (UINTVAL (XEXP (shift_op, 1))
12132 == (unsigned HOST_WIDE_INT) 1
12133 << INTVAL (shift_count))))
12134 {
12135 op0
12136 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12137 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12138 code = (code == NE ? EQ : NE);
12139 continue;
12140 }
12141 }
12142 break;
12143
12144 case ASHIFT:
12145 /* If we have (compare (ashift FOO N) (const_int C)) and
12146 the high order N bits of FOO (N+1 if an inequality comparison)
12147 are known to be zero, we can do this by comparing FOO with C
12148 shifted right N bits so long as the low-order N bits of C are
12149 zero. */
12150 if (CONST_INT_P (XEXP (op0, 1))
12151 && INTVAL (XEXP (op0, 1)) >= 0
12152 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12153 < HOST_BITS_PER_WIDE_INT)
12154 && (((unsigned HOST_WIDE_INT) const_op
12155 & (((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1)))
12156 - 1)) == 0)
12157 && mode_width <= HOST_BITS_PER_WIDE_INT
12158 && (nonzero_bits (XEXP (op0, 0), mode)
12159 & ~(mask >> (INTVAL (XEXP (op0, 1))
12160 + ! equality_comparison_p))) == 0)
12161 {
12162 /* We must perform a logical shift, not an arithmetic one,
12163 as we want the top N bits of C to be zero. */
12164 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12165
12166 temp >>= INTVAL (XEXP (op0, 1));
12167 op1 = gen_int_mode (temp, mode);
12168 op0 = XEXP (op0, 0);
12169 continue;
12170 }
12171
12172 /* If we are doing a sign bit comparison, it means we are testing
12173 a particular bit. Convert it to the appropriate AND. */
12174 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12175 && mode_width <= HOST_BITS_PER_WIDE_INT)
12176 {
12177 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12178 ((unsigned HOST_WIDE_INT) 1
12179 << (mode_width - 1
12180 - INTVAL (XEXP (op0, 1)))));
12181 code = (code == LT ? NE : EQ);
12182 continue;
12183 }
12184
12185 /* If this an equality comparison with zero and we are shifting
12186 the low bit to the sign bit, we can convert this to an AND of the
12187 low-order bit. */
12188 if (const_op == 0 && equality_comparison_p
12189 && CONST_INT_P (XEXP (op0, 1))
12190 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12191 {
12192 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12193 continue;
12194 }
12195 break;
12196
12197 case ASHIFTRT:
12198 /* If this is an equality comparison with zero, we can do this
12199 as a logical shift, which might be much simpler. */
12200 if (equality_comparison_p && const_op == 0
12201 && CONST_INT_P (XEXP (op0, 1)))
12202 {
12203 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12204 XEXP (op0, 0),
12205 INTVAL (XEXP (op0, 1)));
12206 continue;
12207 }
12208
12209 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12210 do the comparison in a narrower mode. */
12211 if (! unsigned_comparison_p
12212 && CONST_INT_P (XEXP (op0, 1))
12213 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12214 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12215 && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
12216 MODE_INT, 1)) != BLKmode
12217 && (((unsigned HOST_WIDE_INT) const_op
12218 + (GET_MODE_MASK (tmode) >> 1) + 1)
12219 <= GET_MODE_MASK (tmode)))
12220 {
12221 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12222 continue;
12223 }
12224
12225 /* Likewise if OP0 is a PLUS of a sign extension with a
12226 constant, which is usually represented with the PLUS
12227 between the shifts. */
12228 if (! unsigned_comparison_p
12229 && CONST_INT_P (XEXP (op0, 1))
12230 && GET_CODE (XEXP (op0, 0)) == PLUS
12231 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12232 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12233 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12234 && (tmode = mode_for_size (mode_width - INTVAL (XEXP (op0, 1)),
12235 MODE_INT, 1)) != BLKmode
12236 && (((unsigned HOST_WIDE_INT) const_op
12237 + (GET_MODE_MASK (tmode) >> 1) + 1)
12238 <= GET_MODE_MASK (tmode)))
12239 {
12240 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12241 rtx add_const = XEXP (XEXP (op0, 0), 1);
12242 rtx new_const = simplify_gen_binary (ASHIFTRT, GET_MODE (op0),
12243 add_const, XEXP (op0, 1));
12244
12245 op0 = simplify_gen_binary (PLUS, tmode,
12246 gen_lowpart (tmode, inner),
12247 new_const);
12248 continue;
12249 }
12250
12251 /* ... fall through ... */
12252 case LSHIFTRT:
12253 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12254 the low order N bits of FOO are known to be zero, we can do this
12255 by comparing FOO with C shifted left N bits so long as no
12256 overflow occurs. Even if the low order N bits of FOO aren't known
12257 to be zero, if the comparison is >= or < we can use the same
12258 optimization and for > or <= by setting all the low
12259 order N bits in the comparison constant. */
12260 if (CONST_INT_P (XEXP (op0, 1))
12261 && INTVAL (XEXP (op0, 1)) > 0
12262 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12263 && mode_width <= HOST_BITS_PER_WIDE_INT
12264 && (((unsigned HOST_WIDE_INT) const_op
12265 + (GET_CODE (op0) != LSHIFTRT
12266 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12267 + 1)
12268 : 0))
12269 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12270 {
12271 unsigned HOST_WIDE_INT low_bits
12272 = (nonzero_bits (XEXP (op0, 0), mode)
12273 & (((unsigned HOST_WIDE_INT) 1
12274 << INTVAL (XEXP (op0, 1))) - 1));
12275 if (low_bits == 0 || !equality_comparison_p)
12276 {
12277 /* If the shift was logical, then we must make the condition
12278 unsigned. */
12279 if (GET_CODE (op0) == LSHIFTRT)
12280 code = unsigned_condition (code);
12281
12282 const_op <<= INTVAL (XEXP (op0, 1));
12283 if (low_bits != 0
12284 && (code == GT || code == GTU
12285 || code == LE || code == LEU))
12286 const_op
12287 |= (((HOST_WIDE_INT) 1 << INTVAL (XEXP (op0, 1))) - 1);
12288 op1 = GEN_INT (const_op);
12289 op0 = XEXP (op0, 0);
12290 continue;
12291 }
12292 }
12293
12294 /* If we are using this shift to extract just the sign bit, we
12295 can replace this with an LT or GE comparison. */
12296 if (const_op == 0
12297 && (equality_comparison_p || sign_bit_comparison_p)
12298 && CONST_INT_P (XEXP (op0, 1))
12299 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12300 {
12301 op0 = XEXP (op0, 0);
12302 code = (code == NE || code == GT ? LT : GE);
12303 continue;
12304 }
12305 break;
12306
12307 default:
12308 break;
12309 }
12310
12311 break;
12312 }
12313
12314 /* Now make any compound operations involved in this comparison. Then,
12315 check for an outmost SUBREG on OP0 that is not doing anything or is
12316 paradoxical. The latter transformation must only be performed when
12317 it is known that the "extra" bits will be the same in op0 and op1 or
12318 that they don't matter. There are three cases to consider:
12319
12320 1. SUBREG_REG (op0) is a register. In this case the bits are don't
12321 care bits and we can assume they have any convenient value. So
12322 making the transformation is safe.
12323
12324 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not defined.
12325 In this case the upper bits of op0 are undefined. We should not make
12326 the simplification in that case as we do not know the contents of
12327 those bits.
12328
12329 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is defined and not
12330 UNKNOWN. In that case we know those bits are zeros or ones. We must
12331 also be sure that they are the same as the upper bits of op1.
12332
12333 We can never remove a SUBREG for a non-equality comparison because
12334 the sign bit is in a different place in the underlying object. */
12335
12336 op0 = make_compound_operation (op0, op1 == const0_rtx ? COMPARE : SET);
12337 op1 = make_compound_operation (op1, SET);
12338
12339 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
12340 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
12341 && GET_MODE_CLASS (GET_MODE (SUBREG_REG (op0))) == MODE_INT
12342 && (code == NE || code == EQ))
12343 {
12344 if (paradoxical_subreg_p (op0))
12345 {
12346 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
12347 implemented. */
12348 if (REG_P (SUBREG_REG (op0)))
12349 {
12350 op0 = SUBREG_REG (op0);
12351 op1 = gen_lowpart (GET_MODE (op0), op1);
12352 }
12353 }
12354 else if ((GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op0)))
12355 <= HOST_BITS_PER_WIDE_INT)
12356 && (nonzero_bits (SUBREG_REG (op0),
12357 GET_MODE (SUBREG_REG (op0)))
12358 & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
12359 {
12360 tem = gen_lowpart (GET_MODE (SUBREG_REG (op0)), op1);
12361
12362 if ((nonzero_bits (tem, GET_MODE (SUBREG_REG (op0)))
12363 & ~GET_MODE_MASK (GET_MODE (op0))) == 0)
12364 op0 = SUBREG_REG (op0), op1 = tem;
12365 }
12366 }
12367
12368 /* We now do the opposite procedure: Some machines don't have compare
12369 insns in all modes. If OP0's mode is an integer mode smaller than a
12370 word and we can't do a compare in that mode, see if there is a larger
12371 mode for which we can do the compare. There are a number of cases in
12372 which we can use the wider mode. */
12373
12374 mode = GET_MODE (op0);
12375 if (mode != VOIDmode && GET_MODE_CLASS (mode) == MODE_INT
12376 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
12377 && ! have_insn_for (COMPARE, mode))
12378 for (tmode = GET_MODE_WIDER_MODE (mode);
12379 (tmode != VOIDmode && HWI_COMPUTABLE_MODE_P (tmode));
12380 tmode = GET_MODE_WIDER_MODE (tmode))
12381 if (have_insn_for (COMPARE, tmode))
12382 {
12383 int zero_extended;
12384
12385 /* If this is a test for negative, we can make an explicit
12386 test of the sign bit. Test this first so we can use
12387 a paradoxical subreg to extend OP0. */
12388
12389 if (op1 == const0_rtx && (code == LT || code == GE)
12390 && HWI_COMPUTABLE_MODE_P (mode))
12391 {
12392 unsigned HOST_WIDE_INT sign
12393 = (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1);
12394 op0 = simplify_gen_binary (AND, tmode,
12395 gen_lowpart (tmode, op0),
12396 gen_int_mode (sign, tmode));
12397 code = (code == LT) ? NE : EQ;
12398 break;
12399 }
12400
12401 /* If the only nonzero bits in OP0 and OP1 are those in the
12402 narrower mode and this is an equality or unsigned comparison,
12403 we can use the wider mode. Similarly for sign-extended
12404 values, in which case it is true for all comparisons. */
12405 zero_extended = ((code == EQ || code == NE
12406 || code == GEU || code == GTU
12407 || code == LEU || code == LTU)
12408 && (nonzero_bits (op0, tmode)
12409 & ~GET_MODE_MASK (mode)) == 0
12410 && ((CONST_INT_P (op1)
12411 || (nonzero_bits (op1, tmode)
12412 & ~GET_MODE_MASK (mode)) == 0)));
12413
12414 if (zero_extended
12415 || ((num_sign_bit_copies (op0, tmode)
12416 > (unsigned int) (GET_MODE_PRECISION (tmode)
12417 - GET_MODE_PRECISION (mode)))
12418 && (num_sign_bit_copies (op1, tmode)
12419 > (unsigned int) (GET_MODE_PRECISION (tmode)
12420 - GET_MODE_PRECISION (mode)))))
12421 {
12422 /* If OP0 is an AND and we don't have an AND in MODE either,
12423 make a new AND in the proper mode. */
12424 if (GET_CODE (op0) == AND
12425 && !have_insn_for (AND, mode))
12426 op0 = simplify_gen_binary (AND, tmode,
12427 gen_lowpart (tmode,
12428 XEXP (op0, 0)),
12429 gen_lowpart (tmode,
12430 XEXP (op0, 1)));
12431 else
12432 {
12433 if (zero_extended)
12434 {
12435 op0 = simplify_gen_unary (ZERO_EXTEND, tmode, op0, mode);
12436 op1 = simplify_gen_unary (ZERO_EXTEND, tmode, op1, mode);
12437 }
12438 else
12439 {
12440 op0 = simplify_gen_unary (SIGN_EXTEND, tmode, op0, mode);
12441 op1 = simplify_gen_unary (SIGN_EXTEND, tmode, op1, mode);
12442 }
12443 break;
12444 }
12445 }
12446 }
12447
12448 /* We may have changed the comparison operands. Re-canonicalize. */
12449 if (swap_commutative_operands_p (op0, op1))
12450 {
12451 std::swap (op0, op1);
12452 code = swap_condition (code);
12453 }
12454
12455 /* If this machine only supports a subset of valid comparisons, see if we
12456 can convert an unsupported one into a supported one. */
12457 target_canonicalize_comparison (&code, &op0, &op1, 0);
12458
12459 *pop0 = op0;
12460 *pop1 = op1;
12461
12462 return code;
12463 }
12464 \f
12465 /* Utility function for record_value_for_reg. Count number of
12466 rtxs in X. */
12467 static int
12468 count_rtxs (rtx x)
12469 {
12470 enum rtx_code code = GET_CODE (x);
12471 const char *fmt;
12472 int i, j, ret = 1;
12473
12474 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
12475 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
12476 {
12477 rtx x0 = XEXP (x, 0);
12478 rtx x1 = XEXP (x, 1);
12479
12480 if (x0 == x1)
12481 return 1 + 2 * count_rtxs (x0);
12482
12483 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
12484 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
12485 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12486 return 2 + 2 * count_rtxs (x0)
12487 + count_rtxs (x == XEXP (x1, 0)
12488 ? XEXP (x1, 1) : XEXP (x1, 0));
12489
12490 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
12491 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
12492 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12493 return 2 + 2 * count_rtxs (x1)
12494 + count_rtxs (x == XEXP (x0, 0)
12495 ? XEXP (x0, 1) : XEXP (x0, 0));
12496 }
12497
12498 fmt = GET_RTX_FORMAT (code);
12499 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12500 if (fmt[i] == 'e')
12501 ret += count_rtxs (XEXP (x, i));
12502 else if (fmt[i] == 'E')
12503 for (j = 0; j < XVECLEN (x, i); j++)
12504 ret += count_rtxs (XVECEXP (x, i, j));
12505
12506 return ret;
12507 }
12508 \f
12509 /* Utility function for following routine. Called when X is part of a value
12510 being stored into last_set_value. Sets last_set_table_tick
12511 for each register mentioned. Similar to mention_regs in cse.c */
12512
12513 static void
12514 update_table_tick (rtx x)
12515 {
12516 enum rtx_code code = GET_CODE (x);
12517 const char *fmt = GET_RTX_FORMAT (code);
12518 int i, j;
12519
12520 if (code == REG)
12521 {
12522 unsigned int regno = REGNO (x);
12523 unsigned int endregno = END_REGNO (x);
12524 unsigned int r;
12525
12526 for (r = regno; r < endregno; r++)
12527 {
12528 reg_stat_type *rsp = &reg_stat[r];
12529 rsp->last_set_table_tick = label_tick;
12530 }
12531
12532 return;
12533 }
12534
12535 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12536 if (fmt[i] == 'e')
12537 {
12538 /* Check for identical subexpressions. If x contains
12539 identical subexpression we only have to traverse one of
12540 them. */
12541 if (i == 0 && ARITHMETIC_P (x))
12542 {
12543 /* Note that at this point x1 has already been
12544 processed. */
12545 rtx x0 = XEXP (x, 0);
12546 rtx x1 = XEXP (x, 1);
12547
12548 /* If x0 and x1 are identical then there is no need to
12549 process x0. */
12550 if (x0 == x1)
12551 break;
12552
12553 /* If x0 is identical to a subexpression of x1 then while
12554 processing x1, x0 has already been processed. Thus we
12555 are done with x. */
12556 if (ARITHMETIC_P (x1)
12557 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12558 break;
12559
12560 /* If x1 is identical to a subexpression of x0 then we
12561 still have to process the rest of x0. */
12562 if (ARITHMETIC_P (x0)
12563 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12564 {
12565 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
12566 break;
12567 }
12568 }
12569
12570 update_table_tick (XEXP (x, i));
12571 }
12572 else if (fmt[i] == 'E')
12573 for (j = 0; j < XVECLEN (x, i); j++)
12574 update_table_tick (XVECEXP (x, i, j));
12575 }
12576
12577 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
12578 are saying that the register is clobbered and we no longer know its
12579 value. If INSN is zero, don't update reg_stat[].last_set; this is
12580 only permitted with VALUE also zero and is used to invalidate the
12581 register. */
12582
12583 static void
12584 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
12585 {
12586 unsigned int regno = REGNO (reg);
12587 unsigned int endregno = END_REGNO (reg);
12588 unsigned int i;
12589 reg_stat_type *rsp;
12590
12591 /* If VALUE contains REG and we have a previous value for REG, substitute
12592 the previous value. */
12593 if (value && insn && reg_overlap_mentioned_p (reg, value))
12594 {
12595 rtx tem;
12596
12597 /* Set things up so get_last_value is allowed to see anything set up to
12598 our insn. */
12599 subst_low_luid = DF_INSN_LUID (insn);
12600 tem = get_last_value (reg);
12601
12602 /* If TEM is simply a binary operation with two CLOBBERs as operands,
12603 it isn't going to be useful and will take a lot of time to process,
12604 so just use the CLOBBER. */
12605
12606 if (tem)
12607 {
12608 if (ARITHMETIC_P (tem)
12609 && GET_CODE (XEXP (tem, 0)) == CLOBBER
12610 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
12611 tem = XEXP (tem, 0);
12612 else if (count_occurrences (value, reg, 1) >= 2)
12613 {
12614 /* If there are two or more occurrences of REG in VALUE,
12615 prevent the value from growing too much. */
12616 if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
12617 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
12618 }
12619
12620 value = replace_rtx (copy_rtx (value), reg, tem);
12621 }
12622 }
12623
12624 /* For each register modified, show we don't know its value, that
12625 we don't know about its bitwise content, that its value has been
12626 updated, and that we don't know the location of the death of the
12627 register. */
12628 for (i = regno; i < endregno; i++)
12629 {
12630 rsp = &reg_stat[i];
12631
12632 if (insn)
12633 rsp->last_set = insn;
12634
12635 rsp->last_set_value = 0;
12636 rsp->last_set_mode = VOIDmode;
12637 rsp->last_set_nonzero_bits = 0;
12638 rsp->last_set_sign_bit_copies = 0;
12639 rsp->last_death = 0;
12640 rsp->truncated_to_mode = VOIDmode;
12641 }
12642
12643 /* Mark registers that are being referenced in this value. */
12644 if (value)
12645 update_table_tick (value);
12646
12647 /* Now update the status of each register being set.
12648 If someone is using this register in this block, set this register
12649 to invalid since we will get confused between the two lives in this
12650 basic block. This makes using this register always invalid. In cse, we
12651 scan the table to invalidate all entries using this register, but this
12652 is too much work for us. */
12653
12654 for (i = regno; i < endregno; i++)
12655 {
12656 rsp = &reg_stat[i];
12657 rsp->last_set_label = label_tick;
12658 if (!insn
12659 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
12660 rsp->last_set_invalid = 1;
12661 else
12662 rsp->last_set_invalid = 0;
12663 }
12664
12665 /* The value being assigned might refer to X (like in "x++;"). In that
12666 case, we must replace it with (clobber (const_int 0)) to prevent
12667 infinite loops. */
12668 rsp = &reg_stat[regno];
12669 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
12670 {
12671 value = copy_rtx (value);
12672 if (!get_last_value_validate (&value, insn, label_tick, 1))
12673 value = 0;
12674 }
12675
12676 /* For the main register being modified, update the value, the mode, the
12677 nonzero bits, and the number of sign bit copies. */
12678
12679 rsp->last_set_value = value;
12680
12681 if (value)
12682 {
12683 machine_mode mode = GET_MODE (reg);
12684 subst_low_luid = DF_INSN_LUID (insn);
12685 rsp->last_set_mode = mode;
12686 if (GET_MODE_CLASS (mode) == MODE_INT
12687 && HWI_COMPUTABLE_MODE_P (mode))
12688 mode = nonzero_bits_mode;
12689 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
12690 rsp->last_set_sign_bit_copies
12691 = num_sign_bit_copies (value, GET_MODE (reg));
12692 }
12693 }
12694
12695 /* Called via note_stores from record_dead_and_set_regs to handle one
12696 SET or CLOBBER in an insn. DATA is the instruction in which the
12697 set is occurring. */
12698
12699 static void
12700 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
12701 {
12702 rtx_insn *record_dead_insn = (rtx_insn *) data;
12703
12704 if (GET_CODE (dest) == SUBREG)
12705 dest = SUBREG_REG (dest);
12706
12707 if (!record_dead_insn)
12708 {
12709 if (REG_P (dest))
12710 record_value_for_reg (dest, NULL, NULL_RTX);
12711 return;
12712 }
12713
12714 if (REG_P (dest))
12715 {
12716 /* If we are setting the whole register, we know its value. Otherwise
12717 show that we don't know the value. We can handle SUBREG in
12718 some cases. */
12719 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
12720 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
12721 else if (GET_CODE (setter) == SET
12722 && GET_CODE (SET_DEST (setter)) == SUBREG
12723 && SUBREG_REG (SET_DEST (setter)) == dest
12724 && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
12725 && subreg_lowpart_p (SET_DEST (setter)))
12726 record_value_for_reg (dest, record_dead_insn,
12727 gen_lowpart (GET_MODE (dest),
12728 SET_SRC (setter)));
12729 else
12730 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
12731 }
12732 else if (MEM_P (dest)
12733 /* Ignore pushes, they clobber nothing. */
12734 && ! push_operand (dest, GET_MODE (dest)))
12735 mem_last_set = DF_INSN_LUID (record_dead_insn);
12736 }
12737
12738 /* Update the records of when each REG was most recently set or killed
12739 for the things done by INSN. This is the last thing done in processing
12740 INSN in the combiner loop.
12741
12742 We update reg_stat[], in particular fields last_set, last_set_value,
12743 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
12744 last_death, and also the similar information mem_last_set (which insn
12745 most recently modified memory) and last_call_luid (which insn was the
12746 most recent subroutine call). */
12747
12748 static void
12749 record_dead_and_set_regs (rtx_insn *insn)
12750 {
12751 rtx link;
12752 unsigned int i;
12753
12754 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
12755 {
12756 if (REG_NOTE_KIND (link) == REG_DEAD
12757 && REG_P (XEXP (link, 0)))
12758 {
12759 unsigned int regno = REGNO (XEXP (link, 0));
12760 unsigned int endregno = END_REGNO (XEXP (link, 0));
12761
12762 for (i = regno; i < endregno; i++)
12763 {
12764 reg_stat_type *rsp;
12765
12766 rsp = &reg_stat[i];
12767 rsp->last_death = insn;
12768 }
12769 }
12770 else if (REG_NOTE_KIND (link) == REG_INC)
12771 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
12772 }
12773
12774 if (CALL_P (insn))
12775 {
12776 hard_reg_set_iterator hrsi;
12777 EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
12778 {
12779 reg_stat_type *rsp;
12780
12781 rsp = &reg_stat[i];
12782 rsp->last_set_invalid = 1;
12783 rsp->last_set = insn;
12784 rsp->last_set_value = 0;
12785 rsp->last_set_mode = VOIDmode;
12786 rsp->last_set_nonzero_bits = 0;
12787 rsp->last_set_sign_bit_copies = 0;
12788 rsp->last_death = 0;
12789 rsp->truncated_to_mode = VOIDmode;
12790 }
12791
12792 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
12793
12794 /* We can't combine into a call pattern. Remember, though, that
12795 the return value register is set at this LUID. We could
12796 still replace a register with the return value from the
12797 wrong subroutine call! */
12798 note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
12799 }
12800 else
12801 note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
12802 }
12803
12804 /* If a SUBREG has the promoted bit set, it is in fact a property of the
12805 register present in the SUBREG, so for each such SUBREG go back and
12806 adjust nonzero and sign bit information of the registers that are
12807 known to have some zero/sign bits set.
12808
12809 This is needed because when combine blows the SUBREGs away, the
12810 information on zero/sign bits is lost and further combines can be
12811 missed because of that. */
12812
12813 static void
12814 record_promoted_value (rtx_insn *insn, rtx subreg)
12815 {
12816 struct insn_link *links;
12817 rtx set;
12818 unsigned int regno = REGNO (SUBREG_REG (subreg));
12819 machine_mode mode = GET_MODE (subreg);
12820
12821 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
12822 return;
12823
12824 for (links = LOG_LINKS (insn); links;)
12825 {
12826 reg_stat_type *rsp;
12827
12828 insn = links->insn;
12829 set = single_set (insn);
12830
12831 if (! set || !REG_P (SET_DEST (set))
12832 || REGNO (SET_DEST (set)) != regno
12833 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
12834 {
12835 links = links->next;
12836 continue;
12837 }
12838
12839 rsp = &reg_stat[regno];
12840 if (rsp->last_set == insn)
12841 {
12842 if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
12843 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
12844 }
12845
12846 if (REG_P (SET_SRC (set)))
12847 {
12848 regno = REGNO (SET_SRC (set));
12849 links = LOG_LINKS (insn);
12850 }
12851 else
12852 break;
12853 }
12854 }
12855
12856 /* Check if X, a register, is known to contain a value already
12857 truncated to MODE. In this case we can use a subreg to refer to
12858 the truncated value even though in the generic case we would need
12859 an explicit truncation. */
12860
12861 static bool
12862 reg_truncated_to_mode (machine_mode mode, const_rtx x)
12863 {
12864 reg_stat_type *rsp = &reg_stat[REGNO (x)];
12865 machine_mode truncated = rsp->truncated_to_mode;
12866
12867 if (truncated == 0
12868 || rsp->truncation_label < label_tick_ebb_start)
12869 return false;
12870 if (GET_MODE_SIZE (truncated) <= GET_MODE_SIZE (mode))
12871 return true;
12872 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
12873 return true;
12874 return false;
12875 }
12876
12877 /* If X is a hard reg or a subreg record the mode that the register is
12878 accessed in. For non-TRULY_NOOP_TRUNCATION targets we might be able
12879 to turn a truncate into a subreg using this information. Return true
12880 if traversing X is complete. */
12881
12882 static bool
12883 record_truncated_value (rtx x)
12884 {
12885 machine_mode truncated_mode;
12886 reg_stat_type *rsp;
12887
12888 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
12889 {
12890 machine_mode original_mode = GET_MODE (SUBREG_REG (x));
12891 truncated_mode = GET_MODE (x);
12892
12893 if (GET_MODE_SIZE (original_mode) <= GET_MODE_SIZE (truncated_mode))
12894 return true;
12895
12896 if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
12897 return true;
12898
12899 x = SUBREG_REG (x);
12900 }
12901 /* ??? For hard-regs we now record everything. We might be able to
12902 optimize this using last_set_mode. */
12903 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
12904 truncated_mode = GET_MODE (x);
12905 else
12906 return false;
12907
12908 rsp = &reg_stat[REGNO (x)];
12909 if (rsp->truncated_to_mode == 0
12910 || rsp->truncation_label < label_tick_ebb_start
12911 || (GET_MODE_SIZE (truncated_mode)
12912 < GET_MODE_SIZE (rsp->truncated_to_mode)))
12913 {
12914 rsp->truncated_to_mode = truncated_mode;
12915 rsp->truncation_label = label_tick;
12916 }
12917
12918 return true;
12919 }
12920
12921 /* Callback for note_uses. Find hardregs and subregs of pseudos and
12922 the modes they are used in. This can help truning TRUNCATEs into
12923 SUBREGs. */
12924
12925 static void
12926 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
12927 {
12928 subrtx_var_iterator::array_type array;
12929 FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
12930 if (record_truncated_value (*iter))
12931 iter.skip_subrtxes ();
12932 }
12933
12934 /* Scan X for promoted SUBREGs. For each one found,
12935 note what it implies to the registers used in it. */
12936
12937 static void
12938 check_promoted_subreg (rtx_insn *insn, rtx x)
12939 {
12940 if (GET_CODE (x) == SUBREG
12941 && SUBREG_PROMOTED_VAR_P (x)
12942 && REG_P (SUBREG_REG (x)))
12943 record_promoted_value (insn, x);
12944 else
12945 {
12946 const char *format = GET_RTX_FORMAT (GET_CODE (x));
12947 int i, j;
12948
12949 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
12950 switch (format[i])
12951 {
12952 case 'e':
12953 check_promoted_subreg (insn, XEXP (x, i));
12954 break;
12955 case 'V':
12956 case 'E':
12957 if (XVEC (x, i) != 0)
12958 for (j = 0; j < XVECLEN (x, i); j++)
12959 check_promoted_subreg (insn, XVECEXP (x, i, j));
12960 break;
12961 }
12962 }
12963 }
12964 \f
12965 /* Verify that all the registers and memory references mentioned in *LOC are
12966 still valid. *LOC was part of a value set in INSN when label_tick was
12967 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
12968 the invalid references with (clobber (const_int 0)) and return 1. This
12969 replacement is useful because we often can get useful information about
12970 the form of a value (e.g., if it was produced by a shift that always
12971 produces -1 or 0) even though we don't know exactly what registers it
12972 was produced from. */
12973
12974 static int
12975 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
12976 {
12977 rtx x = *loc;
12978 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
12979 int len = GET_RTX_LENGTH (GET_CODE (x));
12980 int i, j;
12981
12982 if (REG_P (x))
12983 {
12984 unsigned int regno = REGNO (x);
12985 unsigned int endregno = END_REGNO (x);
12986 unsigned int j;
12987
12988 for (j = regno; j < endregno; j++)
12989 {
12990 reg_stat_type *rsp = &reg_stat[j];
12991 if (rsp->last_set_invalid
12992 /* If this is a pseudo-register that was only set once and not
12993 live at the beginning of the function, it is always valid. */
12994 || (! (regno >= FIRST_PSEUDO_REGISTER
12995 && regno < reg_n_sets_max
12996 && REG_N_SETS (regno) == 1
12997 && (!REGNO_REG_SET_P
12998 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
12999 regno)))
13000 && rsp->last_set_label > tick))
13001 {
13002 if (replace)
13003 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13004 return replace;
13005 }
13006 }
13007
13008 return 1;
13009 }
13010 /* If this is a memory reference, make sure that there were no stores after
13011 it that might have clobbered the value. We don't have alias info, so we
13012 assume any store invalidates it. Moreover, we only have local UIDs, so
13013 we also assume that there were stores in the intervening basic blocks. */
13014 else if (MEM_P (x) && !MEM_READONLY_P (x)
13015 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
13016 {
13017 if (replace)
13018 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13019 return replace;
13020 }
13021
13022 for (i = 0; i < len; i++)
13023 {
13024 if (fmt[i] == 'e')
13025 {
13026 /* Check for identical subexpressions. If x contains
13027 identical subexpression we only have to traverse one of
13028 them. */
13029 if (i == 1 && ARITHMETIC_P (x))
13030 {
13031 /* Note that at this point x0 has already been checked
13032 and found valid. */
13033 rtx x0 = XEXP (x, 0);
13034 rtx x1 = XEXP (x, 1);
13035
13036 /* If x0 and x1 are identical then x is also valid. */
13037 if (x0 == x1)
13038 return 1;
13039
13040 /* If x1 is identical to a subexpression of x0 then
13041 while checking x0, x1 has already been checked. Thus
13042 it is valid and so as x. */
13043 if (ARITHMETIC_P (x0)
13044 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13045 return 1;
13046
13047 /* If x0 is identical to a subexpression of x1 then x is
13048 valid iff the rest of x1 is valid. */
13049 if (ARITHMETIC_P (x1)
13050 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13051 return
13052 get_last_value_validate (&XEXP (x1,
13053 x0 == XEXP (x1, 0) ? 1 : 0),
13054 insn, tick, replace);
13055 }
13056
13057 if (get_last_value_validate (&XEXP (x, i), insn, tick,
13058 replace) == 0)
13059 return 0;
13060 }
13061 else if (fmt[i] == 'E')
13062 for (j = 0; j < XVECLEN (x, i); j++)
13063 if (get_last_value_validate (&XVECEXP (x, i, j),
13064 insn, tick, replace) == 0)
13065 return 0;
13066 }
13067
13068 /* If we haven't found a reason for it to be invalid, it is valid. */
13069 return 1;
13070 }
13071
13072 /* Get the last value assigned to X, if known. Some registers
13073 in the value may be replaced with (clobber (const_int 0)) if their value
13074 is known longer known reliably. */
13075
13076 static rtx
13077 get_last_value (const_rtx x)
13078 {
13079 unsigned int regno;
13080 rtx value;
13081 reg_stat_type *rsp;
13082
13083 /* If this is a non-paradoxical SUBREG, get the value of its operand and
13084 then convert it to the desired mode. If this is a paradoxical SUBREG,
13085 we cannot predict what values the "extra" bits might have. */
13086 if (GET_CODE (x) == SUBREG
13087 && subreg_lowpart_p (x)
13088 && !paradoxical_subreg_p (x)
13089 && (value = get_last_value (SUBREG_REG (x))) != 0)
13090 return gen_lowpart (GET_MODE (x), value);
13091
13092 if (!REG_P (x))
13093 return 0;
13094
13095 regno = REGNO (x);
13096 rsp = &reg_stat[regno];
13097 value = rsp->last_set_value;
13098
13099 /* If we don't have a value, or if it isn't for this basic block and
13100 it's either a hard register, set more than once, or it's a live
13101 at the beginning of the function, return 0.
13102
13103 Because if it's not live at the beginning of the function then the reg
13104 is always set before being used (is never used without being set).
13105 And, if it's set only once, and it's always set before use, then all
13106 uses must have the same last value, even if it's not from this basic
13107 block. */
13108
13109 if (value == 0
13110 || (rsp->last_set_label < label_tick_ebb_start
13111 && (regno < FIRST_PSEUDO_REGISTER
13112 || regno >= reg_n_sets_max
13113 || REG_N_SETS (regno) != 1
13114 || REGNO_REG_SET_P
13115 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
13116 return 0;
13117
13118 /* If the value was set in a later insn than the ones we are processing,
13119 we can't use it even if the register was only set once. */
13120 if (rsp->last_set_label == label_tick
13121 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
13122 return 0;
13123
13124 /* If the value has all its registers valid, return it. */
13125 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
13126 return value;
13127
13128 /* Otherwise, make a copy and replace any invalid register with
13129 (clobber (const_int 0)). If that fails for some reason, return 0. */
13130
13131 value = copy_rtx (value);
13132 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
13133 return value;
13134
13135 return 0;
13136 }
13137 \f
13138 /* Return nonzero if expression X refers to a REG or to memory
13139 that is set in an instruction more recent than FROM_LUID. */
13140
13141 static int
13142 use_crosses_set_p (const_rtx x, int from_luid)
13143 {
13144 const char *fmt;
13145 int i;
13146 enum rtx_code code = GET_CODE (x);
13147
13148 if (code == REG)
13149 {
13150 unsigned int regno = REGNO (x);
13151 unsigned endreg = END_REGNO (x);
13152
13153 #ifdef PUSH_ROUNDING
13154 /* Don't allow uses of the stack pointer to be moved,
13155 because we don't know whether the move crosses a push insn. */
13156 if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
13157 return 1;
13158 #endif
13159 for (; regno < endreg; regno++)
13160 {
13161 reg_stat_type *rsp = &reg_stat[regno];
13162 if (rsp->last_set
13163 && rsp->last_set_label == label_tick
13164 && DF_INSN_LUID (rsp->last_set) > from_luid)
13165 return 1;
13166 }
13167 return 0;
13168 }
13169
13170 if (code == MEM && mem_last_set > from_luid)
13171 return 1;
13172
13173 fmt = GET_RTX_FORMAT (code);
13174
13175 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13176 {
13177 if (fmt[i] == 'E')
13178 {
13179 int j;
13180 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13181 if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
13182 return 1;
13183 }
13184 else if (fmt[i] == 'e'
13185 && use_crosses_set_p (XEXP (x, i), from_luid))
13186 return 1;
13187 }
13188 return 0;
13189 }
13190 \f
13191 /* Define three variables used for communication between the following
13192 routines. */
13193
13194 static unsigned int reg_dead_regno, reg_dead_endregno;
13195 static int reg_dead_flag;
13196
13197 /* Function called via note_stores from reg_dead_at_p.
13198
13199 If DEST is within [reg_dead_regno, reg_dead_endregno), set
13200 reg_dead_flag to 1 if X is a CLOBBER and to -1 it is a SET. */
13201
13202 static void
13203 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
13204 {
13205 unsigned int regno, endregno;
13206
13207 if (!REG_P (dest))
13208 return;
13209
13210 regno = REGNO (dest);
13211 endregno = END_REGNO (dest);
13212 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
13213 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
13214 }
13215
13216 /* Return nonzero if REG is known to be dead at INSN.
13217
13218 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
13219 referencing REG, it is dead. If we hit a SET referencing REG, it is
13220 live. Otherwise, see if it is live or dead at the start of the basic
13221 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
13222 must be assumed to be always live. */
13223
13224 static int
13225 reg_dead_at_p (rtx reg, rtx_insn *insn)
13226 {
13227 basic_block block;
13228 unsigned int i;
13229
13230 /* Set variables for reg_dead_at_p_1. */
13231 reg_dead_regno = REGNO (reg);
13232 reg_dead_endregno = END_REGNO (reg);
13233
13234 reg_dead_flag = 0;
13235
13236 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
13237 we allow the machine description to decide whether use-and-clobber
13238 patterns are OK. */
13239 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
13240 {
13241 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13242 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13243 return 0;
13244 }
13245
13246 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
13247 beginning of basic block. */
13248 block = BLOCK_FOR_INSN (insn);
13249 for (;;)
13250 {
13251 if (INSN_P (insn))
13252 {
13253 if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13254 return 1;
13255
13256 note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
13257 if (reg_dead_flag)
13258 return reg_dead_flag == 1 ? 1 : 0;
13259
13260 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13261 return 1;
13262 }
13263
13264 if (insn == BB_HEAD (block))
13265 break;
13266
13267 insn = PREV_INSN (insn);
13268 }
13269
13270 /* Look at live-in sets for the basic block that we were in. */
13271 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13272 if (REGNO_REG_SET_P (df_get_live_in (block), i))
13273 return 0;
13274
13275 return 1;
13276 }
13277 \f
13278 /* Note hard registers in X that are used. */
13279
13280 static void
13281 mark_used_regs_combine (rtx x)
13282 {
13283 RTX_CODE code = GET_CODE (x);
13284 unsigned int regno;
13285 int i;
13286
13287 switch (code)
13288 {
13289 case LABEL_REF:
13290 case SYMBOL_REF:
13291 case CONST:
13292 CASE_CONST_ANY:
13293 case PC:
13294 case ADDR_VEC:
13295 case ADDR_DIFF_VEC:
13296 case ASM_INPUT:
13297 /* CC0 must die in the insn after it is set, so we don't need to take
13298 special note of it here. */
13299 case CC0:
13300 return;
13301
13302 case CLOBBER:
13303 /* If we are clobbering a MEM, mark any hard registers inside the
13304 address as used. */
13305 if (MEM_P (XEXP (x, 0)))
13306 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13307 return;
13308
13309 case REG:
13310 regno = REGNO (x);
13311 /* A hard reg in a wide mode may really be multiple registers.
13312 If so, mark all of them just like the first. */
13313 if (regno < FIRST_PSEUDO_REGISTER)
13314 {
13315 /* None of this applies to the stack, frame or arg pointers. */
13316 if (regno == STACK_POINTER_REGNUM
13317 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
13318 && regno == HARD_FRAME_POINTER_REGNUM)
13319 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13320 && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13321 || regno == FRAME_POINTER_REGNUM)
13322 return;
13323
13324 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13325 }
13326 return;
13327
13328 case SET:
13329 {
13330 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13331 the address. */
13332 rtx testreg = SET_DEST (x);
13333
13334 while (GET_CODE (testreg) == SUBREG
13335 || GET_CODE (testreg) == ZERO_EXTRACT
13336 || GET_CODE (testreg) == STRICT_LOW_PART)
13337 testreg = XEXP (testreg, 0);
13338
13339 if (MEM_P (testreg))
13340 mark_used_regs_combine (XEXP (testreg, 0));
13341
13342 mark_used_regs_combine (SET_SRC (x));
13343 }
13344 return;
13345
13346 default:
13347 break;
13348 }
13349
13350 /* Recursively scan the operands of this expression. */
13351
13352 {
13353 const char *fmt = GET_RTX_FORMAT (code);
13354
13355 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13356 {
13357 if (fmt[i] == 'e')
13358 mark_used_regs_combine (XEXP (x, i));
13359 else if (fmt[i] == 'E')
13360 {
13361 int j;
13362
13363 for (j = 0; j < XVECLEN (x, i); j++)
13364 mark_used_regs_combine (XVECEXP (x, i, j));
13365 }
13366 }
13367 }
13368 }
13369 \f
13370 /* Remove register number REGNO from the dead registers list of INSN.
13371
13372 Return the note used to record the death, if there was one. */
13373
13374 rtx
13375 remove_death (unsigned int regno, rtx_insn *insn)
13376 {
13377 rtx note = find_regno_note (insn, REG_DEAD, regno);
13378
13379 if (note)
13380 remove_note (insn, note);
13381
13382 return note;
13383 }
13384
13385 /* For each register (hardware or pseudo) used within expression X, if its
13386 death is in an instruction with luid between FROM_LUID (inclusive) and
13387 TO_INSN (exclusive), put a REG_DEAD note for that register in the
13388 list headed by PNOTES.
13389
13390 That said, don't move registers killed by maybe_kill_insn.
13391
13392 This is done when X is being merged by combination into TO_INSN. These
13393 notes will then be distributed as needed. */
13394
13395 static void
13396 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
13397 rtx *pnotes)
13398 {
13399 const char *fmt;
13400 int len, i;
13401 enum rtx_code code = GET_CODE (x);
13402
13403 if (code == REG)
13404 {
13405 unsigned int regno = REGNO (x);
13406 rtx_insn *where_dead = reg_stat[regno].last_death;
13407
13408 /* Don't move the register if it gets killed in between from and to. */
13409 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
13410 && ! reg_referenced_p (x, maybe_kill_insn))
13411 return;
13412
13413 if (where_dead
13414 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
13415 && DF_INSN_LUID (where_dead) >= from_luid
13416 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
13417 {
13418 rtx note = remove_death (regno, where_dead);
13419
13420 /* It is possible for the call above to return 0. This can occur
13421 when last_death points to I2 or I1 that we combined with.
13422 In that case make a new note.
13423
13424 We must also check for the case where X is a hard register
13425 and NOTE is a death note for a range of hard registers
13426 including X. In that case, we must put REG_DEAD notes for
13427 the remaining registers in place of NOTE. */
13428
13429 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
13430 && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
13431 > GET_MODE_SIZE (GET_MODE (x))))
13432 {
13433 unsigned int deadregno = REGNO (XEXP (note, 0));
13434 unsigned int deadend = END_REGNO (XEXP (note, 0));
13435 unsigned int ourend = END_REGNO (x);
13436 unsigned int i;
13437
13438 for (i = deadregno; i < deadend; i++)
13439 if (i < regno || i >= ourend)
13440 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
13441 }
13442
13443 /* If we didn't find any note, or if we found a REG_DEAD note that
13444 covers only part of the given reg, and we have a multi-reg hard
13445 register, then to be safe we must check for REG_DEAD notes
13446 for each register other than the first. They could have
13447 their own REG_DEAD notes lying around. */
13448 else if ((note == 0
13449 || (note != 0
13450 && (GET_MODE_SIZE (GET_MODE (XEXP (note, 0)))
13451 < GET_MODE_SIZE (GET_MODE (x)))))
13452 && regno < FIRST_PSEUDO_REGISTER
13453 && REG_NREGS (x) > 1)
13454 {
13455 unsigned int ourend = END_REGNO (x);
13456 unsigned int i, offset;
13457 rtx oldnotes = 0;
13458
13459 if (note)
13460 offset = hard_regno_nregs[regno][GET_MODE (XEXP (note, 0))];
13461 else
13462 offset = 1;
13463
13464 for (i = regno + offset; i < ourend; i++)
13465 move_deaths (regno_reg_rtx[i],
13466 maybe_kill_insn, from_luid, to_insn, &oldnotes);
13467 }
13468
13469 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
13470 {
13471 XEXP (note, 1) = *pnotes;
13472 *pnotes = note;
13473 }
13474 else
13475 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
13476 }
13477
13478 return;
13479 }
13480
13481 else if (GET_CODE (x) == SET)
13482 {
13483 rtx dest = SET_DEST (x);
13484
13485 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
13486
13487 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
13488 that accesses one word of a multi-word item, some
13489 piece of everything register in the expression is used by
13490 this insn, so remove any old death. */
13491 /* ??? So why do we test for equality of the sizes? */
13492
13493 if (GET_CODE (dest) == ZERO_EXTRACT
13494 || GET_CODE (dest) == STRICT_LOW_PART
13495 || (GET_CODE (dest) == SUBREG
13496 && (((GET_MODE_SIZE (GET_MODE (dest))
13497 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
13498 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
13499 + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
13500 {
13501 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
13502 return;
13503 }
13504
13505 /* If this is some other SUBREG, we know it replaces the entire
13506 value, so use that as the destination. */
13507 if (GET_CODE (dest) == SUBREG)
13508 dest = SUBREG_REG (dest);
13509
13510 /* If this is a MEM, adjust deaths of anything used in the address.
13511 For a REG (the only other possibility), the entire value is
13512 being replaced so the old value is not used in this insn. */
13513
13514 if (MEM_P (dest))
13515 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
13516 to_insn, pnotes);
13517 return;
13518 }
13519
13520 else if (GET_CODE (x) == CLOBBER)
13521 return;
13522
13523 len = GET_RTX_LENGTH (code);
13524 fmt = GET_RTX_FORMAT (code);
13525
13526 for (i = 0; i < len; i++)
13527 {
13528 if (fmt[i] == 'E')
13529 {
13530 int j;
13531 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13532 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
13533 to_insn, pnotes);
13534 }
13535 else if (fmt[i] == 'e')
13536 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
13537 }
13538 }
13539 \f
13540 /* Return 1 if X is the target of a bit-field assignment in BODY, the
13541 pattern of an insn. X must be a REG. */
13542
13543 static int
13544 reg_bitfield_target_p (rtx x, rtx body)
13545 {
13546 int i;
13547
13548 if (GET_CODE (body) == SET)
13549 {
13550 rtx dest = SET_DEST (body);
13551 rtx target;
13552 unsigned int regno, tregno, endregno, endtregno;
13553
13554 if (GET_CODE (dest) == ZERO_EXTRACT)
13555 target = XEXP (dest, 0);
13556 else if (GET_CODE (dest) == STRICT_LOW_PART)
13557 target = SUBREG_REG (XEXP (dest, 0));
13558 else
13559 return 0;
13560
13561 if (GET_CODE (target) == SUBREG)
13562 target = SUBREG_REG (target);
13563
13564 if (!REG_P (target))
13565 return 0;
13566
13567 tregno = REGNO (target), regno = REGNO (x);
13568 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
13569 return target == x;
13570
13571 endtregno = end_hard_regno (GET_MODE (target), tregno);
13572 endregno = end_hard_regno (GET_MODE (x), regno);
13573
13574 return endregno > tregno && regno < endtregno;
13575 }
13576
13577 else if (GET_CODE (body) == PARALLEL)
13578 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
13579 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
13580 return 1;
13581
13582 return 0;
13583 }
13584 \f
13585 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
13586 as appropriate. I3 and I2 are the insns resulting from the combination
13587 insns including FROM (I2 may be zero).
13588
13589 ELIM_I2 and ELIM_I1 are either zero or registers that we know will
13590 not need REG_DEAD notes because they are being substituted for. This
13591 saves searching in the most common cases.
13592
13593 Each note in the list is either ignored or placed on some insns, depending
13594 on the type of note. */
13595
13596 static void
13597 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
13598 rtx elim_i2, rtx elim_i1, rtx elim_i0)
13599 {
13600 rtx note, next_note;
13601 rtx tem_note;
13602 rtx_insn *tem_insn;
13603
13604 for (note = notes; note; note = next_note)
13605 {
13606 rtx_insn *place = 0, *place2 = 0;
13607
13608 next_note = XEXP (note, 1);
13609 switch (REG_NOTE_KIND (note))
13610 {
13611 case REG_BR_PROB:
13612 case REG_BR_PRED:
13613 /* Doesn't matter much where we put this, as long as it's somewhere.
13614 It is preferable to keep these notes on branches, which is most
13615 likely to be i3. */
13616 place = i3;
13617 break;
13618
13619 case REG_NON_LOCAL_GOTO:
13620 if (JUMP_P (i3))
13621 place = i3;
13622 else
13623 {
13624 gcc_assert (i2 && JUMP_P (i2));
13625 place = i2;
13626 }
13627 break;
13628
13629 case REG_EH_REGION:
13630 /* These notes must remain with the call or trapping instruction. */
13631 if (CALL_P (i3))
13632 place = i3;
13633 else if (i2 && CALL_P (i2))
13634 place = i2;
13635 else
13636 {
13637 gcc_assert (cfun->can_throw_non_call_exceptions);
13638 if (may_trap_p (i3))
13639 place = i3;
13640 else if (i2 && may_trap_p (i2))
13641 place = i2;
13642 /* ??? Otherwise assume we've combined things such that we
13643 can now prove that the instructions can't trap. Drop the
13644 note in this case. */
13645 }
13646 break;
13647
13648 case REG_ARGS_SIZE:
13649 /* ??? How to distribute between i3-i1. Assume i3 contains the
13650 entire adjustment. Assert i3 contains at least some adjust. */
13651 if (!noop_move_p (i3))
13652 {
13653 int old_size, args_size = INTVAL (XEXP (note, 0));
13654 /* fixup_args_size_notes looks at REG_NORETURN note,
13655 so ensure the note is placed there first. */
13656 if (CALL_P (i3))
13657 {
13658 rtx *np;
13659 for (np = &next_note; *np; np = &XEXP (*np, 1))
13660 if (REG_NOTE_KIND (*np) == REG_NORETURN)
13661 {
13662 rtx n = *np;
13663 *np = XEXP (n, 1);
13664 XEXP (n, 1) = REG_NOTES (i3);
13665 REG_NOTES (i3) = n;
13666 break;
13667 }
13668 }
13669 old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
13670 /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
13671 REG_ARGS_SIZE note to all noreturn calls, allow that here. */
13672 gcc_assert (old_size != args_size
13673 || (CALL_P (i3)
13674 && !ACCUMULATE_OUTGOING_ARGS
13675 && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
13676 }
13677 break;
13678
13679 case REG_NORETURN:
13680 case REG_SETJMP:
13681 case REG_TM:
13682 case REG_CALL_DECL:
13683 /* These notes must remain with the call. It should not be
13684 possible for both I2 and I3 to be a call. */
13685 if (CALL_P (i3))
13686 place = i3;
13687 else
13688 {
13689 gcc_assert (i2 && CALL_P (i2));
13690 place = i2;
13691 }
13692 break;
13693
13694 case REG_UNUSED:
13695 /* Any clobbers for i3 may still exist, and so we must process
13696 REG_UNUSED notes from that insn.
13697
13698 Any clobbers from i2 or i1 can only exist if they were added by
13699 recog_for_combine. In that case, recog_for_combine created the
13700 necessary REG_UNUSED notes. Trying to keep any original
13701 REG_UNUSED notes from these insns can cause incorrect output
13702 if it is for the same register as the original i3 dest.
13703 In that case, we will notice that the register is set in i3,
13704 and then add a REG_UNUSED note for the destination of i3, which
13705 is wrong. However, it is possible to have REG_UNUSED notes from
13706 i2 or i1 for register which were both used and clobbered, so
13707 we keep notes from i2 or i1 if they will turn into REG_DEAD
13708 notes. */
13709
13710 /* If this register is set or clobbered in I3, put the note there
13711 unless there is one already. */
13712 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
13713 {
13714 if (from_insn != i3)
13715 break;
13716
13717 if (! (REG_P (XEXP (note, 0))
13718 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
13719 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
13720 place = i3;
13721 }
13722 /* Otherwise, if this register is used by I3, then this register
13723 now dies here, so we must put a REG_DEAD note here unless there
13724 is one already. */
13725 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
13726 && ! (REG_P (XEXP (note, 0))
13727 ? find_regno_note (i3, REG_DEAD,
13728 REGNO (XEXP (note, 0)))
13729 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
13730 {
13731 PUT_REG_NOTE_KIND (note, REG_DEAD);
13732 place = i3;
13733 }
13734 break;
13735
13736 case REG_EQUAL:
13737 case REG_EQUIV:
13738 case REG_NOALIAS:
13739 /* These notes say something about results of an insn. We can
13740 only support them if they used to be on I3 in which case they
13741 remain on I3. Otherwise they are ignored.
13742
13743 If the note refers to an expression that is not a constant, we
13744 must also ignore the note since we cannot tell whether the
13745 equivalence is still true. It might be possible to do
13746 slightly better than this (we only have a problem if I2DEST
13747 or I1DEST is present in the expression), but it doesn't
13748 seem worth the trouble. */
13749
13750 if (from_insn == i3
13751 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
13752 place = i3;
13753 break;
13754
13755 case REG_INC:
13756 /* These notes say something about how a register is used. They must
13757 be present on any use of the register in I2 or I3. */
13758 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
13759 place = i3;
13760
13761 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
13762 {
13763 if (place)
13764 place2 = i2;
13765 else
13766 place = i2;
13767 }
13768 break;
13769
13770 case REG_LABEL_TARGET:
13771 case REG_LABEL_OPERAND:
13772 /* This can show up in several ways -- either directly in the
13773 pattern, or hidden off in the constant pool with (or without?)
13774 a REG_EQUAL note. */
13775 /* ??? Ignore the without-reg_equal-note problem for now. */
13776 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
13777 || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
13778 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
13779 && LABEL_REF_LABEL (XEXP (tem_note, 0)) == XEXP (note, 0)))
13780 place = i3;
13781
13782 if (i2
13783 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
13784 || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
13785 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
13786 && LABEL_REF_LABEL (XEXP (tem_note, 0)) == XEXP (note, 0))))
13787 {
13788 if (place)
13789 place2 = i2;
13790 else
13791 place = i2;
13792 }
13793
13794 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
13795 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
13796 there. */
13797 if (place && JUMP_P (place)
13798 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
13799 && (JUMP_LABEL (place) == NULL
13800 || JUMP_LABEL (place) == XEXP (note, 0)))
13801 {
13802 rtx label = JUMP_LABEL (place);
13803
13804 if (!label)
13805 JUMP_LABEL (place) = XEXP (note, 0);
13806 else if (LABEL_P (label))
13807 LABEL_NUSES (label)--;
13808 }
13809
13810 if (place2 && JUMP_P (place2)
13811 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
13812 && (JUMP_LABEL (place2) == NULL
13813 || JUMP_LABEL (place2) == XEXP (note, 0)))
13814 {
13815 rtx label = JUMP_LABEL (place2);
13816
13817 if (!label)
13818 JUMP_LABEL (place2) = XEXP (note, 0);
13819 else if (LABEL_P (label))
13820 LABEL_NUSES (label)--;
13821 place2 = 0;
13822 }
13823 break;
13824
13825 case REG_NONNEG:
13826 /* This note says something about the value of a register prior
13827 to the execution of an insn. It is too much trouble to see
13828 if the note is still correct in all situations. It is better
13829 to simply delete it. */
13830 break;
13831
13832 case REG_DEAD:
13833 /* If we replaced the right hand side of FROM_INSN with a
13834 REG_EQUAL note, the original use of the dying register
13835 will not have been combined into I3 and I2. In such cases,
13836 FROM_INSN is guaranteed to be the first of the combined
13837 instructions, so we simply need to search back before
13838 FROM_INSN for the previous use or set of this register,
13839 then alter the notes there appropriately.
13840
13841 If the register is used as an input in I3, it dies there.
13842 Similarly for I2, if it is nonzero and adjacent to I3.
13843
13844 If the register is not used as an input in either I3 or I2
13845 and it is not one of the registers we were supposed to eliminate,
13846 there are two possibilities. We might have a non-adjacent I2
13847 or we might have somehow eliminated an additional register
13848 from a computation. For example, we might have had A & B where
13849 we discover that B will always be zero. In this case we will
13850 eliminate the reference to A.
13851
13852 In both cases, we must search to see if we can find a previous
13853 use of A and put the death note there. */
13854
13855 if (from_insn
13856 && from_insn == i2mod
13857 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
13858 tem_insn = from_insn;
13859 else
13860 {
13861 if (from_insn
13862 && CALL_P (from_insn)
13863 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
13864 place = from_insn;
13865 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
13866 place = i3;
13867 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
13868 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
13869 place = i2;
13870 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
13871 && !(i2mod
13872 && reg_overlap_mentioned_p (XEXP (note, 0),
13873 i2mod_old_rhs)))
13874 || rtx_equal_p (XEXP (note, 0), elim_i1)
13875 || rtx_equal_p (XEXP (note, 0), elim_i0))
13876 break;
13877 tem_insn = i3;
13878 /* If the new I2 sets the same register that is marked dead
13879 in the note, the note now should not be put on I2, as the
13880 note refers to a previous incarnation of the reg. */
13881 if (i2 != 0 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
13882 tem_insn = i2;
13883 }
13884
13885 if (place == 0)
13886 {
13887 basic_block bb = this_basic_block;
13888
13889 for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
13890 {
13891 if (!NONDEBUG_INSN_P (tem_insn))
13892 {
13893 if (tem_insn == BB_HEAD (bb))
13894 break;
13895 continue;
13896 }
13897
13898 /* If the register is being set at TEM_INSN, see if that is all
13899 TEM_INSN is doing. If so, delete TEM_INSN. Otherwise, make this
13900 into a REG_UNUSED note instead. Don't delete sets to
13901 global register vars. */
13902 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
13903 || !global_regs[REGNO (XEXP (note, 0))])
13904 && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
13905 {
13906 rtx set = single_set (tem_insn);
13907 rtx inner_dest = 0;
13908 rtx_insn *cc0_setter = NULL;
13909
13910 if (set != 0)
13911 for (inner_dest = SET_DEST (set);
13912 (GET_CODE (inner_dest) == STRICT_LOW_PART
13913 || GET_CODE (inner_dest) == SUBREG
13914 || GET_CODE (inner_dest) == ZERO_EXTRACT);
13915 inner_dest = XEXP (inner_dest, 0))
13916 ;
13917
13918 /* Verify that it was the set, and not a clobber that
13919 modified the register.
13920
13921 CC0 targets must be careful to maintain setter/user
13922 pairs. If we cannot delete the setter due to side
13923 effects, mark the user with an UNUSED note instead
13924 of deleting it. */
13925
13926 if (set != 0 && ! side_effects_p (SET_SRC (set))
13927 && rtx_equal_p (XEXP (note, 0), inner_dest)
13928 && (!HAVE_cc0
13929 || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
13930 || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
13931 && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
13932 {
13933 /* Move the notes and links of TEM_INSN elsewhere.
13934 This might delete other dead insns recursively.
13935 First set the pattern to something that won't use
13936 any register. */
13937 rtx old_notes = REG_NOTES (tem_insn);
13938
13939 PATTERN (tem_insn) = pc_rtx;
13940 REG_NOTES (tem_insn) = NULL;
13941
13942 distribute_notes (old_notes, tem_insn, tem_insn, NULL,
13943 NULL_RTX, NULL_RTX, NULL_RTX);
13944 distribute_links (LOG_LINKS (tem_insn));
13945
13946 SET_INSN_DELETED (tem_insn);
13947 if (tem_insn == i2)
13948 i2 = NULL;
13949
13950 /* Delete the setter too. */
13951 if (cc0_setter)
13952 {
13953 PATTERN (cc0_setter) = pc_rtx;
13954 old_notes = REG_NOTES (cc0_setter);
13955 REG_NOTES (cc0_setter) = NULL;
13956
13957 distribute_notes (old_notes, cc0_setter,
13958 cc0_setter, NULL,
13959 NULL_RTX, NULL_RTX, NULL_RTX);
13960 distribute_links (LOG_LINKS (cc0_setter));
13961
13962 SET_INSN_DELETED (cc0_setter);
13963 if (cc0_setter == i2)
13964 i2 = NULL;
13965 }
13966 }
13967 else
13968 {
13969 PUT_REG_NOTE_KIND (note, REG_UNUSED);
13970
13971 /* If there isn't already a REG_UNUSED note, put one
13972 here. Do not place a REG_DEAD note, even if
13973 the register is also used here; that would not
13974 match the algorithm used in lifetime analysis
13975 and can cause the consistency check in the
13976 scheduler to fail. */
13977 if (! find_regno_note (tem_insn, REG_UNUSED,
13978 REGNO (XEXP (note, 0))))
13979 place = tem_insn;
13980 break;
13981 }
13982 }
13983 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
13984 || (CALL_P (tem_insn)
13985 && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
13986 {
13987 place = tem_insn;
13988
13989 /* If we are doing a 3->2 combination, and we have a
13990 register which formerly died in i3 and was not used
13991 by i2, which now no longer dies in i3 and is used in
13992 i2 but does not die in i2, and place is between i2
13993 and i3, then we may need to move a link from place to
13994 i2. */
13995 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
13996 && from_insn
13997 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
13998 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
13999 {
14000 struct insn_link *links = LOG_LINKS (place);
14001 LOG_LINKS (place) = NULL;
14002 distribute_links (links);
14003 }
14004 break;
14005 }
14006
14007 if (tem_insn == BB_HEAD (bb))
14008 break;
14009 }
14010
14011 }
14012
14013 /* If the register is set or already dead at PLACE, we needn't do
14014 anything with this note if it is still a REG_DEAD note.
14015 We check here if it is set at all, not if is it totally replaced,
14016 which is what `dead_or_set_p' checks, so also check for it being
14017 set partially. */
14018
14019 if (place && REG_NOTE_KIND (note) == REG_DEAD)
14020 {
14021 unsigned int regno = REGNO (XEXP (note, 0));
14022 reg_stat_type *rsp = &reg_stat[regno];
14023
14024 if (dead_or_set_p (place, XEXP (note, 0))
14025 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
14026 {
14027 /* Unless the register previously died in PLACE, clear
14028 last_death. [I no longer understand why this is
14029 being done.] */
14030 if (rsp->last_death != place)
14031 rsp->last_death = 0;
14032 place = 0;
14033 }
14034 else
14035 rsp->last_death = place;
14036
14037 /* If this is a death note for a hard reg that is occupying
14038 multiple registers, ensure that we are still using all
14039 parts of the object. If we find a piece of the object
14040 that is unused, we must arrange for an appropriate REG_DEAD
14041 note to be added for it. However, we can't just emit a USE
14042 and tag the note to it, since the register might actually
14043 be dead; so we recourse, and the recursive call then finds
14044 the previous insn that used this register. */
14045
14046 if (place && REG_NREGS (XEXP (note, 0)) > 1)
14047 {
14048 unsigned int endregno = END_REGNO (XEXP (note, 0));
14049 bool all_used = true;
14050 unsigned int i;
14051
14052 for (i = regno; i < endregno; i++)
14053 if ((! refers_to_regno_p (i, PATTERN (place))
14054 && ! find_regno_fusage (place, USE, i))
14055 || dead_or_set_regno_p (place, i))
14056 {
14057 all_used = false;
14058 break;
14059 }
14060
14061 if (! all_used)
14062 {
14063 /* Put only REG_DEAD notes for pieces that are
14064 not already dead or set. */
14065
14066 for (i = regno; i < endregno;
14067 i += hard_regno_nregs[i][reg_raw_mode[i]])
14068 {
14069 rtx piece = regno_reg_rtx[i];
14070 basic_block bb = this_basic_block;
14071
14072 if (! dead_or_set_p (place, piece)
14073 && ! reg_bitfield_target_p (piece,
14074 PATTERN (place)))
14075 {
14076 rtx new_note = alloc_reg_note (REG_DEAD, piece,
14077 NULL_RTX);
14078
14079 distribute_notes (new_note, place, place,
14080 NULL, NULL_RTX, NULL_RTX,
14081 NULL_RTX);
14082 }
14083 else if (! refers_to_regno_p (i, PATTERN (place))
14084 && ! find_regno_fusage (place, USE, i))
14085 for (tem_insn = PREV_INSN (place); ;
14086 tem_insn = PREV_INSN (tem_insn))
14087 {
14088 if (!NONDEBUG_INSN_P (tem_insn))
14089 {
14090 if (tem_insn == BB_HEAD (bb))
14091 break;
14092 continue;
14093 }
14094 if (dead_or_set_p (tem_insn, piece)
14095 || reg_bitfield_target_p (piece,
14096 PATTERN (tem_insn)))
14097 {
14098 add_reg_note (tem_insn, REG_UNUSED, piece);
14099 break;
14100 }
14101 }
14102 }
14103
14104 place = 0;
14105 }
14106 }
14107 }
14108 break;
14109
14110 default:
14111 /* Any other notes should not be present at this point in the
14112 compilation. */
14113 gcc_unreachable ();
14114 }
14115
14116 if (place)
14117 {
14118 XEXP (note, 1) = REG_NOTES (place);
14119 REG_NOTES (place) = note;
14120 }
14121
14122 if (place2)
14123 add_shallow_copy_of_reg_note (place2, note);
14124 }
14125 }
14126 \f
14127 /* Similarly to above, distribute the LOG_LINKS that used to be present on
14128 I3, I2, and I1 to new locations. This is also called to add a link
14129 pointing at I3 when I3's destination is changed. */
14130
14131 static void
14132 distribute_links (struct insn_link *links)
14133 {
14134 struct insn_link *link, *next_link;
14135
14136 for (link = links; link; link = next_link)
14137 {
14138 rtx_insn *place = 0;
14139 rtx_insn *insn;
14140 rtx set, reg;
14141
14142 next_link = link->next;
14143
14144 /* If the insn that this link points to is a NOTE, ignore it. */
14145 if (NOTE_P (link->insn))
14146 continue;
14147
14148 set = 0;
14149 rtx pat = PATTERN (link->insn);
14150 if (GET_CODE (pat) == SET)
14151 set = pat;
14152 else if (GET_CODE (pat) == PARALLEL)
14153 {
14154 int i;
14155 for (i = 0; i < XVECLEN (pat, 0); i++)
14156 {
14157 set = XVECEXP (pat, 0, i);
14158 if (GET_CODE (set) != SET)
14159 continue;
14160
14161 reg = SET_DEST (set);
14162 while (GET_CODE (reg) == ZERO_EXTRACT
14163 || GET_CODE (reg) == STRICT_LOW_PART
14164 || GET_CODE (reg) == SUBREG)
14165 reg = XEXP (reg, 0);
14166
14167 if (!REG_P (reg))
14168 continue;
14169
14170 if (REGNO (reg) == link->regno)
14171 break;
14172 }
14173 if (i == XVECLEN (pat, 0))
14174 continue;
14175 }
14176 else
14177 continue;
14178
14179 reg = SET_DEST (set);
14180
14181 while (GET_CODE (reg) == ZERO_EXTRACT
14182 || GET_CODE (reg) == STRICT_LOW_PART
14183 || GET_CODE (reg) == SUBREG)
14184 reg = XEXP (reg, 0);
14185
14186 /* A LOG_LINK is defined as being placed on the first insn that uses
14187 a register and points to the insn that sets the register. Start
14188 searching at the next insn after the target of the link and stop
14189 when we reach a set of the register or the end of the basic block.
14190
14191 Note that this correctly handles the link that used to point from
14192 I3 to I2. Also note that not much searching is typically done here
14193 since most links don't point very far away. */
14194
14195 for (insn = NEXT_INSN (link->insn);
14196 (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
14197 || BB_HEAD (this_basic_block->next_bb) != insn));
14198 insn = NEXT_INSN (insn))
14199 if (DEBUG_INSN_P (insn))
14200 continue;
14201 else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
14202 {
14203 if (reg_referenced_p (reg, PATTERN (insn)))
14204 place = insn;
14205 break;
14206 }
14207 else if (CALL_P (insn)
14208 && find_reg_fusage (insn, USE, reg))
14209 {
14210 place = insn;
14211 break;
14212 }
14213 else if (INSN_P (insn) && reg_set_p (reg, insn))
14214 break;
14215
14216 /* If we found a place to put the link, place it there unless there
14217 is already a link to the same insn as LINK at that point. */
14218
14219 if (place)
14220 {
14221 struct insn_link *link2;
14222
14223 FOR_EACH_LOG_LINK (link2, place)
14224 if (link2->insn == link->insn && link2->regno == link->regno)
14225 break;
14226
14227 if (link2 == NULL)
14228 {
14229 link->next = LOG_LINKS (place);
14230 LOG_LINKS (place) = link;
14231
14232 /* Set added_links_insn to the earliest insn we added a
14233 link to. */
14234 if (added_links_insn == 0
14235 || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
14236 added_links_insn = place;
14237 }
14238 }
14239 }
14240 }
14241 \f
14242 /* Check for any register or memory mentioned in EQUIV that is not
14243 mentioned in EXPR. This is used to restrict EQUIV to "specializations"
14244 of EXPR where some registers may have been replaced by constants. */
14245
14246 static bool
14247 unmentioned_reg_p (rtx equiv, rtx expr)
14248 {
14249 subrtx_iterator::array_type array;
14250 FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
14251 {
14252 const_rtx x = *iter;
14253 if ((REG_P (x) || MEM_P (x))
14254 && !reg_mentioned_p (x, expr))
14255 return true;
14256 }
14257 return false;
14258 }
14259 \f
14260 DEBUG_FUNCTION void
14261 dump_combine_stats (FILE *file)
14262 {
14263 fprintf
14264 (file,
14265 ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
14266 combine_attempts, combine_merges, combine_extras, combine_successes);
14267 }
14268
14269 void
14270 dump_combine_total_stats (FILE *file)
14271 {
14272 fprintf
14273 (file,
14274 "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
14275 total_attempts, total_merges, total_extras, total_successes);
14276 }
14277 \f
14278 /* Try combining insns through substitution. */
14279 static unsigned int
14280 rest_of_handle_combine (void)
14281 {
14282 int rebuild_jump_labels_after_combine;
14283
14284 df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
14285 df_note_add_problem ();
14286 df_analyze ();
14287
14288 regstat_init_n_sets_and_refs ();
14289 reg_n_sets_max = max_reg_num ();
14290
14291 rebuild_jump_labels_after_combine
14292 = combine_instructions (get_insns (), max_reg_num ());
14293
14294 /* Combining insns may have turned an indirect jump into a
14295 direct jump. Rebuild the JUMP_LABEL fields of jumping
14296 instructions. */
14297 if (rebuild_jump_labels_after_combine)
14298 {
14299 timevar_push (TV_JUMP);
14300 rebuild_jump_labels (get_insns ());
14301 cleanup_cfg (0);
14302 timevar_pop (TV_JUMP);
14303 }
14304
14305 regstat_free_n_sets_and_refs ();
14306 return 0;
14307 }
14308
14309 namespace {
14310
14311 const pass_data pass_data_combine =
14312 {
14313 RTL_PASS, /* type */
14314 "combine", /* name */
14315 OPTGROUP_NONE, /* optinfo_flags */
14316 TV_COMBINE, /* tv_id */
14317 PROP_cfglayout, /* properties_required */
14318 0, /* properties_provided */
14319 0, /* properties_destroyed */
14320 0, /* todo_flags_start */
14321 TODO_df_finish, /* todo_flags_finish */
14322 };
14323
14324 class pass_combine : public rtl_opt_pass
14325 {
14326 public:
14327 pass_combine (gcc::context *ctxt)
14328 : rtl_opt_pass (pass_data_combine, ctxt)
14329 {}
14330
14331 /* opt_pass methods: */
14332 virtual bool gate (function *) { return (optimize > 0); }
14333 virtual unsigned int execute (function *)
14334 {
14335 return rest_of_handle_combine ();
14336 }
14337
14338 }; // class pass_combine
14339
14340 } // anon namespace
14341
14342 rtl_opt_pass *
14343 make_pass_combine (gcc::context *ctxt)
14344 {
14345 return new pass_combine (ctxt);
14346 }