1 /* Optimize by combining instructions for GNU compiler.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* This module is essentially the "combiner" phase of the U. of Arizona
21 Portable Optimizer, but redone to work on our list-structured
22 representation for RTL instead of their string representation.
23
24 The LOG_LINKS of each insn identify the most recent assignment
25 to each REG used in the insn. It is a list of previous insns,
26 each of which contains a SET for a REG that is used in this insn
27 and not used or set in between. LOG_LINKs never cross basic blocks.
28 They were set up by the preceding pass (lifetime analysis).
29
30 We try to combine each pair of insns joined by a logical link.
31 We also try to combine triplets of insns A, B and C when C has
32 a link back to B and B has a link back to A. Likewise for a
33 small number of quadruplets of insns A, B, C and D for which
34 there's high likelihood of success.
35
36 LOG_LINKS do not have links for uses of CC0. They don't
37 need to, because the insn that sets the CC0 is always immediately
38 before the insn that tests it. So we always regard a branch
39 insn as having a logical link to the preceding insn. The same is true
40 for an insn explicitly using CC0.
41
42 We check (with use_crosses_set_p) to avoid combining in such a way
43 as to move a computation to a place where its value would be different.
44
45 Combination is done by mathematically substituting the previous
46 insn(s) values for the regs they set into the expressions in
47 the later insns that refer to these regs. If the result is a valid insn
48 for our target machine, according to the machine description,
49 we install it, delete the earlier insns, and update the data flow
50 information (LOG_LINKS and REG_NOTES) for what we did.
51
52 There are a few exceptions where the dataflow information isn't
53 completely updated (however this is only a local issue since it is
54 regenerated before the next pass that uses it):
55
56 - reg_live_length is not updated
57 - reg_n_refs is not adjusted in the rare case when a register is
58 no longer required in a computation
59 - there are extremely rare cases (see distribute_notes) when a
60 REG_DEAD note is lost
61 - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62 removed because there is no way to know which register it was
63 linking
64
65 To simplify substitution, we combine only when the earlier insn(s)
66 consist of only a single assignment. To simplify updating afterward,
67 we never combine when a subroutine call appears in the middle.
68
69 Since we do not represent assignments to CC0 explicitly except when that
70 is all an insn does, there is no LOG_LINKS entry in an insn that uses
71 the condition code for the insn that set the condition code.
72 Fortunately, these two insns must be consecutive.
73 Therefore, every JUMP_INSN is taken to have an implicit logical link
74 to the preceding insn. This is not quite right, since non-jumps can
75 also use the condition code; but in practice such insns would not
76 combine anyway. */
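/* A purely illustrative example (not from the original sources): given the
   two-insn sequence

	(set (reg:SI 100) (plus:SI (reg:SI 99) (const_int 4)))
	(set (mem:SI (reg:SI 100)) (reg:SI 98))

   where reg 100 dies in the second insn, the combiner substitutes the first
   SET's source into the second, giving

	(set (mem:SI (plus:SI (reg:SI 99) (const_int 4))) (reg:SI 98))

   and keeps the result only if it matches an insn pattern in the machine
   description and is no more expensive than the original insns. */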
77
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "backend.h"
82 #include "target.h"
83 #include "rtl.h"
84 #include "tree.h"
85 #include "cfghooks.h"
86 #include "predict.h"
87 #include "df.h"
88 #include "memmodel.h"
89 #include "tm_p.h"
90 #include "optabs.h"
91 #include "regs.h"
92 #include "emit-rtl.h"
93 #include "recog.h"
94 #include "cgraph.h"
95 #include "stor-layout.h"
96 #include "cfgrtl.h"
97 #include "cfgcleanup.h"
98 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
99 #include "explow.h"
100 #include "insn-attr.h"
101 #include "rtlhooks-def.h"
102 #include "params.h"
103 #include "tree-pass.h"
104 #include "valtrack.h"
105 #include "rtl-iter.h"
106 #include "print-rtl.h"
107
108 /* Number of attempts to combine instructions in this function. */
109
110 static int combine_attempts;
111
112 /* Number of attempts that got as far as substitution in this function. */
113
114 static int combine_merges;
115
116 /* Number of instructions combined with added SETs in this function. */
117
118 static int combine_extras;
119
120 /* Number of instructions combined in this function. */
121
122 static int combine_successes;
123
124 /* Totals over entire compilation. */
125
126 static int total_attempts, total_merges, total_extras, total_successes;
127
128 /* combine_instructions may try to replace the right hand side of the
129 second instruction with the value of an associated REG_EQUAL note
130 before throwing it at try_combine. That is problematic when there
131 is a REG_DEAD note for a register used in the old right hand side
132 and can cause distribute_notes to do wrong things. This is the
133 second instruction if it has been so modified, null otherwise. */
134
135 static rtx_insn *i2mod;
136
137 /* When I2MOD is nonnull, this is a copy of the old right hand side. */
138
139 static rtx i2mod_old_rhs;
140
141 /* When I2MOD is nonnull, this is a copy of the new right hand side. */
142
143 static rtx i2mod_new_rhs;
144 \f
145 struct reg_stat_type {
146 /* Record last point of death of (hard or pseudo) register n. */
147 rtx_insn *last_death;
148
149 /* Record last point of modification of (hard or pseudo) register n. */
150 rtx_insn *last_set;
151
152 /* The next group of fields allows the recording of the last value assigned
153 to (hard or pseudo) register n. We use this information to see if an
154 operation being processed is redundant given a prior operation performed
155 on the register. For example, an `and' with a constant is redundant if
156 all the zero bits are already known to be turned off.
157
158 We use an approach similar to that used by cse, but change it in the
159 following ways:
160
161 (1) We do not want to reinitialize at each label.
162 (2) It is useful, but not critical, to know the actual value assigned
163 to a register. Often just its form is helpful.
164
165 Therefore, we maintain the following fields:
166
167 last_set_value the last value assigned
168 last_set_label records the value of label_tick when the
169 register was assigned
170 last_set_table_tick records the value of label_tick when a
171 value using the register is assigned
172 last_set_invalid set to nonzero when it is not valid
173 to use the value of this register in some
174 register's value
175
176 To understand the usage of these tables, it is important to understand
177 the distinction between the value in last_set_value being valid and
178 the register being validly contained in some other expression in the
179 table.
180
181 (The next two parameters are out of date).
182
183 reg_stat[i].last_set_value is valid if it is nonzero, and either
184 reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
185
186 Register I may validly appear in any expression returned for the value
187 of another register if reg_n_sets[i] is 1. It may also appear in the
188 value for register J if reg_stat[j].last_set_invalid is zero, or
189 reg_stat[i].last_set_label < reg_stat[j].last_set_label.
190
191 If an expression is found in the table containing a register which may
192 not validly appear in an expression, the register is replaced by
193 something that won't match, (clobber (const_int 0)). */
194
195 /* Record last value assigned to (hard or pseudo) register n. */
196
197 rtx last_set_value;
198
199 /* Record the value of label_tick when an expression involving register n
200 is placed in last_set_value. */
201
202 int last_set_table_tick;
203
204 /* Record the value of label_tick when the value for register n is placed in
205 last_set_value. */
206
207 int last_set_label;
208
209 /* These fields are maintained in parallel with last_set_value and are
210 used to store the mode in which the register was last set, the bits
211 that were known to be zero when it was last set, and the number of
212 sign bit copies it was known to have when it was last set. */
213
214 unsigned HOST_WIDE_INT last_set_nonzero_bits;
215 char last_set_sign_bit_copies;
216 ENUM_BITFIELD(machine_mode) last_set_mode : 8;
217
218 /* Set nonzero if references to register n in expressions should not be
219 used. last_set_invalid is set nonzero when this register is being
220 assigned to and last_set_table_tick == label_tick. */
221
222 char last_set_invalid;
223
224 /* Some registers that are set more than once and used in more than one
225 basic block are nevertheless always set in similar ways. For example,
226 a QImode register may be loaded from memory in two places on a machine
227 where byte loads zero extend.
228
229 We record in the following fields if a register has some leading bits
230 that are always equal to the sign bit, and what we know about the
231 nonzero bits of a register, specifically which bits are known to be
232 zero.
233
234 If an entry is zero, it means that we don't know anything special. */
235
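/* Illustrative example (not part of the original comment): on a machine
   whose byte loads zero extend, an SImode register that is only ever
   loaded from QImode memory would have nonzero_bits == 0xff and at least
   24 sign_bit_copies, since its upper 24 bits are known to be zero. */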
236 unsigned char sign_bit_copies;
237
238 unsigned HOST_WIDE_INT nonzero_bits;
239
240 /* Record the value of the label_tick when the last truncation
241 happened. The field truncated_to_mode is only valid if
242 truncation_label == label_tick. */
243
244 int truncation_label;
245
246 /* Record the last truncation seen for this register. If truncation
247 is not a nop to this mode we might be able to save an explicit
248 truncation if we know that the value already contains a truncated
249 value. */
250
251 ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
252 };
253
254
255 static vec<reg_stat_type> reg_stat;
256
257 /* One plus the highest pseudo for which we track REG_N_SETS.
258 regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
259 but during combine_split_insns new pseudos can be created. As we don't have
260 updated DF information in that case, it is hard to initialize the array
261 after growing. The combiner only cares about REG_N_SETS (regno) == 1,
262 so instead of growing the arrays, just assume all newly created pseudos
263 during combine might be set multiple times. */
264
265 static unsigned int reg_n_sets_max;
266
267 /* Record the luid of the last insn that invalidated memory
268 (anything that writes memory, and subroutine calls, but not pushes). */
269
270 static int mem_last_set;
271
272 /* Record the luid of the last CALL_INSN
273 so we can tell whether a potential combination crosses any calls. */
274
275 static int last_call_luid;
276
277 /* When `subst' is called, this is the insn that is being modified
278 (by combining in a previous insn). The PATTERN of this insn
279 is still the old pattern partially modified and it should not be
280 looked at, but this may be used to examine the successors of the insn
281 to judge whether a simplification is valid. */
282
283 static rtx_insn *subst_insn;
284
285 /* This is the lowest LUID that `subst' is currently dealing with.
286 get_last_value will not return a value if the register was set at or
287 after this LUID. If not for this mechanism, we could get confused if
288 I2 or I1 in try_combine were an insn that used the old value of a register
289 to obtain a new value. In that case, we might erroneously get the
290 new value of the register when we wanted the old one. */
291
292 static int subst_low_luid;
293
294 /* This contains any hard registers that are used in newpat; reg_dead_at_p
295 must consider all these registers to be always live. */
296
297 static HARD_REG_SET newpat_used_regs;
298
299 /* This is an insn to which a LOG_LINKS entry has been added. If this
300 insn is earlier than I2 or I3, combine should rescan starting at
301 that location. */
302
303 static rtx_insn *added_links_insn;
304
305 /* Basic block in which we are performing combines. */
306 static basic_block this_basic_block;
307 static bool optimize_this_for_speed_p;
308
309 \f
310 /* Length of the currently allocated uid_insn_cost array. */
311
312 static int max_uid_known;
313
314 /* The following array records the insn_rtx_cost for every insn
315 in the instruction stream. */
316
317 static int *uid_insn_cost;
318
319 /* The following array records the LOG_LINKS for every insn in the
320 instruction stream as struct insn_link pointers. */
321
322 struct insn_link {
323 rtx_insn *insn;
324 unsigned int regno;
325 struct insn_link *next;
326 };
327
328 static struct insn_link **uid_log_links;
329
330 static inline int
331 insn_uid_check (const_rtx insn)
332 {
333 int uid = INSN_UID (insn);
334 gcc_checking_assert (uid <= max_uid_known);
335 return uid;
336 }
337
338 #define INSN_COST(INSN) (uid_insn_cost[insn_uid_check (INSN)])
339 #define LOG_LINKS(INSN) (uid_log_links[insn_uid_check (INSN)])
340
341 #define FOR_EACH_LOG_LINK(L, INSN) \
342 for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
343
344 /* Links for LOG_LINKS are allocated from this obstack. */
345
346 static struct obstack insn_link_obstack;
347
348 /* Allocate a link. */
349
350 static inline struct insn_link *
351 alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
352 {
353 struct insn_link *l
354 = (struct insn_link *) obstack_alloc (&insn_link_obstack,
355 sizeof (struct insn_link));
356 l->insn = insn;
357 l->regno = regno;
358 l->next = next;
359 return l;
360 }
361
362 /* Incremented for each basic block. */
363
364 static int label_tick;
365
366 /* Reset to label_tick for each extended basic block in scanning order. */
367
368 static int label_tick_ebb_start;
369
370 /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
371 largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
372
373 static scalar_int_mode nonzero_bits_mode;
374
375 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
376 be safely used. It is zero while computing them and after combine has
377 completed. The former state prevents propagating values based on
378 previously set values, which can be incorrect if a variable is modified
379 in a loop. */
380
381 static int nonzero_sign_valid;
382
383 \f
384 /* Record one modification to rtl structure
385 to be undone by storing old_contents into *where. */
386
387 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
388
389 struct undo
390 {
391 struct undo *next;
392 enum undo_kind kind;
393 union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
394 union { rtx *r; int *i; struct insn_link **l; } where;
395 };
396
397 /* Record a bunch of changes to be undone, up to MAX_UNDO of them.
398 num_undo says how many are currently recorded.
399
400 other_insn is nonzero if we have modified some other insn in the process
401 of working on subst_insn. It must be verified too. */
402
403 struct undobuf
404 {
405 struct undo *undos;
406 struct undo *frees;
407 rtx_insn *other_insn;
408 };
409
410 static struct undobuf undobuf;
411
412 /* Number of times the pseudo being substituted for
413 was found and replaced. */
414
415 static int n_occurrences;
416
417 static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
418 scalar_int_mode,
419 unsigned HOST_WIDE_INT *);
420 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
421 scalar_int_mode,
422 unsigned int *);
423 static void do_SUBST (rtx *, rtx);
424 static void do_SUBST_INT (int *, int);
425 static void init_reg_last (void);
426 static void setup_incoming_promotions (rtx_insn *);
427 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
428 static int cant_combine_insn_p (rtx_insn *);
429 static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
430 rtx_insn *, rtx_insn *, rtx *, rtx *);
431 static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
432 static int contains_muldiv (rtx);
433 static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
434 int *, rtx_insn *);
435 static void undo_all (void);
436 static void undo_commit (void);
437 static rtx *find_split_point (rtx *, rtx_insn *, bool);
438 static rtx subst (rtx, rtx, rtx, int, int, int);
439 static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
440 static rtx simplify_if_then_else (rtx);
441 static rtx simplify_set (rtx);
442 static rtx simplify_logical (rtx);
443 static rtx expand_compound_operation (rtx);
444 static const_rtx expand_field_assignment (const_rtx);
445 static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
446 rtx, unsigned HOST_WIDE_INT, int, int, int);
447 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
448 unsigned HOST_WIDE_INT *);
449 static rtx canon_reg_for_combine (rtx, rtx);
450 static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
451 scalar_int_mode, unsigned HOST_WIDE_INT, int);
452 static rtx force_to_mode (rtx, machine_mode,
453 unsigned HOST_WIDE_INT, int);
454 static rtx if_then_else_cond (rtx, rtx *, rtx *);
455 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
456 static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
457 static rtx make_field_assignment (rtx);
458 static rtx apply_distributive_law (rtx);
459 static rtx distribute_and_simplify_rtx (rtx, int);
460 static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
461 unsigned HOST_WIDE_INT);
462 static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
463 unsigned HOST_WIDE_INT);
464 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
465 HOST_WIDE_INT, machine_mode, int *);
466 static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
467 static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
468 int);
469 static int recog_for_combine (rtx *, rtx_insn *, rtx *);
470 static rtx gen_lowpart_for_combine (machine_mode, rtx);
471 static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
472 rtx, rtx *);
473 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
474 static void update_table_tick (rtx);
475 static void record_value_for_reg (rtx, rtx_insn *, rtx);
476 static void check_promoted_subreg (rtx_insn *, rtx);
477 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
478 static void record_dead_and_set_regs (rtx_insn *);
479 static int get_last_value_validate (rtx *, rtx_insn *, int, int);
480 static rtx get_last_value (const_rtx);
481 static int use_crosses_set_p (const_rtx, int);
482 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
483 static int reg_dead_at_p (rtx, rtx_insn *);
484 static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
485 static int reg_bitfield_target_p (rtx, rtx);
486 static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
487 static void distribute_links (struct insn_link *);
488 static void mark_used_regs_combine (rtx);
489 static void record_promoted_value (rtx_insn *, rtx);
490 static bool unmentioned_reg_p (rtx, rtx);
491 static void record_truncated_values (rtx *, void *);
492 static bool reg_truncated_to_mode (machine_mode, const_rtx);
493 static rtx gen_lowpart_or_truncate (machine_mode, rtx);
494 \f
495
496 /* It is not safe to use ordinary gen_lowpart in combine.
497 See comments in gen_lowpart_for_combine. */
498 #undef RTL_HOOKS_GEN_LOWPART
499 #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
500
501 /* Our implementation of gen_lowpart never emits a new pseudo. */
502 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
503 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
504
505 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
506 #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
507
508 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
509 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
510
511 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
512 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
513
514 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
515
516 \f
517 /* Convenience wrapper for the canonicalize_comparison target hook.
518 Target hooks cannot use enum rtx_code. */
519 static inline void
520 target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
521 bool op0_preserve_value)
522 {
523 int code_int = (int)*code;
524 targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
525 *code = (enum rtx_code)code_int;
526 }
527
528 /* Try to split PATTERN found in INSN. This returns NULL_RTX if
529 PATTERN cannot be split. Otherwise, it returns an insn sequence.
530 This is a wrapper around split_insns which ensures that the
531 reg_stat vector is made larger if the splitter creates a new
532 register. */
533
534 static rtx_insn *
535 combine_split_insns (rtx pattern, rtx_insn *insn)
536 {
537 rtx_insn *ret;
538 unsigned int nregs;
539
540 ret = split_insns (pattern, insn);
541 nregs = max_reg_num ();
542 if (nregs > reg_stat.length ())
543 reg_stat.safe_grow_cleared (nregs);
544 return ret;
545 }
546
547 /* This is used by find_single_use to locate an rtx in LOC that
548 contains exactly one use of DEST, which is typically either a REG
549 or CC0. It returns a pointer to the innermost rtx expression
550 containing DEST. Appearances of DEST that are being used to
551 totally replace it are not counted. */
552
553 static rtx *
554 find_single_use_1 (rtx dest, rtx *loc)
555 {
556 rtx x = *loc;
557 enum rtx_code code = GET_CODE (x);
558 rtx *result = NULL;
559 rtx *this_result;
560 int i;
561 const char *fmt;
562
563 switch (code)
564 {
565 case CONST:
566 case LABEL_REF:
567 case SYMBOL_REF:
568 CASE_CONST_ANY:
569 case CLOBBER:
570 return 0;
571
572 case SET:
573 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
574 of a REG that occupies all of the REG, the insn uses DEST if
575 it is mentioned in the destination or the source. Otherwise, we
576 need only check the source. */
577 if (GET_CODE (SET_DEST (x)) != CC0
578 && GET_CODE (SET_DEST (x)) != PC
579 && !REG_P (SET_DEST (x))
580 && ! (GET_CODE (SET_DEST (x)) == SUBREG
581 && REG_P (SUBREG_REG (SET_DEST (x)))
582 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
583 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
584 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
585 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
586 break;
587
588 return find_single_use_1 (dest, &SET_SRC (x));
589
590 case MEM:
591 case SUBREG:
592 return find_single_use_1 (dest, &XEXP (x, 0));
593
594 default:
595 break;
596 }
597
598 /* If it wasn't one of the common cases above, check each expression and
599 vector of this code. Look for a unique usage of DEST. */
600
601 fmt = GET_RTX_FORMAT (code);
602 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
603 {
604 if (fmt[i] == 'e')
605 {
606 if (dest == XEXP (x, i)
607 || (REG_P (dest) && REG_P (XEXP (x, i))
608 && REGNO (dest) == REGNO (XEXP (x, i))))
609 this_result = loc;
610 else
611 this_result = find_single_use_1 (dest, &XEXP (x, i));
612
613 if (result == NULL)
614 result = this_result;
615 else if (this_result)
616 /* Duplicate usage. */
617 return NULL;
618 }
619 else if (fmt[i] == 'E')
620 {
621 int j;
622
623 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
624 {
625 if (XVECEXP (x, i, j) == dest
626 || (REG_P (dest)
627 && REG_P (XVECEXP (x, i, j))
628 && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
629 this_result = loc;
630 else
631 this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
632
633 if (result == NULL)
634 result = this_result;
635 else if (this_result)
636 return NULL;
637 }
638 }
639 }
640
641 return result;
642 }
643
644
645 /* See if DEST, produced in INSN, is used only a single time in the
646 sequel. If so, return a pointer to the innermost rtx expression in which
647 it is used.
648
649 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
650
651 If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
652 care about REG_DEAD notes or LOG_LINKS.
653
654 Otherwise, we find the single use by finding an insn that has a
655 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
656 only referenced once in that insn, we know that it must be the first
657 and last insn referencing DEST. */
658
659 static rtx *
660 find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
661 {
662 basic_block bb;
663 rtx_insn *next;
664 rtx *result;
665 struct insn_link *link;
666
667 if (dest == cc0_rtx)
668 {
669 next = NEXT_INSN (insn);
670 if (next == 0
671 || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
672 return 0;
673
674 result = find_single_use_1 (dest, &PATTERN (next));
675 if (result && ploc)
676 *ploc = next;
677 return result;
678 }
679
680 if (!REG_P (dest))
681 return 0;
682
683 bb = BLOCK_FOR_INSN (insn);
684 for (next = NEXT_INSN (insn);
685 next && BLOCK_FOR_INSN (next) == bb;
686 next = NEXT_INSN (next))
687 if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
688 {
689 FOR_EACH_LOG_LINK (link, next)
690 if (link->insn == insn && link->regno == REGNO (dest))
691 break;
692
693 if (link)
694 {
695 result = find_single_use_1 (dest, &PATTERN (next));
696 if (ploc)
697 *ploc = next;
698 return result;
699 }
700 }
701
702 return 0;
703 }
704 \f
705 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
706 insn. The substitution can be undone by undo_all. If INTO is already
707 set to NEWVAL, do not record this change. Because computing NEWVAL might
708 also call SUBST, we have to compute it before we put anything into
709 the undo table. */
710
711 static void
712 do_SUBST (rtx *into, rtx newval)
713 {
714 struct undo *buf;
715 rtx oldval = *into;
716
717 if (oldval == newval)
718 return;
719
720 /* We'd like to catch as many invalid transformations here as
721 possible. Unfortunately, there are way too many mode changes
722 that are perfectly valid, so we'd waste too much effort for
723 little gain doing the checks here. Focus on catching invalid
724 transformations involving integer constants. */
725 if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
726 && CONST_INT_P (newval))
727 {
728 /* Sanity check that we're replacing oldval with a CONST_INT
729 that is a valid sign-extension for the original mode. */
730 gcc_assert (INTVAL (newval)
731 == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
732
733 /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
734 CONST_INT is not valid, because after the replacement, the
735 original mode would be gone. Unfortunately, we can't tell
736 when do_SUBST is called to replace the operand thereof, so we
737 perform this test on oldval instead, checking whether an
738 invalid replacement took place before we got here. */
739 gcc_assert (!(GET_CODE (oldval) == SUBREG
740 && CONST_INT_P (SUBREG_REG (oldval))));
741 gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
742 && CONST_INT_P (XEXP (oldval, 0))));
743 }
744
745 if (undobuf.frees)
746 buf = undobuf.frees, undobuf.frees = buf->next;
747 else
748 buf = XNEW (struct undo);
749
750 buf->kind = UNDO_RTX;
751 buf->where.r = into;
752 buf->old_contents.r = oldval;
753 *into = newval;
754
755 buf->next = undobuf.undos, undobuf.undos = buf;
756 }
757
758 #define SUBST(INTO, NEWVAL) do_SUBST (&(INTO), (NEWVAL))
759
760 /* Similar to SUBST, but NEWVAL is an int expression. Note that using this
761 to substitute a HOST_WIDE_INT value (including CONST_INT) is
762 not safe. */
763
764 static void
765 do_SUBST_INT (int *into, int newval)
766 {
767 struct undo *buf;
768 int oldval = *into;
769
770 if (oldval == newval)
771 return;
772
773 if (undobuf.frees)
774 buf = undobuf.frees, undobuf.frees = buf->next;
775 else
776 buf = XNEW (struct undo);
777
778 buf->kind = UNDO_INT;
779 buf->where.i = into;
780 buf->old_contents.i = oldval;
781 *into = newval;
782
783 buf->next = undobuf.undos, undobuf.undos = buf;
784 }
785
786 #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT (&(INTO), (NEWVAL))
787
788 /* Similar to SUBST, but just substitute the mode. This is used when
789 changing the mode of a pseudo-register, so that any other
790 references to the entry in the regno_reg_rtx array will change as
791 well. */
792
793 static void
794 do_SUBST_MODE (rtx *into, machine_mode newval)
795 {
796 struct undo *buf;
797 machine_mode oldval = GET_MODE (*into);
798
799 if (oldval == newval)
800 return;
801
802 if (undobuf.frees)
803 buf = undobuf.frees, undobuf.frees = buf->next;
804 else
805 buf = XNEW (struct undo);
806
807 buf->kind = UNDO_MODE;
808 buf->where.r = into;
809 buf->old_contents.m = oldval;
810 adjust_reg_mode (*into, newval);
811
812 buf->next = undobuf.undos, undobuf.undos = buf;
813 }
814
815 #define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE (&(INTO), (NEWVAL))
816
817 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */
818
819 static void
820 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
821 {
822 struct undo *buf;
823 struct insn_link * oldval = *into;
824
825 if (oldval == newval)
826 return;
827
828 if (undobuf.frees)
829 buf = undobuf.frees, undobuf.frees = buf->next;
830 else
831 buf = XNEW (struct undo);
832
833 buf->kind = UNDO_LINKS;
834 buf->where.l = into;
835 buf->old_contents.l = oldval;
836 *into = newval;
837
838 buf->next = undobuf.undos, undobuf.undos = buf;
839 }
840
841 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
842 \f
843 /* Subroutine of try_combine. Determine whether the replacement patterns
844 NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_rtx_cost
845 than the original sequence I0, I1, I2, I3 and undobuf.other_insn. Note
846 that I0, I1 and/or NEWI2PAT may be NULL_RTX. Similarly, NEWOTHERPAT and
847 undobuf.other_insn may also both be NULL_RTX. Return false if the cost
848 of all the instructions can be estimated and the replacements are more
849 expensive than the original sequence. */
850
851 static bool
852 combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
853 rtx newpat, rtx newi2pat, rtx newotherpat)
854 {
855 int i0_cost, i1_cost, i2_cost, i3_cost;
856 int new_i2_cost, new_i3_cost;
857 int old_cost, new_cost;
858
859 /* Lookup the original insn_rtx_costs. */
860 i2_cost = INSN_COST (i2);
861 i3_cost = INSN_COST (i3);
862
863 if (i1)
864 {
865 i1_cost = INSN_COST (i1);
866 if (i0)
867 {
868 i0_cost = INSN_COST (i0);
869 old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
870 ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
871 }
872 else
873 {
874 old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
875 ? i1_cost + i2_cost + i3_cost : 0);
876 i0_cost = 0;
877 }
878 }
879 else
880 {
881 old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
882 i1_cost = i0_cost = 0;
883 }
884
885 /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
886 correct that. */
887 if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
888 old_cost -= i1_cost;
889
890
891 /* Calculate the replacement insn_rtx_costs. */
892 new_i3_cost = insn_rtx_cost (newpat, optimize_this_for_speed_p);
893 if (newi2pat)
894 {
895 new_i2_cost = insn_rtx_cost (newi2pat, optimize_this_for_speed_p);
896 new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
897 ? new_i2_cost + new_i3_cost : 0;
898 }
899 else
900 {
901 new_cost = new_i3_cost;
902 new_i2_cost = 0;
903 }
904
905 if (undobuf.other_insn)
906 {
907 int old_other_cost, new_other_cost;
908
909 old_other_cost = INSN_COST (undobuf.other_insn);
910 new_other_cost = insn_rtx_cost (newotherpat, optimize_this_for_speed_p);
911 if (old_other_cost > 0 && new_other_cost > 0)
912 {
913 old_cost += old_other_cost;
914 new_cost += new_other_cost;
915 }
916 else
917 old_cost = 0;
918 }
919
920 /* Disallow this combination if both new_cost and old_cost are greater than
921 zero, and new_cost is greater than old_cost. */
922 int reject = old_cost > 0 && new_cost > old_cost;
923
924 if (dump_file)
925 {
926 fprintf (dump_file, "%s combination of insns ",
927 reject ? "rejecting" : "allowing");
928 if (i0)
929 fprintf (dump_file, "%d, ", INSN_UID (i0));
930 if (i1 && INSN_UID (i1) != INSN_UID (i2))
931 fprintf (dump_file, "%d, ", INSN_UID (i1));
932 fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));
933
934 fprintf (dump_file, "original costs ");
935 if (i0)
936 fprintf (dump_file, "%d + ", i0_cost);
937 if (i1 && INSN_UID (i1) != INSN_UID (i2))
938 fprintf (dump_file, "%d + ", i1_cost);
939 fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);
940
941 if (newi2pat)
942 fprintf (dump_file, "replacement costs %d + %d = %d\n",
943 new_i2_cost, new_i3_cost, new_cost);
944 else
945 fprintf (dump_file, "replacement cost %d\n", new_cost);
946 }
947
948 if (reject)
949 return false;
950
951 /* Update the uid_insn_cost array with the replacement costs. */
952 INSN_COST (i2) = new_i2_cost;
953 INSN_COST (i3) = new_i3_cost;
954 if (i1)
955 {
956 INSN_COST (i1) = 0;
957 if (i0)
958 INSN_COST (i0) = 0;
959 }
960
961 return true;
962 }
963
964
965 /* Delete any insns that copy a register to itself. */
966
967 static void
968 delete_noop_moves (void)
969 {
970 rtx_insn *insn, *next;
971 basic_block bb;
972
973 FOR_EACH_BB_FN (bb, cfun)
974 {
975 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
976 {
977 next = NEXT_INSN (insn);
978 if (INSN_P (insn) && noop_move_p (insn))
979 {
980 if (dump_file)
981 fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
982
983 delete_insn_and_edges (insn);
984 }
985 }
986 }
987 }
988
989 \f
990 /* Return false if we do not want to (or cannot) combine DEF. */
991 static bool
992 can_combine_def_p (df_ref def)
993 {
994 /* Do not consider if it is pre/post modification in MEM. */
995 if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
996 return false;
997
998 unsigned int regno = DF_REF_REGNO (def);
999
1000 /* Do not combine frame pointer adjustments. */
1001 if ((regno == FRAME_POINTER_REGNUM
1002 && (!reload_completed || frame_pointer_needed))
1003 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
1004 && regno == HARD_FRAME_POINTER_REGNUM
1005 && (!reload_completed || frame_pointer_needed))
1006 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1007 && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
1008 return false;
1009
1010 return true;
1011 }
1012
1013 /* Return false if we do not want to (or cannot) combine USE. */
1014 static bool
1015 can_combine_use_p (df_ref use)
1016 {
1017 /* Do not consider the usage of the stack pointer by function call. */
1018 if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1019 return false;
1020
1021 return true;
1022 }
1023
1024 /* Fill in log links field for all insns. */
1025
1026 static void
1027 create_log_links (void)
1028 {
1029 basic_block bb;
1030 rtx_insn **next_use;
1031 rtx_insn *insn;
1032 df_ref def, use;
1033
1034 next_use = XCNEWVEC (rtx_insn *, max_reg_num ());
1035
1036 /* Pass through each block from the end, recording the uses of each
1037 register and establishing log links when def is encountered.
1038 Note that we do not clear the next_use array in order to save time,
1039 so we have to test whether the use is in the same basic block as def.
1040
1041 There are a few cases below when we do not consider the definition or
1042 usage -- these are taken from what the original flow.c did. Don't ask me why it is
1043 done this way; I don't know and if it works, I don't want to know. */
1044
1045 FOR_EACH_BB_FN (bb, cfun)
1046 {
1047 FOR_BB_INSNS_REVERSE (bb, insn)
1048 {
1049 if (!NONDEBUG_INSN_P (insn))
1050 continue;
1051
1052 /* Log links are created only once. */
1053 gcc_assert (!LOG_LINKS (insn));
1054
1055 FOR_EACH_INSN_DEF (def, insn)
1056 {
1057 unsigned int regno = DF_REF_REGNO (def);
1058 rtx_insn *use_insn;
1059
1060 if (!next_use[regno])
1061 continue;
1062
1063 if (!can_combine_def_p (def))
1064 continue;
1065
1066 use_insn = next_use[regno];
1067 next_use[regno] = NULL;
1068
1069 if (BLOCK_FOR_INSN (use_insn) != bb)
1070 continue;
1071
1072 /* flow.c claimed:
1073
1074 We don't build a LOG_LINK for hard registers contained
1075 in ASM_OPERANDs. If these registers get replaced,
1076 we might wind up changing the semantics of the insn,
1077 even if reload can make what appear to be valid
1078 assignments later. */
1079 if (regno < FIRST_PSEUDO_REGISTER
1080 && asm_noperands (PATTERN (use_insn)) >= 0)
1081 continue;
1082
1083 /* Don't add duplicate links between instructions. */
1084 struct insn_link *links;
1085 FOR_EACH_LOG_LINK (links, use_insn)
1086 if (insn == links->insn && regno == links->regno)
1087 break;
1088
1089 if (!links)
1090 LOG_LINKS (use_insn)
1091 = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
1092 }
1093
1094 FOR_EACH_INSN_USE (use, insn)
1095 if (can_combine_use_p (use))
1096 next_use[DF_REF_REGNO (use)] = insn;
1097 }
1098 }
1099
1100 free (next_use);
1101 }
1102
1103 /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return
1104 true if we found a LOG_LINK that proves that A feeds B. This only works
1105 if there are no instructions between A and B which could have a link
1106 depending on A, since in that case we would not record a link for B.
1107 We also check the implicit dependency created by a cc0 setter/user
1108 pair. */
1109
1110 static bool
1111 insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
1112 {
1113 struct insn_link *links;
1114 FOR_EACH_LOG_LINK (links, b)
1115 if (links->insn == a)
1116 return true;
1117 if (HAVE_cc0 && sets_cc0_p (a))
1118 return true;
1119 return false;
1120 }
1121 \f
1122 /* Main entry point for combiner. F is the first insn of the function.
1123 NREGS is the first unused pseudo-reg number.
1124
1125 Return nonzero if the combiner has turned an indirect jump
1126 instruction into a direct jump. */
1127 static int
1128 combine_instructions (rtx_insn *f, unsigned int nregs)
1129 {
1130 rtx_insn *insn, *next;
1131 rtx_insn *prev;
1132 struct insn_link *links, *nextlinks;
1133 rtx_insn *first;
1134 basic_block last_bb;
1135
1136 int new_direct_jump_p = 0;
1137
1138 for (first = f; first && !NONDEBUG_INSN_P (first); )
1139 first = NEXT_INSN (first);
1140 if (!first)
1141 return 0;
1142
1143 combine_attempts = 0;
1144 combine_merges = 0;
1145 combine_extras = 0;
1146 combine_successes = 0;
1147
1148 rtl_hooks = combine_rtl_hooks;
1149
1150 reg_stat.safe_grow_cleared (nregs);
1151
1152 init_recog_no_volatile ();
1153
1154 /* Allocate array for insn info. */
1155 max_uid_known = get_max_uid ();
1156 uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1157 uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1158 gcc_obstack_init (&insn_link_obstack);
1159
1160 nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();
1161
1162 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1163 problems when, for example, we have j <<= 1 in a loop. */
1164
1165 nonzero_sign_valid = 0;
1166 label_tick = label_tick_ebb_start = 1;
1167
1168 /* Scan all SETs and see if we can deduce anything about what
1169 bits are known to be zero for some registers and how many copies
1170 of the sign bit are known to exist for those registers.
1171
1172 Also set any known values so that we can use it while searching
1173 for what bits are known to be set. */
1174
1175 setup_incoming_promotions (first);
1176 /* Allow the entry block and the first block to fall into the same EBB.
1177 Conceptually the incoming promotions are assigned to the entry block. */
1178 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1179
1180 create_log_links ();
1181 FOR_EACH_BB_FN (this_basic_block, cfun)
1182 {
1183 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1184 last_call_luid = 0;
1185 mem_last_set = -1;
1186
1187 label_tick++;
1188 if (!single_pred_p (this_basic_block)
1189 || single_pred (this_basic_block) != last_bb)
1190 label_tick_ebb_start = label_tick;
1191 last_bb = this_basic_block;
1192
1193 FOR_BB_INSNS (this_basic_block, insn)
1194 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1195 {
1196 rtx links;
1197
1198 subst_low_luid = DF_INSN_LUID (insn);
1199 subst_insn = insn;
1200
1201 note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1202 insn);
1203 record_dead_and_set_regs (insn);
1204
1205 if (AUTO_INC_DEC)
1206 for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1207 if (REG_NOTE_KIND (links) == REG_INC)
1208 set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1209 insn);
1210
1211 /* Record the current insn_rtx_cost of this instruction. */
1212 if (NONJUMP_INSN_P (insn))
1213 INSN_COST (insn) = insn_rtx_cost (PATTERN (insn),
1214 optimize_this_for_speed_p);
1215 if (dump_file)
1216 {
1217 fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
1218 dump_insn_slim (dump_file, insn);
1219 }
1220 }
1221 }
1222
1223 nonzero_sign_valid = 1;
1224
1225 /* Now scan all the insns in forward order. */
1226 label_tick = label_tick_ebb_start = 1;
1227 init_reg_last ();
1228 setup_incoming_promotions (first);
1229 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1230 int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);
1231
1232 FOR_EACH_BB_FN (this_basic_block, cfun)
1233 {
1234 rtx_insn *last_combined_insn = NULL;
1235 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1236 last_call_luid = 0;
1237 mem_last_set = -1;
1238
1239 label_tick++;
1240 if (!single_pred_p (this_basic_block)
1241 || single_pred (this_basic_block) != last_bb)
1242 label_tick_ebb_start = label_tick;
1243 last_bb = this_basic_block;
1244
1245 rtl_profile_for_bb (this_basic_block);
1246 for (insn = BB_HEAD (this_basic_block);
1247 insn != NEXT_INSN (BB_END (this_basic_block));
1248 insn = next ? next : NEXT_INSN (insn))
1249 {
1250 next = 0;
1251 if (!NONDEBUG_INSN_P (insn))
1252 continue;
1253
1254 while (last_combined_insn
1255 && (!NONDEBUG_INSN_P (last_combined_insn)
1256 || last_combined_insn->deleted ()))
1257 last_combined_insn = PREV_INSN (last_combined_insn);
1258 if (last_combined_insn == NULL_RTX
1259 || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1260 || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1261 last_combined_insn = insn;
1262
1263 /* See if we know about function return values before this
1264 insn based upon SUBREG flags. */
1265 check_promoted_subreg (insn, PATTERN (insn));
1266
1267 /* See if we can find hardregs and subregs of pseudos in
1268 narrower modes. This could help turn TRUNCATEs
1269 into SUBREGs. */
1270 note_uses (&PATTERN (insn), record_truncated_values, NULL);
1271
1272 /* Try this insn with each insn it links back to. */
1273
1274 FOR_EACH_LOG_LINK (links, insn)
1275 if ((next = try_combine (insn, links->insn, NULL,
1276 NULL, &new_direct_jump_p,
1277 last_combined_insn)) != 0)
1278 {
1279 statistics_counter_event (cfun, "two-insn combine", 1);
1280 goto retry;
1281 }
1282
1283 /* Try each sequence of three linked insns ending with this one. */
1284
1285 if (max_combine >= 3)
1286 FOR_EACH_LOG_LINK (links, insn)
1287 {
1288 rtx_insn *link = links->insn;
1289
1290 /* If the linked insn has been replaced by a note, then there
1291 is no point in pursuing this chain any further. */
1292 if (NOTE_P (link))
1293 continue;
1294
1295 FOR_EACH_LOG_LINK (nextlinks, link)
1296 if ((next = try_combine (insn, link, nextlinks->insn,
1297 NULL, &new_direct_jump_p,
1298 last_combined_insn)) != 0)
1299 {
1300 statistics_counter_event (cfun, "three-insn combine", 1);
1301 goto retry;
1302 }
1303 }
1304
1305 /* Try to combine a jump insn that uses CC0
1306 with a preceding insn that sets CC0, and maybe with its
1307 logical predecessor as well.
1308 This is how we make decrement-and-branch insns.
1309 We need this special code because data flow connections
1310 via CC0 do not get entered in LOG_LINKS. */
1311
1312 if (HAVE_cc0
1313 && JUMP_P (insn)
1314 && (prev = prev_nonnote_insn (insn)) != 0
1315 && NONJUMP_INSN_P (prev)
1316 && sets_cc0_p (PATTERN (prev)))
1317 {
1318 if ((next = try_combine (insn, prev, NULL, NULL,
1319 &new_direct_jump_p,
1320 last_combined_insn)) != 0)
1321 goto retry;
1322
1323 FOR_EACH_LOG_LINK (nextlinks, prev)
1324 if ((next = try_combine (insn, prev, nextlinks->insn,
1325 NULL, &new_direct_jump_p,
1326 last_combined_insn)) != 0)
1327 goto retry;
1328 }
1329
1330 /* Do the same for an insn that explicitly references CC0. */
1331 if (HAVE_cc0 && NONJUMP_INSN_P (insn)
1332 && (prev = prev_nonnote_insn (insn)) != 0
1333 && NONJUMP_INSN_P (prev)
1334 && sets_cc0_p (PATTERN (prev))
1335 && GET_CODE (PATTERN (insn)) == SET
1336 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1337 {
1338 if ((next = try_combine (insn, prev, NULL, NULL,
1339 &new_direct_jump_p,
1340 last_combined_insn)) != 0)
1341 goto retry;
1342
1343 FOR_EACH_LOG_LINK (nextlinks, prev)
1344 if ((next = try_combine (insn, prev, nextlinks->insn,
1345 NULL, &new_direct_jump_p,
1346 last_combined_insn)) != 0)
1347 goto retry;
1348 }
1349
1350 /* Finally, see if any of the insns that this insn links to
1351 explicitly references CC0. If so, try this insn, that insn,
1352 and its predecessor if it sets CC0. */
1353 if (HAVE_cc0)
1354 {
1355 FOR_EACH_LOG_LINK (links, insn)
1356 if (NONJUMP_INSN_P (links->insn)
1357 && GET_CODE (PATTERN (links->insn)) == SET
1358 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1359 && (prev = prev_nonnote_insn (links->insn)) != 0
1360 && NONJUMP_INSN_P (prev)
1361 && sets_cc0_p (PATTERN (prev))
1362 && (next = try_combine (insn, links->insn,
1363 prev, NULL, &new_direct_jump_p,
1364 last_combined_insn)) != 0)
1365 goto retry;
1366 }
1367
1368 /* Try combining an insn with two different insns whose results it
1369 uses. */
1370 if (max_combine >= 3)
1371 FOR_EACH_LOG_LINK (links, insn)
1372 for (nextlinks = links->next; nextlinks;
1373 nextlinks = nextlinks->next)
1374 if ((next = try_combine (insn, links->insn,
1375 nextlinks->insn, NULL,
1376 &new_direct_jump_p,
1377 last_combined_insn)) != 0)
1378
1379 {
1380 statistics_counter_event (cfun, "three-insn combine", 1);
1381 goto retry;
1382 }
1383
1384 /* Try four-instruction combinations. */
1385 if (max_combine >= 4)
1386 FOR_EACH_LOG_LINK (links, insn)
1387 {
1388 struct insn_link *next1;
1389 rtx_insn *link = links->insn;
1390
1391 /* If the linked insn has been replaced by a note, then there
1392 is no point in pursuing this chain any further. */
1393 if (NOTE_P (link))
1394 continue;
1395
1396 FOR_EACH_LOG_LINK (next1, link)
1397 {
1398 rtx_insn *link1 = next1->insn;
1399 if (NOTE_P (link1))
1400 continue;
1401 /* I0 -> I1 -> I2 -> I3. */
1402 FOR_EACH_LOG_LINK (nextlinks, link1)
1403 if ((next = try_combine (insn, link, link1,
1404 nextlinks->insn,
1405 &new_direct_jump_p,
1406 last_combined_insn)) != 0)
1407 {
1408 statistics_counter_event (cfun, "four-insn combine", 1);
1409 goto retry;
1410 }
1411 /* I0, I1 -> I2, I2 -> I3. */
1412 for (nextlinks = next1->next; nextlinks;
1413 nextlinks = nextlinks->next)
1414 if ((next = try_combine (insn, link, link1,
1415 nextlinks->insn,
1416 &new_direct_jump_p,
1417 last_combined_insn)) != 0)
1418 {
1419 statistics_counter_event (cfun, "four-insn combine", 1);
1420 goto retry;
1421 }
1422 }
1423
1424 for (next1 = links->next; next1; next1 = next1->next)
1425 {
1426 rtx_insn *link1 = next1->insn;
1427 if (NOTE_P (link1))
1428 continue;
1429 /* I0 -> I2; I1, I2 -> I3. */
1430 FOR_EACH_LOG_LINK (nextlinks, link)
1431 if ((next = try_combine (insn, link, link1,
1432 nextlinks->insn,
1433 &new_direct_jump_p,
1434 last_combined_insn)) != 0)
1435 {
1436 statistics_counter_event (cfun, "four-insn combine", 1);
1437 goto retry;
1438 }
1439 /* I0 -> I1; I1, I2 -> I3. */
1440 FOR_EACH_LOG_LINK (nextlinks, link1)
1441 if ((next = try_combine (insn, link, link1,
1442 nextlinks->insn,
1443 &new_direct_jump_p,
1444 last_combined_insn)) != 0)
1445 {
1446 statistics_counter_event (cfun, "four-insn combine", 1);
1447 goto retry;
1448 }
1449 }
1450 }
1451
1452 /* Try this insn with each REG_EQUAL note it links back to. */
1453 FOR_EACH_LOG_LINK (links, insn)
1454 {
1455 rtx set, note;
1456 rtx_insn *temp = links->insn;
1457 if ((set = single_set (temp)) != 0
1458 && (note = find_reg_equal_equiv_note (temp)) != 0
1459 && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1460 /* Avoid using a register that may already have been marked
1461 dead by an earlier instruction. */
1462 && ! unmentioned_reg_p (note, SET_SRC (set))
1463 && (GET_MODE (note) == VOIDmode
1464 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1465 : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
1466 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
1467 || (GET_MODE (XEXP (SET_DEST (set), 0))
1468 == GET_MODE (note))))))
1469 {
1470 /* Temporarily replace the set's source with the
1471 contents of the REG_EQUAL note. The insn will
1472 be deleted or recognized by try_combine. */
1473 rtx orig_src = SET_SRC (set);
1474 rtx orig_dest = SET_DEST (set);
1475 if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
1476 SET_DEST (set) = XEXP (SET_DEST (set), 0);
1477 SET_SRC (set) = note;
1478 i2mod = temp;
1479 i2mod_old_rhs = copy_rtx (orig_src);
1480 i2mod_new_rhs = copy_rtx (note);
1481 next = try_combine (insn, i2mod, NULL, NULL,
1482 &new_direct_jump_p,
1483 last_combined_insn);
1484 i2mod = NULL;
1485 if (next)
1486 {
1487 statistics_counter_event (cfun, "insn-with-note combine", 1);
1488 goto retry;
1489 }
1490 SET_SRC (set) = orig_src;
1491 SET_DEST (set) = orig_dest;
1492 }
1493 }
1494
1495 if (!NOTE_P (insn))
1496 record_dead_and_set_regs (insn);
1497
1498 retry:
1499 ;
1500 }
1501 }
1502
1503 default_rtl_profile ();
1504 clear_bb_flags ();
1505 new_direct_jump_p |= purge_all_dead_edges ();
1506 delete_noop_moves ();
1507
1508 /* Clean up. */
1509 obstack_free (&insn_link_obstack, NULL);
1510 free (uid_log_links);
1511 free (uid_insn_cost);
1512 reg_stat.release ();
1513
1514 {
1515 struct undo *undo, *next;
1516 for (undo = undobuf.frees; undo; undo = next)
1517 {
1518 next = undo->next;
1519 free (undo);
1520 }
1521 undobuf.frees = 0;
1522 }
1523
1524 total_attempts += combine_attempts;
1525 total_merges += combine_merges;
1526 total_extras += combine_extras;
1527 total_successes += combine_successes;
1528
1529 nonzero_sign_valid = 0;
1530 rtl_hooks = general_rtl_hooks;
1531
1532 /* Make recognizer allow volatile MEMs again. */
1533 init_recog ();
1534
1535 return new_direct_jump_p;
1536 }
1537
1538 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
1539
1540 static void
1541 init_reg_last (void)
1542 {
1543 unsigned int i;
1544 reg_stat_type *p;
1545
1546 FOR_EACH_VEC_ELT (reg_stat, i, p)
1547 memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1548 }
1549 \f
1550 /* Set up any promoted values for incoming argument registers. */
1551
1552 static void
1553 setup_incoming_promotions (rtx_insn *first)
1554 {
1555 tree arg;
1556 bool strictly_local = false;
1557
1558 for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1559 arg = DECL_CHAIN (arg))
1560 {
1561 rtx x, reg = DECL_INCOMING_RTL (arg);
1562 int uns1, uns3;
1563 machine_mode mode1, mode2, mode3, mode4;
1564
1565 /* Only continue if the incoming argument is in a register. */
1566 if (!REG_P (reg))
1567 continue;
1568
1569 /* Determine, if possible, whether all call sites of the current
1570 function lie within the current compilation unit. (This does
1571 take into account the exporting of a function via taking its
1572 address, and so forth.) */
1573 strictly_local = cgraph_node::local_info (current_function_decl)->local;
1574
1575 /* The mode and signedness of the argument before any promotions happen
1576 (equal to the mode of the pseudo holding it at that stage). */
1577 mode1 = TYPE_MODE (TREE_TYPE (arg));
1578 uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1579
1580 /* The mode and signedness of the argument after any source language and
1581 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1582 mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1583 uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1584
1585 /* The mode and signedness of the argument as it is actually passed,
1586 see assign_parm_setup_reg in function.c. */
1587 mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
1588 TREE_TYPE (cfun->decl), 0);
1589
1590 /* The mode of the register in which the argument is being passed. */
1591 mode4 = GET_MODE (reg);
1592
1593 /* Eliminate sign extensions in the callee when:
1594 (a) A mode promotion has occurred; */
1595 if (mode1 == mode3)
1596 continue;
1597 /* (b) The mode of the register is the same as the mode of
1598 the argument as it is passed; */
1599 if (mode3 != mode4)
1600 continue;
1601 /* (c) There's no language level extension; */
1602 if (mode1 == mode2)
1603 ;
1604 /* (c.1) All callers are from the current compilation unit. If that's
1605 the case we don't have to rely on an ABI, we only have to know
1606 what we're generating right now, and we know that we will do the
1607 mode1 to mode2 promotion with the given sign. */
1608 else if (!strictly_local)
1609 continue;
1610 /* (c.2) The combination of the two promotions is useful. This is
1611 true when the signs match, or if the first promotion is unsigned.
1612 In the latter case, (sign_extend (zero_extend x)) is the same as
1613 (zero_extend (zero_extend x)), so make sure to force UNS3 true. */
1614 else if (uns1)
1615 uns3 = true;
1616 else if (uns3)
1617 continue;
1618
1619 /* Record that the value was promoted from mode1 to mode3,
1620 so that any sign extension at the head of the current
1621 function may be eliminated. */
1622 x = gen_rtx_CLOBBER (mode1, const0_rtx);
1623 x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1624 record_value_for_reg (reg, first, x);
1625 }
1626 }
1627
1628 /* If MODE has a precision lower than PREC and SRC is a non-negative constant
1629 that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
1630 because some machines (maybe most) will actually do the sign-extension and
1631 this is the conservative approach.
1632
1633 ??? For 2.5, try to tighten up the MD files in this regard instead of this
1634 kludge. */
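/* Illustrative example (not from the original comment): with SRC 0x8000 in
   HImode and PREC 64, val_signbit_known_set_p holds, so SRC is rewritten as
   0xffffffffffff8000, i.e. the value such a machine would actually load. */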
1635
1636 static rtx
1637 sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
1638 {
1639 scalar_int_mode int_mode;
1640 if (CONST_INT_P (src)
1641 && is_a <scalar_int_mode> (mode, &int_mode)
1642 && GET_MODE_PRECISION (int_mode) < prec
1643 && INTVAL (src) > 0
1644 && val_signbit_known_set_p (int_mode, INTVAL (src)))
1645 src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));
1646
1647 return src;
1648 }
1649
1650 /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
1651 and SET. */
1652
1653 static void
1654 update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
1655 rtx x)
1656 {
1657 rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
1658 unsigned HOST_WIDE_INT bits = 0;
1659 rtx reg_equal = NULL, src = SET_SRC (set);
1660 unsigned int num = 0;
1661
1662 if (reg_equal_note)
1663 reg_equal = XEXP (reg_equal_note, 0);
1664
1665 if (SHORT_IMMEDIATES_SIGN_EXTEND)
1666 {
1667 src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
1668 if (reg_equal)
1669 reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
1670 }
1671
1672 /* Don't call nonzero_bits if it cannot change anything. */
1673 if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
1674 {
1675 bits = nonzero_bits (src, nonzero_bits_mode);
1676 if (reg_equal && bits)
1677 bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
1678 rsp->nonzero_bits |= bits;
1679 }
1680
1681 /* Don't call num_sign_bit_copies if it cannot change anything. */
1682 if (rsp->sign_bit_copies != 1)
1683 {
1684 num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1685 if (reg_equal && num != GET_MODE_PRECISION (GET_MODE (x)))
1686 {
1687 unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
1688 if (num == 0 || numeq > num)
1689 num = numeq;
1690 }
1691 if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
1692 rsp->sign_bit_copies = num;
1693 }
1694 }
1695
1696 /* Called via note_stores. If X is a pseudo that is narrower than
1697 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1698
1699 If we are setting only a portion of X and we can't figure out what
1700 portion, assume all bits will be used since we don't know what will
1701 be happening.
1702
1703 Similarly, set how many bits of X are known to be copies of the sign bit
1704 at all locations in the function. This is the smallest number implied
1705 by any set of X. */
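/* For example, (set (reg:SI 100) (const_int 6)) lets us record that only
bits 1 and 2 of pseudo 100 can be nonzero, whereas a CLOBBER of the
register, or a partial store that expand_field_assignment cannot turn
into a full assignment, falls back to the conservative values: every bit
possibly nonzero and only one known sign-bit copy. */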
1706
1707 static void
1708 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1709 {
1710 rtx_insn *insn = (rtx_insn *) data;
1711 scalar_int_mode mode;
1712
1713 if (REG_P (x)
1714 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1715 /* If this register is undefined at the start of the function, we can't
1716 say what its contents were. */
1717 && ! REGNO_REG_SET_P
1718 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
1719 && is_a <scalar_int_mode> (GET_MODE (x), &mode)
1720 && HWI_COMPUTABLE_MODE_P (mode))
1721 {
1722 reg_stat_type *rsp = &reg_stat[REGNO (x)];
1723
1724 if (set == 0 || GET_CODE (set) == CLOBBER)
1725 {
1726 rsp->nonzero_bits = GET_MODE_MASK (mode);
1727 rsp->sign_bit_copies = 1;
1728 return;
1729 }
1730
1731 /* If this register is being initialized using itself, and the
1732 register is uninitialized in this basic block, and there are
1733 no LOG_LINKS which set the register, then part of the
1734 register is uninitialized. In that case we can't assume
1735 anything about the number of nonzero bits.
1736
1737 ??? We could do better if we checked this in
1738 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1739 could avoid making assumptions about the insn which initially
1740 sets the register, while still using the information in other
1741 insns. We would have to be careful to check every insn
1742 involved in the combination. */
1743
1744 if (insn
1745 && reg_referenced_p (x, PATTERN (insn))
1746 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1747 REGNO (x)))
1748 {
1749 struct insn_link *link;
1750
1751 FOR_EACH_LOG_LINK (link, insn)
1752 if (dead_or_set_p (link->insn, x))
1753 break;
1754 if (!link)
1755 {
1756 rsp->nonzero_bits = GET_MODE_MASK (mode);
1757 rsp->sign_bit_copies = 1;
1758 return;
1759 }
1760 }
1761
1762 /* If this is a complex assignment, see if we can convert it into a
1763 simple assignment. */
1764 set = expand_field_assignment (set);
1765
1766 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1767 set what we know about X. */
1768
1769 if (SET_DEST (set) == x
1770 || (paradoxical_subreg_p (SET_DEST (set))
1771 && SUBREG_REG (SET_DEST (set)) == x))
1772 update_rsp_from_reg_equal (rsp, insn, set, x);
1773 else
1774 {
1775 rsp->nonzero_bits = GET_MODE_MASK (mode);
1776 rsp->sign_bit_copies = 1;
1777 }
1778 }
1779 }
1780 \f
1781 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1782 optionally insns that were previously combined into I3 or that will be
1783 combined into the merger of INSN and I3. The order is PRED, PRED2,
1784 INSN, SUCC, SUCC2, I3.
1785
1786 Return 0 if the combination is not allowed for any reason.
1787
1788 If the combination is allowed, *PDEST will be set to the single
1789 destination of INSN and *PSRC to the single source, and this function
1790 will return 1. */
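/* For example, when I1 and I2 are being combined into I3, I1 is checked
with SUCC == I2 (the later insn it feeds), while I2 is checked with I1
passed as one of the predecessors; see the calls in try_combine below. */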
1791
1792 static int
1793 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1794 rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1795 rtx *pdest, rtx *psrc)
1796 {
1797 int i;
1798 const_rtx set = 0;
1799 rtx src, dest;
1800 rtx_insn *p;
1801 rtx link;
1802 bool all_adjacent = true;
1803 int (*is_volatile_p) (const_rtx);
1804
1805 if (succ)
1806 {
1807 if (succ2)
1808 {
1809 if (next_active_insn (succ2) != i3)
1810 all_adjacent = false;
1811 if (next_active_insn (succ) != succ2)
1812 all_adjacent = false;
1813 }
1814 else if (next_active_insn (succ) != i3)
1815 all_adjacent = false;
1816 if (next_active_insn (insn) != succ)
1817 all_adjacent = false;
1818 }
1819 else if (next_active_insn (insn) != i3)
1820 all_adjacent = false;
1821
1822 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1823 or a PARALLEL consisting of such a SET and CLOBBERs.
1824
1825 If INSN has CLOBBER parallel parts, ignore them for our processing.
1826 By definition, these happen during the execution of the insn. When it
1827 is merged with another insn, all bets are off. If they are, in fact,
1828 needed and aren't also supplied in I3, they may be added by
1829 recog_for_combine. Otherwise, it won't match.
1830
1831 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1832 note.
1833
1834 Get the source and destination of INSN. If more than one, can't
1835 combine. */
1836
1837 if (GET_CODE (PATTERN (insn)) == SET)
1838 set = PATTERN (insn);
1839 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1840 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1841 {
1842 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1843 {
1844 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1845
1846 switch (GET_CODE (elt))
1847 {
1848 /* This is important to combine floating point insns
1849 for the SH4 port. */
1850 case USE:
1851 /* Combining an isolated USE doesn't make sense.
1852 We depend here on combinable_i3pat to reject them. */
1853 /* The code below this loop only verifies that the inputs of
1854 the SET in INSN do not change. We call reg_set_between_p
1855 to verify that the REG in the USE does not change between
1856 I3 and INSN.
1857 If the USE in INSN was for a pseudo register, the matching
1858 insn pattern will likely match any register; combining this
1859 with any other USE would only be safe if we knew that the
1860 used registers have identical values, or if there was
1861 something to tell them apart, e.g. different modes. For
1862 now, we forgo such complicated tests and simply disallow
1863 combining of USES of pseudo registers with any other USE. */
1864 if (REG_P (XEXP (elt, 0))
1865 && GET_CODE (PATTERN (i3)) == PARALLEL)
1866 {
1867 rtx i3pat = PATTERN (i3);
1868 int i = XVECLEN (i3pat, 0) - 1;
1869 unsigned int regno = REGNO (XEXP (elt, 0));
1870
1871 do
1872 {
1873 rtx i3elt = XVECEXP (i3pat, 0, i);
1874
1875 if (GET_CODE (i3elt) == USE
1876 && REG_P (XEXP (i3elt, 0))
1877 && (REGNO (XEXP (i3elt, 0)) == regno
1878 ? reg_set_between_p (XEXP (elt, 0),
1879 PREV_INSN (insn), i3)
1880 : regno >= FIRST_PSEUDO_REGISTER))
1881 return 0;
1882 }
1883 while (--i >= 0);
1884 }
1885 break;
1886
1887 /* We can ignore CLOBBERs. */
1888 case CLOBBER:
1889 break;
1890
1891 case SET:
1892 /* Ignore SETs whose result isn't used but not those that
1893 have side-effects. */
1894 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1895 && insn_nothrow_p (insn)
1896 && !side_effects_p (elt))
1897 break;
1898
1899 /* If we have already found a SET, this is a second one and
1900 so we cannot combine with this insn. */
1901 if (set)
1902 return 0;
1903
1904 set = elt;
1905 break;
1906
1907 default:
1908 /* Anything else means we can't combine. */
1909 return 0;
1910 }
1911 }
1912
1913 if (set == 0
1914 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1915 so don't do anything with it. */
1916 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1917 return 0;
1918 }
1919 else
1920 return 0;
1921
1922 if (set == 0)
1923 return 0;
1924
1925 /* The simplification in expand_field_assignment may call back to
1926 get_last_value, so set safe guard here. */
1927 subst_low_luid = DF_INSN_LUID (insn);
1928
1929 set = expand_field_assignment (set);
1930 src = SET_SRC (set), dest = SET_DEST (set);
1931
1932 /* Do not eliminate a user-specified register if it is used as an
1933 asm input, because doing so could break the register asm usage
1934 described in the GCC manual.
1935 Be aware that this may cover more cases than we expect, but this
1936 should be harmless. */
1937 if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1938 && extract_asm_operands (PATTERN (i3)))
1939 return 0;
1940
1941 /* Don't eliminate a store to the stack pointer. */
1942 if (dest == stack_pointer_rtx
1943 /* Don't combine with an insn that sets a register to itself if it has
1944 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1945 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1946 /* Can't merge an ASM_OPERANDS. */
1947 || GET_CODE (src) == ASM_OPERANDS
1948 /* Can't merge a function call. */
1949 || GET_CODE (src) == CALL
1950 /* Don't eliminate a function call argument. */
1951 || (CALL_P (i3)
1952 && (find_reg_fusage (i3, USE, dest)
1953 || (REG_P (dest)
1954 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1955 && global_regs[REGNO (dest)])))
1956 /* Don't substitute into an incremented register. */
1957 || FIND_REG_INC_NOTE (i3, dest)
1958 || (succ && FIND_REG_INC_NOTE (succ, dest))
1959 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1960 /* Don't substitute into a non-local goto, this confuses CFG. */
1961 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1962 /* Make sure that DEST is not used after INSN but before SUCC, or
1963 after SUCC and before SUCC2, or after SUCC2 but before I3. */
1964 || (!all_adjacent
1965 && ((succ2
1966 && (reg_used_between_p (dest, succ2, i3)
1967 || reg_used_between_p (dest, succ, succ2)))
1968 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
1969 || (succ
1970 /* SUCC and SUCC2 can be split halves from a PARALLEL; in
1971 that case SUCC is not in the insn stream, so use SUCC2
1972 instead for this test. */
1973 && reg_used_between_p (dest, insn,
1974 succ2
1975 && INSN_UID (succ) == INSN_UID (succ2)
1976 ? succ2 : succ))))
1977 /* Make sure that the value that is to be substituted for the register
1978 does not use any registers whose values alter in between. However,
1979 if the insns are adjacent, a use can't cross a set even though we
1980 think it might (this can happen for a sequence of insns each setting
1981 the same destination; last_set of that register might point to
1982 a NOTE). If INSN has a REG_EQUIV note, the register is always
1983 equivalent to the memory so the substitution is valid even if there
1984 are intervening stores. Also, don't move a volatile asm or
1985 UNSPEC_VOLATILE across any other insns. */
1986 || (! all_adjacent
1987 && (((!MEM_P (src)
1988 || ! find_reg_note (insn, REG_EQUIV, src))
1989 && use_crosses_set_p (src, DF_INSN_LUID (insn)))
1990 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
1991 || GET_CODE (src) == UNSPEC_VOLATILE))
1992 /* Don't combine across a CALL_INSN, because that would possibly
1993 change whether the life span of some REGs crosses calls or not,
1994 and it is a pain to update that information.
1995 Exception: if source is a constant, moving it later can't hurt.
1996 Accept that as a special case. */
1997 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
1998 return 0;
1999
2000 /* DEST must be either a REG or CC0. */
2001 if (REG_P (dest))
2002 {
2003 /* If register alignment is being enforced for multi-word items in all
2004 cases except for parameters, it is possible to have a register copy
2005 insn referencing a hard register that is not allowed to contain the
2006 mode being copied and which would not be valid as an operand of most
2007 insns. Eliminate this problem by not combining with such an insn.
2008
2009 Also, on some machines we don't want to extend the life of a hard
2010 register. */
2011
2012 if (REG_P (src)
2013 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
2014 && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
2015 /* Don't extend the life of a hard register unless it is
2016 a user variable (if we have few registers) or it can't
2017 fit into the desired register (meaning something special
2018 is going on).
2019 Also avoid substituting a return register into I3, because
2020 reload can't handle a conflict with constraints of other
2021 inputs. */
2022 || (REGNO (src) < FIRST_PSEUDO_REGISTER
2023 && !targetm.hard_regno_mode_ok (REGNO (src),
2024 GET_MODE (src)))))
2025 return 0;
2026 }
2027 else if (GET_CODE (dest) != CC0)
2028 return 0;
2029
2030
2031 if (GET_CODE (PATTERN (i3)) == PARALLEL)
2032 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
2033 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
2034 {
2035 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
2036
2037 /* If the clobber represents an earlyclobber operand, we must not
2038 substitute an expression containing the clobbered register.
2039 As we do not analyze the constraint strings here, we have to
2040 make the conservative assumption. However, if the register is
2041 a fixed hard reg, the clobber cannot represent any operand;
2042 we leave it up to the machine description to either accept or
2043 reject use-and-clobber patterns. */
2044 if (!REG_P (reg)
2045 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
2046 || !fixed_regs[REGNO (reg)])
2047 if (reg_overlap_mentioned_p (reg, src))
2048 return 0;
2049 }
2050
2051 /* If INSN contains anything volatile, or is an `asm' (whether volatile
2052 or not), reject, unless nothing volatile comes between it and I3. */
2053
2054 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
2055 {
2056 /* Make sure neither succ nor succ2 contains a volatile reference. */
2057 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
2058 return 0;
2059 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
2060 return 0;
2061 /* We'll check insns between INSN and I3 below. */
2062 }
2063
2064 /* If INSN is an asm, and DEST is a hard register, reject, since it has
2065 to be an explicit register variable, and was chosen for a reason. */
2066
2067 if (GET_CODE (src) == ASM_OPERANDS
2068 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2069 return 0;
2070
2071 /* If INSN contains volatile references (specifically volatile MEMs),
2072 we cannot combine across any other volatile references.
2073 Even if INSN doesn't contain volatile references, any intervening
2074 volatile insn might affect machine state. */
2075
2076 is_volatile_p = volatile_refs_p (PATTERN (insn))
2077 ? volatile_refs_p
2078 : volatile_insn_p;
2079
2080 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2081 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2082 return 0;
2083
2084 /* If INSN contains an autoincrement or autodecrement, make sure that
2085 register is not used between there and I3, and not already used in
2086 I3 either. Neither must it be used in PRED or SUCC, if they exist.
2087 Also insist that I3 not be a jump; if it were one
2088 and the incremented register were spilled, we would lose. */
2089
2090 if (AUTO_INC_DEC)
2091 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2092 if (REG_NOTE_KIND (link) == REG_INC
2093 && (JUMP_P (i3)
2094 || reg_used_between_p (XEXP (link, 0), insn, i3)
2095 || (pred != NULL_RTX
2096 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2097 || (pred2 != NULL_RTX
2098 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2099 || (succ != NULL_RTX
2100 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2101 || (succ2 != NULL_RTX
2102 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2103 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2104 return 0;
2105
2106 /* Don't combine an insn that follows a CC0-setting insn.
2107 An insn that uses CC0 must not be separated from the one that sets it.
2108 We do, however, allow I2 to follow a CC0-setting insn if that insn
2109 is passed as I1; in that case it will be deleted also.
2110 We also allow combining in this case if all the insns are adjacent
2111 because that would leave the two CC0 insns adjacent as well.
2112 It would be more logical to test whether CC0 occurs inside I1 or I2,
2113 but that would be much slower, and this ought to be equivalent. */
2114
2115 if (HAVE_cc0)
2116 {
2117 p = prev_nonnote_insn (insn);
2118 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2119 && ! all_adjacent)
2120 return 0;
2121 }
2122
2123 /* If we get here, we have passed all the tests and the combination is
2124 to be allowed. */
2125
2126 *pdest = dest;
2127 *psrc = src;
2128
2129 return 1;
2130 }
2131 \f
2132 /* LOC is the location within I3 that contains its pattern or the component
2133 of a PARALLEL of the pattern. We validate that it is valid for combining.
2134
2135 One problem is that if I3 modifies its output, as opposed to replacing it
2136 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST, as
2137 doing so would produce an insn that is not equivalent to the original insns.
2138
2139 Consider:
2140
2141 (set (reg:DI 101) (reg:DI 100))
2142 (set (subreg:SI (reg:DI 101) 0) <foo>)
2143
2144 This is NOT equivalent to:
2145
2146 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2147 (set (reg:DI 101) (reg:DI 100))])
2148
2149 Not only does this modify 100 (in which case it might still be valid
2150 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2151
2152 We can also run into a problem if I2 sets a register that I1
2153 uses and I1 gets directly substituted into I3 (not via I2). In that
2154 case, we would be getting the wrong value of I2DEST into I3, so we
2155 must reject the combination. This case occurs when I2 and I1 both
2156 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2157 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2158 of a SET must prevent combination from occurring. The same situation
2159 can occur for I0, in which case I0_NOT_IN_SRC is set.
2160
2161 Before doing the above check, we first try to expand a field assignment
2162 into a set of logical operations.
2163
2164 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2165 we place a register that is both set and used within I3. If more than one
2166 such register is detected, we fail.
2167
2168 Return 1 if the combination is valid, zero otherwise. */
2169
2170 static int
2171 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2172 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2173 {
2174 rtx x = *loc;
2175
2176 if (GET_CODE (x) == SET)
2177 {
2178 rtx set = x;
2179 rtx dest = SET_DEST (set);
2180 rtx src = SET_SRC (set);
2181 rtx inner_dest = dest;
2182 rtx subdest;
2183
2184 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2185 || GET_CODE (inner_dest) == SUBREG
2186 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2187 inner_dest = XEXP (inner_dest, 0);
2188
2189 /* Check for the case where I3 modifies its output, as discussed
2190 above. We don't want to prevent pseudos from being combined
2191 into the address of a MEM, so only prevent the combination if
2192 i1 or i2 set the same MEM. */
2193 if ((inner_dest != dest &&
2194 (!MEM_P (inner_dest)
2195 || rtx_equal_p (i2dest, inner_dest)
2196 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2197 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2198 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2199 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2200 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2201
2202 /* This is the same test done in can_combine_p except we can't test
2203 all_adjacent; we don't have to, since this instruction will stay
2204 in place, thus we are not considering increasing the lifetime of
2205 INNER_DEST.
2206
2207 Also, if this insn sets a function argument, combining it with
2208 something that might need a spill could clobber a previous
2209 function argument; the all_adjacent test in can_combine_p also
2210 checks this; here, we do a more specific test for this case. */
2211
2212 || (REG_P (inner_dest)
2213 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2214 && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
2215 GET_MODE (inner_dest)))
2216 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2217 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2218 return 0;
2219
2220 /* If DEST is used in I3, it is being killed in this insn, so
2221 record that for later. We have to consider paradoxical
2222 subregs here, since they kill the whole register, but we
2223 ignore partial subregs, STRICT_LOW_PART, etc.
2224 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2225 STACK_POINTER_REGNUM, since these are always considered to be
2226 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2227 subdest = dest;
2228 if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
2229 subdest = SUBREG_REG (subdest);
2230 if (pi3dest_killed
2231 && REG_P (subdest)
2232 && reg_referenced_p (subdest, PATTERN (i3))
2233 && REGNO (subdest) != FRAME_POINTER_REGNUM
2234 && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2235 || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2236 && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2237 || (REGNO (subdest) != ARG_POINTER_REGNUM
2238 || ! fixed_regs [REGNO (subdest)]))
2239 && REGNO (subdest) != STACK_POINTER_REGNUM)
2240 {
2241 if (*pi3dest_killed)
2242 return 0;
2243
2244 *pi3dest_killed = subdest;
2245 }
2246 }
2247
2248 else if (GET_CODE (x) == PARALLEL)
2249 {
2250 int i;
2251
2252 for (i = 0; i < XVECLEN (x, 0); i++)
2253 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2254 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2255 return 0;
2256 }
2257
2258 return 1;
2259 }
2260 \f
2261 /* Return 1 if X is an arithmetic expression that contains a multiplication
2262 or a division. We don't count multiplications by powers of two here. */
2263
2264 static int
2265 contains_muldiv (rtx x)
2266 {
2267 switch (GET_CODE (x))
2268 {
2269 case MOD: case DIV: case UMOD: case UDIV:
2270 return 1;
2271
2272 case MULT:
2273 return ! (CONST_INT_P (XEXP (x, 1))
2274 && pow2p_hwi (UINTVAL (XEXP (x, 1))));
2275 default:
2276 if (BINARY_P (x))
2277 return contains_muldiv (XEXP (x, 0))
2278 || contains_muldiv (XEXP (x, 1));
2279
2280 if (UNARY_P (x))
2281 return contains_muldiv (XEXP (x, 0));
2282
2283 return 0;
2284 }
2285 }
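
/* For instance, (plus:SI (mult:SI (reg:SI 100) (reg:SI 101)) (reg:SI 102))
makes contains_muldiv return 1, while (mult:SI (reg:SI 100) (const_int 8))
does not, because a multiplication by a power of two is expected to become
a shift. */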
2286 \f
2287 /* Determine whether INSN can be used in a combination. Return nonzero if
2288 not. This is used in try_combine to detect early some cases where we
2289 can't perform combinations. */
2290
2291 static int
2292 cant_combine_insn_p (rtx_insn *insn)
2293 {
2294 rtx set;
2295 rtx src, dest;
2296
2297 /* If this isn't really an insn, we can't do anything.
2298 This can occur when flow deletes an insn that it has merged into an
2299 auto-increment address. */
2300 if (!NONDEBUG_INSN_P (insn))
2301 return 1;
2302
2303 /* Never combine loads and stores involving hard regs that are likely
2304 to be spilled. The register allocator can usually handle such
2305 reg-reg moves by tying. If we allow the combiner to make
2306 substitutions of likely-spilled regs, reload might die.
2307 As an exception, we allow combinations involving fixed regs; these are
2308 not available to the register allocator so there's no risk involved. */
2309
2310 set = single_set (insn);
2311 if (! set)
2312 return 0;
2313 src = SET_SRC (set);
2314 dest = SET_DEST (set);
2315 if (GET_CODE (src) == SUBREG)
2316 src = SUBREG_REG (src);
2317 if (GET_CODE (dest) == SUBREG)
2318 dest = SUBREG_REG (dest);
2319 if (REG_P (src) && REG_P (dest)
2320 && ((HARD_REGISTER_P (src)
2321 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2322 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2323 || (HARD_REGISTER_P (dest)
2324 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2325 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2326 return 1;
2327
2328 return 0;
2329 }
2330
2331 struct likely_spilled_retval_info
2332 {
2333 unsigned regno, nregs;
2334 unsigned mask;
2335 };
2336
2337 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2338 hard registers that are known to be written to / clobbered in full. */
2339 static void
2340 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2341 {
2342 struct likely_spilled_retval_info *const info =
2343 (struct likely_spilled_retval_info *) data;
2344 unsigned regno, nregs;
2345 unsigned new_mask;
2346
2347 if (!REG_P (XEXP (set, 0)))
2348 return;
2349 regno = REGNO (x);
2350 if (regno >= info->regno + info->nregs)
2351 return;
2352 nregs = REG_NREGS (x);
2353 if (regno + nregs <= info->regno)
2354 return;
2355 new_mask = (2U << (nregs - 1)) - 1;
2356 if (regno < info->regno)
2357 new_mask >>= info->regno - regno;
2358 else
2359 new_mask <<= regno - info->regno;
2360 info->mask &= ~new_mask;
2361 }
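
/* A worked example with made-up hard register numbers: suppose the return
value occupies regs 16..19, so INFO starts with regno == 16, nregs == 4
and mask == 0b1111. An insn that fully sets regs 18 and 19 reaches this
function with REGNO (X) == 18 and REG_NREGS (X) == 2; NEW_MASK is 0b11,
shifted left by 18 - 16 into 0b1100, and clearing those bits leaves
mask == 0b0011, i.e. only regs 16 and 17 still carry a value written
before the copy sequence. */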
2362
2363 /* Return nonzero iff part of the return value is live during INSN, and
2364 it is likely spilled. This can happen when more than one insn is needed
2365 to copy the return value, e.g. when we consider combining into the
2366 second copy insn for a complex value. */
2367
2368 static int
2369 likely_spilled_retval_p (rtx_insn *insn)
2370 {
2371 rtx_insn *use = BB_END (this_basic_block);
2372 rtx reg;
2373 rtx_insn *p;
2374 unsigned regno, nregs;
2375 /* We assume here that no machine mode needs more than
2376 32 hard registers when the value overlaps with a register
2377 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2378 unsigned mask;
2379 struct likely_spilled_retval_info info;
2380
2381 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2382 return 0;
2383 reg = XEXP (PATTERN (use), 0);
2384 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2385 return 0;
2386 regno = REGNO (reg);
2387 nregs = REG_NREGS (reg);
2388 if (nregs == 1)
2389 return 0;
2390 mask = (2U << (nregs - 1)) - 1;
2391
2392 /* Disregard parts of the return value that are set later. */
2393 info.regno = regno;
2394 info.nregs = nregs;
2395 info.mask = mask;
2396 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2397 if (INSN_P (p))
2398 note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2399 mask = info.mask;
2400
2401 /* Check if any of the (probably) live return value registers is
2402 likely spilled. */
2403 nregs--;
2404 do
2405 {
2406 if ((mask & 1 << nregs)
2407 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2408 return 1;
2409 } while (nregs--);
2410 return 0;
2411 }
2412
2413 /* Adjust INSN after we made a change to its destination.
2414
2415 Changing the destination can invalidate notes that say something about
2416 the results of the insn and a LOG_LINK pointing to the insn. */
2417
2418 static void
2419 adjust_for_new_dest (rtx_insn *insn)
2420 {
2421 /* For notes, be conservative and simply remove them. */
2422 remove_reg_equal_equiv_notes (insn);
2423
2424 /* The new insn will have a destination that was previously the destination
2425 of an insn just above it. Call distribute_links to make a LOG_LINK from
2426 the next use of that destination. */
2427
2428 rtx set = single_set (insn);
2429 gcc_assert (set);
2430
2431 rtx reg = SET_DEST (set);
2432
2433 while (GET_CODE (reg) == ZERO_EXTRACT
2434 || GET_CODE (reg) == STRICT_LOW_PART
2435 || GET_CODE (reg) == SUBREG)
2436 reg = XEXP (reg, 0);
2437 gcc_assert (REG_P (reg));
2438
2439 distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2440
2441 df_insn_rescan (insn);
2442 }
2443
2444 /* Return TRUE if combine can reuse reg X in mode MODE.
2445 ADDED_SETS is nonzero if the original set is still required. */
2446 static bool
2447 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2448 {
2449 unsigned int regno;
2450
2451 if (!REG_P (x))
2452 return false;
2453
2454 regno = REGNO (x);
2455 /* Allow hard registers if the new mode is legal, and occupies no more
2456 registers than the old mode. */
2457 if (regno < FIRST_PSEUDO_REGISTER)
2458 return (targetm.hard_regno_mode_ok (regno, mode)
2459 && REG_NREGS (x) >= hard_regno_nregs (regno, mode));
2460
2461 /* Or a pseudo that is only used once. */
2462 return (regno < reg_n_sets_max
2463 && REG_N_SETS (regno) == 1
2464 && !added_sets
2465 && !REG_USERVAR_P (x));
2466 }
2467
2468
2469 /* Check whether X, the destination of a set, refers to part of
2470 the register specified by REG. */
2471
2472 static bool
2473 reg_subword_p (rtx x, rtx reg)
2474 {
2475 /* Check that reg is an integer mode register. */
2476 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2477 return false;
2478
2479 if (GET_CODE (x) == STRICT_LOW_PART
2480 || GET_CODE (x) == ZERO_EXTRACT)
2481 x = XEXP (x, 0);
2482
2483 return GET_CODE (x) == SUBREG
2484 && SUBREG_REG (x) == reg
2485 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2486 }
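
/* For example, with REG being (reg:DI 100), a destination of
(subreg:SI (reg:DI 100) 0) or (strict_low_part (subreg:HI (reg:DI 100) 0))
refers to part of the register, whereas (reg:DI 100) itself, or a subreg
of some other pseudo, does not. */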
2487
2488 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2489 Note that the INSN should be deleted *after* removing dead edges, so
2490 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2491 but not for a (set (pc) (label_ref FOO)). */
2492
2493 static void
2494 update_cfg_for_uncondjump (rtx_insn *insn)
2495 {
2496 basic_block bb = BLOCK_FOR_INSN (insn);
2497 gcc_assert (BB_END (bb) == insn);
2498
2499 purge_dead_edges (bb);
2500
2501 delete_insn (insn);
2502 if (EDGE_COUNT (bb->succs) == 1)
2503 {
2504 rtx_insn *insn;
2505
2506 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2507
2508 /* Remove barriers from the footer if there are any. */
2509 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2510 if (BARRIER_P (insn))
2511 {
2512 if (PREV_INSN (insn))
2513 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2514 else
2515 BB_FOOTER (bb) = NEXT_INSN (insn);
2516 if (NEXT_INSN (insn))
2517 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2518 }
2519 else if (LABEL_P (insn))
2520 break;
2521 }
2522 }
2523
2524 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2525 by an arbitrary number of CLOBBERs. */
2526 static bool
2527 is_parallel_of_n_reg_sets (rtx pat, int n)
2528 {
2529 if (GET_CODE (pat) != PARALLEL)
2530 return false;
2531
2532 int len = XVECLEN (pat, 0);
2533 if (len < n)
2534 return false;
2535
2536 int i;
2537 for (i = 0; i < n; i++)
2538 if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2539 || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2540 return false;
2541 for ( ; i < len; i++)
2542 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER
2543 || XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
2544 return false;
2545
2546 return true;
2547 }
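
/* With N == 2 and illustrative pseudo numbers, a pattern such as
(parallel [(set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102)))
(set (reg:SI 103) (reg:SI 101))
(clobber (scratch:SI))])
qualifies, while one whose second element sets a MEM rather than a REG,
or whose trailing elements are not all CLOBBERs, does not. */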
2548
2549 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2550 CLOBBERs), can be split into individual SETs in that order, without
2551 changing semantics. */
2552 static bool
2553 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2554 {
2555 if (!insn_nothrow_p (insn))
2556 return false;
2557
2558 rtx pat = PATTERN (insn);
2559
2560 int i, j;
2561 for (i = 0; i < n; i++)
2562 {
2563 if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2564 return false;
2565
2566 rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2567
2568 for (j = i + 1; j < n; j++)
2569 if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2570 return false;
2571 }
2572
2573 return true;
2574 }
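
/* For instance, a PARALLEL of
(set (reg:SI 100) (reg:SI 101))
(set (reg:SI 102) (plus:SI (reg:SI 100) (const_int 1)))
cannot be split in this order: as separate insns the second SET would
read the value just written by the first, whereas PARALLEL semantics
require all inputs to be read before any output is written. */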
2575
2576 /* Try to combine the insns I0, I1 and I2 into I3.
2577 Here I0, I1 and I2 appear earlier than I3.
2578 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2579 I3.
2580
2581 If we are combining more than two insns and the resulting insn is not
2582 recognized, try splitting it into two insns. If that happens, I2 and I3
2583 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2584 Otherwise, I0, I1 and I2 are pseudo-deleted.
2585
2586 Return 0 if the combination does not work. Then nothing is changed.
2587 If we did the combination, return the insn at which combine should
2588 resume scanning.
2589
2590 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2591 new direct jump instruction.
2592
2593 LAST_COMBINED_INSN is either I3, or some insn after I3 that was
2594 the I3 passed to an earlier try_combine within the same basic
2595 block. */
2596
2597 static rtx_insn *
2598 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2599 int *new_direct_jump_p, rtx_insn *last_combined_insn)
2600 {
2601 /* New patterns for I3 and I2, respectively. */
2602 rtx newpat, newi2pat = 0;
2603 rtvec newpat_vec_with_clobbers = 0;
2604 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2605 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2606 dead. */
2607 int added_sets_0, added_sets_1, added_sets_2;
2608 /* Total number of SETs to put into I3. */
2609 int total_sets;
2610 /* Nonzero if I2's or I1's body now appears in I3. */
2611 int i2_is_used = 0, i1_is_used = 0;
2612 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2613 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2614 /* Contains I3 if the destination of I3 is used in its source, which means
2615 that the old life of I3 is being killed. If that usage is placed into
2616 I2 and not in I3, a REG_DEAD note must be made. */
2617 rtx i3dest_killed = 0;
2618 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2619 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2620 /* Copy of SET_SRC of I1 and I0, if needed. */
2621 rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2622 /* Set if I2DEST was reused as a scratch register. */
2623 bool i2scratch = false;
2624 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2625 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2626 /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC. */
2627 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2628 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2629 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2630 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2631 /* Notes that must be added to REG_NOTES in I3 and I2. */
2632 rtx new_i3_notes, new_i2_notes;
2633 /* Notes that we substituted I3 into I2 instead of the normal case. */
2634 int i3_subst_into_i2 = 0;
2635 /* Notes that I1, I2 or I3 is a MULT operation. */
2636 int have_mult = 0;
2637 int swap_i2i3 = 0;
2638 int changed_i3_dest = 0;
2639
2640 int maxreg;
2641 rtx_insn *temp_insn;
2642 rtx temp_expr;
2643 struct insn_link *link;
2644 rtx other_pat = 0;
2645 rtx new_other_notes;
2646 int i;
2647 scalar_int_mode dest_mode, temp_mode;
2648
2649 /* Immediately return if any of I0, I1, I2 are the same insn (I3 can
2650 never be). */
2651 if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2652 return 0;
2653
2654 /* Only try four-insn combinations when there's high likelihood of
2655 success. Look for simple insns, such as loads of constants or
2656 binary operations involving a constant. */
2657 if (i0)
2658 {
2659 int i;
2660 int ngood = 0;
2661 int nshift = 0;
2662 rtx set0, set3;
2663
2664 if (!flag_expensive_optimizations)
2665 return 0;
2666
2667 for (i = 0; i < 4; i++)
2668 {
2669 rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2670 rtx set = single_set (insn);
2671 rtx src;
2672 if (!set)
2673 continue;
2674 src = SET_SRC (set);
2675 if (CONSTANT_P (src))
2676 {
2677 ngood += 2;
2678 break;
2679 }
2680 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2681 ngood++;
2682 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2683 || GET_CODE (src) == LSHIFTRT)
2684 nshift++;
2685 }
2686
2687 /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
2688 are likely manipulating its value. Ideally we'll be able to combine
2689 all four insns into a bitfield insertion of some kind.
2690
2691 Note the source in I0 might be inside a sign/zero extension and the
2692 memory modes in I0 and I3 might be different. So extract the address
2693 from the destination of I3 and search for it in the source of I0.
2694
2695 In the event that there's a match but the source/dest do not actually
2696 refer to the same memory, the worst that happens is we try some
2697 combinations that we wouldn't have otherwise. */
2698 if ((set0 = single_set (i0))
2699 /* Ensure the source of SET0 is a MEM, possibly buried inside
2700 an extension. */
2701 && (GET_CODE (SET_SRC (set0)) == MEM
2702 || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2703 || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2704 && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2705 && (set3 = single_set (i3))
2706 /* Ensure the destination of SET3 is a MEM. */
2707 && GET_CODE (SET_DEST (set3)) == MEM
2708 /* Would it be better to extract the base address for the MEM
2709 in SET3 and look for that? I don't have cases where it matters
2710 but I could envision such cases. */
2711 && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2712 ngood += 2;
2713
2714 if (ngood < 2 && nshift < 2)
2715 return 0;
2716 }
2717
2718 /* Exit early if one of the insns involved can't be used for
2719 combinations. */
2720 if (CALL_P (i2)
2721 || (i1 && CALL_P (i1))
2722 || (i0 && CALL_P (i0))
2723 || cant_combine_insn_p (i3)
2724 || cant_combine_insn_p (i2)
2725 || (i1 && cant_combine_insn_p (i1))
2726 || (i0 && cant_combine_insn_p (i0))
2727 || likely_spilled_retval_p (i3))
2728 return 0;
2729
2730 combine_attempts++;
2731 undobuf.other_insn = 0;
2732
2733 /* Reset the hard register usage information. */
2734 CLEAR_HARD_REG_SET (newpat_used_regs);
2735
2736 if (dump_file && (dump_flags & TDF_DETAILS))
2737 {
2738 if (i0)
2739 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2740 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2741 else if (i1)
2742 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2743 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2744 else
2745 fprintf (dump_file, "\nTrying %d -> %d:\n",
2746 INSN_UID (i2), INSN_UID (i3));
2747 }
2748
2749 /* If multiple insns feed into one of I2 or I3, they can be in any
2750 order. To simplify the code below, reorder them in sequence. */
2751 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2752 std::swap (i0, i2);
2753 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2754 std::swap (i0, i1);
2755 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2756 std::swap (i1, i2);
2757
2758 added_links_insn = 0;
2759
2760 /* First check for one important special case that the code below will
2761 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2762 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2763 we may be able to replace that destination with the destination of I3.
2764 This occurs in the common code where we compute both a quotient and
2765 remainder into a structure, in which case we want to do the computation
2766 directly into the structure to avoid register-register copies.
2767
2768 Note that this case handles both multiple sets in I2 and also cases
2769 where I2 has a number of CLOBBERs inside the PARALLEL.
2770
2771 We make very conservative checks below and only try to handle the
2772 most common cases of this. For example, we only handle the case
2773 where I2 and I3 are adjacent to avoid making difficult register
2774 usage tests. */
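/* A sketch of that situation, with illustrative pseudo numbers: I2 is
(parallel [(set (reg:SI 100) (div:SI (reg:SI 102) (reg:SI 103)))
(set (reg:SI 101) (mod:SI (reg:SI 102) (reg:SI 103)))])
and I3 is
(set (mem:SI (reg:SI 104)) (reg:SI 101))
with pseudo 101 dying in I3. Replacing (reg:SI 101) in I2's second SET
with the MEM makes the division store the remainder directly into memory,
so the separate copy insn becomes unnecessary. */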
2775
2776 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2777 && REG_P (SET_SRC (PATTERN (i3)))
2778 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2779 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2780 && GET_CODE (PATTERN (i2)) == PARALLEL
2781 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2782 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2783 below would need to check what is inside (and reg_overlap_mentioned_p
2784 doesn't support those codes anyway). Don't allow those destinations;
2785 the resulting insn isn't likely to be recognized anyway. */
2786 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2787 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2788 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2789 SET_DEST (PATTERN (i3)))
2790 && next_active_insn (i2) == i3)
2791 {
2792 rtx p2 = PATTERN (i2);
2793
2794 /* Make sure that the destination of I3,
2795 which we are going to substitute into one output of I2,
2796 is not used within another output of I2. We must avoid making this:
2797 (parallel [(set (mem (reg 69)) ...)
2798 (set (reg 69) ...)])
2799 which is not well-defined as to order of actions.
2800 (Besides, reload can't handle output reloads for this.)
2801
2802 The problem can also happen if the dest of I3 is a memory ref,
2803 if another dest in I2 is an indirect memory ref.
2804
2805 Neither can this PARALLEL be an asm. We do not allow combining
2806 that usually (see can_combine_p), so do not here either. */
2807 bool ok = true;
2808 for (i = 0; ok && i < XVECLEN (p2, 0); i++)
2809 {
2810 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2811 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2812 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2813 SET_DEST (XVECEXP (p2, 0, i))))
2814 ok = false;
2815 else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2816 && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2817 ok = false;
2818 }
2819
2820 if (ok)
2821 for (i = 0; i < XVECLEN (p2, 0); i++)
2822 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2823 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2824 {
2825 combine_merges++;
2826
2827 subst_insn = i3;
2828 subst_low_luid = DF_INSN_LUID (i2);
2829
2830 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2831 i2src = SET_SRC (XVECEXP (p2, 0, i));
2832 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2833 i2dest_killed = dead_or_set_p (i2, i2dest);
2834
2835 /* Replace the dest in I2 with our dest and make the resulting
2836 insn the new pattern for I3. Then skip to where we validate
2837 the pattern. Everything was set up above. */
2838 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2839 newpat = p2;
2840 i3_subst_into_i2 = 1;
2841 goto validate_replacement;
2842 }
2843 }
2844
2845 /* If I2 is setting a pseudo to a constant and I3 is setting some
2846 sub-part of it to another constant, merge them by making a new
2847 constant. */
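/* As a hypothetical example: if I2 is
(set (reg:SI 100) (const_int 0x12345678))
and I3 stores another constant into the low half,
(set (subreg:HI (reg:SI 100) 0) (const_int 0x1111))
(taking that subreg to be the low part), the two constants are merged
with wi::insert into 0x12341111, which then replaces I2's source below. */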
2848 if (i1 == 0
2849 && (temp_expr = single_set (i2)) != 0
2850 && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
2851 && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2852 && GET_CODE (PATTERN (i3)) == SET
2853 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2854 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2855 {
2856 rtx dest = SET_DEST (PATTERN (i3));
2857 rtx temp_dest = SET_DEST (temp_expr);
2858 int offset = -1;
2859 int width = 0;
2860
2861 if (GET_CODE (dest) == ZERO_EXTRACT)
2862 {
2863 if (CONST_INT_P (XEXP (dest, 1))
2864 && CONST_INT_P (XEXP (dest, 2))
2865 && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
2866 &dest_mode))
2867 {
2868 width = INTVAL (XEXP (dest, 1));
2869 offset = INTVAL (XEXP (dest, 2));
2870 dest = XEXP (dest, 0);
2871 if (BITS_BIG_ENDIAN)
2872 offset = GET_MODE_PRECISION (dest_mode) - width - offset;
2873 }
2874 }
2875 else
2876 {
2877 if (GET_CODE (dest) == STRICT_LOW_PART)
2878 dest = XEXP (dest, 0);
2879 if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
2880 {
2881 width = GET_MODE_PRECISION (dest_mode);
2882 offset = 0;
2883 }
2884 }
2885
2886 if (offset >= 0)
2887 {
2888 /* If this is the low part, we're done. */
2889 if (subreg_lowpart_p (dest))
2890 ;
2891 /* Handle the case where inner is twice the size of outer. */
2892 else if (GET_MODE_PRECISION (temp_mode)
2893 == 2 * GET_MODE_PRECISION (dest_mode))
2894 offset += GET_MODE_PRECISION (dest_mode);
2895 /* Otherwise give up for now. */
2896 else
2897 offset = -1;
2898 }
2899
2900 if (offset >= 0)
2901 {
2902 rtx inner = SET_SRC (PATTERN (i3));
2903 rtx outer = SET_SRC (temp_expr);
2904
2905 wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
2906 rtx_mode_t (inner, dest_mode),
2907 offset, width);
2908
2909 combine_merges++;
2910 subst_insn = i3;
2911 subst_low_luid = DF_INSN_LUID (i2);
2912 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2913 i2dest = temp_dest;
2914 i2dest_killed = dead_or_set_p (i2, i2dest);
2915
2916 /* Replace the source in I2 with the new constant and make the
2917 resulting insn the new pattern for I3. Then skip to where we
2918 validate the pattern. Everything was set up above. */
2919 SUBST (SET_SRC (temp_expr),
2920 immed_wide_int_const (o, temp_mode));
2921
2922 newpat = PATTERN (i2);
2923
2924 /* The dest of I3 has been replaced with the dest of I2. */
2925 changed_i3_dest = 1;
2926 goto validate_replacement;
2927 }
2928 }
2929
2930 /* If we have no I1 and I2 looks like:
2931 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2932 (set Y OP)])
2933 make up a dummy I1 that is
2934 (set Y OP)
2935 and change I2 to be
2936 (set (reg:CC X) (compare:CC Y (const_int 0)))
2937
2938 (We can ignore any trailing CLOBBERs.)
2939
2940 This undoes a previous combination and allows us to match a branch-and-
2941 decrement insn. */
2942
2943 if (!HAVE_cc0 && i1 == 0
2944 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2945 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2946 == MODE_CC)
2947 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2948 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2949 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2950 SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
2951 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2952 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2953 {
2954 /* We make I1 with the same INSN_UID as I2. This gives it
2955 the same DF_INSN_LUID for value tracking. Our fake I1 will
2956 never appear in the insn stream so giving it the same INSN_UID
2957 as I2 will not cause a problem. */
2958
2959 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2960 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
2961 -1, NULL_RTX);
2962 INSN_UID (i1) = INSN_UID (i2);
2963
2964 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
2965 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
2966 SET_DEST (PATTERN (i1)));
2967 unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
2968 SUBST_LINK (LOG_LINKS (i2),
2969 alloc_insn_link (i1, regno, LOG_LINKS (i2)));
2970 }
2971
2972 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
2973 make those two SETs separate I1 and I2 insns, and make an I0 that is
2974 the original I1. */
2975 if (!HAVE_cc0 && i0 == 0
2976 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2977 && can_split_parallel_of_n_reg_sets (i2, 2)
2978 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2979 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2980 {
2981 /* If there is no I1, there is no I0 either. */
2982 i0 = i1;
2983
2984 /* We make I1 with the same INSN_UID as I2. This gives it
2985 the same DF_INSN_LUID for value tracking. Our fake I1 will
2986 never appear in the insn stream so giving it the same INSN_UID
2987 as I2 will not cause a problem. */
2988
2989 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
2990 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
2991 -1, NULL_RTX);
2992 INSN_UID (i1) = INSN_UID (i2);
2993
2994 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
2995 }
2996
2997 /* Verify that I2 and I1 are valid for combining. */
2998 if (! can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src)
2999 || (i1 && ! can_combine_p (i1, i3, i0, NULL, i2, NULL,
3000 &i1dest, &i1src))
3001 || (i0 && ! can_combine_p (i0, i3, NULL, NULL, i1, i2,
3002 &i0dest, &i0src)))
3003 {
3004 undo_all ();
3005 return 0;
3006 }
3007
3008 /* Record whether I2DEST is used in I2SRC and similarly for the other
3009 cases. Knowing this will help in register status updating below. */
3010 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
3011 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
3012 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
3013 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
3014 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
3015 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
3016 i2dest_killed = dead_or_set_p (i2, i2dest);
3017 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
3018 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
3019
3020 /* For the earlier insns, determine which of the subsequent ones they
3021 feed. */
3022 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
3023 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
3024 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
3025 : (!reg_overlap_mentioned_p (i1dest, i0dest)
3026 && reg_overlap_mentioned_p (i0dest, i2src))));
3027
3028 /* Ensure that I3's pattern can be the destination of combines. */
3029 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
3030 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
3031 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
3032 || (i1dest_in_i0src && !i0_feeds_i1_n)),
3033 &i3dest_killed))
3034 {
3035 undo_all ();
3036 return 0;
3037 }
3038
3039 /* See if any of the insns is a MULT operation. Unless one is, we will
3040 reject a combination that is, since it must be slower. Be conservative
3041 here. */
3042 if (GET_CODE (i2src) == MULT
3043 || (i1 != 0 && GET_CODE (i1src) == MULT)
3044 || (i0 != 0 && GET_CODE (i0src) == MULT)
3045 || (GET_CODE (PATTERN (i3)) == SET
3046 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
3047 have_mult = 1;
3048
3049 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3050 We used to do this EXCEPT in one case: I3 has a post-inc in an
3051 output operand. However, that exception can give rise to insns like
3052 mov r3,(r3)+
3053 which is a famous insn on the PDP-11 where the value of r3 used as the
3054 source was model-dependent. Avoid this sort of thing. */
3055
3056 #if 0
3057 if (!(GET_CODE (PATTERN (i3)) == SET
3058 && REG_P (SET_SRC (PATTERN (i3)))
3059 && MEM_P (SET_DEST (PATTERN (i3)))
3060 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3061 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3062 /* It's not the exception. */
3063 #endif
3064 if (AUTO_INC_DEC)
3065 {
3066 rtx link;
3067 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3068 if (REG_NOTE_KIND (link) == REG_INC
3069 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3070 || (i1 != 0
3071 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3072 {
3073 undo_all ();
3074 return 0;
3075 }
3076 }
3077
3078 /* See if the SETs in I1 or I2 need to be kept around in the merged
3079 instruction: whenever the value set there is still needed past I3.
3080 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3081
3082 For the SET in I1, we have two cases: if I1 and I2 independently feed
3083 into I3, the set in I1 needs to be kept around unless I1DEST dies
3084 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
3085 in I1 needs to be kept around unless I1DEST dies or is set in either
3086 I2 or I3. The same considerations apply to I0. */
3087
3088 added_sets_2 = !dead_or_set_p (i3, i2dest);
3089
3090 if (i1)
3091 added_sets_1 = !(dead_or_set_p (i3, i1dest)
3092 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3093 else
3094 added_sets_1 = 0;
3095
3096 if (i0)
3097 added_sets_0 = !(dead_or_set_p (i3, i0dest)
3098 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3099 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3100 && dead_or_set_p (i2, i0dest)));
3101 else
3102 added_sets_0 = 0;
3103
3104 /* We are about to copy insns for the case where they need to be kept
3105 around. Check that they can be copied in the merged instruction. */
3106
3107 if (targetm.cannot_copy_insn_p
3108 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3109 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3110 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3111 {
3112 undo_all ();
3113 return 0;
3114 }
3115
3116 /* If the set in I2 needs to be kept around, we must make a copy of
3117 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3118 PATTERN (I2), we are only substituting for the original I1DEST, not into
3119 an already-substituted copy. This also prevents making self-referential
3120 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3121 I2DEST. */
3122
3123 if (added_sets_2)
3124 {
3125 if (GET_CODE (PATTERN (i2)) == PARALLEL)
3126 i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3127 else
3128 i2pat = copy_rtx (PATTERN (i2));
3129 }
3130
3131 if (added_sets_1)
3132 {
3133 if (GET_CODE (PATTERN (i1)) == PARALLEL)
3134 i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3135 else
3136 i1pat = copy_rtx (PATTERN (i1));
3137 }
3138
3139 if (added_sets_0)
3140 {
3141 if (GET_CODE (PATTERN (i0)) == PARALLEL)
3142 i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3143 else
3144 i0pat = copy_rtx (PATTERN (i0));
3145 }
3146
3147 combine_merges++;
3148
3149 /* Substitute in the latest insn for the regs set by the earlier ones. */
3150
3151 maxreg = max_reg_num ();
3152
3153 subst_insn = i3;
3154
3155 /* Many machines that don't use CC0 have insns that can both perform an
3156 arithmetic operation and set the condition code. These operations will
3157 be represented as a PARALLEL with the first element of the vector
3158 being a COMPARE of an arithmetic operation with the constant zero.
3159 The second element of the vector will set some pseudo to the result
3160 of the same arithmetic operation. If we simplify the COMPARE, we won't
3161 match such a pattern and so will generate an extra insn. Here we test
3162 for this case, where both the comparison and the operation result are
3163 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3164 I2SRC. Later we will make the PARALLEL that contains I2. */
3165
3166 if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3167 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3168 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3169 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3170 {
3171 rtx newpat_dest;
3172 rtx *cc_use_loc = NULL;
3173 rtx_insn *cc_use_insn = NULL;
3174 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3175 machine_mode compare_mode, orig_compare_mode;
3176 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3177 scalar_int_mode mode;
3178
3179 newpat = PATTERN (i3);
3180 newpat_dest = SET_DEST (newpat);
3181 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3182
3183 if (undobuf.other_insn == 0
3184 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3185 &cc_use_insn)))
3186 {
3187 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3188 if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
3189 compare_code = simplify_compare_const (compare_code, mode,
3190 op0, &op1);
3191 target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3192 }
3193
3194 /* Do the rest only if op1 is const0_rtx, which may be the
3195 result of simplification. */
3196 if (op1 == const0_rtx)
3197 {
3198 /* If a single use of the CC is found, prepare to modify it
3199 when SELECT_CC_MODE returns a new CC-class mode, or when
3200 the above simplify_compare_const() returned a new comparison
3201 operator. undobuf.other_insn is assigned the CC use insn
3202 when modifying it. */
3203 if (cc_use_loc)
3204 {
3205 #ifdef SELECT_CC_MODE
3206 machine_mode new_mode
3207 = SELECT_CC_MODE (compare_code, op0, op1);
3208 if (new_mode != orig_compare_mode
3209 && can_change_dest_mode (SET_DEST (newpat),
3210 added_sets_2, new_mode))
3211 {
3212 unsigned int regno = REGNO (newpat_dest);
3213 compare_mode = new_mode;
3214 if (regno < FIRST_PSEUDO_REGISTER)
3215 newpat_dest = gen_rtx_REG (compare_mode, regno);
3216 else
3217 {
3218 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3219 newpat_dest = regno_reg_rtx[regno];
3220 }
3221 }
3222 #endif
3223 /* Cases for modifying the CC-using comparison. */
3224 if (compare_code != orig_compare_code
3225 /* ??? Do we need to verify the zero rtx? */
3226 && XEXP (*cc_use_loc, 1) == const0_rtx)
3227 {
3228 /* Replace cc_use_loc with entire new RTX. */
3229 SUBST (*cc_use_loc,
3230 gen_rtx_fmt_ee (compare_code, compare_mode,
3231 newpat_dest, const0_rtx));
3232 undobuf.other_insn = cc_use_insn;
3233 }
3234 else if (compare_mode != orig_compare_mode)
3235 {
3236 /* Just replace the CC reg with a new mode. */
3237 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3238 undobuf.other_insn = cc_use_insn;
3239 }
3240 }
3241
3242 /* Now we modify the current newpat:
3243 First, SET_DEST(newpat) is updated if the CC mode has been
3244 altered. For targets without SELECT_CC_MODE, this should be
3245 optimized away. */
3246 if (compare_mode != orig_compare_mode)
3247 SUBST (SET_DEST (newpat), newpat_dest);
3248 /* This is always done to propagate i2src into newpat. */
3249 SUBST (SET_SRC (newpat),
3250 gen_rtx_COMPARE (compare_mode, op0, op1));
3251 /* Create new version of i2pat if needed; the below PARALLEL
3252 creation needs this to work correctly. */
3253 if (! rtx_equal_p (i2src, op0))
3254 i2pat = gen_rtx_SET (i2dest, op0);
3255 i2_is_used = 1;
3256 }
3257 }
3258
3259 if (i2_is_used == 0)
3260 {
3261 /* It is possible that the source of I2 or I1 may be performing
3262 an unneeded operation, such as a ZERO_EXTEND of something
3263 that is known to have the high part zero. Handle that case
3264 by letting subst look at the inner insns.
3265
3266 Another way to do this would be to have a function that tries
3267 to simplify a single insn instead of merging two or more
3268 insns. We don't do this because of the potential of infinite
3269 loops and because of the potential extra memory required.
3270 However, doing it the way we are is a bit of a kludge and
3271 doesn't catch all cases.
3272
3273 But only do this if -fexpensive-optimizations since it slows
3274 things down and doesn't usually win.
3275
3276 This is not done in the COMPARE case above because the
3277 unmodified I2PAT is used in the PARALLEL and so a pattern
3278 with a modified I2SRC would not match. */
3279
3280 if (flag_expensive_optimizations)
3281 {
3282 /* Pass pc_rtx so no substitutions are done, just
3283 simplifications. */
3284 if (i1)
3285 {
3286 subst_low_luid = DF_INSN_LUID (i1);
3287 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3288 }
3289
3290 subst_low_luid = DF_INSN_LUID (i2);
3291 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3292 }
3293
3294 n_occurrences = 0; /* `subst' counts here */
3295 subst_low_luid = DF_INSN_LUID (i2);
3296
3297 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3298 copy of I2SRC each time we substitute it, in order to avoid creating
3299 self-referential RTL when we will be substituting I1SRC for I1DEST
3300 later. Likewise if I0 feeds into I2, either directly or indirectly
3301 through I1, and I0DEST is in I0SRC. */
3302 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3303 (i1_feeds_i2_n && i1dest_in_i1src)
3304 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3305 && i0dest_in_i0src));
3306 substed_i2 = 1;
3307
3308 /* Record whether I2's body now appears within I3's body. */
3309 i2_is_used = n_occurrences;
3310 }
3311
3312 /* If we already got a failure, don't try to do more. Otherwise, try to
3313 substitute I1 if we have it. */
3314
3315 if (i1 && GET_CODE (newpat) != CLOBBER)
3316 {
3317 /* Check that an autoincrement side-effect on I1 has not been lost.
3318 This happens if I1DEST is mentioned in I2 and dies there, and
3319 has disappeared from the new pattern. */
3320 if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3321 && i1_feeds_i2_n
3322 && dead_or_set_p (i2, i1dest)
3323 && !reg_overlap_mentioned_p (i1dest, newpat))
3324 /* Before we can do this substitution, we must redo the test done
3325 above (see detailed comments there) that ensures I1DEST isn't
3326 mentioned in any SETs in NEWPAT that are field assignments. */
3327 || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3328 0, 0, 0))
3329 {
3330 undo_all ();
3331 return 0;
3332 }
3333
3334 n_occurrences = 0;
3335 subst_low_luid = DF_INSN_LUID (i1);
3336
3337 /* If the following substitution will modify I1SRC, make a copy of it
3338 for the case where it is substituted for I1DEST in I2PAT later. */
3339 if (added_sets_2 && i1_feeds_i2_n)
3340 i1src_copy = copy_rtx (i1src);
3341
3342 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3343 copy of I1SRC each time we substitute it, in order to avoid creating
3344 self-referential RTL when we will be substituting I0SRC for I0DEST
3345 later. */
3346 newpat = subst (newpat, i1dest, i1src, 0, 0,
3347 i0_feeds_i1_n && i0dest_in_i0src);
3348 substed_i1 = 1;
3349
3350 /* Record whether I1's body now appears within I3's body. */
3351 i1_is_used = n_occurrences;
3352 }
3353
3354 /* Likewise for I0 if we have it. */
3355
3356 if (i0 && GET_CODE (newpat) != CLOBBER)
3357 {
3358 if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3359 && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3360 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3361 && !reg_overlap_mentioned_p (i0dest, newpat))
3362 || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3363 0, 0, 0))
3364 {
3365 undo_all ();
3366 return 0;
3367 }
3368
3369 /* If the following substitution will modify I0SRC, make a copy of it
3370 for the case where it is substituted for I0DEST in I1PAT later. */
3371 if (added_sets_1 && i0_feeds_i1_n)
3372 i0src_copy = copy_rtx (i0src);
3373 /* And a copy for I0DEST in I2PAT substitution. */
3374 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3375 || (i0_feeds_i2_n)))
3376 i0src_copy2 = copy_rtx (i0src);
3377
3378 n_occurrences = 0;
3379 subst_low_luid = DF_INSN_LUID (i0);
3380 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3381 substed_i0 = 1;
3382 }
3383
3384 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3385 to count all the ways that I2SRC and I1SRC can be used. */
3386 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3387 && i2_is_used + added_sets_2 > 1)
3388 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3389 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3390 > 1))
3391 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3392 && (n_occurrences + added_sets_0
3393 + (added_sets_1 && i0_feeds_i1_n)
3394 + (added_sets_2 && i0_feeds_i2_n)
3395 > 1))
3396 /* Fail if we tried to make a new register. */
3397 || max_reg_num () != maxreg
3398 /* Fail if we couldn't do something and have a CLOBBER. */
3399 || GET_CODE (newpat) == CLOBBER
3400 /* Fail if this new pattern is a MULT and we didn't have one before
3401 at the outer level. */
3402 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3403 && ! have_mult))
3404 {
3405 undo_all ();
3406 return 0;
3407 }
3408
3409 /* If the actions of the earlier insns must be kept
3410 in addition to substituting them into the latest one,
3411 we must make a new PARALLEL for the latest insn
3412 to hold the additional SETs. */
3413
3414 if (added_sets_0 || added_sets_1 || added_sets_2)
3415 {
3416 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3417 combine_extras++;
3418
3419 if (GET_CODE (newpat) == PARALLEL)
3420 {
3421 rtvec old = XVEC (newpat, 0);
3422 total_sets = XVECLEN (newpat, 0) + extra_sets;
3423 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3424 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3425 sizeof (old->elem[0]) * old->num_elem);
3426 }
3427 else
3428 {
3429 rtx old = newpat;
3430 total_sets = 1 + extra_sets;
3431 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3432 XVECEXP (newpat, 0, 0) = old;
3433 }
3434
3435 if (added_sets_0)
3436 XVECEXP (newpat, 0, --total_sets) = i0pat;
3437
3438 if (added_sets_1)
3439 {
3440 rtx t = i1pat;
3441 if (i0_feeds_i1_n)
3442 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3443
3444 XVECEXP (newpat, 0, --total_sets) = t;
3445 }
3446 if (added_sets_2)
3447 {
3448 rtx t = i2pat;
3449 if (i1_feeds_i2_n)
3450 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3451 i0_feeds_i1_n && i0dest_in_i0src);
3452 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3453 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3454
3455 XVECEXP (newpat, 0, --total_sets) = t;
3456 }
3457 }
3458
3459 validate_replacement:
3460
3461 /* Note which hard regs this insn has as inputs. */
3462 mark_used_regs_combine (newpat);
3463
3464 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3465 consider splitting this pattern, we might need these clobbers. */
3466 if (i1 && GET_CODE (newpat) == PARALLEL
3467 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3468 {
3469 int len = XVECLEN (newpat, 0);
3470
3471 newpat_vec_with_clobbers = rtvec_alloc (len);
3472 for (i = 0; i < len; i++)
3473 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3474 }
3475
3476 /* We have recognized nothing yet. */
3477 insn_code_number = -1;
3478
3479 /* See if this is a PARALLEL of two SETs where one SET's destination is
3480 a register that is unused and this isn't marked as an instruction that
3481 might trap in an EH region. In that case, we just need the other SET.
3482 We prefer this over the PARALLEL.
3483
3484 This can occur when simplifying a divmod insn. We *must* test for this
3485 case here because the code below that splits two independent SETs doesn't
3486 handle this case correctly when it updates the register status.
3487
3488 It's pointless doing this if we originally had two sets, one from
3489 i3, and one from i2. Combining then splitting the parallel results
3490 in the original i2 again plus an invalid insn (which we delete).
3491 The net effect is only to move instructions around, which makes
3492 debug info less accurate.
3493
3494 If the remaining SET came from I2 its destination should not be used
3495 between I2 and I3. See PR82024. */
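/* Illustrative sketch (register names are placeholders, not from the
original sources): NEWPAT may be a divmod-style PARALLEL such as
(parallel [(set (reg Q) (div (reg A) (reg B)))
(set (reg R) (mod (reg A) (reg B)))])
where I3 carries a REG_UNUSED note for (reg R); only the first SET is
then needed, and we try to recognize it on its own below. */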
3496
3497 if (!(added_sets_2 && i1 == 0)
3498 && is_parallel_of_n_reg_sets (newpat, 2)
3499 && asm_noperands (newpat) < 0)
3500 {
3501 rtx set0 = XVECEXP (newpat, 0, 0);
3502 rtx set1 = XVECEXP (newpat, 0, 1);
3503 rtx oldpat = newpat;
3504
3505 if (((REG_P (SET_DEST (set1))
3506 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3507 || (GET_CODE (SET_DEST (set1)) == SUBREG
3508 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3509 && insn_nothrow_p (i3)
3510 && !side_effects_p (SET_SRC (set1)))
3511 {
3512 newpat = set0;
3513 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3514 }
3515
3516 else if (((REG_P (SET_DEST (set0))
3517 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3518 || (GET_CODE (SET_DEST (set0)) == SUBREG
3519 && find_reg_note (i3, REG_UNUSED,
3520 SUBREG_REG (SET_DEST (set0)))))
3521 && insn_nothrow_p (i3)
3522 && !side_effects_p (SET_SRC (set0)))
3523 {
3524 rtx dest = SET_DEST (set1);
3525 if (GET_CODE (dest) == SUBREG)
3526 dest = SUBREG_REG (dest);
3527 if (!reg_used_between_p (dest, i2, i3))
3528 {
3529 newpat = set1;
3530 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3531
3532 if (insn_code_number >= 0)
3533 changed_i3_dest = 1;
3534 }
3535 }
3536
3537 if (insn_code_number < 0)
3538 newpat = oldpat;
3539 }
3540
3541 /* Is the result of combination a valid instruction? */
3542 if (insn_code_number < 0)
3543 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3544
3545 /* If we were combining three insns and the result is a simple SET
3546 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3547 insns. There are two ways to do this. It can be split using a
3548 machine-specific method (like when you have an addition of a large
3549 constant) or by combine in the function find_split_point. */
3550
3551 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3552 && asm_noperands (newpat) < 0)
3553 {
3554 rtx parallel, *split;
3555 rtx_insn *m_split_insn;
3556
3557 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3558 use I2DEST as a scratch register will help. In the latter case,
3559 convert I2DEST to the mode of the source of NEWPAT if we can. */
3560
3561 m_split_insn = combine_split_insns (newpat, i3);
3562
3563 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3564 inputs of NEWPAT. */
3565
3566 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3567 possible to try that as a scratch reg. This would require adding
3568 more code to make it work though. */
3569
3570 if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3571 {
3572 machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3573
3574 /* ??? Reusing i2dest without resetting the reg_stat entry for it
3575 (temporarily, until we are committed to this instruction
3576 combination) does not work: for example, any call to nonzero_bits
3577 on the register (from a splitter in the MD file, for example)
3578 will get the old information, which is invalid.
3579
3580 Since nowadays we can create registers during combine just fine,
3581 we should just create a new one here, not reuse i2dest. */
3582
3583 /* First try to split using the original register as a
3584 scratch register. */
3585 parallel = gen_rtx_PARALLEL (VOIDmode,
3586 gen_rtvec (2, newpat,
3587 gen_rtx_CLOBBER (VOIDmode,
3588 i2dest)));
3589 m_split_insn = combine_split_insns (parallel, i3);
3590
3591 /* If that didn't work, try changing the mode of I2DEST if
3592 we can. */
3593 if (m_split_insn == 0
3594 && new_mode != GET_MODE (i2dest)
3595 && new_mode != VOIDmode
3596 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3597 {
3598 machine_mode old_mode = GET_MODE (i2dest);
3599 rtx ni2dest;
3600
3601 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3602 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3603 else
3604 {
3605 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3606 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3607 }
3608
3609 parallel = (gen_rtx_PARALLEL
3610 (VOIDmode,
3611 gen_rtvec (2, newpat,
3612 gen_rtx_CLOBBER (VOIDmode,
3613 ni2dest))));
3614 m_split_insn = combine_split_insns (parallel, i3);
3615
3616 if (m_split_insn == 0
3617 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3618 {
3619 struct undo *buf;
3620
3621 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3622 buf = undobuf.undos;
3623 undobuf.undos = buf->next;
3624 buf->next = undobuf.frees;
3625 undobuf.frees = buf;
3626 }
3627 }
3628
3629 i2scratch = m_split_insn != 0;
3630 }
3631
3632 /* If recog_for_combine has discarded clobbers, try to use them
3633 again for the split. */
3634 if (m_split_insn == 0 && newpat_vec_with_clobbers)
3635 {
3636 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3637 m_split_insn = combine_split_insns (parallel, i3);
3638 }
3639
3640 if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3641 {
3642 rtx m_split_pat = PATTERN (m_split_insn);
3643 insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3644 if (insn_code_number >= 0)
3645 newpat = m_split_pat;
3646 }
3647 else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3648 && (next_nonnote_nondebug_insn (i2) == i3
3649 || ! use_crosses_set_p (PATTERN (m_split_insn), DF_INSN_LUID (i2))))
3650 {
3651 rtx i2set, i3set;
3652 rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3653 newi2pat = PATTERN (m_split_insn);
3654
3655 i3set = single_set (NEXT_INSN (m_split_insn));
3656 i2set = single_set (m_split_insn);
3657
3658 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3659
3660 /* If I2 or I3 has multiple SETs, we won't know how to track
3661 register status, so don't use these insns. If I2's destination
3662 is used between I2 and I3, we also can't use these insns. */
3663
3664 if (i2_code_number >= 0 && i2set && i3set
3665 && (next_nonnote_nondebug_insn (i2) == i3
3666 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3667 insn_code_number = recog_for_combine (&newi3pat, i3,
3668 &new_i3_notes);
3669 if (insn_code_number >= 0)
3670 newpat = newi3pat;
3671
3672 /* It is possible that both insns now set the destination of I3.
3673 If so, we must show an extra use of it. */
3674
3675 if (insn_code_number >= 0)
3676 {
3677 rtx new_i3_dest = SET_DEST (i3set);
3678 rtx new_i2_dest = SET_DEST (i2set);
3679
3680 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3681 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3682 || GET_CODE (new_i3_dest) == SUBREG)
3683 new_i3_dest = XEXP (new_i3_dest, 0);
3684
3685 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3686 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3687 || GET_CODE (new_i2_dest) == SUBREG)
3688 new_i2_dest = XEXP (new_i2_dest, 0);
3689
3690 if (REG_P (new_i3_dest)
3691 && REG_P (new_i2_dest)
3692 && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3693 && REGNO (new_i2_dest) < reg_n_sets_max)
3694 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3695 }
3696 }
3697
3698 /* If we can split it and use I2DEST, go ahead and see if that
3699 helps things be recognized. Verify that none of the registers
3700 are set between I2 and I3. */
3701 if (insn_code_number < 0
3702 && (split = find_split_point (&newpat, i3, false)) != 0
3703 && (!HAVE_cc0 || REG_P (i2dest))
3704 /* We need I2DEST in the proper mode. If it is a hard register
3705 or the only use of a pseudo, we can change its mode.
3706 Make sure we don't change a hard register to have a mode that
3707 isn't valid for it, or change the number of registers. */
3708 && (GET_MODE (*split) == GET_MODE (i2dest)
3709 || GET_MODE (*split) == VOIDmode
3710 || can_change_dest_mode (i2dest, added_sets_2,
3711 GET_MODE (*split)))
3712 && (next_nonnote_nondebug_insn (i2) == i3
3713 || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
3714 /* We can't overwrite I2DEST if its value is still used by
3715 NEWPAT. */
3716 && ! reg_referenced_p (i2dest, newpat))
3717 {
3718 rtx newdest = i2dest;
3719 enum rtx_code split_code = GET_CODE (*split);
3720 machine_mode split_mode = GET_MODE (*split);
3721 bool subst_done = false;
3722 newi2pat = NULL_RTX;
3723
3724 i2scratch = true;
3725
3726 /* *SPLIT may be part of I2SRC, so make sure we have the
3727 original expression around for later debug processing.
3728 We should not need I2SRC any more in other cases. */
3729 if (MAY_HAVE_DEBUG_INSNS)
3730 i2src = copy_rtx (i2src);
3731 else
3732 i2src = NULL;
3733
3734 /* Get NEWDEST as a register in the proper mode. We have already
3735 validated that we can do this. */
3736 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3737 {
3738 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3739 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3740 else
3741 {
3742 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3743 newdest = regno_reg_rtx[REGNO (i2dest)];
3744 }
3745 }
3746
3747 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3748 an ASHIFT. This can occur if it was inside a PLUS and hence
3749 appeared to be a memory address. This is a kludge. */
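/* For example (a sketch with a made-up register number): if *SPLIT is
(mult (reg 100) (const_int 8)), then exact_log2 (8) is 3 and the SUBST
below turns it into (ashift (reg 100) (const_int 3)). */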
3750 if (split_code == MULT
3751 && CONST_INT_P (XEXP (*split, 1))
3752 && INTVAL (XEXP (*split, 1)) > 0
3753 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3754 {
3755 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3756 XEXP (*split, 0), GEN_INT (i)));
3757 /* Update split_code because we may not have a multiply
3758 anymore. */
3759 split_code = GET_CODE (*split);
3760 }
3761
3762 /* Similarly for (plus (mult FOO (const_int pow2))). */
3763 if (split_code == PLUS
3764 && GET_CODE (XEXP (*split, 0)) == MULT
3765 && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3766 && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3767 && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3768 {
3769 rtx nsplit = XEXP (*split, 0);
3770 SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3771 XEXP (nsplit, 0), GEN_INT (i)));
3772 /* Update split_code because we may not have a multiply
3773 anymore. */
3774 split_code = GET_CODE (*split);
3775 }
3776
3777 #ifdef INSN_SCHEDULING
3778 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3779 be written as a ZERO_EXTEND. */
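/* Sketch of the intent (ADDR is a placeholder, not from the original
sources): if *SPLIT is (subreg:SI (mem:QI ADDR) 0) and load_extend_op
for QImode is ZERO_EXTEND, the split uses (zero_extend:SI (mem:QI ADDR))
instead, matching how the target actually extends loaded values. */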
3780 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3781 {
3782 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3783 what it really is. */
3784 if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3785 == SIGN_EXTEND)
3786 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3787 SUBREG_REG (*split)));
3788 else
3789 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3790 SUBREG_REG (*split)));
3791 }
3792 #endif
3793
3794 /* Attempt to split binary operators using arithmetic identities. */
3795 if (BINARY_P (SET_SRC (newpat))
3796 && split_mode == GET_MODE (SET_SRC (newpat))
3797 && ! side_effects_p (SET_SRC (newpat)))
3798 {
3799 rtx setsrc = SET_SRC (newpat);
3800 machine_mode mode = GET_MODE (setsrc);
3801 enum rtx_code code = GET_CODE (setsrc);
3802 rtx src_op0 = XEXP (setsrc, 0);
3803 rtx src_op1 = XEXP (setsrc, 1);
3804
3805 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3806 if (rtx_equal_p (src_op0, src_op1))
3807 {
3808 newi2pat = gen_rtx_SET (newdest, src_op0);
3809 SUBST (XEXP (setsrc, 0), newdest);
3810 SUBST (XEXP (setsrc, 1), newdest);
3811 subst_done = true;
3812 }
3813 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3814 else if ((code == PLUS || code == MULT)
3815 && GET_CODE (src_op0) == code
3816 && GET_CODE (XEXP (src_op0, 0)) == code
3817 && (INTEGRAL_MODE_P (mode)
3818 || (FLOAT_MODE_P (mode)
3819 && flag_unsafe_math_optimizations)))
3820 {
3821 rtx p = XEXP (XEXP (src_op0, 0), 0);
3822 rtx q = XEXP (XEXP (src_op0, 0), 1);
3823 rtx r = XEXP (src_op0, 1);
3824 rtx s = src_op1;
3825
3826 /* Split both "((X op Y) op X) op Y" and
3827 "((X op Y) op Y) op X" as "T op T" where T is
3828 "X op Y". */
3829 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3830 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3831 {
3832 newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3833 SUBST (XEXP (setsrc, 0), newdest);
3834 SUBST (XEXP (setsrc, 1), newdest);
3835 subst_done = true;
3836 }
3837 /* Split "((X op X) op Y) op Y" as "T op T" where
3838 T is "X op Y". */
3839 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3840 {
3841 rtx tmp = simplify_gen_binary (code, mode, p, r);
3842 newi2pat = gen_rtx_SET (newdest, tmp);
3843 SUBST (XEXP (setsrc, 0), newdest);
3844 SUBST (XEXP (setsrc, 1), newdest);
3845 subst_done = true;
3846 }
3847 }
3848 }
3849
3850 if (!subst_done)
3851 {
3852 newi2pat = gen_rtx_SET (newdest, *split);
3853 SUBST (*split, newdest);
3854 }
3855
3856 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3857
3858 /* recog_for_combine might have added CLOBBERs to newi2pat.
3859 Make sure NEWPAT does not depend on the clobbered regs. */
3860 if (GET_CODE (newi2pat) == PARALLEL)
3861 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3862 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3863 {
3864 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3865 if (reg_overlap_mentioned_p (reg, newpat))
3866 {
3867 undo_all ();
3868 return 0;
3869 }
3870 }
3871
3872 /* If the split point was a MULT and we didn't have one before,
3873 don't use one now. */
3874 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3875 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3876 }
3877 }
3878
3879 /* Check for a case where we loaded from memory in a narrow mode and
3880 then sign extended it, but we need both registers. In that case,
3881 we have a PARALLEL with both loads from the same memory location.
3882 We can split this into a load from memory followed by a register-register
3883 copy. This saves at least one insn, more if register allocation can
3884 eliminate the copy.
3885
3886 We cannot do this if the destination of the first assignment is a
3887 condition code register or cc0. We eliminate this case by making sure
3888 the SET_DEST and SET_SRC have the same mode.
3889
3890 We cannot do this if the destination of the second assignment is
3891 a register that we have already assumed is zero-extended. Similarly
3892 for a SUBREG of such a register. */
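/* Illustrative sketch (register numbers and ADDR are made up): NEWPAT
here has the shape
(parallel [(set (reg:SI 100) (sign_extend:SI (mem:HI ADDR)))
(set (reg:HI 101) (mem:HI ADDR))])
and is rewritten below as the extending load alone in I2 plus
(set (reg:HI 101) <lowpart of (reg:SI 100)>) in I3, so the memory
location is only read once. */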
3893
3894 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3895 && GET_CODE (newpat) == PARALLEL
3896 && XVECLEN (newpat, 0) == 2
3897 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3898 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3899 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3900 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3901 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3902 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3903 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3904 && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3905 DF_INSN_LUID (i2))
3906 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3907 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3908 && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
3909 (REG_P (temp_expr)
3910 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3911 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3912 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3913 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3914 != GET_MODE_MASK (word_mode))))
3915 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3916 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3917 (REG_P (temp_expr)
3918 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3919 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < BITS_PER_WORD
3920 && GET_MODE_PRECISION (GET_MODE (temp_expr)) < HOST_BITS_PER_INT
3921 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3922 != GET_MODE_MASK (word_mode)))))
3923 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3924 SET_SRC (XVECEXP (newpat, 0, 1)))
3925 && ! find_reg_note (i3, REG_UNUSED,
3926 SET_DEST (XVECEXP (newpat, 0, 0))))
3927 {
3928 rtx ni2dest;
3929
3930 newi2pat = XVECEXP (newpat, 0, 0);
3931 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3932 newpat = XVECEXP (newpat, 0, 1);
3933 SUBST (SET_SRC (newpat),
3934 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
3935 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3936
3937 if (i2_code_number >= 0)
3938 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3939
3940 if (insn_code_number >= 0)
3941 swap_i2i3 = 1;
3942 }
3943
3944 /* Similarly, check for a case where we have a PARALLEL of two independent
3945 SETs but we started with three insns. In this case, we can do the sets
3946 as two separate insns. This case occurs when some SET allows two
3947 other insns to combine, but the destination of that SET is still live.
3948
3949 Also do this if we started with two insns and (at least) one of the
3950 resulting sets is a noop; this noop will be deleted later. */
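/* Rough sketch of the idea (X, Y and the sources are placeholders):
NEWPAT of the form
(parallel [(set (reg X) (expr_1)) (set (reg Y) (expr_2))])
with the two SETs independent of each other is split so that one of
them becomes a new I2 and the other stays in I3, subject to the
ordering checks below. */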
3951
3952 else if (insn_code_number < 0 && asm_noperands (newpat) < 0
3953 && GET_CODE (newpat) == PARALLEL
3954 && XVECLEN (newpat, 0) == 2
3955 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3956 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3957 && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
3958 || set_noop_p (XVECEXP (newpat, 0, 1)))
3959 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
3960 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
3961 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3962 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3963 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3964 XVECEXP (newpat, 0, 0))
3965 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
3966 XVECEXP (newpat, 0, 1))
3967 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
3968 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
3969 {
3970 rtx set0 = XVECEXP (newpat, 0, 0);
3971 rtx set1 = XVECEXP (newpat, 0, 1);
3972
3973 /* Normally, it doesn't matter which of the two is done first,
3974 but the one that references cc0 can't be the second, and
3975 one which uses any regs/memory set in between i2 and i3 can't
3976 be first. The PARALLEL might also have been pre-existing in i3,
3977 so we need to make sure that we won't wrongly hoist a SET to i2
3978 that would conflict with a death note present in there. */
3979 if (!use_crosses_set_p (SET_SRC (set1), DF_INSN_LUID (i2))
3980 && !(REG_P (SET_DEST (set1))
3981 && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
3982 && !(GET_CODE (SET_DEST (set1)) == SUBREG
3983 && find_reg_note (i2, REG_DEAD,
3984 SUBREG_REG (SET_DEST (set1))))
3985 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
3986 /* If I3 is a jump, ensure that set0 is a jump so that
3987 we do not create invalid RTL. */
3988 && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
3989 )
3990 {
3991 newi2pat = set1;
3992 newpat = set0;
3993 }
3994 else if (!use_crosses_set_p (SET_SRC (set0), DF_INSN_LUID (i2))
3995 && !(REG_P (SET_DEST (set0))
3996 && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
3997 && !(GET_CODE (SET_DEST (set0)) == SUBREG
3998 && find_reg_note (i2, REG_DEAD,
3999 SUBREG_REG (SET_DEST (set0))))
4000 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
4001 /* If I3 is a jump, ensure that set1 is a jump so that
4002 we do not create invalid RTL. */
4003 && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
4004 )
4005 {
4006 newi2pat = set0;
4007 newpat = set1;
4008 }
4009 else
4010 {
4011 undo_all ();
4012 return 0;
4013 }
4014
4015 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4016
4017 if (i2_code_number >= 0)
4018 {
4019 /* recog_for_combine might have added CLOBBERs to newi2pat.
4020 Make sure NEWPAT does not depend on the clobbered regs. */
4021 if (GET_CODE (newi2pat) == PARALLEL)
4022 {
4023 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
4024 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
4025 {
4026 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
4027 if (reg_overlap_mentioned_p (reg, newpat))
4028 {
4029 undo_all ();
4030 return 0;
4031 }
4032 }
4033 }
4034
4035 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4036 }
4037 }
4038
4039 /* If it still isn't recognized, fail and change things back the way they
4040 were. */
4041 if ((insn_code_number < 0
4042 /* Is the result a reasonable ASM_OPERANDS? */
4043 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4044 {
4045 undo_all ();
4046 return 0;
4047 }
4048
4049 /* If we had to change another insn, make sure it is valid also. */
4050 if (undobuf.other_insn)
4051 {
4052 CLEAR_HARD_REG_SET (newpat_used_regs);
4053
4054 other_pat = PATTERN (undobuf.other_insn);
4055 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4056 &new_other_notes);
4057
4058 if (other_code_number < 0 && ! check_asm_operands (other_pat))
4059 {
4060 undo_all ();
4061 return 0;
4062 }
4063 }
4064
4065 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
4066 they are adjacent to each other or not. */
4067 if (HAVE_cc0)
4068 {
4069 rtx_insn *p = prev_nonnote_insn (i3);
4070 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4071 && sets_cc0_p (newi2pat))
4072 {
4073 undo_all ();
4074 return 0;
4075 }
4076 }
4077
4078 /* Only allow this combination if insn_rtx_costs reports that the
4079 replacement instructions are cheaper than the originals. */
4080 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4081 {
4082 undo_all ();
4083 return 0;
4084 }
4085
4086 if (MAY_HAVE_DEBUG_INSNS)
4087 {
4088 struct undo *undo;
4089
4090 for (undo = undobuf.undos; undo; undo = undo->next)
4091 if (undo->kind == UNDO_MODE)
4092 {
4093 rtx reg = *undo->where.r;
4094 machine_mode new_mode = GET_MODE (reg);
4095 machine_mode old_mode = undo->old_contents.m;
4096
4097 /* Temporarily revert mode back. */
4098 adjust_reg_mode (reg, old_mode);
4099
4100 if (reg == i2dest && i2scratch)
4101 {
4102 /* If we used i2dest as a scratch register with a
4103 different mode, substitute it for the original
4104 i2src while its original mode is temporarily
4105 restored, and then clear i2scratch so that we don't
4106 do it again later. */
4107 propagate_for_debug (i2, last_combined_insn, reg, i2src,
4108 this_basic_block);
4109 i2scratch = false;
4110 /* Put back the new mode. */
4111 adjust_reg_mode (reg, new_mode);
4112 }
4113 else
4114 {
4115 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4116 rtx_insn *first, *last;
4117
4118 if (reg == i2dest)
4119 {
4120 first = i2;
4121 last = last_combined_insn;
4122 }
4123 else
4124 {
4125 first = i3;
4126 last = undobuf.other_insn;
4127 gcc_assert (last);
4128 if (DF_INSN_LUID (last)
4129 < DF_INSN_LUID (last_combined_insn))
4130 last = last_combined_insn;
4131 }
4132
4133 /* We're dealing with a reg that changed mode but not
4134 meaning, so we want to turn it into a subreg for
4135 the new mode. However, because of REG sharing and
4136 because its mode had already changed, we have to do
4137 it in two steps. First, replace any debug uses of
4138 reg, with its original mode temporarily restored,
4139 with this copy we have created; then, replace the
4140 copy with the SUBREG of the original shared reg,
4141 once again changed to the new mode. */
4142 propagate_for_debug (first, last, reg, tempreg,
4143 this_basic_block);
4144 adjust_reg_mode (reg, new_mode);
4145 propagate_for_debug (first, last, tempreg,
4146 lowpart_subreg (old_mode, reg, new_mode),
4147 this_basic_block);
4148 }
4149 }
4150 }
4151
4152 /* If we will be able to accept this, we have made a
4153 change to the destination of I3. This requires us to
4154 do a few adjustments. */
4155
4156 if (changed_i3_dest)
4157 {
4158 PATTERN (i3) = newpat;
4159 adjust_for_new_dest (i3);
4160 }
4161
4162 /* We now know that we can do this combination. Merge the insns and
4163 update the status of registers and LOG_LINKS. */
4164
4165 if (undobuf.other_insn)
4166 {
4167 rtx note, next;
4168
4169 PATTERN (undobuf.other_insn) = other_pat;
4170
4171 /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4172 ensure that they are still valid. Then add any non-duplicate
4173 notes added by recog_for_combine. */
4174 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4175 {
4176 next = XEXP (note, 1);
4177
4178 if ((REG_NOTE_KIND (note) == REG_DEAD
4179 && !reg_referenced_p (XEXP (note, 0),
4180 PATTERN (undobuf.other_insn)))
4181 || (REG_NOTE_KIND (note) == REG_UNUSED
4182 && !reg_set_p (XEXP (note, 0),
4183 PATTERN (undobuf.other_insn)))
4184 /* Simply drop the equality note since it may no longer be valid
4185 for other_insn. It may be possible to record that the CC
4186 register has changed and only discard those notes, but
4187 in practice it's an unnecessary complication and doesn't
4188 give any meaningful improvement.
4189
4190 See PR78559. */
4191 || REG_NOTE_KIND (note) == REG_EQUAL
4192 || REG_NOTE_KIND (note) == REG_EQUIV)
4193 remove_note (undobuf.other_insn, note);
4194 }
4195
4196 distribute_notes (new_other_notes, undobuf.other_insn,
4197 undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4198 NULL_RTX);
4199 }
4200
4201 if (swap_i2i3)
4202 {
4203 rtx_insn *insn;
4204 struct insn_link *link;
4205 rtx ni2dest;
4206
4207 /* I3 now uses what used to be its destination and which is now
4208 I2's destination. This requires us to do a few adjustments. */
4209 PATTERN (i3) = newpat;
4210 adjust_for_new_dest (i3);
4211
4212 /* We need a LOG_LINK from I3 to I2. But we used to have one,
4213 so we still will.
4214
4215 However, some later insn might be using I2's dest and have
4216 a LOG_LINK pointing at I3. We must remove this link.
4217 The simplest way to remove the link is to point it at I1,
4218 which we know will be a NOTE. */
4219
4220 /* newi2pat is usually a SET here; however, recog_for_combine might
4221 have added some clobbers. */
4222 if (GET_CODE (newi2pat) == PARALLEL)
4223 ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
4224 else
4225 ni2dest = SET_DEST (newi2pat);
4226
4227 for (insn = NEXT_INSN (i3);
4228 insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4229 || insn != BB_HEAD (this_basic_block->next_bb));
4230 insn = NEXT_INSN (insn))
4231 {
4232 if (NONDEBUG_INSN_P (insn)
4233 && reg_referenced_p (ni2dest, PATTERN (insn)))
4234 {
4235 FOR_EACH_LOG_LINK (link, insn)
4236 if (link->insn == i3)
4237 link->insn = i1;
4238
4239 break;
4240 }
4241 }
4242 }
4243
4244 {
4245 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4246 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4247 rtx midnotes = 0;
4248 int from_luid;
4249 /* Compute which registers we expect to eliminate. newi2pat may be setting
4250 either i3dest or i2dest, so we must check it. */
4251 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4252 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4253 || !i2dest_killed
4254 ? 0 : i2dest);
4255 /* For i1, we need to compute both local elimination and global
4256 elimination information with respect to newi2pat because i1dest
4257 may be the same as i3dest, in which case newi2pat may be setting
4258 i1dest. Global information is used when distributing REG_DEAD
4259 note for i2 and i3, in which case it does matter if newi2pat sets
4260 i1dest or not.
4261
4262 Local information is used when distributing REG_DEAD note for i1,
4263 in which case it doesn't matter if newi2pat sets i1dest or not.
4264 See PR62151, if we have four insns combination:
4265 i0: r0 <- i0src
4266 i1: r1 <- i1src (using r0)
4267 REG_DEAD (r0)
4268 i2: r0 <- i2src (using r1)
4269 i3: r3 <- i3src (using r0)
4270 ix: using r0
4271 From i1's point of view, r0 is eliminated, no matter if it is set
4272 by newi2pat or not. In other words, REG_DEAD info for r0 in i1
4273 should be discarded.
4274
4275 Note local information only affects cases in forms like "I1->I2->I3",
4276 "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like
4277 "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4278 i0dest anyway. */
4279 rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4280 || !i1dest_killed
4281 ? 0 : i1dest);
4282 rtx elim_i1 = (local_elim_i1 == 0
4283 || (newi2pat && reg_set_p (i1dest, newi2pat))
4284 ? 0 : i1dest);
4285 /* Same case as i1. */
4286 rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4287 ? 0 : i0dest);
4288 rtx elim_i0 = (local_elim_i0 == 0
4289 || (newi2pat && reg_set_p (i0dest, newi2pat))
4290 ? 0 : i0dest);
4291
4292 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4293 clear them. */
4294 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4295 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4296 if (i1)
4297 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4298 if (i0)
4299 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4300
4301 /* Ensure that we do not have something that should not be shared but
4302 occurs multiple times in the new insns. Check this by first
4303 resetting all the `used' flags and then copying anything that is shared. */
4304
4305 reset_used_flags (i3notes);
4306 reset_used_flags (i2notes);
4307 reset_used_flags (i1notes);
4308 reset_used_flags (i0notes);
4309 reset_used_flags (newpat);
4310 reset_used_flags (newi2pat);
4311 if (undobuf.other_insn)
4312 reset_used_flags (PATTERN (undobuf.other_insn));
4313
4314 i3notes = copy_rtx_if_shared (i3notes);
4315 i2notes = copy_rtx_if_shared (i2notes);
4316 i1notes = copy_rtx_if_shared (i1notes);
4317 i0notes = copy_rtx_if_shared (i0notes);
4318 newpat = copy_rtx_if_shared (newpat);
4319 newi2pat = copy_rtx_if_shared (newi2pat);
4320 if (undobuf.other_insn)
4321 reset_used_flags (PATTERN (undobuf.other_insn));
4322
4323 INSN_CODE (i3) = insn_code_number;
4324 PATTERN (i3) = newpat;
4325
4326 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4327 {
4328 for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
4329 link = XEXP (link, 1))
4330 {
4331 if (substed_i2)
4332 {
4333 /* I2SRC must still be meaningful at this point. Some
4334 splitting operations can invalidate I2SRC, but those
4335 operations do not apply to calls. */
4336 gcc_assert (i2src);
4337 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4338 i2dest, i2src);
4339 }
4340 if (substed_i1)
4341 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4342 i1dest, i1src);
4343 if (substed_i0)
4344 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4345 i0dest, i0src);
4346 }
4347 }
4348
4349 if (undobuf.other_insn)
4350 INSN_CODE (undobuf.other_insn) = other_code_number;
4351
4352 /* We had one special case above where I2 had more than one set and
4353 we replaced a destination of one of those sets with the destination
4354 of I3. In that case, we have to update LOG_LINKS of insns later
4355 in this basic block. Note that this (expensive) case is rare.
4356
4357 Also, in this case, we must pretend that all REG_NOTEs for I2
4358 actually came from I3, so that REG_UNUSED notes from I2 will be
4359 properly handled. */
4360
4361 if (i3_subst_into_i2)
4362 {
4363 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4364 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4365 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4366 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4367 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4368 && ! find_reg_note (i2, REG_UNUSED,
4369 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4370 for (temp_insn = NEXT_INSN (i2);
4371 temp_insn
4372 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4373 || BB_HEAD (this_basic_block) != temp_insn);
4374 temp_insn = NEXT_INSN (temp_insn))
4375 if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4376 FOR_EACH_LOG_LINK (link, temp_insn)
4377 if (link->insn == i2)
4378 link->insn = i3;
4379
4380 if (i3notes)
4381 {
4382 rtx link = i3notes;
4383 while (XEXP (link, 1))
4384 link = XEXP (link, 1);
4385 XEXP (link, 1) = i2notes;
4386 }
4387 else
4388 i3notes = i2notes;
4389 i2notes = 0;
4390 }
4391
4392 LOG_LINKS (i3) = NULL;
4393 REG_NOTES (i3) = 0;
4394 LOG_LINKS (i2) = NULL;
4395 REG_NOTES (i2) = 0;
4396
4397 if (newi2pat)
4398 {
4399 if (MAY_HAVE_DEBUG_INSNS && i2scratch)
4400 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4401 this_basic_block);
4402 INSN_CODE (i2) = i2_code_number;
4403 PATTERN (i2) = newi2pat;
4404 }
4405 else
4406 {
4407 if (MAY_HAVE_DEBUG_INSNS && i2src)
4408 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4409 this_basic_block);
4410 SET_INSN_DELETED (i2);
4411 }
4412
4413 if (i1)
4414 {
4415 LOG_LINKS (i1) = NULL;
4416 REG_NOTES (i1) = 0;
4417 if (MAY_HAVE_DEBUG_INSNS)
4418 propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4419 this_basic_block);
4420 SET_INSN_DELETED (i1);
4421 }
4422
4423 if (i0)
4424 {
4425 LOG_LINKS (i0) = NULL;
4426 REG_NOTES (i0) = 0;
4427 if (MAY_HAVE_DEBUG_INSNS)
4428 propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4429 this_basic_block);
4430 SET_INSN_DELETED (i0);
4431 }
4432
4433 /* Get death notes for everything that is now used in either I3 or
4434 I2 and used to die in a previous insn. If we built two new
4435 patterns, move from I1 to I2 then I2 to I3 so that we get the
4436 proper movement on registers that I2 modifies. */
4437
4438 if (i0)
4439 from_luid = DF_INSN_LUID (i0);
4440 else if (i1)
4441 from_luid = DF_INSN_LUID (i1);
4442 else
4443 from_luid = DF_INSN_LUID (i2);
4444 if (newi2pat)
4445 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4446 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4447
4448 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4449 if (i3notes)
4450 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4451 elim_i2, elim_i1, elim_i0);
4452 if (i2notes)
4453 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4454 elim_i2, elim_i1, elim_i0);
4455 if (i1notes)
4456 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4457 elim_i2, local_elim_i1, local_elim_i0);
4458 if (i0notes)
4459 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4460 elim_i2, elim_i1, local_elim_i0);
4461 if (midnotes)
4462 distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4463 elim_i2, elim_i1, elim_i0);
4464
4465 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4466 know these are REG_UNUSED and want them to go to the desired insn,
4467 so we always pass it as i3. */
4468
4469 if (newi2pat && new_i2_notes)
4470 distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4471 NULL_RTX);
4472
4473 if (new_i3_notes)
4474 distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4475 NULL_RTX);
4476
4477 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4478 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4479 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4480 in that case, it might delete I2. Similarly for I2 and I1.
4481 Show an additional death due to the REG_DEAD note we make here. If
4482 we discard it in distribute_notes, we will decrement it again. */
4483
4484 if (i3dest_killed)
4485 {
4486 rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4487 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4488 distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4489 elim_i1, elim_i0);
4490 else
4491 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4492 elim_i2, elim_i1, elim_i0);
4493 }
4494
4495 if (i2dest_in_i2src)
4496 {
4497 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4498 if (newi2pat && reg_set_p (i2dest, newi2pat))
4499 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4500 NULL_RTX, NULL_RTX);
4501 else
4502 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4503 NULL_RTX, NULL_RTX, NULL_RTX);
4504 }
4505
4506 if (i1dest_in_i1src)
4507 {
4508 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4509 if (newi2pat && reg_set_p (i1dest, newi2pat))
4510 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4511 NULL_RTX, NULL_RTX);
4512 else
4513 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4514 NULL_RTX, NULL_RTX, NULL_RTX);
4515 }
4516
4517 if (i0dest_in_i0src)
4518 {
4519 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4520 if (newi2pat && reg_set_p (i0dest, newi2pat))
4521 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4522 NULL_RTX, NULL_RTX);
4523 else
4524 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4525 NULL_RTX, NULL_RTX, NULL_RTX);
4526 }
4527
4528 distribute_links (i3links);
4529 distribute_links (i2links);
4530 distribute_links (i1links);
4531 distribute_links (i0links);
4532
4533 if (REG_P (i2dest))
4534 {
4535 struct insn_link *link;
4536 rtx_insn *i2_insn = 0;
4537 rtx i2_val = 0, set;
4538
4539 /* The insn that used to set this register doesn't exist, and
4540 this life of the register may not exist either. See if one of
4541 I3's links points to an insn that sets I2DEST. If it does,
4542 that is now the last known value for I2DEST. If we don't update
4543 this and I2 set the register to a value that depended on its old
4544 contents, we will get confused. If this insn is used, things
4545 will be set correctly in combine_instructions. */
4546 FOR_EACH_LOG_LINK (link, i3)
4547 if ((set = single_set (link->insn)) != 0
4548 && rtx_equal_p (i2dest, SET_DEST (set)))
4549 i2_insn = link->insn, i2_val = SET_SRC (set);
4550
4551 record_value_for_reg (i2dest, i2_insn, i2_val);
4552
4553 /* If the reg formerly set in I2 died only once and that was in I3,
4554 zero its use count so it won't make `reload' do any work. */
4555 if (! added_sets_2
4556 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4557 && ! i2dest_in_i2src
4558 && REGNO (i2dest) < reg_n_sets_max)
4559 INC_REG_N_SETS (REGNO (i2dest), -1);
4560 }
4561
4562 if (i1 && REG_P (i1dest))
4563 {
4564 struct insn_link *link;
4565 rtx_insn *i1_insn = 0;
4566 rtx i1_val = 0, set;
4567
4568 FOR_EACH_LOG_LINK (link, i3)
4569 if ((set = single_set (link->insn)) != 0
4570 && rtx_equal_p (i1dest, SET_DEST (set)))
4571 i1_insn = link->insn, i1_val = SET_SRC (set);
4572
4573 record_value_for_reg (i1dest, i1_insn, i1_val);
4574
4575 if (! added_sets_1
4576 && ! i1dest_in_i1src
4577 && REGNO (i1dest) < reg_n_sets_max)
4578 INC_REG_N_SETS (REGNO (i1dest), -1);
4579 }
4580
4581 if (i0 && REG_P (i0dest))
4582 {
4583 struct insn_link *link;
4584 rtx_insn *i0_insn = 0;
4585 rtx i0_val = 0, set;
4586
4587 FOR_EACH_LOG_LINK (link, i3)
4588 if ((set = single_set (link->insn)) != 0
4589 && rtx_equal_p (i0dest, SET_DEST (set)))
4590 i0_insn = link->insn, i0_val = SET_SRC (set);
4591
4592 record_value_for_reg (i0dest, i0_insn, i0_val);
4593
4594 if (! added_sets_0
4595 && ! i0dest_in_i0src
4596 && REGNO (i0dest) < reg_n_sets_max)
4597 INC_REG_N_SETS (REGNO (i0dest), -1);
4598 }
4599
4600 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4601 been made to this insn. The order is important, because newi2pat
4602 can affect nonzero_bits of newpat. */
4603 if (newi2pat)
4604 note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4605 note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4606 }
4607
4608 if (undobuf.other_insn != NULL_RTX)
4609 {
4610 if (dump_file)
4611 {
4612 fprintf (dump_file, "modifying other_insn ");
4613 dump_insn_slim (dump_file, undobuf.other_insn);
4614 }
4615 df_insn_rescan (undobuf.other_insn);
4616 }
4617
4618 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4619 {
4620 if (dump_file)
4621 {
4622 fprintf (dump_file, "modifying insn i0 ");
4623 dump_insn_slim (dump_file, i0);
4624 }
4625 df_insn_rescan (i0);
4626 }
4627
4628 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4629 {
4630 if (dump_file)
4631 {
4632 fprintf (dump_file, "modifying insn i1 ");
4633 dump_insn_slim (dump_file, i1);
4634 }
4635 df_insn_rescan (i1);
4636 }
4637
4638 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4639 {
4640 if (dump_file)
4641 {
4642 fprintf (dump_file, "modifying insn i2 ");
4643 dump_insn_slim (dump_file, i2);
4644 }
4645 df_insn_rescan (i2);
4646 }
4647
4648 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4649 {
4650 if (dump_file)
4651 {
4652 fprintf (dump_file, "modifying insn i3 ");
4653 dump_insn_slim (dump_file, i3);
4654 }
4655 df_insn_rescan (i3);
4656 }
4657
4658 /* Set new_direct_jump_p if a new return or simple jump instruction
4659 has been created. Adjust the CFG accordingly. */
4660 if (returnjump_p (i3) || any_uncondjump_p (i3))
4661 {
4662 *new_direct_jump_p = 1;
4663 mark_jump_label (PATTERN (i3), i3, 0);
4664 update_cfg_for_uncondjump (i3);
4665 }
4666
4667 if (undobuf.other_insn != NULL_RTX
4668 && (returnjump_p (undobuf.other_insn)
4669 || any_uncondjump_p (undobuf.other_insn)))
4670 {
4671 *new_direct_jump_p = 1;
4672 update_cfg_for_uncondjump (undobuf.other_insn);
4673 }
4674
4675 if (GET_CODE (PATTERN (i3)) == TRAP_IF
4676 && XEXP (PATTERN (i3), 0) == const1_rtx)
4677 {
4678 basic_block bb = BLOCK_FOR_INSN (i3);
4679 gcc_assert (bb);
4680 remove_edge (split_block (bb, i3));
4681 emit_barrier_after_bb (bb);
4682 *new_direct_jump_p = 1;
4683 }
4684
4685 if (undobuf.other_insn
4686 && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4687 && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4688 {
4689 basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4690 gcc_assert (bb);
4691 remove_edge (split_block (bb, undobuf.other_insn));
4692 emit_barrier_after_bb (bb);
4693 *new_direct_jump_p = 1;
4694 }
4695
4696 /* A noop might also need cleaning up of the CFG, if it comes from the
4697 simplification of a jump. */
4698 if (JUMP_P (i3)
4699 && GET_CODE (newpat) == SET
4700 && SET_SRC (newpat) == pc_rtx
4701 && SET_DEST (newpat) == pc_rtx)
4702 {
4703 *new_direct_jump_p = 1;
4704 update_cfg_for_uncondjump (i3);
4705 }
4706
4707 if (undobuf.other_insn != NULL_RTX
4708 && JUMP_P (undobuf.other_insn)
4709 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4710 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4711 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4712 {
4713 *new_direct_jump_p = 1;
4714 update_cfg_for_uncondjump (undobuf.other_insn);
4715 }
4716
4717 combine_successes++;
4718 undo_commit ();
4719
4720 if (added_links_insn
4721 && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
4722 && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
4723 return added_links_insn;
4724 else
4725 return newi2pat ? i2 : i3;
4726 }
4727 \f
4728 /* Get a marker for undoing to the current state. */
4729
4730 static void *
4731 get_undo_marker (void)
4732 {
4733 return undobuf.undos;
4734 }
4735
4736 /* Undo the modifications up to the marker. */
4737
4738 static void
4739 undo_to_marker (void *marker)
4740 {
4741 struct undo *undo, *next;
4742
4743 for (undo = undobuf.undos; undo != marker; undo = next)
4744 {
4745 gcc_assert (undo);
4746
4747 next = undo->next;
4748 switch (undo->kind)
4749 {
4750 case UNDO_RTX:
4751 *undo->where.r = undo->old_contents.r;
4752 break;
4753 case UNDO_INT:
4754 *undo->where.i = undo->old_contents.i;
4755 break;
4756 case UNDO_MODE:
4757 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4758 break;
4759 case UNDO_LINKS:
4760 *undo->where.l = undo->old_contents.l;
4761 break;
4762 default:
4763 gcc_unreachable ();
4764 }
4765
4766 undo->next = undobuf.frees;
4767 undobuf.frees = undo;
4768 }
4769
4770 undobuf.undos = (struct undo *) marker;
4771 }
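/* A sketch of how the marker interface is typically used elsewhere in
this file (not a quotation of any particular caller):

void *mark = get_undo_marker ();
... make tentative changes via SUBST and friends ...
if (the attempt did not pay off)
undo_to_marker (mark);

Only the changes recorded after the marker are rolled back. */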
4772
4773 /* Undo all the modifications recorded in undobuf. */
4774
4775 static void
4776 undo_all (void)
4777 {
4778 undo_to_marker (0);
4779 }
4780
4781 /* We've committed to accepting the changes we made. Move all
4782 of the undos to the free list. */
4783
4784 static void
4785 undo_commit (void)
4786 {
4787 struct undo *undo, *next;
4788
4789 for (undo = undobuf.undos; undo; undo = next)
4790 {
4791 next = undo->next;
4792 undo->next = undobuf.frees;
4793 undobuf.frees = undo;
4794 }
4795 undobuf.undos = 0;
4796 }
4797 \f
4798 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4799 where we have an arithmetic expression and return that point. LOC will
4800 be inside INSN.
4801
4802 try_combine will call this function to see if an insn can be split into
4803 two insns. */
4804
4805 static rtx *
4806 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4807 {
4808 rtx x = *loc;
4809 enum rtx_code code = GET_CODE (x);
4810 rtx *split;
4811 unsigned HOST_WIDE_INT len = 0;
4812 HOST_WIDE_INT pos = 0;
4813 int unsignedp = 0;
4814 rtx inner = NULL_RTX;
4815 scalar_int_mode mode, inner_mode;
4816
4817 /* First special-case some codes. */
4818 switch (code)
4819 {
4820 case SUBREG:
4821 #ifdef INSN_SCHEDULING
4822 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4823 point. */
4824 if (MEM_P (SUBREG_REG (x)))
4825 return loc;
4826 #endif
4827 return find_split_point (&SUBREG_REG (x), insn, false);
4828
4829 case MEM:
4830 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4831 using LO_SUM and HIGH. */
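/* Sketch of the rewrite done just below (SYM is a placeholder):
(mem (symbol_ref SYM)) becomes
(mem (lo_sum (high (symbol_ref SYM)) (symbol_ref SYM)))
and the returned split point is the HIGH sub-expression, which can then
be computed by a separate insn. */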
4832 if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4833 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4834 {
4835 machine_mode address_mode = get_address_mode (x);
4836
4837 SUBST (XEXP (x, 0),
4838 gen_rtx_LO_SUM (address_mode,
4839 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4840 XEXP (x, 0)));
4841 return &XEXP (XEXP (x, 0), 0);
4842 }
4843
4844 /* If we have a PLUS whose second operand is a constant and the
4845 address is not valid, perhaps we can split it up using
4846 the machine-specific way to split large constants. We use
4847 the first pseudo-reg (one of the virtual regs) as a placeholder;
4848 it will not remain in the result. */
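/* Sketch of the idea (no target-specific claims intended): for an
address such as (plus (reg BASE) (const_int BIG)) that is not directly
valid, the machine-specific splitter may emit two insns through the
placeholder register, e.g. one setting it to the high part of the
constant and one adding the low part; the code below then stitches the
two sources back together and splits between them. */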
4849 if (GET_CODE (XEXP (x, 0)) == PLUS
4850 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4851 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4852 MEM_ADDR_SPACE (x)))
4853 {
4854 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4855 rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4856 subst_insn);
4857
4858 /* This should have produced two insns, each of which sets our
4859 placeholder. If the source of the second is a valid address,
4860 we can put both sources together and make a split point
4861 in the middle. */
4862
4863 if (seq
4864 && NEXT_INSN (seq) != NULL_RTX
4865 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4866 && NONJUMP_INSN_P (seq)
4867 && GET_CODE (PATTERN (seq)) == SET
4868 && SET_DEST (PATTERN (seq)) == reg
4869 && ! reg_mentioned_p (reg,
4870 SET_SRC (PATTERN (seq)))
4871 && NONJUMP_INSN_P (NEXT_INSN (seq))
4872 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4873 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4874 && memory_address_addr_space_p
4875 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4876 MEM_ADDR_SPACE (x)))
4877 {
4878 rtx src1 = SET_SRC (PATTERN (seq));
4879 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4880
4881 /* Replace the placeholder in SRC2 with SRC1. If we can
4882 find where in SRC2 it was placed, that can become our
4883 split point and we can replace this address with SRC2.
4884 Just try two obvious places. */
4885
4886 src2 = replace_rtx (src2, reg, src1);
4887 split = 0;
4888 if (XEXP (src2, 0) == src1)
4889 split = &XEXP (src2, 0);
4890 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4891 && XEXP (XEXP (src2, 0), 0) == src1)
4892 split = &XEXP (XEXP (src2, 0), 0);
4893
4894 if (split)
4895 {
4896 SUBST (XEXP (x, 0), src2);
4897 return split;
4898 }
4899 }
4900
4901 /* If that didn't work, perhaps the first operand is complex and
4902 needs to be computed separately, so make a split point there.
4903 This will occur on machines that just support REG + CONST
4904 and have a constant moved through some previous computation. */
4905
4906 else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4907 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4908 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4909 return &XEXP (XEXP (x, 0), 0);
4910 }
4911
4912 /* If we have a PLUS whose first operand is complex, try computing it
4913 separately by making a split there. */
4914 if (GET_CODE (XEXP (x, 0)) == PLUS
4915 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4916 MEM_ADDR_SPACE (x))
4917 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4918 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4919 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4920 return &XEXP (XEXP (x, 0), 0);
4921 break;
4922
4923 case SET:
4924 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
4925 ZERO_EXTRACT, the most likely reason why this doesn't match is that
4926 we need to put the operand into a register. So split at that
4927 point. */
4928
4929 if (SET_DEST (x) == cc0_rtx
4930 && GET_CODE (SET_SRC (x)) != COMPARE
4931 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
4932 && !OBJECT_P (SET_SRC (x))
4933 && ! (GET_CODE (SET_SRC (x)) == SUBREG
4934 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
4935 return &SET_SRC (x);
4936
4937 /* See if we can split SET_SRC as it stands. */
4938 split = find_split_point (&SET_SRC (x), insn, true);
4939 if (split && split != &SET_SRC (x))
4940 return split;
4941
4942 /* See if we can split SET_DEST as it stands. */
4943 split = find_split_point (&SET_DEST (x), insn, false);
4944 if (split && split != &SET_DEST (x))
4945 return split;
4946
4947 /* See if this is a bitfield assignment with everything constant. If
4948 so, this is an IOR of an AND, so split it into that. */
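      /* For example, assuming !BITS_BIG_ENDIAN,
           (set (zero_extract:SI (reg R) (const_int 4) (const_int 8)) (const_int 5))
         becomes
           (set (reg R) (ior:SI (and:SI (reg R) (const_int -3841))
                                (const_int 1280))),
         i.e. mask out the 4-bit field at bit 8 and OR in 5 << 8.  */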
4949 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
4950 && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
4951 &inner_mode)
4952 && HWI_COMPUTABLE_MODE_P (inner_mode)
4953 && CONST_INT_P (XEXP (SET_DEST (x), 1))
4954 && CONST_INT_P (XEXP (SET_DEST (x), 2))
4955 && CONST_INT_P (SET_SRC (x))
4956 && ((INTVAL (XEXP (SET_DEST (x), 1))
4957 + INTVAL (XEXP (SET_DEST (x), 2)))
4958 <= GET_MODE_PRECISION (inner_mode))
4959 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
4960 {
4961 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
4962 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
4963 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
4964 rtx dest = XEXP (SET_DEST (x), 0);
4965 unsigned HOST_WIDE_INT mask
4966 = (HOST_WIDE_INT_1U << len) - 1;
4967 rtx or_mask;
4968
4969 if (BITS_BIG_ENDIAN)
4970 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
4971
4972 or_mask = gen_int_mode (src << pos, inner_mode);
4973 if (src == mask)
4974 SUBST (SET_SRC (x),
4975 simplify_gen_binary (IOR, inner_mode, dest, or_mask));
4976 else
4977 {
4978 rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
4979 SUBST (SET_SRC (x),
4980 simplify_gen_binary (IOR, inner_mode,
4981 simplify_gen_binary (AND, inner_mode,
4982 dest, negmask),
4983 or_mask));
4984 }
4985
4986 SUBST (SET_DEST (x), dest);
4987
4988 split = find_split_point (&SET_SRC (x), insn, true);
4989 if (split && split != &SET_SRC (x))
4990 return split;
4991 }
4992
4993 /* Otherwise, see if this is an operation that we can split into two.
4994 If so, try to split that. */
4995 code = GET_CODE (SET_SRC (x));
4996
4997 switch (code)
4998 {
4999 case AND:
5000 /* If we are AND'ing with a large constant that is only a single
5001 bit and the result is only being used in a context where we
5002 need to know if it is zero or nonzero, replace it with a bit
5003 extraction. This will avoid the large constant, which might
5004 have taken more than one insn to make. If the constant were
5005 not a valid argument to the AND but took only one insn to make,
5006 this is no worse, but if it took more than one insn, it will
5007 be better. */
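          /* For example, if (reg Y) below is set to (and:SI (reg X) (const_int 4096))
             and its only use is in (ne (reg Y) (const_int 0)), the AND is replaced
             by a one-bit field extraction, roughly
             (zero_extract:SI (reg X) (const_int 1) (const_int 12)),
             which avoids materializing the constant 4096.  */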
5008
5009 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5010 && REG_P (XEXP (SET_SRC (x), 0))
5011 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
5012 && REG_P (SET_DEST (x))
5013 && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
5014 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
5015 && XEXP (*split, 0) == SET_DEST (x)
5016 && XEXP (*split, 1) == const0_rtx)
5017 {
5018 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
5019 XEXP (SET_SRC (x), 0),
5020 pos, NULL_RTX, 1, 1, 0, 0);
5021 if (extraction != 0)
5022 {
5023 SUBST (SET_SRC (x), extraction);
5024 return find_split_point (loc, insn, false);
5025 }
5026 }
5027 break;
5028
5029 case NE:
5030          /* If STORE_FLAG_VALUE is -1 and this is (NE X 0) with only one bit
5031             of X known to be on, this can be converted into a NEG of a shift.  */
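          /* For example, with STORE_FLAG_VALUE == -1 and only bit 3 of X possibly
             nonzero, (ne X (const_int 0)) becomes
             (neg (lshiftrt X (const_int 3))):
             the shift leaves 0 or 1, and the negation yields 0 or -1.  */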
5032 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
5033 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
5034 && 1 <= (pos = exact_log2
5035 (nonzero_bits (XEXP (SET_SRC (x), 0),
5036 GET_MODE (XEXP (SET_SRC (x), 0))))))
5037 {
5038 machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
5039
5040 SUBST (SET_SRC (x),
5041 gen_rtx_NEG (mode,
5042 gen_rtx_LSHIFTRT (mode,
5043 XEXP (SET_SRC (x), 0),
5044 GEN_INT (pos))));
5045
5046 split = find_split_point (&SET_SRC (x), insn, true);
5047 if (split && split != &SET_SRC (x))
5048 return split;
5049 }
5050 break;
5051
5052 case SIGN_EXTEND:
5053 inner = XEXP (SET_SRC (x), 0);
5054
5055 /* We can't optimize if either mode is a partial integer
5056 mode as we don't know how many bits are significant
5057 in those modes. */
5058 if (!is_int_mode (GET_MODE (inner), &inner_mode)
5059 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5060 break;
5061
5062 pos = 0;
5063 len = GET_MODE_PRECISION (inner_mode);
5064 unsignedp = 0;
5065 break;
5066
5067 case SIGN_EXTRACT:
5068 case ZERO_EXTRACT:
5069 if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
5070 &inner_mode)
5071 && CONST_INT_P (XEXP (SET_SRC (x), 1))
5072 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5073 {
5074 inner = XEXP (SET_SRC (x), 0);
5075 len = INTVAL (XEXP (SET_SRC (x), 1));
5076 pos = INTVAL (XEXP (SET_SRC (x), 2));
5077
5078 if (BITS_BIG_ENDIAN)
5079 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5080 unsignedp = (code == ZERO_EXTRACT);
5081 }
5082 break;
5083
5084 default:
5085 break;
5086 }
5087
5088 if (len && pos >= 0
5089 && pos + len <= GET_MODE_PRECISION (GET_MODE (inner))
5090 && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
5091 {
5092 /* For unsigned, we have a choice of a shift followed by an
5093 AND or two shifts. Use two shifts for field sizes where the
5094 constant might be too large. We assume here that we can
5095 always at least get 8-bit constants in an AND insn, which is
5096 true for every current RISC. */
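          /* For example, an unsigned 4-bit field at bit 8 of INNER in SImode
             becomes (and:SI (lshiftrt:SI INNER (const_int 8)) (const_int 15)),
             while a signed field (or one wider than 8 bits) becomes
             (ashiftrt:SI (ashift:SI INNER (const_int 20)) (const_int 28)).  */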
5097
5098 if (unsignedp && len <= 8)
5099 {
5100 unsigned HOST_WIDE_INT mask
5101 = (HOST_WIDE_INT_1U << len) - 1;
5102 SUBST (SET_SRC (x),
5103 gen_rtx_AND (mode,
5104 gen_rtx_LSHIFTRT
5105 (mode, gen_lowpart (mode, inner),
5106 GEN_INT (pos)),
5107 gen_int_mode (mask, mode)));
5108
5109 split = find_split_point (&SET_SRC (x), insn, true);
5110 if (split && split != &SET_SRC (x))
5111 return split;
5112 }
5113 else
5114 {
5115 SUBST (SET_SRC (x),
5116 gen_rtx_fmt_ee
5117 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5118 gen_rtx_ASHIFT (mode,
5119 gen_lowpart (mode, inner),
5120 GEN_INT (GET_MODE_PRECISION (mode)
5121 - len - pos)),
5122 GEN_INT (GET_MODE_PRECISION (mode) - len)));
5123
5124 split = find_split_point (&SET_SRC (x), insn, true);
5125 if (split && split != &SET_SRC (x))
5126 return split;
5127 }
5128 }
5129
5130 /* See if this is a simple operation with a constant as the second
5131 operand. It might be that this constant is out of range and hence
5132 could be used as a split point. */
5133 if (BINARY_P (SET_SRC (x))
5134 && CONSTANT_P (XEXP (SET_SRC (x), 1))
5135 && (OBJECT_P (XEXP (SET_SRC (x), 0))
5136 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5137 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5138 return &XEXP (SET_SRC (x), 1);
5139
5140 /* Finally, see if this is a simple operation with its first operand
5141 not in a register. The operation might require this operand in a
5142 register, so return it as a split point. We can always do this
5143 because if the first operand were another operation, we would have
5144 already found it as a split point. */
5145 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5146 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5147 return &XEXP (SET_SRC (x), 0);
5148
5149 return 0;
5150
5151 case AND:
5152 case IOR:
5153 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5154 it is better to write this as (not (ior A B)) so we can split it.
5155 Similarly for IOR. */
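      /* That is, (and (not A) (not B)) becomes (not (ior A B)), and
         (ior (not A) (not B)) becomes (not (and A B)).  */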
5156 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5157 {
5158 SUBST (*loc,
5159 gen_rtx_NOT (GET_MODE (x),
5160 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5161 GET_MODE (x),
5162 XEXP (XEXP (x, 0), 0),
5163 XEXP (XEXP (x, 1), 0))));
5164 return find_split_point (loc, insn, set_src);
5165 }
5166
5167 /* Many RISC machines have a large set of logical insns. If the
5168 second operand is a NOT, put it first so we will try to split the
5169 other operand first. */
5170 if (GET_CODE (XEXP (x, 1)) == NOT)
5171 {
5172 rtx tem = XEXP (x, 0);
5173 SUBST (XEXP (x, 0), XEXP (x, 1));
5174 SUBST (XEXP (x, 1), tem);
5175 }
5176 break;
5177
5178 case PLUS:
5179 case MINUS:
5180 /* Canonicalization can produce (minus A (mult B C)), where C is a
5181 constant. It may be better to try splitting (plus (mult B -C) A)
5182 instead if this isn't a multiply by a power of two. */
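      /* For example, (minus A (mult B (const_int 3))) is retried below as
         (plus (mult B (const_int -3)) A).  */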
5183 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5184 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5185 && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5186 {
5187 machine_mode mode = GET_MODE (x);
5188 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5189 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5190 SUBST (*loc, gen_rtx_PLUS (mode,
5191 gen_rtx_MULT (mode,
5192 XEXP (XEXP (x, 1), 0),
5193 gen_int_mode (other_int,
5194 mode)),
5195 XEXP (x, 0)));
5196 return find_split_point (loc, insn, set_src);
5197 }
5198
5199       /* Split at a multiply-accumulate instruction.  However, if this is
5200 the SET_SRC, we likely do not have such an instruction and it's
5201 worthless to try this split. */
5202 if (!set_src
5203 && (GET_CODE (XEXP (x, 0)) == MULT
5204 || (GET_CODE (XEXP (x, 0)) == ASHIFT
5205 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5206 return loc;
5207
5208 default:
5209 break;
5210 }
5211
5212 /* Otherwise, select our actions depending on our rtx class. */
5213 switch (GET_RTX_CLASS (code))
5214 {
5215 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
5216 case RTX_TERNARY:
5217 split = find_split_point (&XEXP (x, 2), insn, false);
5218 if (split)
5219 return split;
5220 /* fall through */
5221 case RTX_BIN_ARITH:
5222 case RTX_COMM_ARITH:
5223 case RTX_COMPARE:
5224 case RTX_COMM_COMPARE:
5225 split = find_split_point (&XEXP (x, 1), insn, false);
5226 if (split)
5227 return split;
5228 /* fall through */
5229 case RTX_UNARY:
5230 /* Some machines have (and (shift ...) ...) insns. If X is not
5231 an AND, but XEXP (X, 0) is, use it as our split point. */
5232 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5233 return &XEXP (x, 0);
5234
5235 split = find_split_point (&XEXP (x, 0), insn, false);
5236 if (split)
5237 return split;
5238 return loc;
5239
5240 default:
5241 /* Otherwise, we don't have a split point. */
5242 return 0;
5243 }
5244 }
5245 \f
5246 /* Throughout X, replace FROM with TO, and return the result.
5247 The result is TO if X is FROM;
5248 otherwise the result is X, but its contents may have been modified.
5249 If they were modified, a record was made in undobuf so that
5250 undo_all will (among other things) return X to its original state.
5251
5252    If the number of changes necessary is too large to record for undoing,
5253 the excess changes are not made, so the result is invalid.
5254 The changes already made can still be undone.
5255    undobuf.num_undo is incremented for such changes, so by testing that,
5256    the caller can tell whether the result is valid.
5257
5258 `n_occurrences' is incremented each time FROM is replaced.
5259
5260 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5261
5262 IN_COND is nonzero if we are at the top level of a condition.
5263
5264 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5265 by copying if `n_occurrences' is nonzero. */
5266
5267 static rtx
5268 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5269 {
5270 enum rtx_code code = GET_CODE (x);
5271 machine_mode op0_mode = VOIDmode;
5272 const char *fmt;
5273 int len, i;
5274 rtx new_rtx;
5275
5276 /* Two expressions are equal if they are identical copies of a shared
5277 RTX or if they are both registers with the same register number
5278 and mode. */
5279
5280 #define COMBINE_RTX_EQUAL_P(X,Y) \
5281 ((X) == (Y) \
5282 || (REG_P (X) && REG_P (Y) \
5283 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5284
5285 /* Do not substitute into clobbers of regs -- this will never result in
5286 valid RTL. */
5287 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5288 return x;
5289
5290 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5291 {
5292 n_occurrences++;
5293 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5294 }
5295
5296 /* If X and FROM are the same register but different modes, they
5297 will not have been seen as equal above. However, the log links code
5298 will make a LOG_LINKS entry for that case. If we do nothing, we
5299 will try to rerecognize our original insn and, when it succeeds,
5300 we will delete the feeding insn, which is incorrect.
5301
5302 So force this insn not to match in this (rare) case. */
5303 if (! in_dest && code == REG && REG_P (from)
5304 && reg_overlap_mentioned_p (x, from))
5305 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5306
5307 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5308 of which may contain things that can be combined. */
5309 if (code != MEM && code != LO_SUM && OBJECT_P (x))
5310 return x;
5311
5312 /* It is possible to have a subexpression appear twice in the insn.
5313 Suppose that FROM is a register that appears within TO.
5314 Then, after that subexpression has been scanned once by `subst',
5315 the second time it is scanned, TO may be found. If we were
5316 to scan TO here, we would find FROM within it and create a
5317 self-referent rtl structure which is completely wrong. */
5318 if (COMBINE_RTX_EQUAL_P (x, to))
5319 return to;
5320
5321 /* Parallel asm_operands need special attention because all of the
5322 inputs are shared across the arms. Furthermore, unsharing the
5323 rtl results in recognition failures. Failure to handle this case
5324 specially can result in circular rtl.
5325
5326 Solve this by doing a normal pass across the first entry of the
5327 parallel, and only processing the SET_DESTs of the subsequent
5328 entries. Ug. */
5329
5330 if (code == PARALLEL
5331 && GET_CODE (XVECEXP (x, 0, 0)) == SET
5332 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5333 {
5334 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5335
5336 /* If this substitution failed, this whole thing fails. */
5337 if (GET_CODE (new_rtx) == CLOBBER
5338 && XEXP (new_rtx, 0) == const0_rtx)
5339 return new_rtx;
5340
5341 SUBST (XVECEXP (x, 0, 0), new_rtx);
5342
5343 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5344 {
5345 rtx dest = SET_DEST (XVECEXP (x, 0, i));
5346
5347 if (!REG_P (dest)
5348 && GET_CODE (dest) != CC0
5349 && GET_CODE (dest) != PC)
5350 {
5351 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5352
5353 /* If this substitution failed, this whole thing fails. */
5354 if (GET_CODE (new_rtx) == CLOBBER
5355 && XEXP (new_rtx, 0) == const0_rtx)
5356 return new_rtx;
5357
5358 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5359 }
5360 }
5361 }
5362 else
5363 {
5364 len = GET_RTX_LENGTH (code);
5365 fmt = GET_RTX_FORMAT (code);
5366
5367 /* We don't need to process a SET_DEST that is a register, CC0,
5368 or PC, so set up to skip this common case. All other cases
5369 where we want to suppress replacing something inside a
5370 SET_SRC are handled via the IN_DEST operand. */
5371 if (code == SET
5372 && (REG_P (SET_DEST (x))
5373 || GET_CODE (SET_DEST (x)) == CC0
5374 || GET_CODE (SET_DEST (x)) == PC))
5375 fmt = "ie";
5376
5377 /* Trying to simplify the operands of a widening MULT is not likely
5378 to create RTL matching a machine insn. */
5379 if (code == MULT
5380 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5381 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
5382 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
5383 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
5384 && REG_P (XEXP (XEXP (x, 0), 0))
5385 && REG_P (XEXP (XEXP (x, 1), 0))
5386 && from == to)
5387 return x;
5388
5389
5390 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5391 constant. */
5392 if (fmt[0] == 'e')
5393 op0_mode = GET_MODE (XEXP (x, 0));
5394
5395 for (i = 0; i < len; i++)
5396 {
5397 if (fmt[i] == 'E')
5398 {
5399 int j;
5400 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5401 {
5402 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5403 {
5404 new_rtx = (unique_copy && n_occurrences
5405 ? copy_rtx (to) : to);
5406 n_occurrences++;
5407 }
5408 else
5409 {
5410 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5411 unique_copy);
5412
5413 /* If this substitution failed, this whole thing
5414 fails. */
5415 if (GET_CODE (new_rtx) == CLOBBER
5416 && XEXP (new_rtx, 0) == const0_rtx)
5417 return new_rtx;
5418 }
5419
5420 SUBST (XVECEXP (x, i, j), new_rtx);
5421 }
5422 }
5423 else if (fmt[i] == 'e')
5424 {
5425 /* If this is a register being set, ignore it. */
5426 new_rtx = XEXP (x, i);
5427 if (in_dest
5428 && i == 0
5429 && (((code == SUBREG || code == ZERO_EXTRACT)
5430 && REG_P (new_rtx))
5431 || code == STRICT_LOW_PART))
5432 ;
5433
5434 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5435 {
5436                 /* In general, don't install a subreg involving two
5437                    modes that are not tieable.  It can worsen register
5438 allocation, and can even make invalid reload
5439 insns, since the reg inside may need to be copied
5440 from in the outside mode, and that may be invalid
5441 if it is an fp reg copied in integer mode.
5442
5443 We allow two exceptions to this: It is valid if
5444 it is inside another SUBREG and the mode of that
5445                    SUBREG and the mode of the inside of TO are
5446                    tieable, and it is valid if X is a SET that copies
5447 FROM to CC0. */
5448
5449 if (GET_CODE (to) == SUBREG
5450 && !targetm.modes_tieable_p (GET_MODE (to),
5451 GET_MODE (SUBREG_REG (to)))
5452 && ! (code == SUBREG
5453 && (targetm.modes_tieable_p
5454 (GET_MODE (x), GET_MODE (SUBREG_REG (to)))))
5455 && (!HAVE_cc0
5456 || (! (code == SET
5457 && i == 1
5458 && XEXP (x, 0) == cc0_rtx))))
5459 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5460
5461 if (code == SUBREG
5462 && REG_P (to)
5463 && REGNO (to) < FIRST_PSEUDO_REGISTER
5464 && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5465 SUBREG_BYTE (x),
5466 GET_MODE (x)) < 0)
5467 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5468
5469 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5470 n_occurrences++;
5471 }
5472 else
5473 /* If we are in a SET_DEST, suppress most cases unless we
5474 have gone inside a MEM, in which case we want to
5475 simplify the address. We assume here that things that
5476 are actually part of the destination have their inner
5477 parts in the first expression. This is true for SUBREG,
5478 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5479 things aside from REG and MEM that should appear in a
5480 SET_DEST. */
5481 new_rtx = subst (XEXP (x, i), from, to,
5482 (((in_dest
5483 && (code == SUBREG || code == STRICT_LOW_PART
5484 || code == ZERO_EXTRACT))
5485 || code == SET)
5486 && i == 0),
5487 code == IF_THEN_ELSE && i == 0,
5488 unique_copy);
5489
5490 /* If we found that we will have to reject this combination,
5491 indicate that by returning the CLOBBER ourselves, rather than
5492 an expression containing it. This will speed things up as
5493 well as prevent accidents where two CLOBBERs are considered
5494 to be equal, thus producing an incorrect simplification. */
5495
5496 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5497 return new_rtx;
5498
5499 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5500 {
5501 machine_mode mode = GET_MODE (x);
5502
5503 x = simplify_subreg (GET_MODE (x), new_rtx,
5504 GET_MODE (SUBREG_REG (x)),
5505 SUBREG_BYTE (x));
5506 if (! x)
5507 x = gen_rtx_CLOBBER (mode, const0_rtx);
5508 }
5509 else if (CONST_SCALAR_INT_P (new_rtx)
5510 && GET_CODE (x) == ZERO_EXTEND)
5511 {
5512 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
5513 new_rtx, GET_MODE (XEXP (x, 0)));
5514 gcc_assert (x);
5515 }
5516 else
5517 SUBST (XEXP (x, i), new_rtx);
5518 }
5519 }
5520 }
5521
5522 /* Check if we are loading something from the constant pool via float
5523      extension; in this case we would undo the compress_float_constant
5524      optimization and degrade the constant load into an immediate value.  */
5525 if (GET_CODE (x) == FLOAT_EXTEND
5526 && MEM_P (XEXP (x, 0))
5527 && MEM_READONLY_P (XEXP (x, 0)))
5528 {
5529 rtx tmp = avoid_constant_pool_reference (x);
5530 if (x != tmp)
5531 return x;
5532 }
5533
5534 /* Try to simplify X. If the simplification changed the code, it is likely
5535 that further simplification will help, so loop, but limit the number
5536 of repetitions that will be performed. */
5537
5538 for (i = 0; i < 4; i++)
5539 {
5540 /* If X is sufficiently simple, don't bother trying to do anything
5541 with it. */
5542 if (code != CONST_INT && code != REG && code != CLOBBER)
5543 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5544
5545 if (GET_CODE (x) == code)
5546 break;
5547
5548 code = GET_CODE (x);
5549
5550 /* We no longer know the original mode of operand 0 since we
5551          have changed the form of X.  */
5552 op0_mode = VOIDmode;
5553 }
5554
5555 return x;
5556 }
5557 \f
5558 /* If X is a commutative operation whose operands are not in the canonical
5559 order, use substitutions to swap them. */
5560
5561 static void
5562 maybe_swap_commutative_operands (rtx x)
5563 {
5564 if (COMMUTATIVE_ARITH_P (x)
5565 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5566 {
5567 rtx temp = XEXP (x, 0);
5568 SUBST (XEXP (x, 0), XEXP (x, 1));
5569 SUBST (XEXP (x, 1), temp);
5570 }
5571 }
5572
5573 /* Simplify X, a piece of RTL. We just operate on the expression at the
5574 outer level; call `subst' to simplify recursively. Return the new
5575 expression.
5576
5577 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5578 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5579 of a condition. */
5580
5581 static rtx
5582 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5583 int in_cond)
5584 {
5585 enum rtx_code code = GET_CODE (x);
5586 machine_mode mode = GET_MODE (x);
5587 scalar_int_mode int_mode;
5588 rtx temp;
5589 int i;
5590
5591 /* If this is a commutative operation, put a constant last and a complex
5592 expression first. We don't need to do this for comparisons here. */
5593 maybe_swap_commutative_operands (x);
5594
5595 /* Try to fold this expression in case we have constants that weren't
5596 present before. */
5597 temp = 0;
5598 switch (GET_RTX_CLASS (code))
5599 {
5600 case RTX_UNARY:
5601 if (op0_mode == VOIDmode)
5602 op0_mode = GET_MODE (XEXP (x, 0));
5603 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5604 break;
5605 case RTX_COMPARE:
5606 case RTX_COMM_COMPARE:
5607 {
5608 machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5609 if (cmp_mode == VOIDmode)
5610 {
5611 cmp_mode = GET_MODE (XEXP (x, 1));
5612 if (cmp_mode == VOIDmode)
5613 cmp_mode = op0_mode;
5614 }
5615 temp = simplify_relational_operation (code, mode, cmp_mode,
5616 XEXP (x, 0), XEXP (x, 1));
5617 }
5618 break;
5619 case RTX_COMM_ARITH:
5620 case RTX_BIN_ARITH:
5621 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5622 break;
5623 case RTX_BITFIELD_OPS:
5624 case RTX_TERNARY:
5625 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5626 XEXP (x, 1), XEXP (x, 2));
5627 break;
5628 default:
5629 break;
5630 }
5631
5632 if (temp)
5633 {
5634 x = temp;
5635 code = GET_CODE (temp);
5636 op0_mode = VOIDmode;
5637 mode = GET_MODE (temp);
5638 }
5639
5640 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5641 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5642 things. Check for cases where both arms are testing the same
5643 condition.
5644
5645 Don't do anything if all operands are very simple. */
5646
5647 if ((BINARY_P (x)
5648 && ((!OBJECT_P (XEXP (x, 0))
5649 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5650 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5651 || (!OBJECT_P (XEXP (x, 1))
5652 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5653 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5654 || (UNARY_P (x)
5655 && (!OBJECT_P (XEXP (x, 0))
5656 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5657 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5658 {
5659 rtx cond, true_rtx, false_rtx;
5660
5661 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5662 if (cond != 0
5663 /* If everything is a comparison, what we have is highly unlikely
5664 to be simpler, so don't use it. */
5665 && ! (COMPARISON_P (x)
5666 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
5667 {
5668 rtx cop1 = const0_rtx;
5669 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5670
5671 if (cond_code == NE && COMPARISON_P (cond))
5672 return x;
5673
5674 /* Simplify the alternative arms; this may collapse the true and
5675 false arms to store-flag values. Be careful to use copy_rtx
5676 here since true_rtx or false_rtx might share RTL with x as a
5677 result of the if_then_else_cond call above. */
5678 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5679 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5680
5681 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5682 is unlikely to be simpler. */
5683 if (general_operand (true_rtx, VOIDmode)
5684 && general_operand (false_rtx, VOIDmode))
5685 {
5686 enum rtx_code reversed;
5687
5688 /* Restarting if we generate a store-flag expression will cause
5689 us to loop. Just drop through in this case. */
5690
5691 /* If the result values are STORE_FLAG_VALUE and zero, we can
5692 just make the comparison operation. */
5693 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5694 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5695 cond, cop1);
5696 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5697 && ((reversed = reversed_comparison_code_parts
5698 (cond_code, cond, cop1, NULL))
5699 != UNKNOWN))
5700 x = simplify_gen_relational (reversed, mode, VOIDmode,
5701 cond, cop1);
5702
5703 /* Likewise, we can make the negate of a comparison operation
5704 if the result values are - STORE_FLAG_VALUE and zero. */
5705 else if (CONST_INT_P (true_rtx)
5706 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5707 && false_rtx == const0_rtx)
5708 x = simplify_gen_unary (NEG, mode,
5709 simplify_gen_relational (cond_code,
5710 mode, VOIDmode,
5711 cond, cop1),
5712 mode);
5713 else if (CONST_INT_P (false_rtx)
5714 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5715 && true_rtx == const0_rtx
5716 && ((reversed = reversed_comparison_code_parts
5717 (cond_code, cond, cop1, NULL))
5718 != UNKNOWN))
5719 x = simplify_gen_unary (NEG, mode,
5720 simplify_gen_relational (reversed,
5721 mode, VOIDmode,
5722 cond, cop1),
5723 mode);
5724 else
5725 return gen_rtx_IF_THEN_ELSE (mode,
5726 simplify_gen_relational (cond_code,
5727 mode,
5728 VOIDmode,
5729 cond,
5730 cop1),
5731 true_rtx, false_rtx);
5732
5733 code = GET_CODE (x);
5734 op0_mode = VOIDmode;
5735 }
5736 }
5737 }
5738
5739 /* First see if we can apply the inverse distributive law. */
5740 if (code == PLUS || code == MINUS
5741 || code == AND || code == IOR || code == XOR)
5742 {
5743 x = apply_distributive_law (x);
5744 code = GET_CODE (x);
5745 op0_mode = VOIDmode;
5746 }
5747
5748 /* If CODE is an associative operation not otherwise handled, see if we
5749 can associate some operands. This can win if they are constants or
5750 if they are logically related (i.e. (a & b) & a). */
5751 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5752 || code == AND || code == IOR || code == XOR
5753 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5754 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5755 || (flag_associative_math && FLOAT_MODE_P (mode))))
5756 {
5757 if (GET_CODE (XEXP (x, 0)) == code)
5758 {
5759 rtx other = XEXP (XEXP (x, 0), 0);
5760 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5761 rtx inner_op1 = XEXP (x, 1);
5762 rtx inner;
5763
5764 /* Make sure we pass the constant operand if any as the second
5765 one if this is a commutative operation. */
5766 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5767 std::swap (inner_op0, inner_op1);
5768 inner = simplify_binary_operation (code == MINUS ? PLUS
5769 : code == DIV ? MULT
5770 : code,
5771 mode, inner_op0, inner_op1);
5772
5773 /* For commutative operations, try the other pair if that one
5774 didn't simplify. */
5775 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5776 {
5777 other = XEXP (XEXP (x, 0), 1);
5778 inner = simplify_binary_operation (code, mode,
5779 XEXP (XEXP (x, 0), 0),
5780 XEXP (x, 1));
5781 }
5782
5783 if (inner)
5784 return simplify_gen_binary (code, mode, other, inner);
5785 }
5786 }
5787
5788 /* A little bit of algebraic simplification here. */
5789 switch (code)
5790 {
5791 case MEM:
5792 /* Ensure that our address has any ASHIFTs converted to MULT in case
5793 address-recognizing predicates are called later. */
5794 temp = make_compound_operation (XEXP (x, 0), MEM);
5795 SUBST (XEXP (x, 0), temp);
5796 break;
5797
5798 case SUBREG:
5799 if (op0_mode == VOIDmode)
5800 op0_mode = GET_MODE (SUBREG_REG (x));
5801
5802 /* See if this can be moved to simplify_subreg. */
5803 if (CONSTANT_P (SUBREG_REG (x))
5804 && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
5805 /* Don't call gen_lowpart if the inner mode
5806 is VOIDmode and we cannot simplify it, as SUBREG without
5807 inner mode is invalid. */
5808 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5809 || gen_lowpart_common (mode, SUBREG_REG (x))))
5810 return gen_lowpart (mode, SUBREG_REG (x));
5811
5812 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5813 break;
5814 {
5815 rtx temp;
5816 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5817 SUBREG_BYTE (x));
5818 if (temp)
5819 return temp;
5820
5821 /* If op is known to have all lower bits zero, the result is zero. */
5822 scalar_int_mode int_mode, int_op0_mode;
5823 if (!in_dest
5824 && is_a <scalar_int_mode> (mode, &int_mode)
5825 && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
5826 && (GET_MODE_PRECISION (int_mode)
5827 < GET_MODE_PRECISION (int_op0_mode))
5828 && (subreg_lowpart_offset (int_mode, int_op0_mode)
5829 == SUBREG_BYTE (x))
5830 && HWI_COMPUTABLE_MODE_P (int_op0_mode)
5831 && (nonzero_bits (SUBREG_REG (x), int_op0_mode)
5832 & GET_MODE_MASK (int_mode)) == 0)
5833 return CONST0_RTX (int_mode);
5834 }
5835
5836 /* Don't change the mode of the MEM if that would change the meaning
5837 of the address. */
5838 if (MEM_P (SUBREG_REG (x))
5839 && (MEM_VOLATILE_P (SUBREG_REG (x))
5840 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
5841 MEM_ADDR_SPACE (SUBREG_REG (x)))))
5842 return gen_rtx_CLOBBER (mode, const0_rtx);
5843
5844 /* Note that we cannot do any narrowing for non-constants since
5845 we might have been counting on using the fact that some bits were
5846 zero. We now do this in the SET. */
5847
5848 break;
5849
5850 case NEG:
5851 temp = expand_compound_operation (XEXP (x, 0));
5852
5853 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5854 replaced by (lshiftrt X C). This will convert
5855 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
5856
5857 if (GET_CODE (temp) == ASHIFTRT
5858 && CONST_INT_P (XEXP (temp, 1))
5859 && INTVAL (XEXP (temp, 1)) == GET_MODE_PRECISION (mode) - 1)
5860 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5861 INTVAL (XEXP (temp, 1)));
5862
5863 /* If X has only a single bit that might be nonzero, say, bit I, convert
5864 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5865 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
5866 (sign_extract X 1 Y). But only do this if TEMP isn't a register
5867 or a SUBREG of one since we'd be making the expression more
5868 complex if it was just a register. */
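      /* For example, in SImode with only bit 2 of TEMP possibly nonzero,
         (neg TEMP) becomes (ashiftrt (ashift TEMP (const_int 29)) (const_int 29)),
         so 0 stays 0 and 4 becomes -4.  */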
5869
5870 if (!REG_P (temp)
5871 && ! (GET_CODE (temp) == SUBREG
5872 && REG_P (SUBREG_REG (temp)))
5873 && is_a <scalar_int_mode> (mode, &int_mode)
5874 && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
5875 {
5876 rtx temp1 = simplify_shift_const
5877 (NULL_RTX, ASHIFTRT, int_mode,
5878 simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
5879 GET_MODE_PRECISION (int_mode) - 1 - i),
5880 GET_MODE_PRECISION (int_mode) - 1 - i);
5881
5882 /* If all we did was surround TEMP with the two shifts, we
5883 haven't improved anything, so don't use it. Otherwise,
5884 we are better off with TEMP1. */
5885 if (GET_CODE (temp1) != ASHIFTRT
5886 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5887 || XEXP (XEXP (temp1, 0), 0) != temp)
5888 return temp1;
5889 }
5890 break;
5891
5892 case TRUNCATE:
5893 /* We can't handle truncation to a partial integer mode here
5894 because we don't know the real bitsize of the partial
5895 integer mode. */
5896 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5897 break;
5898
5899 if (HWI_COMPUTABLE_MODE_P (mode))
5900 SUBST (XEXP (x, 0),
5901 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5902 GET_MODE_MASK (mode), 0));
5903
5904 /* We can truncate a constant value and return it. */
5905 if (CONST_INT_P (XEXP (x, 0)))
5906 return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
5907
5908 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5909 whose value is a comparison can be replaced with a subreg if
5910 STORE_FLAG_VALUE permits. */
5911 if (HWI_COMPUTABLE_MODE_P (mode)
5912 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
5913 && (temp = get_last_value (XEXP (x, 0)))
5914 && COMPARISON_P (temp))
5915 return gen_lowpart (mode, XEXP (x, 0));
5916 break;
5917
5918 case CONST:
5919 /* (const (const X)) can become (const X). Do it this way rather than
5920 returning the inner CONST since CONST can be shared with a
5921 REG_EQUAL note. */
5922 if (GET_CODE (XEXP (x, 0)) == CONST)
5923 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
5924 break;
5925
5926 case LO_SUM:
5927 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
5928 can add in an offset. find_split_point will split this address up
5929 again if it doesn't match. */
5930 if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
5931 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
5932 return XEXP (x, 1);
5933 break;
5934
5935 case PLUS:
5936 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
5937 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
5938 bit-field and can be replaced by either a sign_extend or a
5939 sign_extract. The `and' may be a zero_extend and the two
5940 <c>, -<c> constants may be reversed. */
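      /* For example, with pow2 == 256 and c == 128,
         (plus (xor (and X (const_int 255)) (const_int 128)) (const_int -128))
         sign-extends the low 8 bits of X and is rewritten below as
         (ashiftrt (ashift X (const_int 24)) (const_int 24)) in SImode.  */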
5941 if (GET_CODE (XEXP (x, 0)) == XOR
5942 && is_a <scalar_int_mode> (mode, &int_mode)
5943 && CONST_INT_P (XEXP (x, 1))
5944 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
5945 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
5946 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
5947 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
5948 && HWI_COMPUTABLE_MODE_P (int_mode)
5949 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
5950 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5951 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
5952 == (HOST_WIDE_INT_1U << (i + 1)) - 1))
5953 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
5954 && (GET_MODE_PRECISION (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
5955 == (unsigned int) i + 1))))
5956 return simplify_shift_const
5957 (NULL_RTX, ASHIFTRT, int_mode,
5958 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
5959 XEXP (XEXP (XEXP (x, 0), 0), 0),
5960 GET_MODE_PRECISION (int_mode) - (i + 1)),
5961 GET_MODE_PRECISION (int_mode) - (i + 1));
5962
5963 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
5964 can become (ashiftrt (ashift (xor x 1) C) C) where C is
5965 the bitsize of the mode - 1. This allows simplification of
5966 "a = (b & 8) == 0;" */
5967 if (XEXP (x, 1) == constm1_rtx
5968 && !REG_P (XEXP (x, 0))
5969 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5970 && REG_P (SUBREG_REG (XEXP (x, 0))))
5971 && is_a <scalar_int_mode> (mode, &int_mode)
5972 && nonzero_bits (XEXP (x, 0), int_mode) == 1)
5973 return simplify_shift_const
5974 (NULL_RTX, ASHIFTRT, int_mode,
5975 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
5976 gen_rtx_XOR (int_mode, XEXP (x, 0),
5977 const1_rtx),
5978 GET_MODE_PRECISION (int_mode) - 1),
5979 GET_MODE_PRECISION (int_mode) - 1);
5980
5981 /* If we are adding two things that have no bits in common, convert
5982 the addition into an IOR. This will often be further simplified,
5983 for example in cases like ((a & 1) + (a & 2)), which can
5984 become a & 3. */
5985
5986 if (HWI_COMPUTABLE_MODE_P (mode)
5987 && (nonzero_bits (XEXP (x, 0), mode)
5988 & nonzero_bits (XEXP (x, 1), mode)) == 0)
5989 {
5990 /* Try to simplify the expression further. */
5991 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
5992 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
5993
5994 /* If we could, great. If not, do not go ahead with the IOR
5995 replacement, since PLUS appears in many special purpose
5996 address arithmetic instructions. */
5997 if (GET_CODE (temp) != CLOBBER
5998 && (GET_CODE (temp) != IOR
5999 || ((XEXP (temp, 0) != XEXP (x, 0)
6000 || XEXP (temp, 1) != XEXP (x, 1))
6001 && (XEXP (temp, 0) != XEXP (x, 1)
6002 || XEXP (temp, 1) != XEXP (x, 0)))))
6003 return temp;
6004 }
6005
6006 /* Canonicalize x + x into x << 1. */
6007 if (GET_MODE_CLASS (mode) == MODE_INT
6008 && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
6009 && !side_effects_p (XEXP (x, 0)))
6010 return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
6011
6012 break;
6013
6014 case MINUS:
6015 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
6016 (and <foo> (const_int pow2-1)) */
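      /* For example, (minus X (and X (const_int -8))) becomes
         (and X (const_int 7)): subtracting everything above the low three
         bits leaves just those bits.  */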
6017 if (is_a <scalar_int_mode> (mode, &int_mode)
6018 && GET_CODE (XEXP (x, 1)) == AND
6019 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
6020 && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
6021 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6022 return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
6023 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
6024 break;
6025
6026 case MULT:
6027 /* If we have (mult (plus A B) C), apply the distributive law and then
6028 the inverse distributive law to see if things simplify. This
6029 occurs mostly in addresses, often when unrolling loops. */
6030
6031 if (GET_CODE (XEXP (x, 0)) == PLUS)
6032 {
6033 rtx result = distribute_and_simplify_rtx (x, 0);
6034 if (result)
6035 return result;
6036 }
6037
6038       /* Try to simplify a*(b/c) as (a*b)/c.  */
6039 if (FLOAT_MODE_P (mode) && flag_associative_math
6040 && GET_CODE (XEXP (x, 0)) == DIV)
6041 {
6042 rtx tem = simplify_binary_operation (MULT, mode,
6043 XEXP (XEXP (x, 0), 0),
6044 XEXP (x, 1));
6045 if (tem)
6046 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
6047 }
6048 break;
6049
6050 case UDIV:
6051 /* If this is a divide by a power of two, treat it as a shift if
6052 its first operand is a shift. */
6053 if (is_a <scalar_int_mode> (mode, &int_mode)
6054 && CONST_INT_P (XEXP (x, 1))
6055 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
6056 && (GET_CODE (XEXP (x, 0)) == ASHIFT
6057 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
6058 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
6059 || GET_CODE (XEXP (x, 0)) == ROTATE
6060 || GET_CODE (XEXP (x, 0)) == ROTATERT))
6061 return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
6062 XEXP (x, 0), i);
6063 break;
6064
6065 case EQ: case NE:
6066 case GT: case GTU: case GE: case GEU:
6067 case LT: case LTU: case LE: case LEU:
6068 case UNEQ: case LTGT:
6069 case UNGT: case UNGE:
6070 case UNLT: case UNLE:
6071 case UNORDERED: case ORDERED:
6072 /* If the first operand is a condition code, we can't do anything
6073 with it. */
6074 if (GET_CODE (XEXP (x, 0)) == COMPARE
6075 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
6076 && ! CC0_P (XEXP (x, 0))))
6077 {
6078 rtx op0 = XEXP (x, 0);
6079 rtx op1 = XEXP (x, 1);
6080 enum rtx_code new_code;
6081
6082 if (GET_CODE (op0) == COMPARE)
6083 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
6084
6085 /* Simplify our comparison, if possible. */
6086 new_code = simplify_comparison (code, &op0, &op1);
6087
6088 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
6089 if only the low-order bit is possibly nonzero in X (such as when
6090 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
6091 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
6092 known to be either 0 or -1, NE becomes a NEG and EQ becomes
6093 (plus X 1).
6094
6095 Remove any ZERO_EXTRACT we made when thinking this was a
6096 comparison. It may now be simpler to use, e.g., an AND. If a
6097 ZERO_EXTRACT is indeed appropriate, it will be placed back by
6098 the call to make_compound_operation in the SET case.
6099
6100 Don't apply these optimizations if the caller would
6101 prefer a comparison rather than a value.
6102 E.g., for the condition in an IF_THEN_ELSE most targets need
6103 an explicit comparison. */
6104
6105 if (in_cond)
6106 ;
6107
6108 else if (STORE_FLAG_VALUE == 1
6109 && new_code == NE
6110 && is_int_mode (mode, &int_mode)
6111 && op1 == const0_rtx
6112 && int_mode == GET_MODE (op0)
6113 && nonzero_bits (op0, int_mode) == 1)
6114 return gen_lowpart (int_mode,
6115 expand_compound_operation (op0));
6116
6117 else if (STORE_FLAG_VALUE == 1
6118 && new_code == NE
6119 && is_int_mode (mode, &int_mode)
6120 && op1 == const0_rtx
6121 && int_mode == GET_MODE (op0)
6122 && (num_sign_bit_copies (op0, int_mode)
6123 == GET_MODE_PRECISION (int_mode)))
6124 {
6125 op0 = expand_compound_operation (op0);
6126 return simplify_gen_unary (NEG, int_mode,
6127 gen_lowpart (int_mode, op0),
6128 int_mode);
6129 }
6130
6131 else if (STORE_FLAG_VALUE == 1
6132 && new_code == EQ
6133 && is_int_mode (mode, &int_mode)
6134 && op1 == const0_rtx
6135 && int_mode == GET_MODE (op0)
6136 && nonzero_bits (op0, int_mode) == 1)
6137 {
6138 op0 = expand_compound_operation (op0);
6139 return simplify_gen_binary (XOR, int_mode,
6140 gen_lowpart (int_mode, op0),
6141 const1_rtx);
6142 }
6143
6144 else if (STORE_FLAG_VALUE == 1
6145 && new_code == EQ
6146 && is_int_mode (mode, &int_mode)
6147 && op1 == const0_rtx
6148 && int_mode == GET_MODE (op0)
6149 && (num_sign_bit_copies (op0, int_mode)
6150 == GET_MODE_PRECISION (int_mode)))
6151 {
6152 op0 = expand_compound_operation (op0);
6153 return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
6154 }
6155
6156 /* If STORE_FLAG_VALUE is -1, we have cases similar to
6157 those above. */
6158 if (in_cond)
6159 ;
6160
6161 else if (STORE_FLAG_VALUE == -1
6162 && new_code == NE
6163 && is_int_mode (mode, &int_mode)
6164 && op1 == const0_rtx
6165 && int_mode == GET_MODE (op0)
6166 && (num_sign_bit_copies (op0, int_mode)
6167 == GET_MODE_PRECISION (int_mode)))
6168 return gen_lowpart (int_mode, expand_compound_operation (op0));
6169
6170 else if (STORE_FLAG_VALUE == -1
6171 && new_code == NE
6172 && is_int_mode (mode, &int_mode)
6173 && op1 == const0_rtx
6174 && int_mode == GET_MODE (op0)
6175 && nonzero_bits (op0, int_mode) == 1)
6176 {
6177 op0 = expand_compound_operation (op0);
6178 return simplify_gen_unary (NEG, int_mode,
6179 gen_lowpart (int_mode, op0),
6180 int_mode);
6181 }
6182
6183 else if (STORE_FLAG_VALUE == -1
6184 && new_code == EQ
6185 && is_int_mode (mode, &int_mode)
6186 && op1 == const0_rtx
6187 && int_mode == GET_MODE (op0)
6188 && (num_sign_bit_copies (op0, int_mode)
6189 == GET_MODE_PRECISION (int_mode)))
6190 {
6191 op0 = expand_compound_operation (op0);
6192 return simplify_gen_unary (NOT, int_mode,
6193 gen_lowpart (int_mode, op0),
6194 int_mode);
6195 }
6196
6197 /* If X is 0/1, (eq X 0) is X-1. */
6198 else if (STORE_FLAG_VALUE == -1
6199 && new_code == EQ
6200 && is_int_mode (mode, &int_mode)
6201 && op1 == const0_rtx
6202 && int_mode == GET_MODE (op0)
6203 && nonzero_bits (op0, int_mode) == 1)
6204 {
6205 op0 = expand_compound_operation (op0);
6206 return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
6207 }
6208
6209 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6210 one bit that might be nonzero, we can convert (ne x 0) to
6211 (ashift x c) where C puts the bit in the sign bit. Remove any
6212 AND with STORE_FLAG_VALUE when we are done, since we are only
6213 going to test the sign bit. */
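          /* For example, on a target where STORE_FLAG_VALUE is the sign-bit
             mask (0x80000000 in SImode) and only bit 3 of X may be nonzero,
             (ne X (const_int 0)) becomes (ashift X (const_int 28)), moving
             bit 3 into the sign bit.  */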
6214 if (new_code == NE
6215 && is_int_mode (mode, &int_mode)
6216 && HWI_COMPUTABLE_MODE_P (int_mode)
6217 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
6218 && op1 == const0_rtx
6219 && int_mode == GET_MODE (op0)
6220 && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
6221 {
6222 x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6223 expand_compound_operation (op0),
6224 GET_MODE_PRECISION (int_mode) - 1 - i);
6225 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6226 return XEXP (x, 0);
6227 else
6228 return x;
6229 }
6230
6231 /* If the code changed, return a whole new comparison.
6232 We also need to avoid using SUBST in cases where
6233 simplify_comparison has widened a comparison with a CONST_INT,
6234 since in that case the wider CONST_INT may fail the sanity
6235 checks in do_SUBST. */
6236 if (new_code != code
6237 || (CONST_INT_P (op1)
6238 && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6239 && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6240 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6241
6242 /* Otherwise, keep this operation, but maybe change its operands.
6243 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
6244 SUBST (XEXP (x, 0), op0);
6245 SUBST (XEXP (x, 1), op1);
6246 }
6247 break;
6248
6249 case IF_THEN_ELSE:
6250 return simplify_if_then_else (x);
6251
6252 case ZERO_EXTRACT:
6253 case SIGN_EXTRACT:
6254 case ZERO_EXTEND:
6255 case SIGN_EXTEND:
6256 /* If we are processing SET_DEST, we are done. */
6257 if (in_dest)
6258 return x;
6259
6260 return expand_compound_operation (x);
6261
6262 case SET:
6263 return simplify_set (x);
6264
6265 case AND:
6266 case IOR:
6267 return simplify_logical (x);
6268
6269 case ASHIFT:
6270 case LSHIFTRT:
6271 case ASHIFTRT:
6272 case ROTATE:
6273 case ROTATERT:
6274 /* If this is a shift by a constant amount, simplify it. */
6275 if (CONST_INT_P (XEXP (x, 1)))
6276 return simplify_shift_const (x, code, mode, XEXP (x, 0),
6277 INTVAL (XEXP (x, 1)));
6278
6279 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6280 SUBST (XEXP (x, 1),
6281 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6282 (HOST_WIDE_INT_1U
6283 << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
6284 - 1,
6285 0));
6286 break;
6287
6288 default:
6289 break;
6290 }
6291
6292 return x;
6293 }
6294 \f
6295 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
6296
6297 static rtx
6298 simplify_if_then_else (rtx x)
6299 {
6300 machine_mode mode = GET_MODE (x);
6301 rtx cond = XEXP (x, 0);
6302 rtx true_rtx = XEXP (x, 1);
6303 rtx false_rtx = XEXP (x, 2);
6304 enum rtx_code true_code = GET_CODE (cond);
6305 int comparison_p = COMPARISON_P (cond);
6306 rtx temp;
6307 int i;
6308 enum rtx_code false_code;
6309 rtx reversed;
6310 scalar_int_mode int_mode, inner_mode;
6311
6312 /* Simplify storing of the truth value. */
6313 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6314 return simplify_gen_relational (true_code, mode, VOIDmode,
6315 XEXP (cond, 0), XEXP (cond, 1));
6316
6317 /* Also when the truth value has to be reversed. */
6318 if (comparison_p
6319 && true_rtx == const0_rtx && false_rtx == const_true_rtx
6320 && (reversed = reversed_comparison (cond, mode)))
6321 return reversed;
6322
6323 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6324 in it is being compared against certain values. Get the true and false
6325 comparisons and see if that says anything about the value of each arm. */
6326
6327 if (comparison_p
6328 && ((false_code = reversed_comparison_code (cond, NULL))
6329 != UNKNOWN)
6330 && REG_P (XEXP (cond, 0)))
6331 {
6332 HOST_WIDE_INT nzb;
6333 rtx from = XEXP (cond, 0);
6334 rtx true_val = XEXP (cond, 1);
6335 rtx false_val = true_val;
6336 int swapped = 0;
6337
6338 /* If FALSE_CODE is EQ, swap the codes and arms. */
6339
6340 if (false_code == EQ)
6341 {
6342 swapped = 1, true_code = EQ, false_code = NE;
6343 std::swap (true_rtx, false_rtx);
6344 }
6345
6346 scalar_int_mode from_mode;
6347 if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
6348 {
6349 /* If we are comparing against zero and the expression being
6350 tested has only a single bit that might be nonzero, that is
6351 its value when it is not equal to zero. Similarly if it is
6352 known to be -1 or 0. */
6353 if (true_code == EQ
6354 && true_val == const0_rtx
6355 && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
6356 {
6357 false_code = EQ;
6358 false_val = gen_int_mode (nzb, from_mode);
6359 }
6360 else if (true_code == EQ
6361 && true_val == const0_rtx
6362 && (num_sign_bit_copies (from, from_mode)
6363 == GET_MODE_PRECISION (from_mode)))
6364 {
6365 false_code = EQ;
6366 false_val = constm1_rtx;
6367 }
6368 }
6369
6370 /* Now simplify an arm if we know the value of the register in the
6371 branch and it is used in the arm. Be careful due to the potential
6372 of locally-shared RTL. */
6373
6374 if (reg_mentioned_p (from, true_rtx))
6375 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6376 from, true_val),
6377 pc_rtx, pc_rtx, 0, 0, 0);
6378 if (reg_mentioned_p (from, false_rtx))
6379 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6380 from, false_val),
6381 pc_rtx, pc_rtx, 0, 0, 0);
6382
6383 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6384 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6385
6386 true_rtx = XEXP (x, 1);
6387 false_rtx = XEXP (x, 2);
6388 true_code = GET_CODE (cond);
6389 }
6390
6391 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6392 reversed, do so to avoid needing two sets of patterns for
6393 subtract-and-branch insns. Similarly if we have a constant in the true
6394 arm, the false arm is the same as the first operand of the comparison, or
6395 the false arm is more complicated than the true arm. */
6396
6397 if (comparison_p
6398 && reversed_comparison_code (cond, NULL) != UNKNOWN
6399 && (true_rtx == pc_rtx
6400 || (CONSTANT_P (true_rtx)
6401 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6402 || true_rtx == const0_rtx
6403 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6404 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6405 && !OBJECT_P (false_rtx))
6406 || reg_mentioned_p (true_rtx, false_rtx)
6407 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6408 {
6409 true_code = reversed_comparison_code (cond, NULL);
6410 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6411 SUBST (XEXP (x, 1), false_rtx);
6412 SUBST (XEXP (x, 2), true_rtx);
6413
6414 std::swap (true_rtx, false_rtx);
6415 cond = XEXP (x, 0);
6416
6417 /* It is possible that the conditional has been simplified out. */
6418 true_code = GET_CODE (cond);
6419 comparison_p = COMPARISON_P (cond);
6420 }
6421
6422 /* If the two arms are identical, we don't need the comparison. */
6423
6424 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6425 return true_rtx;
6426
6427 /* Convert a == b ? b : a to "a". */
6428 if (true_code == EQ && ! side_effects_p (cond)
6429 && !HONOR_NANS (mode)
6430 && rtx_equal_p (XEXP (cond, 0), false_rtx)
6431 && rtx_equal_p (XEXP (cond, 1), true_rtx))
6432 return false_rtx;
6433 else if (true_code == NE && ! side_effects_p (cond)
6434 && !HONOR_NANS (mode)
6435 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6436 && rtx_equal_p (XEXP (cond, 1), false_rtx))
6437 return true_rtx;
6438
6439 /* Look for cases where we have (abs x) or (neg (abs X)). */
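      /* For example, (if_then_else (ge X (const_int 0)) X (neg X)) becomes
         (abs X), and with LT or LE it becomes (neg (abs X)).  */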
6440
6441 if (GET_MODE_CLASS (mode) == MODE_INT
6442 && comparison_p
6443 && XEXP (cond, 1) == const0_rtx
6444 && GET_CODE (false_rtx) == NEG
6445 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6446 && rtx_equal_p (true_rtx, XEXP (cond, 0))
6447 && ! side_effects_p (true_rtx))
6448 switch (true_code)
6449 {
6450 case GT:
6451 case GE:
6452 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6453 case LT:
6454 case LE:
6455 return
6456 simplify_gen_unary (NEG, mode,
6457 simplify_gen_unary (ABS, mode, true_rtx, mode),
6458 mode);
6459 default:
6460 break;
6461 }
6462
6463 /* Look for MIN or MAX. */
6464
6465 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6466 && comparison_p
6467 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6468 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6469 && ! side_effects_p (cond))
6470 switch (true_code)
6471 {
6472 case GE:
6473 case GT:
6474 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6475 case LE:
6476 case LT:
6477 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6478 case GEU:
6479 case GTU:
6480 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6481 case LEU:
6482 case LTU:
6483 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6484 default:
6485 break;
6486 }
6487
6488 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6489 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6490 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6491 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6492 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6493      neither 1 nor -1, but it isn't worth checking for.  */
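      /* For example, with STORE_FLAG_VALUE == 1,
         (if_then_else (eq A B) (plus Z (const_int 4)) Z) can become
         (plus Z (mult (eq A B) (const_int 4))): the comparison contributes
         4 when true and 0 when false.  */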
6494
6495 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6496 && comparison_p
6497 && is_int_mode (mode, &int_mode)
6498 && ! side_effects_p (x))
6499 {
6500 rtx t = make_compound_operation (true_rtx, SET);
6501 rtx f = make_compound_operation (false_rtx, SET);
6502 rtx cond_op0 = XEXP (cond, 0);
6503 rtx cond_op1 = XEXP (cond, 1);
6504 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6505 scalar_int_mode m = int_mode;
6506 rtx z = 0, c1 = NULL_RTX;
6507
6508 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6509 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6510 || GET_CODE (t) == ASHIFT
6511 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6512 && rtx_equal_p (XEXP (t, 0), f))
6513 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6514
6515 /* If an identity-zero op is commutative, check whether there
6516 would be a match if we swapped the operands. */
6517 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6518 || GET_CODE (t) == XOR)
6519 && rtx_equal_p (XEXP (t, 1), f))
6520 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6521 else if (GET_CODE (t) == SIGN_EXTEND
6522 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6523 && (GET_CODE (XEXP (t, 0)) == PLUS
6524 || GET_CODE (XEXP (t, 0)) == MINUS
6525 || GET_CODE (XEXP (t, 0)) == IOR
6526 || GET_CODE (XEXP (t, 0)) == XOR
6527 || GET_CODE (XEXP (t, 0)) == ASHIFT
6528 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6529 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6530 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6531 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6532 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6533 && (num_sign_bit_copies (f, GET_MODE (f))
6534 > (unsigned int)
6535 (GET_MODE_PRECISION (int_mode)
6536 - GET_MODE_PRECISION (inner_mode))))
6537 {
6538 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6539 extend_op = SIGN_EXTEND;
6540 m = inner_mode;
6541 }
6542 else if (GET_CODE (t) == SIGN_EXTEND
6543 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6544 && (GET_CODE (XEXP (t, 0)) == PLUS
6545 || GET_CODE (XEXP (t, 0)) == IOR
6546 || GET_CODE (XEXP (t, 0)) == XOR)
6547 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6548 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6549 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6550 && (num_sign_bit_copies (f, GET_MODE (f))
6551 > (unsigned int)
6552 (GET_MODE_PRECISION (int_mode)
6553 - GET_MODE_PRECISION (inner_mode))))
6554 {
6555 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6556 extend_op = SIGN_EXTEND;
6557 m = inner_mode;
6558 }
6559 else if (GET_CODE (t) == ZERO_EXTEND
6560 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6561 && (GET_CODE (XEXP (t, 0)) == PLUS
6562 || GET_CODE (XEXP (t, 0)) == MINUS
6563 || GET_CODE (XEXP (t, 0)) == IOR
6564 || GET_CODE (XEXP (t, 0)) == XOR
6565 || GET_CODE (XEXP (t, 0)) == ASHIFT
6566 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6567 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6568 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6569 && HWI_COMPUTABLE_MODE_P (int_mode)
6570 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6571 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6572 && ((nonzero_bits (f, GET_MODE (f))
6573 & ~GET_MODE_MASK (inner_mode))
6574 == 0))
6575 {
6576 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6577 extend_op = ZERO_EXTEND;
6578 m = inner_mode;
6579 }
6580 else if (GET_CODE (t) == ZERO_EXTEND
6581 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6582 && (GET_CODE (XEXP (t, 0)) == PLUS
6583 || GET_CODE (XEXP (t, 0)) == IOR
6584 || GET_CODE (XEXP (t, 0)) == XOR)
6585 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6586 && HWI_COMPUTABLE_MODE_P (int_mode)
6587 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6588 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6589 && ((nonzero_bits (f, GET_MODE (f))
6590 & ~GET_MODE_MASK (inner_mode))
6591 == 0))
6592 {
6593 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6594 extend_op = ZERO_EXTEND;
6595 m = inner_mode;
6596 }
6597
6598 if (z)
6599 {
6600 temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
6601 cond_op0, cond_op1),
6602 pc_rtx, pc_rtx, 0, 0, 0);
6603 temp = simplify_gen_binary (MULT, m, temp,
6604 simplify_gen_binary (MULT, m, c1,
6605 const_true_rtx));
6606 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6607 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6608
6609 if (extend_op != UNKNOWN)
6610 temp = simplify_gen_unary (extend_op, int_mode, temp, m);
6611
6612 return temp;
6613 }
6614 }
6615
6616 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6617 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6618 negation of a single bit, we can convert this operation to a shift. We
6619 can actually do this more generally, but it doesn't seem worth it. */
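/* For example (a sketch assuming A is already known to be 0 or 1):

     (if_then_else (ne A (const_int 0)) (const_int 8) (const_int 0))

   becomes (ashift A (const_int 3)), because shifting the 0-or-1 value
   left by exact_log2 (8) bits produces exactly 8 or 0.  */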
6620
6621 if (true_code == NE
6622 && is_a <scalar_int_mode> (mode, &int_mode)
6623 && XEXP (cond, 1) == const0_rtx
6624 && false_rtx == const0_rtx
6625 && CONST_INT_P (true_rtx)
6626 && ((1 == nonzero_bits (XEXP (cond, 0), int_mode)
6627 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6628 || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
6629 == GET_MODE_PRECISION (int_mode))
6630 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6631 return
6632 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6633 gen_lowpart (int_mode, XEXP (cond, 0)), i);
6634
6635 /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
6636 bit that can be nonzero in A is the single bit set in C1. */
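/* For instance, if nonzero_bits says the only bit that can be set in A
   is bit 2 and C1 is (const_int 4), the IF_THEN_ELSE collapses to A
   itself, or to (zero_extend A) when A is narrower than the result.  */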
6637 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6638 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6639 && is_a <scalar_int_mode> (mode, &int_mode)
6640 && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
6641 && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
6642 == nonzero_bits (XEXP (cond, 0), inner_mode)
6643 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
6644 {
6645 rtx val = XEXP (cond, 0);
6646 if (inner_mode == int_mode)
6647 return val;
6648 else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
6649 return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
6650 }
6651
6652 return x;
6653 }
6654 \f
6655 /* Simplify X, a SET expression. Return the new expression. */
6656
6657 static rtx
6658 simplify_set (rtx x)
6659 {
6660 rtx src = SET_SRC (x);
6661 rtx dest = SET_DEST (x);
6662 machine_mode mode
6663 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6664 rtx_insn *other_insn;
6665 rtx *cc_use;
6666 scalar_int_mode int_mode;
6667
6668 /* (set (pc) (return)) gets written as (return). */
6669 if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6670 return src;
6671
6672 /* Now that we know for sure which bits of SRC we are using, see if we can
6673 simplify the expression for the object knowing that we only need the
6674 low-order bits. */
6675
6676 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6677 {
6678 src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
6679 SUBST (SET_SRC (x), src);
6680 }
6681
6682 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6683 the comparison result and try to simplify it unless we already have used
6684 undobuf.other_insn. */
6685 if ((GET_MODE_CLASS (mode) == MODE_CC
6686 || GET_CODE (src) == COMPARE
6687 || CC0_P (dest))
6688 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6689 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6690 && COMPARISON_P (*cc_use)
6691 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6692 {
6693 enum rtx_code old_code = GET_CODE (*cc_use);
6694 enum rtx_code new_code;
6695 rtx op0, op1, tmp;
6696 int other_changed = 0;
6697 rtx inner_compare = NULL_RTX;
6698 machine_mode compare_mode = GET_MODE (dest);
6699
6700 if (GET_CODE (src) == COMPARE)
6701 {
6702 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6703 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6704 {
6705 inner_compare = op0;
6706 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6707 }
6708 }
6709 else
6710 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6711
6712 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6713 op0, op1);
6714 if (!tmp)
6715 new_code = old_code;
6716 else if (!CONSTANT_P (tmp))
6717 {
6718 new_code = GET_CODE (tmp);
6719 op0 = XEXP (tmp, 0);
6720 op1 = XEXP (tmp, 1);
6721 }
6722 else
6723 {
6724 rtx pat = PATTERN (other_insn);
6725 undobuf.other_insn = other_insn;
6726 SUBST (*cc_use, tmp);
6727
6728 /* Attempt to simplify CC user. */
6729 if (GET_CODE (pat) == SET)
6730 {
6731 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6732 if (new_rtx != NULL_RTX)
6733 SUBST (SET_SRC (pat), new_rtx);
6734 }
6735
6736 /* Convert X into a no-op move. */
6737 SUBST (SET_DEST (x), pc_rtx);
6738 SUBST (SET_SRC (x), pc_rtx);
6739 return x;
6740 }
6741
6742 /* Simplify our comparison, if possible. */
6743 new_code = simplify_comparison (new_code, &op0, &op1);
6744
6745 #ifdef SELECT_CC_MODE
6746 /* If this machine has CC modes other than CCmode, check to see if we
6747 need to use a different CC mode here. */
6748 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6749 compare_mode = GET_MODE (op0);
6750 else if (inner_compare
6751 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6752 && new_code == old_code
6753 && op0 == XEXP (inner_compare, 0)
6754 && op1 == XEXP (inner_compare, 1))
6755 compare_mode = GET_MODE (inner_compare);
6756 else
6757 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6758
6759 /* If the mode changed, we have to change SET_DEST, the mode in the
6760 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6761 a hard register, just build new versions with the proper mode. If it
6762 is a pseudo, we lose unless it is the only time we set the pseudo, in
6763 which case we can safely change its mode. */
6764 if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
6765 {
6766 if (can_change_dest_mode (dest, 0, compare_mode))
6767 {
6768 unsigned int regno = REGNO (dest);
6769 rtx new_dest;
6770
6771 if (regno < FIRST_PSEUDO_REGISTER)
6772 new_dest = gen_rtx_REG (compare_mode, regno);
6773 else
6774 {
6775 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6776 new_dest = regno_reg_rtx[regno];
6777 }
6778
6779 SUBST (SET_DEST (x), new_dest);
6780 SUBST (XEXP (*cc_use, 0), new_dest);
6781 other_changed = 1;
6782
6783 dest = new_dest;
6784 }
6785 }
6786 #endif /* SELECT_CC_MODE */
6787
6788 /* If the code changed, we have to build a new comparison in
6789 undobuf.other_insn. */
6790 if (new_code != old_code)
6791 {
6792 int other_changed_previously = other_changed;
6793 unsigned HOST_WIDE_INT mask;
6794 rtx old_cc_use = *cc_use;
6795
6796 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6797 dest, const0_rtx));
6798 other_changed = 1;
6799
6800 /* If the only change we made was to change an EQ into an NE or
6801 vice versa, OP0 has only one bit that might be nonzero, and OP1
6802 is zero, check if changing the user of the condition code will
6803 produce a valid insn. If it won't, we can keep the original code
6804 in that insn by surrounding our operation with an XOR. */
6805
6806 if (((old_code == NE && new_code == EQ)
6807 || (old_code == EQ && new_code == NE))
6808 && ! other_changed_previously && op1 == const0_rtx
6809 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6810 && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
6811 {
6812 rtx pat = PATTERN (other_insn), note = 0;
6813
6814 if ((recog_for_combine (&pat, other_insn, &note) < 0
6815 && ! check_asm_operands (pat)))
6816 {
6817 *cc_use = old_cc_use;
6818 other_changed = 0;
6819
6820 op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
6821 gen_int_mode (mask,
6822 GET_MODE (op0)));
6823 }
6824 }
6825 }
6826
6827 if (other_changed)
6828 undobuf.other_insn = other_insn;
6829
6830 /* Don't generate a compare of a CC with 0, just use that CC. */
6831 if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6832 {
6833 SUBST (SET_SRC (x), op0);
6834 src = SET_SRC (x);
6835 }
6836 /* Otherwise, if we didn't previously have the same COMPARE we
6837 want, create it from scratch. */
6838 else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
6839 || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6840 {
6841 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6842 src = SET_SRC (x);
6843 }
6844 }
6845 else
6846 {
6847 /* Get SET_SRC in a form where we have placed back any
6848 compound expressions. Then do the checks below. */
6849 src = make_compound_operation (src, SET);
6850 SUBST (SET_SRC (x), src);
6851 }
6852
6853 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6854 and X being a REG or (subreg (reg)), we may be able to convert this to
6855 (set (subreg:m2 x) (op)).
6856
6857 We can always do this if M1 is narrower than M2 because that means that
6858 we only care about the low bits of the result.
6859
6860 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6861 perform a narrower operation than requested since the high-order bits will
6862 be undefined. On machines where it is defined, this transformation is safe
6863 as long as M1 and M2 have the same number of words. */
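/* A sketch of the transformation (assuming QImode and SImode occupy the
   same number of words on the target):

     (set (reg:QI R) (subreg:QI (plus:SI A B) 0))

   can become

     (set (subreg:SI (reg:QI R) 0) (plus:SI A B))

   doing the addition in SImode; only the low byte of the result is
   needed, so nothing is lost by widening the destination.  */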
6864
6865 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6866 && !OBJECT_P (SUBREG_REG (src))
6867 && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
6868 / UNITS_PER_WORD)
6869 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
6870 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
6871 && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
6872 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6873 && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
6874 GET_MODE (SUBREG_REG (src)),
6875 GET_MODE (src)))
6876 && (REG_P (dest)
6877 || (GET_CODE (dest) == SUBREG
6878 && REG_P (SUBREG_REG (dest)))))
6879 {
6880 SUBST (SET_DEST (x),
6881 gen_lowpart (GET_MODE (SUBREG_REG (src)),
6882 dest));
6883 SUBST (SET_SRC (x), SUBREG_REG (src));
6884
6885 src = SET_SRC (x), dest = SET_DEST (x);
6886 }
6887
6888 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6889 in SRC. */
6890 if (dest == cc0_rtx
6891 && partial_subreg_p (src)
6892 && subreg_lowpart_p (src))
6893 {
6894 rtx inner = SUBREG_REG (src);
6895 machine_mode inner_mode = GET_MODE (inner);
6896
6897 /* Here we make sure that the sign bit is known to be clear. */
6898 if (val_signbit_known_clear_p (GET_MODE (src),
6899 nonzero_bits (inner, inner_mode)))
6900 {
6901 SUBST (SET_SRC (x), inner);
6902 src = SET_SRC (x);
6903 }
6904 }
6905
6906 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
6907 would require a paradoxical subreg. Replace the subreg with the extension
6908 chosen by load_extend_op to avoid the reload that would otherwise be required. */
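/* For example, on a target whose load_extend_op for QImode is ZERO_EXTEND
   (an assumption made here purely for illustration),

     (subreg:SI (mem:QI X) 0)

   in SRC is rewritten as (zero_extend:SI (mem:QI X)).  */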
6909
6910 enum rtx_code extend_op;
6911 if (paradoxical_subreg_p (src)
6912 && MEM_P (SUBREG_REG (src))
6913 && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
6914 {
6915 SUBST (SET_SRC (x),
6916 gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));
6917
6918 src = SET_SRC (x);
6919 }
6920
6921 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
6922 are comparing an item known to be 0 or -1 against 0, use a logical
6923 operation instead. Check for one of the arms being an IOR of the other
6924 arm with some value. We compute three terms to be IOR'ed together. In
6925 practice, at most two will be nonzero. Then we do the IOR's. */
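/* Concretely, when A is known to be 0 or -1, something like

     (if_then_else (ne A (const_int 0)) X Y)

   can be rewritten as (ior (and A X) (and (not A) Y)): the all-ones or
   all-zeros value of A selects one arm and masks out the other.  */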
6926
6927 if (GET_CODE (dest) != PC
6928 && GET_CODE (src) == IF_THEN_ELSE
6929 && is_int_mode (GET_MODE (src), &int_mode)
6930 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
6931 && XEXP (XEXP (src, 0), 1) == const0_rtx
6932 && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
6933 && (!HAVE_conditional_move
6934 || ! can_conditionally_move_p (int_mode))
6935 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
6936 == GET_MODE_PRECISION (int_mode))
6937 && ! side_effects_p (src))
6938 {
6939 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
6940 ? XEXP (src, 1) : XEXP (src, 2));
6941 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
6942 ? XEXP (src, 2) : XEXP (src, 1));
6943 rtx term1 = const0_rtx, term2, term3;
6944
6945 if (GET_CODE (true_rtx) == IOR
6946 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
6947 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
6948 else if (GET_CODE (true_rtx) == IOR
6949 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
6950 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
6951 else if (GET_CODE (false_rtx) == IOR
6952 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
6953 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
6954 else if (GET_CODE (false_rtx) == IOR
6955 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
6956 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
6957
6958 term2 = simplify_gen_binary (AND, int_mode,
6959 XEXP (XEXP (src, 0), 0), true_rtx);
6960 term3 = simplify_gen_binary (AND, int_mode,
6961 simplify_gen_unary (NOT, int_mode,
6962 XEXP (XEXP (src, 0), 0),
6963 int_mode),
6964 false_rtx);
6965
6966 SUBST (SET_SRC (x),
6967 simplify_gen_binary (IOR, int_mode,
6968 simplify_gen_binary (IOR, int_mode,
6969 term1, term2),
6970 term3));
6971
6972 src = SET_SRC (x);
6973 }
6974
6975 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
6976 whole thing fail. */
6977 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
6978 return src;
6979 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
6980 return dest;
6981 else
6982 /* Convert this into a field assignment operation, if possible. */
6983 return make_field_assignment (x);
6984 }
6985 \f
6986 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
6987 result. */
6988
6989 static rtx
6990 simplify_logical (rtx x)
6991 {
6992 rtx op0 = XEXP (x, 0);
6993 rtx op1 = XEXP (x, 1);
6994 scalar_int_mode mode;
6995
6996 switch (GET_CODE (x))
6997 {
6998 case AND:
6999 /* We can call simplify_and_const_int only if we don't lose
7000 any (sign) bits when converting INTVAL (op1) to
7001 "unsigned HOST_WIDE_INT". */
7002 if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
7003 && CONST_INT_P (op1)
7004 && (HWI_COMPUTABLE_MODE_P (mode)
7005 || INTVAL (op1) > 0))
7006 {
7007 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
7008 if (GET_CODE (x) != AND)
7009 return x;
7010
7011 op0 = XEXP (x, 0);
7012 op1 = XEXP (x, 1);
7013 }
7014
7015 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
7016 apply the distributive law and then the inverse distributive
7017 law to see if things simplify. */
7018 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
7019 {
7020 rtx result = distribute_and_simplify_rtx (x, 0);
7021 if (result)
7022 return result;
7023 }
7024 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
7025 {
7026 rtx result = distribute_and_simplify_rtx (x, 1);
7027 if (result)
7028 return result;
7029 }
7030 break;
7031
7032 case IOR:
7033 /* If we have (ior (and A B) C), apply the distributive law and then
7034 the inverse distributive law to see if things simplify. */
7035
7036 if (GET_CODE (op0) == AND)
7037 {
7038 rtx result = distribute_and_simplify_rtx (x, 0);
7039 if (result)
7040 return result;
7041 }
7042
7043 if (GET_CODE (op1) == AND)
7044 {
7045 rtx result = distribute_and_simplify_rtx (x, 1);
7046 if (result)
7047 return result;
7048 }
7049 break;
7050
7051 default:
7052 gcc_unreachable ();
7053 }
7054
7055 return x;
7056 }
7057 \f
7058 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
7059 operations" because they can be replaced with two more basic operations.
7060 ZERO_EXTEND is also considered "compound" because it can be replaced with
7061 an AND operation, which is simpler, though only one operation.
7062
7063 The function expand_compound_operation is called with an rtx expression
7064 and will convert it to the appropriate shifts and AND operations,
7065 simplifying at each stage.
7066
7067 The function make_compound_operation is called to convert an expression
7068 consisting of shifts and ANDs into the equivalent compound expression.
7069 It is the inverse of this function, loosely speaking. */
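/* A rough illustration of the two directions, sketched for a 32-bit
   scalar mode; the exact output depends on what simplifies best:

     expand_compound_operation:  (zero_extend:SI (subreg:QI X 0))
                             ->  (and:SI X (const_int 255))

     make_compound_operation:    (and:SI X (const_int 255))
                             ->  (zero_extend:SI (subreg:QI X 0))  */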
7070
7071 static rtx
7072 expand_compound_operation (rtx x)
7073 {
7074 unsigned HOST_WIDE_INT pos = 0, len;
7075 int unsignedp = 0;
7076 unsigned int modewidth;
7077 rtx tem;
7078 scalar_int_mode inner_mode;
7079
7080 switch (GET_CODE (x))
7081 {
7082 case ZERO_EXTEND:
7083 unsignedp = 1;
7084 /* FALLTHRU */
7085 case SIGN_EXTEND:
7086 /* We can't necessarily use a const_int for a multiword mode;
7087 it depends on implicitly extending the value.
7088 Since we don't know the right way to extend it,
7089 we can't tell whether the implicit way is right.
7090
7091 Even for a mode that is no wider than a const_int,
7092 we can't win, because we need to sign extend one of its bits through
7093 the rest of it, and we don't know which bit. */
7094 if (CONST_INT_P (XEXP (x, 0)))
7095 return x;
7096
7097 /* Reject modes that aren't scalar integers because turning vector
7098 or complex modes into shifts causes problems. */
7099 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7100 return x;
7101
7102 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
7103 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is safe for any MEM
7104 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
7105 reloaded. If not for that, MEM's would very rarely be safe.
7106
7107 Reject modes bigger than a word, because we might not be able
7108 to reference a two-register group starting with an arbitrary register
7109 (and currently gen_lowpart might crash for a SUBREG). */
7110
7111 if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7112 return x;
7113
7114 len = GET_MODE_PRECISION (inner_mode);
7115 /* If the inner object has VOIDmode (the only way this can happen
7116 is if it is an ASM_OPERANDS), we can't do anything since we don't
7117 know how much masking to do. */
7118 if (len == 0)
7119 return x;
7120
7121 break;
7122
7123 case ZERO_EXTRACT:
7124 unsignedp = 1;
7125
7126 /* fall through */
7127
7128 case SIGN_EXTRACT:
7129 /* If the operand is a CLOBBER, just return it. */
7130 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
7131 return XEXP (x, 0);
7132
7133 if (!CONST_INT_P (XEXP (x, 1))
7134 || !CONST_INT_P (XEXP (x, 2)))
7135 return x;
7136
7137 /* Reject modes that aren't scalar integers because turning vector
7138 or complex modes into shifts causes problems. */
7139 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7140 return x;
7141
7142 len = INTVAL (XEXP (x, 1));
7143 pos = INTVAL (XEXP (x, 2));
7144
7145 /* This should stay within the object being extracted, fail otherwise. */
7146 if (len + pos > GET_MODE_PRECISION (inner_mode))
7147 return x;
7148
7149 if (BITS_BIG_ENDIAN)
7150 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
7151
7152 break;
7153
7154 default:
7155 return x;
7156 }
7157
7158 /* We've rejected non-scalar operations by now. */
7159 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));
7160
7161 /* Convert sign extension to zero extension, if we know that the high
7162 bit is not set, as this is easier to optimize. It will be converted
7163 back to the cheaper alternative in make_extraction. */
7164 if (GET_CODE (x) == SIGN_EXTEND
7165 && HWI_COMPUTABLE_MODE_P (mode)
7166 && ((nonzero_bits (XEXP (x, 0), inner_mode)
7167 & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
7168 == 0))
7169 {
7170 rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7171 rtx temp2 = expand_compound_operation (temp);
7172
7173 /* Make sure this is a profitable operation. */
7174 if (set_src_cost (x, mode, optimize_this_for_speed_p)
7175 > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7176 return temp2;
7177 else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7178 > set_src_cost (temp, mode, optimize_this_for_speed_p))
7179 return temp;
7180 else
7181 return x;
7182 }
7183
7184 /* We can optimize some special cases of ZERO_EXTEND. */
7185 if (GET_CODE (x) == ZERO_EXTEND)
7186 {
7187 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7188 know that the last value didn't have any inappropriate bits
7189 set. */
7190 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7191 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7192 && HWI_COMPUTABLE_MODE_P (mode)
7193 && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
7194 & ~GET_MODE_MASK (inner_mode)) == 0)
7195 return XEXP (XEXP (x, 0), 0);
7196
7197 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7198 if (GET_CODE (XEXP (x, 0)) == SUBREG
7199 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7200 && subreg_lowpart_p (XEXP (x, 0))
7201 && HWI_COMPUTABLE_MODE_P (mode)
7202 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
7203 & ~GET_MODE_MASK (inner_mode)) == 0)
7204 return SUBREG_REG (XEXP (x, 0));
7205
7206 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7207 is a comparison and STORE_FLAG_VALUE permits. This is like
7208 the first case, but it works even when MODE is larger
7209 than HOST_WIDE_INT. */
7210 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7211 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7212 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7213 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7214 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7215 return XEXP (XEXP (x, 0), 0);
7216
7217 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7218 if (GET_CODE (XEXP (x, 0)) == SUBREG
7219 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7220 && subreg_lowpart_p (XEXP (x, 0))
7221 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7222 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7223 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7224 return SUBREG_REG (XEXP (x, 0));
7225
7226 }
7227
7228 /* If we reach here, we want to return a pair of shifts. The inner
7229 shift is a left shift of BITSIZE - POS - LEN bits. The outer
7230 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
7231 logical depending on the value of UNSIGNEDP.
7232
7233 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7234 converted into an AND of a shift.
7235
7236 We must check for the case where the left shift would have a negative
7237 count. This can happen in a case like (x >> 31) & 255 on machines
7238 that can't shift by a constant. On those machines, we would first
7239 combine the shift with the AND to produce a variable-position
7240 extraction. Then the constant of 31 would be substituted in
7241 to produce such a position. */
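/* For example, with POS 4, LEN 8 and a 32-bit MODE, a signed extraction
   (sign_extract:SI X (const_int 8) (const_int 4)) comes out as

     (ashiftrt:SI (ashift:SI X (const_int 20)) (const_int 24))

   i.e. a left shift of 32 - 4 - 8 bits followed by an arithmetic right
   shift of 32 - 8 bits.  */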
7242
7243 modewidth = GET_MODE_PRECISION (mode);
7244 if (modewidth >= pos + len)
7245 {
7246 tem = gen_lowpart (mode, XEXP (x, 0));
7247 if (!tem || GET_CODE (tem) == CLOBBER)
7248 return x;
7249 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7250 tem, modewidth - pos - len);
7251 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7252 mode, tem, modewidth - len);
7253 }
7254 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7255 tem = simplify_and_const_int (NULL_RTX, mode,
7256 simplify_shift_const (NULL_RTX, LSHIFTRT,
7257 mode, XEXP (x, 0),
7258 pos),
7259 (HOST_WIDE_INT_1U << len) - 1);
7260 else
7261 /* Any other cases we can't handle. */
7262 return x;
7263
7264 /* If we couldn't do this for some reason, return the original
7265 expression. */
7266 if (GET_CODE (tem) == CLOBBER)
7267 return x;
7268
7269 return tem;
7270 }
7271 \f
7272 /* X is a SET which contains an assignment of one object into
7273 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7274 or certain SUBREGS). If possible, convert it into a series of
7275 logical operations.
7276
7277 We half-heartedly support variable positions, but do not at all
7278 support variable lengths. */
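/* A sketch of the expansion for a constant-position bit-field store
   (assuming !BITS_BIG_ENDIAN, so POS needs no adjustment):

     (set (zero_extract:SI R (const_int 8) (const_int 4)) S)

   becomes roughly

     (set R (ior:SI (and:SI (not:SI (ashift:SI (const_int 255)
                                               (const_int 4)))
                            R)
                    (ashift:SI (and:SI S' (const_int 255))
                               (const_int 4))))

   where S' is S taken in the compute mode: first clear the field in R,
   then IOR in the masked, shifted source.  */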
7279
7280 static const_rtx
7281 expand_field_assignment (const_rtx x)
7282 {
7283 rtx inner;
7284 rtx pos; /* Always counts from low bit. */
7285 int len;
7286 rtx mask, cleared, masked;
7287 scalar_int_mode compute_mode;
7288
7289 /* Loop until we find something we can't simplify. */
7290 while (1)
7291 {
7292 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7293 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7294 {
7295 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7296 len = GET_MODE_PRECISION (GET_MODE (XEXP (SET_DEST (x), 0)));
7297 pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
7298 }
7299 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7300 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7301 {
7302 inner = XEXP (SET_DEST (x), 0);
7303 len = INTVAL (XEXP (SET_DEST (x), 1));
7304 pos = XEXP (SET_DEST (x), 2);
7305
7306 /* A constant position should stay within the width of INNER. */
7307 if (CONST_INT_P (pos)
7308 && INTVAL (pos) + len > GET_MODE_PRECISION (GET_MODE (inner)))
7309 break;
7310
7311 if (BITS_BIG_ENDIAN)
7312 {
7313 if (CONST_INT_P (pos))
7314 pos = GEN_INT (GET_MODE_PRECISION (GET_MODE (inner)) - len
7315 - INTVAL (pos));
7316 else if (GET_CODE (pos) == MINUS
7317 && CONST_INT_P (XEXP (pos, 1))
7318 && (INTVAL (XEXP (pos, 1))
7319 == GET_MODE_PRECISION (GET_MODE (inner)) - len))
7320 /* If position is ADJUST - X, new position is X. */
7321 pos = XEXP (pos, 0);
7322 else
7323 {
7324 HOST_WIDE_INT prec = GET_MODE_PRECISION (GET_MODE (inner));
7325 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7326 gen_int_mode (prec - len,
7327 GET_MODE (pos)),
7328 pos);
7329 }
7330 }
7331 }
7332
7333 /* A SUBREG between two modes that occupy the same number of words
7334 can be done by moving the SUBREG to the source. */
7335 else if (GET_CODE (SET_DEST (x)) == SUBREG
7336 /* We need SUBREGs to compute nonzero_bits properly. */
7337 && nonzero_sign_valid
7338 && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
7339 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
7340 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
7341 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
7342 {
7343 x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7344 gen_lowpart
7345 (GET_MODE (SUBREG_REG (SET_DEST (x))),
7346 SET_SRC (x)));
7347 continue;
7348 }
7349 else
7350 break;
7351
7352 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7353 inner = SUBREG_REG (inner);
7354
7355 /* Don't attempt bitwise arithmetic on non-scalar integer modes. */
7356 if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
7357 {
7358 /* Don't do anything for vector or complex integral types. */
7359 if (! FLOAT_MODE_P (GET_MODE (inner)))
7360 break;
7361
7362 /* Try to find an integral mode to pun with. */
7363 if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
7364 .exists (&compute_mode))
7365 break;
7366
7367 inner = gen_lowpart (compute_mode, inner);
7368 }
7369
7370 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7371 if (len >= HOST_BITS_PER_WIDE_INT)
7372 break;
7373
7374 /* Don't try to compute in too wide unsupported modes. */
7375 if (!targetm.scalar_mode_supported_p (compute_mode))
7376 break;
7377
7378 /* Now compute the equivalent expression. Make a copy of INNER
7379 for the SET_DEST in case it is a MEM into which we will substitute;
7380 we don't want shared RTL in that case. */
7381 mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
7382 compute_mode);
7383 cleared = simplify_gen_binary (AND, compute_mode,
7384 simplify_gen_unary (NOT, compute_mode,
7385 simplify_gen_binary (ASHIFT,
7386 compute_mode,
7387 mask, pos),
7388 compute_mode),
7389 inner);
7390 masked = simplify_gen_binary (ASHIFT, compute_mode,
7391 simplify_gen_binary (
7392 AND, compute_mode,
7393 gen_lowpart (compute_mode, SET_SRC (x)),
7394 mask),
7395 pos);
7396
7397 x = gen_rtx_SET (copy_rtx (inner),
7398 simplify_gen_binary (IOR, compute_mode,
7399 cleared, masked));
7400 }
7401
7402 return x;
7403 }
7404 \f
7405 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7406 it is an RTX that represents the (variable) starting position; otherwise,
7407 POS is the (constant) starting bit position. Both are counted from the LSB.
7408
7409 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7410
7411 IN_DEST is nonzero if this is a reference in the destination of a SET.
7412 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7413 a STRICT_LOW_PART will be used; if zero, ZERO_EXTEND or SIGN_EXTEND will
7414 be used.
7415
7416 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7417 ZERO_EXTRACT should be built even for bits starting at bit 0.
7418
7419 MODE is the desired mode of the result (if IN_DEST == 0).
7420
7421 The result is an RTX for the extraction or NULL_RTX if the target
7422 can't handle it. */
7423
7424 static rtx
7425 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7426 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7427 int in_dest, int in_compare)
7428 {
7429 /* This mode describes the size of the storage area
7430 to fetch the overall value from. Within that, we
7431 ignore the POS lowest bits, etc. */
7432 machine_mode is_mode = GET_MODE (inner);
7433 machine_mode inner_mode;
7434 scalar_int_mode wanted_inner_mode;
7435 scalar_int_mode wanted_inner_reg_mode = word_mode;
7436 scalar_int_mode pos_mode = word_mode;
7437 machine_mode extraction_mode = word_mode;
7438 rtx new_rtx = 0;
7439 rtx orig_pos_rtx = pos_rtx;
7440 HOST_WIDE_INT orig_pos;
7441
7442 if (pos_rtx && CONST_INT_P (pos_rtx))
7443 pos = INTVAL (pos_rtx), pos_rtx = 0;
7444
7445 if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7446 {
7447 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7448 consider just the QI as the memory to extract from.
7449 The subreg adds or removes high bits; its mode is
7450 irrelevant to the meaning of this extraction,
7451 since POS and LEN count from the lsb. */
7452 if (MEM_P (SUBREG_REG (inner)))
7453 is_mode = GET_MODE (SUBREG_REG (inner));
7454 inner = SUBREG_REG (inner);
7455 }
7456 else if (GET_CODE (inner) == ASHIFT
7457 && CONST_INT_P (XEXP (inner, 1))
7458 && pos_rtx == 0 && pos == 0
7459 && len > UINTVAL (XEXP (inner, 1)))
7460 {
7461 /* We're extracting the least significant bits of an rtx
7462 (ashift X (const_int C)), where LEN > C. Extract the
7463 least significant (LEN - C) bits of X, giving an rtx
7464 whose mode is MODE, then shift it left C times. */
7465 new_rtx = make_extraction (mode, XEXP (inner, 0),
7466 0, 0, len - INTVAL (XEXP (inner, 1)),
7467 unsignedp, in_dest, in_compare);
7468 if (new_rtx != 0)
7469 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7470 }
7471 else if (GET_CODE (inner) == TRUNCATE)
7472 inner = XEXP (inner, 0);
7473
7474 inner_mode = GET_MODE (inner);
7475
7476 /* See if this can be done without an extraction. We never can if the
7477 width of the field is not the same as that of some integer mode. For
7478 registers, we can only avoid the extraction if the position is at the
7479 low-order bit and this is either not in the destination or we have the
7480 appropriate STRICT_LOW_PART operation available.
7481
7482 For MEM, we can avoid an extract if the field starts on an appropriate
7483 boundary and we can change the mode of the memory reference. */
7484
7485 scalar_int_mode tmode;
7486 if (int_mode_for_size (len, 1).exists (&tmode)
7487 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7488 && !MEM_P (inner)
7489 && (pos == 0 || REG_P (inner))
7490 && (inner_mode == tmode
7491 || !REG_P (inner)
7492 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7493 || reg_truncated_to_mode (tmode, inner))
7494 && (! in_dest
7495 || (REG_P (inner)
7496 && have_insn_for (STRICT_LOW_PART, tmode))))
7497 || (MEM_P (inner) && pos_rtx == 0
7498 && (pos
7499 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7500 : BITS_PER_UNIT)) == 0
7501 /* We can't do this if we are widening INNER_MODE (it
7502 may not be aligned, for one thing). */
7503 && !paradoxical_subreg_p (tmode, inner_mode)
7504 && (inner_mode == tmode
7505 || (! mode_dependent_address_p (XEXP (inner, 0),
7506 MEM_ADDR_SPACE (inner))
7507 && ! MEM_VOLATILE_P (inner))))))
7508 {
7509 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7510 field. If the original and current mode are the same, we need not
7511 adjust the offset. Otherwise, we do if bytes big endian.
7512
7513 If INNER is not a MEM, get a piece consisting of just the field
7514 of interest (in this case POS % BITS_PER_WORD must be 0). */
7515
7516 if (MEM_P (inner))
7517 {
7518 HOST_WIDE_INT offset;
7519
7520 /* POS counts from lsb, but make OFFSET count in memory order. */
7521 if (BYTES_BIG_ENDIAN)
7522 offset = (GET_MODE_PRECISION (is_mode) - len - pos) / BITS_PER_UNIT;
7523 else
7524 offset = pos / BITS_PER_UNIT;
7525
7526 new_rtx = adjust_address_nv (inner, tmode, offset);
7527 }
7528 else if (REG_P (inner))
7529 {
7530 if (tmode != inner_mode)
7531 {
7532 /* We can't call gen_lowpart in a DEST since we
7533 always want a SUBREG (see below) and it would sometimes
7534 return a new hard register. */
7535 if (pos || in_dest)
7536 {
7537 unsigned int offset
7538 = subreg_offset_from_lsb (tmode, inner_mode, pos);
7539
7540 /* Avoid creating invalid subregs, for example when
7541 simplifying (x>>32)&255. */
7542 if (!validate_subreg (tmode, inner_mode, inner, offset))
7543 return NULL_RTX;
7544
7545 new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
7546 }
7547 else
7548 new_rtx = gen_lowpart (tmode, inner);
7549 }
7550 else
7551 new_rtx = inner;
7552 }
7553 else
7554 new_rtx = force_to_mode (inner, tmode,
7555 len >= HOST_BITS_PER_WIDE_INT
7556 ? HOST_WIDE_INT_M1U
7557 : (HOST_WIDE_INT_1U << len) - 1, 0);
7558
7559 /* If this extraction is going into the destination of a SET,
7560 make a STRICT_LOW_PART unless we made a MEM. */
7561
7562 if (in_dest)
7563 return (MEM_P (new_rtx) ? new_rtx
7564 : (GET_CODE (new_rtx) != SUBREG
7565 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7566 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7567
7568 if (mode == tmode)
7569 return new_rtx;
7570
7571 if (CONST_SCALAR_INT_P (new_rtx))
7572 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7573 mode, new_rtx, tmode);
7574
7575 /* If we know that no extraneous bits are set, and that the high
7576 bit is not set, convert the extraction to the cheaper of
7577 sign and zero extension, that are equivalent in these cases. */
7578 if (flag_expensive_optimizations
7579 && (HWI_COMPUTABLE_MODE_P (tmode)
7580 && ((nonzero_bits (new_rtx, tmode)
7581 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7582 == 0)))
7583 {
7584 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7585 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7586
7587 /* Prefer ZERO_EXTENSION, since it gives more information to
7588 backends. */
7589 if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7590 <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7591 return temp;
7592 return temp1;
7593 }
7594
7595 /* Otherwise, sign- or zero-extend unless we already are in the
7596 proper mode. */
7597
7598 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7599 mode, new_rtx));
7600 }
7601
7602 /* Unless this is a COMPARE or we have a funny memory reference,
7603 don't do anything with zero-extending field extracts starting at
7604 the low-order bit since they are simple AND operations. */
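/* (An unsigned extract of LEN bits at bit 0 is just
   (and X (const_int (2**LEN - 1))), e.g. 255 for an 8-bit field, so
   there is nothing to gain by rebuilding it as an extraction here.)  */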
7605 if (pos_rtx == 0 && pos == 0 && ! in_dest
7606 && ! in_compare && unsignedp)
7607 return 0;
7608
7609 /* If INNER is a MEM, reject this if we would be spanning bytes or
7610 if the position is not a constant and the length is not 1. In all
7611 other cases, we would only be going outside our object in cases when
7612 an original shift would have been undefined. */
7613 if (MEM_P (inner)
7614 && ((pos_rtx == 0 && pos + len > GET_MODE_PRECISION (is_mode))
7615 || (pos_rtx != 0 && len != 1)))
7616 return 0;
7617
7618 enum extraction_pattern pattern = (in_dest ? EP_insv
7619 : unsignedp ? EP_extzv : EP_extv);
7620
7621 /* If INNER is not from memory, we want it to have the mode of a register
7622 extraction pattern's structure operand, or word_mode if there is no
7623 such pattern. The same applies to extraction_mode and pos_mode
7624 and their respective operands.
7625
7626 For memory, assume that the desired extraction_mode and pos_mode
7627 are the same as for a register operation, since at present we don't
7628 have named patterns for aligned memory structures. */
7629 struct extraction_insn insn;
7630 if (get_best_reg_extraction_insn (&insn, pattern,
7631 GET_MODE_BITSIZE (inner_mode), mode))
7632 {
7633 wanted_inner_reg_mode = insn.struct_mode.require ();
7634 pos_mode = insn.pos_mode;
7635 extraction_mode = insn.field_mode;
7636 }
7637
7638 /* Never narrow an object, since that might not be safe. */
7639
7640 if (mode != VOIDmode
7641 && partial_subreg_p (extraction_mode, mode))
7642 extraction_mode = mode;
7643
7644 if (!MEM_P (inner))
7645 wanted_inner_mode = wanted_inner_reg_mode;
7646 else
7647 {
7648 /* Be careful not to go beyond the extracted object and maintain the
7649 natural alignment of the memory. */
7650 wanted_inner_mode = smallest_int_mode_for_size (len);
7651 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7652 > GET_MODE_BITSIZE (wanted_inner_mode))
7653 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
7654 }
7655
7656 orig_pos = pos;
7657
7658 if (BITS_BIG_ENDIAN)
7659 {
7660 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7661 BITS_BIG_ENDIAN style. If position is constant, compute new
7662 position. Otherwise, build subtraction.
7663 Note that POS is relative to the mode of the original argument.
7664 If it's a MEM we need to recompute POS relative to that.
7665 However, if we're extracting from (or inserting into) a register,
7666 we want to recompute POS relative to wanted_inner_mode. */
7667 int width = (MEM_P (inner)
7668 ? GET_MODE_BITSIZE (is_mode)
7669 : GET_MODE_BITSIZE (wanted_inner_mode));
7670
7671 if (pos_rtx == 0)
7672 pos = width - len - pos;
7673 else
7674 pos_rtx
7675 = gen_rtx_MINUS (GET_MODE (pos_rtx),
7676 gen_int_mode (width - len, GET_MODE (pos_rtx)),
7677 pos_rtx);
7678 /* POS may be less than 0 now, but we check for that below.
7679 Note that it can only be less than 0 if !MEM_P (inner). */
7680 }
7681
7682 /* If INNER has a wider mode, and this is a constant extraction, try to
7683 make it smaller and adjust the byte to point to the byte containing
7684 the value. */
7685 if (wanted_inner_mode != VOIDmode
7686 && inner_mode != wanted_inner_mode
7687 && ! pos_rtx
7688 && partial_subreg_p (wanted_inner_mode, is_mode)
7689 && MEM_P (inner)
7690 && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7691 && ! MEM_VOLATILE_P (inner))
7692 {
7693 int offset = 0;
7694
7695 /* The computations below will be correct if the machine is big
7696 endian in both bits and bytes or little endian in bits and bytes.
7697 If it is mixed, we must adjust. */
7698
7699 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7700 adjust OFFSET to compensate. */
7701 if (BYTES_BIG_ENDIAN
7702 && paradoxical_subreg_p (is_mode, inner_mode))
7703 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7704
7705 /* We can now move to the desired byte. */
7706 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7707 * GET_MODE_SIZE (wanted_inner_mode);
7708 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7709
7710 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7711 && is_mode != wanted_inner_mode)
7712 offset = (GET_MODE_SIZE (is_mode)
7713 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7714
7715 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7716 }
7717
7718 /* If INNER is not memory, get it into the proper mode. If we are changing
7719 its mode, POS must be a constant and smaller than the size of the new
7720 mode. */
7721 else if (!MEM_P (inner))
7722 {
7723 /* On the LHS, don't create paradoxical subregs implicitly truncating
7724 the register unless TARGET_TRULY_NOOP_TRUNCATION. */
7725 if (in_dest
7726 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7727 wanted_inner_mode))
7728 return NULL_RTX;
7729
7730 if (GET_MODE (inner) != wanted_inner_mode
7731 && (pos_rtx != 0
7732 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7733 return NULL_RTX;
7734
7735 if (orig_pos < 0)
7736 return NULL_RTX;
7737
7738 inner = force_to_mode (inner, wanted_inner_mode,
7739 pos_rtx
7740 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7741 ? HOST_WIDE_INT_M1U
7742 : (((HOST_WIDE_INT_1U << len) - 1)
7743 << orig_pos),
7744 0);
7745 }
7746
7747 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7748 have to zero extend. Otherwise, we can just use a SUBREG.
7749
7750 We dealt with constant rtxes earlier, so pos_rtx cannot
7751 have VOIDmode at this point. */
7752 if (pos_rtx != 0
7753 && (GET_MODE_SIZE (pos_mode)
7754 > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
7755 {
7756 rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7757 GET_MODE (pos_rtx));
7758
7759 /* If we know that no extraneous bits are set, and that the high
7760 bit is not set, convert the extraction to the cheaper of
7761 SIGN_EXTENSION and ZERO_EXTENSION, which are equivalent in these
7762 cases. */
7763 if (flag_expensive_optimizations
7764 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7765 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7766 & ~(((unsigned HOST_WIDE_INT)
7767 GET_MODE_MASK (GET_MODE (pos_rtx)))
7768 >> 1))
7769 == 0)))
7770 {
7771 rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7772 GET_MODE (pos_rtx));
7773
7774 /* Prefer ZERO_EXTENSION, since it gives more information to
7775 backends. */
7776 if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7777 < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7778 temp = temp1;
7779 }
7780 pos_rtx = temp;
7781 }
7782
7783 /* Make POS_RTX unless we already have it and it is correct. If we don't
7784 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7785 be a CONST_INT. */
7786 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7787 pos_rtx = orig_pos_rtx;
7788
7789 else if (pos_rtx == 0)
7790 pos_rtx = GEN_INT (pos);
7791
7792 /* Make the required operation. See if we can use existing rtx. */
7793 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7794 extraction_mode, inner, GEN_INT (len), pos_rtx);
7795 if (! in_dest)
7796 new_rtx = gen_lowpart (mode, new_rtx);
7797
7798 return new_rtx;
7799 }
7800 \f
7801 /* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
7802 can be commuted with any other operations in X. Return X without
7803 that shift if so. */
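/* For instance, with COUNT == 2 (a made-up example):

     extract_left_shift (SImode, (plus:SI (ashift:SI X (const_int 2))
                                          (const_int 12)), 2)

   returns (plus:SI X (const_int 3)), because the original expression is
   equivalent to (ashift:SI (plus:SI X (const_int 3)) (const_int 2)).  */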
7804
7805 static rtx
7806 extract_left_shift (scalar_int_mode mode, rtx x, int count)
7807 {
7808 enum rtx_code code = GET_CODE (x);
7809 rtx tem;
7810
7811 switch (code)
7812 {
7813 case ASHIFT:
7814 /* This is the shift itself. If it is wide enough, we will return
7815 either the value being shifted if the shift count is equal to
7816 COUNT or a shift for the difference. */
7817 if (CONST_INT_P (XEXP (x, 1))
7818 && INTVAL (XEXP (x, 1)) >= count)
7819 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7820 INTVAL (XEXP (x, 1)) - count);
7821 break;
7822
7823 case NEG: case NOT:
7824 if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7825 return simplify_gen_unary (code, mode, tem, mode);
7826
7827 break;
7828
7829 case PLUS: case IOR: case XOR: case AND:
7830 /* If we can safely shift this constant and we find the inner shift,
7831 make a new operation. */
7832 if (CONST_INT_P (XEXP (x, 1))
7833 && (UINTVAL (XEXP (x, 1))
7834 & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
7835 && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7836 {
7837 HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
7838 return simplify_gen_binary (code, mode, tem,
7839 gen_int_mode (val, mode));
7840 }
7841 break;
7842
7843 default:
7844 break;
7845 }
7846
7847 return 0;
7848 }
7849 \f
7850 /* Subroutine of make_compound_operation. *X_PTR is the rtx at the current
7851 level of the expression and MODE is its mode. IN_CODE is as for
7852 make_compound_operation. *NEXT_CODE_PTR is the value of IN_CODE
7853 that should be used when recursing on operands of *X_PTR.
7854
7855 There are two possible actions:
7856
7857 - Return null. This tells the caller to recurse on *X_PTR with IN_CODE
7858 equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.
7859
7860 - Return a new rtx, which the caller returns directly. */
7861
7862 static rtx
7863 make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
7864 enum rtx_code in_code,
7865 enum rtx_code *next_code_ptr)
7866 {
7867 rtx x = *x_ptr;
7868 enum rtx_code next_code = *next_code_ptr;
7869 enum rtx_code code = GET_CODE (x);
7870 int mode_width = GET_MODE_PRECISION (mode);
7871 rtx rhs, lhs;
7872 rtx new_rtx = 0;
7873 int i;
7874 rtx tem;
7875 scalar_int_mode inner_mode;
7876 bool equality_comparison = false;
7877
7878 if (in_code == EQ)
7879 {
7880 equality_comparison = true;
7881 in_code = COMPARE;
7882 }
7883
7884 /* Process depending on the code of this operation. If NEW_RTX is set
7885 nonzero, it will be returned. */
7886
7887 switch (code)
7888 {
7889 case ASHIFT:
7890 /* Convert shifts by constants into multiplications if inside
7891 an address. */
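/* E.g. inside a MEM address, (ashift:SI R (const_int 2)) is rewritten
   below as (mult:SI R (const_int 4)), the canonical MULT form used
   inside addresses.  */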
7892 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
7893 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
7894 && INTVAL (XEXP (x, 1)) >= 0)
7895 {
7896 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
7897 HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;
7898
7899 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
7900 if (GET_CODE (new_rtx) == NEG)
7901 {
7902 new_rtx = XEXP (new_rtx, 0);
7903 multval = -multval;
7904 }
7905 multval = trunc_int_for_mode (multval, mode);
7906 new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
7907 }
7908 break;
7909
7910 case PLUS:
7911 lhs = XEXP (x, 0);
7912 rhs = XEXP (x, 1);
7913 lhs = make_compound_operation (lhs, next_code);
7914 rhs = make_compound_operation (rhs, next_code);
7915 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
7916 {
7917 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
7918 XEXP (lhs, 1));
7919 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7920 }
7921 else if (GET_CODE (lhs) == MULT
7922 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
7923 {
7924 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
7925 simplify_gen_unary (NEG, mode,
7926 XEXP (lhs, 1),
7927 mode));
7928 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
7929 }
7930 else
7931 {
7932 SUBST (XEXP (x, 0), lhs);
7933 SUBST (XEXP (x, 1), rhs);
7934 }
7935 maybe_swap_commutative_operands (x);
7936 return x;
7937
7938 case MINUS:
7939 lhs = XEXP (x, 0);
7940 rhs = XEXP (x, 1);
7941 lhs = make_compound_operation (lhs, next_code);
7942 rhs = make_compound_operation (rhs, next_code);
7943 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
7944 {
7945 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
7946 XEXP (rhs, 1));
7947 return simplify_gen_binary (PLUS, mode, tem, lhs);
7948 }
7949 else if (GET_CODE (rhs) == MULT
7950 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
7951 {
7952 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
7953 simplify_gen_unary (NEG, mode,
7954 XEXP (rhs, 1),
7955 mode));
7956 return simplify_gen_binary (PLUS, mode, tem, lhs);
7957 }
7958 else
7959 {
7960 SUBST (XEXP (x, 0), lhs);
7961 SUBST (XEXP (x, 1), rhs);
7962 return x;
7963 }
7964
7965 case AND:
7966 /* If the second operand is not a constant, we can't do anything
7967 with it. */
7968 if (!CONST_INT_P (XEXP (x, 1)))
7969 break;
7970
7971 /* If the constant is a power of two minus one and the first operand
7972 is a logical right shift, make an extraction. */
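/* For example, (and:SI (lshiftrt:SI X (const_int 8)) (const_int 255))
   describes an unsigned 8-bit field starting at bit 8 of X, so it is
   handed to make_extraction as such (which may render it as a
   zero_extract, a subreg or a zero_extend, whichever is simplest).  */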
7973 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
7974 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7975 {
7976 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
7977 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
7978 i, 1, 0, in_code == COMPARE);
7979 }
7980
7981 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
7982 else if (GET_CODE (XEXP (x, 0)) == SUBREG
7983 && subreg_lowpart_p (XEXP (x, 0))
7984 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
7985 &inner_mode)
7986 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
7987 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
7988 {
7989 rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
7990 new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
7991 new_rtx = make_extraction (inner_mode, new_rtx, 0,
7992 XEXP (inner_x0, 1),
7993 i, 1, 0, in_code == COMPARE);
7994
7995 /* If we narrowed the mode when dropping the subreg, then we lose. */
7996 if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
7997 new_rtx = NULL;
7998
7999 /* If that didn't give anything, see if the AND simplifies on
8000 its own. */
8001 if (!new_rtx && i >= 0)
8002 {
8003 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8004 new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
8005 0, in_code == COMPARE);
8006 }
8007 }
8008 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
8009 else if ((GET_CODE (XEXP (x, 0)) == XOR
8010 || GET_CODE (XEXP (x, 0)) == IOR)
8011 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
8012 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
8013 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8014 {
8015 /* Apply the distributive law, and then try to make extractions. */
8016 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
8017 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
8018 XEXP (x, 1)),
8019 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
8020 XEXP (x, 1)));
8021 new_rtx = make_compound_operation (new_rtx, in_code);
8022 }
8023
8024 /* If we have (and (rotate X C) M) and C is larger than the number
8025 of bits in M, this is an extraction. */
8026
8027 else if (GET_CODE (XEXP (x, 0)) == ROTATE
8028 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8029 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
8030 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
8031 {
8032 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8033 new_rtx = make_extraction (mode, new_rtx,
8034 (GET_MODE_PRECISION (mode)
8035 - INTVAL (XEXP (XEXP (x, 0), 1))),
8036 NULL_RTX, i, 1, 0, in_code == COMPARE);
8037 }
8038
8039 /* On machines without logical shifts, if the operand of the AND is
8040 a logical shift and our mask turns off all the propagated sign
8041 bits, we can replace the logical shift with an arithmetic shift. */
8042 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8043 && !have_insn_for (LSHIFTRT, mode)
8044 && have_insn_for (ASHIFTRT, mode)
8045 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8046 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8047 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8048 && mode_width <= HOST_BITS_PER_WIDE_INT)
8049 {
8050 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
8051
8052 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
8053 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
8054 SUBST (XEXP (x, 0),
8055 gen_rtx_ASHIFTRT (mode,
8056 make_compound_operation (XEXP (XEXP (x,
8057 0),
8058 0),
8059 next_code),
8060 XEXP (XEXP (x, 0), 1)));
8061 }
8062
8063 /* If the constant is one less than a power of two, this might be
8064 representable by an extraction even if no shift is present.
8065 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
8066 we are in a COMPARE. */
8067 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8068 new_rtx = make_extraction (mode,
8069 make_compound_operation (XEXP (x, 0),
8070 next_code),
8071 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
8072
8073 /* If we are in a comparison and this is an AND with a power of two,
8074 convert this into the appropriate bit extract. */
8075 else if (in_code == COMPARE
8076 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
8077 && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
8078 new_rtx = make_extraction (mode,
8079 make_compound_operation (XEXP (x, 0),
8080 next_code),
8081 i, NULL_RTX, 1, 1, 0, 1);
8082
8083 /* If the first operand is a paradoxical subreg of a register or memory and
8084 the constant (limited to the smaller mode) has only zero bits where
8085 the sub expression has known zero bits, this can be expressed as
8086 a zero_extend. */
8087 else if (GET_CODE (XEXP (x, 0)) == SUBREG)
8088 {
8089 rtx sub;
8090
8091 sub = XEXP (XEXP (x, 0), 0);
8092 machine_mode sub_mode = GET_MODE (sub);
8093 if ((REG_P (sub) || MEM_P (sub))
8094 && GET_MODE_PRECISION (sub_mode) < mode_width)
8095 {
8096 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
8097 unsigned HOST_WIDE_INT mask;
8098
8099 /* The original AND constant with all the known zero bits set. */
8100 mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
8101 if ((mask & mode_mask) == mode_mask)
8102 {
8103 new_rtx = make_compound_operation (sub, next_code);
8104 new_rtx = make_extraction (mode, new_rtx, 0, 0,
8105 GET_MODE_PRECISION (sub_mode),
8106 1, 0, in_code == COMPARE);
8107 }
8108 }
8109 }
8110
8111 break;
8112
8113 case LSHIFTRT:
8114 /* If the sign bit is known to be zero, replace this with an
8115 arithmetic shift. */
8116 if (have_insn_for (ASHIFTRT, mode)
8117 && ! have_insn_for (LSHIFTRT, mode)
8118 && mode_width <= HOST_BITS_PER_WIDE_INT
8119 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
8120 {
8121 new_rtx = gen_rtx_ASHIFTRT (mode,
8122 make_compound_operation (XEXP (x, 0),
8123 next_code),
8124 XEXP (x, 1));
8125 break;
8126 }
8127
8128 /* fall through */
8129
8130 case ASHIFTRT:
8131 lhs = XEXP (x, 0);
8132 rhs = XEXP (x, 1);
8133
8134 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
8135 this is a SIGN_EXTRACT. */
8136 if (CONST_INT_P (rhs)
8137 && GET_CODE (lhs) == ASHIFT
8138 && CONST_INT_P (XEXP (lhs, 1))
8139 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
8140 && INTVAL (XEXP (lhs, 1)) >= 0
8141 && INTVAL (rhs) < mode_width)
8142 {
8143 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
8144 new_rtx = make_extraction (mode, new_rtx,
8145 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
8146 NULL_RTX, mode_width - INTVAL (rhs),
8147 code == LSHIFTRT, 0, in_code == COMPARE);
8148 break;
8149 }
8150
8151 /* See if we have operations between an ASHIFTRT and an ASHIFT.
8152 If so, try to merge the shifts into a SIGN_EXTEND. We could
8153 also do this for some cases of SIGN_EXTRACT, but it doesn't
8154 seem worth the effort; the case checked for occurs on Alpha. */
8155
8156 if (!OBJECT_P (lhs)
8157 && ! (GET_CODE (lhs) == SUBREG
8158 && (OBJECT_P (SUBREG_REG (lhs))))
8159 && CONST_INT_P (rhs)
8160 && INTVAL (rhs) >= 0
8161 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8162 && INTVAL (rhs) < mode_width
8163 && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
8164 new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
8165 next_code),
8166 0, NULL_RTX, mode_width - INTVAL (rhs),
8167 code == LSHIFTRT, 0, in_code == COMPARE);
8168
8169 break;
8170
8171 case SUBREG:
8172 /* Call ourselves recursively on the inner expression. If we are
8173 narrowing the object and it has a different RTL code from
8174 what it originally did, do this SUBREG as a force_to_mode. */
8175 {
8176 rtx inner = SUBREG_REG (x), simplified;
8177 enum rtx_code subreg_code = in_code;
8178
8179 /* If the SUBREG is the low part of a logical right shift,
8180 make an extraction. */
8181 if (GET_CODE (inner) == LSHIFTRT
8182 && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
8183 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
8184 && CONST_INT_P (XEXP (inner, 1))
8185 && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
8186 && subreg_lowpart_p (x))
8187 {
8188 new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
8189 int width = GET_MODE_PRECISION (inner_mode)
8190 - INTVAL (XEXP (inner, 1));
8191 if (width > mode_width)
8192 width = mode_width;
8193 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
8194 width, 1, 0, in_code == COMPARE);
8195 break;
8196 }
8197
8198 /* If in_code is COMPARE, it isn't always safe to pass it through
8199 to the recursive make_compound_operation call. */
8200 if (subreg_code == COMPARE
8201 && (!subreg_lowpart_p (x)
8202 || GET_CODE (inner) == SUBREG
8203 /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
8204 is (const_int 0), rather than
8205 (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
8206 Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
8207 for non-equality comparisons against 0 is not equivalent
8208 to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0). */
8209 || (GET_CODE (inner) == AND
8210 && CONST_INT_P (XEXP (inner, 1))
8211 && partial_subreg_p (x)
8212 && exact_log2 (UINTVAL (XEXP (inner, 1)))
8213 >= GET_MODE_BITSIZE (mode) - 1)))
8214 subreg_code = SET;
8215
8216 tem = make_compound_operation (inner, subreg_code);
8217
8218 simplified
8219 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8220 if (simplified)
8221 tem = simplified;
8222
8223 if (GET_CODE (tem) != GET_CODE (inner)
8224 && partial_subreg_p (x)
8225 && subreg_lowpart_p (x))
8226 {
8227 rtx newer
8228 = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
8229
8230 /* If we have something other than a SUBREG, we might have
8231 done an expansion, so rerun ourselves. */
8232 if (GET_CODE (newer) != SUBREG)
8233 newer = make_compound_operation (newer, in_code);
8234
8235 /* force_to_mode can expand compounds. If it just re-expanded
8236 the compound, use gen_lowpart to convert to the desired
8237 mode. */
8238 if (rtx_equal_p (newer, x)
8239 /* Likewise if it re-expanded the compound only partially.
8240 This happens for SUBREG of ZERO_EXTRACT if they extract
8241 the same number of bits. */
8242 || (GET_CODE (newer) == SUBREG
8243 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
8244 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
8245 && GET_CODE (inner) == AND
8246 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8247 return gen_lowpart (GET_MODE (x), tem);
8248
8249 return newer;
8250 }
8251
8252 if (simplified)
8253 return tem;
8254 }
8255 break;
8256
8257 default:
8258 break;
8259 }
8260
8261 if (new_rtx)
8262 *x_ptr = gen_lowpart (mode, new_rtx);
8263 *next_code_ptr = next_code;
8264 return NULL_RTX;
8265 }
8266
8267 /* Look at the expression rooted at X. Look for expressions
8268 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
8269 Form these expressions.
8270
8271 Return the new rtx, usually just X.
8272
8273 Also, for machines like the VAX that don't have logical shift insns,
8274 try to convert logical to arithmetic shift operations in cases where
8275 they are equivalent. This undoes the canonicalizations to logical
8276 shifts done elsewhere.
8277
8278 We try, as much as possible, to re-use rtl expressions to save memory.
8279
8280 IN_CODE says what kind of expression we are processing. Normally, it is
8281 SET. In a memory address it is MEM. When processing the arguments of
8282 a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
8283 precisely it is an equality comparison against zero. */
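/* Illustrative sketch, not part of the original commentary: on a target
   with bit-field extraction insns, a masked shift such as

     (and:SI (lshiftrt:SI (reg:SI 100) (const_int 4)) (const_int 255))

   is typically reformed here as

     (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 4))

   i.e. an unsigned 8-bit field starting at bit 4.  */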
8284
8285 rtx
8286 make_compound_operation (rtx x, enum rtx_code in_code)
8287 {
8288 enum rtx_code code = GET_CODE (x);
8289 const char *fmt;
8290 int i, j;
8291 enum rtx_code next_code;
8292 rtx new_rtx, tem;
8293
8294 /* Select the code to be used in recursive calls. Once we are inside an
8295 address, we stay there. If we have a comparison, set to COMPARE,
8296 but once inside, go back to our default of SET. */
8297
8298 next_code = (code == MEM ? MEM
8299 : ((code == COMPARE || COMPARISON_P (x))
8300 && XEXP (x, 1) == const0_rtx) ? COMPARE
8301 : in_code == COMPARE || in_code == EQ ? SET : in_code);
8302
8303 scalar_int_mode mode;
8304 if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
8305 {
8306 rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
8307 &next_code);
8308 if (new_rtx)
8309 return new_rtx;
8310 code = GET_CODE (x);
8311 }
8312
8313 /* Now recursively process each operand of this operation. We need to
8314 handle ZERO_EXTEND specially so that we don't lose track of the
8315 inner mode. */
8316 if (code == ZERO_EXTEND)
8317 {
8318 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8319 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8320 new_rtx, GET_MODE (XEXP (x, 0)));
8321 if (tem)
8322 return tem;
8323 SUBST (XEXP (x, 0), new_rtx);
8324 return x;
8325 }
8326
8327 fmt = GET_RTX_FORMAT (code);
8328 for (i = 0; i < GET_RTX_LENGTH (code); i++)
8329 if (fmt[i] == 'e')
8330 {
8331 new_rtx = make_compound_operation (XEXP (x, i), next_code);
8332 SUBST (XEXP (x, i), new_rtx);
8333 }
8334 else if (fmt[i] == 'E')
8335 for (j = 0; j < XVECLEN (x, i); j++)
8336 {
8337 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8338 SUBST (XVECEXP (x, i, j), new_rtx);
8339 }
8340
8341 maybe_swap_commutative_operands (x);
8342 return x;
8343 }
8344 \f
8345 /* Given M, see if it is a value that would select a field of bits
8346 within an item, but not the entire word. Return -1 if not.
8347 Otherwise, return the starting position of the field, where 0 is the
8348 low-order bit.
8349
8350 *PLEN is set to the length of the field. */
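/* Worked example (an addition, not part of the original comment):
   for M = 0x70 the lowest set bit is at position 4 and
   (M >> 4) + 1 == 8 == 2^3, so *PLEN becomes 3 and 4 is returned;
   for M = 0x50, (M >> 4) + 1 == 6 is not a power of two, so -1 is
   returned.  */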
8351
8352 static int
8353 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8354 {
8355 /* Get the bit number of the first 1 bit from the right, -1 if none. */
8356 int pos = m ? ctz_hwi (m) : -1;
8357 int len = 0;
8358
8359 if (pos >= 0)
8360 /* Now shift off the low-order zero bits and see if we have a
8361 power of two minus 1. */
8362 len = exact_log2 ((m >> pos) + 1);
8363
8364 if (len <= 0)
8365 pos = -1;
8366
8367 *plen = len;
8368 return pos;
8369 }
8370 \f
8371 /* If X refers to a register that equals REG in value, replace these
8372 references with REG. */
8373 static rtx
8374 canon_reg_for_combine (rtx x, rtx reg)
8375 {
8376 rtx op0, op1, op2;
8377 const char *fmt;
8378 int i;
8379 bool copied;
8380
8381 enum rtx_code code = GET_CODE (x);
8382 switch (GET_RTX_CLASS (code))
8383 {
8384 case RTX_UNARY:
8385 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8386 if (op0 != XEXP (x, 0))
8387 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8388 GET_MODE (reg));
8389 break;
8390
8391 case RTX_BIN_ARITH:
8392 case RTX_COMM_ARITH:
8393 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8394 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8395 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8396 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8397 break;
8398
8399 case RTX_COMPARE:
8400 case RTX_COMM_COMPARE:
8401 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8402 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8403 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8404 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8405 GET_MODE (op0), op0, op1);
8406 break;
8407
8408 case RTX_TERNARY:
8409 case RTX_BITFIELD_OPS:
8410 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8411 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8412 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8413 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8414 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8415 GET_MODE (op0), op0, op1, op2);
8416 /* FALLTHRU */
8417
8418 case RTX_OBJ:
8419 if (REG_P (x))
8420 {
8421 if (rtx_equal_p (get_last_value (reg), x)
8422 || rtx_equal_p (reg, get_last_value (x)))
8423 return reg;
8424 else
8425 break;
8426 }
8427
8428 /* fall through */
8429
8430 default:
8431 fmt = GET_RTX_FORMAT (code);
8432 copied = false;
8433 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8434 if (fmt[i] == 'e')
8435 {
8436 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8437 if (op != XEXP (x, i))
8438 {
8439 if (!copied)
8440 {
8441 copied = true;
8442 x = copy_rtx (x);
8443 }
8444 XEXP (x, i) = op;
8445 }
8446 }
8447 else if (fmt[i] == 'E')
8448 {
8449 int j;
8450 for (j = 0; j < XVECLEN (x, i); j++)
8451 {
8452 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8453 if (op != XVECEXP (x, i, j))
8454 {
8455 if (!copied)
8456 {
8457 copied = true;
8458 x = copy_rtx (x);
8459 }
8460 XVECEXP (x, i, j) = op;
8461 }
8462 }
8463 }
8464
8465 break;
8466 }
8467
8468 return x;
8469 }
8470
8471 /* Return X converted to MODE. If the value is already truncated to
8472 MODE we can just return a subreg even though in the general case we
8473 would need an explicit truncation. */
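/* Illustrative sketch (an addition; the exact RTL depends on the target):
   converting (reg:DI 100) to SImode yields a lowpart SUBREG such as
   (subreg:SI (reg:DI 100) 0) when SImode truncation of DImode values is
   a no-op or the register is known to be already truncated, and an
   explicit (truncate:SI (reg:DI 100)) otherwise.  */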
8474
8475 static rtx
8476 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8477 {
8478 if (!CONST_INT_P (x)
8479 && partial_subreg_p (mode, GET_MODE (x))
8480 && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8481 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8482 {
8483 /* Bit-cast X into an integer mode. */
8484 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8485 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
8486 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
8487 x, GET_MODE (x));
8488 }
8489
8490 return gen_lowpart (mode, x);
8491 }
8492
8493 /* See if X can be simplified knowing that we will only refer to it in
8494 MODE and will only refer to those bits that are nonzero in MASK.
8495 If other bits are being computed or if masking operations are done
8496 that select a superset of the bits in MASK, they can sometimes be
8497 ignored.
8498
8499 Return a possibly simplified expression, but always convert X to
8500 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
8501
8502 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8503 are all off in X. This is used when X will be complemented, by either
8504 NOT, NEG, or XOR. */
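/* Illustrative examples (additions, not part of the original comment):
   with MODE == QImode and MASK == 0xff, a CONST_INT of 0x1234 is
   reduced to (const_int 0x34); with MASK == 0x0f, an expression such as
   (and:SI (reg:SI 100) (const_int 255)) can be simplified to just
   (reg:SI 100), since the AND does not change any of the bits the
   caller will look at.  */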
8505
8506 static rtx
8507 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8508 int just_select)
8509 {
8510 enum rtx_code code = GET_CODE (x);
8511 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8512 machine_mode op_mode;
8513 unsigned HOST_WIDE_INT nonzero;
8514
8515 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
8516 code below will do the wrong thing since the mode of such an
8517 expression is VOIDmode.
8518
8519 Also do nothing if X is a CLOBBER; this can happen if X was
8520 the return value from a call to gen_lowpart. */
8521 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8522 return x;
8523
8524 /* We want to perform the operation in its present mode unless we know
8525 that the operation is valid in MODE, in which case we do the operation
8526 in MODE. */
8527 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8528 && have_insn_for (code, mode))
8529 ? mode : GET_MODE (x));
8530
8531 /* It is not valid to do a right-shift in a narrower mode
8532 than the one it came in with. */
8533 if ((code == LSHIFTRT || code == ASHIFTRT)
8534 && partial_subreg_p (mode, GET_MODE (x)))
8535 op_mode = GET_MODE (x);
8536
8537 /* Truncate MASK to fit OP_MODE. */
8538 if (op_mode)
8539 mask &= GET_MODE_MASK (op_mode);
8540
8541 /* Determine what bits of X are guaranteed to be (non)zero. */
8542 nonzero = nonzero_bits (x, mode);
8543
8544 /* If none of the bits in X are needed, return a zero. */
8545 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8546 x = const0_rtx;
8547
8548 /* If X is a CONST_INT, return a new one. Do this here since the
8549 test below will fail. */
8550 if (CONST_INT_P (x))
8551 {
8552 if (SCALAR_INT_MODE_P (mode))
8553 return gen_int_mode (INTVAL (x) & mask, mode);
8554 else
8555 {
8556 x = GEN_INT (INTVAL (x) & mask);
8557 return gen_lowpart_common (mode, x);
8558 }
8559 }
8560
8561 /* If X is narrower than MODE and we want all the bits in X's mode, just
8562 get X in the proper mode. */
8563 if (paradoxical_subreg_p (mode, GET_MODE (x))
8564 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8565 return gen_lowpart (mode, x);
8566
8567 /* We can ignore the effect of a SUBREG if it narrows the mode or
8568 if the constant masks to zero all the bits the mode doesn't have. */
8569 if (GET_CODE (x) == SUBREG
8570 && subreg_lowpart_p (x)
8571 && (partial_subreg_p (x)
8572 || (0 == (mask
8573 & GET_MODE_MASK (GET_MODE (x))
8574 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
8575 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8576
8577 scalar_int_mode int_mode, xmode;
8578 if (is_a <scalar_int_mode> (mode, &int_mode)
8579 && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
8580 /* OP_MODE is either MODE or XMODE, so it must be a scalar
8581 integer too. */
8582 return force_int_to_mode (x, int_mode, xmode,
8583 as_a <scalar_int_mode> (op_mode),
8584 mask, just_select);
8585
8586 return gen_lowpart_or_truncate (mode, x);
8587 }
8588
8589 /* Subroutine of force_to_mode that handles cases in which both X and
8590 the result are scalar integers. MODE is the mode of the result,
8591 XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
8592 is preferred for simplified versions of X. The other arguments
8593 are as for force_to_mode. */
8594
8595 static rtx
8596 force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
8597 scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
8598 int just_select)
8599 {
8600 enum rtx_code code = GET_CODE (x);
8601 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8602 unsigned HOST_WIDE_INT fuller_mask;
8603 rtx op0, op1, temp;
8604
8605 /* When we have an arithmetic operation, or a shift whose count we
8606 do not know, we need to assume that all bits up to the highest-order
8607 bit in MASK will be needed. This is how we form such a mask. */
8608 if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
8609 fuller_mask = HOST_WIDE_INT_M1U;
8610 else
8611 fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
8612 - 1);
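  /* For example (illustrative): a MASK of 0x14 has its highest set bit
     at position 4, so FULLER_MASK becomes (1 << 5) - 1 == 0x1f, i.e.
     all bits up to and including that position.  */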
8613
8614 switch (code)
8615 {
8616 case CLOBBER:
8617 /* If X is a (clobber (const_int)), return it since we know we are
8618 generating something that won't match. */
8619 return x;
8620
8621 case SIGN_EXTEND:
8622 case ZERO_EXTEND:
8623 case ZERO_EXTRACT:
8624 case SIGN_EXTRACT:
8625 x = expand_compound_operation (x);
8626 if (GET_CODE (x) != code)
8627 return force_to_mode (x, mode, mask, next_select);
8628 break;
8629
8630 case TRUNCATE:
8631 /* Similarly for a truncate. */
8632 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8633
8634 case AND:
8635 /* If this is an AND with a constant, convert it into an AND
8636 whose constant is the AND of that constant with MASK. If it
8637 remains an AND of MASK, delete it since it is redundant. */
8638
8639 if (CONST_INT_P (XEXP (x, 1)))
8640 {
8641 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8642 mask & INTVAL (XEXP (x, 1)));
8643 xmode = op_mode;
8644
8645 /* If X is still an AND, see if it is an AND with a mask that
8646 is just some low-order bits. If so, and it is MASK, we don't
8647 need it. */
8648
8649 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8650 && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
8651 x = XEXP (x, 0);
8652
8653 /* If it remains an AND, try making another AND with the bits
8654 in the mode mask that aren't in MASK turned on. If the
8655 constant in the AND is wide enough, this might make a
8656 cheaper constant. */
8657
8658 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8659 && GET_MODE_MASK (xmode) != mask
8660 && HWI_COMPUTABLE_MODE_P (xmode))
8661 {
8662 unsigned HOST_WIDE_INT cval
8663 = UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
8664 rtx y;
8665
8666 y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
8667 gen_int_mode (cval, xmode));
8668 if (set_src_cost (y, xmode, optimize_this_for_speed_p)
8669 < set_src_cost (x, xmode, optimize_this_for_speed_p))
8670 x = y;
8671 }
8672
8673 break;
8674 }
8675
8676 goto binop;
8677
8678 case PLUS:
8679 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8680 low-order bits (as in an alignment operation) and FOO is already
8681 aligned to that boundary, mask C1 to that boundary as well.
8682 This may eliminate that PLUS and, later, the AND. */
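      /* Illustrative sketch (an addition, not part of the original
         comment): with M == 0xfffffff8 (an 8-byte alignment mask), FOO
         known to have its low three bits clear and C1 == 12, the
         constant is masked down to 8, so (and (plus FOO 12) M)
         effectively becomes (and (plus FOO 8) M).  */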
8683
8684 {
8685 unsigned int width = GET_MODE_PRECISION (mode);
8686 unsigned HOST_WIDE_INT smask = mask;
8687
8688 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8689 number, sign extend it. */
8690
8691 if (width < HOST_BITS_PER_WIDE_INT
8692 && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8693 smask |= HOST_WIDE_INT_M1U << width;
8694
8695 if (CONST_INT_P (XEXP (x, 1))
8696 && pow2p_hwi (- smask)
8697 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8698 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8699 return force_to_mode (plus_constant (xmode, XEXP (x, 0),
8700 (INTVAL (XEXP (x, 1)) & smask)),
8701 mode, smask, next_select);
8702 }
8703
8704 /* fall through */
8705
8706 case MULT:
8707 /* Substituting into the operands of a widening MULT is not likely to
8708 create RTL matching a machine insn. */
8709 if (code == MULT
8710 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
8711 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
8712 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
8713 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
8714 && REG_P (XEXP (XEXP (x, 0), 0))
8715 && REG_P (XEXP (XEXP (x, 1), 0)))
8716 return gen_lowpart_or_truncate (mode, x);
8717
8718 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8719 most significant bit in MASK since carries from those bits will
8720 affect the bits we are interested in. */
8721 mask = fuller_mask;
8722 goto binop;
8723
8724 case MINUS:
8725 /* If X is (minus C Y) where C's least set bit is larger than any bit
8726 in the mask, then we may replace with (neg Y). */
8727 if (CONST_INT_P (XEXP (x, 0))
8728 && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
8729 {
8730 x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
8731 return force_to_mode (x, mode, mask, next_select);
8732 }
8733
8734 /* Similarly, if C contains every bit in the fuller_mask, then we may
8735 replace with (not Y). */
8736 if (CONST_INT_P (XEXP (x, 0))
8737 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8738 {
8739 x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
8740 return force_to_mode (x, mode, mask, next_select);
8741 }
8742
8743 mask = fuller_mask;
8744 goto binop;
8745
8746 case IOR:
8747 case XOR:
8748 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8749 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8750 operation which may be a bitfield extraction. Ensure that the
8751 constant we form is not wider than the mode of X. */
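      /* Illustrative sketch (an addition): assuming MASK covers the low
         bits, (ior (lshiftrt FOO (const_int 8)) (const_int 1)) is
         rewritten as (lshiftrt (ior FOO (const_int 256)) (const_int 8)),
         i.e. the constant is shifted left by C1 before being merged
         into FOO.  */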
8752
8753 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8754 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8755 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8756 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8757 && CONST_INT_P (XEXP (x, 1))
8758 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8759 + floor_log2 (INTVAL (XEXP (x, 1))))
8760 < GET_MODE_PRECISION (xmode))
8761 && (UINTVAL (XEXP (x, 1))
8762 & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
8763 {
8764 temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8765 << INTVAL (XEXP (XEXP (x, 0), 1)),
8766 xmode);
8767 temp = simplify_gen_binary (GET_CODE (x), xmode,
8768 XEXP (XEXP (x, 0), 0), temp);
8769 x = simplify_gen_binary (LSHIFTRT, xmode, temp,
8770 XEXP (XEXP (x, 0), 1));
8771 return force_to_mode (x, mode, mask, next_select);
8772 }
8773
8774 binop:
8775 /* For most binary operations, just propagate into the operation and
8776 change the mode if we have an operation of that mode. */
8777
8778 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8779 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8780
8781 /* If we ended up truncating both operands, truncate the result of the
8782 operation instead. */
8783 if (GET_CODE (op0) == TRUNCATE
8784 && GET_CODE (op1) == TRUNCATE)
8785 {
8786 op0 = XEXP (op0, 0);
8787 op1 = XEXP (op1, 0);
8788 }
8789
8790 op0 = gen_lowpart_or_truncate (op_mode, op0);
8791 op1 = gen_lowpart_or_truncate (op_mode, op1);
8792
8793 if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8794 {
8795 x = simplify_gen_binary (code, op_mode, op0, op1);
8796 xmode = op_mode;
8797 }
8798 break;
8799
8800 case ASHIFT:
8801 /* For left shifts, do the same, but just for the first operand.
8802 However, we cannot do anything with shifts where we cannot
8803 guarantee that the counts are smaller than the size of the mode
8804 because such a count will have a different meaning in a
8805 wider mode. */
8806
8807 if (! (CONST_INT_P (XEXP (x, 1))
8808 && INTVAL (XEXP (x, 1)) >= 0
8809 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
8810 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8811 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8812 < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
8813 break;
8814
8815 /* If the shift count is a constant and we can do arithmetic in
8816 the mode of the shift, refine which bits we need. Otherwise, use the
8817 conservative form of the mask. */
8818 if (CONST_INT_P (XEXP (x, 1))
8819 && INTVAL (XEXP (x, 1)) >= 0
8820 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
8821 && HWI_COMPUTABLE_MODE_P (op_mode))
8822 mask >>= INTVAL (XEXP (x, 1));
8823 else
8824 mask = fuller_mask;
8825
8826 op0 = gen_lowpart_or_truncate (op_mode,
8827 force_to_mode (XEXP (x, 0), op_mode,
8828 mask, next_select));
8829
8830 if (op_mode != xmode || op0 != XEXP (x, 0))
8831 {
8832 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8833 xmode = op_mode;
8834 }
8835 break;
8836
8837 case LSHIFTRT:
8838 /* Here we can only do something if the shift count is a constant,
8839 this shift constant is valid for the host, and we can do arithmetic
8840 in OP_MODE. */
8841
8842 if (CONST_INT_P (XEXP (x, 1))
8843 && INTVAL (XEXP (x, 1)) >= 0
8844 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8845 && HWI_COMPUTABLE_MODE_P (op_mode))
8846 {
8847 rtx inner = XEXP (x, 0);
8848 unsigned HOST_WIDE_INT inner_mask;
8849
8850 /* Select the mask of the bits we need for the shift operand. */
8851 inner_mask = mask << INTVAL (XEXP (x, 1));
8852
8853 /* We can only change the mode of the shift if we can do arithmetic
8854 in the mode of the shift and INNER_MASK is no wider than the
8855 width of X's mode. */
8856 if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
8857 op_mode = xmode;
8858
8859 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8860
8861 if (xmode != op_mode || inner != XEXP (x, 0))
8862 {
8863 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8864 xmode = op_mode;
8865 }
8866 }
8867
8868 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8869 shift and AND produces only copies of the sign bit (C2 is one less
8870 than a power of two), we can do this with just a shift. */
8871
8872 if (GET_CODE (x) == LSHIFTRT
8873 && CONST_INT_P (XEXP (x, 1))
8874 /* The shift puts one of the sign bit copies in the least significant
8875 bit. */
8876 && ((INTVAL (XEXP (x, 1))
8877 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8878 >= GET_MODE_PRECISION (xmode))
8879 && pow2p_hwi (mask + 1)
8880 /* Number of bits left after the shift must be more than the mask
8881 needs. */
8882 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8883 <= GET_MODE_PRECISION (xmode))
8884 /* Must be more sign bit copies than the mask needs. */
8885 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
8886 >= exact_log2 (mask + 1)))
8887 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
8888 GEN_INT (GET_MODE_PRECISION (xmode)
8889 - exact_log2 (mask + 1)));
8890
8891 goto shiftrt;
8892
8893 case ASHIFTRT:
8894 /* If we are just looking for the sign bit, we don't need this shift at
8895 all, even if it has a variable count. */
8896 if (val_signbit_p (xmode, mask))
8897 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8898
8899 /* If this is a shift by a constant, get a mask that contains those bits
8900 that are not copies of the sign bit. We then have two cases: If
8901 MASK only includes those bits, this can be a logical shift, which may
8902 allow simplifications. If MASK is a single-bit field not within
8903 those bits, we are requesting a copy of the sign bit and hence can
8904 shift the sign bit to the appropriate location. */
8905
8906 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
8907 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
8908 {
8909 unsigned HOST_WIDE_INT nonzero;
8910 int i;
8911
8912 /* If the considered data is wider than HOST_WIDE_INT, we can't
8913 represent a mask for all its bits in a single scalar.
8914 But we only care about the lower bits, so calculate these. */
8915
8916 if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
8917 {
8918 nonzero = HOST_WIDE_INT_M1U;
8919
8920 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
8921 is the number of bits a full-width mask would have set.
8922 We need only shift if these are fewer than nonzero can
8923 hold. If not, we must keep all bits set in nonzero. */
8924
8925 if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
8926 < HOST_BITS_PER_WIDE_INT)
8927 nonzero >>= INTVAL (XEXP (x, 1))
8928 + HOST_BITS_PER_WIDE_INT
8929 - GET_MODE_PRECISION (xmode);
8930 }
8931 else
8932 {
8933 nonzero = GET_MODE_MASK (xmode);
8934 nonzero >>= INTVAL (XEXP (x, 1));
8935 }
8936
8937 if ((mask & ~nonzero) == 0)
8938 {
8939 x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
8940 XEXP (x, 0), INTVAL (XEXP (x, 1)));
8941 if (GET_CODE (x) != ASHIFTRT)
8942 return force_to_mode (x, mode, mask, next_select);
8943 }
8944
8945 else if ((i = exact_log2 (mask)) >= 0)
8946 {
8947 x = simplify_shift_const
8948 (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
8949 GET_MODE_PRECISION (xmode) - 1 - i);
8950
8951 if (GET_CODE (x) != ASHIFTRT)
8952 return force_to_mode (x, mode, mask, next_select);
8953 }
8954 }
8955
8956 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
8957 even if the shift count isn't a constant. */
8958 if (mask == 1)
8959 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));
8960
8961 shiftrt:
8962
8963 /* If this is a zero- or sign-extension operation that just affects bits
8964 we don't care about, remove it. Be sure the call above returned
8965 something that is still a shift. */
8966
8967 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
8968 && CONST_INT_P (XEXP (x, 1))
8969 && INTVAL (XEXP (x, 1)) >= 0
8970 && (INTVAL (XEXP (x, 1))
8971 <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
8972 && GET_CODE (XEXP (x, 0)) == ASHIFT
8973 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
8974 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
8975 next_select);
8976
8977 break;
8978
8979 case ROTATE:
8980 case ROTATERT:
8981 /* If the shift count is constant and we can do computations
8982 in the mode of X, compute where the bits we care about are.
8983 Otherwise, we can't do anything. Don't change the mode of
8984 the shift or propagate MODE into the shift, though. */
8985 if (CONST_INT_P (XEXP (x, 1))
8986 && INTVAL (XEXP (x, 1)) >= 0)
8987 {
8988 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
8989 xmode, gen_int_mode (mask, xmode),
8990 XEXP (x, 1));
8991 if (temp && CONST_INT_P (temp))
8992 x = simplify_gen_binary (code, xmode,
8993 force_to_mode (XEXP (x, 0), xmode,
8994 INTVAL (temp), next_select),
8995 XEXP (x, 1));
8996 }
8997 break;
8998
8999 case NEG:
9000 /* If we just want the low-order bit, the NEG isn't needed since it
9001 won't change the low-order bit. */
9002 if (mask == 1)
9003 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
9004
9005 /* We need any bits less significant than the most significant bit in
9006 MASK since carries from those bits will affect the bits we are
9007 interested in. */
9008 mask = fuller_mask;
9009 goto unop;
9010
9011 case NOT:
9012 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
9013 same as the XOR case above. Ensure that the constant we form is not
9014 wider than the mode of X. */
9015
9016 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
9017 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9018 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
9019 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
9020 < GET_MODE_PRECISION (xmode))
9021 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
9022 {
9023 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
9024 temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
9025 x = simplify_gen_binary (LSHIFTRT, xmode,
9026 temp, XEXP (XEXP (x, 0), 1));
9027
9028 return force_to_mode (x, mode, mask, next_select);
9029 }
9030
9031 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
9032 use the full mask inside the NOT. */
9033 mask = fuller_mask;
9034
9035 unop:
9036 op0 = gen_lowpart_or_truncate (op_mode,
9037 force_to_mode (XEXP (x, 0), mode, mask,
9038 next_select));
9039 if (op_mode != xmode || op0 != XEXP (x, 0))
9040 {
9041 x = simplify_gen_unary (code, op_mode, op0, op_mode);
9042 xmode = op_mode;
9043 }
9044 break;
9045
9046 case NE:
9047 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
9048 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
9049 which is equal to STORE_FLAG_VALUE. */
9050 if ((mask & ~STORE_FLAG_VALUE) == 0
9051 && XEXP (x, 1) == const0_rtx
9052 && GET_MODE (XEXP (x, 0)) == mode
9053 && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
9054 && (nonzero_bits (XEXP (x, 0), mode)
9055 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
9056 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9057
9058 break;
9059
9060 case IF_THEN_ELSE:
9061 /* We have no way of knowing if the IF_THEN_ELSE can itself be
9062 written in a narrower mode. We play it safe and do not do so. */
9063
9064 op0 = gen_lowpart_or_truncate (xmode,
9065 force_to_mode (XEXP (x, 1), mode,
9066 mask, next_select));
9067 op1 = gen_lowpart_or_truncate (xmode,
9068 force_to_mode (XEXP (x, 2), mode,
9069 mask, next_select));
9070 if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
9071 x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
9072 GET_MODE (XEXP (x, 0)), XEXP (x, 0),
9073 op0, op1);
9074 break;
9075
9076 default:
9077 break;
9078 }
9079
9080 /* Ensure we return a value of the proper mode. */
9081 return gen_lowpart_or_truncate (mode, x);
9082 }
9083 \f
9084 /* Return nonzero if X is an expression that has one of two values depending on
9085 whether some other value is zero or nonzero. In that case, we return the
9086 value that is being tested, *PTRUE is set to the value if the rtx being
9087 returned has a nonzero value, and *PFALSE is set to the other alternative.
9088
9089 If we return zero, we set *PTRUE and *PFALSE to X. */
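/* Illustrative example (an addition, not part of the original comment):
   for X == (ne:SI (reg:SI 100) (const_int 0)) this returns
   (reg:SI 100) with *PTRUE set to const_true_rtx and *PFALSE set to
   const0_rtx; for a plain (reg:SI 100) whose value is not known to be
   0/-1 or a single bit, it returns zero and sets both *PTRUE and
   *PFALSE to the register itself.  */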
9090
9091 static rtx
9092 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
9093 {
9094 machine_mode mode = GET_MODE (x);
9095 enum rtx_code code = GET_CODE (x);
9096 rtx cond0, cond1, true0, true1, false0, false1;
9097 unsigned HOST_WIDE_INT nz;
9098 scalar_int_mode int_mode;
9099
9100 /* If we are comparing a value against zero, we are done. */
9101 if ((code == NE || code == EQ)
9102 && XEXP (x, 1) == const0_rtx)
9103 {
9104 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
9105 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
9106 return XEXP (x, 0);
9107 }
9108
9109 /* If this is a unary operation whose operand has one of two values, apply
9110 our opcode to compute those values. */
9111 else if (UNARY_P (x)
9112 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
9113 {
9114 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
9115 *pfalse = simplify_gen_unary (code, mode, false0,
9116 GET_MODE (XEXP (x, 0)));
9117 return cond0;
9118 }
9119
9120 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
9121 make can't possibly match and would suppress other optimizations. */
9122 else if (code == COMPARE)
9123 ;
9124
9125 /* If this is a binary operation, see if either side has only one of two
9126 values. If either one does or if both do and they are conditional on
9127 the same value, compute the new true and false values. */
9128 else if (BINARY_P (x))
9129 {
9130 rtx op0 = XEXP (x, 0);
9131 rtx op1 = XEXP (x, 1);
9132 cond0 = if_then_else_cond (op0, &true0, &false0);
9133 cond1 = if_then_else_cond (op1, &true1, &false1);
9134
9135 if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
9136 && (REG_P (op0) || REG_P (op1)))
9137 {
9138 /* Try to enable a simplification by undoing work done by
9139 if_then_else_cond if it converted a REG into something more
9140 complex. */
9141 if (REG_P (op0))
9142 {
9143 cond0 = 0;
9144 true0 = false0 = op0;
9145 }
9146 else
9147 {
9148 cond1 = 0;
9149 true1 = false1 = op1;
9150 }
9151 }
9152
9153 if ((cond0 != 0 || cond1 != 0)
9154 && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
9155 {
9156 /* If if_then_else_cond returned zero, then true/false are the
9157 same rtl. We must copy one of them to prevent invalid rtl
9158 sharing. */
9159 if (cond0 == 0)
9160 true0 = copy_rtx (true0);
9161 else if (cond1 == 0)
9162 true1 = copy_rtx (true1);
9163
9164 if (COMPARISON_P (x))
9165 {
9166 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
9167 true0, true1);
9168 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
9169 false0, false1);
9170 }
9171 else
9172 {
9173 *ptrue = simplify_gen_binary (code, mode, true0, true1);
9174 *pfalse = simplify_gen_binary (code, mode, false0, false1);
9175 }
9176
9177 return cond0 ? cond0 : cond1;
9178 }
9179
9180 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
9181 operands is zero when the other is nonzero, and vice-versa,
9182 and STORE_FLAG_VALUE is 1 or -1. */
9183
9184 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9185 && (code == PLUS || code == IOR || code == XOR || code == MINUS
9186 || code == UMAX)
9187 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9188 {
9189 rtx op0 = XEXP (XEXP (x, 0), 1);
9190 rtx op1 = XEXP (XEXP (x, 1), 1);
9191
9192 cond0 = XEXP (XEXP (x, 0), 0);
9193 cond1 = XEXP (XEXP (x, 1), 0);
9194
9195 if (COMPARISON_P (cond0)
9196 && COMPARISON_P (cond1)
9197 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9198 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9199 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9200 || ((swap_condition (GET_CODE (cond0))
9201 == reversed_comparison_code (cond1, NULL))
9202 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9203 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9204 && ! side_effects_p (x))
9205 {
9206 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
9207 *pfalse = simplify_gen_binary (MULT, mode,
9208 (code == MINUS
9209 ? simplify_gen_unary (NEG, mode,
9210 op1, mode)
9211 : op1),
9212 const_true_rtx);
9213 return cond0;
9214 }
9215 }
9216
9217 /* Similarly for MULT, AND and UMIN, except that for these the result
9218 is always zero. */
9219 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9220 && (code == MULT || code == AND || code == UMIN)
9221 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9222 {
9223 cond0 = XEXP (XEXP (x, 0), 0);
9224 cond1 = XEXP (XEXP (x, 1), 0);
9225
9226 if (COMPARISON_P (cond0)
9227 && COMPARISON_P (cond1)
9228 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9229 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9230 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9231 || ((swap_condition (GET_CODE (cond0))
9232 == reversed_comparison_code (cond1, NULL))
9233 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9234 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9235 && ! side_effects_p (x))
9236 {
9237 *ptrue = *pfalse = const0_rtx;
9238 return cond0;
9239 }
9240 }
9241 }
9242
9243 else if (code == IF_THEN_ELSE)
9244 {
9245 /* If we have IF_THEN_ELSE already, extract the condition and
9246 canonicalize it if it is NE or EQ. */
9247 cond0 = XEXP (x, 0);
9248 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
9249 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
9250 return XEXP (cond0, 0);
9251 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
9252 {
9253 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
9254 return XEXP (cond0, 0);
9255 }
9256 else
9257 return cond0;
9258 }
9259
9260 /* If X is a SUBREG, we can narrow both the true and false values
9261    of the inner expression, if there is a condition. */
9262 else if (code == SUBREG
9263 && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
9264 &true0, &false0)))
9265 {
9266 true0 = simplify_gen_subreg (mode, true0,
9267 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9268 false0 = simplify_gen_subreg (mode, false0,
9269 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9270 if (true0 && false0)
9271 {
9272 *ptrue = true0;
9273 *pfalse = false0;
9274 return cond0;
9275 }
9276 }
9277
9278 /* If X is a constant, this isn't special and will cause confusions
9279 if we treat it as such. Likewise if it is equivalent to a constant. */
9280 else if (CONSTANT_P (x)
9281 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
9282 ;
9283
9284 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9285 will be least confusing to the rest of the compiler. */
9286 else if (mode == BImode)
9287 {
9288 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
9289 return x;
9290 }
9291
9292 /* If X is known to be either 0 or -1, those are the true and
9293 false values when testing X. */
9294 else if (x == constm1_rtx || x == const0_rtx
9295 || (is_a <scalar_int_mode> (mode, &int_mode)
9296 && (num_sign_bit_copies (x, int_mode)
9297 == GET_MODE_PRECISION (int_mode))))
9298 {
9299 *ptrue = constm1_rtx, *pfalse = const0_rtx;
9300 return x;
9301 }
9302
9303 /* Likewise for 0 or a single bit. */
9304 else if (HWI_COMPUTABLE_MODE_P (mode)
9305 && pow2p_hwi (nz = nonzero_bits (x, mode)))
9306 {
9307 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
9308 return x;
9309 }
9310
9311 /* Otherwise fail; show no condition with true and false values the same. */
9312 *ptrue = *pfalse = x;
9313 return 0;
9314 }
9315 \f
9316 /* Return the value of expression X given the fact that condition COND
9317 is known to be true when applied to REG as its first operand and VAL
9318 as its second. X is known to not be shared and so can be modified in
9319 place.
9320
9321 We only handle the simplest cases, and specifically those cases that
9322 arise with IF_THEN_ELSE expressions. */
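/* Illustrative example (an addition): if COND is GE, REG is
   (reg:SI 100) and VAL is (const_int 0), then X == (abs:SI (reg:SI 100))
   simplifies to (reg:SI 100), since the operand is known to be
   non-negative; with COND == LT it would instead become
   (neg:SI (reg:SI 100)).  */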
9323
9324 static rtx
9325 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9326 {
9327 enum rtx_code code = GET_CODE (x);
9328 const char *fmt;
9329 int i, j;
9330
9331 if (side_effects_p (x))
9332 return x;
9333
9334 /* If either operand of the condition is a floating point value,
9335 then we have to avoid collapsing an EQ comparison. */
9336 if (cond == EQ
9337 && rtx_equal_p (x, reg)
9338 && ! FLOAT_MODE_P (GET_MODE (x))
9339 && ! FLOAT_MODE_P (GET_MODE (val)))
9340 return val;
9341
9342 if (cond == UNEQ && rtx_equal_p (x, reg))
9343 return val;
9344
9345 /* If X is (abs REG) and we know something about REG's relationship
9346 with zero, we may be able to simplify this. */
9347
9348 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9349 switch (cond)
9350 {
9351 case GE: case GT: case EQ:
9352 return XEXP (x, 0);
9353 case LT: case LE:
9354 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9355 XEXP (x, 0),
9356 GET_MODE (XEXP (x, 0)));
9357 default:
9358 break;
9359 }
9360
9361 /* The only other cases we handle are MIN, MAX, and comparisons if the
9362 operands are the same as REG and VAL. */
9363
9364 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9365 {
9366 if (rtx_equal_p (XEXP (x, 0), val))
9367 {
9368 std::swap (val, reg);
9369 cond = swap_condition (cond);
9370 }
9371
9372 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9373 {
9374 if (COMPARISON_P (x))
9375 {
9376 if (comparison_dominates_p (cond, code))
9377 return const_true_rtx;
9378
9379 code = reversed_comparison_code (x, NULL);
9380 if (code != UNKNOWN
9381 && comparison_dominates_p (cond, code))
9382 return const0_rtx;
9383 else
9384 return x;
9385 }
9386 else if (code == SMAX || code == SMIN
9387 || code == UMIN || code == UMAX)
9388 {
9389 int unsignedp = (code == UMIN || code == UMAX);
9390
9391 /* Do not reverse the condition when it is NE or EQ.
9392 This is because we cannot conclude anything about
9393 the value of 'SMAX (x, y)' when x is not equal to y,
9394 but we can when x equals y. */
9395 if ((code == SMAX || code == UMAX)
9396 && ! (cond == EQ || cond == NE))
9397 cond = reverse_condition (cond);
9398
9399 switch (cond)
9400 {
9401 case GE: case GT:
9402 return unsignedp ? x : XEXP (x, 1);
9403 case LE: case LT:
9404 return unsignedp ? x : XEXP (x, 0);
9405 case GEU: case GTU:
9406 return unsignedp ? XEXP (x, 1) : x;
9407 case LEU: case LTU:
9408 return unsignedp ? XEXP (x, 0) : x;
9409 default:
9410 break;
9411 }
9412 }
9413 }
9414 }
9415 else if (code == SUBREG)
9416 {
9417 machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9418 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9419
9420 if (SUBREG_REG (x) != r)
9421 {
9422 /* We must simplify subreg here, before we lose track of the
9423 original inner_mode. */
9424 new_rtx = simplify_subreg (GET_MODE (x), r,
9425 inner_mode, SUBREG_BYTE (x));
9426 if (new_rtx)
9427 return new_rtx;
9428 else
9429 SUBST (SUBREG_REG (x), r);
9430 }
9431
9432 return x;
9433 }
9434 /* We don't have to handle SIGN_EXTEND here, because even in the
9435 case of replacing something with a modeless CONST_INT, a
9436 CONST_INT is already (supposed to be) a valid sign extension for
9437 its narrower mode, which implies it's already properly
9438 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9439 story is different. */
9440 else if (code == ZERO_EXTEND)
9441 {
9442 machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9443 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9444
9445 if (XEXP (x, 0) != r)
9446 {
9447 /* We must simplify the zero_extend here, before we lose
9448 track of the original inner_mode. */
9449 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9450 r, inner_mode);
9451 if (new_rtx)
9452 return new_rtx;
9453 else
9454 SUBST (XEXP (x, 0), r);
9455 }
9456
9457 return x;
9458 }
9459
9460 fmt = GET_RTX_FORMAT (code);
9461 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9462 {
9463 if (fmt[i] == 'e')
9464 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9465 else if (fmt[i] == 'E')
9466 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9467 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9468 cond, reg, val));
9469 }
9470
9471 return x;
9472 }
9473 \f
9474 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9475 assignment as a field assignment. */
9476
9477 static int
9478 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9479 {
9480 if (widen_x && GET_MODE (x) != GET_MODE (y))
9481 {
9482 if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
9483 return 0;
9484 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9485 return 0;
9486 /* For big endian, adjust the memory offset. */
9487 if (BYTES_BIG_ENDIAN)
9488 x = adjust_address_nv (x, GET_MODE (y),
9489 -subreg_lowpart_offset (GET_MODE (x),
9490 GET_MODE (y)));
9491 else
9492 x = adjust_address_nv (x, GET_MODE (y), 0);
9493 }
9494
9495 if (x == y || rtx_equal_p (x, y))
9496 return 1;
9497
9498 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9499 return 0;
9500
9501 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9502 Note that all SUBREGs of MEM are paradoxical; otherwise they
9503 would have been rewritten. */
9504 if (MEM_P (x) && GET_CODE (y) == SUBREG
9505 && MEM_P (SUBREG_REG (y))
9506 && rtx_equal_p (SUBREG_REG (y),
9507 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9508 return 1;
9509
9510 if (MEM_P (y) && GET_CODE (x) == SUBREG
9511 && MEM_P (SUBREG_REG (x))
9512 && rtx_equal_p (SUBREG_REG (x),
9513 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9514 return 1;
9515
9516 /* We used to see if get_last_value of X and Y were the same but that's
9517 not correct. In one direction, we'll cause the assignment to have
9518    the wrong destination and in the other case, we'll import a register into
9519    this insn that might already have been dead.  So fail if none of the
9520 above cases are true. */
9521 return 0;
9522 }
9523 \f
9524 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9525 Return that assignment if so.
9526
9527 We only handle the most common cases. */
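/* Illustrative sketch (an addition; the exact form depends on
   make_extraction and the target): a one-bit set such as

     (set (reg:SI 100)
          (ior:SI (ashift:SI (const_int 1) (reg:SI 101)) (reg:SI 100)))

   is typically rewritten as

     (set (zero_extract:SI (reg:SI 100) (const_int 1) (reg:SI 101))
          (const_int 1)).  */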
9528
9529 static rtx
9530 make_field_assignment (rtx x)
9531 {
9532 rtx dest = SET_DEST (x);
9533 rtx src = SET_SRC (x);
9534 rtx assign;
9535 rtx rhs, lhs;
9536 HOST_WIDE_INT c1;
9537 HOST_WIDE_INT pos;
9538 unsigned HOST_WIDE_INT len;
9539 rtx other;
9540
9541 /* All the rules in this function are specific to scalar integers. */
9542 scalar_int_mode mode;
9543 if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
9544 return x;
9545
9546 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9547 a clear of a one-bit field. We will have changed it to
9548 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9549 for a SUBREG. */
9550
9551 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9552 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9553 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9554 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9555 {
9556 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9557 1, 1, 1, 0);
9558 if (assign != 0)
9559 return gen_rtx_SET (assign, const0_rtx);
9560 return x;
9561 }
9562
9563 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9564 && subreg_lowpart_p (XEXP (src, 0))
9565 && partial_subreg_p (XEXP (src, 0))
9566 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9567 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9568 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9569 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9570 {
9571 assign = make_extraction (VOIDmode, dest, 0,
9572 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9573 1, 1, 1, 0);
9574 if (assign != 0)
9575 return gen_rtx_SET (assign, const0_rtx);
9576 return x;
9577 }
9578
9579 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9580 one-bit field. */
9581 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9582 && XEXP (XEXP (src, 0), 0) == const1_rtx
9583 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9584 {
9585 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9586 1, 1, 1, 0);
9587 if (assign != 0)
9588 return gen_rtx_SET (assign, const1_rtx);
9589 return x;
9590 }
9591
9592 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9593 SRC is an AND with all bits of that field set, then we can discard
9594 the AND. */
9595 if (GET_CODE (dest) == ZERO_EXTRACT
9596 && CONST_INT_P (XEXP (dest, 1))
9597 && GET_CODE (src) == AND
9598 && CONST_INT_P (XEXP (src, 1)))
9599 {
9600 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9601 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9602 unsigned HOST_WIDE_INT ze_mask;
9603
9604 if (width >= HOST_BITS_PER_WIDE_INT)
9605 ze_mask = -1;
9606 else
9607 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9608
9609 /* Complete overlap. We can remove the source AND. */
9610 if ((and_mask & ze_mask) == ze_mask)
9611 return gen_rtx_SET (dest, XEXP (src, 0));
9612
9613 /* Partial overlap. We can reduce the source AND. */
9614 if ((and_mask & ze_mask) != and_mask)
9615 {
9616 src = gen_rtx_AND (mode, XEXP (src, 0),
9617 gen_int_mode (and_mask & ze_mask, mode));
9618 return gen_rtx_SET (dest, src);
9619 }
9620 }
9621
9622 /* The other case we handle is assignments into a constant-position
9623 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9624 a mask that has all one bits except for a group of zero bits and
9625 OTHER is known to have zeros where C1 has ones, this is such an
9626 assignment. Compute the position and length from C1. Shift OTHER
9627 to the appropriate position, force it to the required mode, and
9628 make the extraction. Check for the AND in both operands. */
9629
9630 /* One or more SUBREGs might obscure the constant-position field
9631 assignment. The first one we are likely to encounter is an outer
9632 narrowing SUBREG, which we can just strip for the purposes of
9633 identifying the constant-field assignment. */
9634 scalar_int_mode src_mode = mode;
9635 if (GET_CODE (src) == SUBREG
9636 && subreg_lowpart_p (src)
9637 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
9638 src = SUBREG_REG (src);
9639
9640 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9641 return x;
9642
9643 rhs = expand_compound_operation (XEXP (src, 0));
9644 lhs = expand_compound_operation (XEXP (src, 1));
9645
9646 if (GET_CODE (rhs) == AND
9647 && CONST_INT_P (XEXP (rhs, 1))
9648 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9649 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9650 /* The second SUBREG that might get in the way is a paradoxical
9651 SUBREG around the first operand of the AND. We want to
9652 pretend the operand is as wide as the destination here. We
9653 do this by adjusting the MEM to wider mode for the sole
9654 purpose of the call to rtx_equal_for_field_assignment_p. Also
9655 note this trick only works for MEMs. */
9656 else if (GET_CODE (rhs) == AND
9657 && paradoxical_subreg_p (XEXP (rhs, 0))
9658 && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9659 && CONST_INT_P (XEXP (rhs, 1))
9660 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9661 dest, true))
9662 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9663 else if (GET_CODE (lhs) == AND
9664 && CONST_INT_P (XEXP (lhs, 1))
9665 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9666 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9667 /* The second SUBREG that might get in the way is a paradoxical
9668 SUBREG around the first operand of the AND. We want to
9669 pretend the operand is as wide as the destination here. We
9670 do this by adjusting the MEM to wider mode for the sole
9671 purpose of the call to rtx_equal_for_field_assignment_p. Also
9672 note this trick only works for MEMs. */
9673 else if (GET_CODE (lhs) == AND
9674 && paradoxical_subreg_p (XEXP (lhs, 0))
9675 && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9676 && CONST_INT_P (XEXP (lhs, 1))
9677 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9678 dest, true))
9679 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9680 else
9681 return x;
9682
9683 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
9684 if (pos < 0
9685 || pos + len > GET_MODE_PRECISION (mode)
9686 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
9687 || (c1 & nonzero_bits (other, mode)) != 0)
9688 return x;
9689
9690 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9691 if (assign == 0)
9692 return x;
9693
9694 /* The mode to use for the source is the mode of the assignment, or of
9695 what is inside a possible STRICT_LOW_PART. */
9696 machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
9697 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9698
9699 /* Shift OTHER right POS places and make it the source, restricting it
9700 to the proper length and mode. */
9701
9702 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9703 src_mode, other, pos),
9704 dest);
9705 src = force_to_mode (src, new_mode,
9706 len >= HOST_BITS_PER_WIDE_INT
9707 ? HOST_WIDE_INT_M1U
9708 : (HOST_WIDE_INT_1U << len) - 1,
9709 0);
9710
9711 /* If SRC is masked by an AND that does not make a difference in
9712 the value being stored, strip it. */
9713 if (GET_CODE (assign) == ZERO_EXTRACT
9714 && CONST_INT_P (XEXP (assign, 1))
9715 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9716 && GET_CODE (src) == AND
9717 && CONST_INT_P (XEXP (src, 1))
9718 && UINTVAL (XEXP (src, 1))
9719 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
9720 src = XEXP (src, 0);
9721
9722 return gen_rtx_SET (assign, src);
9723 }
9724 \f
9725 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9726 if so. */
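/* Illustrative example (an addition, not part of the original comment):
   the same rule applies to the logical operations, e.g.

     (ior:SI (and:SI (reg:SI 100) (reg:SI 102))
             (and:SI (reg:SI 101) (reg:SI 102)))

   is reassociated into

     (and:SI (ior:SI (reg:SI 100) (reg:SI 101)) (reg:SI 102)).  */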
9727
9728 static rtx
9729 apply_distributive_law (rtx x)
9730 {
9731 enum rtx_code code = GET_CODE (x);
9732 enum rtx_code inner_code;
9733 rtx lhs, rhs, other;
9734 rtx tem;
9735
9736 /* Distributivity is not true for floating point as it can change the
9737 value. So we don't do it unless -funsafe-math-optimizations. */
9738 if (FLOAT_MODE_P (GET_MODE (x))
9739 && ! flag_unsafe_math_optimizations)
9740 return x;
9741
9742 /* The outer operation can only be one of the following: */
9743 if (code != IOR && code != AND && code != XOR
9744 && code != PLUS && code != MINUS)
9745 return x;
9746
9747 lhs = XEXP (x, 0);
9748 rhs = XEXP (x, 1);
9749
9750 /* If either operand is a primitive we can't do anything, so get out
9751 fast. */
9752 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9753 return x;
9754
9755 lhs = expand_compound_operation (lhs);
9756 rhs = expand_compound_operation (rhs);
9757 inner_code = GET_CODE (lhs);
9758 if (inner_code != GET_CODE (rhs))
9759 return x;
9760
9761 /* See if the inner and outer operations distribute. */
9762 switch (inner_code)
9763 {
9764 case LSHIFTRT:
9765 case ASHIFTRT:
9766 case AND:
9767 case IOR:
9768 /* These all distribute except over PLUS. */
9769 if (code == PLUS || code == MINUS)
9770 return x;
9771 break;
9772
9773 case MULT:
9774 if (code != PLUS && code != MINUS)
9775 return x;
9776 break;
9777
9778 case ASHIFT:
9779 /* This is also a multiply, so it distributes over everything. */
9780 break;
9781
9782 /* This used to handle SUBREG, but this turned out to be counter-
9783 productive, since (subreg (op ...)) usually is not handled by
9784 insn patterns, and this "optimization" therefore transformed
9785 recognizable patterns into unrecognizable ones. Therefore the
9786 SUBREG case was removed from here.
9787
9788 It is possible that distributing SUBREG over arithmetic operations
9789    leads to an intermediate result that can then be optimized further,
9790 e.g. by moving the outer SUBREG to the other side of a SET as done
9791 in simplify_set. This seems to have been the original intent of
9792 handling SUBREGs here.
9793
9794 However, with current GCC this does not appear to actually happen,
9795 at least on major platforms. If some case is found where removing
9796 the SUBREG case here prevents follow-on optimizations, distributing
9797 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
9798
9799 default:
9800 return x;
9801 }
9802
9803 /* Set LHS and RHS to the inner operands (A and B in the example
9804 above) and set OTHER to the common operand (C in the example).
9805 There is only one way to do this unless the inner operation is
9806 commutative. */
9807 if (COMMUTATIVE_ARITH_P (lhs)
9808 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9809 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9810 else if (COMMUTATIVE_ARITH_P (lhs)
9811 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9812 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9813 else if (COMMUTATIVE_ARITH_P (lhs)
9814 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9815 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9816 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9817 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9818 else
9819 return x;
9820
9821 /* Form the new inner operation, seeing if it simplifies first. */
9822 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9823
9824 /* There is one exception to the general way of distributing:
9825 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9826 if (code == XOR && inner_code == IOR)
9827 {
9828 inner_code = AND;
9829 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9830 }
9831
9832   /* We may be able to continue distributing the result, so call
9833 ourselves recursively on the inner operation before forming the
9834 outer operation, which we return. */
9835 return simplify_gen_binary (inner_code, GET_MODE (x),
9836 apply_distributive_law (tem), other);
9837 }
9838
9839 /* See if X is of the form (* (+ A B) C), and if so convert to
9840 (+ (* A C) (* B C)) and try to simplify.
9841
9842 Most of the time, this results in no change. However, if some of
9843 the operands are the same or inverses of each other, simplifications
9844 will result.
9845
9846 For example, (and (ior A B) (not B)) can occur as the result of
9847 expanding a bit field assignment. When we apply the distributive
9848 law to this, we get (ior (and A (not B)) (and B (not B))),
9849 which then simplifies to (and A (not B)).
9850
9851 Note that this routine does not check whether applying the inverse
9852 distributive law is valid; such a check here would be pointless, since
9853 it can be done in the few places where this routine is called.
9854
9855 N is the index of the term that is decomposed (the arithmetic operation,
9856 i.e. (+ A B) in the first example above). !N is the index of the term that
9857 is distributed, i.e. of C in the first example above. */
9858 static rtx
9859 distribute_and_simplify_rtx (rtx x, int n)
9860 {
9861 machine_mode mode;
9862 enum rtx_code outer_code, inner_code;
9863 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9864
9865 /* Distributivity is not true for floating point as it can change the
9866 value. So we don't do it unless -funsafe-math-optimizations. */
9867 if (FLOAT_MODE_P (GET_MODE (x))
9868 && ! flag_unsafe_math_optimizations)
9869 return NULL_RTX;
9870
9871 decomposed = XEXP (x, n);
9872 if (!ARITHMETIC_P (decomposed))
9873 return NULL_RTX;
9874
9875 mode = GET_MODE (x);
9876 outer_code = GET_CODE (x);
9877 distributed = XEXP (x, !n);
9878
9879 inner_code = GET_CODE (decomposed);
9880 inner_op0 = XEXP (decomposed, 0);
9881 inner_op1 = XEXP (decomposed, 1);
9882
9883 /* Special case (and (xor B C) (not A)), which is equivalent to
9884 (xor (ior A B) (ior A C)) */
9885 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9886 {
9887 distributed = XEXP (distributed, 0);
9888 outer_code = IOR;
9889 }
9890
9891 if (n == 0)
9892 {
9893 /* Distribute the second term. */
9894 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
9895 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
9896 }
9897 else
9898 {
9899 /* Distribute the first term. */
9900 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
9901 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
9902 }
9903
9904 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
9905 new_op0, new_op1));
9906 if (GET_CODE (tmp) != outer_code
9907 && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
9908 < set_src_cost (x, mode, optimize_this_for_speed_p)))
9909 return tmp;
9910
9911 return NULL_RTX;
9912 }
9913 \f
9914 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
9915 in MODE. Return an equivalent form, if different from (and VAROP
9916 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
9917
9918 static rtx
9919 simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
9920 unsigned HOST_WIDE_INT constop)
9921 {
9922 unsigned HOST_WIDE_INT nonzero;
9923 unsigned HOST_WIDE_INT orig_constop;
9924 rtx orig_varop;
9925 int i;
9926
9927 orig_varop = varop;
9928 orig_constop = constop;
9929 if (GET_CODE (varop) == CLOBBER)
9930 return NULL_RTX;
9931
9932 /* Simplify VAROP knowing that we will be only looking at some of the
9933 bits in it.
9934
9935 Note by passing in CONSTOP, we guarantee that the bits not set in
9936 CONSTOP are not significant and will never be examined. We must
9937 ensure that is the case by explicitly masking out those bits
9938 before returning. */
9939 varop = force_to_mode (varop, mode, constop, 0);
9940
9941 /* If VAROP is a CLOBBER, we will fail so return it. */
9942 if (GET_CODE (varop) == CLOBBER)
9943 return varop;
9944
9945 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
9946 to VAROP and return the new constant. */
9947 if (CONST_INT_P (varop))
9948 return gen_int_mode (INTVAL (varop) & constop, mode);
9949
9950 /* See what bits may be nonzero in VAROP. Unlike the general case of
9951 a call to nonzero_bits, here we don't care about bits outside
9952 MODE. */
9953
9954 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
9955
9956 /* Turn off all bits in the constant that are known to already be zero.
9957 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
9958 which is tested below. */
9959
9960 constop &= nonzero;
9961
9962 /* If we don't have any bits left, return zero. */
9963 if (constop == 0)
9964 return const0_rtx;
9965
9966 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
9967 a power of two, we can replace this with an ASHIFT. */
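/* For example (a purely illustrative case): if X is known to be 0 or 1,
   then (neg X) is either 0 or all ones, so (and (neg X) 8) keeps bit 3
   exactly when X is 1, i.e. it equals (ashift X 3).  */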
9968 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
9969 && (i = exact_log2 (constop)) >= 0)
9970 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
9971
9972 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
9973 or XOR, then try to apply the distributive law. This may eliminate
9974 operations if either branch can be simplified because of the AND.
9975 It may also make some cases more complex, but those cases probably
9976 won't match a pattern either with or without this. */
9977
9978 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
9979 {
9980 scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
9981 return
9982 gen_lowpart
9983 (mode,
9984 apply_distributive_law
9985 (simplify_gen_binary (GET_CODE (varop), varop_mode,
9986 simplify_and_const_int (NULL_RTX, varop_mode,
9987 XEXP (varop, 0),
9988 constop),
9989 simplify_and_const_int (NULL_RTX, varop_mode,
9990 XEXP (varop, 1),
9991 constop))));
9992 }
9993
9994 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
9995 the AND and see if one of the operands simplifies to zero. If so, we
9996 may eliminate it. */
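/* For instance (values chosen only for illustration): with CONSTOP == 0xff
   and VAROP == (plus X 256), the distributed second operand 256 & 0xff is
   zero, so the whole expression reduces to (and X 0xff).  */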
9997
9998 if (GET_CODE (varop) == PLUS
9999 && pow2p_hwi (constop + 1))
10000 {
10001 rtx o0, o1;
10002
10003 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
10004 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
10005 if (o0 == const0_rtx)
10006 return o1;
10007 if (o1 == const0_rtx)
10008 return o0;
10009 }
10010
10011 /* Make a SUBREG if necessary. If we can't make it, fail. */
10012 varop = gen_lowpart (mode, varop);
10013 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10014 return NULL_RTX;
10015
10016 /* If we are only masking insignificant bits, return VAROP. */
10017 if (constop == nonzero)
10018 return varop;
10019
10020 if (varop == orig_varop && constop == orig_constop)
10021 return NULL_RTX;
10022
10023 /* Otherwise, return an AND. */
10024 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
10025 }
10026
10027
10028 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
10029 in MODE.
10030
10031 Return an equivalent form, if different from X. Otherwise, return X. If
10032 X is zero, we are to always construct the equivalent form. */
10033
10034 static rtx
10035 simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
10036 unsigned HOST_WIDE_INT constop)
10037 {
10038 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
10039 if (tem)
10040 return tem;
10041
10042 if (!x)
10043 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
10044 gen_int_mode (constop, mode));
10045 if (GET_MODE (x) != mode)
10046 x = gen_lowpart (mode, x);
10047 return x;
10048 }
10049 \f
10050 /* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
10051 We don't care about bits outside of those defined in MODE.
10052
10053 For most X this is simply GET_MODE_MASK (MODE), but if the value last
10054 assigned to X was a shift, AND, or zero_extract, we can do better.  */
10055
10056 static rtx
10057 reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
10058 scalar_int_mode mode,
10059 unsigned HOST_WIDE_INT *nonzero)
10060 {
10061 rtx tem;
10062 reg_stat_type *rsp;
10063
10064 /* If X is a register whose nonzero bits value is current, use it.
10065 Otherwise, if X is a register whose value we can find, use that
10066 value. Otherwise, use the previously-computed global nonzero bits
10067 for this register. */
10068
10069 rsp = &reg_stat[REGNO (x)];
10070 if (rsp->last_set_value != 0
10071 && (rsp->last_set_mode == mode
10072 || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
10073 && GET_MODE_CLASS (mode) == MODE_INT))
10074 && ((rsp->last_set_label >= label_tick_ebb_start
10075 && rsp->last_set_label < label_tick)
10076 || (rsp->last_set_label == label_tick
10077 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10078 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10079 && REGNO (x) < reg_n_sets_max
10080 && REG_N_SETS (REGNO (x)) == 1
10081 && !REGNO_REG_SET_P
10082 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10083 REGNO (x)))))
10084 {
10085 /* Note that, even if the precision of last_set_mode is lower than that
10086 of mode, record_value_for_reg invoked nonzero_bits on the register
10087 with nonzero_bits_mode (because last_set_mode is necessarily integral
10088 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
10089 are all valid, hence in mode too since nonzero_bits_mode is defined
10090 to the largest HWI_COMPUTABLE_MODE_P mode. */
10091 *nonzero &= rsp->last_set_nonzero_bits;
10092 return NULL;
10093 }
10094
10095 tem = get_last_value (x);
10096 if (tem)
10097 {
10098 if (SHORT_IMMEDIATES_SIGN_EXTEND)
10099 tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));
10100
10101 return tem;
10102 }
10103
10104 if (nonzero_sign_valid && rsp->nonzero_bits)
10105 {
10106 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
10107
10108 if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
10109 /* We don't know anything about the upper bits. */
10110 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);
10111
10112 *nonzero &= mask;
10113 }
10114
10115 return NULL;
10116 }
10117
10118 /* Given a reg X of mode XMODE, return the number of bits at the high-order
10119 end of X that are known to be equal to the sign bit. X will be used
10120 in mode MODE; the returned value will always be between 1 and the
10121 number of bits in MODE. */
10122
10123 static rtx
10124 reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
10125 scalar_int_mode mode,
10126 unsigned int *result)
10127 {
10128 rtx tem;
10129 reg_stat_type *rsp;
10130
10131 rsp = &reg_stat[REGNO (x)];
10132 if (rsp->last_set_value != 0
10133 && rsp->last_set_mode == mode
10134 && ((rsp->last_set_label >= label_tick_ebb_start
10135 && rsp->last_set_label < label_tick)
10136 || (rsp->last_set_label == label_tick
10137 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10138 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10139 && REGNO (x) < reg_n_sets_max
10140 && REG_N_SETS (REGNO (x)) == 1
10141 && !REGNO_REG_SET_P
10142 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10143 REGNO (x)))))
10144 {
10145 *result = rsp->last_set_sign_bit_copies;
10146 return NULL;
10147 }
10148
10149 tem = get_last_value (x);
10150 if (tem != 0)
10151 return tem;
10152
10153 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
10154 && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
10155 *result = rsp->sign_bit_copies;
10156
10157 return NULL;
10158 }
10159 \f
10160 /* Return the number of "extended" bits there are in X, when interpreted
10161 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
10162 unsigned quantities, this is the number of high-order zero bits.
10163 For signed quantities, this is the number of copies of the sign bit
10164 minus 1. In both cases, this function returns the number of "spare"
10165 bits. For example, if two quantities for which this function returns
10166 at least 1 are added, the addition is known not to overflow.
10167
10168 This function will always return 0 unless called during combine, which
10169 implies that it must be called from a define_split. */
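/* As a small worked example (unsigned case, numbers for illustration only):
   for a 32-bit quantity whose nonzero_bits mask is 0xff, the result is
   32 - 1 - floor_log2 (0xff) = 31 - 7 = 24, i.e. the 24 high-order bits
   are known to be zero.  */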
10170
10171 unsigned int
10172 extended_count (const_rtx x, machine_mode mode, int unsignedp)
10173 {
10174 if (nonzero_sign_valid == 0)
10175 return 0;
10176
10177 scalar_int_mode int_mode;
10178 return (unsignedp
10179 ? (is_a <scalar_int_mode> (mode, &int_mode)
10180 && HWI_COMPUTABLE_MODE_P (int_mode)
10181 ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
10182 - floor_log2 (nonzero_bits (x, int_mode)))
10183 : 0)
10184 : num_sign_bit_copies (x, mode) - 1);
10185 }
10186
10187 /* This function is called from `simplify_shift_const' to merge two
10188 outer operations. Specifically, we have already found that we need
10189 to perform operation *POP0 with constant *PCONST0 at the outermost
10190 position. We would now like to also perform OP1 with constant CONST1
10191 (with *POP0 being done last).
10192
10193 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10194 the resulting operation. *PCOMP_P is set to 1 if we would need to
10195 complement the innermost operand, otherwise it is unchanged.
10196
10197 MODE is the mode in which the operation will be done. No bits outside
10198 the width of this mode matter. It is assumed that the width of this mode
10199 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10200
10201 If *POP0 or OP1 is UNKNOWN, it means no operation is required. Only NEG, PLUS,
10202 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
10203 result is simply *PCONST0.
10204
10205 If the resulting operation cannot be expressed as one operation, we
10206 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
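/* A concrete illustration (constants chosen only for the example): suppose
   *POP0 == XOR with *PCONST0 == C, and we now also want OP1 == IOR with
   CONST1 == C.  Since (a | C) ^ C == a & ~C, the pair collapses into a
   single operation, which this routine records by setting *POP0 = AND and
   *PCONST0 = ~C.  */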
10207
10208 static int
10209 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1,
                 HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
10210 {
10211 enum rtx_code op0 = *pop0;
10212 HOST_WIDE_INT const0 = *pconst0;
10213
10214 const0 &= GET_MODE_MASK (mode);
10215 const1 &= GET_MODE_MASK (mode);
10216
10217 /* If OP0 is an AND, clear unimportant bits in CONST1. */
10218 if (op0 == AND)
10219 const1 &= const0;
10220
10221 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
10222 if OP0 is SET. */
10223
10224 if (op1 == UNKNOWN || op0 == SET)
10225 return 1;
10226
10227 else if (op0 == UNKNOWN)
10228 op0 = op1, const0 = const1;
10229
10230 else if (op0 == op1)
10231 {
10232 switch (op0)
10233 {
10234 case AND:
10235 const0 &= const1;
10236 break;
10237 case IOR:
10238 const0 |= const1;
10239 break;
10240 case XOR:
10241 const0 ^= const1;
10242 break;
10243 case PLUS:
10244 const0 += const1;
10245 break;
10246 case NEG:
10247 op0 = UNKNOWN;
10248 break;
10249 default:
10250 break;
10251 }
10252 }
10253
10254 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
10255 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
10256 return 0;
10257
10258 /* If the two constants aren't the same, we can't do anything. The
10259 remaining six cases can all be done. */
10260 else if (const0 != const1)
10261 return 0;
10262
10263 else
10264 switch (op0)
10265 {
10266 case IOR:
10267 if (op1 == AND)
10268 /* (a & b) | b == b */
10269 op0 = SET;
10270 else /* op1 == XOR */
10271 /* (a ^ b) | b == a | b */
10272 {;}
10273 break;
10274
10275 case XOR:
10276 if (op1 == AND)
10277 /* (a & b) ^ b == (~a) & b */
10278 op0 = AND, *pcomp_p = 1;
10279 else /* op1 == IOR */
10280 /* (a | b) ^ b == a & ~b */
10281 op0 = AND, const0 = ~const0;
10282 break;
10283
10284 case AND:
10285 if (op1 == IOR)
10286 /* (a | b) & b == b */
10287 op0 = SET;
10288 else /* op1 == XOR */
10289 /* (a ^ b) & b == (~a) & b */
10290 *pcomp_p = 1;
10291 break;
10292 default:
10293 break;
10294 }
10295
10296 /* Check for NO-OP cases. */
10297 const0 &= GET_MODE_MASK (mode);
10298 if (const0 == 0
10299 && (op0 == IOR || op0 == XOR || op0 == PLUS))
10300 op0 = UNKNOWN;
10301 else if (const0 == 0 && op0 == AND)
10302 op0 = SET;
10303 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10304 && op0 == AND)
10305 op0 = UNKNOWN;
10306
10307 *pop0 = op0;
10308
10309 /* ??? Slightly redundant with the above mask, but not entirely.
10310 Moving this above means we'd have to sign-extend the mode mask
10311 for the final test. */
10312 if (op0 != UNKNOWN && op0 != NEG)
10313 *pconst0 = trunc_int_for_mode (const0, mode);
10314
10315 return 1;
10316 }
10317 \f
10318 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10319 the shift in. The original shift operation CODE is performed on OP in
10320 ORIG_MODE. Return the wider mode MODE if we can perform the operation
10321 in that mode. Return ORIG_MODE otherwise. We can also assume that the
10322 result of the shift is subject to operation OUTER_CODE with operand
10323 OUTER_CONST. */
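/* An illustrative case (modes and constants chosen only as an example):
   widening an 8-bit LSHIFTRT by 3 to a 32-bit shift is safe when the result
   feeds an AND with 0x0f, because the junk bits brought in from above bit 7
   land at positions 5 and higher after the shift and the 4-bit mask discards
   them; this is the "8 - 4 >= 3" instance of the test below.  */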
10324
10325 static scalar_int_mode
10326 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10327 scalar_int_mode orig_mode, scalar_int_mode mode,
10328 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10329 {
10330 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10331
10332 /* In general we can't perform in wider mode for right shift and rotate. */
10333 switch (code)
10334 {
10335 case ASHIFTRT:
10336 /* We can still widen if the bits brought in from the left are identical
10337 to the sign bit of ORIG_MODE. */
10338 if (num_sign_bit_copies (op, mode)
10339 > (unsigned) (GET_MODE_PRECISION (mode)
10340 - GET_MODE_PRECISION (orig_mode)))
10341 return mode;
10342 return orig_mode;
10343
10344 case LSHIFTRT:
10345 /* Similarly here but with zero bits. */
10346 if (HWI_COMPUTABLE_MODE_P (mode)
10347 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10348 return mode;
10349
10350 /* We can also widen if the bits brought in will be masked off. This
10351 operation is performed in ORIG_MODE. */
10352 if (outer_code == AND)
10353 {
10354 int care_bits = low_bitmask_len (orig_mode, outer_const);
10355
10356 if (care_bits >= 0
10357 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10358 return mode;
10359 }
10360 /* fall through */
10361
10362 case ROTATE:
10363 return orig_mode;
10364
10365 case ROTATERT:
10366 gcc_unreachable ();
10367
10368 default:
10369 return mode;
10370 }
10371 }
10372
10373 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
10374 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
10375 if we cannot simplify it. Otherwise, return a simplified value.
10376
10377 The shift is normally computed in the widest mode we find in VAROP, as
10378 long as it isn't a different number of words than RESULT_MODE. Exceptions
10379 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10380
10381 static rtx
10382 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10383 rtx varop, int orig_count)
10384 {
10385 enum rtx_code orig_code = code;
10386 rtx orig_varop = varop;
10387 int count;
10388 machine_mode mode = result_mode;
10389 machine_mode shift_mode;
10390 scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
10391 unsigned int mode_words
10392 = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
10393 /* We form (outer_op (code varop count) (outer_const)). */
10394 enum rtx_code outer_op = UNKNOWN;
10395 HOST_WIDE_INT outer_const = 0;
10396 int complement_p = 0;
10397 rtx new_rtx, x;
10398
10399 /* Make sure to truncate the "natural" shift count on the way in. We don't
10400 want to do this inside the loop as it makes it more difficult to
10401 combine shifts. */
10402 if (SHIFT_COUNT_TRUNCATED)
10403 orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;
10404
10405 /* If we were given an invalid count, don't do anything except exactly
10406 what was requested. */
10407
10408 if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
10409 return NULL_RTX;
10410
10411 count = orig_count;
10412
10413 /* Unless one of the branches of the `if' in this loop does a `continue',
10414 we will `break' the loop after the `if'. */
10415
10416 while (count != 0)
10417 {
10418 /* If we have an operand of (clobber (const_int 0)), fail. */
10419 if (GET_CODE (varop) == CLOBBER)
10420 return NULL_RTX;
10421
10422 /* Convert ROTATERT to ROTATE. */
10423 if (code == ROTATERT)
10424 {
10425 unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
10426 code = ROTATE;
10427 count = bitsize - count;
10428 }
10429
10430 shift_mode = result_mode;
10431 if (shift_mode != mode)
10432 {
10433 /* We only change the modes of scalar shifts. */
10434 int_mode = as_a <scalar_int_mode> (mode);
10435 int_result_mode = as_a <scalar_int_mode> (result_mode);
10436 shift_mode = try_widen_shift_mode (code, varop, count,
10437 int_result_mode, int_mode,
10438 outer_op, outer_const);
10439 }
10440
10441 scalar_int_mode shift_unit_mode
10442 = as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));
10443
10444 /* Handle cases where the count is greater than the size of the mode
10445 minus 1. For ASHIFTRT, use the size minus one as the count (this can
10446 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10447 take the count modulo the size. For other shifts, the result is
10448 zero.
10449
10450 Since these shifts are being produced by the compiler by combining
10451 multiple operations, each of which are defined, we know what the
10452 result is supposed to be. */
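/* For instance, in a 32-bit mode the code below treats an arithmetic right
   shift by 40 as a shift by 31 (every result bit becomes a copy of the sign
   bit), a rotate by 40 as a rotate by 40 % 32 = 8, and any other 32-bit
   shift by 40 as producing zero.  */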
10453
10454 if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
10455 {
10456 if (code == ASHIFTRT)
10457 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10458 else if (code == ROTATE || code == ROTATERT)
10459 count %= GET_MODE_PRECISION (shift_unit_mode);
10460 else
10461 {
10462 /* We can't simply return zero because there may be an
10463 outer op. */
10464 varop = const0_rtx;
10465 count = 0;
10466 break;
10467 }
10468 }
10469
10470 /* If we discovered we had to complement VAROP, leave. Making a NOT
10471 here would cause an infinite loop. */
10472 if (complement_p)
10473 break;
10474
10475 if (shift_mode == shift_unit_mode)
10476 {
10477 /* An arithmetic right shift of a quantity known to be -1 or 0
10478 is a no-op. */
10479 if (code == ASHIFTRT
10480 && (num_sign_bit_copies (varop, shift_unit_mode)
10481 == GET_MODE_PRECISION (shift_unit_mode)))
10482 {
10483 count = 0;
10484 break;
10485 }
10486
10487 /* If we are doing an arithmetic right shift and discarding all but
10488 the sign bit copies, this is equivalent to doing a shift by the
10489 bitsize minus one. Convert it into that shift because it will
10490 often allow other simplifications. */
10491
10492 if (code == ASHIFTRT
10493 && (count + num_sign_bit_copies (varop, shift_unit_mode)
10494 >= GET_MODE_PRECISION (shift_unit_mode)))
10495 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10496
10497 /* We simplify the tests below and elsewhere by converting
10498 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10499 `make_compound_operation' will convert it to an ASHIFTRT for
10500 those machines (such as VAX) that don't have an LSHIFTRT. */
10501 if (code == ASHIFTRT
10502 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10503 && val_signbit_known_clear_p (shift_unit_mode,
10504 nonzero_bits (varop,
10505 shift_unit_mode)))
10506 code = LSHIFTRT;
10507
10508 if (((code == LSHIFTRT
10509 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10510 && !(nonzero_bits (varop, shift_unit_mode) >> count))
10511 || (code == ASHIFT
10512 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10513 && !((nonzero_bits (varop, shift_unit_mode) << count)
10514 & GET_MODE_MASK (shift_unit_mode))))
10515 && !side_effects_p (varop))
10516 varop = const0_rtx;
10517 }
10518
10519 switch (GET_CODE (varop))
10520 {
10521 case SIGN_EXTEND:
10522 case ZERO_EXTEND:
10523 case SIGN_EXTRACT:
10524 case ZERO_EXTRACT:
10525 new_rtx = expand_compound_operation (varop);
10526 if (new_rtx != varop)
10527 {
10528 varop = new_rtx;
10529 continue;
10530 }
10531 break;
10532
10533 case MEM:
10534 /* The following rules apply only to scalars. */
10535 if (shift_mode != shift_unit_mode)
10536 break;
10537 int_mode = as_a <scalar_int_mode> (mode);
10538
10539 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10540 minus the width of a smaller mode, we can do this with a
10541 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10542 if ((code == ASHIFTRT || code == LSHIFTRT)
10543 && ! mode_dependent_address_p (XEXP (varop, 0),
10544 MEM_ADDR_SPACE (varop))
10545 && ! MEM_VOLATILE_P (varop)
10546 && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
10547 .exists (&tmode)))
10548 {
10549 new_rtx = adjust_address_nv (varop, tmode,
10550 BYTES_BIG_ENDIAN ? 0
10551 : count / BITS_PER_UNIT);
10552
10553 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10554 : ZERO_EXTEND, int_mode, new_rtx);
10555 count = 0;
10556 continue;
10557 }
10558 break;
10559
10560 case SUBREG:
10561 /* The following rules apply only to scalars. */
10562 if (shift_mode != shift_unit_mode)
10563 break;
10564 int_mode = as_a <scalar_int_mode> (mode);
10565 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10566
10567 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10568 the same number of words as what we've seen so far. Then store
10569 the widest mode in MODE. */
10570 if (subreg_lowpart_p (varop)
10571 && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
10572 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
10573 && (unsigned int) ((GET_MODE_SIZE (inner_mode)
10574 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
10575 == mode_words
10576 && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
10577 {
10578 varop = SUBREG_REG (varop);
10579 if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
10580 mode = inner_mode;
10581 continue;
10582 }
10583 break;
10584
10585 case MULT:
10586 /* Some machines use MULT instead of ASHIFT because MULT
10587 is cheaper. But it is still better on those machines to
10588 merge two shifts into one. */
10589 if (CONST_INT_P (XEXP (varop, 1))
10590 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10591 {
10592 varop
10593 = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10594 XEXP (varop, 0),
10595 GEN_INT (exact_log2 (
10596 UINTVAL (XEXP (varop, 1)))));
10597 continue;
10598 }
10599 break;
10600
10601 case UDIV:
10602 /* Similar, for when divides are cheaper. */
10603 if (CONST_INT_P (XEXP (varop, 1))
10604 && exact_log2 (UINTVAL (XEXP (varop, 1))) >= 0)
10605 {
10606 varop
10607 = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10608 XEXP (varop, 0),
10609 GEN_INT (exact_log2 (
10610 UINTVAL (XEXP (varop, 1)))));
10611 continue;
10612 }
10613 break;
10614
10615 case ASHIFTRT:
10616 /* If we are extracting just the sign bit of an arithmetic
10617 right shift, that shift is not needed. However, the sign
10618 bit of a wider mode may be different from what would be
10619 interpreted as the sign bit in a narrower mode, so, if
10620 the result is narrower, don't discard the shift. */
10621 if (code == LSHIFTRT
10622 && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
10623 && (GET_MODE_UNIT_BITSIZE (result_mode)
10624 >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
10625 {
10626 varop = XEXP (varop, 0);
10627 continue;
10628 }
10629
10630 /* fall through */
10631
10632 case LSHIFTRT:
10633 case ASHIFT:
10634 case ROTATE:
10635 /* The following rules apply only to scalars. */
10636 if (shift_mode != shift_unit_mode)
10637 break;
10638 int_mode = as_a <scalar_int_mode> (mode);
10639 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10640 int_result_mode = as_a <scalar_int_mode> (result_mode);
10641
10642 /* Here we have two nested shifts. The result is usually the
10643 AND of a new shift with a mask. We compute the result below. */
10644 if (CONST_INT_P (XEXP (varop, 1))
10645 && INTVAL (XEXP (varop, 1)) >= 0
10646 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
10647 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10648 && HWI_COMPUTABLE_MODE_P (int_mode))
10649 {
10650 enum rtx_code first_code = GET_CODE (varop);
10651 unsigned int first_count = INTVAL (XEXP (varop, 1));
10652 unsigned HOST_WIDE_INT mask;
10653 rtx mask_rtx;
10654
10655 /* We have one common special case. We can't do any merging if
10656 the inner code is an ASHIFTRT of a smaller mode. However, if
10657 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10658 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10659 we can convert it to
10660 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10661 This simplifies certain SIGN_EXTEND operations. */
10662 if (code == ASHIFT && first_code == ASHIFTRT
10663 && count == (GET_MODE_PRECISION (int_result_mode)
10664 - GET_MODE_PRECISION (int_varop_mode)))
10665 {
10666 /* C3 has the low-order C1 bits zero. */
10667
10668 mask = GET_MODE_MASK (int_mode)
10669 & ~((HOST_WIDE_INT_1U << first_count) - 1);
10670
10671 varop = simplify_and_const_int (NULL_RTX, int_result_mode,
10672 XEXP (varop, 0), mask);
10673 varop = simplify_shift_const (NULL_RTX, ASHIFT,
10674 int_result_mode, varop, count);
10675 count = first_count;
10676 code = ASHIFTRT;
10677 continue;
10678 }
10679
10680 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10681 than C1 high-order bits equal to the sign bit, we can convert
10682 this to either an ASHIFT or an ASHIFTRT depending on the
10683 two counts.
10684
10685 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE. */
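/* A small illustrative instance: if FOO is known to have at least four
   high-order bits equal to its sign bit, then (ashiftrt (ashift FOO 3) 5)
   can be rewritten as (ashiftrt FOO 2) -- the left shift by 3 discards only
   redundant sign copies, so the two counts simply subtract below.  */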
10686
10687 if (code == ASHIFTRT && first_code == ASHIFT
10688 && int_varop_mode == shift_unit_mode
10689 && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
10690 > first_count))
10691 {
10692 varop = XEXP (varop, 0);
10693 count -= first_count;
10694 if (count < 0)
10695 {
10696 count = -count;
10697 code = ASHIFT;
10698 }
10699
10700 continue;
10701 }
10702
10703 /* There are some cases we can't do. If CODE is ASHIFTRT,
10704 we can only do this if FIRST_CODE is also ASHIFTRT.
10705
10706 We can't do the case when CODE is ROTATE and FIRST_CODE is
10707 ASHIFTRT.
10708
10709 If the mode of this shift is not the mode of the outer shift,
10710 we can't do this if either shift is a right shift or ROTATE.
10711
10712 Finally, we can't do any of these if the mode is too wide
10713 unless the codes are the same.
10714
10715 Handle the case where the shift codes are the same
10716 first. */
10717
10718 if (code == first_code)
10719 {
10720 if (int_varop_mode != int_result_mode
10721 && (code == ASHIFTRT || code == LSHIFTRT
10722 || code == ROTATE))
10723 break;
10724
10725 count += first_count;
10726 varop = XEXP (varop, 0);
10727 continue;
10728 }
10729
10730 if (code == ASHIFTRT
10731 || (code == ROTATE && first_code == ASHIFTRT)
10732 || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
10733 || (int_varop_mode != int_result_mode
10734 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10735 || first_code == ROTATE
10736 || code == ROTATE)))
10737 break;
10738
10739 /* To compute the mask to apply after the shift, shift the
10740 nonzero bits of the inner shift the same way the
10741 outer shift will. */
10742
10743 mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
10744 int_result_mode);
10745
10746 mask_rtx
10747 = simplify_const_binary_operation (code, int_result_mode,
10748 mask_rtx, GEN_INT (count));
10749
10750 /* Give up if we can't compute an outer operation to use. */
10751 if (mask_rtx == 0
10752 || !CONST_INT_P (mask_rtx)
10753 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10754 INTVAL (mask_rtx),
10755 int_result_mode, &complement_p))
10756 break;
10757
10758 /* If the shifts are in the same direction, we add the
10759 counts. Otherwise, we subtract them. */
10760 if ((code == ASHIFTRT || code == LSHIFTRT)
10761 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10762 count += first_count;
10763 else
10764 count -= first_count;
10765
10766 /* If COUNT is positive, the new shift is usually CODE,
10767 except in the two cases below, where it is FIRST_CODE.
10768 If the count is negative, FIRST_CODE should always be
10769 used.  */
10770 if (count > 0
10771 && ((first_code == ROTATE && code == ASHIFT)
10772 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10773 code = first_code;
10774 else if (count < 0)
10775 code = first_code, count = -count;
10776
10777 varop = XEXP (varop, 0);
10778 continue;
10779 }
10780
10781 /* If we have (A << B << C) for any shift, we can convert this to
10782 (A << C << B). This wins if A is a constant. Only try this if
10783 B is not a constant. */
10784
10785 else if (GET_CODE (varop) == code
10786 && CONST_INT_P (XEXP (varop, 0))
10787 && !CONST_INT_P (XEXP (varop, 1)))
10788 {
10789 /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
10790 sure the result will be masked. See PR70222. */
10791 if (code == LSHIFTRT
10792 && int_mode != int_result_mode
10793 && !merge_outer_ops (&outer_op, &outer_const, AND,
10794 GET_MODE_MASK (int_result_mode)
10795 >> orig_count, int_result_mode,
10796 &complement_p))
10797 break;
10798 /* For ((int) (cstLL >> count)) >> cst2 just give up. Queuing
10799 up outer sign extension (often left and right shift) is
10800 hardly more efficient than the original. See PR70429. */
10801 if (code == ASHIFTRT && int_mode != int_result_mode)
10802 break;
10803
10804 rtx new_rtx = simplify_const_binary_operation (code, int_mode,
10805 XEXP (varop, 0),
10806 GEN_INT (count));
10807 varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
10808 count = 0;
10809 continue;
10810 }
10811 break;
10812
10813 case NOT:
10814 /* The following rules apply only to scalars. */
10815 if (shift_mode != shift_unit_mode)
10816 break;
10817
10818 /* Make this fit the case below. */
10819 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
10820 continue;
10821
10822 case IOR:
10823 case AND:
10824 case XOR:
10825 /* The following rules apply only to scalars. */
10826 if (shift_mode != shift_unit_mode)
10827 break;
10828 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10829 int_result_mode = as_a <scalar_int_mode> (result_mode);
10830
10831 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10832 with C the size of VAROP - 1 and the shift is logical if
10833 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10834 we have an (le X 0) operation. If we have an arithmetic shift
10835 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10836 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
10837
10838 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10839 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10840 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10841 && (code == LSHIFTRT || code == ASHIFTRT)
10842 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
10843 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10844 {
10845 count = 0;
10846 varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
10847 const0_rtx);
10848
10849 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10850 varop = gen_rtx_NEG (int_varop_mode, varop);
10851
10852 continue;
10853 }
10854
10855 /* If we have (shift (logical)), move the logical to the outside
10856 to allow it to possibly combine with another logical and the
10857 shift to combine with another shift. This also canonicalizes to
10858 what a ZERO_EXTRACT looks like. Also, some machines have
10859 (and (shift)) insns. */
10860
10861 if (CONST_INT_P (XEXP (varop, 1))
10862 /* We can't do this if we have (ashiftrt (xor)) and the
10863 constant has its sign bit set in shift_unit_mode with
10864 shift_unit_mode wider than result_mode. */
10865 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10866 && int_result_mode != shift_unit_mode
10867 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10868 shift_unit_mode))
10869 && (new_rtx = simplify_const_binary_operation
10870 (code, int_result_mode,
10871 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
10872 GEN_INT (count))) != 0
10873 && CONST_INT_P (new_rtx)
10874 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10875 INTVAL (new_rtx), int_result_mode,
10876 &complement_p))
10877 {
10878 varop = XEXP (varop, 0);
10879 continue;
10880 }
10881
10882 /* If we can't do that, try to simplify the shift in each arm of the
10883 logical expression, make a new logical expression, and apply
10884 the inverse distributive law. This also can't be done for
10885 (ashiftrt (xor)) where we've widened the shift and the constant
10886 changes the sign bit. */
10887 if (CONST_INT_P (XEXP (varop, 1))
10888 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10889 && int_result_mode != shift_unit_mode
10890 && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10891 shift_unit_mode)))
10892 {
10893 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
10894 XEXP (varop, 0), count);
10895 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
10896 XEXP (varop, 1), count);
10897
10898 varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
10899 lhs, rhs);
10900 varop = apply_distributive_law (varop);
10901
10902 count = 0;
10903 continue;
10904 }
10905 break;
10906
10907 case EQ:
10908 /* The following rules apply only to scalars. */
10909 if (shift_mode != shift_unit_mode)
10910 break;
10911 int_result_mode = as_a <scalar_int_mode> (result_mode);
10912
10913 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
10914 says that the sign bit can be tested, FOO has mode MODE, C is
10915 GET_MODE_PRECISION (MODE) - 1, and only the low-order bit of FOO
10916 may be nonzero.  */
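/* As an illustration (assuming STORE_FLAG_VALUE == -1): if FOO is known to
   be 0 or 1, (eq FOO 0) is -1 when FOO == 0 and 0 otherwise, so a logical
   right shift by the precision minus one yields 1 for FOO == 0 and 0 for
   FOO == 1, which is exactly (xor FOO 1).  */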
10917 if (code == LSHIFTRT
10918 && XEXP (varop, 1) == const0_rtx
10919 && GET_MODE (XEXP (varop, 0)) == int_result_mode
10920 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
10921 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10922 && STORE_FLAG_VALUE == -1
10923 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
10924 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
10925 int_result_mode, &complement_p))
10926 {
10927 varop = XEXP (varop, 0);
10928 count = 0;
10929 continue;
10930 }
10931 break;
10932
10933 case NEG:
10934 /* The following rules apply only to scalars. */
10935 if (shift_mode != shift_unit_mode)
10936 break;
10937 int_result_mode = as_a <scalar_int_mode> (result_mode);
10938
10939 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
10940 than the number of bits in the mode is equivalent to A. */
10941 if (code == LSHIFTRT
10942 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
10943 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
10944 {
10945 varop = XEXP (varop, 0);
10946 count = 0;
10947 continue;
10948 }
10949
10950 /* NEG commutes with ASHIFT since it is multiplication. Move the
10951 NEG outside to allow shifts to combine. */
10952 if (code == ASHIFT
10953 && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
10954 int_result_mode, &complement_p))
10955 {
10956 varop = XEXP (varop, 0);
10957 continue;
10958 }
10959 break;
10960
10961 case PLUS:
10962 /* The following rules apply only to scalars. */
10963 if (shift_mode != shift_unit_mode)
10964 break;
10965 int_result_mode = as_a <scalar_int_mode> (result_mode);
10966
10967 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
10968 is one less than the number of bits in the mode is
10969 equivalent to (xor A 1). */
10970 if (code == LSHIFTRT
10971 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
10972 && XEXP (varop, 1) == constm1_rtx
10973 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
10974 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
10975 int_result_mode, &complement_p))
10976 {
10977 count = 0;
10978 varop = XEXP (varop, 0);
10979 continue;
10980 }
10981
10982 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
10983 that might be nonzero in BAR are those being shifted out and those
10984 bits are known zero in FOO, we can replace the PLUS with FOO.
10985 Similarly in the other operand order. This code occurs when
10986 we are computing the size of a variable-size array. */
10987
10988 if ((code == ASHIFTRT || code == LSHIFTRT)
10989 && count < HOST_BITS_PER_WIDE_INT
10990 && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
10991 && (nonzero_bits (XEXP (varop, 1), int_result_mode)
10992 & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
10993 {
10994 varop = XEXP (varop, 0);
10995 continue;
10996 }
10997 else if ((code == ASHIFTRT || code == LSHIFTRT)
10998 && count < HOST_BITS_PER_WIDE_INT
10999 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11000 && 0 == (nonzero_bits (XEXP (varop, 0), int_result_mode)
11001 >> count)
11002 && 0 == (nonzero_bits (XEXP (varop, 0), int_result_mode)
11003 & nonzero_bits (XEXP (varop, 1), int_result_mode)))
11004 {
11005 varop = XEXP (varop, 1);
11006 continue;
11007 }
11008
11009 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
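/* Concretely (an illustrative case): ((x + 5) << 2) == ((x << 2) + 20),
   so C' is simply C shifted left by N, truncated to the mode.  */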
11010 if (code == ASHIFT
11011 && CONST_INT_P (XEXP (varop, 1))
11012 && (new_rtx = simplify_const_binary_operation
11013 (ASHIFT, int_result_mode,
11014 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11015 GEN_INT (count))) != 0
11016 && CONST_INT_P (new_rtx)
11017 && merge_outer_ops (&outer_op, &outer_const, PLUS,
11018 INTVAL (new_rtx), int_result_mode,
11019 &complement_p))
11020 {
11021 varop = XEXP (varop, 0);
11022 continue;
11023 }
11024
11025 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
11026 signbit', and attempt to change the PLUS to an XOR and move it to
11027 the outer operation, as is done above for logical shifts in the
11028 AND/IOR/XOR case.  See the logical handling above for the
11029 reasoning behind this transformation. */
11030 if (code == LSHIFTRT
11031 && CONST_INT_P (XEXP (varop, 1))
11032 && mode_signbit_p (int_result_mode, XEXP (varop, 1))
11033 && (new_rtx = simplify_const_binary_operation
11034 (code, int_result_mode,
11035 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11036 GEN_INT (count))) != 0
11037 && CONST_INT_P (new_rtx)
11038 && merge_outer_ops (&outer_op, &outer_const, XOR,
11039 INTVAL (new_rtx), int_result_mode,
11040 &complement_p))
11041 {
11042 varop = XEXP (varop, 0);
11043 continue;
11044 }
11045
11046 break;
11047
11048 case MINUS:
11049 /* The following rules apply only to scalars. */
11050 if (shift_mode != shift_unit_mode)
11051 break;
11052 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11053
11054 /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
11055 with C the size of VAROP - 1 and the shift is logical if
11056 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11057 we have a (gt X 0) operation. If the shift is arithmetic with
11058 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
11059 we have a (neg (gt X 0)) operation. */
11060
11061 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11062 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
11063 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11064 && (code == LSHIFTRT || code == ASHIFTRT)
11065 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11066 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
11067 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11068 {
11069 count = 0;
11070 varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
11071 const0_rtx);
11072
11073 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11074 varop = gen_rtx_NEG (int_varop_mode, varop);
11075
11076 continue;
11077 }
11078 break;
11079
11080 case TRUNCATE:
11081 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
11082 if the truncate does not affect the value. */
11083 if (code == LSHIFTRT
11084 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
11085 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11086 && (INTVAL (XEXP (XEXP (varop, 0), 1))
11087 >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
11088 - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
11089 {
11090 rtx varop_inner = XEXP (varop, 0);
11091
11092 varop_inner
11093 = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
11094 XEXP (varop_inner, 0),
11095 GEN_INT
11096 (count + INTVAL (XEXP (varop_inner, 1))));
11097 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
11098 count = 0;
11099 continue;
11100 }
11101 break;
11102
11103 default:
11104 break;
11105 }
11106
11107 break;
11108 }
11109
11110 shift_mode = result_mode;
11111 if (shift_mode != mode)
11112 {
11113 /* We only change the modes of scalar shifts. */
11114 int_mode = as_a <scalar_int_mode> (mode);
11115 int_result_mode = as_a <scalar_int_mode> (result_mode);
11116 shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
11117 int_mode, outer_op, outer_const);
11118 }
11119
11120 /* We have now finished analyzing the shift. The result should be
11121 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
11122 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
11123 to the result of the shift. OUTER_CONST is the relevant constant,
11124 but we must turn off all bits turned off in the shift. */
11125
11126 if (outer_op == UNKNOWN
11127 && orig_code == code && orig_count == count
11128 && varop == orig_varop
11129 && shift_mode == GET_MODE (varop))
11130 return NULL_RTX;
11131
11132 /* Make a SUBREG if necessary. If we can't make it, fail. */
11133 varop = gen_lowpart (shift_mode, varop);
11134 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
11135 return NULL_RTX;
11136
11137 /* If we have an outer operation and we just made a shift, it is
11138 possible that we could have simplified the shift were it not
11139 for the outer operation. So try to do the simplification
11140 recursively. */
11141
11142 if (outer_op != UNKNOWN)
11143 x = simplify_shift_const_1 (code, shift_mode, varop, count);
11144 else
11145 x = NULL_RTX;
11146
11147 if (x == NULL_RTX)
11148 x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));
11149
11150 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
11151 turn off all the bits that the shift would have turned off. */
11152 if (orig_code == LSHIFTRT && result_mode != shift_mode)
11153 /* We only change the modes of scalar shifts. */
11154 x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
11155 x, GET_MODE_MASK (result_mode) >> orig_count);
11156
11157 /* Do the remainder of the processing in RESULT_MODE. */
11158 x = gen_lowpart_or_truncate (result_mode, x);
11159
11160 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
11161 operation. */
11162 if (complement_p)
11163 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
11164
11165 if (outer_op != UNKNOWN)
11166 {
11167 int_result_mode = as_a <scalar_int_mode> (result_mode);
11168
11169 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
11170 && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
11171 outer_const = trunc_int_for_mode (outer_const, int_result_mode);
11172
11173 if (outer_op == AND)
11174 x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
11175 else if (outer_op == SET)
11176 {
11177 /* This means that we have determined that the result is
11178 equivalent to a constant. This should be rare. */
11179 if (!side_effects_p (x))
11180 x = GEN_INT (outer_const);
11181 }
11182 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
11183 x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
11184 else
11185 x = simplify_gen_binary (outer_op, int_result_mode, x,
11186 GEN_INT (outer_const));
11187 }
11188
11189 return x;
11190 }
11191
11192 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
11193 The result of the shift is RESULT_MODE. If we cannot simplify it,
11194 return X or, if it is NULL, synthesize the expression with
11195 simplify_gen_binary. Otherwise, return a simplified value.
11196
11197 The shift is normally computed in the widest mode we find in VAROP, as
11198 long as it isn't a different number of words than RESULT_MODE. Exceptions
11199 are ASHIFTRT and ROTATE, which are always done in their original mode. */
11200
11201 static rtx
11202 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
11203 rtx varop, int count)
11204 {
11205 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
11206 if (tem)
11207 return tem;
11208
11209 if (!x)
11210 x = simplify_gen_binary (code, GET_MODE (varop), varop, GEN_INT (count));
11211 if (GET_MODE (x) != result_mode)
11212 x = gen_lowpart (result_mode, x);
11213 return x;
11214 }
11215
11216 \f
11217 /* A subroutine of recog_for_combine. See there for arguments and
11218 return value. */
11219
11220 static int
11221 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11222 {
11223 rtx pat = *pnewpat;
11224 rtx pat_without_clobbers;
11225 int insn_code_number;
11226 int num_clobbers_to_add = 0;
11227 int i;
11228 rtx notes = NULL_RTX;
11229 rtx old_notes, old_pat;
11230 int old_icode;
11231
11232 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
11233 we use to indicate that something didn't match. If we find such a
11234 thing, force rejection. */
11235 if (GET_CODE (pat) == PARALLEL)
11236 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
11237 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
11238 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
11239 return -1;
11240
11241 old_pat = PATTERN (insn);
11242 old_notes = REG_NOTES (insn);
11243 PATTERN (insn) = pat;
11244 REG_NOTES (insn) = NULL_RTX;
11245
11246 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11247 if (dump_file && (dump_flags & TDF_DETAILS))
11248 {
11249 if (insn_code_number < 0)
11250 fputs ("Failed to match this instruction:\n", dump_file);
11251 else
11252 fputs ("Successfully matched this instruction:\n", dump_file);
11253 print_rtl_single (dump_file, pat);
11254 }
11255
11256 /* If the pattern isn't recognized, there is the possibility that we previously had an insn
11257 that clobbered some register as a side effect, but the combined
11258 insn doesn't need to do that. So try once more without the clobbers
11259 unless this represents an ASM insn. */
11260
11261 if (insn_code_number < 0 && ! check_asm_operands (pat)
11262 && GET_CODE (pat) == PARALLEL)
11263 {
11264 int pos;
11265
11266 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
11267 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
11268 {
11269 if (i != pos)
11270 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
11271 pos++;
11272 }
11273
11274 SUBST_INT (XVECLEN (pat, 0), pos);
11275
11276 if (pos == 1)
11277 pat = XVECEXP (pat, 0, 0);
11278
11279 PATTERN (insn) = pat;
11280 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11281 if (dump_file && (dump_flags & TDF_DETAILS))
11282 {
11283 if (insn_code_number < 0)
11284 fputs ("Failed to match this instruction:\n", dump_file);
11285 else
11286 fputs ("Successfully matched this instruction:\n", dump_file);
11287 print_rtl_single (dump_file, pat);
11288 }
11289 }
11290
11291 pat_without_clobbers = pat;
11292
11293 PATTERN (insn) = old_pat;
11294 REG_NOTES (insn) = old_notes;
11295
11296 /* Recognize all no-op sets; these will be killed by a follow-up pass. */
11297 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
11298 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
11299
11300 /* If we had any clobbers to add, make a new pattern that contains
11301 them. Then check to make sure that all of them are dead. */
11302 if (num_clobbers_to_add)
11303 {
11304 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
11305 rtvec_alloc (GET_CODE (pat) == PARALLEL
11306 ? (XVECLEN (pat, 0)
11307 + num_clobbers_to_add)
11308 : num_clobbers_to_add + 1));
11309
11310 if (GET_CODE (pat) == PARALLEL)
11311 for (i = 0; i < XVECLEN (pat, 0); i++)
11312 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
11313 else
11314 XVECEXP (newpat, 0, 0) = pat;
11315
11316 add_clobbers (newpat, insn_code_number);
11317
11318 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
11319 i < XVECLEN (newpat, 0); i++)
11320 {
11321 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
11322 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
11323 return -1;
11324 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
11325 {
11326 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
11327 notes = alloc_reg_note (REG_UNUSED,
11328 XEXP (XVECEXP (newpat, 0, i), 0), notes);
11329 }
11330 }
11331 pat = newpat;
11332 }
11333
11334 if (insn_code_number >= 0
11335 && insn_code_number != NOOP_MOVE_INSN_CODE)
11336 {
11337 old_pat = PATTERN (insn);
11338 old_notes = REG_NOTES (insn);
11339 old_icode = INSN_CODE (insn);
11340 PATTERN (insn) = pat;
11341 REG_NOTES (insn) = notes;
11342 INSN_CODE (insn) = insn_code_number;
11343
11344 /* Allow targets to reject combined insn. */
11345 if (!targetm.legitimate_combined_insn (insn))
11346 {
11347 if (dump_file && (dump_flags & TDF_DETAILS))
11348 fputs ("Instruction not appropriate for target.",
11349 dump_file);
11350
11351 /* Callers expect recog_for_combine to strip
11352 clobbers from the pattern on failure. */
11353 pat = pat_without_clobbers;
11354 notes = NULL_RTX;
11355
11356 insn_code_number = -1;
11357 }
11358
11359 PATTERN (insn) = old_pat;
11360 REG_NOTES (insn) = old_notes;
11361 INSN_CODE (insn) = old_icode;
11362 }
11363
11364 *pnewpat = pat;
11365 *pnotes = notes;
11366
11367 return insn_code_number;
11368 }
11369
11370 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11371 expressed as an AND and maybe an LSHIFTRT, to that formulation.
11372 Return whether anything was so changed. */
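/* For example (assuming BITS_BIG_ENDIAN is clear, values chosen only for
   illustration): a source (zero_extract:SI X (const_int 8) (const_int 4))
   becomes (and:SI (lshiftrt:SI X (const_int 4)) (const_int 255)), and a
   ZERO_EXTRACT used as a destination is rewritten as a plain SET of the
   register, with the field merged in via AND, ASHIFT and IOR.  */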
11373
11374 static bool
11375 change_zero_ext (rtx pat)
11376 {
11377 bool changed = false;
11378 rtx *src = &SET_SRC (pat);
11379
11380 subrtx_ptr_iterator::array_type array;
11381 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11382 {
11383 rtx x = **iter;
11384 scalar_int_mode mode, inner_mode;
11385 if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
11386 continue;
11387 int size;
11388
11389 if (GET_CODE (x) == ZERO_EXTRACT
11390 && CONST_INT_P (XEXP (x, 1))
11391 && CONST_INT_P (XEXP (x, 2))
11392 && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
11393 && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
11394 {
11395 size = INTVAL (XEXP (x, 1));
11396
11397 int start = INTVAL (XEXP (x, 2));
11398 if (BITS_BIG_ENDIAN)
11399 start = GET_MODE_PRECISION (inner_mode) - size - start;
11400
11401 if (start)
11402 x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0), GEN_INT (start));
11403 else
11404 x = XEXP (x, 0);
11405 if (mode != inner_mode)
11406 x = gen_lowpart_SUBREG (mode, x);
11407 }
11408 else if (GET_CODE (x) == ZERO_EXTEND
11409 && GET_CODE (XEXP (x, 0)) == SUBREG
11410 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
11411 && !paradoxical_subreg_p (XEXP (x, 0))
11412 && subreg_lowpart_p (XEXP (x, 0)))
11413 {
11414 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11415 size = GET_MODE_PRECISION (inner_mode);
11416 x = SUBREG_REG (XEXP (x, 0));
11417 if (GET_MODE (x) != mode)
11418 x = gen_lowpart_SUBREG (mode, x);
11419 }
11420 else if (GET_CODE (x) == ZERO_EXTEND
11421 && REG_P (XEXP (x, 0))
11422 && HARD_REGISTER_P (XEXP (x, 0))
11423 && can_change_dest_mode (XEXP (x, 0), 0, mode))
11424 {
11425 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11426 size = GET_MODE_PRECISION (inner_mode);
11427 x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
11428 }
11429 else
11430 continue;
11431
11432 if (!(GET_CODE (x) == LSHIFTRT
11433 && CONST_INT_P (XEXP (x, 1))
11434 && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
11435 {
11436 wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
11437 x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
11438 }
11439
11440 SUBST (**iter, x);
11441 changed = true;
11442 }
11443
11444 if (changed)
11445 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11446 maybe_swap_commutative_operands (**iter);
11447
11448 rtx *dst = &SET_DEST (pat);
11449 scalar_int_mode mode;
11450 if (GET_CODE (*dst) == ZERO_EXTRACT
11451 && REG_P (XEXP (*dst, 0))
11452 && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
11453 && CONST_INT_P (XEXP (*dst, 1))
11454 && CONST_INT_P (XEXP (*dst, 2)))
11455 {
11456 rtx reg = XEXP (*dst, 0);
11457 int width = INTVAL (XEXP (*dst, 1));
11458 int offset = INTVAL (XEXP (*dst, 2));
11459 int reg_width = GET_MODE_PRECISION (mode);
11460 if (BITS_BIG_ENDIAN)
11461 offset = reg_width - width - offset;
11462
11463 rtx x, y, z, w;
11464 wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
11465 wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
11466 x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
11467 if (offset)
11468 y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
11469 else
11470 y = SET_SRC (pat);
11471 z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
11472 w = gen_rtx_IOR (mode, x, z);
11473 SUBST (SET_DEST (pat), reg);
11474 SUBST (SET_SRC (pat), w);
11475
11476 changed = true;
11477 }
11478
11479 return changed;
11480 }
11481
11482 /* Like recog, but we receive the address of a pointer to a new pattern.
11483 We try to match the rtx that the pointer points to.
11484 If that fails, we may try to modify or replace the pattern,
11485 storing the replacement into the same pointer object.
11486
11487 Modifications include deletion or addition of CLOBBERs. If the
11488 instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11489 to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11490 (and undo if that fails).
11491
11492 PNOTES is a pointer to a location where any REG_UNUSED notes added for
11493 the CLOBBERs are placed.
11494
11495 The value is the final insn code from the pattern ultimately matched,
11496 or -1. */
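/* As an illustrative sketch of the ZERO_EXTEND/ZERO_EXTRACT rewrite
   performed by change_zero_ext (assuming 32-bit SImode and
   !BITS_BIG_ENDIAN):

     (zero_extend:SI (subreg:QI (reg:SI r) 0))
       -> (and:SI (reg:SI r) (const_int 255))

     (zero_extract:SI (reg:SI r) (const_int 4) (const_int 3))
       -> (and:SI (lshiftrt:SI (reg:SI r) (const_int 3)) (const_int 15))

   so a target without zero_extend/zero_extract patterns may still be able
   to match the AND/LSHIFTRT form.  */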
11497
11498 static int
11499 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11500 {
11501 rtx pat = *pnewpat;
11502 int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11503 if (insn_code_number >= 0 || check_asm_operands (pat))
11504 return insn_code_number;
11505
11506 void *marker = get_undo_marker ();
11507 bool changed = false;
11508
11509 if (GET_CODE (pat) == SET)
11510 changed = change_zero_ext (pat);
11511 else if (GET_CODE (pat) == PARALLEL)
11512 {
11513 int i;
11514 for (i = 0; i < XVECLEN (pat, 0); i++)
11515 {
11516 rtx set = XVECEXP (pat, 0, i);
11517 if (GET_CODE (set) == SET)
11518 changed |= change_zero_ext (set);
11519 }
11520 }
11521
11522 if (changed)
11523 {
11524 insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11525
11526 if (insn_code_number < 0)
11527 undo_to_marker (marker);
11528 }
11529
11530 return insn_code_number;
11531 }
11532 \f
11533 /* Like gen_lowpart_general but for use by combine. In combine it
11534 is not possible to create any new pseudoregs. However, it is
11535 safe to create invalid memory addresses, because combine will
11536 try to recognize them and all they will do is make the combine
11537 attempt fail.
11538
11539 If for some reason this cannot do its job, an rtx
11540 (clobber (const_int 0)) is returned.
11541 An insn containing that will not be recognized. */
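/* For illustration (a hedged sketch, not an exhaustive description):
   asking for the QImode low part of an SImode pseudo typically yields a
   lowpart SUBREG such as (subreg:QI (reg:SI 99) 0) on a little-endian
   target, while a volatile memory reference is refused and
   (clobber (const_int 0)) is returned instead.  */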
11542
11543 static rtx
11544 gen_lowpart_for_combine (machine_mode omode, rtx x)
11545 {
11546 machine_mode imode = GET_MODE (x);
11547 unsigned int osize = GET_MODE_SIZE (omode);
11548 unsigned int isize = GET_MODE_SIZE (imode);
11549 rtx result;
11550
11551 if (omode == imode)
11552 return x;
11553
11554 /* We can only support MODE being wider than a word if X is a
11555 constant integer or has a mode the same size. */
11556 if (GET_MODE_SIZE (omode) > UNITS_PER_WORD
11557 && ! (CONST_SCALAR_INT_P (x) || isize == osize))
11558 goto fail;
11559
11560 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
11561 won't know what to do. So we will strip off the SUBREG here and
11562 process normally. */
11563 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11564 {
11565 x = SUBREG_REG (x);
11566
11567 /* For use in case we fall down into the address adjustments
11568 further below, adjust the known mode and size of x (imode and
11569 isize), since we just changed x. */
11570 imode = GET_MODE (x);
11571
11572 if (imode == omode)
11573 return x;
11574
11575 isize = GET_MODE_SIZE (imode);
11576 }
11577
11578 result = gen_lowpart_common (omode, x);
11579
11580 if (result)
11581 return result;
11582
11583 if (MEM_P (x))
11584 {
11585 int offset = 0;
11586
11587 /* Refuse to work on a volatile memory ref or one with a mode-dependent
11588 address. */
11589 if (MEM_VOLATILE_P (x)
11590 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11591 goto fail;
11592
11593 /* If we want to refer to something bigger than the original memref,
11594 generate a paradoxical subreg instead. That will force a reload
11595 of the original memref X. */
11596 if (paradoxical_subreg_p (omode, imode))
11597 return gen_rtx_SUBREG (omode, x, 0);
11598
11599 if (WORDS_BIG_ENDIAN)
11600 offset = MAX (isize, UNITS_PER_WORD) - MAX (osize, UNITS_PER_WORD);
11601
11602 /* Adjust the address so that the address-after-the-data is
11603 unchanged. */
11604 if (BYTES_BIG_ENDIAN)
11605 offset -= MIN (UNITS_PER_WORD, osize) - MIN (UNITS_PER_WORD, isize);
11606
11607 return adjust_address_nv (x, omode, offset);
11608 }
11609
11610 /* If X is a comparison operator, rewrite it in a new mode. This
11611 probably won't match, but may allow further simplifications. */
11612 else if (COMPARISON_P (x))
11613 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11614
11615 /* If we couldn't simplify X any other way, just enclose it in a
11616 SUBREG. Normally, this SUBREG won't match, but some patterns may
11617 include an explicit SUBREG or we may simplify it further in combine. */
11618 else
11619 {
11620 rtx res;
11621
11622 if (imode == VOIDmode)
11623 {
11624 imode = int_mode_for_mode (omode).require ();
11625 x = gen_lowpart_common (imode, x);
11626 if (x == NULL)
11627 goto fail;
11628 }
11629 res = lowpart_subreg (omode, x, imode);
11630 if (res)
11631 return res;
11632 }
11633
11634 fail:
11635 return gen_rtx_CLOBBER (omode, const0_rtx);
11636 }
11637 \f
11638 /* Try to simplify a comparison between OP0 and a constant OP1,
11639 where CODE is the comparison code that will be tested, into a
11640 (CODE OP0 const0_rtx) form.
11641
11642 The result is a possibly different comparison code to use.
11643 *POP1 may be updated. */
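/* A few illustrative canonicalizations performed below (a hedged sketch,
   assuming 32-bit SImode):

     (lt  X (const_int 5))           -> code LE,  *POP1 = (const_int 4)
     (ltu X (const_int 0x80000000))  -> code GE,  *POP1 = (const_int 0)
     (gtu X (const_int 0))           -> code NE,  *POP1 = (const_int 0)  */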
11644
11645 static enum rtx_code
11646 simplify_compare_const (enum rtx_code code, machine_mode mode,
11647 rtx op0, rtx *pop1)
11648 {
11649 scalar_int_mode int_mode;
11650 HOST_WIDE_INT const_op = INTVAL (*pop1);
11651
11652 /* Get the constant we are comparing against and turn off all bits
11653 not on in our mode. */
11654 if (mode != VOIDmode)
11655 const_op = trunc_int_for_mode (const_op, mode);
11656
11657 /* If we are comparing against a constant power of two and the value
11658 being compared can only have that single bit nonzero (e.g., it was
11659 `and'ed with that bit), we can replace this with a comparison
11660 with zero. */
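/* For example (an illustrative sketch): if OP0 is (and:SI Y (const_int 4)),
   so that nonzero_bits shows only bit 2 can be set, then
   (eq OP0 (const_int 4)) is rewritten as (ne OP0 (const_int 0)).  */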
11661 if (const_op
11662 && (code == EQ || code == NE || code == GE || code == GEU
11663 || code == LT || code == LTU)
11664 && is_a <scalar_int_mode> (mode, &int_mode)
11665 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11666 && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
11667 && (nonzero_bits (op0, int_mode)
11668 == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
11669 {
11670 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11671 const_op = 0;
11672 }
11673
11674 /* Similarly, if we are comparing a value known to be either -1 or
11675 0 with -1, change it to the opposite comparison against zero. */
11676 if (const_op == -1
11677 && (code == EQ || code == NE || code == GT || code == LE
11678 || code == GEU || code == LTU)
11679 && is_a <scalar_int_mode> (mode, &int_mode)
11680 && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
11681 {
11682 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11683 const_op = 0;
11684 }
11685
11686 /* Do some canonicalizations based on the comparison code. We prefer
11687 comparisons against zero and then prefer equality comparisons.
11688 If we can reduce the size of a constant, we will do that too. */
11689 switch (code)
11690 {
11691 case LT:
11692 /* < C is equivalent to <= (C - 1) */
11693 if (const_op > 0)
11694 {
11695 const_op -= 1;
11696 code = LE;
11697 /* ... fall through to LE case below. */
11698 gcc_fallthrough ();
11699 }
11700 else
11701 break;
11702
11703 case LE:
11704 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
11705 if (const_op < 0)
11706 {
11707 const_op += 1;
11708 code = LT;
11709 }
11710
11711 /* If we are doing a <= 0 comparison on a value known to have
11712 a zero sign bit, we can replace this with == 0. */
11713 else if (const_op == 0
11714 && is_a <scalar_int_mode> (mode, &int_mode)
11715 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11716 && (nonzero_bits (op0, int_mode)
11717 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11718 == 0)
11719 code = EQ;
11720 break;
11721
11722 case GE:
11723 /* >= C is equivalent to > (C - 1). */
11724 if (const_op > 0)
11725 {
11726 const_op -= 1;
11727 code = GT;
11728 /* ... fall through to GT below. */
11729 gcc_fallthrough ();
11730 }
11731 else
11732 break;
11733
11734 case GT:
11735 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11736 if (const_op < 0)
11737 {
11738 const_op += 1;
11739 code = GE;
11740 }
11741
11742 /* If we are doing a > 0 comparison on a value known to have
11743 a zero sign bit, we can replace this with != 0. */
11744 else if (const_op == 0
11745 && is_a <scalar_int_mode> (mode, &int_mode)
11746 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11747 && (nonzero_bits (op0, int_mode)
11748 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11749 == 0)
11750 code = NE;
11751 break;
11752
11753 case LTU:
11754 /* < C is equivalent to <= (C - 1). */
11755 if (const_op > 0)
11756 {
11757 const_op -= 1;
11758 code = LEU;
11759 /* ... fall through ... */
11760 }
11761 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11762 else if (is_a <scalar_int_mode> (mode, &int_mode)
11763 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11764 && ((unsigned HOST_WIDE_INT) const_op
11765 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11766 {
11767 const_op = 0;
11768 code = GE;
11769 break;
11770 }
11771 else
11772 break;
11773
11774 case LEU:
11775 /* unsigned <= 0 is equivalent to == 0 */
11776 if (const_op == 0)
11777 code = EQ;
11778 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11779 else if (is_a <scalar_int_mode> (mode, &int_mode)
11780 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11781 && ((unsigned HOST_WIDE_INT) const_op
11782 == ((HOST_WIDE_INT_1U
11783 << (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
11784 {
11785 const_op = 0;
11786 code = GE;
11787 }
11788 break;
11789
11790 case GEU:
11791 /* >= C is equivalent to > (C - 1). */
11792 if (const_op > 1)
11793 {
11794 const_op -= 1;
11795 code = GTU;
11796 /* ... fall through ... */
11797 }
11798
11799 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
11800 else if (is_a <scalar_int_mode> (mode, &int_mode)
11801 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11802 && ((unsigned HOST_WIDE_INT) const_op
11803 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11804 {
11805 const_op = 0;
11806 code = LT;
11807 break;
11808 }
11809 else
11810 break;
11811
11812 case GTU:
11813 /* unsigned > 0 is equivalent to != 0 */
11814 if (const_op == 0)
11815 code = NE;
11816 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
11817 else if (is_a <scalar_int_mode> (mode, &int_mode)
11818 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11819 && ((unsigned HOST_WIDE_INT) const_op
11820 == (HOST_WIDE_INT_1U
11821 << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
11822 {
11823 const_op = 0;
11824 code = LT;
11825 }
11826 break;
11827
11828 default:
11829 break;
11830 }
11831
11832 *pop1 = GEN_INT (const_op);
11833 return code;
11834 }
11835 \f
11836 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
11837 comparison code that will be tested.
11838
11839 The result is a possibly different comparison code to use. *POP0 and
11840 *POP1 may be updated.
11841
11842 It is possible that we might detect that a comparison is either always
11843 true or always false. However, we do not perform general constant
11844 folding in combine, so this knowledge isn't useful. Such tautologies
11845 should have been detected earlier. Hence we ignore all such cases. */
11846
11847 static enum rtx_code
11848 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
11849 {
11850 rtx op0 = *pop0;
11851 rtx op1 = *pop1;
11852 rtx tem, tem1;
11853 int i;
11854 scalar_int_mode mode, inner_mode, tmode;
11855 opt_scalar_int_mode tmode_iter;
11856
11857 /* Try a few ways of applying the same transformation to both operands. */
11858 while (1)
11859 {
11860 /* The test below this one won't handle SIGN_EXTENDs on machines without
11861 WORD_REGISTER_OPERATIONS, so check specially. */
11862 if (!WORD_REGISTER_OPERATIONS
11863 && code != GTU && code != GEU && code != LTU && code != LEU
11864 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
11865 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11866 && GET_CODE (XEXP (op1, 0)) == ASHIFT
11867 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
11868 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
11869 && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
11870 && (is_a <scalar_int_mode>
11871 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
11872 && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
11873 && CONST_INT_P (XEXP (op0, 1))
11874 && XEXP (op0, 1) == XEXP (op1, 1)
11875 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11876 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
11877 && (INTVAL (XEXP (op0, 1))
11878 == (GET_MODE_PRECISION (mode)
11879 - GET_MODE_PRECISION (inner_mode))))
11880 {
11881 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
11882 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
11883 }
11884
11885 /* If both operands are the same constant shift, see if we can ignore the
11886 shift. We can if the shift is a rotate or if the bits shifted out of
11887 this shift are known to be zero for both inputs and if the type of
11888 comparison is compatible with the shift. */
11889 if (GET_CODE (op0) == GET_CODE (op1)
11890 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
11891 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
11892 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
11893 && (code != GT && code != LT && code != GE && code != LE))
11894 || (GET_CODE (op0) == ASHIFTRT
11895 && (code != GTU && code != LTU
11896 && code != GEU && code != LEU)))
11897 && CONST_INT_P (XEXP (op0, 1))
11898 && INTVAL (XEXP (op0, 1)) >= 0
11899 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
11900 && XEXP (op0, 1) == XEXP (op1, 1))
11901 {
11902 machine_mode mode = GET_MODE (op0);
11903 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
11904 int shift_count = INTVAL (XEXP (op0, 1));
11905
11906 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
11907 mask &= (mask >> shift_count) << shift_count;
11908 else if (GET_CODE (op0) == ASHIFT)
11909 mask = (mask & (mask << shift_count)) >> shift_count;
11910
11911 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
11912 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
11913 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
11914 else
11915 break;
11916 }
11917
11918 /* If both operands are AND's of a paradoxical SUBREG by constant, the
11919 SUBREGs are of the same mode, and, in both cases, the AND would
11920 be redundant if the comparison was done in the narrower mode,
11921 do the comparison in the narrower mode (e.g., we are AND'ing with 1
11922 and the operand's possibly nonzero bits are 0xffffff01; in that case
11923 if we only care about QImode, we don't need the AND). This case
11924 occurs if the output mode of an scc insn is not SImode and
11925 STORE_FLAG_VALUE == 1 (e.g., the 386).
11926
11927 Similarly, check for a case where the AND's are ZERO_EXTEND
11928 operations from some narrower mode even though a SUBREG is not
11929 present. */
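/* Illustrative sketch: with OP0 = (and:SI (subreg:SI (reg:QI a) 0)
   (const_int 255)) and OP1 = (and:SI (subreg:SI (reg:QI b) 0)
   (const_int 255)), the comparison can instead be done directly on
   (reg:QI a) and (reg:QI b) in QImode, using the unsigned form of the
   comparison code.  */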
11930
11931 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
11932 && CONST_INT_P (XEXP (op0, 1))
11933 && CONST_INT_P (XEXP (op1, 1)))
11934 {
11935 rtx inner_op0 = XEXP (op0, 0);
11936 rtx inner_op1 = XEXP (op1, 0);
11937 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
11938 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
11939 int changed = 0;
11940
11941 if (paradoxical_subreg_p (inner_op0)
11942 && GET_CODE (inner_op1) == SUBREG
11943 && (GET_MODE (SUBREG_REG (inner_op0))
11944 == GET_MODE (SUBREG_REG (inner_op1)))
11945 && (GET_MODE_PRECISION (GET_MODE (SUBREG_REG (inner_op0)))
11946 <= HOST_BITS_PER_WIDE_INT)
11947 && (0 == ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
11948 GET_MODE (SUBREG_REG (inner_op0)))))
11949 && (0 == ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
11950 GET_MODE (SUBREG_REG (inner_op1))))))
11951 {
11952 op0 = SUBREG_REG (inner_op0);
11953 op1 = SUBREG_REG (inner_op1);
11954
11955 /* The resulting comparison is always unsigned since we masked
11956 off the original sign bit. */
11957 code = unsigned_condition (code);
11958
11959 changed = 1;
11960 }
11961
11962 else if (c0 == c1)
11963 FOR_EACH_MODE_UNTIL (tmode,
11964 as_a <scalar_int_mode> (GET_MODE (op0)))
11965 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
11966 {
11967 op0 = gen_lowpart_or_truncate (tmode, inner_op0);
11968 op1 = gen_lowpart_or_truncate (tmode, inner_op1);
11969 code = unsigned_condition (code);
11970 changed = 1;
11971 break;
11972 }
11973
11974 if (! changed)
11975 break;
11976 }
11977
11978 /* If both operands are NOT, we can strip off the outer operation
11979 and adjust the comparison code for swapped operands; similarly for
11980 NEG, except that this must be an equality comparison. */
11981 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
11982 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
11983 && (code == EQ || code == NE)))
11984 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
11985
11986 else
11987 break;
11988 }
11989
11990 /* If the first operand is a constant, swap the operands and adjust the
11991 comparison code appropriately, but don't do this if the second operand
11992 is already a constant integer. */
11993 if (swap_commutative_operands_p (op0, op1))
11994 {
11995 std::swap (op0, op1);
11996 code = swap_condition (code);
11997 }
11998
11999 /* We now enter a loop during which we will try to simplify the comparison.
12000 For the most part, we only are concerned with comparisons with zero,
12001 but some things may really be comparisons with zero but not start
12002 out looking that way. */
12003
12004 while (CONST_INT_P (op1))
12005 {
12006 machine_mode raw_mode = GET_MODE (op0);
12007 scalar_int_mode int_mode;
12008 int equality_comparison_p;
12009 int sign_bit_comparison_p;
12010 int unsigned_comparison_p;
12011 HOST_WIDE_INT const_op;
12012
12013 /* We only want to handle integral modes. This catches VOIDmode,
12014 CCmode, and the floating-point modes. An exception is that we
12015 can handle VOIDmode if OP0 is a COMPARE or a comparison
12016 operation. */
12017
12018 if (GET_MODE_CLASS (raw_mode) != MODE_INT
12019 && ! (raw_mode == VOIDmode
12020 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
12021 break;
12022
12023 /* Try to simplify the compare to constant, possibly changing the
12024 comparison op, and/or changing op1 to zero. */
12025 code = simplify_compare_const (code, raw_mode, op0, &op1);
12026 const_op = INTVAL (op1);
12027
12028 /* Compute some predicates to simplify code below. */
12029
12030 equality_comparison_p = (code == EQ || code == NE);
12031 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
12032 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
12033 || code == GEU);
12034
12035 /* If this is a sign bit comparison and we can do arithmetic in
12036 MODE, say that we will only be needing the sign bit of OP0. */
12037 if (sign_bit_comparison_p
12038 && is_a <scalar_int_mode> (raw_mode, &int_mode)
12039 && HWI_COMPUTABLE_MODE_P (int_mode))
12040 op0 = force_to_mode (op0, int_mode,
12041 HOST_WIDE_INT_1U
12042 << (GET_MODE_PRECISION (int_mode) - 1),
12043 0);
12044
12045 if (COMPARISON_P (op0))
12046 {
12047 /* We can't do anything if OP0 is a condition code value, rather
12048 than an actual data value. */
12049 if (const_op != 0
12050 || CC0_P (XEXP (op0, 0))
12051 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
12052 break;
12053
12054 /* Get the two operands being compared. */
12055 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
12056 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
12057 else
12058 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
12059
12060 /* Check for the cases where we simply want the result of the
12061 earlier test or the opposite of that result. */
12062 if (code == NE || code == EQ
12063 || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
12064 && (code == LT || code == GE)))
12065 {
12066 enum rtx_code new_code;
12067 if (code == LT || code == NE)
12068 new_code = GET_CODE (op0);
12069 else
12070 new_code = reversed_comparison_code (op0, NULL);
12071
12072 if (new_code != UNKNOWN)
12073 {
12074 code = new_code;
12075 op0 = tem;
12076 op1 = tem1;
12077 continue;
12078 }
12079 }
12080 break;
12081 }
12082
12083 if (raw_mode == VOIDmode)
12084 break;
12085 scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
12086
12087 /* Now try cases based on the opcode of OP0. If none of the cases
12088 does a "continue", we exit this loop immediately after the
12089 switch. */
12090
12091 unsigned int mode_width = GET_MODE_PRECISION (mode);
12092 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12093 switch (GET_CODE (op0))
12094 {
12095 case ZERO_EXTRACT:
12096 /* If we are extracting a single bit from a variable position in
12097 a constant that has only a single bit set and are comparing it
12098 with zero, we can convert this into an equality comparison
12099 between the position and the location of the single bit. */
12100 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
12101 have already reduced the shift count modulo the word size. */
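/* For illustration (a hedged sketch, assuming !BITS_BIG_ENDIAN and
   !SHIFT_COUNT_TRUNCATED): comparing
   (zero_extract:SI (const_int 4) (const_int 1) POS) with zero for
   equality tests whether POS selects bit 2, so (eq ... (const_int 0))
   becomes (ne POS (const_int 2)).  */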
12102 if (!SHIFT_COUNT_TRUNCATED
12103 && CONST_INT_P (XEXP (op0, 0))
12104 && XEXP (op0, 1) == const1_rtx
12105 && equality_comparison_p && const_op == 0
12106 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
12107 {
12108 if (BITS_BIG_ENDIAN)
12109 i = BITS_PER_WORD - 1 - i;
12110
12111 op0 = XEXP (op0, 2);
12112 op1 = GEN_INT (i);
12113 const_op = i;
12114
12115 /* Result is nonzero iff shift count is equal to I. */
12116 code = reverse_condition (code);
12117 continue;
12118 }
12119
12120 /* fall through */
12121
12122 case SIGN_EXTRACT:
12123 tem = expand_compound_operation (op0);
12124 if (tem != op0)
12125 {
12126 op0 = tem;
12127 continue;
12128 }
12129 break;
12130
12131 case NOT:
12132 /* If testing for equality, we can take the NOT of the constant. */
12133 if (equality_comparison_p
12134 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
12135 {
12136 op0 = XEXP (op0, 0);
12137 op1 = tem;
12138 continue;
12139 }
12140
12141 /* If just looking at the sign bit, reverse the sense of the
12142 comparison. */
12143 if (sign_bit_comparison_p)
12144 {
12145 op0 = XEXP (op0, 0);
12146 code = (code == GE ? LT : GE);
12147 continue;
12148 }
12149 break;
12150
12151 case NEG:
12152 /* If testing for equality, we can take the NEG of the constant. */
12153 if (equality_comparison_p
12154 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
12155 {
12156 op0 = XEXP (op0, 0);
12157 op1 = tem;
12158 continue;
12159 }
12160
12161 /* The remaining cases only apply to comparisons with zero. */
12162 if (const_op != 0)
12163 break;
12164
12165 /* When X is ABS or is known positive,
12166 (neg X) is < 0 if and only if X != 0. */
12167
12168 if (sign_bit_comparison_p
12169 && (GET_CODE (XEXP (op0, 0)) == ABS
12170 || (mode_width <= HOST_BITS_PER_WIDE_INT
12171 && (nonzero_bits (XEXP (op0, 0), mode)
12172 & (HOST_WIDE_INT_1U << (mode_width - 1)))
12173 == 0)))
12174 {
12175 op0 = XEXP (op0, 0);
12176 code = (code == LT ? NE : EQ);
12177 continue;
12178 }
12179
12180 /* If we have NEG of something whose two high-order bits are the
12181 same, we know that "(-a) < 0" is equivalent to "a > 0". */
12182 if (num_sign_bit_copies (op0, mode) >= 2)
12183 {
12184 op0 = XEXP (op0, 0);
12185 code = swap_condition (code);
12186 continue;
12187 }
12188 break;
12189
12190 case ROTATE:
12191 /* If we are testing equality and our count is a constant, we
12192 can perform the inverse operation on our RHS. */
12193 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
12194 && (tem = simplify_binary_operation (ROTATERT, mode,
12195 op1, XEXP (op0, 1))) != 0)
12196 {
12197 op0 = XEXP (op0, 0);
12198 op1 = tem;
12199 continue;
12200 }
12201
12202 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
12203 a particular bit. Convert it to an AND of a constant of that
12204 bit. This will be converted into a ZERO_EXTRACT. */
12205 if (const_op == 0 && sign_bit_comparison_p
12206 && CONST_INT_P (XEXP (op0, 1))
12207 && mode_width <= HOST_BITS_PER_WIDE_INT)
12208 {
12209 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12210 (HOST_WIDE_INT_1U
12211 << (mode_width - 1
12212 - INTVAL (XEXP (op0, 1)))));
12213 code = (code == LT ? NE : EQ);
12214 continue;
12215 }
12216
12217 /* Fall through. */
12218
12219 case ABS:
12220 /* ABS is ignorable inside an equality comparison with zero. */
12221 if (const_op == 0 && equality_comparison_p)
12222 {
12223 op0 = XEXP (op0, 0);
12224 continue;
12225 }
12226 break;
12227
12228 case SIGN_EXTEND:
12229 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
12230 (compare FOO CONST) if CONST fits in FOO's mode and we
12231 are either testing inequality or have an unsigned
12232 comparison with ZERO_EXTEND or a signed comparison with
12233 SIGN_EXTEND. But don't do it if we don't have a compare
12234 insn of the given mode, since we'd have to revert it
12235 later on, and then we wouldn't know whether to sign- or
12236 zero-extend. */
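/* E.g. (an illustrative sketch): (lt (sign_extend:DI (reg:SI r))
   (const_int 7)) can become (lt (reg:SI r) (const_int 7)), provided the
   target has an SImode compare insn.  */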
12237 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12238 && ! unsigned_comparison_p
12239 && HWI_COMPUTABLE_MODE_P (mode)
12240 && trunc_int_for_mode (const_op, mode) == const_op
12241 && have_insn_for (COMPARE, mode))
12242 {
12243 op0 = XEXP (op0, 0);
12244 continue;
12245 }
12246 break;
12247
12248 case SUBREG:
12249 /* Check for the case where we are comparing A - C1 with C2, that is
12250
12251 (subreg:MODE (plus (A) (-C1))) op (C2)
12252
12253 with C1 a constant, and try to lift the SUBREG, i.e. to do the
12254 comparison in the wider mode. One of the following two conditions
12255 must be true in order for this to be valid:
12256
12257 1. The mode extension results in the same bit pattern being added
12258 on both sides and the comparison is equality or unsigned. As
12259 C2 has been truncated to fit in MODE, the pattern can only be
12260 all 0s or all 1s.
12261
12262 2. The mode extension results in the sign bit being copied on
12263 each side.
12264
12265 The difficulty here is that we have predicates for A but not for
12266 (A - C1) so we need to check that C1 is within proper bounds so
12267 as to perturb A as little as possible. */
12268
12269 if (mode_width <= HOST_BITS_PER_WIDE_INT
12270 && subreg_lowpart_p (op0)
12271 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
12272 &inner_mode)
12273 && GET_MODE_PRECISION (inner_mode) > mode_width
12274 && GET_CODE (SUBREG_REG (op0)) == PLUS
12275 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
12276 {
12277 rtx a = XEXP (SUBREG_REG (op0), 0);
12278 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
12279
12280 if ((c1 > 0
12281 && (unsigned HOST_WIDE_INT) c1
12282 < HOST_WIDE_INT_1U << (mode_width - 1)
12283 && (equality_comparison_p || unsigned_comparison_p)
12284 /* (A - C1) zero-extends if it is positive and sign-extends
12285 if it is negative, C2 both zero- and sign-extends. */
12286 && ((0 == (nonzero_bits (a, inner_mode)
12287 & ~GET_MODE_MASK (mode))
12288 && const_op >= 0)
12289 /* (A - C1) sign-extends if it is positive and 1-extends
12290 if it is negative, C2 both sign- and 1-extends. */
12291 || (num_sign_bit_copies (a, inner_mode)
12292 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12293 - mode_width)
12294 && const_op < 0)))
12295 || ((unsigned HOST_WIDE_INT) c1
12296 < HOST_WIDE_INT_1U << (mode_width - 2)
12297 /* (A - C1) always sign-extends, like C2. */
12298 && num_sign_bit_copies (a, inner_mode)
12299 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12300 - (mode_width - 1))))
12301 {
12302 op0 = SUBREG_REG (op0);
12303 continue;
12304 }
12305 }
12306
12307 /* If the inner mode is narrower and we are extracting the low part,
12308 we can treat the SUBREG as if it were a ZERO_EXTEND. */
12309 if (paradoxical_subreg_p (op0))
12310 ;
12311 else if (subreg_lowpart_p (op0)
12312 && GET_MODE_CLASS (mode) == MODE_INT
12313 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12314 && (code == NE || code == EQ)
12315 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12316 && !paradoxical_subreg_p (op0)
12317 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12318 & ~GET_MODE_MASK (mode)) == 0)
12319 {
12320 /* Remove outer subregs that don't do anything. */
12321 tem = gen_lowpart (inner_mode, op1);
12322
12323 if ((nonzero_bits (tem, inner_mode)
12324 & ~GET_MODE_MASK (mode)) == 0)
12325 {
12326 op0 = SUBREG_REG (op0);
12327 op1 = tem;
12328 continue;
12329 }
12330 break;
12331 }
12332 else
12333 break;
12334
12335 /* FALLTHROUGH */
12336
12337 case ZERO_EXTEND:
12338 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12339 && (unsigned_comparison_p || equality_comparison_p)
12340 && HWI_COMPUTABLE_MODE_P (mode)
12341 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
12342 && const_op >= 0
12343 && have_insn_for (COMPARE, mode))
12344 {
12345 op0 = XEXP (op0, 0);
12346 continue;
12347 }
12348 break;
12349
12350 case PLUS:
12351 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
12352 this for equality comparisons due to pathological cases involving
12353 overflows. */
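/* For example: (eq (plus:SI X (const_int 7)) (const_int 10))
   simplifies to (eq X (const_int 3)).  */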
12354 if (equality_comparison_p
12355 && 0 != (tem = simplify_binary_operation (MINUS, mode,
12356 op1, XEXP (op0, 1))))
12357 {
12358 op0 = XEXP (op0, 0);
12359 op1 = tem;
12360 continue;
12361 }
12362
12363 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
12364 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
12365 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
12366 {
12367 op0 = XEXP (XEXP (op0, 0), 0);
12368 code = (code == LT ? EQ : NE);
12369 continue;
12370 }
12371 break;
12372
12373 case MINUS:
12374 /* We used to optimize signed comparisons against zero, but that
12375 was incorrect. Unsigned comparisons against zero (GTU, LEU)
12376 arrive here as equality comparisons, while (GEU, LTU) are
12377 optimized away. No need to special-case them. */
12378
12379 /* (eq (minus A B) C) -> (eq A (plus B C)) or
12380 (eq B (minus A C)), whichever simplifies. We can only do
12381 this for equality comparisons due to pathological cases involving
12382 overflows. */
12383 if (equality_comparison_p
12384 && 0 != (tem = simplify_binary_operation (PLUS, mode,
12385 XEXP (op0, 1), op1)))
12386 {
12387 op0 = XEXP (op0, 0);
12388 op1 = tem;
12389 continue;
12390 }
12391
12392 if (equality_comparison_p
12393 && 0 != (tem = simplify_binary_operation (MINUS, mode,
12394 XEXP (op0, 0), op1)))
12395 {
12396 op0 = XEXP (op0, 1);
12397 op1 = tem;
12398 continue;
12399 }
12400
12401 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
12402 of bits in X minus 1, is one iff X > 0. */
12403 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
12404 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12405 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
12406 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12407 {
12408 op0 = XEXP (op0, 1);
12409 code = (code == GE ? LE : GT);
12410 continue;
12411 }
12412 break;
12413
12414 case XOR:
12415 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
12416 if C is zero or B is a constant. */
12417 if (equality_comparison_p
12418 && 0 != (tem = simplify_binary_operation (XOR, mode,
12419 XEXP (op0, 1), op1)))
12420 {
12421 op0 = XEXP (op0, 0);
12422 op1 = tem;
12423 continue;
12424 }
12425 break;
12426
12427
12428 case IOR:
12429 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
12430 iff X <= 0. */
12431 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
12432 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
12433 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12434 {
12435 op0 = XEXP (op0, 1);
12436 code = (code == GE ? GT : LE);
12437 continue;
12438 }
12439 break;
12440
12441 case AND:
12442 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
12443 will be converted to a ZERO_EXTRACT later. */
12444 if (const_op == 0 && equality_comparison_p
12445 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12446 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
12447 {
12448 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
12449 XEXP (XEXP (op0, 0), 1));
12450 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12451 continue;
12452 }
12453
12454 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
12455 zero and X is a comparison and C1 and C2 describe only bits set
12456 in STORE_FLAG_VALUE, we can compare with X. */
12457 if (const_op == 0 && equality_comparison_p
12458 && mode_width <= HOST_BITS_PER_WIDE_INT
12459 && CONST_INT_P (XEXP (op0, 1))
12460 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12461 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12462 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12463 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12464 {
12465 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12466 << INTVAL (XEXP (XEXP (op0, 0), 1)));
12467 if ((~STORE_FLAG_VALUE & mask) == 0
12468 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12469 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12470 && COMPARISON_P (tem))))
12471 {
12472 op0 = XEXP (XEXP (op0, 0), 0);
12473 continue;
12474 }
12475 }
12476
12477 /* If we are doing an equality comparison of an AND of a bit equal
12478 to the sign bit, replace this with a LT or GE comparison of
12479 the underlying value. */
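/* E.g. in SImode: (eq (and:SI X (const_int 0x80000000)) (const_int 0))
   becomes (ge X (const_int 0)), and the NE form becomes
   (lt X (const_int 0)).  */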
12480 if (equality_comparison_p
12481 && const_op == 0
12482 && CONST_INT_P (XEXP (op0, 1))
12483 && mode_width <= HOST_BITS_PER_WIDE_INT
12484 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12485 == HOST_WIDE_INT_1U << (mode_width - 1)))
12486 {
12487 op0 = XEXP (op0, 0);
12488 code = (code == EQ ? GE : LT);
12489 continue;
12490 }
12491
12492 /* If this AND operation is really a ZERO_EXTEND from a narrower
12493 mode, the constant fits within that mode, and this is either an
12494 equality or unsigned comparison, try to do this comparison in
12495 the narrower mode.
12496
12497 Note that in
12498
12499 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12500 -> (ne:DI (reg:SI 4) (const_int 0))
12501
12502 the transformation is invalid unless TARGET_TRULY_NOOP_TRUNCATION
12503 allows it or the register is known to hold a value of the
12504 required mode. */
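/* Illustrative sketch: (eq:SI (and:SI (reg:SI x) (const_int 255))
   (const_int 18)) can be narrowed below to a QImode comparison of the
   low part of (reg:SI x) against (const_int 18), since 255 + 1 is a
   power of two and 18 fits in QImode.  */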
12505 if ((equality_comparison_p || unsigned_comparison_p)
12506 && CONST_INT_P (XEXP (op0, 1))
12507 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12508 & GET_MODE_MASK (mode))
12509 + 1)) >= 0
12510 && const_op >> i == 0
12511 && int_mode_for_size (i, 1).exists (&tmode))
12512 {
12513 op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12514 continue;
12515 }
12516
12517 /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12518 fits in both M1 and M2 and the SUBREG is either paradoxical
12519 or represents the low part, permute the SUBREG and the AND
12520 and try again. */
12521 if (GET_CODE (XEXP (op0, 0)) == SUBREG
12522 && CONST_INT_P (XEXP (op0, 1)))
12523 {
12524 unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12525 /* Require an integral mode, to avoid creating something like
12526 (AND:SF ...). */
12527 if ((is_a <scalar_int_mode>
12528 (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
12529 /* It is unsafe to commute the AND into the SUBREG if the
12530 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12531 not defined. As originally written the upper bits
12532 have a defined value due to the AND operation.
12533 However, if we commute the AND inside the SUBREG then
12534 they no longer have defined values and the meaning of
12535 the code has been changed.
12536 Also C1 should not change value in the smaller mode,
12537 see PR67028 (a positive C1 can become negative in the
12538 smaller mode, so that the AND no longer masks the
12539 upper bits). */
12540 && ((WORD_REGISTER_OPERATIONS
12541 && mode_width > GET_MODE_PRECISION (tmode)
12542 && mode_width <= BITS_PER_WORD
12543 && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12544 || (mode_width <= GET_MODE_PRECISION (tmode)
12545 && subreg_lowpart_p (XEXP (op0, 0))))
12546 && mode_width <= HOST_BITS_PER_WIDE_INT
12547 && HWI_COMPUTABLE_MODE_P (tmode)
12548 && (c1 & ~mask) == 0
12549 && (c1 & ~GET_MODE_MASK (tmode)) == 0
12550 && c1 != mask
12551 && c1 != GET_MODE_MASK (tmode))
12552 {
12553 op0 = simplify_gen_binary (AND, tmode,
12554 SUBREG_REG (XEXP (op0, 0)),
12555 gen_int_mode (c1, tmode));
12556 op0 = gen_lowpart (mode, op0);
12557 continue;
12558 }
12559 }
12560
12561 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
12562 if (const_op == 0 && equality_comparison_p
12563 && XEXP (op0, 1) == const1_rtx
12564 && GET_CODE (XEXP (op0, 0)) == NOT)
12565 {
12566 op0 = simplify_and_const_int (NULL_RTX, mode,
12567 XEXP (XEXP (op0, 0), 0), 1);
12568 code = (code == NE ? EQ : NE);
12569 continue;
12570 }
12571
12572 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12573 (eq (and (lshiftrt X) 1) 0).
12574 Also handle the case where (not X) is expressed using xor. */
12575 if (const_op == 0 && equality_comparison_p
12576 && XEXP (op0, 1) == const1_rtx
12577 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12578 {
12579 rtx shift_op = XEXP (XEXP (op0, 0), 0);
12580 rtx shift_count = XEXP (XEXP (op0, 0), 1);
12581
12582 if (GET_CODE (shift_op) == NOT
12583 || (GET_CODE (shift_op) == XOR
12584 && CONST_INT_P (XEXP (shift_op, 1))
12585 && CONST_INT_P (shift_count)
12586 && HWI_COMPUTABLE_MODE_P (mode)
12587 && (UINTVAL (XEXP (shift_op, 1))
12588 == HOST_WIDE_INT_1U
12589 << INTVAL (shift_count))))
12590 {
12591 op0
12592 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12593 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12594 code = (code == NE ? EQ : NE);
12595 continue;
12596 }
12597 }
12598 break;
12599
12600 case ASHIFT:
12601 /* If we have (compare (ashift FOO N) (const_int C)) and
12602 the high order N bits of FOO (N+1 if an inequality comparison)
12603 are known to be zero, we can do this by comparing FOO with C
12604 shifted right N bits so long as the low-order N bits of C are
12605 zero. */
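/* Illustrative sketch: (eq (ashift:SI FOO (const_int 2)) (const_int 20))
   can become (eq FOO (const_int 5)) when nonzero_bits shows the two
   high-order bits of FOO are zero, since the two low-order bits of 20
   are zero.  */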
12606 if (CONST_INT_P (XEXP (op0, 1))
12607 && INTVAL (XEXP (op0, 1)) >= 0
12608 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12609 < HOST_BITS_PER_WIDE_INT)
12610 && (((unsigned HOST_WIDE_INT) const_op
12611 & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
12612 - 1)) == 0)
12613 && mode_width <= HOST_BITS_PER_WIDE_INT
12614 && (nonzero_bits (XEXP (op0, 0), mode)
12615 & ~(mask >> (INTVAL (XEXP (op0, 1))
12616 + ! equality_comparison_p))) == 0)
12617 {
12618 /* We must perform a logical shift, not an arithmetic one,
12619 as we want the top N bits of C to be zero. */
12620 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12621
12622 temp >>= INTVAL (XEXP (op0, 1));
12623 op1 = gen_int_mode (temp, mode);
12624 op0 = XEXP (op0, 0);
12625 continue;
12626 }
12627
12628 /* If we are doing a sign bit comparison, it means we are testing
12629 a particular bit. Convert it to the appropriate AND. */
12630 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12631 && mode_width <= HOST_BITS_PER_WIDE_INT)
12632 {
12633 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12634 (HOST_WIDE_INT_1U
12635 << (mode_width - 1
12636 - INTVAL (XEXP (op0, 1)))));
12637 code = (code == LT ? NE : EQ);
12638 continue;
12639 }
12640
12641 /* If this is an equality comparison with zero and we are shifting
12642 the low bit to the sign bit, we can convert this to an AND of the
12643 low-order bit. */
12644 if (const_op == 0 && equality_comparison_p
12645 && CONST_INT_P (XEXP (op0, 1))
12646 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12647 {
12648 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12649 continue;
12650 }
12651 break;
12652
12653 case ASHIFTRT:
12654 /* If this is an equality comparison with zero, we can do this
12655 as a logical shift, which might be much simpler. */
12656 if (equality_comparison_p && const_op == 0
12657 && CONST_INT_P (XEXP (op0, 1)))
12658 {
12659 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12660 XEXP (op0, 0),
12661 INTVAL (XEXP (op0, 1)));
12662 continue;
12663 }
12664
12665 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12666 do the comparison in a narrower mode. */
12667 if (! unsigned_comparison_p
12668 && CONST_INT_P (XEXP (op0, 1))
12669 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12670 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12671 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12672 .exists (&tmode))
12673 && (((unsigned HOST_WIDE_INT) const_op
12674 + (GET_MODE_MASK (tmode) >> 1) + 1)
12675 <= GET_MODE_MASK (tmode)))
12676 {
12677 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12678 continue;
12679 }
12680
12681 /* Likewise if OP0 is a PLUS of a sign extension with a
12682 constant, which is usually represented with the PLUS
12683 between the shifts. */
12684 if (! unsigned_comparison_p
12685 && CONST_INT_P (XEXP (op0, 1))
12686 && GET_CODE (XEXP (op0, 0)) == PLUS
12687 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12688 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12689 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12690 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12691 .exists (&tmode))
12692 && (((unsigned HOST_WIDE_INT) const_op
12693 + (GET_MODE_MASK (tmode) >> 1) + 1)
12694 <= GET_MODE_MASK (tmode)))
12695 {
12696 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12697 rtx add_const = XEXP (XEXP (op0, 0), 1);
12698 rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
12699 add_const, XEXP (op0, 1));
12700
12701 op0 = simplify_gen_binary (PLUS, tmode,
12702 gen_lowpart (tmode, inner),
12703 new_const);
12704 continue;
12705 }
12706
12707 /* FALLTHROUGH */
12708 case LSHIFTRT:
12709 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12710 the low-order N bits of FOO are known to be zero, we can do this
12711 by comparing FOO with C shifted left N bits so long as no
12712 overflow occurs. Even if the low-order N bits of FOO aren't known
12713 to be zero, if the comparison is >= or < we can use the same
12714 optimization, and for > or <= we can do so by setting all the
12715 low-order N bits in the comparison constant. */
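/* For illustration (a hedged sketch, SImode):
   (geu (lshiftrt:SI FOO (const_int 2)) (const_int 3)) becomes
   (geu FOO (const_int 12)), and for (gtu ...) the low-order bits are
   also set, giving (gtu FOO (const_int 15)) when those bits of FOO may
   be nonzero.  */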
12716 if (CONST_INT_P (XEXP (op0, 1))
12717 && INTVAL (XEXP (op0, 1)) > 0
12718 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12719 && mode_width <= HOST_BITS_PER_WIDE_INT
12720 && (((unsigned HOST_WIDE_INT) const_op
12721 + (GET_CODE (op0) != LSHIFTRT
12722 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12723 + 1)
12724 : 0))
12725 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12726 {
12727 unsigned HOST_WIDE_INT low_bits
12728 = (nonzero_bits (XEXP (op0, 0), mode)
12729 & ((HOST_WIDE_INT_1U
12730 << INTVAL (XEXP (op0, 1))) - 1));
12731 if (low_bits == 0 || !equality_comparison_p)
12732 {
12733 /* If the shift was logical, then we must make the condition
12734 unsigned. */
12735 if (GET_CODE (op0) == LSHIFTRT)
12736 code = unsigned_condition (code);
12737
12738 const_op = (unsigned HOST_WIDE_INT) const_op
12739 << INTVAL (XEXP (op0, 1));
12740 if (low_bits != 0
12741 && (code == GT || code == GTU
12742 || code == LE || code == LEU))
12743 const_op
12744 |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
12745 op1 = GEN_INT (const_op);
12746 op0 = XEXP (op0, 0);
12747 continue;
12748 }
12749 }
12750
12751 /* If we are using this shift to extract just the sign bit, we
12752 can replace this with an LT or GE comparison. */
12753 if (const_op == 0
12754 && (equality_comparison_p || sign_bit_comparison_p)
12755 && CONST_INT_P (XEXP (op0, 1))
12756 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12757 {
12758 op0 = XEXP (op0, 0);
12759 code = (code == NE || code == GT ? LT : GE);
12760 continue;
12761 }
12762 break;
12763
12764 default:
12765 break;
12766 }
12767
12768 break;
12769 }
12770
12771 /* Now make any compound operations involved in this comparison. Then,
12772 check for an outermost SUBREG on OP0 that is not doing anything or is
12773 paradoxical. The latter transformation must only be performed when
12774 it is known that the "extra" bits will be the same in op0 and op1 or
12775 that they don't matter. There are three cases to consider:
12776
12777 1. SUBREG_REG (op0) is a register. In this case the bits are don't
12778 care bits and we can assume they have any convenient value. So
12779 making the transformation is safe.
12780
12781 2. SUBREG_REG (op0) is a memory reference and LOAD_EXTEND_OP is UNKNOWN.
12782 In this case the upper bits of op0 are undefined. We should not make
12783 the simplification in that case as we do not know the contents of
12784 those bits.
12785
12786 3. SUBREG_REG (op0) is a memory reference and LOAD_EXTEND_OP is not UNKNOWN.
12787 In that case we know those bits are zeros or ones. We must also be
12788 sure that they are the same as the upper bits of op1.
12789
12790 We can never remove a SUBREG for a non-equality comparison because
12791 the sign bit is in a different place in the underlying object. */
12792
12793 rtx_code op0_mco_code = SET;
12794 if (op1 == const0_rtx)
12795 op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;
12796
12797 op0 = make_compound_operation (op0, op0_mco_code);
12798 op1 = make_compound_operation (op1, SET);
12799
12800 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
12801 && is_int_mode (GET_MODE (op0), &mode)
12802 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12803 && (code == NE || code == EQ))
12804 {
12805 if (paradoxical_subreg_p (op0))
12806 {
12807 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
12808 implemented. */
12809 if (REG_P (SUBREG_REG (op0)))
12810 {
12811 op0 = SUBREG_REG (op0);
12812 op1 = gen_lowpart (inner_mode, op1);
12813 }
12814 }
12815 else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12816 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12817 & ~GET_MODE_MASK (mode)) == 0)
12818 {
12819 tem = gen_lowpart (inner_mode, op1);
12820
12821 if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
12822 op0 = SUBREG_REG (op0), op1 = tem;
12823 }
12824 }
12825
12826 /* We now do the opposite procedure: Some machines don't have compare
12827 insns in all modes. If OP0's mode is an integer mode smaller than a
12828 word and we can't do a compare in that mode, see if there is a larger
12829 mode for which we can do the compare. There are a number of cases in
12830 which we can use the wider mode. */
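/* Illustrative sketch: on a target with an SImode but no QImode compare,
   (eq (reg:QI r) (const_int 5)) may be rewritten below as
   (eq (zero_extend:SI (reg:QI r)) (const_int 5)), provided nonzero_bits
   shows the value already fits in QImode.  */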
12831
12832 if (is_int_mode (GET_MODE (op0), &mode)
12833 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
12834 && ! have_insn_for (COMPARE, mode))
12835 FOR_EACH_WIDER_MODE (tmode_iter, mode)
12836 {
12837 tmode = tmode_iter.require ();
12838 if (!HWI_COMPUTABLE_MODE_P (tmode))
12839 break;
12840 if (have_insn_for (COMPARE, tmode))
12841 {
12842 int zero_extended;
12843
12844 /* If this is a test for negative, we can make an explicit
12845 test of the sign bit. Test this first so we can use
12846 a paradoxical subreg to extend OP0. */
12847
12848 if (op1 == const0_rtx && (code == LT || code == GE)
12849 && HWI_COMPUTABLE_MODE_P (mode))
12850 {
12851 unsigned HOST_WIDE_INT sign
12852 = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
12853 op0 = simplify_gen_binary (AND, tmode,
12854 gen_lowpart (tmode, op0),
12855 gen_int_mode (sign, tmode));
12856 code = (code == LT) ? NE : EQ;
12857 break;
12858 }
12859
12860 /* If the only nonzero bits in OP0 and OP1 are those in the
12861 narrower mode and this is an equality or unsigned comparison,
12862 we can use the wider mode. Similarly for sign-extended
12863 values, in which case it is true for all comparisons. */
12864 zero_extended = ((code == EQ || code == NE
12865 || code == GEU || code == GTU
12866 || code == LEU || code == LTU)
12867 && (nonzero_bits (op0, tmode)
12868 & ~GET_MODE_MASK (mode)) == 0
12869 && ((CONST_INT_P (op1)
12870 || (nonzero_bits (op1, tmode)
12871 & ~GET_MODE_MASK (mode)) == 0)));
12872
12873 if (zero_extended
12874 || ((num_sign_bit_copies (op0, tmode)
12875 > (unsigned int) (GET_MODE_PRECISION (tmode)
12876 - GET_MODE_PRECISION (mode)))
12877 && (num_sign_bit_copies (op1, tmode)
12878 > (unsigned int) (GET_MODE_PRECISION (tmode)
12879 - GET_MODE_PRECISION (mode)))))
12880 {
12881 /* If OP0 is an AND and we don't have an AND in MODE either,
12882 make a new AND in the proper mode. */
12883 if (GET_CODE (op0) == AND
12884 && !have_insn_for (AND, mode))
12885 op0 = simplify_gen_binary (AND, tmode,
12886 gen_lowpart (tmode,
12887 XEXP (op0, 0)),
12888 gen_lowpart (tmode,
12889 XEXP (op0, 1)));
12890 else
12891 {
12892 if (zero_extended)
12893 {
12894 op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
12895 op0, mode);
12896 op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
12897 op1, mode);
12898 }
12899 else
12900 {
12901 op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
12902 op0, mode);
12903 op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
12904 op1, mode);
12905 }
12906 break;
12907 }
12908 }
12909 }
12910 }
12911
12912 /* We may have changed the comparison operands. Re-canonicalize. */
12913 if (swap_commutative_operands_p (op0, op1))
12914 {
12915 std::swap (op0, op1);
12916 code = swap_condition (code);
12917 }
12918
12919 /* If this machine only supports a subset of valid comparisons, see if we
12920 can convert an unsupported one into a supported one. */
12921 target_canonicalize_comparison (&code, &op0, &op1, 0);
12922
12923 *pop0 = op0;
12924 *pop1 = op1;
12925
12926 return code;
12927 }
12928 \f
12929 /* Utility function for record_value_for_reg. Count the number of
12930 rtxs in X. */
12931 static int
12932 count_rtxs (rtx x)
12933 {
12934 enum rtx_code code = GET_CODE (x);
12935 const char *fmt;
12936 int i, j, ret = 1;
12937
12938 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
12939 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
12940 {
12941 rtx x0 = XEXP (x, 0);
12942 rtx x1 = XEXP (x, 1);
12943
12944 if (x0 == x1)
12945 return 1 + 2 * count_rtxs (x0);
12946
12947 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
12948 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
12949 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
12950 return 2 + 2 * count_rtxs (x0)
12951 + count_rtxs (x == XEXP (x1, 0)
12952 ? XEXP (x1, 1) : XEXP (x1, 0));
12953
12954 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
12955 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
12956 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
12957 return 2 + 2 * count_rtxs (x1)
12958 + count_rtxs (x == XEXP (x0, 0)
12959 ? XEXP (x0, 1) : XEXP (x0, 0));
12960 }
12961
12962 fmt = GET_RTX_FORMAT (code);
12963 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
12964 if (fmt[i] == 'e')
12965 ret += count_rtxs (XEXP (x, i));
12966 else if (fmt[i] == 'E')
12967 for (j = 0; j < XVECLEN (x, i); j++)
12968 ret += count_rtxs (XVECEXP (x, i, j));
12969
12970 return ret;
12971 }
12972 \f
12973 /* Utility function for the following routine. Called when X is part of a value
12974 being stored into last_set_value. Sets last_set_table_tick
12975 for each register mentioned. Similar to mention_regs in cse.c. */
12976
12977 static void
12978 update_table_tick (rtx x)
12979 {
12980 enum rtx_code code = GET_CODE (x);
12981 const char *fmt = GET_RTX_FORMAT (code);
12982 int i, j;
12983
12984 if (code == REG)
12985 {
12986 unsigned int regno = REGNO (x);
12987 unsigned int endregno = END_REGNO (x);
12988 unsigned int r;
12989
12990 for (r = regno; r < endregno; r++)
12991 {
12992 reg_stat_type *rsp = &reg_stat[r];
12993 rsp->last_set_table_tick = label_tick;
12994 }
12995
12996 return;
12997 }
12998
12999 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13000 if (fmt[i] == 'e')
13001 {
13002 /* Check for identical subexpressions. If x contains
13003 identical subexpressions we only have to traverse one of
13004 them. */
13005 if (i == 0 && ARITHMETIC_P (x))
13006 {
13007 /* Note that at this point x1 has already been
13008 processed. */
13009 rtx x0 = XEXP (x, 0);
13010 rtx x1 = XEXP (x, 1);
13011
13012 /* If x0 and x1 are identical then there is no need to
13013 process x0. */
13014 if (x0 == x1)
13015 break;
13016
13017 /* If x0 is identical to a subexpression of x1 then while
13018 processing x1, x0 has already been processed. Thus we
13019 are done with x. */
13020 if (ARITHMETIC_P (x1)
13021 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13022 break;
13023
13024 /* If x1 is identical to a subexpression of x0 then we
13025 still have to process the rest of x0. */
13026 if (ARITHMETIC_P (x0)
13027 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13028 {
13029 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
13030 break;
13031 }
13032 }
13033
13034 update_table_tick (XEXP (x, i));
13035 }
13036 else if (fmt[i] == 'E')
13037 for (j = 0; j < XVECLEN (x, i); j++)
13038 update_table_tick (XVECEXP (x, i, j));
13039 }
13040
13041 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
13042 are saying that the register is clobbered and we no longer know its
13043 value. If INSN is zero, don't update reg_stat[].last_set; this is
13044 only permitted with VALUE also zero and is used to invalidate the
13045 register. */
13046
13047 static void
13048 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
13049 {
13050 unsigned int regno = REGNO (reg);
13051 unsigned int endregno = END_REGNO (reg);
13052 unsigned int i;
13053 reg_stat_type *rsp;
13054
13055 /* If VALUE contains REG and we have a previous value for REG, substitute
13056 the previous value. */
13057 if (value && insn && reg_overlap_mentioned_p (reg, value))
13058 {
13059 rtx tem;
13060
13061 /* Set things up so get_last_value is allowed to see anything set up to
13062 our insn. */
13063 subst_low_luid = DF_INSN_LUID (insn);
13064 tem = get_last_value (reg);
13065
13066 /* If TEM is simply a binary operation with two CLOBBERs as operands,
13067 it isn't going to be useful and will take a lot of time to process,
13068 so just use the CLOBBER. */
13069
13070 if (tem)
13071 {
13072 if (ARITHMETIC_P (tem)
13073 && GET_CODE (XEXP (tem, 0)) == CLOBBER
13074 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
13075 tem = XEXP (tem, 0);
13076 else if (count_occurrences (value, reg, 1) >= 2)
13077 {
13078 /* If there are two or more occurrences of REG in VALUE,
13079 prevent the value from growing too much. */
13080 if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
13081 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
13082 }
13083
13084 value = replace_rtx (copy_rtx (value), reg, tem);
13085 }
13086 }
13087
13088 /* For each register modified, show we don't know its value, that
13089 we don't know about its bitwise content, that its value has been
13090 updated, and that we don't know the location of the death of the
13091 register. */
13092 for (i = regno; i < endregno; i++)
13093 {
13094 rsp = &reg_stat[i];
13095
13096 if (insn)
13097 rsp->last_set = insn;
13098
13099 rsp->last_set_value = 0;
13100 rsp->last_set_mode = VOIDmode;
13101 rsp->last_set_nonzero_bits = 0;
13102 rsp->last_set_sign_bit_copies = 0;
13103 rsp->last_death = 0;
13104 rsp->truncated_to_mode = VOIDmode;
13105 }
13106
13107 /* Mark registers that are being referenced in this value. */
13108 if (value)
13109 update_table_tick (value);
13110
13111 /* Now update the status of each register being set.
13112 If someone is using this register in this block, set this register
13113 to invalid since we will get confused between the two lives in this
13114 basic block. This makes using this register always invalid. In cse, we
13115 scan the table to invalidate all entries using this register, but this
13116 is too much work for us. */
13117
13118 for (i = regno; i < endregno; i++)
13119 {
13120 rsp = &reg_stat[i];
13121 rsp->last_set_label = label_tick;
13122 if (!insn
13123 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
13124 rsp->last_set_invalid = 1;
13125 else
13126 rsp->last_set_invalid = 0;
13127 }
13128
13129 /* The value being assigned might refer to X (like in "x++;"). In that
13130 case, we must replace it with (clobber (const_int 0)) to prevent
13131 infinite loops. */
13132 rsp = &reg_stat[regno];
13133 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
13134 {
13135 value = copy_rtx (value);
13136 if (!get_last_value_validate (&value, insn, label_tick, 1))
13137 value = 0;
13138 }
13139
13140 /* For the main register being modified, update the value, the mode, the
13141 nonzero bits, and the number of sign bit copies. */
13142
13143 rsp->last_set_value = value;
13144
13145 if (value)
13146 {
13147 machine_mode mode = GET_MODE (reg);
13148 subst_low_luid = DF_INSN_LUID (insn);
13149 rsp->last_set_mode = mode;
13150 if (GET_MODE_CLASS (mode) == MODE_INT
13151 && HWI_COMPUTABLE_MODE_P (mode))
13152 mode = nonzero_bits_mode;
13153 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
13154 rsp->last_set_sign_bit_copies
13155 = num_sign_bit_copies (value, GET_MODE (reg));
13156 }
13157 }
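
/* As an illustration of the self-reference handling above (register
   numbers are invented), suppose we are recording a value for

	(set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 4)))

   and the last value recorded for register 100 in this block was
   (reg:SI 99).  Because VALUE mentions REG, get_last_value is consulted
   and the value actually stored becomes

	(plus:SI (reg:SI 99) (const_int 4)).

   If no usable previous value exists, the remaining self-reference is
   later replaced by (clobber (const_int 0)) via get_last_value_validate,
   so substitution cannot loop forever.  */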
13158
13159 /* Called via note_stores from record_dead_and_set_regs to handle one
13160 SET or CLOBBER in an insn. DATA is the instruction in which the
13161 set is occurring. */
13162
13163 static void
13164 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
13165 {
13166 rtx_insn *record_dead_insn = (rtx_insn *) data;
13167
13168 if (GET_CODE (dest) == SUBREG)
13169 dest = SUBREG_REG (dest);
13170
13171 if (!record_dead_insn)
13172 {
13173 if (REG_P (dest))
13174 record_value_for_reg (dest, NULL, NULL_RTX);
13175 return;
13176 }
13177
13178 if (REG_P (dest))
13179 {
13180 /* If we are setting the whole register, we know its value. Otherwise
13181 show that we don't know the value. We can handle SUBREG in
13182 some cases. */
13183 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
13184 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
13185 else if (GET_CODE (setter) == SET
13186 && GET_CODE (SET_DEST (setter)) == SUBREG
13187 && SUBREG_REG (SET_DEST (setter)) == dest
13188 && GET_MODE_PRECISION (GET_MODE (dest)) <= BITS_PER_WORD
13189 && subreg_lowpart_p (SET_DEST (setter)))
13190 record_value_for_reg (dest, record_dead_insn,
13191 gen_lowpart (GET_MODE (dest),
13192 SET_SRC (setter)));
13193 else
13194 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
13195 }
13196 else if (MEM_P (dest)
13197 /* Ignore pushes, they clobber nothing. */
13198 && ! push_operand (dest, GET_MODE (dest)))
13199 mem_last_set = DF_INSN_LUID (record_dead_insn);
13200 }
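
/* For example (invented registers, assuming 64-bit words), a setter

	(set (subreg:SI (reg:DI 100) 0) (reg:SI 101))

   strips to DEST = (reg:DI 100); DImode fits in one word and the
   destination is a lowpart SUBREG, so we record
   gen_lowpart (DImode, (reg:SI 101)) as the value of register 100
   instead of discarding the information entirely.  */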
13201
13202 /* Update the records of when each REG was most recently set or killed
13203 for the things done by INSN. This is the last thing done in processing
13204 INSN in the combiner loop.
13205
13206 We update reg_stat[], in particular fields last_set, last_set_value,
13207 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
13208 last_death, and also the similar information mem_last_set (which insn
13209 most recently modified memory) and last_call_luid (which insn was the
13210 most recent subroutine call). */
13211
13212 static void
13213 record_dead_and_set_regs (rtx_insn *insn)
13214 {
13215 rtx link;
13216 unsigned int i;
13217
13218 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
13219 {
13220 if (REG_NOTE_KIND (link) == REG_DEAD
13221 && REG_P (XEXP (link, 0)))
13222 {
13223 unsigned int regno = REGNO (XEXP (link, 0));
13224 unsigned int endregno = END_REGNO (XEXP (link, 0));
13225
13226 for (i = regno; i < endregno; i++)
13227 {
13228 reg_stat_type *rsp;
13229
13230 rsp = &reg_stat[i];
13231 rsp->last_death = insn;
13232 }
13233 }
13234 else if (REG_NOTE_KIND (link) == REG_INC)
13235 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
13236 }
13237
13238 if (CALL_P (insn))
13239 {
13240 hard_reg_set_iterator hrsi;
13241 EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
13242 {
13243 reg_stat_type *rsp;
13244
13245 rsp = &reg_stat[i];
13246 rsp->last_set_invalid = 1;
13247 rsp->last_set = insn;
13248 rsp->last_set_value = 0;
13249 rsp->last_set_mode = VOIDmode;
13250 rsp->last_set_nonzero_bits = 0;
13251 rsp->last_set_sign_bit_copies = 0;
13252 rsp->last_death = 0;
13253 rsp->truncated_to_mode = VOIDmode;
13254 }
13255
13256 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
13257
13258 /* We can't combine into a call pattern. Remember, though, that
13259 the return value register is set at this LUID. We could
13260 still replace a register with the return value from the
13261 wrong subroutine call! */
13262 note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
13263 }
13264 else
13265 note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
13266 }
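
/* For instance, if INSN carries the notes (register numbers invented)

	(expr_list:REG_DEAD (reg:SI 100)
	   (expr_list:REG_INC (reg:SI 101) (nil)))

   then INSN is recorded as the last death of register 100, and any value
   previously recorded for register 101 is forgotten, since the
   auto-increment changes it in a way we do not track.  */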
13267
13268 /* If a SUBREG has the promoted bit set, it is in fact a property of the
13269 register present in the SUBREG, so for each such SUBREG go back and
13270 adjust nonzero and sign bit information of the registers that are
13271 known to have some zero/sign bits set.
13272
13273 This is needed because when combine blows the SUBREGs away, the
13274 information on zero/sign bits is lost and further combines can be
13275 missed because of that. */
13276
13277 static void
13278 record_promoted_value (rtx_insn *insn, rtx subreg)
13279 {
13280 struct insn_link *links;
13281 rtx set;
13282 unsigned int regno = REGNO (SUBREG_REG (subreg));
13283 machine_mode mode = GET_MODE (subreg);
13284
13285 if (GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT)
13286 return;
13287
13288 for (links = LOG_LINKS (insn); links;)
13289 {
13290 reg_stat_type *rsp;
13291
13292 insn = links->insn;
13293 set = single_set (insn);
13294
13295 if (! set || !REG_P (SET_DEST (set))
13296 || REGNO (SET_DEST (set)) != regno
13297 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
13298 {
13299 links = links->next;
13300 continue;
13301 }
13302
13303 rsp = &reg_stat[regno];
13304 if (rsp->last_set == insn)
13305 {
13306 if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
13307 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
13308 }
13309
13310 if (REG_P (SET_SRC (set)))
13311 {
13312 regno = REGNO (SET_SRC (set));
13313 links = LOG_LINKS (insn);
13314 }
13315 else
13316 break;
13317 }
13318 }
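
/* For example, if SUBREG is (subreg:QI (reg:SI 100) 0) and is marked as
   an unsigned promotion, then the insn that set register 100 produced a
   value already zero-extended from QImode.  If that insn is still the
   recorded last setter, last_set_nonzero_bits can be masked with
   GET_MODE_MASK (QImode) (i.e. 0xff), preserving the information even
   after the SUBREG itself has been simplified away.  */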
13319
13320 /* Check if X, a register, is known to contain a value already
13321 truncated to MODE. In this case we can use a subreg to refer to
13322 the truncated value even though in the generic case we would need
13323 an explicit truncation. */
13324
13325 static bool
13326 reg_truncated_to_mode (machine_mode mode, const_rtx x)
13327 {
13328 reg_stat_type *rsp = &reg_stat[REGNO (x)];
13329 machine_mode truncated = rsp->truncated_to_mode;
13330
13331 if (truncated == 0
13332 || rsp->truncation_label < label_tick_ebb_start)
13333 return false;
13334 if (!partial_subreg_p (mode, truncated))
13335 return true;
13336 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
13337 return true;
13338 return false;
13339 }
13340
13341 /* If X is a hard reg or a subreg, record the mode that the register is
13342 accessed in. For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
13343 able to turn a truncate into a subreg using this information. Return true
13344 if traversing X is complete. */
13345
13346 static bool
13347 record_truncated_value (rtx x)
13348 {
13349 machine_mode truncated_mode;
13350 reg_stat_type *rsp;
13351
13352 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
13353 {
13354 machine_mode original_mode = GET_MODE (SUBREG_REG (x));
13355 truncated_mode = GET_MODE (x);
13356
13357 if (!partial_subreg_p (truncated_mode, original_mode))
13358 return true;
13359
13361 if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
13362 return true;
13363
13364 x = SUBREG_REG (x);
13365 }
13366 /* ??? For hard-regs we now record everything. We might be able to
13367 optimize this using last_set_mode. */
13368 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
13369 truncated_mode = GET_MODE (x);
13370 else
13371 return false;
13372
13373 rsp = &reg_stat[REGNO (x)];
13374 if (rsp->truncated_to_mode == 0
13375 || rsp->truncation_label < label_tick_ebb_start
13376 || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
13377 {
13378 rsp->truncated_to_mode = truncated_mode;
13379 rsp->truncation_label = label_tick;
13380 }
13381
13382 return true;
13383 }
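
/* For instance, on a target where truncating DImode to SImode is not a
   no-op (a MIPS64-like machine), the use (subreg:SI (reg:DI 100) 0)
   records truncated_to_mode = SImode for register 100.  A later
   (truncate:SI (reg:DI 100)) of that register can then be represented as
   a plain lowpart SUBREG, because reg_truncated_to_mode reports that the
   register already holds a correctly truncated value.  */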
13384
13385 /* Callback for note_uses. Find hard regs and subregs of pseudos and
13386 the modes they are used in. This can help turn TRUNCATEs into
13387 SUBREGs. */
13388
13389 static void
13390 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
13391 {
13392 subrtx_var_iterator::array_type array;
13393 FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
13394 if (record_truncated_value (*iter))
13395 iter.skip_subrtxes ();
13396 }
13397
13398 /* Scan X for promoted SUBREGs. For each one found,
13399 note what it implies to the registers used in it. */
13400
13401 static void
13402 check_promoted_subreg (rtx_insn *insn, rtx x)
13403 {
13404 if (GET_CODE (x) == SUBREG
13405 && SUBREG_PROMOTED_VAR_P (x)
13406 && REG_P (SUBREG_REG (x)))
13407 record_promoted_value (insn, x);
13408 else
13409 {
13410 const char *format = GET_RTX_FORMAT (GET_CODE (x));
13411 int i, j;
13412
13413 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
13414 switch (format[i])
13415 {
13416 case 'e':
13417 check_promoted_subreg (insn, XEXP (x, i));
13418 break;
13419 case 'V':
13420 case 'E':
13421 if (XVEC (x, i) != 0)
13422 for (j = 0; j < XVECLEN (x, i); j++)
13423 check_promoted_subreg (insn, XVECEXP (x, i, j));
13424 break;
13425 }
13426 }
13427 }
13428 \f
13429 /* Verify that all the registers and memory references mentioned in *LOC are
13430 still valid. *LOC was part of a value set in INSN when label_tick was
13431 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
13432 the invalid references with (clobber (const_int 0)) and return 1. This
13433 replacement is useful because we often can get useful information about
13434 the form of a value (e.g., if it was produced by a shift that always
13435 produces -1 or 0) even though we don't know exactly what registers it
13436 was produced from. */
13437
13438 static int
13439 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
13440 {
13441 rtx x = *loc;
13442 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
13443 int len = GET_RTX_LENGTH (GET_CODE (x));
13444 int i, j;
13445
13446 if (REG_P (x))
13447 {
13448 unsigned int regno = REGNO (x);
13449 unsigned int endregno = END_REGNO (x);
13450 unsigned int j;
13451
13452 for (j = regno; j < endregno; j++)
13453 {
13454 reg_stat_type *rsp = &reg_stat[j];
13455 if (rsp->last_set_invalid
13456 /* If this is a pseudo-register that was only set once and not
13457 live at the beginning of the function, it is always valid. */
13458 || (! (regno >= FIRST_PSEUDO_REGISTER
13459 && regno < reg_n_sets_max
13460 && REG_N_SETS (regno) == 1
13461 && (!REGNO_REG_SET_P
13462 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
13463 regno)))
13464 && rsp->last_set_label > tick))
13465 {
13466 if (replace)
13467 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13468 return replace;
13469 }
13470 }
13471
13472 return 1;
13473 }
13474 /* If this is a memory reference, make sure that there were no stores after
13475 it that might have clobbered the value. We don't have alias info, so we
13476 assume any store invalidates it. Moreover, we only have local UIDs, so
13477 we also assume that there were stores in the intervening basic blocks. */
13478 else if (MEM_P (x) && !MEM_READONLY_P (x)
13479 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
13480 {
13481 if (replace)
13482 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13483 return replace;
13484 }
13485
13486 for (i = 0; i < len; i++)
13487 {
13488 if (fmt[i] == 'e')
13489 {
13490 /* Check for identical subexpressions. If x contains
13491 identical subexpressions we only have to traverse one of
13492 them. */
13493 if (i == 1 && ARITHMETIC_P (x))
13494 {
13495 /* Note that at this point x0 has already been checked
13496 and found valid. */
13497 rtx x0 = XEXP (x, 0);
13498 rtx x1 = XEXP (x, 1);
13499
13500 /* If x0 and x1 are identical then x is also valid. */
13501 if (x0 == x1)
13502 return 1;
13503
13504 /* If x1 is identical to a subexpression of x0 then
13505 while checking x0, x1 has already been checked. Thus
13506 it is valid, and so is x. */
13507 if (ARITHMETIC_P (x0)
13508 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13509 return 1;
13510
13511 /* If x0 is identical to a subexpression of x1 then x is
13512 valid iff the rest of x1 is valid. */
13513 if (ARITHMETIC_P (x1)
13514 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13515 return
13516 get_last_value_validate (&XEXP (x1,
13517 x0 == XEXP (x1, 0) ? 1 : 0),
13518 insn, tick, replace);
13519 }
13520
13521 if (get_last_value_validate (&XEXP (x, i), insn, tick,
13522 replace) == 0)
13523 return 0;
13524 }
13525 else if (fmt[i] == 'E')
13526 for (j = 0; j < XVECLEN (x, i); j++)
13527 if (get_last_value_validate (&XVECEXP (x, i, j),
13528 insn, tick, replace) == 0)
13529 return 0;
13530 }
13531
13532 /* If we haven't found a reason for it to be invalid, it is valid. */
13533 return 1;
13534 }
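
/* Two illustrations of the rules above (register numbers invented):

   - A pseudo that is set exactly once and is not live on entry to the
     function is always considered valid, even when it was set in a
     later tick: its single definition necessarily reaches every use.

   - In (plus:SI (reg:SI 100) (reg:SI 100)) both operands are the same
     rtx, so once operand 0 has been validated there is no need to
     traverse operand 1 again.  */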
13535
13536 /* Get the last value assigned to X, if known. Some registers
13537 in the value may be replaced with (clobber (const_int 0)) if their value
13538 is no longer known reliably. */
13539
13540 static rtx
13541 get_last_value (const_rtx x)
13542 {
13543 unsigned int regno;
13544 rtx value;
13545 reg_stat_type *rsp;
13546
13547 /* If this is a non-paradoxical SUBREG, get the value of its operand and
13548 then convert it to the desired mode. If this is a paradoxical SUBREG,
13549 we cannot predict what values the "extra" bits might have. */
13550 if (GET_CODE (x) == SUBREG
13551 && subreg_lowpart_p (x)
13552 && !paradoxical_subreg_p (x)
13553 && (value = get_last_value (SUBREG_REG (x))) != 0)
13554 return gen_lowpart (GET_MODE (x), value);
13555
13556 if (!REG_P (x))
13557 return 0;
13558
13559 regno = REGNO (x);
13560 rsp = &reg_stat[regno];
13561 value = rsp->last_set_value;
13562
13563 /* If we don't have a value, or if it isn't for this basic block and
13564 it's either a hard register, set more than once, or live
13565 at the beginning of the function, return 0.
13566
13567 Because if it's not live at the beginning of the function then the reg
13568 is always set before being used (is never used without being set).
13569 And, if it's set only once, and it's always set before use, then all
13570 uses must have the same last value, even if it's not from this basic
13571 block. */
13572
13573 if (value == 0
13574 || (rsp->last_set_label < label_tick_ebb_start
13575 && (regno < FIRST_PSEUDO_REGISTER
13576 || regno >= reg_n_sets_max
13577 || REG_N_SETS (regno) != 1
13578 || REGNO_REG_SET_P
13579 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
13580 return 0;
13581
13582 /* If the value was set in a later insn than the ones we are processing,
13583 we can't use it even if the register was only set once. */
13584 if (rsp->last_set_label == label_tick
13585 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
13586 return 0;
13587
13588 /* If fewer bits were set than what we are asked for now, we cannot use
13589 the value. */
13590 if (GET_MODE_PRECISION (rsp->last_set_mode)
13591 < GET_MODE_PRECISION (GET_MODE (x)))
13592 return 0;
13593
13594 /* If the value has all its registers valid, return it. */
13595 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
13596 return value;
13597
13598 /* Otherwise, make a copy and replace any invalid register with
13599 (clobber (const_int 0)). If that fails for some reason, return 0. */
13600
13601 value = copy_rtx (value);
13602 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
13603 return value;
13604
13605 return 0;
13606 }
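
/* For example, asking for the last value of (subreg:HI (reg:SI 100) 0)
   (a lowpart on a little-endian target) first fetches the SImode value
   recorded for register 100 and then narrows it with gen_lowpart.
   Conversely, a request in a mode wider than last_set_mode fails,
   because the extra bits were never tracked.  */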
13607 \f
13608 /* Return nonzero if expression X refers to a REG or to memory
13609 that is set in an instruction more recent than FROM_LUID. */
13610
13611 static int
13612 use_crosses_set_p (const_rtx x, int from_luid)
13613 {
13614 const char *fmt;
13615 int i;
13616 enum rtx_code code = GET_CODE (x);
13617
13618 if (code == REG)
13619 {
13620 unsigned int regno = REGNO (x);
13621 unsigned endreg = END_REGNO (x);
13622
13623 #ifdef PUSH_ROUNDING
13624 /* Don't allow uses of the stack pointer to be moved,
13625 because we don't know whether the move crosses a push insn. */
13626 if (regno == STACK_POINTER_REGNUM && PUSH_ARGS)
13627 return 1;
13628 #endif
13629 for (; regno < endreg; regno++)
13630 {
13631 reg_stat_type *rsp = &reg_stat[regno];
13632 if (rsp->last_set
13633 && rsp->last_set_label == label_tick
13634 && DF_INSN_LUID (rsp->last_set) > from_luid)
13635 return 1;
13636 }
13637 return 0;
13638 }
13639
13640 if (code == MEM && mem_last_set > from_luid)
13641 return 1;
13642
13643 fmt = GET_RTX_FORMAT (code);
13644
13645 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13646 {
13647 if (fmt[i] == 'E')
13648 {
13649 int j;
13650 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
13651 if (use_crosses_set_p (XVECEXP (x, i, j), from_luid))
13652 return 1;
13653 }
13654 else if (fmt[i] == 'e'
13655 && use_crosses_set_p (XEXP (x, i), from_luid))
13656 return 1;
13657 }
13658 return 0;
13659 }
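
/* For example, if register 100 was last set in the current basic block
   by an insn with LUID 20, then use_crosses_set_p ((reg:SI 100), 10)
   returns 1, while use_crosses_set_p ((reg:SI 100), 25) returns 0; only
   sets more recent than FROM_LUID count.  */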
13660 \f
13661 /* Define three variables used for communication between the following
13662 routines. */
13663
13664 static unsigned int reg_dead_regno, reg_dead_endregno;
13665 static int reg_dead_flag;
13666
13667 /* Function called via note_stores from reg_dead_at_p.
13668
13669 If DEST is within [reg_dead_regno, reg_dead_endregno), set
13670 reg_dead_flag to 1 if X is a CLOBBER, and to -1 if it is a SET. */
13671
13672 static void
13673 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
13674 {
13675 unsigned int regno, endregno;
13676
13677 if (!REG_P (dest))
13678 return;
13679
13680 regno = REGNO (dest);
13681 endregno = END_REGNO (dest);
13682 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
13683 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
13684 }
13685
13686 /* Return nonzero if REG is known to be dead at INSN.
13687
13688 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
13689 referencing REG, it is dead. If we hit a SET referencing REG, it is
13690 live. Otherwise, see if it is live or dead at the start of the basic
13691 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
13692 must be assumed to be always live. */
13693
13694 static int
13695 reg_dead_at_p (rtx reg, rtx_insn *insn)
13696 {
13697 basic_block block;
13698 unsigned int i;
13699
13700 /* Set variables for reg_dead_at_p_1. */
13701 reg_dead_regno = REGNO (reg);
13702 reg_dead_endregno = END_REGNO (reg);
13703
13704 reg_dead_flag = 0;
13705
13706 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
13707 we allow the machine description to decide whether use-and-clobber
13708 patterns are OK. */
13709 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
13710 {
13711 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13712 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13713 return 0;
13714 }
13715
13716 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
13717 beginning of basic block. */
13718 block = BLOCK_FOR_INSN (insn);
13719 for (;;)
13720 {
13721 if (INSN_P (insn))
13722 {
13723 if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13724 return 1;
13725
13726 note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
13727 if (reg_dead_flag)
13728 return reg_dead_flag == 1 ? 1 : 0;
13729
13730 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13731 return 1;
13732 }
13733
13734 if (insn == BB_HEAD (block))
13735 break;
13736
13737 insn = PREV_INSN (insn);
13738 }
13739
13740 /* Look at live-in sets for the basic block that we were in. */
13741 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13742 if (REGNO_REG_SET_P (df_get_live_in (block), i))
13743 return 0;
13744
13745 return 1;
13746 }
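
/* For instance, when checking whether (reg:SI 100) is dead at INSN, the
   backward scan stops at the first earlier insn that either carries a
   REG_DEAD or REG_UNUSED note for register 100 or stores into it: a
   (clobber (reg:SI 100)) means the register is dead at INSN, while a
   (set (reg:SI 100) ...) means it is live.  If the scan reaches the
   head of the block without deciding, the block's live-in set gives the
   answer.  */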
13747 \f
13748 /* Note hard registers in X that are used. */
13749
13750 static void
13751 mark_used_regs_combine (rtx x)
13752 {
13753 RTX_CODE code = GET_CODE (x);
13754 unsigned int regno;
13755 int i;
13756
13757 switch (code)
13758 {
13759 case LABEL_REF:
13760 case SYMBOL_REF:
13761 case CONST:
13762 CASE_CONST_ANY:
13763 case PC:
13764 case ADDR_VEC:
13765 case ADDR_DIFF_VEC:
13766 case ASM_INPUT:
13767 /* CC0 must die in the insn after it is set, so we don't need to take
13768 special note of it here. */
13769 case CC0:
13770 return;
13771
13772 case CLOBBER:
13773 /* If we are clobbering a MEM, mark any hard registers inside the
13774 address as used. */
13775 if (MEM_P (XEXP (x, 0)))
13776 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13777 return;
13778
13779 case REG:
13780 regno = REGNO (x);
13781 /* A hard reg in a wide mode may really be multiple registers.
13782 If so, mark all of them just like the first. */
13783 if (regno < FIRST_PSEUDO_REGISTER)
13784 {
13785 /* None of this applies to the stack, frame or arg pointers. */
13786 if (regno == STACK_POINTER_REGNUM
13787 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
13788 && regno == HARD_FRAME_POINTER_REGNUM)
13789 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13790 && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13791 || regno == FRAME_POINTER_REGNUM)
13792 return;
13793
13794 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13795 }
13796 return;
13797
13798 case SET:
13799 {
13800 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13801 the address. */
13802 rtx testreg = SET_DEST (x);
13803
13804 while (GET_CODE (testreg) == SUBREG
13805 || GET_CODE (testreg) == ZERO_EXTRACT
13806 || GET_CODE (testreg) == STRICT_LOW_PART)
13807 testreg = XEXP (testreg, 0);
13808
13809 if (MEM_P (testreg))
13810 mark_used_regs_combine (XEXP (testreg, 0));
13811
13812 mark_used_regs_combine (SET_SRC (x));
13813 }
13814 return;
13815
13816 default:
13817 break;
13818 }
13819
13820 /* Recursively scan the operands of this expression. */
13821
13822 {
13823 const char *fmt = GET_RTX_FORMAT (code);
13824
13825 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13826 {
13827 if (fmt[i] == 'e')
13828 mark_used_regs_combine (XEXP (x, i));
13829 else if (fmt[i] == 'E')
13830 {
13831 int j;
13832
13833 for (j = 0; j < XVECLEN (x, i); j++)
13834 mark_used_regs_combine (XVECEXP (x, i, j));
13835 }
13836 }
13837 }
13838 }
13839 \f
13840 /* Remove register number REGNO from the dead registers list of INSN.
13841
13842 Return the note used to record the death, if there was one. */
13843
13844 rtx
13845 remove_death (unsigned int regno, rtx_insn *insn)
13846 {
13847 rtx note = find_regno_note (insn, REG_DEAD, regno);
13848
13849 if (note)
13850 remove_note (insn, note);
13851
13852 return note;
13853 }
13854
13855 /* For each register (hardware or pseudo) used within expression X, if its
13856 death is in an instruction with luid between FROM_LUID (inclusive) and
13857 TO_INSN (exclusive), put a REG_DEAD note for that register in the
13858 list headed by PNOTES.
13859
13860 That said, don't move registers killed by maybe_kill_insn.
13861
13862 This is done when X is being merged by combination into TO_INSN. These
13863 notes will then be distributed as needed. */
13864
13865 static void
13866 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
13867 rtx *pnotes)
13868 {
13869 const char *fmt;
13870 int len, i;
13871 enum rtx_code code = GET_CODE (x);
13872
13873 if (code == REG)
13874 {
13875 unsigned int regno = REGNO (x);
13876 rtx_insn *where_dead = reg_stat[regno].last_death;
13877
13878 /* Don't move the register if it gets killed in between from and to. */
13879 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
13880 && ! reg_referenced_p (x, maybe_kill_insn))
13881 return;
13882
13883 if (where_dead
13884 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
13885 && DF_INSN_LUID (where_dead) >= from_luid
13886 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
13887 {
13888 rtx note = remove_death (regno, where_dead);
13889
13890 /* It is possible for the call above to return 0. This can occur
13891 when last_death points to I2 or I1 that we combined with.
13892 In that case make a new note.
13893
13894 We must also check for the case where X is a hard register
13895 and NOTE is a death note for a range of hard registers
13896 including X. In that case, we must put REG_DEAD notes for
13897 the remaining registers in place of NOTE. */
13898
13899 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
13900 && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
13901 {
13902 unsigned int deadregno = REGNO (XEXP (note, 0));
13903 unsigned int deadend = END_REGNO (XEXP (note, 0));
13904 unsigned int ourend = END_REGNO (x);
13905 unsigned int i;
13906
13907 for (i = deadregno; i < deadend; i++)
13908 if (i < regno || i >= ourend)
13909 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
13910 }
13911
13912 /* If we didn't find any note, or if we found a REG_DEAD note that
13913 covers only part of the given reg, and we have a multi-reg hard
13914 register, then to be safe we must check for REG_DEAD notes
13915 for each register other than the first. They could have
13916 their own REG_DEAD notes lying around. */
13917 else if ((note == 0
13918 || (note != 0
13919 && partial_subreg_p (GET_MODE (XEXP (note, 0)),
13920 GET_MODE (x))))
13921 && regno < FIRST_PSEUDO_REGISTER
13922 && REG_NREGS (x) > 1)
13923 {
13924 unsigned int ourend = END_REGNO (x);
13925 unsigned int i, offset;
13926 rtx oldnotes = 0;
13927
13928 if (note)
13929 offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
13930 else
13931 offset = 1;
13932
13933 for (i = regno + offset; i < ourend; i++)
13934 move_deaths (regno_reg_rtx[i],
13935 maybe_kill_insn, from_luid, to_insn, &oldnotes);
13936 }
13937
13938 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
13939 {
13940 XEXP (note, 1) = *pnotes;
13941 *pnotes = note;
13942 }
13943 else
13944 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
13945 }
13946
13947 return;
13948 }
13949
13950 else if (GET_CODE (x) == SET)
13951 {
13952 rtx dest = SET_DEST (x);
13953
13954 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
13955
13956 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
13957 that accesses one word of a multi-word item, some
13958 piece of every register in the expression is used by
13959 this insn, so remove any old death. */
13960 /* ??? So why do we test for equality of the sizes? */
13961
13962 if (GET_CODE (dest) == ZERO_EXTRACT
13963 || GET_CODE (dest) == STRICT_LOW_PART
13964 || (GET_CODE (dest) == SUBREG
13965 && (((GET_MODE_SIZE (GET_MODE (dest))
13966 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
13967 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
13968 + UNITS_PER_WORD - 1) / UNITS_PER_WORD))))
13969 {
13970 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
13971 return;
13972 }
13973
13974 /* If this is some other SUBREG, we know it replaces the entire
13975 value, so use that as the destination. */
13976 if (GET_CODE (dest) == SUBREG)
13977 dest = SUBREG_REG (dest);
13978
13979 /* If this is a MEM, adjust deaths of anything used in the address.
13980 For a REG (the only other possibility), the entire value is
13981 being replaced so the old value is not used in this insn. */
13982
13983 if (MEM_P (dest))
13984 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
13985 to_insn, pnotes);
13986 return;
13987 }
13988
13989 else if (GET_CODE (x) == CLOBBER)
13990 return;
13991
13992 len = GET_RTX_LENGTH (code);
13993 fmt = GET_RTX_FORMAT (code);
13994
13995 for (i = 0; i < len; i++)
13996 {
13997 if (fmt[i] == 'E')
13998 {
13999 int j;
14000 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
14001 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
14002 to_insn, pnotes);
14003 }
14004 else if (fmt[i] == 'e')
14005 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
14006 }
14007 }
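
/* As an example (invented insns), suppose I2 is

	(set (reg:SI 101) (plus:SI (reg:SI 100) (const_int 1)))

   and register 100 carries a REG_DEAD note on I2.  When the PLUS is
   substituted into I3, register 100 is now used (and dies) there
   instead, so move_deaths removes the note from I2 and queues a fresh
   REG_DEAD note for (reg:SI 100) on *PNOTES, to be placed later by
   distribute_notes.  */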
14008 \f
14009 /* Return 1 if X is the target of a bit-field assignment in BODY, the
14010 pattern of an insn. X must be a REG. */
14011
14012 static int
14013 reg_bitfield_target_p (rtx x, rtx body)
14014 {
14015 int i;
14016
14017 if (GET_CODE (body) == SET)
14018 {
14019 rtx dest = SET_DEST (body);
14020 rtx target;
14021 unsigned int regno, tregno, endregno, endtregno;
14022
14023 if (GET_CODE (dest) == ZERO_EXTRACT)
14024 target = XEXP (dest, 0);
14025 else if (GET_CODE (dest) == STRICT_LOW_PART)
14026 target = SUBREG_REG (XEXP (dest, 0));
14027 else
14028 return 0;
14029
14030 if (GET_CODE (target) == SUBREG)
14031 target = SUBREG_REG (target);
14032
14033 if (!REG_P (target))
14034 return 0;
14035
14036 tregno = REGNO (target), regno = REGNO (x);
14037 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
14038 return target == x;
14039
14040 endtregno = end_hard_regno (GET_MODE (target), tregno);
14041 endregno = end_hard_regno (GET_MODE (x), regno);
14042
14043 return endregno > tregno && regno < endtregno;
14044 }
14045
14046 else if (GET_CODE (body) == PARALLEL)
14047 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
14048 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
14049 return 1;
14050
14051 return 0;
14052 }
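
/* For example, with BODY

	(set (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 0))
	     (reg:SI 101))

   reg_bitfield_target_p returns 1 for (reg:SI 100): the insn writes part
   of that register through a bit-field destination even though it does
   not set the register as a whole.  */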
14053 \f
14054 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
14055 as appropriate. I3 and I2 are the insns resulting from the combination
14056 insns including FROM (I2 may be zero).
14057
14058 ELIM_I2 and ELIM_I1 are either zero or registers that we know will
14059 not need REG_DEAD notes because they are being substituted for. This
14060 saves searching in the most common cases.
14061
14062 Each note in the list is either ignored or placed on some insns, depending
14063 on the type of note. */
14064
14065 static void
14066 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
14067 rtx elim_i2, rtx elim_i1, rtx elim_i0)
14068 {
14069 rtx note, next_note;
14070 rtx tem_note;
14071 rtx_insn *tem_insn;
14072
14073 for (note = notes; note; note = next_note)
14074 {
14075 rtx_insn *place = 0, *place2 = 0;
14076
14077 next_note = XEXP (note, 1);
14078 switch (REG_NOTE_KIND (note))
14079 {
14080 case REG_BR_PROB:
14081 case REG_BR_PRED:
14082 /* Doesn't matter much where we put this, as long as it's somewhere.
14083 It is preferable to keep these notes on branches, which is most
14084 likely to be i3. */
14085 place = i3;
14086 break;
14087
14088 case REG_NON_LOCAL_GOTO:
14089 if (JUMP_P (i3))
14090 place = i3;
14091 else
14092 {
14093 gcc_assert (i2 && JUMP_P (i2));
14094 place = i2;
14095 }
14096 break;
14097
14098 case REG_EH_REGION:
14099 /* These notes must remain with the call or trapping instruction. */
14100 if (CALL_P (i3))
14101 place = i3;
14102 else if (i2 && CALL_P (i2))
14103 place = i2;
14104 else
14105 {
14106 gcc_assert (cfun->can_throw_non_call_exceptions);
14107 if (may_trap_p (i3))
14108 place = i3;
14109 else if (i2 && may_trap_p (i2))
14110 place = i2;
14111 /* ??? Otherwise assume we've combined things such that we
14112 can now prove that the instructions can't trap. Drop the
14113 note in this case. */
14114 }
14115 break;
14116
14117 case REG_ARGS_SIZE:
14118 /* ??? It is unclear how to distribute this among i3-i1. Assume i3
14119 contains the entire adjustment. Assert that i3 contains at least some of it. */
14120 if (!noop_move_p (i3))
14121 {
14122 int old_size, args_size = INTVAL (XEXP (note, 0));
14123 /* fixup_args_size_notes looks at REG_NORETURN note,
14124 so ensure the note is placed there first. */
14125 if (CALL_P (i3))
14126 {
14127 rtx *np;
14128 for (np = &next_note; *np; np = &XEXP (*np, 1))
14129 if (REG_NOTE_KIND (*np) == REG_NORETURN)
14130 {
14131 rtx n = *np;
14132 *np = XEXP (n, 1);
14133 XEXP (n, 1) = REG_NOTES (i3);
14134 REG_NOTES (i3) = n;
14135 break;
14136 }
14137 }
14138 old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
14139 /* When !ACCUMULATE_OUTGOING_ARGS, emit_call_1 adds a REG_ARGS_SIZE
14140 note to all noreturn calls; allow that here. */
14141 gcc_assert (old_size != args_size
14142 || (CALL_P (i3)
14143 && !ACCUMULATE_OUTGOING_ARGS
14144 && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
14145 }
14146 break;
14147
14148 case REG_NORETURN:
14149 case REG_SETJMP:
14150 case REG_TM:
14151 case REG_CALL_DECL:
14152 /* These notes must remain with the call. It should not be
14153 possible for both I2 and I3 to be a call. */
14154 if (CALL_P (i3))
14155 place = i3;
14156 else
14157 {
14158 gcc_assert (i2 && CALL_P (i2));
14159 place = i2;
14160 }
14161 break;
14162
14163 case REG_UNUSED:
14164 /* Any clobbers for i3 may still exist, and so we must process
14165 REG_UNUSED notes from that insn.
14166
14167 Any clobbers from i2 or i1 can only exist if they were added by
14168 recog_for_combine. In that case, recog_for_combine created the
14169 necessary REG_UNUSED notes. Trying to keep any original
14170 REG_UNUSED notes from these insns can cause incorrect output
14171 if it is for the same register as the original i3 dest.
14172 In that case, we will notice that the register is set in i3,
14173 and then add a REG_UNUSED note for the destination of i3, which
14174 is wrong. However, it is possible to have REG_UNUSED notes from
14175 i2 or i1 for registers which were both used and clobbered, so
14176 we keep notes from i2 or i1 if they will turn into REG_DEAD
14177 notes. */
14178
14179 /* If this register is set or clobbered in I3, put the note there
14180 unless there is one already. */
14181 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
14182 {
14183 if (from_insn != i3)
14184 break;
14185
14186 if (! (REG_P (XEXP (note, 0))
14187 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
14188 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
14189 place = i3;
14190 }
14191 /* Otherwise, if this register is used by I3, then this register
14192 now dies here, so we must put a REG_DEAD note here unless there
14193 is one already. */
14194 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
14195 && ! (REG_P (XEXP (note, 0))
14196 ? find_regno_note (i3, REG_DEAD,
14197 REGNO (XEXP (note, 0)))
14198 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
14199 {
14200 PUT_REG_NOTE_KIND (note, REG_DEAD);
14201 place = i3;
14202 }
14203 break;
14204
14205 case REG_EQUAL:
14206 case REG_EQUIV:
14207 case REG_NOALIAS:
14208 /* These notes say something about results of an insn. We can
14209 only support them if they used to be on I3 in which case they
14210 remain on I3. Otherwise they are ignored.
14211
14212 If the note refers to an expression that is not a constant, we
14213 must also ignore the note since we cannot tell whether the
14214 equivalence is still true. It might be possible to do
14215 slightly better than this (we only have a problem if I2DEST
14216 or I1DEST is present in the expression), but it doesn't
14217 seem worth the trouble. */
14218
14219 if (from_insn == i3
14220 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
14221 place = i3;
14222 break;
14223
14224 case REG_INC:
14225 /* These notes say something about how a register is used. They must
14226 be present on any use of the register in I2 or I3. */
14227 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
14228 place = i3;
14229
14230 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
14231 {
14232 if (place)
14233 place2 = i2;
14234 else
14235 place = i2;
14236 }
14237 break;
14238
14239 case REG_LABEL_TARGET:
14240 case REG_LABEL_OPERAND:
14241 /* This can show up in several ways -- either directly in the
14242 pattern, or hidden off in the constant pool with (or without?)
14243 a REG_EQUAL note. */
14244 /* ??? Ignore the without-reg_equal-note problem for now. */
14245 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
14246 || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
14247 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14248 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
14249 place = i3;
14250
14251 if (i2
14252 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
14253 || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
14254 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14255 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
14256 {
14257 if (place)
14258 place2 = i2;
14259 else
14260 place = i2;
14261 }
14262
14263 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
14264 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
14265 there. */
14266 if (place && JUMP_P (place)
14267 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14268 && (JUMP_LABEL (place) == NULL
14269 || JUMP_LABEL (place) == XEXP (note, 0)))
14270 {
14271 rtx label = JUMP_LABEL (place);
14272
14273 if (!label)
14274 JUMP_LABEL (place) = XEXP (note, 0);
14275 else if (LABEL_P (label))
14276 LABEL_NUSES (label)--;
14277 }
14278
14279 if (place2 && JUMP_P (place2)
14280 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14281 && (JUMP_LABEL (place2) == NULL
14282 || JUMP_LABEL (place2) == XEXP (note, 0)))
14283 {
14284 rtx label = JUMP_LABEL (place2);
14285
14286 if (!label)
14287 JUMP_LABEL (place2) = XEXP (note, 0);
14288 else if (LABEL_P (label))
14289 LABEL_NUSES (label)--;
14290 place2 = 0;
14291 }
14292 break;
14293
14294 case REG_NONNEG:
14295 /* This note says something about the value of a register prior
14296 to the execution of an insn. It is too much trouble to see
14297 if the note is still correct in all situations. It is better
14298 to simply delete it. */
14299 break;
14300
14301 case REG_DEAD:
14302 /* If we replaced the right hand side of FROM_INSN with a
14303 REG_EQUAL note, the original use of the dying register
14304 will not have been combined into I3 and I2. In such cases,
14305 FROM_INSN is guaranteed to be the first of the combined
14306 instructions, so we simply need to search back before
14307 FROM_INSN for the previous use or set of this register,
14308 then alter the notes there appropriately.
14309
14310 If the register is used as an input in I3, it dies there.
14311 Similarly for I2, if it is nonzero and adjacent to I3.
14312
14313 If the register is not used as an input in either I3 or I2
14314 and it is not one of the registers we were supposed to eliminate,
14315 there are two possibilities. We might have a non-adjacent I2
14316 or we might have somehow eliminated an additional register
14317 from a computation. For example, we might have had A & B where
14318 we discover that B will always be zero. In this case we will
14319 eliminate the reference to A.
14320
14321 In both cases, we must search to see if we can find a previous
14322 use of A and put the death note there. */
14323
14324 if (from_insn
14325 && from_insn == i2mod
14326 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
14327 tem_insn = from_insn;
14328 else
14329 {
14330 if (from_insn
14331 && CALL_P (from_insn)
14332 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
14333 place = from_insn;
14334 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
14335 place = i3;
14336 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
14337 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14338 place = i2;
14339 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
14340 && !(i2mod
14341 && reg_overlap_mentioned_p (XEXP (note, 0),
14342 i2mod_old_rhs)))
14343 || rtx_equal_p (XEXP (note, 0), elim_i1)
14344 || rtx_equal_p (XEXP (note, 0), elim_i0))
14345 break;
14346 tem_insn = i3;
14347 /* If the new I2 sets the same register that is marked dead
14348 in the note, we do not know where to put the note.
14349 Give up. */
14350 if (i2 != 0 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
14351 break;
14352 }
14353
14354 if (place == 0)
14355 {
14356 basic_block bb = this_basic_block;
14357
14358 for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
14359 {
14360 if (!NONDEBUG_INSN_P (tem_insn))
14361 {
14362 if (tem_insn == BB_HEAD (bb))
14363 break;
14364 continue;
14365 }
14366
14367 /* If the register is being set at TEM_INSN, see if that is all
14368 TEM_INSN is doing. If so, delete TEM_INSN. Otherwise, make this
14369 into a REG_UNUSED note instead. Don't delete sets to
14370 global register vars. */
14371 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
14372 || !global_regs[REGNO (XEXP (note, 0))])
14373 && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
14374 {
14375 rtx set = single_set (tem_insn);
14376 rtx inner_dest = 0;
14377 rtx_insn *cc0_setter = NULL;
14378
14379 if (set != 0)
14380 for (inner_dest = SET_DEST (set);
14381 (GET_CODE (inner_dest) == STRICT_LOW_PART
14382 || GET_CODE (inner_dest) == SUBREG
14383 || GET_CODE (inner_dest) == ZERO_EXTRACT);
14384 inner_dest = XEXP (inner_dest, 0))
14385 ;
14386
14387 /* Verify that it was the set, and not a clobber that
14388 modified the register.
14389
14390 CC0 targets must be careful to maintain setter/user
14391 pairs. If we cannot delete the setter due to side
14392 effects, mark the user with an UNUSED note instead
14393 of deleting it. */
14394
14395 if (set != 0 && ! side_effects_p (SET_SRC (set))
14396 && rtx_equal_p (XEXP (note, 0), inner_dest)
14397 && (!HAVE_cc0
14398 || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
14399 || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
14400 && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
14401 {
14402 /* Move the notes and links of TEM_INSN elsewhere.
14403 This might delete other dead insns recursively.
14404 First set the pattern to something that won't use
14405 any register. */
14406 rtx old_notes = REG_NOTES (tem_insn);
14407
14408 PATTERN (tem_insn) = pc_rtx;
14409 REG_NOTES (tem_insn) = NULL;
14410
14411 distribute_notes (old_notes, tem_insn, tem_insn, NULL,
14412 NULL_RTX, NULL_RTX, NULL_RTX);
14413 distribute_links (LOG_LINKS (tem_insn));
14414
14415 unsigned int regno = REGNO (XEXP (note, 0));
14416 reg_stat_type *rsp = &reg_stat[regno];
14417 if (rsp->last_set == tem_insn)
14418 record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14419
14420 SET_INSN_DELETED (tem_insn);
14421 if (tem_insn == i2)
14422 i2 = NULL;
14423
14424 /* Delete the setter too. */
14425 if (cc0_setter)
14426 {
14427 PATTERN (cc0_setter) = pc_rtx;
14428 old_notes = REG_NOTES (cc0_setter);
14429 REG_NOTES (cc0_setter) = NULL;
14430
14431 distribute_notes (old_notes, cc0_setter,
14432 cc0_setter, NULL,
14433 NULL_RTX, NULL_RTX, NULL_RTX);
14434 distribute_links (LOG_LINKS (cc0_setter));
14435
14436 SET_INSN_DELETED (cc0_setter);
14437 if (cc0_setter == i2)
14438 i2 = NULL;
14439 }
14440 }
14441 else
14442 {
14443 PUT_REG_NOTE_KIND (note, REG_UNUSED);
14444
14445 /* If there isn't already a REG_UNUSED note, put one
14446 here. Do not place a REG_DEAD note, even if
14447 the register is also used here; that would not
14448 match the algorithm used in lifetime analysis
14449 and can cause the consistency check in the
14450 scheduler to fail. */
14451 if (! find_regno_note (tem_insn, REG_UNUSED,
14452 REGNO (XEXP (note, 0))))
14453 place = tem_insn;
14454 break;
14455 }
14456 }
14457 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
14458 || (CALL_P (tem_insn)
14459 && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
14460 {
14461 place = tem_insn;
14462
14463 /* If we are doing a 3->2 combination, and we have a
14464 register which formerly died in i3 and was not used
14465 by i2, which now no longer dies in i3 and is used in
14466 i2 but does not die in i2, and place is between i2
14467 and i3, then we may need to move a link from place to
14468 i2. */
14469 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
14470 && from_insn
14471 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
14472 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14473 {
14474 struct insn_link *links = LOG_LINKS (place);
14475 LOG_LINKS (place) = NULL;
14476 distribute_links (links);
14477 }
14478 break;
14479 }
14480
14481 if (tem_insn == BB_HEAD (bb))
14482 break;
14483 }
14484
14485 }
14486
14487 /* If the register is set or already dead at PLACE, we needn't do
14488 anything with this note if it is still a REG_DEAD note.
14489 We check here if it is set at all, not if it is totally replaced,
14490 which is what `dead_or_set_p' checks, so also check for it being
14491 set partially. */
14492
14493 if (place && REG_NOTE_KIND (note) == REG_DEAD)
14494 {
14495 unsigned int regno = REGNO (XEXP (note, 0));
14496 reg_stat_type *rsp = &reg_stat[regno];
14497
14498 if (dead_or_set_p (place, XEXP (note, 0))
14499 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
14500 {
14501 /* Unless the register previously died in PLACE, clear
14502 last_death. [I no longer understand why this is
14503 being done.] */
14504 if (rsp->last_death != place)
14505 rsp->last_death = 0;
14506 place = 0;
14507 }
14508 else
14509 rsp->last_death = place;
14510
14511 /* If this is a death note for a hard reg that is occupying
14512 multiple registers, ensure that we are still using all
14513 parts of the object. If we find a piece of the object
14514 that is unused, we must arrange for an appropriate REG_DEAD
14515 note to be added for it. However, we can't just emit a USE
14516 and tag the note to it, since the register might actually
14517 be dead; so we recurse, and the recursive call then finds
14518 the previous insn that used this register. */
14519
14520 if (place && REG_NREGS (XEXP (note, 0)) > 1)
14521 {
14522 unsigned int endregno = END_REGNO (XEXP (note, 0));
14523 bool all_used = true;
14524 unsigned int i;
14525
14526 for (i = regno; i < endregno; i++)
14527 if ((! refers_to_regno_p (i, PATTERN (place))
14528 && ! find_regno_fusage (place, USE, i))
14529 || dead_or_set_regno_p (place, i))
14530 {
14531 all_used = false;
14532 break;
14533 }
14534
14535 if (! all_used)
14536 {
14537 /* Put only REG_DEAD notes for pieces that are
14538 not already dead or set. */
14539
14540 for (i = regno; i < endregno;
14541 i += hard_regno_nregs (i, reg_raw_mode[i]))
14542 {
14543 rtx piece = regno_reg_rtx[i];
14544 basic_block bb = this_basic_block;
14545
14546 if (! dead_or_set_p (place, piece)
14547 && ! reg_bitfield_target_p (piece,
14548 PATTERN (place)))
14549 {
14550 rtx new_note = alloc_reg_note (REG_DEAD, piece,
14551 NULL_RTX);
14552
14553 distribute_notes (new_note, place, place,
14554 NULL, NULL_RTX, NULL_RTX,
14555 NULL_RTX);
14556 }
14557 else if (! refers_to_regno_p (i, PATTERN (place))
14558 && ! find_regno_fusage (place, USE, i))
14559 for (tem_insn = PREV_INSN (place); ;
14560 tem_insn = PREV_INSN (tem_insn))
14561 {
14562 if (!NONDEBUG_INSN_P (tem_insn))
14563 {
14564 if (tem_insn == BB_HEAD (bb))
14565 break;
14566 continue;
14567 }
14568 if (dead_or_set_p (tem_insn, piece)
14569 || reg_bitfield_target_p (piece,
14570 PATTERN (tem_insn)))
14571 {
14572 add_reg_note (tem_insn, REG_UNUSED, piece);
14573 break;
14574 }
14575 }
14576 }
14577
14578 place = 0;
14579 }
14580 }
14581 }
14582 break;
14583
14584 default:
14585 /* Any other notes should not be present at this point in the
14586 compilation. */
14587 gcc_unreachable ();
14588 }
14589
14590 if (place)
14591 {
14592 XEXP (note, 1) = REG_NOTES (place);
14593 REG_NOTES (place) = note;
14594 }
14595
14596 if (place2)
14597 add_shallow_copy_of_reg_note (place2, note);
14598 }
14599 }
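
/* A typical case: after combining I2 into I3, a REG_DEAD note for
   (reg:SI 100) taken from I2 is redistributed here.  If I3 still uses
   register 100, the note simply moves to I3; if the register has been
   optimized away entirely, we search backwards from I3 for the previous
   use or set of it, and either attach the note to that use, delete the
   setting insn outright if it has become dead, or downgrade the note to
   REG_UNUSED.  */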
14600 \f
14601 /* Similarly to above, distribute the LOG_LINKS that used to be present on
14602 I3, I2, and I1 to new locations. This is also called to add a link
14603 pointing at I3 when I3's destination is changed. */
14604
14605 static void
14606 distribute_links (struct insn_link *links)
14607 {
14608 struct insn_link *link, *next_link;
14609
14610 for (link = links; link; link = next_link)
14611 {
14612 rtx_insn *place = 0;
14613 rtx_insn *insn;
14614 rtx set, reg;
14615
14616 next_link = link->next;
14617
14618 /* If the insn that this link points to is a NOTE, ignore it. */
14619 if (NOTE_P (link->insn))
14620 continue;
14621
14622 set = 0;
14623 rtx pat = PATTERN (link->insn);
14624 if (GET_CODE (pat) == SET)
14625 set = pat;
14626 else if (GET_CODE (pat) == PARALLEL)
14627 {
14628 int i;
14629 for (i = 0; i < XVECLEN (pat, 0); i++)
14630 {
14631 set = XVECEXP (pat, 0, i);
14632 if (GET_CODE (set) != SET)
14633 continue;
14634
14635 reg = SET_DEST (set);
14636 while (GET_CODE (reg) == ZERO_EXTRACT
14637 || GET_CODE (reg) == STRICT_LOW_PART
14638 || GET_CODE (reg) == SUBREG)
14639 reg = XEXP (reg, 0);
14640
14641 if (!REG_P (reg))
14642 continue;
14643
14644 if (REGNO (reg) == link->regno)
14645 break;
14646 }
14647 if (i == XVECLEN (pat, 0))
14648 continue;
14649 }
14650 else
14651 continue;
14652
14653 reg = SET_DEST (set);
14654
14655 while (GET_CODE (reg) == ZERO_EXTRACT
14656 || GET_CODE (reg) == STRICT_LOW_PART
14657 || GET_CODE (reg) == SUBREG)
14658 reg = XEXP (reg, 0);
14659
14660 /* A LOG_LINK is defined as being placed on the first insn that uses
14661 a register and points to the insn that sets the register. Start
14662 searching at the next insn after the target of the link and stop
14663 when we reach a set of the register or the end of the basic block.
14664
14665 Note that this correctly handles the link that used to point from
14666 I3 to I2. Also note that not much searching is typically done here
14667 since most links don't point very far away. */
14668
14669 for (insn = NEXT_INSN (link->insn);
14670 (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
14671 || BB_HEAD (this_basic_block->next_bb) != insn));
14672 insn = NEXT_INSN (insn))
14673 if (DEBUG_INSN_P (insn))
14674 continue;
14675 else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
14676 {
14677 if (reg_referenced_p (reg, PATTERN (insn)))
14678 place = insn;
14679 break;
14680 }
14681 else if (CALL_P (insn)
14682 && find_reg_fusage (insn, USE, reg))
14683 {
14684 place = insn;
14685 break;
14686 }
14687 else if (INSN_P (insn) && reg_set_p (reg, insn))
14688 break;
14689
14690 /* If we found a place to put the link, place it there unless there
14691 is already a link to the same insn as LINK at that point. */
14692
14693 if (place)
14694 {
14695 struct insn_link *link2;
14696
14697 FOR_EACH_LOG_LINK (link2, place)
14698 if (link2->insn == link->insn && link2->regno == link->regno)
14699 break;
14700
14701 if (link2 == NULL)
14702 {
14703 link->next = LOG_LINKS (place);
14704 LOG_LINKS (place) = link;
14705
14706 /* Set added_links_insn to the earliest insn we added a
14707 link to. */
14708 if (added_links_insn == 0
14709 || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
14710 added_links_insn = place;
14711 }
14712 }
14713 }
14714 }
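
/* For example (invented insn numbers), if insn 10 sets (reg:SI 100) and
   insn 14 held the LOG_LINK to insn 10 but no longer uses register 100
   after the combination, the link is re-homed by scanning forward from
   insn 10 for the next insn in the block that uses register 100 before
   it is set again; the link is attached there unless an equivalent link
   is already present.  */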
14715 \f
14716 /* Check for any register or memory mentioned in EQUIV that is not
14717 mentioned in EXPR. This is used to restrict EQUIV to "specializations"
14718 of EXPR where some registers may have been replaced by constants. */
14719
14720 static bool
14721 unmentioned_reg_p (rtx equiv, rtx expr)
14722 {
14723 subrtx_iterator::array_type array;
14724 FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
14725 {
14726 const_rtx x = *iter;
14727 if ((REG_P (x) || MEM_P (x))
14728 && !reg_mentioned_p (x, expr))
14729 return true;
14730 }
14731 return false;
14732 }
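
/* For instance, unmentioned_reg_p returns true for
   EQUIV = (mem:SI (reg:SI 102)) against
   EXPR = (plus:SI (reg:SI 100) (reg:SI 101)), because the MEM does not
   appear in EXPR; it returns false for
   EQUIV = (plus:SI (reg:SI 100) (const_int 4)) against the same EXPR,
   since that EQUIV merely replaces a register of EXPR with a
   constant.  */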
14733 \f
14734 DEBUG_FUNCTION void
14735 dump_combine_stats (FILE *file)
14736 {
14737 fprintf
14738 (file,
14739 ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
14740 combine_attempts, combine_merges, combine_extras, combine_successes);
14741 }
14742
14743 void
14744 dump_combine_total_stats (FILE *file)
14745 {
14746 fprintf
14747 (file,
14748 "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
14749 total_attempts, total_merges, total_extras, total_successes);
14750 }
14751 \f
14752 /* Try combining insns through substitution. */
14753 static unsigned int
14754 rest_of_handle_combine (void)
14755 {
14756 int rebuild_jump_labels_after_combine;
14757
14758 df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
14759 df_note_add_problem ();
14760 df_analyze ();
14761
14762 regstat_init_n_sets_and_refs ();
14763 reg_n_sets_max = max_reg_num ();
14764
14765 rebuild_jump_labels_after_combine
14766 = combine_instructions (get_insns (), max_reg_num ());
14767
14768 /* Combining insns may have turned an indirect jump into a
14769 direct jump. Rebuild the JUMP_LABEL fields of jumping
14770 instructions. */
14771 if (rebuild_jump_labels_after_combine)
14772 {
14773 if (dom_info_available_p (CDI_DOMINATORS))
14774 free_dominance_info (CDI_DOMINATORS);
14775 timevar_push (TV_JUMP);
14776 rebuild_jump_labels (get_insns ());
14777 cleanup_cfg (0);
14778 timevar_pop (TV_JUMP);
14779 }
14780
14781 regstat_free_n_sets_and_refs ();
14782 return 0;
14783 }
14784
14785 namespace {
14786
14787 const pass_data pass_data_combine =
14788 {
14789 RTL_PASS, /* type */
14790 "combine", /* name */
14791 OPTGROUP_NONE, /* optinfo_flags */
14792 TV_COMBINE, /* tv_id */
14793 PROP_cfglayout, /* properties_required */
14794 0, /* properties_provided */
14795 0, /* properties_destroyed */
14796 0, /* todo_flags_start */
14797 TODO_df_finish, /* todo_flags_finish */
14798 };
14799
14800 class pass_combine : public rtl_opt_pass
14801 {
14802 public:
14803 pass_combine (gcc::context *ctxt)
14804 : rtl_opt_pass (pass_data_combine, ctxt)
14805 {}
14806
14807 /* opt_pass methods: */
14808 virtual bool gate (function *) { return (optimize > 0); }
14809 virtual unsigned int execute (function *)
14810 {
14811 return rest_of_handle_combine ();
14812 }
14813
14814 }; // class pass_combine
14815
14816 } // anon namespace
14817
14818 rtl_opt_pass *
14819 make_pass_combine (gcc::context *ctxt)
14820 {
14821 return new pass_combine (ctxt);
14822 }