1 /* Optimize by combining instructions for GNU compiler.
2 Copyright (C) 1987-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 /* This module is essentially the "combiner" phase of the U. of Arizona
21 Portable Optimizer, but redone to work on our list-structured
22 representation for RTL instead of their string representation.
23
24 The LOG_LINKS of each insn identify the most recent assignment
25 to each REG used in the insn. It is a list of previous insns,
26 each of which contains a SET for a REG that is used in this insn
27 and not used or set in between. LOG_LINKs never cross basic blocks.
28 They were set up by the preceding pass (lifetime analysis).
29
30 We try to combine each pair of insns joined by a logical link.
31 We also try to combine triplets of insns A, B and C when C has
32 a link back to B and B has a link back to A. Likewise for a
33 small number of quadruplets of insns A, B, C and D for which
34 there's high likelihood of success.
35
36 LOG_LINKS does not have links for uses of CC0. None are
37 needed, because the insn that sets CC0 is always immediately
38 before the insn that tests it. So we always regard a branch
39 insn as having a logical link to the preceding insn. The same is true
40 for an insn explicitly using CC0.
41
42 We check (with modified_between_p) to avoid combining in such a way
43 as to move a computation to a place where its value would be different.
44
45 Combination is done by mathematically substituting the previous
46 insn(s) values for the regs they set into the expressions in
47 the later insns that refer to these regs. If the result is a valid insn
48 for our target machine, according to the machine description,
49 we install it, delete the earlier insns, and update the data flow
50 information (LOG_LINKS and REG_NOTES) for what we did.
51
52 There are a few exceptions where the dataflow information isn't
53 completely updated (however this is only a local issue since it is
54 regenerated before the next pass that uses it):
55
56 - reg_live_length is not updated
57 - reg_n_refs is not adjusted in the rare case when a register is
58 no longer required in a computation
59 - there are extremely rare cases (see distribute_notes) when a
60 REG_DEAD note is lost
61 - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62 removed because there is no way to know which register it was
63 linking
64
65 To simplify substitution, we combine only when the earlier insn(s)
66 consist of only a single assignment. To simplify updating afterward,
67 we never combine when a subroutine call appears in the middle.
68
69 Since we do not represent assignments to CC0 explicitly except when that
70 is all an insn does, there is no LOG_LINKS entry in an insn that uses
71 the condition code for the insn that set the condition code.
72 Fortunately, these two insns must be consecutive.
73 Therefore, every JUMP_INSN is taken to have an implicit logical link
74 to the preceding insn. This is not quite right, since non-jumps can
75 also use the condition code; but in practice such insns would not
76 combine anyway. */
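
/* As an illustrative sketch (not part of the original sources; register
   numbers are hypothetical), suppose I2 is

	(set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 4)))

   and I3, which has a LOG_LINK back to I2, is

	(set (mem:SI (reg:SI 100)) (const_int 0))

   where reg 100 dies in I3 and is used nowhere else.  Substituting I2's
   SET_SRC into I3 yields the tentative pattern

	(set (mem:SI (plus:SI (reg:SI 101) (const_int 4))) (const_int 0))

   If recog accepts that pattern for the target, the combiner installs it
   in I3, deletes I2, and redistributes the LOG_LINKS and REG_NOTES.  */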
77
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "backend.h"
82 #include "target.h"
83 #include "rtl.h"
84 #include "tree.h"
85 #include "cfghooks.h"
86 #include "predict.h"
87 #include "df.h"
88 #include "memmodel.h"
89 #include "tm_p.h"
90 #include "optabs.h"
91 #include "regs.h"
92 #include "emit-rtl.h"
93 #include "recog.h"
94 #include "cgraph.h"
95 #include "stor-layout.h"
96 #include "cfgrtl.h"
97 #include "cfgcleanup.h"
98 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
99 #include "explow.h"
100 #include "insn-attr.h"
101 #include "rtlhooks-def.h"
102 #include "expr.h"
103 #include "params.h"
104 #include "tree-pass.h"
105 #include "valtrack.h"
106 #include "rtl-iter.h"
107 #include "print-rtl.h"
108
109 /* Number of attempts to combine instructions in this function. */
110
111 static int combine_attempts;
112
113 /* Number of attempts that got as far as substitution in this function. */
114
115 static int combine_merges;
116
117 /* Number of instructions combined with added SETs in this function. */
118
119 static int combine_extras;
120
121 /* Number of instructions combined in this function. */
122
123 static int combine_successes;
124
125 /* Totals over entire compilation. */
126
127 static int total_attempts, total_merges, total_extras, total_successes;
128
129 /* combine_instructions may try to replace the right hand side of the
130 second instruction with the value of an associated REG_EQUAL note
131 before throwing it at try_combine. That is problematic when there
132 is a REG_DEAD note for a register used in the old right hand side
133 and can cause distribute_notes to do wrong things. This is the
134 second instruction if it has been so modified, null otherwise. */
135
136 static rtx_insn *i2mod;
137
138 /* When I2MOD is nonnull, this is a copy of the old right hand side. */
139
140 static rtx i2mod_old_rhs;
141
142 /* When I2MOD is nonnull, this is a copy of the new right hand side. */
143
144 static rtx i2mod_new_rhs;
145 \f
146 struct reg_stat_type {
147 /* Record last point of death of (hard or pseudo) register n. */
148 rtx_insn *last_death;
149
150 /* Record last point of modification of (hard or pseudo) register n. */
151 rtx_insn *last_set;
152
153 /* The next group of fields allows the recording of the last value assigned
154 to (hard or pseudo) register n. We use this information to see if an
155 operation being processed is redundant given a prior operation performed
156 on the register. For example, an `and' with a constant is redundant if
157 all the zero bits are already known to be turned off.
158
159 We use an approach similar to that used by cse, but change it in the
160 following ways:
161
162 (1) We do not want to reinitialize at each label.
163 (2) It is useful, but not critical, to know the actual value assigned
164 to a register. Often just its form is helpful.
165
166 Therefore, we maintain the following fields:
167
168 last_set_value the last value assigned
169 last_set_label records the value of label_tick when the
170 register was assigned
171 last_set_table_tick records the value of label_tick when a
172 value using the register is assigned
173 last_set_invalid set to nonzero when it is not valid
174 to use the value of this register in some
175 register's value
176
177 To understand the usage of these tables, it is important to understand
178 the distinction between the value in last_set_value being valid and
179 the register being validly contained in some other expression in the
180 table.
181
182 (The next two parameters are out of date).
183
184 reg_stat[i].last_set_value is valid if it is nonzero, and either
185 reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
186
187 Register I may validly appear in any expression returned for the value
188 of another register if reg_n_sets[i] is 1. It may also appear in the
189 value for register J if reg_stat[j].last_set_invalid is zero, or
190 reg_stat[i].last_set_label < reg_stat[j].last_set_label.
191
192 If an expression is found in the table containing a register which may
193 not validly appear in an expression, the register is replaced by
194 something that won't match, (clobber (const_int 0)). */
195
196 /* Record last value assigned to (hard or pseudo) register n. */
197
198 rtx last_set_value;
199
200 /* Record the value of label_tick when an expression involving register n
201 is placed in last_set_value. */
202
203 int last_set_table_tick;
204
205 /* Record the value of label_tick when the value for register n is placed in
206 last_set_value. */
207
208 int last_set_label;
209
210 /* These fields are maintained in parallel with last_set_value and are
211 used to store the mode in which the register was last set, the bits
212 that were known to be zero when it was last set, and the number of
213 sign bits copies it was known to have when it was last set. */
214
215 unsigned HOST_WIDE_INT last_set_nonzero_bits;
216 char last_set_sign_bit_copies;
217 ENUM_BITFIELD(machine_mode) last_set_mode : 8;
218
219 /* Set nonzero if references to register n in expressions should not be
220 used. last_set_invalid is set nonzero when this register is being
221 assigned to and last_set_table_tick == label_tick. */
222
223 char last_set_invalid;
224
225 /* Some registers that are set more than once and used in more than one
226 basic block are nevertheless always set in similar ways. For example,
227 a QImode register may be loaded from memory in two places on a machine
228 where byte loads zero extend.
229
230 We record in the following fields if a register has some leading bits
231 that are always equal to the sign bit, and what we know about the
232 nonzero bits of a register, specifically which bits are known to be
233 zero.
234
235 If an entry is zero, it means that we don't know anything special. */
236
237 unsigned char sign_bit_copies;
238
239 unsigned HOST_WIDE_INT nonzero_bits;
240
241 /* Record the value of the label_tick when the last truncation
242 happened. The field truncated_to_mode is only valid if
243 truncation_label == label_tick. */
244
245 int truncation_label;
246
247 /* Record the last truncation seen for this register. If truncation
248 is not a nop to this mode we might be able to save an explicit
249 truncation if we know that value already contains a truncated
250 value. */
251
252 ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
253 };
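
/* A concrete instance (with a hypothetical register number) of the
   redundant `and' mentioned in the comment above: if
   reg_stat[100].nonzero_bits == 0xff, i.e. only the low eight bits of
   reg 100 can be nonzero, then

	(set (reg:SI 100) (and:SI (reg:SI 100) (const_int 255)))

   is redundant, because every bit the AND could clear is already known
   to be zero.  */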
254
255
256 static vec<reg_stat_type> reg_stat;
257
258 /* One plus the highest pseudo for which we track REG_N_SETS.
259 regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
260 but during combine_split_insns new pseudos can be created. As we don't have
261 updated DF information in that case, it is hard to initialize the array
262 after growing. The combiner only cares about REG_N_SETS (regno) == 1,
263 so instead of growing the arrays, just assume all newly created pseudos
264 during combine might be set multiple times. */
265
266 static unsigned int reg_n_sets_max;
267
268 /* Record the luid of the last insn that invalidated memory
269 (anything that writes memory, and subroutine calls, but not pushes). */
270
271 static int mem_last_set;
272
273 /* Record the luid of the last CALL_INSN
274 so we can tell whether a potential combination crosses any calls. */
275
276 static int last_call_luid;
277
278 /* When `subst' is called, this is the insn that is being modified
279 (by combining in a previous insn). The PATTERN of this insn
280 is still the old pattern partially modified and it should not be
281 looked at, but this may be used to examine the successors of the insn
282 to judge whether a simplification is valid. */
283
284 static rtx_insn *subst_insn;
285
286 /* This is the lowest LUID that `subst' is currently dealing with.
287 get_last_value will not return a value if the register was set at or
288 after this LUID. If not for this mechanism, we could get confused if
289 I2 or I1 in try_combine were an insn that used the old value of a register
290 to obtain a new value. In that case, we might erroneously get the
291 new value of the register when we wanted the old one. */
292
293 static int subst_low_luid;
294
295 /* This contains any hard registers that are used in newpat; reg_dead_at_p
296 must consider all these registers to be always live. */
297
298 static HARD_REG_SET newpat_used_regs;
299
300 /* This is an insn to which a LOG_LINKS entry has been added. If this
301 insn is earlier than I2 or I3, combine should rescan starting at
302 that location. */
303
304 static rtx_insn *added_links_insn;
305
306 /* And similarly, for notes. */
307
308 static rtx_insn *added_notes_insn;
309
310 /* Basic block in which we are performing combines. */
311 static basic_block this_basic_block;
312 static bool optimize_this_for_speed_p;
313
314 \f
315 /* Length of the currently allocated uid_insn_cost array. */
316
317 static int max_uid_known;
318
319 /* The following array records the insn_cost for every insn
320 in the instruction stream. */
321
322 static int *uid_insn_cost;
323
324 /* The following array records the LOG_LINKS for every insn in the
325 instruction stream as struct insn_link pointers. */
326
327 struct insn_link {
328 rtx_insn *insn;
329 unsigned int regno;
330 struct insn_link *next;
331 };
332
333 static struct insn_link **uid_log_links;
334
335 static inline int
336 insn_uid_check (const_rtx insn)
337 {
338 int uid = INSN_UID (insn);
339 gcc_checking_assert (uid <= max_uid_known);
340 return uid;
341 }
342
343 #define INSN_COST(INSN) (uid_insn_cost[insn_uid_check (INSN)])
344 #define LOG_LINKS(INSN) (uid_log_links[insn_uid_check (INSN)])
345
346 #define FOR_EACH_LOG_LINK(L, INSN) \
347 for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
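
/* A typical traversal (variable names are hypothetical), as used
   throughout this file to ask whether FROM_INSN supplies a register
   used by INSN:

	struct insn_link *link;
	FOR_EACH_LOG_LINK (link, insn)
	  if (link->insn == from_insn)
	    break;

   After the loop, LINK is nonnull iff such a link exists.  */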
348
349 /* Links for LOG_LINKS are allocated from this obstack. */
350
351 static struct obstack insn_link_obstack;
352
353 /* Allocate a link. */
354
355 static inline struct insn_link *
356 alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
357 {
358 struct insn_link *l
359 = (struct insn_link *) obstack_alloc (&insn_link_obstack,
360 sizeof (struct insn_link));
361 l->insn = insn;
362 l->regno = regno;
363 l->next = next;
364 return l;
365 }
366
367 /* Incremented for each basic block. */
368
369 static int label_tick;
370
371 /* Reset to label_tick for each extended basic block in scanning order. */
372
373 static int label_tick_ebb_start;
374
375 /* Mode used to compute significance in reg_stat[].nonzero_bits. It is the
376 largest integer mode that can fit in HOST_BITS_PER_WIDE_INT. */
377
378 static scalar_int_mode nonzero_bits_mode;
379
380 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
381 be safely used. It is zero while computing them and after combine has
382 completed. This former test prevents propagating values based on
383 previously set values, which can be incorrect if a variable is modified
384 in a loop. */
385
386 static int nonzero_sign_valid;
387
388 \f
389 /* Record one modification to rtl structure
390 to be undone by storing old_contents into *where. */
391
392 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
393
394 struct undo
395 {
396 struct undo *next;
397 enum undo_kind kind;
398 union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
399 union { rtx *r; int *i; struct insn_link **l; } where;
400 };
401
402 /* Record a bunch of changes to be undone, up to MAX_UNDO of them.
403 num_undo says how many are currently recorded.
404
405 other_insn is nonzero if we have modified some other insn in the process
406 of working on subst_insn. It must be verified too. */
407
408 struct undobuf
409 {
410 struct undo *undos;
411 struct undo *frees;
412 rtx_insn *other_insn;
413 };
414
415 static struct undobuf undobuf;
416
417 /* Number of times the pseudo being substituted for
418 was found and replaced. */
419
420 static int n_occurrences;
421
422 static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
423 scalar_int_mode,
424 unsigned HOST_WIDE_INT *);
425 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
426 scalar_int_mode,
427 unsigned int *);
428 static void do_SUBST (rtx *, rtx);
429 static void do_SUBST_INT (int *, int);
430 static void init_reg_last (void);
431 static void setup_incoming_promotions (rtx_insn *);
432 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
433 static int cant_combine_insn_p (rtx_insn *);
434 static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
435 rtx_insn *, rtx_insn *, rtx *, rtx *);
436 static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
437 static int contains_muldiv (rtx);
438 static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
439 int *, rtx_insn *);
440 static void undo_all (void);
441 static void undo_commit (void);
442 static rtx *find_split_point (rtx *, rtx_insn *, bool);
443 static rtx subst (rtx, rtx, rtx, int, int, int);
444 static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
445 static rtx simplify_if_then_else (rtx);
446 static rtx simplify_set (rtx);
447 static rtx simplify_logical (rtx);
448 static rtx expand_compound_operation (rtx);
449 static const_rtx expand_field_assignment (const_rtx);
450 static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
451 rtx, unsigned HOST_WIDE_INT, int, int, int);
452 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
453 unsigned HOST_WIDE_INT *);
454 static rtx canon_reg_for_combine (rtx, rtx);
455 static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
456 scalar_int_mode, unsigned HOST_WIDE_INT, int);
457 static rtx force_to_mode (rtx, machine_mode,
458 unsigned HOST_WIDE_INT, int);
459 static rtx if_then_else_cond (rtx, rtx *, rtx *);
460 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
461 static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
462 static rtx make_field_assignment (rtx);
463 static rtx apply_distributive_law (rtx);
464 static rtx distribute_and_simplify_rtx (rtx, int);
465 static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
466 unsigned HOST_WIDE_INT);
467 static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
468 unsigned HOST_WIDE_INT);
469 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
470 HOST_WIDE_INT, machine_mode, int *);
471 static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
472 static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
473 int);
474 static int recog_for_combine (rtx *, rtx_insn *, rtx *);
475 static rtx gen_lowpart_for_combine (machine_mode, rtx);
476 static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
477 rtx, rtx *);
478 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
479 static void update_table_tick (rtx);
480 static void record_value_for_reg (rtx, rtx_insn *, rtx);
481 static void check_promoted_subreg (rtx_insn *, rtx);
482 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
483 static void record_dead_and_set_regs (rtx_insn *);
484 static int get_last_value_validate (rtx *, rtx_insn *, int, int);
485 static rtx get_last_value (const_rtx);
486 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
487 static int reg_dead_at_p (rtx, rtx_insn *);
488 static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
489 static int reg_bitfield_target_p (rtx, rtx);
490 static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
491 static void distribute_links (struct insn_link *);
492 static void mark_used_regs_combine (rtx);
493 static void record_promoted_value (rtx_insn *, rtx);
494 static bool unmentioned_reg_p (rtx, rtx);
495 static void record_truncated_values (rtx *, void *);
496 static bool reg_truncated_to_mode (machine_mode, const_rtx);
497 static rtx gen_lowpart_or_truncate (machine_mode, rtx);
498 \f
499
500 /* It is not safe to use ordinary gen_lowpart in combine.
501 See comments in gen_lowpart_for_combine. */
502 #undef RTL_HOOKS_GEN_LOWPART
503 #define RTL_HOOKS_GEN_LOWPART gen_lowpart_for_combine
504
505 /* Our implementation of gen_lowpart never emits a new pseudo. */
506 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
507 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT gen_lowpart_for_combine
508
509 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
510 #define RTL_HOOKS_REG_NONZERO_REG_BITS reg_nonzero_bits_for_combine
511
512 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
513 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES reg_num_sign_bit_copies_for_combine
514
515 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
516 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE reg_truncated_to_mode
517
518 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
519
520 \f
521 /* Convenience wrapper for the canonicalize_comparison target hook.
522 Target hooks cannot use enum rtx_code. */
523 static inline void
524 target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
525 bool op0_preserve_value)
526 {
527 int code_int = (int)*code;
528 targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
529 *code = (enum rtx_code)code_int;
530 }
531
532 /* Try to split PATTERN found in INSN. This returns NULL_RTX if
533 PATTERN cannot be split. Otherwise, it returns an insn sequence.
534 This is a wrapper around split_insns which ensures that the
535 reg_stat vector is made larger if the splitter creates a new
536 register. */
537
538 static rtx_insn *
539 combine_split_insns (rtx pattern, rtx_insn *insn)
540 {
541 rtx_insn *ret;
542 unsigned int nregs;
543
544 ret = split_insns (pattern, insn);
545 nregs = max_reg_num ();
546 if (nregs > reg_stat.length ())
547 reg_stat.safe_grow_cleared (nregs);
548 return ret;
549 }
550
551 /* This is used by find_single_use to locate an rtx in LOC that
552 contains exactly one use of DEST, which is typically either a REG
553 or CC0. It returns a pointer to the innermost rtx expression
554 containing DEST. Appearances of DEST that are being used to
555 totally replace it are not counted. */
556
557 static rtx *
558 find_single_use_1 (rtx dest, rtx *loc)
559 {
560 rtx x = *loc;
561 enum rtx_code code = GET_CODE (x);
562 rtx *result = NULL;
563 rtx *this_result;
564 int i;
565 const char *fmt;
566
567 switch (code)
568 {
569 case CONST:
570 case LABEL_REF:
571 case SYMBOL_REF:
572 CASE_CONST_ANY:
573 case CLOBBER:
574 case CLOBBER_HIGH:
575 return 0;
576
577 case SET:
578 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
579 of a REG that occupies all of the REG, the insn uses DEST if
580 it is mentioned in the destination or the source. Otherwise, we
581 just need to check the source. */
582 if (GET_CODE (SET_DEST (x)) != CC0
583 && GET_CODE (SET_DEST (x)) != PC
584 && !REG_P (SET_DEST (x))
585 && ! (GET_CODE (SET_DEST (x)) == SUBREG
586 && REG_P (SUBREG_REG (SET_DEST (x)))
587 && !read_modify_subreg_p (SET_DEST (x))))
588 break;
589
590 return find_single_use_1 (dest, &SET_SRC (x));
591
592 case MEM:
593 case SUBREG:
594 return find_single_use_1 (dest, &XEXP (x, 0));
595
596 default:
597 break;
598 }
599
600 /* If it wasn't one of the common cases above, check each expression and
601 vector of this code. Look for a unique usage of DEST. */
602
603 fmt = GET_RTX_FORMAT (code);
604 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
605 {
606 if (fmt[i] == 'e')
607 {
608 if (dest == XEXP (x, i)
609 || (REG_P (dest) && REG_P (XEXP (x, i))
610 && REGNO (dest) == REGNO (XEXP (x, i))))
611 this_result = loc;
612 else
613 this_result = find_single_use_1 (dest, &XEXP (x, i));
614
615 if (result == NULL)
616 result = this_result;
617 else if (this_result)
618 /* Duplicate usage. */
619 return NULL;
620 }
621 else if (fmt[i] == 'E')
622 {
623 int j;
624
625 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
626 {
627 if (XVECEXP (x, i, j) == dest
628 || (REG_P (dest)
629 && REG_P (XVECEXP (x, i, j))
630 && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
631 this_result = loc;
632 else
633 this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
634
635 if (result == NULL)
636 result = this_result;
637 else if (this_result)
638 return NULL;
639 }
640 }
641 }
642
643 return result;
644 }
645
646
647 /* See if DEST, produced in INSN, is used only a single time in the
648 sequel. If so, return a pointer to the innermost rtx expression in which
649 it is used.
650
651 If PLOC is nonzero, *PLOC is set to the insn containing the single use.
652
653 If DEST is cc0_rtx, we look only at the next insn. In that case, we don't
654 care about REG_DEAD notes or LOG_LINKS.
655
656 Otherwise, we find the single use by finding an insn that has a
657 LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST. If DEST is
658 only referenced once in that insn, we know that it must be the first
659 and last insn referencing DEST. */
660
661 static rtx *
662 find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
663 {
664 basic_block bb;
665 rtx_insn *next;
666 rtx *result;
667 struct insn_link *link;
668
669 if (dest == cc0_rtx)
670 {
671 next = NEXT_INSN (insn);
672 if (next == 0
673 || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
674 return 0;
675
676 result = find_single_use_1 (dest, &PATTERN (next));
677 if (result && ploc)
678 *ploc = next;
679 return result;
680 }
681
682 if (!REG_P (dest))
683 return 0;
684
685 bb = BLOCK_FOR_INSN (insn);
686 for (next = NEXT_INSN (insn);
687 next && BLOCK_FOR_INSN (next) == bb;
688 next = NEXT_INSN (next))
689 if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
690 {
691 FOR_EACH_LOG_LINK (link, next)
692 if (link->insn == insn && link->regno == REGNO (dest))
693 break;
694
695 if (link)
696 {
697 result = find_single_use_1 (dest, &PATTERN (next));
698 if (ploc)
699 *ploc = next;
700 return result;
701 }
702 }
703
704 return 0;
705 }
706 \f
707 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
708 insn. The substitution can be undone by undo_all. If INTO is already
709 set to NEWVAL, do not record this change. Because computing NEWVAL might
710 also call SUBST, we have to compute it before we put anything into
711 the undo table. */
712
713 static void
714 do_SUBST (rtx *into, rtx newval)
715 {
716 struct undo *buf;
717 rtx oldval = *into;
718
719 if (oldval == newval)
720 return;
721
722 /* We'd like to catch as many invalid transformations here as
723 possible. Unfortunately, there are way too many mode changes
724 that are perfectly valid, so we'd waste too much effort for
725 little gain doing the checks here. Focus on catching invalid
726 transformations involving integer constants. */
727 if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
728 && CONST_INT_P (newval))
729 {
730 /* Sanity check that we're replacing oldval with a CONST_INT
731 that is a valid sign-extension for the original mode. */
732 gcc_assert (INTVAL (newval)
733 == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
734
735 /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
736 CONST_INT is not valid, because after the replacement, the
737 original mode would be gone. Unfortunately, we can't tell
738 when do_SUBST is called to replace the operand thereof, so we
739 perform this test on oldval instead, checking whether an
740 invalid replacement took place before we got here. */
741 gcc_assert (!(GET_CODE (oldval) == SUBREG
742 && CONST_INT_P (SUBREG_REG (oldval))));
743 gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
744 && CONST_INT_P (XEXP (oldval, 0))));
745 }
746
747 if (undobuf.frees)
748 buf = undobuf.frees, undobuf.frees = buf->next;
749 else
750 buf = XNEW (struct undo);
751
752 buf->kind = UNDO_RTX;
753 buf->where.r = into;
754 buf->old_contents.r = oldval;
755 *into = newval;
756
757 buf->next = undobuf.undos, undobuf.undos = buf;
758 }
759
760 #define SUBST(INTO, NEWVAL) do_SUBST (&(INTO), (NEWVAL))
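
/* Illustrative use of SUBST (operands are hypothetical): to replace the
   source of a SET so that undo_all can later revert it, one writes

	SUBST (SET_SRC (pat), gen_rtx_PLUS (mode, op0, op1));

   The old SET_SRC is saved on undobuf.undos before *INTO is
   overwritten.  */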
761
762 /* Similar to SUBST, but NEWVAL is an int expression. Note that
763 substituting for the value of a HOST_WIDE_INT (including a CONST_INT) is
764 not safe. */
765
766 static void
767 do_SUBST_INT (int *into, int newval)
768 {
769 struct undo *buf;
770 int oldval = *into;
771
772 if (oldval == newval)
773 return;
774
775 if (undobuf.frees)
776 buf = undobuf.frees, undobuf.frees = buf->next;
777 else
778 buf = XNEW (struct undo);
779
780 buf->kind = UNDO_INT;
781 buf->where.i = into;
782 buf->old_contents.i = oldval;
783 *into = newval;
784
785 buf->next = undobuf.undos, undobuf.undos = buf;
786 }
787
788 #define SUBST_INT(INTO, NEWVAL) do_SUBST_INT (&(INTO), (NEWVAL))
789
790 /* Similar to SUBST, but just substitute the mode. This is used when
791 changing the mode of a pseudo-register, so that any other
792 references to the entry in the regno_reg_rtx array will change as
793 well. */
794
795 static void
796 do_SUBST_MODE (rtx *into, machine_mode newval)
797 {
798 struct undo *buf;
799 machine_mode oldval = GET_MODE (*into);
800
801 if (oldval == newval)
802 return;
803
804 if (undobuf.frees)
805 buf = undobuf.frees, undobuf.frees = buf->next;
806 else
807 buf = XNEW (struct undo);
808
809 buf->kind = UNDO_MODE;
810 buf->where.r = into;
811 buf->old_contents.m = oldval;
812 adjust_reg_mode (*into, newval);
813
814 buf->next = undobuf.undos, undobuf.undos = buf;
815 }
816
817 #define SUBST_MODE(INTO, NEWVAL) do_SUBST_MODE (&(INTO), (NEWVAL))
818
819 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression. */
820
821 static void
822 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
823 {
824 struct undo *buf;
825 struct insn_link * oldval = *into;
826
827 if (oldval == newval)
828 return;
829
830 if (undobuf.frees)
831 buf = undobuf.frees, undobuf.frees = buf->next;
832 else
833 buf = XNEW (struct undo);
834
835 buf->kind = UNDO_LINKS;
836 buf->where.l = into;
837 buf->old_contents.l = oldval;
838 *into = newval;
839
840 buf->next = undobuf.undos, undobuf.undos = buf;
841 }
842
843 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
844 \f
845 /* Subroutine of try_combine. Determine whether the replacement patterns
846 NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost
847 than the original sequence I0, I1, I2, I3 and undobuf.other_insn. Note
848 that I0, I1 and/or NEWI2PAT may be NULL_RTX. Similarly, NEWOTHERPAT and
849 undobuf.other_insn may also both be NULL_RTX. Return false if the cost
850 of all the instructions can be estimated and the replacements are more
851 expensive than the original sequence. */
852
853 static bool
854 combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
855 rtx newpat, rtx newi2pat, rtx newotherpat)
856 {
857 int i0_cost, i1_cost, i2_cost, i3_cost;
858 int new_i2_cost, new_i3_cost;
859 int old_cost, new_cost;
860
861 /* Lookup the original insn_costs. */
862 i2_cost = INSN_COST (i2);
863 i3_cost = INSN_COST (i3);
864
865 if (i1)
866 {
867 i1_cost = INSN_COST (i1);
868 if (i0)
869 {
870 i0_cost = INSN_COST (i0);
871 old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
872 ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
873 }
874 else
875 {
876 old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
877 ? i1_cost + i2_cost + i3_cost : 0);
878 i0_cost = 0;
879 }
880 }
881 else
882 {
883 old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
884 i1_cost = i0_cost = 0;
885 }
886
887 /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
888 correct that. */
889 if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
890 old_cost -= i1_cost;
891
892
893 /* Calculate the replacement insn_costs. */
894 rtx tmp = PATTERN (i3);
895 PATTERN (i3) = newpat;
896 int tmpi = INSN_CODE (i3);
897 INSN_CODE (i3) = -1;
898 new_i3_cost = insn_cost (i3, optimize_this_for_speed_p);
899 PATTERN (i3) = tmp;
900 INSN_CODE (i3) = tmpi;
901 if (newi2pat)
902 {
903 tmp = PATTERN (i2);
904 PATTERN (i2) = newi2pat;
905 tmpi = INSN_CODE (i2);
906 INSN_CODE (i2) = -1;
907 new_i2_cost = insn_cost (i2, optimize_this_for_speed_p);
908 PATTERN (i2) = tmp;
909 INSN_CODE (i2) = tmpi;
910 new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
911 ? new_i2_cost + new_i3_cost : 0;
912 }
913 else
914 {
915 new_cost = new_i3_cost;
916 new_i2_cost = 0;
917 }
918
919 if (undobuf.other_insn)
920 {
921 int old_other_cost, new_other_cost;
922
923 old_other_cost = INSN_COST (undobuf.other_insn);
924 tmp = PATTERN (undobuf.other_insn);
925 PATTERN (undobuf.other_insn) = newotherpat;
926 tmpi = INSN_CODE (undobuf.other_insn);
927 INSN_CODE (undobuf.other_insn) = -1;
928 new_other_cost = insn_cost (undobuf.other_insn,
929 optimize_this_for_speed_p);
930 PATTERN (undobuf.other_insn) = tmp;
931 INSN_CODE (undobuf.other_insn) = tmpi;
932 if (old_other_cost > 0 && new_other_cost > 0)
933 {
934 old_cost += old_other_cost;
935 new_cost += new_other_cost;
936 }
937 else
938 old_cost = 0;
939 }
940
941 /* Disallow this combination if both new_cost and old_cost are greater than
942 zero, and new_cost is greater than old_cost. */
943 int reject = old_cost > 0 && new_cost > old_cost;
944
945 if (dump_file)
946 {
947 fprintf (dump_file, "%s combination of insns ",
948 reject ? "rejecting" : "allowing");
949 if (i0)
950 fprintf (dump_file, "%d, ", INSN_UID (i0));
951 if (i1 && INSN_UID (i1) != INSN_UID (i2))
952 fprintf (dump_file, "%d, ", INSN_UID (i1));
953 fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));
954
955 fprintf (dump_file, "original costs ");
956 if (i0)
957 fprintf (dump_file, "%d + ", i0_cost);
958 if (i1 && INSN_UID (i1) != INSN_UID (i2))
959 fprintf (dump_file, "%d + ", i1_cost);
960 fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);
961
962 if (newi2pat)
963 fprintf (dump_file, "replacement costs %d + %d = %d\n",
964 new_i2_cost, new_i3_cost, new_cost);
965 else
966 fprintf (dump_file, "replacement cost %d\n", new_cost);
967 }
968
969 if (reject)
970 return false;
971
972 /* Update the uid_insn_cost array with the replacement costs. */
973 INSN_COST (i2) = new_i2_cost;
974 INSN_COST (i3) = new_i3_cost;
975 if (i1)
976 {
977 INSN_COST (i1) = 0;
978 if (i0)
979 INSN_COST (i0) = 0;
980 }
981
982 return true;
983 }
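
/* A worked example of the cost test above (costs are hypothetical): if I2
   and I3 each have insn_cost 4, old_cost is 8.  A single replacement
   pattern for I3 with insn_cost 6 gives new_cost == 6 <= 8, so the
   combination is allowed; a replacement with insn_cost 9 would be
   rejected, since both costs are positive and 9 > 8.  */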
984
985
986 /* Delete any insns that copy a register to itself.
987 Return true if the CFG was changed. */
988
989 static bool
990 delete_noop_moves (void)
991 {
992 rtx_insn *insn, *next;
993 basic_block bb;
994
995 bool edges_deleted = false;
996
997 FOR_EACH_BB_FN (bb, cfun)
998 {
999 for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
1000 {
1001 next = NEXT_INSN (insn);
1002 if (INSN_P (insn) && noop_move_p (insn))
1003 {
1004 if (dump_file)
1005 fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
1006
1007 edges_deleted |= delete_insn_and_edges (insn);
1008 }
1009 }
1010 }
1011
1012 return edges_deleted;
1013 }
1014
1015 \f
1016 /* Return false if we do not want to (or cannot) combine DEF. */
1017 static bool
1018 can_combine_def_p (df_ref def)
1019 {
1020 /* Do not consider the definition if it is a pre/post modification within a MEM. */
1021 if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
1022 return false;
1023
1024 unsigned int regno = DF_REF_REGNO (def);
1025
1026 /* Do not combine frame pointer adjustments. */
1027 if ((regno == FRAME_POINTER_REGNUM
1028 && (!reload_completed || frame_pointer_needed))
1029 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
1030 && regno == HARD_FRAME_POINTER_REGNUM
1031 && (!reload_completed || frame_pointer_needed))
1032 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1033 && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
1034 return false;
1035
1036 return true;
1037 }
1038
1039 /* Return false if we do not want to (or cannot) combine USE. */
1040 static bool
1041 can_combine_use_p (df_ref use)
1042 {
1043 /* Do not consider the usage of the stack pointer by a function call. */
1044 if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1045 return false;
1046
1047 return true;
1048 }
1049
1050 /* Fill in log links field for all insns. */
1051
1052 static void
1053 create_log_links (void)
1054 {
1055 basic_block bb;
1056 rtx_insn **next_use;
1057 rtx_insn *insn;
1058 df_ref def, use;
1059
1060 next_use = XCNEWVEC (rtx_insn *, max_reg_num ());
1061
1062 /* Pass through each block from the end, recording the uses of each
1063 register and establishing log links when def is encountered.
1064 Note that we do not clear next_use array in order to save time,
1065 so we have to test whether the use is in the same basic block as def.
1066
1067 There are a few cases below when we do not consider the definition or
1068 usage -- these are taken from what the original flow.c did. Don't ask me
1069 why it is done this way; I don't know and if it works, I don't want to know. */
1070
1071 FOR_EACH_BB_FN (bb, cfun)
1072 {
1073 FOR_BB_INSNS_REVERSE (bb, insn)
1074 {
1075 if (!NONDEBUG_INSN_P (insn))
1076 continue;
1077
1078 /* Log links are created only once. */
1079 gcc_assert (!LOG_LINKS (insn));
1080
1081 FOR_EACH_INSN_DEF (def, insn)
1082 {
1083 unsigned int regno = DF_REF_REGNO (def);
1084 rtx_insn *use_insn;
1085
1086 if (!next_use[regno])
1087 continue;
1088
1089 if (!can_combine_def_p (def))
1090 continue;
1091
1092 use_insn = next_use[regno];
1093 next_use[regno] = NULL;
1094
1095 if (BLOCK_FOR_INSN (use_insn) != bb)
1096 continue;
1097
1098 /* flow.c claimed:
1099
1100 We don't build a LOG_LINK for hard registers contained
1101 in ASM_OPERANDs. If these registers get replaced,
1102 we might wind up changing the semantics of the insn,
1103 even if reload can make what appear to be valid
1104 assignments later. */
1105 if (regno < FIRST_PSEUDO_REGISTER
1106 && asm_noperands (PATTERN (use_insn)) >= 0)
1107 continue;
1108
1109 /* Don't add duplicate links between instructions. */
1110 struct insn_link *links;
1111 FOR_EACH_LOG_LINK (links, use_insn)
1112 if (insn == links->insn && regno == links->regno)
1113 break;
1114
1115 if (!links)
1116 LOG_LINKS (use_insn)
1117 = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
1118 }
1119
1120 FOR_EACH_INSN_USE (use, insn)
1121 if (can_combine_use_p (use))
1122 next_use[DF_REF_REGNO (use)] = insn;
1123 }
1124 }
1125
1126 free (next_use);
1127 }
1128
1129 /* Walk the LOG_LINKS of insn B to see if we find a reference to A. Return
1130 true if we found a LOG_LINK that proves that A feeds B. This only works
1131 if there are no instructions between A and B which could have a link
1132 depending on A, since in that case we would not record a link for B.
1133 We also check the implicit dependency created by a cc0 setter/user
1134 pair. */
1135
1136 static bool
1137 insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
1138 {
1139 struct insn_link *links;
1140 FOR_EACH_LOG_LINK (links, b)
1141 if (links->insn == a)
1142 return true;
1143 if (HAVE_cc0 && sets_cc0_p (a))
1144 return true;
1145 return false;
1146 }
1147 \f
1148 /* Main entry point for combiner. F is the first insn of the function.
1149 NREGS is the first unused pseudo-reg number.
1150
1151 Return nonzero if the CFG was changed (e.g. if the combiner has
1152 turned an indirect jump instruction into a direct jump). */
1153 static int
1154 combine_instructions (rtx_insn *f, unsigned int nregs)
1155 {
1156 rtx_insn *insn, *next;
1157 rtx_insn *prev;
1158 struct insn_link *links, *nextlinks;
1159 rtx_insn *first;
1160 basic_block last_bb;
1161
1162 int new_direct_jump_p = 0;
1163
1164 for (first = f; first && !NONDEBUG_INSN_P (first); )
1165 first = NEXT_INSN (first);
1166 if (!first)
1167 return 0;
1168
1169 combine_attempts = 0;
1170 combine_merges = 0;
1171 combine_extras = 0;
1172 combine_successes = 0;
1173
1174 rtl_hooks = combine_rtl_hooks;
1175
1176 reg_stat.safe_grow_cleared (nregs);
1177
1178 init_recog_no_volatile ();
1179
1180 /* Allocate array for insn info. */
1181 max_uid_known = get_max_uid ();
1182 uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1183 uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1184 gcc_obstack_init (&insn_link_obstack);
1185
1186 nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();
1187
1188 /* Don't use reg_stat[].nonzero_bits when computing it. This can cause
1189 problems when, for example, we have j <<= 1 in a loop. */
1190
1191 nonzero_sign_valid = 0;
1192 label_tick = label_tick_ebb_start = 1;
1193
1194 /* Scan all SETs and see if we can deduce anything about what
1195 bits are known to be zero for some registers and how many copies
1196 of the sign bit are known to exist for those registers.
1197
1198 Also set any known values so that we can use it while searching
1199 for what bits are known to be set. */
1200
1201 setup_incoming_promotions (first);
1202 /* Allow the entry block and the first block to fall into the same EBB.
1203 Conceptually the incoming promotions are assigned to the entry block. */
1204 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1205
1206 create_log_links ();
1207 FOR_EACH_BB_FN (this_basic_block, cfun)
1208 {
1209 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1210 last_call_luid = 0;
1211 mem_last_set = -1;
1212
1213 label_tick++;
1214 if (!single_pred_p (this_basic_block)
1215 || single_pred (this_basic_block) != last_bb)
1216 label_tick_ebb_start = label_tick;
1217 last_bb = this_basic_block;
1218
1219 FOR_BB_INSNS (this_basic_block, insn)
1220 if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1221 {
1222 rtx links;
1223
1224 subst_low_luid = DF_INSN_LUID (insn);
1225 subst_insn = insn;
1226
1227 note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1228 insn);
1229 record_dead_and_set_regs (insn);
1230
1231 if (AUTO_INC_DEC)
1232 for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1233 if (REG_NOTE_KIND (links) == REG_INC)
1234 set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1235 insn);
1236
1237 /* Record the current insn_cost of this instruction. */
1238 if (NONJUMP_INSN_P (insn))
1239 INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p);
1240 if (dump_file)
1241 {
1242 fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
1243 dump_insn_slim (dump_file, insn);
1244 }
1245 }
1246 }
1247
1248 nonzero_sign_valid = 1;
1249
1250 /* Now scan all the insns in forward order. */
1251 label_tick = label_tick_ebb_start = 1;
1252 init_reg_last ();
1253 setup_incoming_promotions (first);
1254 last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1255 int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);
1256
1257 FOR_EACH_BB_FN (this_basic_block, cfun)
1258 {
1259 rtx_insn *last_combined_insn = NULL;
1260
1261 /* Ignore instruction combination in basic blocks that are going to
1262 be removed as unreachable anyway. See PR82386. */
1263 if (EDGE_COUNT (this_basic_block->preds) == 0)
1264 continue;
1265
1266 optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1267 last_call_luid = 0;
1268 mem_last_set = -1;
1269
1270 label_tick++;
1271 if (!single_pred_p (this_basic_block)
1272 || single_pred (this_basic_block) != last_bb)
1273 label_tick_ebb_start = label_tick;
1274 last_bb = this_basic_block;
1275
1276 rtl_profile_for_bb (this_basic_block);
1277 for (insn = BB_HEAD (this_basic_block);
1278 insn != NEXT_INSN (BB_END (this_basic_block));
1279 insn = next ? next : NEXT_INSN (insn))
1280 {
1281 next = 0;
1282 if (!NONDEBUG_INSN_P (insn))
1283 continue;
1284
1285 while (last_combined_insn
1286 && (!NONDEBUG_INSN_P (last_combined_insn)
1287 || last_combined_insn->deleted ()))
1288 last_combined_insn = PREV_INSN (last_combined_insn);
1289 if (last_combined_insn == NULL_RTX
1290 || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1291 || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1292 last_combined_insn = insn;
1293
1294 /* See if we know about function return values before this
1295 insn based upon SUBREG flags. */
1296 check_promoted_subreg (insn, PATTERN (insn));
1297
1298 /* See if we can find hard regs and subregs of pseudos in
1299 narrower modes. This could help turn TRUNCATEs
1300 into SUBREGs. */
1301 note_uses (&PATTERN (insn), record_truncated_values, NULL);
1302
1303 /* Try this insn with each insn it links back to. */
1304
1305 FOR_EACH_LOG_LINK (links, insn)
1306 if ((next = try_combine (insn, links->insn, NULL,
1307 NULL, &new_direct_jump_p,
1308 last_combined_insn)) != 0)
1309 {
1310 statistics_counter_event (cfun, "two-insn combine", 1);
1311 goto retry;
1312 }
1313
1314 /* Try each sequence of three linked insns ending with this one. */
1315
1316 if (max_combine >= 3)
1317 FOR_EACH_LOG_LINK (links, insn)
1318 {
1319 rtx_insn *link = links->insn;
1320
1321 /* If the linked insn has been replaced by a note, then there
1322 is no point in pursuing this chain any further. */
1323 if (NOTE_P (link))
1324 continue;
1325
1326 FOR_EACH_LOG_LINK (nextlinks, link)
1327 if ((next = try_combine (insn, link, nextlinks->insn,
1328 NULL, &new_direct_jump_p,
1329 last_combined_insn)) != 0)
1330 {
1331 statistics_counter_event (cfun, "three-insn combine", 1);
1332 goto retry;
1333 }
1334 }
1335
1336 /* Try to combine a jump insn that uses CC0
1337 with a preceding insn that sets CC0, and maybe with its
1338 logical predecessor as well.
1339 This is how we make decrement-and-branch insns.
1340 We need this special code because data flow connections
1341 via CC0 do not get entered in LOG_LINKS. */
1342
1343 if (HAVE_cc0
1344 && JUMP_P (insn)
1345 && (prev = prev_nonnote_insn (insn)) != 0
1346 && NONJUMP_INSN_P (prev)
1347 && sets_cc0_p (PATTERN (prev)))
1348 {
1349 if ((next = try_combine (insn, prev, NULL, NULL,
1350 &new_direct_jump_p,
1351 last_combined_insn)) != 0)
1352 goto retry;
1353
1354 FOR_EACH_LOG_LINK (nextlinks, prev)
1355 if ((next = try_combine (insn, prev, nextlinks->insn,
1356 NULL, &new_direct_jump_p,
1357 last_combined_insn)) != 0)
1358 goto retry;
1359 }
1360
1361 /* Do the same for an insn that explicitly references CC0. */
1362 if (HAVE_cc0 && NONJUMP_INSN_P (insn)
1363 && (prev = prev_nonnote_insn (insn)) != 0
1364 && NONJUMP_INSN_P (prev)
1365 && sets_cc0_p (PATTERN (prev))
1366 && GET_CODE (PATTERN (insn)) == SET
1367 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1368 {
1369 if ((next = try_combine (insn, prev, NULL, NULL,
1370 &new_direct_jump_p,
1371 last_combined_insn)) != 0)
1372 goto retry;
1373
1374 FOR_EACH_LOG_LINK (nextlinks, prev)
1375 if ((next = try_combine (insn, prev, nextlinks->insn,
1376 NULL, &new_direct_jump_p,
1377 last_combined_insn)) != 0)
1378 goto retry;
1379 }
1380
1381 /* Finally, see if any of the insns that this insn links to
1382 explicitly references CC0. If so, try this insn, that insn,
1383 and its predecessor if it sets CC0. */
1384 if (HAVE_cc0)
1385 {
1386 FOR_EACH_LOG_LINK (links, insn)
1387 if (NONJUMP_INSN_P (links->insn)
1388 && GET_CODE (PATTERN (links->insn)) == SET
1389 && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1390 && (prev = prev_nonnote_insn (links->insn)) != 0
1391 && NONJUMP_INSN_P (prev)
1392 && sets_cc0_p (PATTERN (prev))
1393 && (next = try_combine (insn, links->insn,
1394 prev, NULL, &new_direct_jump_p,
1395 last_combined_insn)) != 0)
1396 goto retry;
1397 }
1398
1399 /* Try combining an insn with two different insns whose results it
1400 uses. */
1401 if (max_combine >= 3)
1402 FOR_EACH_LOG_LINK (links, insn)
1403 for (nextlinks = links->next; nextlinks;
1404 nextlinks = nextlinks->next)
1405 if ((next = try_combine (insn, links->insn,
1406 nextlinks->insn, NULL,
1407 &new_direct_jump_p,
1408 last_combined_insn)) != 0)
1409
1410 {
1411 statistics_counter_event (cfun, "three-insn combine", 1);
1412 goto retry;
1413 }
1414
1415 /* Try four-instruction combinations. */
1416 if (max_combine >= 4)
1417 FOR_EACH_LOG_LINK (links, insn)
1418 {
1419 struct insn_link *next1;
1420 rtx_insn *link = links->insn;
1421
1422 /* If the linked insn has been replaced by a note, then there
1423 is no point in pursuing this chain any further. */
1424 if (NOTE_P (link))
1425 continue;
1426
1427 FOR_EACH_LOG_LINK (next1, link)
1428 {
1429 rtx_insn *link1 = next1->insn;
1430 if (NOTE_P (link1))
1431 continue;
1432 /* I0 -> I1 -> I2 -> I3. */
1433 FOR_EACH_LOG_LINK (nextlinks, link1)
1434 if ((next = try_combine (insn, link, link1,
1435 nextlinks->insn,
1436 &new_direct_jump_p,
1437 last_combined_insn)) != 0)
1438 {
1439 statistics_counter_event (cfun, "four-insn combine", 1);
1440 goto retry;
1441 }
1442 /* I0, I1 -> I2, I2 -> I3. */
1443 for (nextlinks = next1->next; nextlinks;
1444 nextlinks = nextlinks->next)
1445 if ((next = try_combine (insn, link, link1,
1446 nextlinks->insn,
1447 &new_direct_jump_p,
1448 last_combined_insn)) != 0)
1449 {
1450 statistics_counter_event (cfun, "four-insn combine", 1);
1451 goto retry;
1452 }
1453 }
1454
1455 for (next1 = links->next; next1; next1 = next1->next)
1456 {
1457 rtx_insn *link1 = next1->insn;
1458 if (NOTE_P (link1))
1459 continue;
1460 /* I0 -> I2; I1, I2 -> I3. */
1461 FOR_EACH_LOG_LINK (nextlinks, link)
1462 if ((next = try_combine (insn, link, link1,
1463 nextlinks->insn,
1464 &new_direct_jump_p,
1465 last_combined_insn)) != 0)
1466 {
1467 statistics_counter_event (cfun, "four-insn combine", 1);
1468 goto retry;
1469 }
1470 /* I0 -> I1; I1, I2 -> I3. */
1471 FOR_EACH_LOG_LINK (nextlinks, link1)
1472 if ((next = try_combine (insn, link, link1,
1473 nextlinks->insn,
1474 &new_direct_jump_p,
1475 last_combined_insn)) != 0)
1476 {
1477 statistics_counter_event (cfun, "four-insn combine", 1);
1478 goto retry;
1479 }
1480 }
1481 }
1482
1483 /* Try this insn with each REG_EQUAL note it links back to. */
1484 FOR_EACH_LOG_LINK (links, insn)
1485 {
1486 rtx set, note;
1487 rtx_insn *temp = links->insn;
1488 if ((set = single_set (temp)) != 0
1489 && (note = find_reg_equal_equiv_note (temp)) != 0
1490 && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1491 /* Avoid using a register that may already have been marked
1492 dead by an earlier instruction. */
1493 && ! unmentioned_reg_p (note, SET_SRC (set))
1494 && (GET_MODE (note) == VOIDmode
1495 ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1496 : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
1497 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
1498 || (GET_MODE (XEXP (SET_DEST (set), 0))
1499 == GET_MODE (note))))))
1500 {
1501 /* Temporarily replace the set's source with the
1502 contents of the REG_EQUAL note. The insn will
1503 be deleted or recognized by try_combine. */
1504 rtx orig_src = SET_SRC (set);
1505 rtx orig_dest = SET_DEST (set);
1506 if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
1507 SET_DEST (set) = XEXP (SET_DEST (set), 0);
1508 SET_SRC (set) = note;
1509 i2mod = temp;
1510 i2mod_old_rhs = copy_rtx (orig_src);
1511 i2mod_new_rhs = copy_rtx (note);
1512 next = try_combine (insn, i2mod, NULL, NULL,
1513 &new_direct_jump_p,
1514 last_combined_insn);
1515 i2mod = NULL;
1516 if (next)
1517 {
1518 statistics_counter_event (cfun, "insn-with-note combine", 1);
1519 goto retry;
1520 }
1521 SET_SRC (set) = orig_src;
1522 SET_DEST (set) = orig_dest;
1523 }
1524 }
1525
1526 if (!NOTE_P (insn))
1527 record_dead_and_set_regs (insn);
1528
1529 retry:
1530 ;
1531 }
1532 }
1533
1534 default_rtl_profile ();
1535 clear_bb_flags ();
1536 new_direct_jump_p |= purge_all_dead_edges ();
1537 new_direct_jump_p |= delete_noop_moves ();
1538
1539 /* Clean up. */
1540 obstack_free (&insn_link_obstack, NULL);
1541 free (uid_log_links);
1542 free (uid_insn_cost);
1543 reg_stat.release ();
1544
1545 {
1546 struct undo *undo, *next;
1547 for (undo = undobuf.frees; undo; undo = next)
1548 {
1549 next = undo->next;
1550 free (undo);
1551 }
1552 undobuf.frees = 0;
1553 }
1554
1555 total_attempts += combine_attempts;
1556 total_merges += combine_merges;
1557 total_extras += combine_extras;
1558 total_successes += combine_successes;
1559
1560 nonzero_sign_valid = 0;
1561 rtl_hooks = general_rtl_hooks;
1562
1563 /* Make recognizer allow volatile MEMs again. */
1564 init_recog ();
1565
1566 return new_direct_jump_p;
1567 }
1568
1569 /* Wipe the last_xxx fields of reg_stat in preparation for another pass. */
1570
1571 static void
1572 init_reg_last (void)
1573 {
1574 unsigned int i;
1575 reg_stat_type *p;
1576
1577 FOR_EACH_VEC_ELT (reg_stat, i, p)
1578 memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1579 }
1580 \f
1581 /* Set up any promoted values for incoming argument registers. */
1582
1583 static void
1584 setup_incoming_promotions (rtx_insn *first)
1585 {
1586 tree arg;
1587 bool strictly_local = false;
1588
1589 for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1590 arg = DECL_CHAIN (arg))
1591 {
1592 rtx x, reg = DECL_INCOMING_RTL (arg);
1593 int uns1, uns3;
1594 machine_mode mode1, mode2, mode3, mode4;
1595
1596 /* Only continue if the incoming argument is in a register. */
1597 if (!REG_P (reg))
1598 continue;
1599
1600 /* Determine, if possible, whether all call sites of the current
1601 function lie within the current compilation unit. (This does
1602 take into account the exporting of a function via taking its
1603 address, and so forth.) */
1604 strictly_local = cgraph_node::local_info (current_function_decl)->local;
1605
1606 /* The mode and signedness of the argument before any promotions happen
1607 (equal to the mode of the pseudo holding it at that stage). */
1608 mode1 = TYPE_MODE (TREE_TYPE (arg));
1609 uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1610
1611 /* The mode and signedness of the argument after any source language and
1612 TARGET_PROMOTE_PROTOTYPES-driven promotions. */
1613 mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1614 uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1615
1616 /* The mode and signedness of the argument as it is actually passed,
1617 see assign_parm_setup_reg in function.c. */
1618 mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
1619 TREE_TYPE (cfun->decl), 0);
1620
1621 /* The mode of the register in which the argument is being passed. */
1622 mode4 = GET_MODE (reg);
1623
1624 /* Eliminate sign extensions in the callee when:
1625 (a) A mode promotion has occurred; */
1626 if (mode1 == mode3)
1627 continue;
1628 /* (b) The mode of the register is the same as the mode of
1629 the argument as it is passed; */
1630 if (mode3 != mode4)
1631 continue;
1632 /* (c) There's no language level extension; */
1633 if (mode1 == mode2)
1634 ;
1635 /* (c.1) All callers are from the current compilation unit. If that's
1636 the case we don't have to rely on an ABI, we only have to know
1637 what we're generating right now, and we know that we will do the
1638 mode1 to mode2 promotion with the given sign. */
1639 else if (!strictly_local)
1640 continue;
1641 /* (c.2) The combination of the two promotions is useful. This is
1642 true when the signs match, or if the first promotion is unsigned.
1643 In the latter case, (sign_extend (zero_extend x)) is the same as
1644 (zero_extend (zero_extend x)), so make sure to force UNS3 true. */
1645 else if (uns1)
1646 uns3 = true;
1647 else if (uns3)
1648 continue;
1649
1650 /* Record that the value was promoted from mode1 to mode3,
1651 so that any sign extension at the head of the current
1652 function may be eliminated. */
1653 x = gen_rtx_CLOBBER (mode1, const0_rtx);
1654 x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1655 record_value_for_reg (reg, first, x);
1656 }
1657 }
1658
1659 /* If MODE has a precision lower than PREC and SRC is a non-negative constant
1660 that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
1661 because some machines (maybe most) will actually do the sign-extension and
1662 this is the conservative approach.
1663
1664 ??? For 2.5, try to tighten up the MD files in this regard instead of this
1665 kludge. */
1666
1667 static rtx
1668 sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
1669 {
1670 scalar_int_mode int_mode;
1671 if (CONST_INT_P (src)
1672 && is_a <scalar_int_mode> (mode, &int_mode)
1673 && GET_MODE_PRECISION (int_mode) < prec
1674 && INTVAL (src) > 0
1675 && val_signbit_known_set_p (int_mode, INTVAL (src)))
1676 src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));
1677
1678 return src;
1679 }
1680
1681 /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
1682 and SET. */
1683
1684 static void
1685 update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
1686 rtx x)
1687 {
1688 rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
1689 unsigned HOST_WIDE_INT bits = 0;
1690 rtx reg_equal = NULL, src = SET_SRC (set);
1691 unsigned int num = 0;
1692
1693 if (reg_equal_note)
1694 reg_equal = XEXP (reg_equal_note, 0);
1695
1696 if (SHORT_IMMEDIATES_SIGN_EXTEND)
1697 {
1698 src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
1699 if (reg_equal)
1700 reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
1701 }
1702
1703 /* Don't call nonzero_bits if it cannot change anything. */
1704 if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
1705 {
1706 machine_mode mode = GET_MODE (x);
1707 if (GET_MODE_CLASS (mode) == MODE_INT
1708 && HWI_COMPUTABLE_MODE_P (mode))
1709 mode = nonzero_bits_mode;
1710 bits = nonzero_bits (src, mode);
1711 if (reg_equal && bits)
1712 bits &= nonzero_bits (reg_equal, mode);
1713 rsp->nonzero_bits |= bits;
1714 }
1715
1716 /* Don't call num_sign_bit_copies if it cannot change anything. */
1717 if (rsp->sign_bit_copies != 1)
1718 {
1719 num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1720 if (reg_equal && maybe_ne (num, GET_MODE_PRECISION (GET_MODE (x))))
1721 {
1722 unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
1723 if (num == 0 || numeq > num)
1724 num = numeq;
1725 }
1726 if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
1727 rsp->sign_bit_copies = num;
1728 }
1729 }
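/* Illustrative example (hypothetical values): if SET_SRC has nonzero
   bits 0xff but the REG_EQUAL expression has nonzero bits 0x0f, only
   0x0f is ORed into RSP->nonzero_bits, since both describe the same
   value.  For sign-bit copies the larger of the two counts is kept,
   and the minimum over all sets of the register is recorded.  */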
1730
1731 /* Called via note_stores. If X is a pseudo that is narrower than
1732 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1733
1734 If we are setting only a portion of X and we can't figure out what
1735 portion, assume all bits will be used since we don't know what will
1736 be happening.
1737
1738 Similarly, set how many bits of X are known to be copies of the sign bit
1739 at all locations in the function. This is the smallest number implied
1740 by any set of X. */
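/* Illustrative example (hypothetical pseudo): if one SET assigns
   (const_int 3) to X (nonzero bits 0b011) and another assigns
   (const_int 4) (nonzero bits 0b100), the value recorded for X is the
   union 0b111, because either set may reach any given use of X.  */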
1741
1742 static void
1743 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1744 {
1745 rtx_insn *insn = (rtx_insn *) data;
1746 scalar_int_mode mode;
1747
1748 if (REG_P (x)
1749 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1750 /* If this register is undefined at the start of the function, we can't
1751 say what its contents were. */
1752 && ! REGNO_REG_SET_P
1753 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
1754 && is_a <scalar_int_mode> (GET_MODE (x), &mode)
1755 && HWI_COMPUTABLE_MODE_P (mode))
1756 {
1757 reg_stat_type *rsp = &reg_stat[REGNO (x)];
1758
1759 if (set == 0 || GET_CODE (set) == CLOBBER)
1760 {
1761 rsp->nonzero_bits = GET_MODE_MASK (mode);
1762 rsp->sign_bit_copies = 1;
1763 return;
1764 }
1765
1766 /* This should not happen, as we only use pseudo registers.  */
1767 gcc_assert (GET_CODE (set) != CLOBBER_HIGH);
1768
1769 /* If this register is being initialized using itself, and the
1770 register is uninitialized in this basic block, and there are
1771 no LOG_LINKS which set the register, then part of the
1772 register is uninitialized. In that case we can't assume
1773 anything about the number of nonzero bits.
1774
1775 ??? We could do better if we checked this in
1776 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1777 could avoid making assumptions about the insn which initially
1778 sets the register, while still using the information in other
1779 insns. We would have to be careful to check every insn
1780 involved in the combination. */
1781
1782 if (insn
1783 && reg_referenced_p (x, PATTERN (insn))
1784 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1785 REGNO (x)))
1786 {
1787 struct insn_link *link;
1788
1789 FOR_EACH_LOG_LINK (link, insn)
1790 if (dead_or_set_p (link->insn, x))
1791 break;
1792 if (!link)
1793 {
1794 rsp->nonzero_bits = GET_MODE_MASK (mode);
1795 rsp->sign_bit_copies = 1;
1796 return;
1797 }
1798 }
1799
1800 /* If this is a complex assignment, see if we can convert it into a
1801 simple assignment. */
1802 set = expand_field_assignment (set);
1803
1804 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1805 set what we know about X. */
1806
1807 if (SET_DEST (set) == x
1808 || (paradoxical_subreg_p (SET_DEST (set))
1809 && SUBREG_REG (SET_DEST (set)) == x))
1810 update_rsp_from_reg_equal (rsp, insn, set, x);
1811 else
1812 {
1813 rsp->nonzero_bits = GET_MODE_MASK (mode);
1814 rsp->sign_bit_copies = 1;
1815 }
1816 }
1817 }
1818 \f
1819 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1820 optionally insns that were previously combined into I3 or that will be
1821 combined into the merger of INSN and I3. The order is PRED, PRED2,
1822 INSN, SUCC, SUCC2, I3.
1823
1824 Return 0 if the combination is not allowed for any reason.
1825
1826 If the combination is allowed, *PDEST will be set to the single
1827 destination of INSN and *PSRC to the single source, and this function
1828 will return 1. */
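/* For illustration, the three calls made from try_combine below are:

     can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src);
     can_combine_p (i1, i3, i0, NULL, i2, NULL, &i1dest, &i1src);
     can_combine_p (i0, i3, NULL, NULL, i1, i2, &i0dest, &i0src);

   so PRED/PRED2 hold insns that come before INSN and SUCC/SUCC2 hold
   insns that sit between INSN and I3, matching the ordering above.  */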
1829
1830 static int
1831 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1832 rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1833 rtx *pdest, rtx *psrc)
1834 {
1835 int i;
1836 const_rtx set = 0;
1837 rtx src, dest;
1838 rtx_insn *p;
1839 rtx link;
1840 bool all_adjacent = true;
1841 int (*is_volatile_p) (const_rtx);
1842
1843 if (succ)
1844 {
1845 if (succ2)
1846 {
1847 if (next_active_insn (succ2) != i3)
1848 all_adjacent = false;
1849 if (next_active_insn (succ) != succ2)
1850 all_adjacent = false;
1851 }
1852 else if (next_active_insn (succ) != i3)
1853 all_adjacent = false;
1854 if (next_active_insn (insn) != succ)
1855 all_adjacent = false;
1856 }
1857 else if (next_active_insn (insn) != i3)
1858 all_adjacent = false;
1859
1860 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1861 or a PARALLEL consisting of such a SET and CLOBBERs.
1862
1863 If INSN has CLOBBER parallel parts, ignore them for our processing.
1864 By definition, these happen during the execution of the insn. When it
1865 is merged with another insn, all bets are off. If they are, in fact,
1866 needed and aren't also supplied in I3, they may be added by
1867 recog_for_combine. Otherwise, the combined pattern won't match.
1868
1869 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1870 note.
1871
1872 Get the source and destination of INSN. If more than one, can't
1873 combine. */
1874
1875 if (GET_CODE (PATTERN (insn)) == SET)
1876 set = PATTERN (insn);
1877 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1878 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1879 {
1880 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1881 {
1882 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1883
1884 switch (GET_CODE (elt))
1885 {
1886 /* This is important to combine floating point insns
1887 for the SH4 port. */
1888 case USE:
1889 /* Combining an isolated USE doesn't make sense.
1890 We depend here on combinable_i3pat to reject them. */
1891 /* The code below this loop only verifies that the inputs of
1892 the SET in INSN do not change. We call reg_set_between_p
1893 to verify that the REG in the USE does not change between
1894 I3 and INSN.
1895 If the USE in INSN was for a pseudo register, the matching
1896 insn pattern will likely match any register; combining this
1897 with any other USE would only be safe if we knew that the
1898 used registers have identical values, or if there was
1899 something to tell them apart, e.g. different modes. For
1900 now, we forgo such complicated tests and simply disallow
1901 combining of USES of pseudo registers with any other USE. */
1902 if (REG_P (XEXP (elt, 0))
1903 && GET_CODE (PATTERN (i3)) == PARALLEL)
1904 {
1905 rtx i3pat = PATTERN (i3);
1906 int i = XVECLEN (i3pat, 0) - 1;
1907 unsigned int regno = REGNO (XEXP (elt, 0));
1908
1909 do
1910 {
1911 rtx i3elt = XVECEXP (i3pat, 0, i);
1912
1913 if (GET_CODE (i3elt) == USE
1914 && REG_P (XEXP (i3elt, 0))
1915 && (REGNO (XEXP (i3elt, 0)) == regno
1916 ? reg_set_between_p (XEXP (elt, 0),
1917 PREV_INSN (insn), i3)
1918 : regno >= FIRST_PSEUDO_REGISTER))
1919 return 0;
1920 }
1921 while (--i >= 0);
1922 }
1923 break;
1924
1925 /* We can ignore CLOBBERs. */
1926 case CLOBBER:
1927 case CLOBBER_HIGH:
1928 break;
1929
1930 case SET:
1931 /* Ignore SETs whose result isn't used but not those that
1932 have side-effects. */
1933 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1934 && insn_nothrow_p (insn)
1935 && !side_effects_p (elt))
1936 break;
1937
1938 /* If we have already found a SET, this is a second one and
1939 so we cannot combine with this insn. */
1940 if (set)
1941 return 0;
1942
1943 set = elt;
1944 break;
1945
1946 default:
1947 /* Anything else means we can't combine. */
1948 return 0;
1949 }
1950 }
1951
1952 if (set == 0
1953 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1954 so don't do anything with it. */
1955 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1956 return 0;
1957 }
1958 else
1959 return 0;
1960
1961 if (set == 0)
1962 return 0;
1963
1964 /* The simplification in expand_field_assignment may call back to
1965 get_last_value, so set a safeguard here. */
1966 subst_low_luid = DF_INSN_LUID (insn);
1967
1968 set = expand_field_assignment (set);
1969 src = SET_SRC (set), dest = SET_DEST (set);
1970
1971 /* Do not eliminate a user-specified register if it is in an
1972 asm input, because doing so could break the register asm usage
1973 described in the GCC manual.
1974 Be aware that this may cover more cases than we expect, but this
1975 should be harmless. */
1976 if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1977 && extract_asm_operands (PATTERN (i3)))
1978 return 0;
1979
1980 /* Don't eliminate a store in the stack pointer. */
1981 if (dest == stack_pointer_rtx
1982 /* Don't combine with an insn that sets a register to itself if it has
1983 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1984 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1985 /* Can't merge an ASM_OPERANDS. */
1986 || GET_CODE (src) == ASM_OPERANDS
1987 /* Can't merge a function call. */
1988 || GET_CODE (src) == CALL
1989 /* Don't eliminate a function call argument. */
1990 || (CALL_P (i3)
1991 && (find_reg_fusage (i3, USE, dest)
1992 || (REG_P (dest)
1993 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1994 && global_regs[REGNO (dest)])))
1995 /* Don't substitute into an incremented register. */
1996 || FIND_REG_INC_NOTE (i3, dest)
1997 || (succ && FIND_REG_INC_NOTE (succ, dest))
1998 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1999 /* Don't substitute into a non-local goto, this confuses CFG. */
2000 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
2001 /* Make sure that DEST is not used after INSN but before SUCC, or
2002 after SUCC and before SUCC2, or after SUCC2 but before I3. */
2003 || (!all_adjacent
2004 && ((succ2
2005 && (reg_used_between_p (dest, succ2, i3)
2006 || reg_used_between_p (dest, succ, succ2)))
2007 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
2008 || (!succ2 && !succ && reg_used_between_p (dest, insn, i3))
2009 || (succ
2010 /* SUCC and SUCC2 can be split halves from a PARALLEL; in
2011 that case SUCC is not in the insn stream, so use SUCC2
2012 instead for this test. */
2013 && reg_used_between_p (dest, insn,
2014 succ2
2015 && INSN_UID (succ) == INSN_UID (succ2)
2016 ? succ2 : succ))))
2017 /* Make sure that the value that is to be substituted for the register
2018 does not use any registers whose values alter in between. However,
2019 if the insns are adjacent, a use can't cross a set even though we
2020 think it might (this can happen for a sequence of insns each setting
2021 the same destination; last_set of that register might point to
2022 a NOTE). If INSN has a REG_EQUIV note, the register is always
2023 equivalent to the memory so the substitution is valid even if there
2024 are intervening stores. Also, don't move a volatile asm or
2025 UNSPEC_VOLATILE across any other insns. */
2026 || (! all_adjacent
2027 && (((!MEM_P (src)
2028 || ! find_reg_note (insn, REG_EQUIV, src))
2029 && modified_between_p (src, insn, i3))
2030 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
2031 || GET_CODE (src) == UNSPEC_VOLATILE))
2032 /* Don't combine across a CALL_INSN, because that would possibly
2033 change whether the life span of some REGs crosses calls or not,
2034 and it is a pain to update that information.
2035 Exception: if source is a constant, moving it later can't hurt.
2036 Accept that as a special case. */
2037 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
2038 return 0;
2039
2040 /* DEST must either be a REG or CC0. */
2041 if (REG_P (dest))
2042 {
2043 /* If register alignment is being enforced for multi-word items in all
2044 cases except for parameters, it is possible to have a register copy
2045 insn referencing a hard register that is not allowed to contain the
2046 mode being copied and which would not be valid as an operand of most
2047 insns. Eliminate this problem by not combining with such an insn.
2048
2049 Also, on some machines we don't want to extend the life of a hard
2050 register. */
2051
2052 if (REG_P (src)
2053 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
2054 && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
2055 /* Don't extend the life of a hard register unless it is
2056 a user variable (if we have few registers) or it can't
2057 fit into the desired register (meaning something special
2058 is going on).
2059 Also avoid substituting a return register into I3, because
2060 reload can't handle a conflict with constraints of other
2061 inputs. */
2062 || (REGNO (src) < FIRST_PSEUDO_REGISTER
2063 && !targetm.hard_regno_mode_ok (REGNO (src),
2064 GET_MODE (src)))))
2065 return 0;
2066 }
2067 else if (GET_CODE (dest) != CC0)
2068 return 0;
2069
2070
2071 if (GET_CODE (PATTERN (i3)) == PARALLEL)
2072 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
2073 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
2074 {
2075 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
2076
2077 /* If the clobber represents an earlyclobber operand, we must not
2078 substitute an expression containing the clobbered register.
2079 As we do not analyze the constraint strings here, we have to
2080 make the conservative assumption. However, if the register is
2081 a fixed hard reg, the clobber cannot represent any operand;
2082 we leave it up to the machine description to either accept or
2083 reject use-and-clobber patterns. */
2084 if (!REG_P (reg)
2085 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
2086 || !fixed_regs[REGNO (reg)])
2087 if (reg_overlap_mentioned_p (reg, src))
2088 return 0;
2089 }
2090
2091 /* If INSN contains anything volatile, or is an `asm' (whether volatile
2092 or not), reject, unless nothing volatile comes between it and I3. */
2093
2094 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
2095 {
2096 /* Make sure neither succ nor succ2 contains a volatile reference. */
2097 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
2098 return 0;
2099 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
2100 return 0;
2101 /* We'll check insns between INSN and I3 below. */
2102 }
2103
2104 /* If INSN is an asm, and DEST is a hard register, reject, since it has
2105 to be an explicit register variable, and was chosen for a reason. */
2106
2107 if (GET_CODE (src) == ASM_OPERANDS
2108 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2109 return 0;
2110
2111 /* If INSN contains volatile references (specifically volatile MEMs),
2112 we cannot combine across any other volatile references.
2113 Even if INSN doesn't contain volatile references, any intervening
2114 volatile insn might affect machine state. */
2115
2116 is_volatile_p = volatile_refs_p (PATTERN (insn))
2117 ? volatile_refs_p
2118 : volatile_insn_p;
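/* In other words: if INSN itself touches volatile memory, refuse to
   cross any insn that even refers to volatile memory; otherwise only
   refuse to cross fully volatile insns, such as a volatile asm or an
   unspec_volatile.  */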
2119
2120 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2121 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2122 return 0;
2123
2124 /* If INSN contains an autoincrement or autodecrement, make sure that
2125 register is not used between there and I3, and not already used in
2126 I3 either. Neither must it be used in PRED or SUCC, if they exist.
2127 Also insist that I3 not be a jump; if it were one
2128 and the incremented register were spilled, we would lose. */
2129
2130 if (AUTO_INC_DEC)
2131 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2132 if (REG_NOTE_KIND (link) == REG_INC
2133 && (JUMP_P (i3)
2134 || reg_used_between_p (XEXP (link, 0), insn, i3)
2135 || (pred != NULL_RTX
2136 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2137 || (pred2 != NULL_RTX
2138 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2139 || (succ != NULL_RTX
2140 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2141 || (succ2 != NULL_RTX
2142 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2143 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2144 return 0;
2145
2146 /* Don't combine an insn that follows a CC0-setting insn.
2147 An insn that uses CC0 must not be separated from the one that sets it.
2148 We do, however, allow I2 to follow a CC0-setting insn if that insn
2149 is passed as I1; in that case it will be deleted also.
2150 We also allow combining in this case if all the insns are adjacent
2151 because that would leave the two CC0 insns adjacent as well.
2152 It would be more logical to test whether CC0 occurs inside I1 or I2,
2153 but that would be much slower, and this ought to be equivalent. */
2154
2155 if (HAVE_cc0)
2156 {
2157 p = prev_nonnote_insn (insn);
2158 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2159 && ! all_adjacent)
2160 return 0;
2161 }
2162
2163 /* If we get here, we have passed all the tests and the combination is
2164 to be allowed. */
2165
2166 *pdest = dest;
2167 *psrc = src;
2168
2169 return 1;
2170 }
2171 \f
2172 /* LOC is the location within I3 that contains its pattern or the component
2173 of a PARALLEL of the pattern. We validate that it is valid for combining.
2174
2175 One problem is if I3 modifies its output, as opposed to replacing it
2176 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
2177 doing so would produce an insn that is not equivalent to the original insns.
2178
2179 Consider:
2180
2181 (set (reg:DI 101) (reg:DI 100))
2182 (set (subreg:SI (reg:DI 101) 0) <foo>)
2183
2184 This is NOT equivalent to:
2185
2186 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2187 (set (reg:DI 101) (reg:DI 100))])
2188
2189 Not only does this modify 100 (in which case it might still be valid
2190 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2191
2192 We can also run into a problem if I2 sets a register that I1
2193 uses and I1 gets directly substituted into I3 (not via I2). In that
2194 case, we would be getting the wrong value of I2DEST into I3, so we
2195 must reject the combination. This case occurs when I2 and I1 both
2196 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2197 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2198 of a SET must prevent combination from occurring. The same situation
2199 can occur for I0, in which case I0_NOT_IN_SRC is set.
2200
2201 Before doing the above check, we first try to expand a field assignment
2202 into a set of logical operations.
2203
2204 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2205 we place a register that is both set and used within I3. If more than one
2206 such register is detected, we fail.
2207
2208 Return 1 if the combination is valid, zero otherwise. */
2209
2210 static int
2211 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2212 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2213 {
2214 rtx x = *loc;
2215
2216 if (GET_CODE (x) == SET)
2217 {
2218 rtx set = x;
2219 rtx dest = SET_DEST (set);
2220 rtx src = SET_SRC (set);
2221 rtx inner_dest = dest;
2222 rtx subdest;
2223
2224 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2225 || GET_CODE (inner_dest) == SUBREG
2226 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2227 inner_dest = XEXP (inner_dest, 0);
2228
2229 /* Check for the case where I3 modifies its output, as discussed
2230 above. We don't want to prevent pseudos from being combined
2231 into the address of a MEM, so only prevent the combination if
2232 i1 or i2 set the same MEM. */
2233 if ((inner_dest != dest
2234 && (!MEM_P (inner_dest)
2235 || rtx_equal_p (i2dest, inner_dest)
2236 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2237 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2238 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2239 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2240 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2241
2242 /* This is the same test done in can_combine_p except we can't test
2243 all_adjacent; we don't have to, since this instruction will stay
2244 in place, thus we are not considering increasing the lifetime of
2245 INNER_DEST.
2246
2247 Also, if this insn sets a function argument, combining it with
2248 something that might need a spill could clobber a previous
2249 function argument; the all_adjacent test in can_combine_p also
2250 checks this; here, we do a more specific test for this case. */
2251
2252 || (REG_P (inner_dest)
2253 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2254 && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
2255 GET_MODE (inner_dest)))
2256 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2257 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2258 return 0;
2259
2260 /* If DEST is used in I3, it is being killed in this insn, so
2261 record that for later. We have to consider paradoxical
2262 subregs here, since they kill the whole register, but we
2263 ignore partial subregs, STRICT_LOW_PART, etc.
2264 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2265 STACK_POINTER_REGNUM, since these are always considered to be
2266 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2267 subdest = dest;
2268 if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
2269 subdest = SUBREG_REG (subdest);
2270 if (pi3dest_killed
2271 && REG_P (subdest)
2272 && reg_referenced_p (subdest, PATTERN (i3))
2273 && REGNO (subdest) != FRAME_POINTER_REGNUM
2274 && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2275 || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2276 && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2277 || (REGNO (subdest) != ARG_POINTER_REGNUM
2278 || ! fixed_regs [REGNO (subdest)]))
2279 && REGNO (subdest) != STACK_POINTER_REGNUM)
2280 {
2281 if (*pi3dest_killed)
2282 return 0;
2283
2284 *pi3dest_killed = subdest;
2285 }
2286 }
2287
2288 else if (GET_CODE (x) == PARALLEL)
2289 {
2290 int i;
2291
2292 for (i = 0; i < XVECLEN (x, 0); i++)
2293 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2294 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2295 return 0;
2296 }
2297
2298 return 1;
2299 }
2300 \f
2301 /* Return 1 if X is an arithmetic expression that contains a multiplication
2302 or division. We don't count multiplications by powers of two here. */
2303
2304 static int
2305 contains_muldiv (rtx x)
2306 {
2307 switch (GET_CODE (x))
2308 {
2309 case MOD: case DIV: case UMOD: case UDIV:
2310 return 1;
2311
2312 case MULT:
2313 return ! (CONST_INT_P (XEXP (x, 1))
2314 && pow2p_hwi (UINTVAL (XEXP (x, 1))));
2315 default:
2316 if (BINARY_P (x))
2317 return contains_muldiv (XEXP (x, 0))
2318 || contains_muldiv (XEXP (x, 1));
2319
2320 if (UNARY_P (x))
2321 return contains_muldiv (XEXP (x, 0));
2322
2323 return 0;
2324 }
2325 }
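/* Illustrative examples (hypothetical rtx): contains_muldiv returns 1
   for (plus (mult (reg) (const_int 3)) (reg)) but 0 for
   (plus (mult (reg) (const_int 8)) (reg)), since a multiplication by
   the power of two 8 is really a shift.  */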
2326 \f
2327 /* Determine whether INSN can be used in a combination. Return nonzero if
2328 not. This is used in try_combine to detect early some cases where we
2329 can't perform combinations. */
2330
2331 static int
2332 cant_combine_insn_p (rtx_insn *insn)
2333 {
2334 rtx set;
2335 rtx src, dest;
2336
2337 /* If this isn't really an insn, we can't do anything.
2338 This can occur when flow deletes an insn that it has merged into an
2339 auto-increment address. */
2340 if (!NONDEBUG_INSN_P (insn))
2341 return 1;
2342
2343 /* Never combine loads and stores involving hard regs that are likely
2344 to be spilled. The register allocator can usually handle such
2345 reg-reg moves by tying. If we allow the combiner to make
2346 substitutions of likely-spilled regs, reload might die.
2347 As an exception, we allow combinations involving fixed regs; these are
2348 not available to the register allocator so there's no risk involved. */
2349
2350 set = single_set (insn);
2351 if (! set)
2352 return 0;
2353 src = SET_SRC (set);
2354 dest = SET_DEST (set);
2355 if (GET_CODE (src) == SUBREG)
2356 src = SUBREG_REG (src);
2357 if (GET_CODE (dest) == SUBREG)
2358 dest = SUBREG_REG (dest);
2359 if (REG_P (src) && REG_P (dest)
2360 && ((HARD_REGISTER_P (src)
2361 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2362 #ifdef LEAF_REGISTERS
2363 && ! LEAF_REGISTERS [REGNO (src)])
2364 #else
2365 )
2366 #endif
2367 || (HARD_REGISTER_P (dest)
2368 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2369 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2370 return 1;
2371
2372 return 0;
2373 }
2374
2375 struct likely_spilled_retval_info
2376 {
2377 unsigned regno, nregs;
2378 unsigned mask;
2379 };
2380
2381 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2382 hard registers that are known to be written to / clobbered in full. */
2383 static void
2384 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2385 {
2386 struct likely_spilled_retval_info *const info =
2387 (struct likely_spilled_retval_info *) data;
2388 unsigned regno, nregs;
2389 unsigned new_mask;
2390
2391 if (!REG_P (XEXP (set, 0)))
2392 return;
2393 regno = REGNO (x);
2394 if (regno >= info->regno + info->nregs)
2395 return;
2396 nregs = REG_NREGS (x);
2397 if (regno + nregs <= info->regno)
2398 return;
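/* Note on the expression below: (2U << (nregs - 1)) - 1 is a mask of
   NREGS low bits, written this way so it stays well defined even for
   nregs == 32, where a plain (1U << nregs) - 1 would shift by the
   full type width; e.g. nregs == 3 yields 0b111.  */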
2399 new_mask = (2U << (nregs - 1)) - 1;
2400 if (regno < info->regno)
2401 new_mask >>= info->regno - regno;
2402 else
2403 new_mask <<= regno - info->regno;
2404 info->mask &= ~new_mask;
2405 }
2406
2407 /* Return nonzero iff part of the return value is live during INSN, and
2408 it is likely spilled. This can happen when more than one insn is needed
2409 to copy the return value, e.g. when we consider combining into the
2410 second copy insn for a complex value. */
2411
2412 static int
2413 likely_spilled_retval_p (rtx_insn *insn)
2414 {
2415 rtx_insn *use = BB_END (this_basic_block);
2416 rtx reg;
2417 rtx_insn *p;
2418 unsigned regno, nregs;
2419 /* We assume here that no machine mode needs more than
2420 32 hard registers when the value overlaps with a register
2421 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2422 unsigned mask;
2423 struct likely_spilled_retval_info info;
2424
2425 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2426 return 0;
2427 reg = XEXP (PATTERN (use), 0);
2428 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2429 return 0;
2430 regno = REGNO (reg);
2431 nregs = REG_NREGS (reg);
2432 if (nregs == 1)
2433 return 0;
2434 mask = (2U << (nregs - 1)) - 1;
2435
2436 /* Disregard parts of the return value that are set later. */
2437 info.regno = regno;
2438 info.nregs = nregs;
2439 info.mask = mask;
2440 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2441 if (INSN_P (p))
2442 note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2443 mask = info.mask;
2444
2445 /* Check if any of the (probably) live return value registers is
2446 likely spilled. */
2447 nregs--;
2448 do
2449 {
2450 if ((mask & 1 << nregs)
2451 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2452 return 1;
2453 } while (nregs--);
2454 return 0;
2455 }
2456
2457 /* Adjust INSN after we made a change to its destination.
2458
2459 Changing the destination can invalidate notes that say something about
2460 the results of the insn and a LOG_LINK pointing to the insn. */
2461
2462 static void
2463 adjust_for_new_dest (rtx_insn *insn)
2464 {
2465 /* For notes, be conservative and simply remove them. */
2466 remove_reg_equal_equiv_notes (insn);
2467
2468 /* The new insn will have a destination that was previously the destination
2469 of an insn just above it. Call distribute_links to make a LOG_LINK from
2470 the next use of that destination. */
2471
2472 rtx set = single_set (insn);
2473 gcc_assert (set);
2474
2475 rtx reg = SET_DEST (set);
2476
2477 while (GET_CODE (reg) == ZERO_EXTRACT
2478 || GET_CODE (reg) == STRICT_LOW_PART
2479 || GET_CODE (reg) == SUBREG)
2480 reg = XEXP (reg, 0);
2481 gcc_assert (REG_P (reg));
2482
2483 distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2484
2485 df_insn_rescan (insn);
2486 }
2487
2488 /* Return TRUE if combine can reuse reg X in mode MODE.
2489 ADDED_SETS is nonzero if the original set is still required. */
2490 static bool
2491 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2492 {
2493 unsigned int regno;
2494
2495 if (!REG_P (x))
2496 return false;
2497
2498 /* Don't change between modes with different underlying register sizes,
2499 since this could lead to invalid subregs. */
2500 if (maybe_ne (REGMODE_NATURAL_SIZE (mode),
2501 REGMODE_NATURAL_SIZE (GET_MODE (x))))
2502 return false;
2503
2504 regno = REGNO (x);
2505 /* Allow hard registers if the new mode is legal, and occupies no more
2506 registers than the old mode. */
2507 if (regno < FIRST_PSEUDO_REGISTER)
2508 return (targetm.hard_regno_mode_ok (regno, mode)
2509 && REG_NREGS (x) >= hard_regno_nregs (regno, mode));
2510
2511 /* Or a pseudo that is only used once. */
2512 return (regno < reg_n_sets_max
2513 && REG_N_SETS (regno) == 1
2514 && !added_sets
2515 && !REG_USERVAR_P (x));
2516 }
2517
2518
2519 /* Check whether X, the destination of a set, refers to part of
2520 the register specified by REG. */
2521
2522 static bool
2523 reg_subword_p (rtx x, rtx reg)
2524 {
2525 /* Check that reg is an integer mode register. */
2526 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2527 return false;
2528
2529 if (GET_CODE (x) == STRICT_LOW_PART
2530 || GET_CODE (x) == ZERO_EXTRACT)
2531 x = XEXP (x, 0);
2532
2533 return GET_CODE (x) == SUBREG
2534 && SUBREG_REG (x) == reg
2535 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2536 }
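/* Illustrative example (hypothetical pseudo): with REG being
   (reg:SI 100), a destination such as
   (strict_low_part (subreg:HI (reg:SI 100) 0)) satisfies this
   predicate, while the full (reg:SI 100) itself does not.  */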
2537
2538 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2539 Note that the INSN should be deleted *after* removing dead edges, so
2540 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2541 but not for a (set (pc) (label_ref FOO)). */
2542
2543 static void
2544 update_cfg_for_uncondjump (rtx_insn *insn)
2545 {
2546 basic_block bb = BLOCK_FOR_INSN (insn);
2547 gcc_assert (BB_END (bb) == insn);
2548
2549 purge_dead_edges (bb);
2550
2551 delete_insn (insn);
2552 if (EDGE_COUNT (bb->succs) == 1)
2553 {
2554 rtx_insn *insn;
2555
2556 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2557
2558 /* Remove barriers from the footer if there are any. */
2559 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2560 if (BARRIER_P (insn))
2561 {
2562 if (PREV_INSN (insn))
2563 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2564 else
2565 BB_FOOTER (bb) = NEXT_INSN (insn);
2566 if (NEXT_INSN (insn))
2567 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2568 }
2569 else if (LABEL_P (insn))
2570 break;
2571 }
2572 }
2573
2574 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2575 by an arbitrary number of CLOBBERs. */
2576 static bool
2577 is_parallel_of_n_reg_sets (rtx pat, int n)
2578 {
2579 if (GET_CODE (pat) != PARALLEL)
2580 return false;
2581
2582 int len = XVECLEN (pat, 0);
2583 if (len < n)
2584 return false;
2585
2586 int i;
2587 for (i = 0; i < n; i++)
2588 if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2589 || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2590 return false;
2591 for ( ; i < len; i++)
2592 switch (GET_CODE (XVECEXP (pat, 0, i)))
2593 {
2594 case CLOBBER:
2595 if (XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
2596 return false;
2597 break;
2598 case CLOBBER_HIGH:
2599 break;
2600 default:
2601 return false;
2602 }
2603 return true;
2604 }
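/* Illustrative example (hypothetical registers): for N == 2 this
   accepts a pattern such as
     (parallel [(set (reg:CC 17) ...)
                (set (reg:SI 0) ...)
                (clobber (scratch:SI))])
   while a trailing (clobber (const_int 0)), the placeholder used for
   unrecognizable patterns, makes it fail.  */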
2605
2606 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2607 CLOBBERs), can be split into individual SETs in that order, without
2608 changing semantics. */
2609 static bool
2610 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2611 {
2612 if (!insn_nothrow_p (insn))
2613 return false;
2614
2615 rtx pat = PATTERN (insn);
2616
2617 int i, j;
2618 for (i = 0; i < n; i++)
2619 {
2620 if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2621 return false;
2622
2623 rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2624
2625 for (j = i + 1; j < n; j++)
2626 if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2627 return false;
2628 }
2629
2630 return true;
2631 }
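/* Illustrative counterexample (hypothetical registers): the SETs in
     (parallel [(set (reg 100) (reg 101))
                (set (reg 102) (plus (reg 100) (const_int 1)))])
   cannot be emitted as separate insns in this order, because inside a
   PARALLEL the second SET reads the value reg 100 had before the insn,
   not the one the first SET just stored.  */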
2632
2633 /* Return whether X is just a single set, with the source
2634 a general_operand. */
2635 static bool
2636 is_just_move (rtx x)
2637 {
2638 if (INSN_P (x))
2639 x = PATTERN (x);
2640
2641 return (GET_CODE (x) == SET && general_operand (SET_SRC (x), VOIDmode));
2642 }
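/* Illustrative example (hypothetical pseudos): a plain copy such as
   (set (reg:SI 100) (reg:SI 101)) qualifies, while
   (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 4))) does not,
   since (plus ...) is not a general_operand.  */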
2643
2644 /* Callback function to count autoincs. */
2645
2646 static int
2647 count_auto_inc (rtx, rtx, rtx, rtx, rtx, void *arg)
2648 {
2649 (*((int *) arg))++;
2650
2651 return 0;
2652 }
2653
2654 /* Try to combine the insns I0, I1 and I2 into I3.
2655 Here I0, I1 and I2 appear earlier than I3.
2656 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2657 I3.
2658
2659 If we are combining more than two insns and the resulting insn is not
2660 recognized, try splitting it into two insns. If that happens, I2 and I3
2661 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2662 Otherwise, I0, I1 and I2 are pseudo-deleted.
2663
2664 Return 0 if the combination does not work. Then nothing is changed.
2665 If we did the combination, return the insn at which combine should
2666 resume scanning.
2667
2668 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2669 new direct jump instruction.
2670
2671 LAST_COMBINED_INSN is either I3, or some insn after I3 that has
2672 been I3 passed to an earlier try_combine within the same basic
2673 block. */
2674
2675 static rtx_insn *
2676 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2677 int *new_direct_jump_p, rtx_insn *last_combined_insn)
2678 {
2679 /* New patterns for I3 and I2, respectively. */
2680 rtx newpat, newi2pat = 0;
2681 rtvec newpat_vec_with_clobbers = 0;
2682 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2683 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2684 dead. */
2685 int added_sets_0, added_sets_1, added_sets_2;
2686 /* Total number of SETs to put into I3. */
2687 int total_sets;
2688 /* Nonzero if I2's or I1's body now appears in I3. */
2689 int i2_is_used = 0, i1_is_used = 0;
2690 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2691 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2692 /* Contains I3 if the destination of I3 is used in its source, which means
2693 that the old life of I3 is being killed. If that usage is placed into
2694 I2 and not in I3, a REG_DEAD note must be made. */
2695 rtx i3dest_killed = 0;
2696 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2697 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2698 /* Copy of SET_SRC of I1 and I0, if needed. */
2699 rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2700 /* Set if I2DEST was reused as a scratch register. */
2701 bool i2scratch = false;
2702 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2703 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2704 /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC. */
2705 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2706 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2707 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2708 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2709 /* Notes that must be added to REG_NOTES in I3 and I2. */
2710 rtx new_i3_notes, new_i2_notes;
2711 /* Notes that we substituted I3 into I2 instead of the normal case. */
2712 int i3_subst_into_i2 = 0;
2713 /* Notes that I1, I2 or I3 is a MULT operation. */
2714 int have_mult = 0;
2715 int swap_i2i3 = 0;
2716 int split_i2i3 = 0;
2717 int changed_i3_dest = 0;
2718 bool i2_was_move = false, i3_was_move = false;
2719 int n_auto_inc = 0;
2720
2721 int maxreg;
2722 rtx_insn *temp_insn;
2723 rtx temp_expr;
2724 struct insn_link *link;
2725 rtx other_pat = 0;
2726 rtx new_other_notes;
2727 int i;
2728 scalar_int_mode dest_mode, temp_mode;
2729
2730 /* Immediately return if any of I0, I1 and I2 are the same insn (I3 can
2731 never be). */
2732 if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2733 return 0;
2734
2735 /* Only try four-insn combinations when there's high likelihood of
2736 success. Look for simple insns, such as loads of constants or
2737 binary operations involving a constant. */
2738 if (i0)
2739 {
2740 int i;
2741 int ngood = 0;
2742 int nshift = 0;
2743 rtx set0, set3;
2744
2745 if (!flag_expensive_optimizations)
2746 return 0;
2747
2748 for (i = 0; i < 4; i++)
2749 {
2750 rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2751 rtx set = single_set (insn);
2752 rtx src;
2753 if (!set)
2754 continue;
2755 src = SET_SRC (set);
2756 if (CONSTANT_P (src))
2757 {
2758 ngood += 2;
2759 break;
2760 }
2761 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2762 ngood++;
2763 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2764 || GET_CODE (src) == LSHIFTRT)
2765 nshift++;
2766 }
2767
2768 /* If I0 loads from a memory location and I3 stores to the same one,
2769 then I1 and I2 are likely manipulating its value. Ideally we'll
2770 be able to combine all four insns into a bitfield insertion of some kind.
2771
2772 Note the source in I0 might be inside a sign/zero extension and the
2773 memory modes in I0 and I3 might be different. So extract the address
2774 from the destination of I3 and search for it in the source of I0.
2775
2776 In the event that there's a match but the source/dest do not actually
2777 refer to the same memory, the worst that happens is we try some
2778 combinations that we wouldn't have otherwise. */
2779 if ((set0 = single_set (i0))
2780 /* Ensure the source of SET0 is a MEM, possibly buried inside
2781 an extension. */
2782 && (GET_CODE (SET_SRC (set0)) == MEM
2783 || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2784 || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2785 && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2786 && (set3 = single_set (i3))
2787 /* Ensure the destination of SET3 is a MEM. */
2788 && GET_CODE (SET_DEST (set3)) == MEM
2789 /* Would it be better to extract the base address for the MEM
2790 in SET3 and look for that? I don't have cases where it matters
2791 but I could envision such cases. */
2792 && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2793 ngood += 2;
2794
2795 if (ngood < 2 && nshift < 2)
2796 return 0;
2797 }
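/* Summarizing the heuristic above: a constant load immediately scores
   2 and ends the scan; a binary operation with a constant operand
   scores 1; ASHIFT, ASHIFTRT and LSHIFTRT are counted as shifts; a
   matching load/store pair over the same memory also scores 2.  The
   four-insn combination is attempted only once the score reaches 2 or
   at least two shifts were seen (shift chains often amount to an
   extraction or an extension).  */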
2798
2799 /* Exit early if one of the insns involved can't be used for
2800 combinations. */
2801 if (CALL_P (i2)
2802 || (i1 && CALL_P (i1))
2803 || (i0 && CALL_P (i0))
2804 || cant_combine_insn_p (i3)
2805 || cant_combine_insn_p (i2)
2806 || (i1 && cant_combine_insn_p (i1))
2807 || (i0 && cant_combine_insn_p (i0))
2808 || likely_spilled_retval_p (i3))
2809 return 0;
2810
2811 combine_attempts++;
2812 undobuf.other_insn = 0;
2813
2814 /* Reset the hard register usage information. */
2815 CLEAR_HARD_REG_SET (newpat_used_regs);
2816
2817 if (dump_file && (dump_flags & TDF_DETAILS))
2818 {
2819 if (i0)
2820 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2821 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2822 else if (i1)
2823 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2824 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2825 else
2826 fprintf (dump_file, "\nTrying %d -> %d:\n",
2827 INSN_UID (i2), INSN_UID (i3));
2828
2829 if (i0)
2830 dump_insn_slim (dump_file, i0);
2831 if (i1)
2832 dump_insn_slim (dump_file, i1);
2833 dump_insn_slim (dump_file, i2);
2834 dump_insn_slim (dump_file, i3);
2835 }
2836
2837 /* If multiple insns feed into one of I2 or I3, they can be in any
2838 order. To simplify the code below, reorder them in sequence. */
2839 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2840 std::swap (i0, i2);
2841 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2842 std::swap (i0, i1);
2843 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2844 std::swap (i1, i2);
2845
2846 added_links_insn = 0;
2847 added_notes_insn = 0;
2848
2849 /* First check for one important special case that the code below will
2850 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2851 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2852 we may be able to replace that destination with the destination of I3.
2853 This occurs in the common code where we compute both a quotient and
2854 remainder into a structure, in which case we want to do the computation
2855 directly into the structure to avoid register-register copies.
2856
2857 Note that this case handles both multiple sets in I2 and also cases
2858 where I2 has a number of CLOBBERs inside the PARALLEL.
2859
2860 We make very conservative checks below and only try to handle the
2861 most common cases of this. For example, we only handle the case
2862 where I2 and I3 are adjacent to avoid making difficult register
2863 usage tests. */
2864
2865 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2866 && REG_P (SET_SRC (PATTERN (i3)))
2867 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2868 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2869 && GET_CODE (PATTERN (i2)) == PARALLEL
2870 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2871 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2872 below would need to check what is inside (and reg_overlap_mentioned_p
2873 doesn't support those codes anyway). Don't allow those destinations;
2874 the resulting insn isn't likely to be recognized anyway. */
2875 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2876 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2877 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2878 SET_DEST (PATTERN (i3)))
2879 && next_active_insn (i2) == i3)
2880 {
2881 rtx p2 = PATTERN (i2);
2882
2883 /* Make sure that the destination of I3,
2884 which we are going to substitute into one output of I2,
2885 is not used within another output of I2. We must avoid making this:
2886 (parallel [(set (mem (reg 69)) ...)
2887 (set (reg 69) ...)])
2888 which is not well-defined as to order of actions.
2889 (Besides, reload can't handle output reloads for this.)
2890
2891 The problem can also happen if the dest of I3 is a memory ref,
2892 if another dest in I2 is an indirect memory ref.
2893
2894 Neither can this PARALLEL be an asm. We do not allow combining
2895 that usually (see can_combine_p), so do not here either. */
2896 bool ok = true;
2897 for (i = 0; ok && i < XVECLEN (p2, 0); i++)
2898 {
2899 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2900 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER
2901 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER_HIGH)
2902 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2903 SET_DEST (XVECEXP (p2, 0, i))))
2904 ok = false;
2905 else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2906 && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2907 ok = false;
2908 }
2909
2910 if (ok)
2911 for (i = 0; i < XVECLEN (p2, 0); i++)
2912 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2913 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2914 {
2915 combine_merges++;
2916
2917 subst_insn = i3;
2918 subst_low_luid = DF_INSN_LUID (i2);
2919
2920 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2921 i2src = SET_SRC (XVECEXP (p2, 0, i));
2922 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2923 i2dest_killed = dead_or_set_p (i2, i2dest);
2924
2925 /* Replace the dest in I2 with our dest and make the resulting
2926 insn the new pattern for I3. Then skip to where we validate
2927 the pattern. Everything was set up above. */
2928 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2929 newpat = p2;
2930 i3_subst_into_i2 = 1;
2931 goto validate_replacement;
2932 }
2933 }
2934
2935 /* If I2 is setting a pseudo to a constant and I3 is setting some
2936 sub-part of it to another constant, merge them by making a new
2937 constant. */
2938 if (i1 == 0
2939 && (temp_expr = single_set (i2)) != 0
2940 && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
2941 && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2942 && GET_CODE (PATTERN (i3)) == SET
2943 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2944 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2945 {
2946 rtx dest = SET_DEST (PATTERN (i3));
2947 rtx temp_dest = SET_DEST (temp_expr);
2948 int offset = -1;
2949 int width = 0;
2950
2951 if (GET_CODE (dest) == ZERO_EXTRACT)
2952 {
2953 if (CONST_INT_P (XEXP (dest, 1))
2954 && CONST_INT_P (XEXP (dest, 2))
2955 && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
2956 &dest_mode))
2957 {
2958 width = INTVAL (XEXP (dest, 1));
2959 offset = INTVAL (XEXP (dest, 2));
2960 dest = XEXP (dest, 0);
2961 if (BITS_BIG_ENDIAN)
2962 offset = GET_MODE_PRECISION (dest_mode) - width - offset;
2963 }
2964 }
2965 else
2966 {
2967 if (GET_CODE (dest) == STRICT_LOW_PART)
2968 dest = XEXP (dest, 0);
2969 if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
2970 {
2971 width = GET_MODE_PRECISION (dest_mode);
2972 offset = 0;
2973 }
2974 }
2975
2976 if (offset >= 0)
2977 {
2978 /* If this is the low part, we're done. */
2979 if (subreg_lowpart_p (dest))
2980 ;
2981 /* Handle the case where inner is twice the size of outer. */
2982 else if (GET_MODE_PRECISION (temp_mode)
2983 == 2 * GET_MODE_PRECISION (dest_mode))
2984 offset += GET_MODE_PRECISION (dest_mode);
2985 /* Otherwise give up for now. */
2986 else
2987 offset = -1;
2988 }
2989
2990 if (offset >= 0)
2991 {
2992 rtx inner = SET_SRC (PATTERN (i3));
2993 rtx outer = SET_SRC (temp_expr);
2994
2995 wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
2996 rtx_mode_t (inner, dest_mode),
2997 offset, width);
2998
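/* Illustrative example (hypothetical values): with OUTER == 0x12345678
   in SImode, INNER == 0xab in QImode, OFFSET == 8 and WIDTH == 8,
   wi::insert replaces bits 8..15 and O becomes 0x1234ab78.  */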
2999 combine_merges++;
3000 subst_insn = i3;
3001 subst_low_luid = DF_INSN_LUID (i2);
3002 added_sets_2 = added_sets_1 = added_sets_0 = 0;
3003 i2dest = temp_dest;
3004 i2dest_killed = dead_or_set_p (i2, i2dest);
3005
3006 /* Replace the source in I2 with the new constant and make the
3007 resulting insn the new pattern for I3. Then skip to where we
3008 validate the pattern. Everything was set up above. */
3009 SUBST (SET_SRC (temp_expr),
3010 immed_wide_int_const (o, temp_mode));
3011
3012 newpat = PATTERN (i2);
3013
3014 /* The dest of I3 has been replaced with the dest of I2. */
3015 changed_i3_dest = 1;
3016 goto validate_replacement;
3017 }
3018 }
3019
3020 /* If we have no I1 and I2 looks like:
3021 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
3022 (set Y OP)])
3023 make up a dummy I1 that is
3024 (set Y OP)
3025 and change I2 to be
3026 (set (reg:CC X) (compare:CC Y (const_int 0)))
3027
3028 (We can ignore any trailing CLOBBERs.)
3029
3030 This undoes a previous combination and allows us to match a branch-and-
3031 decrement insn. */
3032
3033 if (!HAVE_cc0 && i1 == 0
3034 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
3035 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
3036 == MODE_CC)
3037 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
3038 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
3039 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
3040 SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
3041 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3042 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
3043 {
3044 /* We make I1 with the same INSN_UID as I2. This gives it
3045 the same DF_INSN_LUID for value tracking. Our fake I1 will
3046 never appear in the insn stream so giving it the same INSN_UID
3047 as I2 will not cause a problem. */
3048
3049 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
3050 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
3051 -1, NULL_RTX);
3052 INSN_UID (i1) = INSN_UID (i2);
3053
3054 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
3055 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
3056 SET_DEST (PATTERN (i1)));
3057 unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
3058 SUBST_LINK (LOG_LINKS (i2),
3059 alloc_insn_link (i1, regno, LOG_LINKS (i2)));
3060 }
3061
3062 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
3063 make those two SETs separate I1 and I2 insns, and make an I0 that is
3064 the original I1. */
3065 if (!HAVE_cc0 && i0 == 0
3066 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
3067 && can_split_parallel_of_n_reg_sets (i2, 2)
3068 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3069 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)
3070 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3071 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
3072 {
3073 /* If there is no I1, there is no I0 either. */
3074 i0 = i1;
3075
3076 /* We make I1 with the same INSN_UID as I2. This gives it
3077 the same DF_INSN_LUID for value tracking. Our fake I1 will
3078 never appear in the insn stream so giving it the same INSN_UID
3079 as I2 will not cause a problem. */
3080
3081 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
3082 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
3083 -1, NULL_RTX);
3084 INSN_UID (i1) = INSN_UID (i2);
3085
3086 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
3087 }
3088
3089 /* Verify that I2 and maybe I1 and I0 can be combined into I3. */
3090 if (!can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src))
3091 {
3092 if (dump_file && (dump_flags & TDF_DETAILS))
3093 fprintf (dump_file, "Can't combine i2 into i3\n");
3094 undo_all ();
3095 return 0;
3096 }
3097 if (i1 && !can_combine_p (i1, i3, i0, NULL, i2, NULL, &i1dest, &i1src))
3098 {
3099 if (dump_file && (dump_flags & TDF_DETAILS))
3100 fprintf (dump_file, "Can't combine i1 into i3\n");
3101 undo_all ();
3102 return 0;
3103 }
3104 if (i0 && !can_combine_p (i0, i3, NULL, NULL, i1, i2, &i0dest, &i0src))
3105 {
3106 if (dump_file && (dump_flags & TDF_DETAILS))
3107 fprintf (dump_file, "Can't combine i0 into i3\n");
3108 undo_all ();
3109 return 0;
3110 }
3111
3112 /* Record whether i2 and i3 are trivial moves. */
3113 i2_was_move = is_just_move (i2);
3114 i3_was_move = is_just_move (i3);
3115
3116 /* Record whether I2DEST is used in I2SRC and similarly for the other
3117 cases. Knowing this will help in register status updating below. */
3118 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
3119 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
3120 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
3121 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
3122 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
3123 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
3124 i2dest_killed = dead_or_set_p (i2, i2dest);
3125 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
3126 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
3127
3128 /* For the earlier insns, determine which of the subsequent ones they
3129 feed. */
3130 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
3131 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
3132 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
3133 : (!reg_overlap_mentioned_p (i1dest, i0dest)
3134 && reg_overlap_mentioned_p (i0dest, i2src))));
3135
3136 /* Ensure that I3's pattern can be the destination of combines. */
3137 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
3138 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
3139 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
3140 || (i1dest_in_i0src && !i0_feeds_i1_n)),
3141 &i3dest_killed))
3142 {
3143 undo_all ();
3144 return 0;
3145 }
3146
3147 /* See if any of the insns is a MULT operation. Unless one of them is,
3148 we will reject a combined insn that is one, since it must be slower.
3149 Be conservative here. */
3150 if (GET_CODE (i2src) == MULT
3151 || (i1 != 0 && GET_CODE (i1src) == MULT)
3152 || (i0 != 0 && GET_CODE (i0src) == MULT)
3153 || (GET_CODE (PATTERN (i3)) == SET
3154 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
3155 have_mult = 1;
3156
3157 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3158 We used to do this EXCEPT in one case: I3 has a post-inc in an
3159 output operand. However, that exception can give rise to insns like
3160 mov r3,(r3)+
3161 which is a famous insn on the PDP-11 where the value of r3 used as the
3162 source was model-dependent. Avoid this sort of thing. */
3163
3164 #if 0
3165 if (!(GET_CODE (PATTERN (i3)) == SET
3166 && REG_P (SET_SRC (PATTERN (i3)))
3167 && MEM_P (SET_DEST (PATTERN (i3)))
3168 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3169 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3170 /* It's not the exception. */
3171 #endif
3172 if (AUTO_INC_DEC)
3173 {
3174 rtx link;
3175 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3176 if (REG_NOTE_KIND (link) == REG_INC
3177 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3178 || (i1 != 0
3179 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3180 {
3181 undo_all ();
3182 return 0;
3183 }
3184 }
3185
3186 /* See if the SETs in I1 or I2 need to be kept around in the merged
3187 instruction: whenever the value set there is still needed past I3.
3188 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3189
3190 For the SET in I1, we have two cases: if I1 and I2 independently feed
3191 into I3, the set in I1 needs to be kept around unless I1DEST dies
3192 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
3193 in I1 needs to be kept around unless I1DEST dies or is set in either
3194 I2 or I3. The same considerations apply to I0. */
3195
3196 added_sets_2 = !dead_or_set_p (i3, i2dest);
3197
3198 if (i1)
3199 added_sets_1 = !(dead_or_set_p (i3, i1dest)
3200 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3201 else
3202 added_sets_1 = 0;
3203
3204 if (i0)
3205 added_sets_0 = !(dead_or_set_p (i3, i0dest)
3206 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3207 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3208 && dead_or_set_p (i2, i0dest)));
3209 else
3210 added_sets_0 = 0;
3211
3212 /* We are about to copy insns for the case where they need to be kept
3213 around. Check that they can be copied in the merged instruction. */
3214
3215 if (targetm.cannot_copy_insn_p
3216 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3217 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3218 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3219 {
3220 undo_all ();
3221 return 0;
3222 }
3223
3224 /* Count how many auto_inc expressions there were in the original insns;
3225 we need to have the same number in the resulting patterns. */
3226
3227 if (i0)
3228 for_each_inc_dec (PATTERN (i0), count_auto_inc, &n_auto_inc);
3229 if (i1)
3230 for_each_inc_dec (PATTERN (i1), count_auto_inc, &n_auto_inc);
3231 for_each_inc_dec (PATTERN (i2), count_auto_inc, &n_auto_inc);
3232 for_each_inc_dec (PATTERN (i3), count_auto_inc, &n_auto_inc);
3233
3234 /* If the set in I2 needs to be kept around, we must make a copy of
3235 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3236 PATTERN (I2), we are only substituting for the original I1DEST, not into
3237 an already-substituted copy. This also prevents making self-referential
3238 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3239 I2DEST. */
3240
3241 if (added_sets_2)
3242 {
3243 if (GET_CODE (PATTERN (i2)) == PARALLEL)
3244 i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3245 else
3246 i2pat = copy_rtx (PATTERN (i2));
3247 }
3248
3249 if (added_sets_1)
3250 {
3251 if (GET_CODE (PATTERN (i1)) == PARALLEL)
3252 i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3253 else
3254 i1pat = copy_rtx (PATTERN (i1));
3255 }
3256
3257 if (added_sets_0)
3258 {
3259 if (GET_CODE (PATTERN (i0)) == PARALLEL)
3260 i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3261 else
3262 i0pat = copy_rtx (PATTERN (i0));
3263 }
3264
3265 combine_merges++;
3266
3267 /* Substitute in the latest insn for the regs set by the earlier ones. */
3268
3269 maxreg = max_reg_num ();
3270
3271 subst_insn = i3;
3272
3273 /* Many machines that don't use CC0 have insns that can both perform an
3274 arithmetic operation and set the condition code. These operations will
3275 be represented as a PARALLEL with the first element of the vector
3276 being a COMPARE of an arithmetic operation with the constant zero.
3277 The second element of the vector will set some pseudo to the result
3278 of the same arithmetic operation. If we simplify the COMPARE, we won't
3279 match such a pattern and so will generate an extra insn. Here we test
3280 for this case, where both the comparison and the operation result are
3281 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3282 I2SRC. Later we will make the PARALLEL that contains I2. */
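/* Schematically (registers illustrative), after I2DEST is replaced
   by I2SRC in the COMPARE and I2's SET is added back, the pattern
   we try to match looks like
     (parallel [(set (reg:CC flags)
                     (compare:CC (plus:SI (reg 100) (reg 101))
                                 (const_int 0)))
                (set (reg:SI 102) (plus:SI (reg 100) (reg 101)))])  */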
3283
3284 if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3285 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3286 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3287 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3288 {
3289 rtx newpat_dest;
3290 rtx *cc_use_loc = NULL;
3291 rtx_insn *cc_use_insn = NULL;
3292 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3293 machine_mode compare_mode, orig_compare_mode;
3294 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3295 scalar_int_mode mode;
3296
3297 newpat = PATTERN (i3);
3298 newpat_dest = SET_DEST (newpat);
3299 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3300
3301 if (undobuf.other_insn == 0
3302 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3303 &cc_use_insn)))
3304 {
3305 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3306 if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
3307 compare_code = simplify_compare_const (compare_code, mode,
3308 op0, &op1);
3309 target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3310 }
3311
3312 /* Do the rest only if op1 is const0_rtx, which may be the
3313 result of simplification. */
3314 if (op1 == const0_rtx)
3315 {
3316 /* If a single use of the CC is found, prepare to modify it
3317 when SELECT_CC_MODE returns a new CC-class mode, or when
3318 the above simplify_compare_const() returned a new comparison
3319 operator. undobuf.other_insn is assigned the CC use insn
3320 when modifying it. */
3321 if (cc_use_loc)
3322 {
3323 #ifdef SELECT_CC_MODE
3324 machine_mode new_mode
3325 = SELECT_CC_MODE (compare_code, op0, op1);
3326 if (new_mode != orig_compare_mode
3327 && can_change_dest_mode (SET_DEST (newpat),
3328 added_sets_2, new_mode))
3329 {
3330 unsigned int regno = REGNO (newpat_dest);
3331 compare_mode = new_mode;
3332 if (regno < FIRST_PSEUDO_REGISTER)
3333 newpat_dest = gen_rtx_REG (compare_mode, regno);
3334 else
3335 {
3336 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3337 newpat_dest = regno_reg_rtx[regno];
3338 }
3339 }
3340 #endif
3341 /* Cases for modifying the CC-using comparison. */
3342 if (compare_code != orig_compare_code
3343 /* ??? Do we need to verify the zero rtx? */
3344 && XEXP (*cc_use_loc, 1) == const0_rtx)
3345 {
3346 /* Replace cc_use_loc with entire new RTX. */
3347 SUBST (*cc_use_loc,
3348 gen_rtx_fmt_ee (compare_code, GET_MODE (*cc_use_loc),
3349 newpat_dest, const0_rtx));
3350 undobuf.other_insn = cc_use_insn;
3351 }
3352 else if (compare_mode != orig_compare_mode)
3353 {
3354 /* Just replace the CC reg with a new mode. */
3355 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3356 undobuf.other_insn = cc_use_insn;
3357 }
3358 }
3359
3360 /* Now we modify the current newpat:
3361 First, SET_DEST (newpat) is updated if the CC mode has been
3362 altered. For targets without SELECT_CC_MODE, this should be
3363 optimized away. */
3364 if (compare_mode != orig_compare_mode)
3365 SUBST (SET_DEST (newpat), newpat_dest);
3366 /* This is always done to propagate i2src into newpat. */
3367 SUBST (SET_SRC (newpat),
3368 gen_rtx_COMPARE (compare_mode, op0, op1));
3369 /* Create a new version of i2pat if needed; the PARALLEL
3370 creation below needs this to work correctly. */
3371 if (! rtx_equal_p (i2src, op0))
3372 i2pat = gen_rtx_SET (i2dest, op0);
3373 i2_is_used = 1;
3374 }
3375 }
3376
3377 if (i2_is_used == 0)
3378 {
3379 /* It is possible that the source of I2 or I1 may be performing
3380 an unneeded operation, such as a ZERO_EXTEND of something
3381 that is known to have the high part zero. Handle that case
3382 by letting subst look at the inner insns.
3383
3384 Another way to do this would be to have a function that tries
3385 to simplify a single insn instead of merging two or more
3386 insns. We don't do this because of the potential of infinite
3387 loops and because of the potential extra memory required.
3388 However, doing it the way we are is a bit of a kludge and
3389 doesn't catch all cases.
3390
3391 But only do this if -fexpensive-optimizations since it slows
3392 things down and doesn't usually win.
3393
3394 This is not done in the COMPARE case above because the
3395 unmodified I2PAT is used in the PARALLEL and so a pattern
3396 with a modified I2SRC would not match. */
3397
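/* For example (registers illustrative), if I2SRC is
   (zero_extend:SI (subreg:QI (reg:SI 100) 0)) and nonzero_bits
   shows that the upper bits of (reg:SI 100) are already zero,
   subst can simplify it down to just (reg:SI 100). */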
3398 if (flag_expensive_optimizations)
3399 {
3400 /* Pass pc_rtx so no substitutions are done, just
3401 simplifications. */
3402 if (i1)
3403 {
3404 subst_low_luid = DF_INSN_LUID (i1);
3405 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3406 }
3407
3408 subst_low_luid = DF_INSN_LUID (i2);
3409 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3410 }
3411
3412 n_occurrences = 0; /* `subst' counts here */
3413 subst_low_luid = DF_INSN_LUID (i2);
3414
3415 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3416 copy of I2SRC each time we substitute it, in order to avoid creating
3417 self-referential RTL when we will be substituting I1SRC for I1DEST
3418 later. Likewise if I0 feeds into I2, either directly or indirectly
3419 through I1, and I0DEST is in I0SRC. */
3420 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3421 (i1_feeds_i2_n && i1dest_in_i1src)
3422 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3423 && i0dest_in_i0src));
3424 substed_i2 = 1;
3425
3426 /* Record whether I2's body now appears within I3's body. */
3427 i2_is_used = n_occurrences;
3428 }
3429
3430 /* If we already got a failure, don't try to do more. Otherwise, try to
3431 substitute I1 if we have it. */
3432
3433 if (i1 && GET_CODE (newpat) != CLOBBER)
3434 {
3435 /* Before we can do this substitution, we must redo the test done
3436 above (see detailed comments there) that ensures I1DEST isn't
3437 mentioned in any SETs in NEWPAT that are field assignments. */
3438 if (!combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3439 0, 0, 0))
3440 {
3441 undo_all ();
3442 return 0;
3443 }
3444
3445 n_occurrences = 0;
3446 subst_low_luid = DF_INSN_LUID (i1);
3447
3448 /* If the following substitution will modify I1SRC, make a copy of it
3449 for the case where it is substituted for I1DEST in I2PAT later. */
3450 if (added_sets_2 && i1_feeds_i2_n)
3451 i1src_copy = copy_rtx (i1src);
3452
3453 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3454 copy of I1SRC each time we substitute it, in order to avoid creating
3455 self-referential RTL when we will be substituting I0SRC for I0DEST
3456 later. */
3457 newpat = subst (newpat, i1dest, i1src, 0, 0,
3458 i0_feeds_i1_n && i0dest_in_i0src);
3459 substed_i1 = 1;
3460
3461 /* Record whether I1's body now appears within I3's body. */
3462 i1_is_used = n_occurrences;
3463 }
3464
3465 /* Likewise for I0 if we have it. */
3466
3467 if (i0 && GET_CODE (newpat) != CLOBBER)
3468 {
3469 if (!combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3470 0, 0, 0))
3471 {
3472 undo_all ();
3473 return 0;
3474 }
3475
3476 /* If the following substitution will modify I0SRC, make a copy of it
3477 for the case where it is substituted for I0DEST in I1PAT later. */
3478 if (added_sets_1 && i0_feeds_i1_n)
3479 i0src_copy = copy_rtx (i0src);
3480 /* And a copy for I0DEST in I2PAT substitution. */
3481 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3482 || (i0_feeds_i2_n)))
3483 i0src_copy2 = copy_rtx (i0src);
3484
3485 n_occurrences = 0;
3486 subst_low_luid = DF_INSN_LUID (i0);
3487 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3488 substed_i0 = 1;
3489 }
3490
3491 if (n_auto_inc)
3492 {
3493 int new_n_auto_inc = 0;
3494 for_each_inc_dec (newpat, count_auto_inc, &new_n_auto_inc);
3495
3496 if (n_auto_inc != new_n_auto_inc)
3497 {
3498 if (dump_file && (dump_flags & TDF_DETAILS))
3499 fprintf (dump_file, "Number of auto_inc expressions changed\n");
3500 undo_all ();
3501 return 0;
3502 }
3503 }
3504
3505 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3506 to count all the ways that I2SRC and I1SRC can be used. */
3507 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3508 && i2_is_used + added_sets_2 > 1)
3509 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3510 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3511 > 1))
3512 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3513 && (n_occurrences + added_sets_0
3514 + (added_sets_1 && i0_feeds_i1_n)
3515 + (added_sets_2 && i0_feeds_i2_n)
3516 > 1))
3517 /* Fail if we tried to make a new register. */
3518 || max_reg_num () != maxreg
3519 /* Fail if we couldn't do something and have a CLOBBER. */
3520 || GET_CODE (newpat) == CLOBBER
3521 /* Fail if this new pattern is a MULT and we didn't have one before
3522 at the outer level. */
3523 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3524 && ! have_mult))
3525 {
3526 undo_all ();
3527 return 0;
3528 }
3529
3530 /* If the actions of the earlier insns must be kept
3531 in addition to substituting them into the latest one,
3532 we must make a new PARALLEL for the latest insn
3533 to hold the additional SETs. */
3534
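/* Schematically, if NEWPAT is (set (reg 103) <combined-src>) and
   I2's set must survive, the result becomes
     (parallel [(set (reg 103) <combined-src>)
                (set (reg 102) <copy of i2src>)])
   with the kept SETs filled in from the back of the new vector.
   (Register numbers are illustrative.) */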
3535 if (added_sets_0 || added_sets_1 || added_sets_2)
3536 {
3537 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3538 combine_extras++;
3539
3540 if (GET_CODE (newpat) == PARALLEL)
3541 {
3542 rtvec old = XVEC (newpat, 0);
3543 total_sets = XVECLEN (newpat, 0) + extra_sets;
3544 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3545 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3546 sizeof (old->elem[0]) * old->num_elem);
3547 }
3548 else
3549 {
3550 rtx old = newpat;
3551 total_sets = 1 + extra_sets;
3552 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3553 XVECEXP (newpat, 0, 0) = old;
3554 }
3555
3556 if (added_sets_0)
3557 XVECEXP (newpat, 0, --total_sets) = i0pat;
3558
3559 if (added_sets_1)
3560 {
3561 rtx t = i1pat;
3562 if (i0_feeds_i1_n)
3563 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3564
3565 XVECEXP (newpat, 0, --total_sets) = t;
3566 }
3567 if (added_sets_2)
3568 {
3569 rtx t = i2pat;
3570 if (i1_feeds_i2_n)
3571 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3572 i0_feeds_i1_n && i0dest_in_i0src);
3573 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3574 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3575
3576 XVECEXP (newpat, 0, --total_sets) = t;
3577 }
3578 }
3579
3580 validate_replacement:
3581
3582 /* Note which hard regs this insn has as inputs. */
3583 mark_used_regs_combine (newpat);
3584
3585 /* If recog_for_combine fails, it strips existing clobbers. Since we
3586 may later try splitting this pattern, we might need these clobbers. */
3587 if (i1 && GET_CODE (newpat) == PARALLEL
3588 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3589 {
3590 int len = XVECLEN (newpat, 0);
3591
3592 newpat_vec_with_clobbers = rtvec_alloc (len);
3593 for (i = 0; i < len; i++)
3594 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3595 }
3596
3597 /* We have recognized nothing yet. */
3598 insn_code_number = -1;
3599
3600 /* See if this is a PARALLEL of two SETs where one SET's destination is
3601 a register that is unused and this isn't marked as an instruction that
3602 might trap in an EH region. In that case, we just need the other SET.
3603 We prefer this over the PARALLEL.
3604
3605 This can occur when simplifying a divmod insn. We *must* test for this
3606 case here because the code below that splits two independent SETs doesn't
3607 handle this case correctly when it updates the register status.
3608
3609 It's pointless doing this if we originally had two sets, one from
3610 i3, and one from i2. Combining then splitting the parallel results
3611 in the original i2 again plus an invalid insn (which we delete).
3612 The net effect is only to move instructions around, which makes
3613 debug info less accurate.
3614
3615 If the remaining SET came from I2, its destination should not be used
3616 between I2 and I3. See PR82024. */
3617
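/* E.g., a divmod-style PARALLEL (registers illustrative) such as
     (parallel [(set (reg 104) (div:SI (reg 100) (reg 101)))
                (set (reg 105) (mod:SI (reg 100) (reg 101)))])
   where I3 has a REG_UNUSED note for (reg 105) reduces to just the
   first SET. */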
3618 if (!(added_sets_2 && i1 == 0)
3619 && is_parallel_of_n_reg_sets (newpat, 2)
3620 && asm_noperands (newpat) < 0)
3621 {
3622 rtx set0 = XVECEXP (newpat, 0, 0);
3623 rtx set1 = XVECEXP (newpat, 0, 1);
3624 rtx oldpat = newpat;
3625
3626 if (((REG_P (SET_DEST (set1))
3627 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3628 || (GET_CODE (SET_DEST (set1)) == SUBREG
3629 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3630 && insn_nothrow_p (i3)
3631 && !side_effects_p (SET_SRC (set1)))
3632 {
3633 newpat = set0;
3634 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3635 }
3636
3637 else if (((REG_P (SET_DEST (set0))
3638 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3639 || (GET_CODE (SET_DEST (set0)) == SUBREG
3640 && find_reg_note (i3, REG_UNUSED,
3641 SUBREG_REG (SET_DEST (set0)))))
3642 && insn_nothrow_p (i3)
3643 && !side_effects_p (SET_SRC (set0)))
3644 {
3645 rtx dest = SET_DEST (set1);
3646 if (GET_CODE (dest) == SUBREG)
3647 dest = SUBREG_REG (dest);
3648 if (!reg_used_between_p (dest, i2, i3))
3649 {
3650 newpat = set1;
3651 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3652
3653 if (insn_code_number >= 0)
3654 changed_i3_dest = 1;
3655 }
3656 }
3657
3658 if (insn_code_number < 0)
3659 newpat = oldpat;
3660 }
3661
3662 /* Is the result of combination a valid instruction? */
3663 if (insn_code_number < 0)
3664 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3665
3666 /* If we were combining three insns and the result is a simple SET
3667 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3668 insns. There are two ways to do this. It can be split using a
3669 machine-specific method (like when you have an addition of a large
3670 constant) or by combine in the function find_split_point. */
3671
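/* Illustrative machine-specific split: on a RISC-style target,
   (set (reg 106) (plus:SI (reg 107) (const_int 0x12345)))
   may be split by the MD file into a load of the high part of the
   constant followed by an add of the low part. */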
3672 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3673 && asm_noperands (newpat) < 0)
3674 {
3675 rtx parallel, *split;
3676 rtx_insn *m_split_insn;
3677
3678 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3679 use I2DEST as a scratch register will help. In the latter case,
3680 convert I2DEST to the mode of the source of NEWPAT if we can. */
3681
3682 m_split_insn = combine_split_insns (newpat, i3);
3683
3684 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3685 inputs of NEWPAT. */
3686
3687 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3688 possible to try that as a scratch reg. This would require adding
3689 more code to make it work though. */
3690
3691 if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3692 {
3693 machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3694
3695 /* ??? Reusing i2dest without resetting the reg_stat entry for it
3696 (temporarily, until we are committed to this instruction
3697 combination) does not work: for example, any call to nonzero_bits
3698 on the register (from a splitter in the MD file, for example)
3699 will get the old information, which is invalid.
3700
3701 Since nowadays we can create registers during combine just fine,
3702 we should just create a new one here, not reuse i2dest. */
3703
3704 /* First try to split using the original register as a
3705 scratch register. */
3706 parallel = gen_rtx_PARALLEL (VOIDmode,
3707 gen_rtvec (2, newpat,
3708 gen_rtx_CLOBBER (VOIDmode,
3709 i2dest)));
3710 m_split_insn = combine_split_insns (parallel, i3);
3711
3712 /* If that didn't work, try changing the mode of I2DEST if
3713 we can. */
3714 if (m_split_insn == 0
3715 && new_mode != GET_MODE (i2dest)
3716 && new_mode != VOIDmode
3717 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3718 {
3719 machine_mode old_mode = GET_MODE (i2dest);
3720 rtx ni2dest;
3721
3722 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3723 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3724 else
3725 {
3726 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3727 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3728 }
3729
3730 parallel = (gen_rtx_PARALLEL
3731 (VOIDmode,
3732 gen_rtvec (2, newpat,
3733 gen_rtx_CLOBBER (VOIDmode,
3734 ni2dest))));
3735 m_split_insn = combine_split_insns (parallel, i3);
3736
3737 if (m_split_insn == 0
3738 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3739 {
3740 struct undo *buf;
3741
3742 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3743 buf = undobuf.undos;
3744 undobuf.undos = buf->next;
3745 buf->next = undobuf.frees;
3746 undobuf.frees = buf;
3747 }
3748 }
3749
3750 i2scratch = m_split_insn != 0;
3751 }
3752
3753 /* If recog_for_combine has discarded clobbers, try to use them
3754 again for the split. */
3755 if (m_split_insn == 0 && newpat_vec_with_clobbers)
3756 {
3757 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3758 m_split_insn = combine_split_insns (parallel, i3);
3759 }
3760
3761 if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3762 {
3763 rtx m_split_pat = PATTERN (m_split_insn);
3764 insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3765 if (insn_code_number >= 0)
3766 newpat = m_split_pat;
3767 }
3768 else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3769 && (next_nonnote_nondebug_insn (i2) == i3
3770 || !modified_between_p (PATTERN (m_split_insn), i2, i3)))
3771 {
3772 rtx i2set, i3set;
3773 rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3774 newi2pat = PATTERN (m_split_insn);
3775
3776 i3set = single_set (NEXT_INSN (m_split_insn));
3777 i2set = single_set (m_split_insn);
3778
3779 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3780
3781 /* If I2 or I3 has multiple SETs, we won't know how to track
3782 register status, so don't use these insns. If I2's destination
3783 is used between I2 and I3, we also can't use these insns. */
3784
3785 if (i2_code_number >= 0 && i2set && i3set
3786 && (next_nonnote_nondebug_insn (i2) == i3
3787 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3788 insn_code_number = recog_for_combine (&newi3pat, i3,
3789 &new_i3_notes);
3790 if (insn_code_number >= 0)
3791 newpat = newi3pat;
3792
3793 /* It is possible that both insns now set the destination of I3.
3794 If so, we must show an extra use of it. */
3795
3796 if (insn_code_number >= 0)
3797 {
3798 rtx new_i3_dest = SET_DEST (i3set);
3799 rtx new_i2_dest = SET_DEST (i2set);
3800
3801 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3802 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3803 || GET_CODE (new_i3_dest) == SUBREG)
3804 new_i3_dest = XEXP (new_i3_dest, 0);
3805
3806 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3807 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3808 || GET_CODE (new_i2_dest) == SUBREG)
3809 new_i2_dest = XEXP (new_i2_dest, 0);
3810
3811 if (REG_P (new_i3_dest)
3812 && REG_P (new_i2_dest)
3813 && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3814 && REGNO (new_i2_dest) < reg_n_sets_max)
3815 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3816 }
3817 }
3818
3819 /* If we can split it and use I2DEST, go ahead and see if that
3820 helps things be recognized. Verify that none of the registers
3821 are set between I2 and I3. */
3822 if (insn_code_number < 0
3823 && (split = find_split_point (&newpat, i3, false)) != 0
3824 && (!HAVE_cc0 || REG_P (i2dest))
3825 /* We need I2DEST in the proper mode. If it is a hard register
3826 or the only use of a pseudo, we can change its mode.
3827 Make sure we don't change a hard register to have a mode that
3828 isn't valid for it, or change the number of registers. */
3829 && (GET_MODE (*split) == GET_MODE (i2dest)
3830 || GET_MODE (*split) == VOIDmode
3831 || can_change_dest_mode (i2dest, added_sets_2,
3832 GET_MODE (*split)))
3833 && (next_nonnote_nondebug_insn (i2) == i3
3834 || !modified_between_p (*split, i2, i3))
3835 /* We can't overwrite I2DEST if its value is still used by
3836 NEWPAT. */
3837 && ! reg_referenced_p (i2dest, newpat))
3838 {
3839 rtx newdest = i2dest;
3840 enum rtx_code split_code = GET_CODE (*split);
3841 machine_mode split_mode = GET_MODE (*split);
3842 bool subst_done = false;
3843 newi2pat = NULL_RTX;
3844
3845 i2scratch = true;
3846
3847 /* *SPLIT may be part of I2SRC, so make sure we have the
3848 original expression around for later debug processing.
3849 We should not need I2SRC any more in other cases. */
3850 if (MAY_HAVE_DEBUG_BIND_INSNS)
3851 i2src = copy_rtx (i2src);
3852 else
3853 i2src = NULL;
3854
3855 /* Get NEWDEST as a register in the proper mode. We have already
3856 validated that we can do this. */
3857 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3858 {
3859 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3860 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3861 else
3862 {
3863 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3864 newdest = regno_reg_rtx[REGNO (i2dest)];
3865 }
3866 }
3867
3868 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3869 an ASHIFT. This can occur if it was inside a PLUS and hence
3870 appeared to be a memory address. This is a kludge. */
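/* E.g., (mult:SI (reg 108) (const_int 8)) is rewritten here as
   (ashift:SI (reg 108) (const_int 3)).  (Register illustrative.) */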
3871 if (split_code == MULT
3872 && CONST_INT_P (XEXP (*split, 1))
3873 && INTVAL (XEXP (*split, 1)) > 0
3874 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3875 {
3876 rtx i_rtx = gen_int_shift_amount (split_mode, i);
3877 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3878 XEXP (*split, 0), i_rtx));
3879 /* Update split_code because we may not have a multiply
3880 anymore. */
3881 split_code = GET_CODE (*split);
3882 }
3883
3884 /* Similarly for (plus (mult FOO (const_int pow2))). */
3885 if (split_code == PLUS
3886 && GET_CODE (XEXP (*split, 0)) == MULT
3887 && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3888 && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3889 && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3890 {
3891 rtx nsplit = XEXP (*split, 0);
3892 rtx i_rtx = gen_int_shift_amount (GET_MODE (nsplit), i);
3893 SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3894 XEXP (nsplit, 0),
3895 i_rtx));
3896 /* Update split_code because we may not have a multiply
3897 anymore. */
3898 split_code = GET_CODE (*split);
3899 }
3900
3901 #ifdef INSN_SCHEDULING
3902 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3903 be written as a ZERO_EXTEND. */
3904 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3905 {
3906 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3907 what it really is. */
3908 if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3909 == SIGN_EXTEND)
3910 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3911 SUBREG_REG (*split)));
3912 else
3913 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3914 SUBREG_REG (*split)));
3915 }
3916 #endif
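/* E.g., a paradoxical (subreg:SI (mem:QI ...) 0) is rewritten as
   (zero_extend:SI (mem:QI ...)), or as (sign_extend:SI ...) when
   load_extend_op says QImode loads sign-extend on this target. */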
3917
3918 /* Attempt to split binary operators using arithmetic identities. */
3919 if (BINARY_P (SET_SRC (newpat))
3920 && split_mode == GET_MODE (SET_SRC (newpat))
3921 && ! side_effects_p (SET_SRC (newpat)))
3922 {
3923 rtx setsrc = SET_SRC (newpat);
3924 machine_mode mode = GET_MODE (setsrc);
3925 enum rtx_code code = GET_CODE (setsrc);
3926 rtx src_op0 = XEXP (setsrc, 0);
3927 rtx src_op1 = XEXP (setsrc, 1);
3928
3929 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3930 if (rtx_equal_p (src_op0, src_op1))
3931 {
3932 newi2pat = gen_rtx_SET (newdest, src_op0);
3933 SUBST (XEXP (setsrc, 0), newdest);
3934 SUBST (XEXP (setsrc, 1), newdest);
3935 subst_done = true;
3936 }
3937 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3938 else if ((code == PLUS || code == MULT)
3939 && GET_CODE (src_op0) == code
3940 && GET_CODE (XEXP (src_op0, 0)) == code
3941 && (INTEGRAL_MODE_P (mode)
3942 || (FLOAT_MODE_P (mode)
3943 && flag_unsafe_math_optimizations)))
3944 {
3945 rtx p = XEXP (XEXP (src_op0, 0), 0);
3946 rtx q = XEXP (XEXP (src_op0, 0), 1);
3947 rtx r = XEXP (src_op0, 1);
3948 rtx s = src_op1;
3949
3950 /* Split both "((X op Y) op X) op Y" and
3951 "((X op Y) op Y) op X" as "T op T" where T is
3952 "X op Y". */
3953 if ((rtx_equal_p (p, r) && rtx_equal_p (q, s))
3954 || (rtx_equal_p (p, s) && rtx_equal_p (q, r)))
3955 {
3956 newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3957 SUBST (XEXP (setsrc, 0), newdest);
3958 SUBST (XEXP (setsrc, 1), newdest);
3959 subst_done = true;
3960 }
3961 /* Split "((X op X) op Y) op Y" as "T op T" where
3962 T is "X op Y". */
3963 else if (rtx_equal_p (p, q) && rtx_equal_p (r, s))
3964 {
3965 rtx tmp = simplify_gen_binary (code, mode, p, r);
3966 newi2pat = gen_rtx_SET (newdest, tmp);
3967 SUBST (XEXP (setsrc, 0), newdest);
3968 SUBST (XEXP (setsrc, 1), newdest);
3969 subst_done = true;
3970 }
3971 }
3972 }
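/* E.g., for "r = (a + b) + (a + b)" the identities above yield
   NEWI2PAT "t = a + b" and NEWPAT "r = t + t", where t is NEWDEST
   (names illustrative). */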
3973
3974 if (!subst_done)
3975 {
3976 newi2pat = gen_rtx_SET (newdest, *split);
3977 SUBST (*split, newdest);
3978 }
3979
3980 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3981
3982 /* recog_for_combine might have added CLOBBERs to newi2pat.
3983 Make sure NEWPAT does not depend on the clobbered regs. */
3984 if (GET_CODE (newi2pat) == PARALLEL)
3985 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3986 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3987 {
3988 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3989 if (reg_overlap_mentioned_p (reg, newpat))
3990 {
3991 undo_all ();
3992 return 0;
3993 }
3994 }
3995
3996 /* If the split point was a MULT and we didn't have one before,
3997 don't use one now. */
3998 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3999 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4000 }
4001 }
4002
4003 /* Check for a case where we loaded from memory in a narrow mode and
4004 then sign extended it, but we need both registers. In that case,
4005 we have a PARALLEL with both loads from the same memory location.
4006 We can split this into a load from memory followed by a register-register
4007 copy. This saves at least one insn, more if register allocation can
4008 eliminate the copy.
4009
4010 We cannot do this if the destination of the first assignment is a
4011 condition code register or cc0. We eliminate this case by making sure
4012 the SET_DEST and SET_SRC have the same mode.
4013
4014 We cannot do this if the destination of the second assignment is
4015 a register that we have already assumed is zero-extended. Similarly
4016 for a SUBREG of such a register. */
4017
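/* Schematically (registers illustrative), the PARALLEL handled
   here is
     (parallel [(set (reg:SI 110) (sign_extend:SI (mem:HI A)))
                (set (reg:HI 111) (mem:HI A))])
   which we rewrite as the extending load alone in NEWI2PAT plus a
   copy of the lowpart of (reg:SI 110) into (reg:HI 111) in NEWPAT. */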
4018 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
4019 && GET_CODE (newpat) == PARALLEL
4020 && XVECLEN (newpat, 0) == 2
4021 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
4022 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
4023 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
4024 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
4025 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
4026 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
4027 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
4028 && !modified_between_p (SET_SRC (XVECEXP (newpat, 0, 1)), i2, i3)
4029 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
4030 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
4031 && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
4032 (REG_P (temp_expr)
4033 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
4034 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4035 BITS_PER_WORD)
4036 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4037 HOST_BITS_PER_INT)
4038 && (reg_stat[REGNO (temp_expr)].nonzero_bits
4039 != GET_MODE_MASK (word_mode))))
4040 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
4041 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
4042 (REG_P (temp_expr)
4043 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
4044 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4045 BITS_PER_WORD)
4046 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4047 HOST_BITS_PER_INT)
4048 && (reg_stat[REGNO (temp_expr)].nonzero_bits
4049 != GET_MODE_MASK (word_mode)))))
4050 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
4051 SET_SRC (XVECEXP (newpat, 0, 1)))
4052 && ! find_reg_note (i3, REG_UNUSED,
4053 SET_DEST (XVECEXP (newpat, 0, 0))))
4054 {
4055 rtx ni2dest;
4056
4057 newi2pat = XVECEXP (newpat, 0, 0);
4058 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
4059 newpat = XVECEXP (newpat, 0, 1);
4060 SUBST (SET_SRC (newpat),
4061 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
4062 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4063
4064 if (i2_code_number >= 0)
4065 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4066
4067 if (insn_code_number >= 0)
4068 swap_i2i3 = 1;
4069 }
4070
4071 /* Similarly, check for a case where we have a PARALLEL of two independent
4072 SETs but we started with three insns. In this case, we can do the sets
4073 as two separate insns. This case occurs when some SET allows two
4074 other insns to combine, but the destination of that SET is still live.
4075
4076 Also do this if we started with two insns and (at least) one of the
4077 resulting sets is a noop; this noop will be deleted later.
4078
4079 Also do this if we started with two insns neither of which was a simple
4080 move. */
4081
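/* For instance (registers illustrative), if combining produced
     (parallel [(set (reg 112) (plus:SI (reg 100) (reg 101)))
                (set (reg 113) (minus:SI (reg 102) (reg 103)))])
   with the two SETs fully independent, we can emit one as the new
   I2 and the other as the new I3 instead of failing to match. */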
4082 else if (insn_code_number < 0 && asm_noperands (newpat) < 0
4083 && GET_CODE (newpat) == PARALLEL
4084 && XVECLEN (newpat, 0) == 2
4085 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
4086 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
4087 && (i1
4088 || set_noop_p (XVECEXP (newpat, 0, 0))
4089 || set_noop_p (XVECEXP (newpat, 0, 1))
4090 || (!i2_was_move && !i3_was_move))
4091 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
4092 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
4093 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
4094 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
4095 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
4096 XVECEXP (newpat, 0, 0))
4097 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
4098 XVECEXP (newpat, 0, 1))
4099 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
4100 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
4101 {
4102 rtx set0 = XVECEXP (newpat, 0, 0);
4103 rtx set1 = XVECEXP (newpat, 0, 1);
4104
4105 /* Normally, it doesn't matter which of the two is done first,
4106 but the one that references cc0 can't be the second, and
4107 one which uses any regs/memory set in between i2 and i3 can't
4108 be first. The PARALLEL might also have been pre-existing in i3,
4109 so we need to make sure that we won't wrongly hoist a SET to i2
4110 that would conflict with a death note present in there, or would
4111 have its dest modified between i2 and i3. */
4112 if (!modified_between_p (SET_SRC (set1), i2, i3)
4113 && !(REG_P (SET_DEST (set1))
4114 && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
4115 && !(GET_CODE (SET_DEST (set1)) == SUBREG
4116 && find_reg_note (i2, REG_DEAD,
4117 SUBREG_REG (SET_DEST (set1))))
4118 && !modified_between_p (SET_DEST (set1), i2, i3)
4119 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
4120 /* If I3 is a jump, ensure that set0 sets the pc, so that
4121 we do not create invalid RTL. */
4122 && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
4123 )
4124 {
4125 newi2pat = set1;
4126 newpat = set0;
4127 }
4128 else if (!modified_between_p (SET_SRC (set0), i2, i3)
4129 && !(REG_P (SET_DEST (set0))
4130 && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
4131 && !(GET_CODE (SET_DEST (set0)) == SUBREG
4132 && find_reg_note (i2, REG_DEAD,
4133 SUBREG_REG (SET_DEST (set0))))
4134 && !modified_between_p (SET_DEST (set0), i2, i3)
4135 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
4136 /* If I3 is a jump, ensure that set1 sets the pc, so that
4137 we do not create invalid RTL. */
4138 && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
4139 )
4140 {
4141 newi2pat = set0;
4142 newpat = set1;
4143 }
4144 else
4145 {
4146 undo_all ();
4147 return 0;
4148 }
4149
4150 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4151
4152 if (i2_code_number >= 0)
4153 {
4154 /* recog_for_combine might have added CLOBBERs to newi2pat.
4155 Make sure NEWPAT does not depend on the clobbered regs. */
4156 if (GET_CODE (newi2pat) == PARALLEL)
4157 {
4158 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
4159 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
4160 {
4161 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
4162 if (reg_overlap_mentioned_p (reg, newpat))
4163 {
4164 undo_all ();
4165 return 0;
4166 }
4167 }
4168 }
4169
4170 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4171
4172 if (insn_code_number >= 0)
4173 split_i2i3 = 1;
4174 }
4175 }
4176
4177 /* If it still isn't recognized, fail and change things back the way they
4178 were. */
4179 if ((insn_code_number < 0
4180 /* Is the result a reasonable ASM_OPERANDS? */
4181 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4182 {
4183 undo_all ();
4184 return 0;
4185 }
4186
4187 /* If we had to change another insn, make sure it is valid also. */
4188 if (undobuf.other_insn)
4189 {
4190 CLEAR_HARD_REG_SET (newpat_used_regs);
4191
4192 other_pat = PATTERN (undobuf.other_insn);
4193 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4194 &new_other_notes);
4195
4196 if (other_code_number < 0 && ! check_asm_operands (other_pat))
4197 {
4198 undo_all ();
4199 return 0;
4200 }
4201 }
4202
4203 /* If I2 is the CC0 setter and I3 is the CC0 user, check that they
4204 remain adjacent; if not, abandon the combination. */
4205 if (HAVE_cc0)
4206 {
4207 rtx_insn *p = prev_nonnote_insn (i3);
4208 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4209 && sets_cc0_p (newi2pat))
4210 {
4211 undo_all ();
4212 return 0;
4213 }
4214 }
4215
4216 /* Only allow this combination if insn_cost reports that the
4217 replacement instructions are cheaper than the originals. */
4218 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4219 {
4220 undo_all ();
4221 return 0;
4222 }
4223
4224 if (MAY_HAVE_DEBUG_BIND_INSNS)
4225 {
4226 struct undo *undo;
4227
4228 for (undo = undobuf.undos; undo; undo = undo->next)
4229 if (undo->kind == UNDO_MODE)
4230 {
4231 rtx reg = *undo->where.r;
4232 machine_mode new_mode = GET_MODE (reg);
4233 machine_mode old_mode = undo->old_contents.m;
4234
4235 /* Temporarily revert mode back. */
4236 adjust_reg_mode (reg, old_mode);
4237
4238 if (reg == i2dest && i2scratch)
4239 {
4240 /* If we used i2dest as a scratch register with a
4241 different mode, substitute it for the original
4242 i2src while its original mode is temporarily
4243 restored, and then clear i2scratch so that we don't
4244 do it again later. */
4245 propagate_for_debug (i2, last_combined_insn, reg, i2src,
4246 this_basic_block);
4247 i2scratch = false;
4248 /* Put back the new mode. */
4249 adjust_reg_mode (reg, new_mode);
4250 }
4251 else
4252 {
4253 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4254 rtx_insn *first, *last;
4255
4256 if (reg == i2dest)
4257 {
4258 first = i2;
4259 last = last_combined_insn;
4260 }
4261 else
4262 {
4263 first = i3;
4264 last = undobuf.other_insn;
4265 gcc_assert (last);
4266 if (DF_INSN_LUID (last)
4267 < DF_INSN_LUID (last_combined_insn))
4268 last = last_combined_insn;
4269 }
4270
4271 /* We're dealing with a reg that changed mode but not
4272 meaning, so we want to turn it into a subreg for
4273 the new mode. However, because of REG sharing and
4274 because its mode had already changed, we have to do
4275 it in two steps. First, replace any debug uses of
4276 reg, with its original mode temporarily restored,
4277 with this copy we have created; then, replace the
4278 copy with the SUBREG of the original shared reg,
4279 once again changed to the new mode. */
4280 propagate_for_debug (first, last, reg, tempreg,
4281 this_basic_block);
4282 adjust_reg_mode (reg, new_mode);
4283 propagate_for_debug (first, last, tempreg,
4284 lowpart_subreg (old_mode, reg, new_mode),
4285 this_basic_block);
4286 }
4287 }
4288 }
4289
4290 /* If we will be able to accept this, we have made a
4291 change to the destination of I3. This requires us to
4292 do a few adjustments. */
4293
4294 if (changed_i3_dest)
4295 {
4296 PATTERN (i3) = newpat;
4297 adjust_for_new_dest (i3);
4298 }
4299
4300 /* We now know that we can do this combination. Merge the insns and
4301 update the status of registers and LOG_LINKS. */
4302
4303 if (undobuf.other_insn)
4304 {
4305 rtx note, next;
4306
4307 PATTERN (undobuf.other_insn) = other_pat;
4308
4309 /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4310 ensure that they are still valid. Then add any non-duplicate
4311 notes added by recog_for_combine. */
4312 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4313 {
4314 next = XEXP (note, 1);
4315
4316 if ((REG_NOTE_KIND (note) == REG_DEAD
4317 && !reg_referenced_p (XEXP (note, 0),
4318 PATTERN (undobuf.other_insn)))
4319 || (REG_NOTE_KIND (note) == REG_UNUSED
4320 && !reg_set_p (XEXP (note, 0),
4321 PATTERN (undobuf.other_insn)))
4322 /* Simply drop the equivalence note, since it may no longer be
4323 valid for other_insn. It may be possible to record that the
4324 CC register changed and discard only those notes, but in
4325 practice that is unnecessary complication with no meaningful
4326 improvement.
4327
4328 See PR78559. */
4329 || REG_NOTE_KIND (note) == REG_EQUAL
4330 || REG_NOTE_KIND (note) == REG_EQUIV)
4331 remove_note (undobuf.other_insn, note);
4332 }
4333
4334 distribute_notes (new_other_notes, undobuf.other_insn,
4335 undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4336 NULL_RTX);
4337 }
4338
4339 if (swap_i2i3)
4340 {
4341 /* I3 now uses what used to be its destination and which is now
4342 I2's destination. This requires us to do a few adjustments. */
4343 PATTERN (i3) = newpat;
4344 adjust_for_new_dest (i3);
4345 }
4346
4347 if (swap_i2i3 || split_i2i3)
4348 {
4349 /* We might need a LOG_LINK from I3 to I2. But then we used to
4350 have one, so we still will.
4351
4352 However, some later insn might be using I2's dest and have
4353 a LOG_LINK pointing at I3. We should change it to point at
4354 I2 instead. */
4355
4356 /* newi2pat is usually a SET here; however, recog_for_combine might
4357 have added some clobbers. */
4358 rtx x = newi2pat;
4359 if (GET_CODE (x) == PARALLEL)
4360 x = XVECEXP (newi2pat, 0, 0);
4361
4362 /* It can only be a SET of a REG or of a SUBREG of a REG. */
4363 unsigned int regno = reg_or_subregno (SET_DEST (x));
4364
4365 bool done = false;
4366 for (rtx_insn *insn = NEXT_INSN (i3);
4367 !done
4368 && insn
4369 && NONDEBUG_INSN_P (insn)
4370 && BLOCK_FOR_INSN (insn) == this_basic_block;
4371 insn = NEXT_INSN (insn))
4372 {
4373 struct insn_link *link;
4374 FOR_EACH_LOG_LINK (link, insn)
4375 if (link->insn == i3 && link->regno == regno)
4376 {
4377 link->insn = i2;
4378 done = true;
4379 break;
4380 }
4381 }
4382 }
4383
4384 {
4385 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4386 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4387 rtx midnotes = 0;
4388 int from_luid;
4389 /* Compute which registers we expect to eliminate. newi2pat may be setting
4390 either i3dest or i2dest, so we must check it. */
4391 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4392 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4393 || !i2dest_killed
4394 ? 0 : i2dest);
4395 /* For i1, we need to compute both local elimination and global
4396 elimination information with respect to newi2pat because i1dest
4397 may be the same as i3dest, in which case newi2pat may be setting
4398 i1dest. Global information is used when distributing REG_DEAD
4399 note for i2 and i3, in which case it does matter if newi2pat sets
4400 i1dest or not.
4401
4402 Local information is used when distributing REG_DEAD note for i1,
4403 in which case it doesn't matter if newi2pat sets i1dest or not.
4404 See PR62151, if we have four insns combination:
4405 i0: r0 <- i0src
4406 i1: r1 <- i1src (using r0)
4407 REG_DEAD (r0)
4408 i2: r0 <- i2src (using r1)
4409 i3: r3 <- i3src (using r0)
4410 ix: using r0
4411 From i1's point of view, r0 is eliminated, no matter if it is set
4412 by newi2pat or not. In other words, REG_DEAD info for r0 in i1
4413 should be discarded.
4414
4415 Note local information only affects cases in forms like "I1->I2->I3",
4416 "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like
4417 "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4418 i0dest anyway. */
4419 rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4420 || !i1dest_killed
4421 ? 0 : i1dest);
4422 rtx elim_i1 = (local_elim_i1 == 0
4423 || (newi2pat && reg_set_p (i1dest, newi2pat))
4424 ? 0 : i1dest);
4425 /* Same case as i1. */
4426 rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4427 ? 0 : i0dest);
4428 rtx elim_i0 = (local_elim_i0 == 0
4429 || (newi2pat && reg_set_p (i0dest, newi2pat))
4430 ? 0 : i0dest);
4431
4432 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4433 clear them. */
4434 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4435 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4436 if (i1)
4437 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4438 if (i0)
4439 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4440
4441 /* Ensure that we do not have something that should not be shared but
4442 occurs multiple times in the new insns. Check this by first
4443 resetting all the `used' flags and then copying anything that is shared. */
4444
4445 reset_used_flags (i3notes);
4446 reset_used_flags (i2notes);
4447 reset_used_flags (i1notes);
4448 reset_used_flags (i0notes);
4449 reset_used_flags (newpat);
4450 reset_used_flags (newi2pat);
4451 if (undobuf.other_insn)
4452 reset_used_flags (PATTERN (undobuf.other_insn));
4453
4454 i3notes = copy_rtx_if_shared (i3notes);
4455 i2notes = copy_rtx_if_shared (i2notes);
4456 i1notes = copy_rtx_if_shared (i1notes);
4457 i0notes = copy_rtx_if_shared (i0notes);
4458 newpat = copy_rtx_if_shared (newpat);
4459 newi2pat = copy_rtx_if_shared (newi2pat);
4460 if (undobuf.other_insn)
4461 PATTERN (undobuf.other_insn) = copy_rtx_if_shared (PATTERN (undobuf.other_insn));
4462
4463 INSN_CODE (i3) = insn_code_number;
4464 PATTERN (i3) = newpat;
4465
4466 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4467 {
4468 for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
4469 link = XEXP (link, 1))
4470 {
4471 if (substed_i2)
4472 {
4473 /* I2SRC must still be meaningful at this point. Some
4474 splitting operations can invalidate I2SRC, but those
4475 operations do not apply to calls. */
4476 gcc_assert (i2src);
4477 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4478 i2dest, i2src);
4479 }
4480 if (substed_i1)
4481 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4482 i1dest, i1src);
4483 if (substed_i0)
4484 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4485 i0dest, i0src);
4486 }
4487 }
4488
4489 if (undobuf.other_insn)
4490 INSN_CODE (undobuf.other_insn) = other_code_number;
4491
4492 /* We had one special case above where I2 had more than one set and
4493 we replaced a destination of one of those sets with the destination
4494 of I3. In that case, we have to update LOG_LINKS of insns later
4495 in this basic block. Note that this (expensive) case is rare.
4496
4497 Also, in this case, we must pretend that all REG_NOTEs for I2
4498 actually came from I3, so that REG_UNUSED notes from I2 will be
4499 properly handled. */
4500
4501 if (i3_subst_into_i2)
4502 {
4503 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4504 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4505 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4506 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4507 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4508 && ! find_reg_note (i2, REG_UNUSED,
4509 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4510 for (temp_insn = NEXT_INSN (i2);
4511 temp_insn
4512 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4513 || BB_HEAD (this_basic_block) != temp_insn);
4514 temp_insn = NEXT_INSN (temp_insn))
4515 if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4516 FOR_EACH_LOG_LINK (link, temp_insn)
4517 if (link->insn == i2)
4518 link->insn = i3;
4519
4520 if (i3notes)
4521 {
4522 rtx link = i3notes;
4523 while (XEXP (link, 1))
4524 link = XEXP (link, 1);
4525 XEXP (link, 1) = i2notes;
4526 }
4527 else
4528 i3notes = i2notes;
4529 i2notes = 0;
4530 }
4531
4532 LOG_LINKS (i3) = NULL;
4533 REG_NOTES (i3) = 0;
4534 LOG_LINKS (i2) = NULL;
4535 REG_NOTES (i2) = 0;
4536
4537 if (newi2pat)
4538 {
4539 if (MAY_HAVE_DEBUG_BIND_INSNS && i2scratch)
4540 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4541 this_basic_block);
4542 INSN_CODE (i2) = i2_code_number;
4543 PATTERN (i2) = newi2pat;
4544 }
4545 else
4546 {
4547 if (MAY_HAVE_DEBUG_BIND_INSNS && i2src)
4548 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4549 this_basic_block);
4550 SET_INSN_DELETED (i2);
4551 }
4552
4553 if (i1)
4554 {
4555 LOG_LINKS (i1) = NULL;
4556 REG_NOTES (i1) = 0;
4557 if (MAY_HAVE_DEBUG_BIND_INSNS)
4558 propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4559 this_basic_block);
4560 SET_INSN_DELETED (i1);
4561 }
4562
4563 if (i0)
4564 {
4565 LOG_LINKS (i0) = NULL;
4566 REG_NOTES (i0) = 0;
4567 if (MAY_HAVE_DEBUG_BIND_INSNS)
4568 propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4569 this_basic_block);
4570 SET_INSN_DELETED (i0);
4571 }
4572
4573 /* Get death notes for everything that is now used in either I3 or
4574 I2 and used to die in a previous insn. If we built two new
4575 patterns, move from I1 to I2 then I2 to I3 so that we get the
4576 proper movement on registers that I2 modifies. */
4577
4578 if (i0)
4579 from_luid = DF_INSN_LUID (i0);
4580 else if (i1)
4581 from_luid = DF_INSN_LUID (i1);
4582 else
4583 from_luid = DF_INSN_LUID (i2);
4584 if (newi2pat)
4585 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4586 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4587
4588 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4589 if (i3notes)
4590 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4591 elim_i2, elim_i1, elim_i0);
4592 if (i2notes)
4593 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4594 elim_i2, elim_i1, elim_i0);
4595 if (i1notes)
4596 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4597 elim_i2, local_elim_i1, local_elim_i0);
4598 if (i0notes)
4599 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4600 elim_i2, elim_i1, local_elim_i0);
4601 if (midnotes)
4602 distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4603 elim_i2, elim_i1, elim_i0);
4604
4605 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4606 know these are REG_UNUSED and want them to go to the desired insn,
4607 so we always pass it as i3. */
4608
4609 if (newi2pat && new_i2_notes)
4610 distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4611 NULL_RTX);
4612
4613 if (new_i3_notes)
4614 distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4615 NULL_RTX);
4616
4617 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4618 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4619 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4620 in that case, it might delete I2. Similarly for I2 and I1.
4621 Show an additional death due to the REG_DEAD note we make here. If
4622 we discard it in distribute_notes, we will decrement it again. */
4623
4624 if (i3dest_killed)
4625 {
4626 rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4627 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4628 distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4629 elim_i1, elim_i0);
4630 else
4631 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4632 elim_i2, elim_i1, elim_i0);
4633 }
4634
4635 if (i2dest_in_i2src)
4636 {
4637 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4638 if (newi2pat && reg_set_p (i2dest, newi2pat))
4639 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4640 NULL_RTX, NULL_RTX);
4641 else
4642 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4643 NULL_RTX, NULL_RTX, NULL_RTX);
4644 }
4645
4646 if (i1dest_in_i1src)
4647 {
4648 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4649 if (newi2pat && reg_set_p (i1dest, newi2pat))
4650 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4651 NULL_RTX, NULL_RTX);
4652 else
4653 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4654 NULL_RTX, NULL_RTX, NULL_RTX);
4655 }
4656
4657 if (i0dest_in_i0src)
4658 {
4659 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4660 if (newi2pat && reg_set_p (i0dest, newi2pat))
4661 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4662 NULL_RTX, NULL_RTX);
4663 else
4664 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4665 NULL_RTX, NULL_RTX, NULL_RTX);
4666 }
4667
4668 distribute_links (i3links);
4669 distribute_links (i2links);
4670 distribute_links (i1links);
4671 distribute_links (i0links);
4672
4673 if (REG_P (i2dest))
4674 {
4675 struct insn_link *link;
4676 rtx_insn *i2_insn = 0;
4677 rtx i2_val = 0, set;
4678
4679 /* The insn that used to set this register doesn't exist, and
4680 this life of the register may not exist either. See if one of
4681 I3's links points to an insn that sets I2DEST. If it does,
4682 that is now the last known value for I2DEST. If we don't update
4683 this and I2 set the register to a value that depended on its old
4684 contents, we will get confused. If this insn is used, things
4685 will be set correctly in combine_instructions. */
4686 FOR_EACH_LOG_LINK (link, i3)
4687 if ((set = single_set (link->insn)) != 0
4688 && rtx_equal_p (i2dest, SET_DEST (set)))
4689 i2_insn = link->insn, i2_val = SET_SRC (set);
4690
4691 record_value_for_reg (i2dest, i2_insn, i2_val);
4692
4693 /* If the reg formerly set in I2 died only once and that was in I3,
4694 zero its use count so it won't make `reload' do any work. */
4695 if (! added_sets_2
4696 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4697 && ! i2dest_in_i2src
4698 && REGNO (i2dest) < reg_n_sets_max)
4699 INC_REG_N_SETS (REGNO (i2dest), -1);
4700 }
4701
4702 if (i1 && REG_P (i1dest))
4703 {
4704 struct insn_link *link;
4705 rtx_insn *i1_insn = 0;
4706 rtx i1_val = 0, set;
4707
4708 FOR_EACH_LOG_LINK (link, i3)
4709 if ((set = single_set (link->insn)) != 0
4710 && rtx_equal_p (i1dest, SET_DEST (set)))
4711 i1_insn = link->insn, i1_val = SET_SRC (set);
4712
4713 record_value_for_reg (i1dest, i1_insn, i1_val);
4714
4715 if (! added_sets_1
4716 && ! i1dest_in_i1src
4717 && REGNO (i1dest) < reg_n_sets_max)
4718 INC_REG_N_SETS (REGNO (i1dest), -1);
4719 }
4720
4721 if (i0 && REG_P (i0dest))
4722 {
4723 struct insn_link *link;
4724 rtx_insn *i0_insn = 0;
4725 rtx i0_val = 0, set;
4726
4727 FOR_EACH_LOG_LINK (link, i3)
4728 if ((set = single_set (link->insn)) != 0
4729 && rtx_equal_p (i0dest, SET_DEST (set)))
4730 i0_insn = link->insn, i0_val = SET_SRC (set);
4731
4732 record_value_for_reg (i0dest, i0_insn, i0_val);
4733
4734 if (! added_sets_0
4735 && ! i0dest_in_i0src
4736 && REGNO (i0dest) < reg_n_sets_max)
4737 INC_REG_N_SETS (REGNO (i0dest), -1);
4738 }
4739
4740 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4741 been made to this insn. The order is important, because newi2pat
4742 can affect nonzero_bits of newpat. */
4743 if (newi2pat)
4744 note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4745 note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4746 }
4747
4748 if (undobuf.other_insn != NULL_RTX)
4749 {
4750 if (dump_file)
4751 {
4752 fprintf (dump_file, "modifying other_insn ");
4753 dump_insn_slim (dump_file, undobuf.other_insn);
4754 }
4755 df_insn_rescan (undobuf.other_insn);
4756 }
4757
4758 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4759 {
4760 if (dump_file)
4761 {
4762 fprintf (dump_file, "modifying insn i0 ");
4763 dump_insn_slim (dump_file, i0);
4764 }
4765 df_insn_rescan (i0);
4766 }
4767
4768 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4769 {
4770 if (dump_file)
4771 {
4772 fprintf (dump_file, "modifying insn i1 ");
4773 dump_insn_slim (dump_file, i1);
4774 }
4775 df_insn_rescan (i1);
4776 }
4777
4778 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4779 {
4780 if (dump_file)
4781 {
4782 fprintf (dump_file, "modifying insn i2 ");
4783 dump_insn_slim (dump_file, i2);
4784 }
4785 df_insn_rescan (i2);
4786 }
4787
4788 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4789 {
4790 if (dump_file)
4791 {
4792 fprintf (dump_file, "modifying insn i3 ");
4793 dump_insn_slim (dump_file, i3);
4794 }
4795 df_insn_rescan (i3);
4796 }
4797
4798 /* Set new_direct_jump_p if a new return or simple jump instruction
4799 has been created. Adjust the CFG accordingly. */
4800 if (returnjump_p (i3) || any_uncondjump_p (i3))
4801 {
4802 *new_direct_jump_p = 1;
4803 mark_jump_label (PATTERN (i3), i3, 0);
4804 update_cfg_for_uncondjump (i3);
4805 }
4806
4807 if (undobuf.other_insn != NULL_RTX
4808 && (returnjump_p (undobuf.other_insn)
4809 || any_uncondjump_p (undobuf.other_insn)))
4810 {
4811 *new_direct_jump_p = 1;
4812 update_cfg_for_uncondjump (undobuf.other_insn);
4813 }
4814
4815 if (GET_CODE (PATTERN (i3)) == TRAP_IF
4816 && XEXP (PATTERN (i3), 0) == const1_rtx)
4817 {
4818 basic_block bb = BLOCK_FOR_INSN (i3);
4819 gcc_assert (bb);
4820 remove_edge (split_block (bb, i3));
4821 emit_barrier_after_bb (bb);
4822 *new_direct_jump_p = 1;
4823 }
4824
4825 if (undobuf.other_insn
4826 && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4827 && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4828 {
4829 basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4830 gcc_assert (bb);
4831 remove_edge (split_block (bb, undobuf.other_insn));
4832 emit_barrier_after_bb (bb);
4833 *new_direct_jump_p = 1;
4834 }
4835
4836 /* A noop (set pc pc) might also require cleaning up the CFG if it
4837 comes from the simplification of a jump. */
4838 if (JUMP_P (i3)
4839 && GET_CODE (newpat) == SET
4840 && SET_SRC (newpat) == pc_rtx
4841 && SET_DEST (newpat) == pc_rtx)
4842 {
4843 *new_direct_jump_p = 1;
4844 update_cfg_for_uncondjump (i3);
4845 }
4846
4847 if (undobuf.other_insn != NULL_RTX
4848 && JUMP_P (undobuf.other_insn)
4849 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4850 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4851 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4852 {
4853 *new_direct_jump_p = 1;
4854 update_cfg_for_uncondjump (undobuf.other_insn);
4855 }
4856
4857 combine_successes++;
4858 undo_commit ();
4859
4860 rtx_insn *ret = newi2pat ? i2 : i3;
4861 if (added_links_insn && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (ret))
4862 ret = added_links_insn;
4863 if (added_notes_insn && DF_INSN_LUID (added_notes_insn) < DF_INSN_LUID (ret))
4864 ret = added_notes_insn;
4865
4866 return ret;
4867 }
4868 \f
4869 /* Get a marker for undoing to the current state. */
4870
4871 static void *
4872 get_undo_marker (void)
4873 {
4874 return undobuf.undos;
4875 }
4876
4877 /* Undo the modifications up to the marker. */
4878
4879 static void
4880 undo_to_marker (void *marker)
4881 {
4882 struct undo *undo, *next;
4883
4884 for (undo = undobuf.undos; undo != marker; undo = next)
4885 {
4886 gcc_assert (undo);
4887
4888 next = undo->next;
4889 switch (undo->kind)
4890 {
4891 case UNDO_RTX:
4892 *undo->where.r = undo->old_contents.r;
4893 break;
4894 case UNDO_INT:
4895 *undo->where.i = undo->old_contents.i;
4896 break;
4897 case UNDO_MODE:
4898 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4899 break;
4900 case UNDO_LINKS:
4901 *undo->where.l = undo->old_contents.l;
4902 break;
4903 default:
4904 gcc_unreachable ();
4905 }
4906
4907 undo->next = undobuf.frees;
4908 undobuf.frees = undo;
4909 }
4910
4911 undobuf.undos = (struct undo *) marker;
4912 }
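/* Sketch of the intended usage from elsewhere in combine:

     void *marker = get_undo_marker ();
     SUBST (XEXP (x, 0), new_rtx);
     if (!worked)
       undo_to_marker (marker);

   Everything recorded after the marker is reverted; modifications
   made before it stay in place.  (Names here are illustrative.) */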
4913
4914 /* Undo all the modifications recorded in undobuf. */
4915
4916 static void
4917 undo_all (void)
4918 {
4919 undo_to_marker (0);
4920 }
4921
4922 /* We've committed to accepting the changes we made. Move all
4923 of the undos to the free list. */
4924
4925 static void
4926 undo_commit (void)
4927 {
4928 struct undo *undo, *next;
4929
4930 for (undo = undobuf.undos; undo; undo = next)
4931 {
4932 next = undo->next;
4933 undo->next = undobuf.frees;
4934 undobuf.frees = undo;
4935 }
4936 undobuf.undos = 0;
4937 }
4938 \f
4939 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4940 where we have an arithmetic expression and return that point. LOC will
4941 be inside INSN.
4942
4943 try_combine will call this function to see if an insn can be split into
4944 two insns. */
4945
4946 static rtx *
4947 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4948 {
4949 rtx x = *loc;
4950 enum rtx_code code = GET_CODE (x);
4951 rtx *split;
4952 unsigned HOST_WIDE_INT len = 0;
4953 HOST_WIDE_INT pos = 0;
4954 int unsignedp = 0;
4955 rtx inner = NULL_RTX;
4956 scalar_int_mode mode, inner_mode;
4957
4958 /* First special-case some codes. */
4959 switch (code)
4960 {
4961 case SUBREG:
4962 #ifdef INSN_SCHEDULING
4963 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4964 point. */
4965 if (MEM_P (SUBREG_REG (x)))
4966 return loc;
4967 #endif
4968 return find_split_point (&SUBREG_REG (x), insn, false);
4969
4970 case MEM:
4971 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4972 using LO_SUM and HIGH. */
4973 if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4974 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4975 {
4976 machine_mode address_mode = get_address_mode (x);
4977
4978 SUBST (XEXP (x, 0),
4979 gen_rtx_LO_SUM (address_mode,
4980 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4981 XEXP (x, 0)));
4982 return &XEXP (XEXP (x, 0), 0);
4983 }
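 /* Illustrative example (not from the original source): on a hi/lo
 RISC target, (mem (symbol_ref "x")) becomes
 (mem (lo_sum (high (symbol_ref "x")) (symbol_ref "x"))), and the
 returned split point is the (high ...) operand, so the high part
 can be computed by a separate insn. */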
4984
4985 /* If we have a PLUS whose second operand is a constant and the
4986 address is not valid, perhaps we can split it up using
4987 the machine-specific way to split large constants. We use
4988 the first pseudo-reg (one of the virtual regs) as a placeholder;
4989 it will not remain in the result. */
4990 if (GET_CODE (XEXP (x, 0)) == PLUS
4991 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4992 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4993 MEM_ADDR_SPACE (x)))
4994 {
4995 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4996 rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4997 subst_insn);
4998
4999 /* This should have produced two insns, each of which sets our
5000 placeholder. If the source of the second is a valid address,
5001 we can put both sources together and make a split point
5002 in the middle. */
5003
5004 if (seq
5005 && NEXT_INSN (seq) != NULL_RTX
5006 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
5007 && NONJUMP_INSN_P (seq)
5008 && GET_CODE (PATTERN (seq)) == SET
5009 && SET_DEST (PATTERN (seq)) == reg
5010 && ! reg_mentioned_p (reg,
5011 SET_SRC (PATTERN (seq)))
5012 && NONJUMP_INSN_P (NEXT_INSN (seq))
5013 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
5014 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
5015 && memory_address_addr_space_p
5016 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
5017 MEM_ADDR_SPACE (x)))
5018 {
5019 rtx src1 = SET_SRC (PATTERN (seq));
5020 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
5021
5022 /* Replace the placeholder in SRC2 with SRC1. If we can
5023 find where in SRC2 it was placed, that can become our
5024 split point and we can replace this address with SRC2.
5025 Just try two obvious places. */
5026
5027 src2 = replace_rtx (src2, reg, src1);
5028 split = 0;
5029 if (XEXP (src2, 0) == src1)
5030 split = &XEXP (src2, 0);
5031 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
5032 && XEXP (XEXP (src2, 0), 0) == src1)
5033 split = &XEXP (XEXP (src2, 0), 0);
5034
5035 if (split)
5036 {
5037 SUBST (XEXP (x, 0), src2);
5038 return split;
5039 }
5040 }
5041
5042 /* If that didn't work and we have a nested plus, like:
5043 ((REG1 * CONST1) + REG2) + CONST2, where (REG1 + REG2) + CONST2
5044 is a valid address, try to split (REG1 * CONST1). */
5045 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
5046 && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
5047 && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5048 && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SUBREG
5049 && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
5050 0), 0)))))
5051 {
5052 rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 0);
5053 XEXP (XEXP (XEXP (x, 0), 0), 0) = reg;
5054 if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5055 MEM_ADDR_SPACE (x)))
5056 {
5057 XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
5058 return &XEXP (XEXP (XEXP (x, 0), 0), 0);
5059 }
5060 XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
5061 }
5062 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
5063 && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
5064 && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5065 && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == SUBREG
5066 && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
5067 0), 1)))))
5068 {
5069 rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 1);
5070 XEXP (XEXP (XEXP (x, 0), 0), 1) = reg;
5071 if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5072 MEM_ADDR_SPACE (x)))
5073 {
5074 XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
5075 return &XEXP (XEXP (XEXP (x, 0), 0), 1);
5076 }
5077 XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
5078 }
5079
5080 /* If that didn't work, perhaps the first operand is complex and
5081 needs to be computed separately, so make a split point there.
5082 This will occur on machines that just support REG + CONST
5083 and have a constant moved through some previous computation. */
5084 if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
5085 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
5086 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
5087 return &XEXP (XEXP (x, 0), 0);
5088 }
5089
5090 /* If we have a PLUS whose first operand is complex, try computing it
5091 separately by making a split there. */
5092 if (GET_CODE (XEXP (x, 0)) == PLUS
5093 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5094 MEM_ADDR_SPACE (x))
5095 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
5096 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
5097 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
5098 return &XEXP (XEXP (x, 0), 0);
5099 break;
5100
5101 case SET:
5102 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
5103 ZERO_EXTRACT, the most likely reason why this doesn't match is that
5104 we need to put the operand into a register. So split at that
5105 point. */
5106
5107 if (SET_DEST (x) == cc0_rtx
5108 && GET_CODE (SET_SRC (x)) != COMPARE
5109 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
5110 && !OBJECT_P (SET_SRC (x))
5111 && ! (GET_CODE (SET_SRC (x)) == SUBREG
5112 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
5113 return &SET_SRC (x);
5114
5115 /* See if we can split SET_SRC as it stands. */
5116 split = find_split_point (&SET_SRC (x), insn, true);
5117 if (split && split != &SET_SRC (x))
5118 return split;
5119
5120 /* See if we can split SET_DEST as it stands. */
5121 split = find_split_point (&SET_DEST (x), insn, false);
5122 if (split && split != &SET_DEST (x))
5123 return split;
5124
5125 /* See if this is a bitfield assignment with everything constant. If
5126 so, this is an IOR of an AND, so split it into that. */
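 /* A worked instance (illustrative, assuming !BITS_BIG_ENDIAN):
 storing the constant 5 into a 4-bit field at bit 2 of D,
 (set (zero_extract D (const_int 4) (const_int 2)) (const_int 5))
 becomes D = (D & ~0x3c) | 0x14, i.e.
 (set D (ior (and D (const_int ~0x3c)) (const_int 0x14)));
 when the stored value equals the field mask, the AND is dropped
 and only the IOR is generated. */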
5127 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
5128 && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
5129 &inner_mode)
5130 && HWI_COMPUTABLE_MODE_P (inner_mode)
5131 && CONST_INT_P (XEXP (SET_DEST (x), 1))
5132 && CONST_INT_P (XEXP (SET_DEST (x), 2))
5133 && CONST_INT_P (SET_SRC (x))
5134 && ((INTVAL (XEXP (SET_DEST (x), 1))
5135 + INTVAL (XEXP (SET_DEST (x), 2)))
5136 <= GET_MODE_PRECISION (inner_mode))
5137 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
5138 {
5139 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
5140 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
5141 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
5142 rtx dest = XEXP (SET_DEST (x), 0);
5143 unsigned HOST_WIDE_INT mask
5144 = (HOST_WIDE_INT_1U << len) - 1;
5145 rtx or_mask;
5146
5147 if (BITS_BIG_ENDIAN)
5148 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5149
5150 or_mask = gen_int_mode (src << pos, inner_mode);
5151 if (src == mask)
5152 SUBST (SET_SRC (x),
5153 simplify_gen_binary (IOR, inner_mode, dest, or_mask));
5154 else
5155 {
5156 rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
5157 SUBST (SET_SRC (x),
5158 simplify_gen_binary (IOR, inner_mode,
5159 simplify_gen_binary (AND, inner_mode,
5160 dest, negmask),
5161 or_mask));
5162 }
5163
5164 SUBST (SET_DEST (x), dest);
5165
5166 split = find_split_point (&SET_SRC (x), insn, true);
5167 if (split && split != &SET_SRC (x))
5168 return split;
5169 }
5170
5171 /* Otherwise, see if this is an operation that we can split into two.
5172 If so, try to split that. */
5173 code = GET_CODE (SET_SRC (x));
5174
5175 switch (code)
5176 {
5177 case AND:
5178 /* If we are AND'ing with a large constant that is only a single
5179 bit and the result is only being used in a context where we
5180 need to know if it is zero or nonzero, replace it with a bit
5181 extraction. This will avoid the large constant, which might
5182 have taken more than one insn to make. If the constant were
5183 not a valid argument to the AND but took only one insn to make,
5184 this is no worse, but if it took more than one insn, it will
5185 be better. */
5186
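 /* Illustrative (hypothetical operands): given
 (set R (and X (const_int 0x1000)))
 whose only use is (ne R 0), pos is 12 and the AND is replaced by
 a one-bit extraction such as
 (zero_extract X (const_int 1) (const_int 12)),
 avoiding a constant that may have needed an extra insn to build. */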
5187 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5188 && REG_P (XEXP (SET_SRC (x), 0))
5189 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
5190 && REG_P (SET_DEST (x))
5191 && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
5192 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
5193 && XEXP (*split, 0) == SET_DEST (x)
5194 && XEXP (*split, 1) == const0_rtx)
5195 {
5196 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
5197 XEXP (SET_SRC (x), 0),
5198 pos, NULL_RTX, 1, 1, 0, 0);
5199 if (extraction != 0)
5200 {
5201 SUBST (SET_SRC (x), extraction);
5202 return find_split_point (loc, insn, false);
5203 }
5204 }
5205 break;
5206
5207 case NE:
5208 /* If STORE_FLAG_VALUE is -1 and this is (NE X 0) where only one bit
5209 of X is known to be on, it can be converted into a NEG of a shift. */
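 /* For instance (illustrative): if only bit 3 of X can be nonzero,
 (ne X 0) becomes (neg (lshiftrt X (const_int 3))), which yields
 0 or -1 exactly as STORE_FLAG_VALUE == -1 requires. */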
5210 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
5211 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
5212 && ((pos = exact_log2 (nonzero_bits (XEXP (SET_SRC (x), 0),
5213 GET_MODE (XEXP (SET_SRC (x),
5214 0))))) >= 1))
5215 {
5216 machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
5217 rtx pos_rtx = gen_int_shift_amount (mode, pos);
5218 SUBST (SET_SRC (x),
5219 gen_rtx_NEG (mode,
5220 gen_rtx_LSHIFTRT (mode,
5221 XEXP (SET_SRC (x), 0),
5222 pos_rtx)));
5223
5224 split = find_split_point (&SET_SRC (x), insn, true);
5225 if (split && split != &SET_SRC (x))
5226 return split;
5227 }
5228 break;
5229
5230 case SIGN_EXTEND:
5231 inner = XEXP (SET_SRC (x), 0);
5232
5233 /* We can't optimize if either mode is a partial integer
5234 mode as we don't know how many bits are significant
5235 in those modes. */
5236 if (!is_int_mode (GET_MODE (inner), &inner_mode)
5237 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5238 break;
5239
5240 pos = 0;
5241 len = GET_MODE_PRECISION (inner_mode);
5242 unsignedp = 0;
5243 break;
5244
5245 case SIGN_EXTRACT:
5246 case ZERO_EXTRACT:
5247 if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
5248 &inner_mode)
5249 && CONST_INT_P (XEXP (SET_SRC (x), 1))
5250 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5251 {
5252 inner = XEXP (SET_SRC (x), 0);
5253 len = INTVAL (XEXP (SET_SRC (x), 1));
5254 pos = INTVAL (XEXP (SET_SRC (x), 2));
5255
5256 if (BITS_BIG_ENDIAN)
5257 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5258 unsignedp = (code == ZERO_EXTRACT);
5259 }
5260 break;
5261
5262 default:
5263 break;
5264 }
5265
5266 if (len
5267 && known_subrange_p (pos, len,
5268 0, GET_MODE_PRECISION (GET_MODE (inner)))
5269 && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
5270 {
5271 /* For unsigned, we have a choice of a shift followed by an
5272 AND or two shifts. Use two shifts for field sizes where the
5273 constant might be too large. We assume here that we can
5274 always at least get 8-bit constants in an AND insn, which is
5275 true for every current RISC. */
5276
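 /* E.g. (illustrative, SImode): an unsigned 8-bit field at bit 4
 becomes (and (lshiftrt INNER (const_int 4)) (const_int 255));
 a signed or wider field instead uses the two-shift form
 (ashiftrt (ashift INNER (const_int 20)) (const_int 24)). */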
5277 if (unsignedp && len <= 8)
5278 {
5279 unsigned HOST_WIDE_INT mask
5280 = (HOST_WIDE_INT_1U << len) - 1;
5281 rtx pos_rtx = gen_int_shift_amount (mode, pos);
5282 SUBST (SET_SRC (x),
5283 gen_rtx_AND (mode,
5284 gen_rtx_LSHIFTRT
5285 (mode, gen_lowpart (mode, inner), pos_rtx),
5286 gen_int_mode (mask, mode)));
5287
5288 split = find_split_point (&SET_SRC (x), insn, true);
5289 if (split && split != &SET_SRC (x))
5290 return split;
5291 }
5292 else
5293 {
5294 int left_bits = GET_MODE_PRECISION (mode) - len - pos;
5295 int right_bits = GET_MODE_PRECISION (mode) - len;
5296 SUBST (SET_SRC (x),
5297 gen_rtx_fmt_ee
5298 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5299 gen_rtx_ASHIFT (mode,
5300 gen_lowpart (mode, inner),
5301 gen_int_shift_amount (mode, left_bits)),
5302 gen_int_shift_amount (mode, right_bits)));
5303
5304 split = find_split_point (&SET_SRC (x), insn, true);
5305 if (split && split != &SET_SRC (x))
5306 return split;
5307 }
5308 }
5309
5310 /* See if this is a simple operation with a constant as the second
5311 operand. It might be that this constant is out of range and hence
5312 could be used as a split point. */
5313 if (BINARY_P (SET_SRC (x))
5314 && CONSTANT_P (XEXP (SET_SRC (x), 1))
5315 && (OBJECT_P (XEXP (SET_SRC (x), 0))
5316 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5317 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5318 return &XEXP (SET_SRC (x), 1);
5319
5320 /* Finally, see if this is a simple operation with its first operand
5321 not in a register. The operation might require this operand in a
5322 register, so return it as a split point. We can always do this
5323 because if the first operand were another operation, we would have
5324 already found it as a split point. */
5325 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5326 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5327 return &XEXP (SET_SRC (x), 0);
5328
5329 return 0;
5330
5331 case AND:
5332 case IOR:
5333 /* We write NOR as (and (not A) (not B)); if we don't have a NOR insn,
5334 it is better to write this as (not (ior A B)) so we can split it.
5335 Similarly, (ior (not A) (not B)) becomes (not (and A B)). */
5336 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5337 {
5338 SUBST (*loc,
5339 gen_rtx_NOT (GET_MODE (x),
5340 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5341 GET_MODE (x),
5342 XEXP (XEXP (x, 0), 0),
5343 XEXP (XEXP (x, 1), 0))));
5344 return find_split_point (loc, insn, set_src);
5345 }
5346
5347 /* Many RISC machines have a large set of logical insns. If the
5348 second operand is a NOT, put it first so we will try to split the
5349 other operand first. */
5350 if (GET_CODE (XEXP (x, 1)) == NOT)
5351 {
5352 rtx tem = XEXP (x, 0);
5353 SUBST (XEXP (x, 0), XEXP (x, 1));
5354 SUBST (XEXP (x, 1), tem);
5355 }
5356 break;
5357
5358 case PLUS:
5359 case MINUS:
5360 /* Canonicalization can produce (minus A (mult B C)), where C is a
5361 constant. It may be better to try splitting (plus (mult B -C) A)
5362 instead if this isn't a multiply by a power of two. */
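 /* E.g. (illustrative): (minus A (mult B (const_int 3))) is retried
 as (plus (mult B (const_int -3)) A), which may match a
 multiply-add pattern. */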
5363 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5364 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5365 && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5366 {
5367 machine_mode mode = GET_MODE (x);
5368 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5369 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5370 SUBST (*loc, gen_rtx_PLUS (mode,
5371 gen_rtx_MULT (mode,
5372 XEXP (XEXP (x, 1), 0),
5373 gen_int_mode (other_int,
5374 mode)),
5375 XEXP (x, 0)));
5376 return find_split_point (loc, insn, set_src);
5377 }
5378
5379 /* Split at a multiply-accumulate instruction. However, if this is
5380 the SET_SRC, we likely do not have such an instruction and it's
5381 pointless to try this split. */
5382 if (!set_src
5383 && (GET_CODE (XEXP (x, 0)) == MULT
5384 || (GET_CODE (XEXP (x, 0)) == ASHIFT
5385 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5386 return loc;
5387
5388 default:
5389 break;
5390 }
5391
5392 /* Otherwise, select our actions depending on our rtx class. */
5393 switch (GET_RTX_CLASS (code))
5394 {
5395 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
5396 case RTX_TERNARY:
5397 split = find_split_point (&XEXP (x, 2), insn, false);
5398 if (split)
5399 return split;
5400 /* fall through */
5401 case RTX_BIN_ARITH:
5402 case RTX_COMM_ARITH:
5403 case RTX_COMPARE:
5404 case RTX_COMM_COMPARE:
5405 split = find_split_point (&XEXP (x, 1), insn, false);
5406 if (split)
5407 return split;
5408 /* fall through */
5409 case RTX_UNARY:
5410 /* Some machines have (and (shift ...) ...) insns. If X is not
5411 an AND, but XEXP (X, 0) is, use it as our split point. */
5412 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5413 return &XEXP (x, 0);
5414
5415 split = find_split_point (&XEXP (x, 0), insn, false);
5416 if (split)
5417 return split;
5418 return loc;
5419
5420 default:
5421 /* Otherwise, we don't have a split point. */
5422 return 0;
5423 }
5424 }
5425 \f
5426 /* Throughout X, replace FROM with TO, and return the result.
5427 The result is TO if X is FROM;
5428 otherwise the result is X, but its contents may have been modified.
5429 If they were modified, a record was made in undobuf so that
5430 undo_all will (among other things) return X to its original state.
5431
5432 If the number of changes necessary is too much to record to undo,
5433 the excess changes are not made, so the result is invalid.
5434 The changes already made can still be undone.
5435 undobuf.num_undo is incremented for such changes, so by testing that
5436 the caller can tell whether the result is valid.
5437
5438 `n_occurrences' is incremented each time FROM is replaced.
5439
5440 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5441
5442 IN_COND is nonzero if we are at the top level of a condition.
5443
5444 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5445 by copying if `n_occurrences' is nonzero. */
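
 /* Illustrative call (an assumption about a typical caller, matching
 the description above): substituting I2's source for its
 destination register throughout the pattern of I3:

 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0, 0);

 A (clobber (const_int 0)) anywhere in the result marks failure. */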
5446
5447 static rtx
5448 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5449 {
5450 enum rtx_code code = GET_CODE (x);
5451 machine_mode op0_mode = VOIDmode;
5452 const char *fmt;
5453 int len, i;
5454 rtx new_rtx;
5455
5456 /* Two expressions are equal if they are identical copies of a shared
5457 RTX or if they are both registers with the same register number
5458 and mode. */
5459
5460 #define COMBINE_RTX_EQUAL_P(X,Y) \
5461 ((X) == (Y) \
5462 || (REG_P (X) && REG_P (Y) \
5463 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5464
5465 /* Do not substitute into clobbers of regs -- this will never result in
5466 valid RTL. */
5467 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5468 return x;
5469
5470 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5471 {
5472 n_occurrences++;
5473 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5474 }
5475
5476 /* If X and FROM are the same register but different modes, they
5477 will not have been seen as equal above. However, the log links code
5478 will make a LOG_LINKS entry for that case. If we do nothing, we
5479 will try to rerecognize our original insn and, when it succeeds,
5480 we will delete the feeding insn, which is incorrect.
5481
5482 So force this insn not to match in this (rare) case. */
5483 if (! in_dest && code == REG && REG_P (from)
5484 && reg_overlap_mentioned_p (x, from))
5485 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5486
5487 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5488 of which may contain things that can be combined. */
5489 if (code != MEM && code != LO_SUM && OBJECT_P (x))
5490 return x;
5491
5492 /* It is possible to have a subexpression appear twice in the insn.
5493 Suppose that FROM is a register that appears within TO.
5494 Then, after that subexpression has been scanned once by `subst',
5495 the second time it is scanned, TO may be found. If we were
5496 to scan TO here, we would find FROM within it and create a
5497 self-referential rtl structure, which is completely wrong. */
5498 if (COMBINE_RTX_EQUAL_P (x, to))
5499 return to;
5500
5501 /* Parallel asm_operands need special attention because all of the
5502 inputs are shared across the arms. Furthermore, unsharing the
5503 rtl results in recognition failures. Failure to handle this case
5504 specially can result in circular rtl.
5505
5506 Solve this by doing a normal pass across the first entry of the
5507 parallel, and only processing the SET_DESTs of the subsequent
5508 entries. Ug. */
5509
5510 if (code == PARALLEL
5511 && GET_CODE (XVECEXP (x, 0, 0)) == SET
5512 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5513 {
5514 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5515
5516 /* If this substitution failed, this whole thing fails. */
5517 if (GET_CODE (new_rtx) == CLOBBER
5518 && XEXP (new_rtx, 0) == const0_rtx)
5519 return new_rtx;
5520
5521 SUBST (XVECEXP (x, 0, 0), new_rtx);
5522
5523 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5524 {
5525 rtx dest = SET_DEST (XVECEXP (x, 0, i));
5526
5527 if (!REG_P (dest)
5528 && GET_CODE (dest) != CC0
5529 && GET_CODE (dest) != PC)
5530 {
5531 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5532
5533 /* If this substitution failed, this whole thing fails. */
5534 if (GET_CODE (new_rtx) == CLOBBER
5535 && XEXP (new_rtx, 0) == const0_rtx)
5536 return new_rtx;
5537
5538 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5539 }
5540 }
5541 }
5542 else
5543 {
5544 len = GET_RTX_LENGTH (code);
5545 fmt = GET_RTX_FORMAT (code);
5546
5547 /* We don't need to process a SET_DEST that is a register, CC0,
5548 or PC, so set up to skip this common case. All other cases
5549 where we want to suppress replacing something inside a
5550 SET_SRC are handled via the IN_DEST operand. */
5551 if (code == SET
5552 && (REG_P (SET_DEST (x))
5553 || GET_CODE (SET_DEST (x)) == CC0
5554 || GET_CODE (SET_DEST (x)) == PC))
5555 fmt = "ie";
5556
5557 /* Trying to simplify the operands of a widening MULT is not likely
5558 to create RTL matching a machine insn. */
5559 if (code == MULT
5560 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5561 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
5562 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
5563 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
5564 && REG_P (XEXP (XEXP (x, 0), 0))
5565 && REG_P (XEXP (XEXP (x, 1), 0))
5566 && from == to)
5567 return x;
5568
5569
5570 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5571 constant. */
5572 if (fmt[0] == 'e')
5573 op0_mode = GET_MODE (XEXP (x, 0));
5574
5575 for (i = 0; i < len; i++)
5576 {
5577 if (fmt[i] == 'E')
5578 {
5579 int j;
5580 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5581 {
5582 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5583 {
5584 new_rtx = (unique_copy && n_occurrences
5585 ? copy_rtx (to) : to);
5586 n_occurrences++;
5587 }
5588 else
5589 {
5590 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5591 unique_copy);
5592
5593 /* If this substitution failed, this whole thing
5594 fails. */
5595 if (GET_CODE (new_rtx) == CLOBBER
5596 && XEXP (new_rtx, 0) == const0_rtx)
5597 return new_rtx;
5598 }
5599
5600 SUBST (XVECEXP (x, i, j), new_rtx);
5601 }
5602 }
5603 else if (fmt[i] == 'e')
5604 {
5605 /* If this is a register being set, ignore it. */
5606 new_rtx = XEXP (x, i);
5607 if (in_dest
5608 && i == 0
5609 && (((code == SUBREG || code == ZERO_EXTRACT)
5610 && REG_P (new_rtx))
5611 || code == STRICT_LOW_PART))
5612 ;
5613
5614 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5615 {
5616 /* In general, don't install a subreg involving two
5617 modes that are not tieable. It can worsen register
5618 allocation, and can even make invalid reload
5619 insns, since the reg inside may need to be copied
5620 from in the outside mode, and that may be invalid
5621 if it is an fp reg copied in integer mode.
5622
5623 We allow two exceptions to this: It is valid if
5624 it is inside another SUBREG and the mode of that
5625 SUBREG and the mode of the inside of TO is
5626 tieable and it is valid if X is a SET that copies
5627 FROM to CC0. */
5628
5629 if (GET_CODE (to) == SUBREG
5630 && !targetm.modes_tieable_p (GET_MODE (to),
5631 GET_MODE (SUBREG_REG (to)))
5632 && ! (code == SUBREG
5633 && (targetm.modes_tieable_p
5634 (GET_MODE (x), GET_MODE (SUBREG_REG (to)))))
5635 && (!HAVE_cc0
5636 || (! (code == SET
5637 && i == 1
5638 && XEXP (x, 0) == cc0_rtx))))
5639 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5640
5641 if (code == SUBREG
5642 && REG_P (to)
5643 && REGNO (to) < FIRST_PSEUDO_REGISTER
5644 && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5645 SUBREG_BYTE (x),
5646 GET_MODE (x)) < 0)
5647 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5648
5649 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5650 n_occurrences++;
5651 }
5652 else
5653 /* If we are in a SET_DEST, suppress most cases unless we
5654 have gone inside a MEM, in which case we want to
5655 simplify the address. We assume here that things that
5656 are actually part of the destination have their inner
5657 parts in the first expression. This is true for SUBREG,
5658 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5659 things aside from REG and MEM that should appear in a
5660 SET_DEST. */
5661 new_rtx = subst (XEXP (x, i), from, to,
5662 (((in_dest
5663 && (code == SUBREG || code == STRICT_LOW_PART
5664 || code == ZERO_EXTRACT))
5665 || code == SET)
5666 && i == 0),
5667 code == IF_THEN_ELSE && i == 0,
5668 unique_copy);
5669
5670 /* If we found that we will have to reject this combination,
5671 indicate that by returning the CLOBBER ourselves, rather than
5672 an expression containing it. This will speed things up as
5673 well as prevent accidents where two CLOBBERs are considered
5674 to be equal, thus producing an incorrect simplification. */
5675
5676 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5677 return new_rtx;
5678
5679 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5680 {
5681 machine_mode mode = GET_MODE (x);
5682
5683 x = simplify_subreg (GET_MODE (x), new_rtx,
5684 GET_MODE (SUBREG_REG (x)),
5685 SUBREG_BYTE (x));
5686 if (! x)
5687 x = gen_rtx_CLOBBER (mode, const0_rtx);
5688 }
5689 else if (CONST_SCALAR_INT_P (new_rtx)
5690 && (GET_CODE (x) == ZERO_EXTEND
5691 || GET_CODE (x) == FLOAT
5692 || GET_CODE (x) == UNSIGNED_FLOAT))
5693 {
5694 x = simplify_unary_operation (GET_CODE (x), GET_MODE (x),
5695 new_rtx,
5696 GET_MODE (XEXP (x, 0)));
5697 if (!x)
5698 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5699 }
5700 else
5701 SUBST (XEXP (x, i), new_rtx);
5702 }
5703 }
5704 }
5705
5706 /* Check if we are loading something from the constant pool via float
5707 extension; in this case we would undo the compress_float_constant
5708 optimization and degrade the constant load to an immediate value. */
5709 if (GET_CODE (x) == FLOAT_EXTEND
5710 && MEM_P (XEXP (x, 0))
5711 && MEM_READONLY_P (XEXP (x, 0)))
5712 {
5713 rtx tmp = avoid_constant_pool_reference (x);
5714 if (x != tmp)
5715 return x;
5716 }
5717
5718 /* Try to simplify X. If the simplification changed the code, it is likely
5719 that further simplification will help, so loop, but limit the number
5720 of repetitions that will be performed. */
5721
5722 for (i = 0; i < 4; i++)
5723 {
5724 /* If X is sufficiently simple, don't bother trying to do anything
5725 with it. */
5726 if (code != CONST_INT && code != REG && code != CLOBBER)
5727 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5728
5729 if (GET_CODE (x) == code)
5730 break;
5731
5732 code = GET_CODE (x);
5733
5734 /* We no longer know the original mode of operand 0 since we
5735 have changed the form of X. */
5736 op0_mode = VOIDmode;
5737 }
5738
5739 return x;
5740 }
5741 \f
5742 /* If X is a commutative operation whose operands are not in the canonical
5743 order, use substitutions to swap them. */
5744
5745 static void
5746 maybe_swap_commutative_operands (rtx x)
5747 {
5748 if (COMMUTATIVE_ARITH_P (x)
5749 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5750 {
5751 rtx temp = XEXP (x, 0);
5752 SUBST (XEXP (x, 0), XEXP (x, 1));
5753 SUBST (XEXP (x, 1), temp);
5754 }
5755 }
5756
5757 /* Simplify X, a piece of RTL. We just operate on the expression at the
5758 outer level; call `subst' to simplify recursively. Return the new
5759 expression.
5760
5761 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5762 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5763 of a condition. */
5764
5765 static rtx
5766 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5767 int in_cond)
5768 {
5769 enum rtx_code code = GET_CODE (x);
5770 machine_mode mode = GET_MODE (x);
5771 scalar_int_mode int_mode;
5772 rtx temp;
5773 int i;
5774
5775 /* If this is a commutative operation, put a constant last and a complex
5776 expression first. We don't need to do this for comparisons here. */
5777 maybe_swap_commutative_operands (x);
5778
5779 /* Try to fold this expression in case we have constants that weren't
5780 present before. */
5781 temp = 0;
5782 switch (GET_RTX_CLASS (code))
5783 {
5784 case RTX_UNARY:
5785 if (op0_mode == VOIDmode)
5786 op0_mode = GET_MODE (XEXP (x, 0));
5787 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5788 break;
5789 case RTX_COMPARE:
5790 case RTX_COMM_COMPARE:
5791 {
5792 machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5793 if (cmp_mode == VOIDmode)
5794 {
5795 cmp_mode = GET_MODE (XEXP (x, 1));
5796 if (cmp_mode == VOIDmode)
5797 cmp_mode = op0_mode;
5798 }
5799 temp = simplify_relational_operation (code, mode, cmp_mode,
5800 XEXP (x, 0), XEXP (x, 1));
5801 }
5802 break;
5803 case RTX_COMM_ARITH:
5804 case RTX_BIN_ARITH:
5805 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5806 break;
5807 case RTX_BITFIELD_OPS:
5808 case RTX_TERNARY:
5809 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5810 XEXP (x, 1), XEXP (x, 2));
5811 break;
5812 default:
5813 break;
5814 }
5815
5816 if (temp)
5817 {
5818 x = temp;
5819 code = GET_CODE (temp);
5820 op0_mode = VOIDmode;
5821 mode = GET_MODE (temp);
5822 }
5823
5824 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5825 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5826 things. Check for cases where both arms are testing the same
5827 condition.
5828
5829 Don't do anything if all operands are very simple. */
5830
5831 if ((BINARY_P (x)
5832 && ((!OBJECT_P (XEXP (x, 0))
5833 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5834 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5835 || (!OBJECT_P (XEXP (x, 1))
5836 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5837 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5838 || (UNARY_P (x)
5839 && (!OBJECT_P (XEXP (x, 0))
5840 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5841 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5842 {
5843 rtx cond, true_rtx, false_rtx;
5844
5845 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5846 if (cond != 0
5847 /* If everything is a comparison, what we have is highly unlikely
5848 to be simpler, so don't use it. */
5849 && ! (COMPARISON_P (x)
5850 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx)))
5851 /* Similarly, if we end up with one of the expressions the same
5852 as the original, it is certainly not simpler. */
5853 && ! rtx_equal_p (x, true_rtx)
5854 && ! rtx_equal_p (x, false_rtx))
5855 {
5856 rtx cop1 = const0_rtx;
5857 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5858
5859 if (cond_code == NE && COMPARISON_P (cond))
5860 return x;
5861
5862 /* Simplify the alternative arms; this may collapse the true and
5863 false arms to store-flag values. Be careful to use copy_rtx
5864 here since true_rtx or false_rtx might share RTL with x as a
5865 result of the if_then_else_cond call above. */
5866 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5867 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5868
5869 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5870 is unlikely to be simpler. */
5871 if (general_operand (true_rtx, VOIDmode)
5872 && general_operand (false_rtx, VOIDmode))
5873 {
5874 enum rtx_code reversed;
5875
5876 /* Restarting if we generate a store-flag expression will cause
5877 us to loop. Just drop through in this case. */
5878
5879 /* If the result values are STORE_FLAG_VALUE and zero, we can
5880 just make the comparison operation. */
5881 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5882 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5883 cond, cop1);
5884 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5885 && ((reversed = reversed_comparison_code_parts
5886 (cond_code, cond, cop1, NULL))
5887 != UNKNOWN))
5888 x = simplify_gen_relational (reversed, mode, VOIDmode,
5889 cond, cop1);
5890
5891 /* Likewise, we can make the negate of a comparison operation
5892 if the result values are - STORE_FLAG_VALUE and zero. */
5893 else if (CONST_INT_P (true_rtx)
5894 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5895 && false_rtx == const0_rtx)
5896 x = simplify_gen_unary (NEG, mode,
5897 simplify_gen_relational (cond_code,
5898 mode, VOIDmode,
5899 cond, cop1),
5900 mode);
5901 else if (CONST_INT_P (false_rtx)
5902 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5903 && true_rtx == const0_rtx
5904 && ((reversed = reversed_comparison_code_parts
5905 (cond_code, cond, cop1, NULL))
5906 != UNKNOWN))
5907 x = simplify_gen_unary (NEG, mode,
5908 simplify_gen_relational (reversed,
5909 mode, VOIDmode,
5910 cond, cop1),
5911 mode);
5912
5913 code = GET_CODE (x);
5914 op0_mode = VOIDmode;
5915 }
5916 }
5917 }
5918
5919 /* First see if we can apply the inverse distributive law. */
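 /* E.g. (illustrative): (ior (and A B) (and A C)) can become
 (and A (ior B C)). */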
5920 if (code == PLUS || code == MINUS
5921 || code == AND || code == IOR || code == XOR)
5922 {
5923 x = apply_distributive_law (x);
5924 code = GET_CODE (x);
5925 op0_mode = VOIDmode;
5926 }
5927
5928 /* If CODE is an associative operation not otherwise handled, see if we
5929 can associate some operands. This can win if they are constants or
5930 if they are logically related (i.e. (a & b) & a). */
5931 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5932 || code == AND || code == IOR || code == XOR
5933 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5934 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5935 || (flag_associative_math && FLOAT_MODE_P (mode))))
5936 {
5937 if (GET_CODE (XEXP (x, 0)) == code)
5938 {
5939 rtx other = XEXP (XEXP (x, 0), 0);
5940 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5941 rtx inner_op1 = XEXP (x, 1);
5942 rtx inner;
5943
5944 /* Make sure we pass the constant operand if any as the second
5945 one if this is a commutative operation. */
5946 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5947 std::swap (inner_op0, inner_op1);
5948 inner = simplify_binary_operation (code == MINUS ? PLUS
5949 : code == DIV ? MULT
5950 : code,
5951 mode, inner_op0, inner_op1);
5952
5953 /* For commutative operations, try the other pair if that one
5954 didn't simplify. */
5955 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5956 {
5957 other = XEXP (XEXP (x, 0), 1);
5958 inner = simplify_binary_operation (code, mode,
5959 XEXP (XEXP (x, 0), 0),
5960 XEXP (x, 1));
5961 }
5962
5963 if (inner)
5964 return simplify_gen_binary (code, mode, other, inner);
5965 }
5966 }
5967
5968 /* A little bit of algebraic simplification here. */
5969 switch (code)
5970 {
5971 case MEM:
5972 /* Ensure that our address has any ASHIFTs converted to MULT in case
5973 address-recognizing predicates are called later. */
5974 temp = make_compound_operation (XEXP (x, 0), MEM);
5975 SUBST (XEXP (x, 0), temp);
5976 break;
5977
5978 case SUBREG:
5979 if (op0_mode == VOIDmode)
5980 op0_mode = GET_MODE (SUBREG_REG (x));
5981
5982 /* See if this can be moved to simplify_subreg. */
5983 if (CONSTANT_P (SUBREG_REG (x))
5984 && known_eq (subreg_lowpart_offset (mode, op0_mode), SUBREG_BYTE (x))
5985 /* Don't call gen_lowpart if the inner mode
5986 is VOIDmode and we cannot simplify it, as SUBREG without
5987 inner mode is invalid. */
5988 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5989 || gen_lowpart_common (mode, SUBREG_REG (x))))
5990 return gen_lowpart (mode, SUBREG_REG (x));
5991
5992 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5993 break;
5994 {
5995 rtx temp;
5996 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5997 SUBREG_BYTE (x));
5998 if (temp)
5999 return temp;
6000
6001 /* If op is known to have all lower bits zero, the result is zero. */
6002 scalar_int_mode int_mode, int_op0_mode;
6003 if (!in_dest
6004 && is_a <scalar_int_mode> (mode, &int_mode)
6005 && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
6006 && (GET_MODE_PRECISION (int_mode)
6007 < GET_MODE_PRECISION (int_op0_mode))
6008 && known_eq (subreg_lowpart_offset (int_mode, int_op0_mode),
6009 SUBREG_BYTE (x))
6010 && HWI_COMPUTABLE_MODE_P (int_op0_mode)
6011 && ((nonzero_bits (SUBREG_REG (x), int_op0_mode)
6012 & GET_MODE_MASK (int_mode)) == 0)
6013 && !side_effects_p (SUBREG_REG (x)))
6014 return CONST0_RTX (int_mode);
6015 }
6016
6017 /* Don't change the mode of the MEM if that would change the meaning
6018 of the address. */
6019 if (MEM_P (SUBREG_REG (x))
6020 && (MEM_VOLATILE_P (SUBREG_REG (x))
6021 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
6022 MEM_ADDR_SPACE (SUBREG_REG (x)))))
6023 return gen_rtx_CLOBBER (mode, const0_rtx);
6024
6025 /* Note that we cannot do any narrowing for non-constants since
6026 we might have been counting on using the fact that some bits were
6027 zero. We now do this in the SET. */
6028
6029 break;
6030
6031 case NEG:
6032 temp = expand_compound_operation (XEXP (x, 0));
6033
6034 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
6035 replaced by (lshiftrt X C). This will convert
6036 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
6037
6038 if (GET_CODE (temp) == ASHIFTRT
6039 && CONST_INT_P (XEXP (temp, 1))
6040 && INTVAL (XEXP (temp, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
6041 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
6042 INTVAL (XEXP (temp, 1)));
6043
6044 /* If X has only a single bit that might be nonzero, say, bit I, convert
6045 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
6046 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
6047 (sign_extract X 1 Y). But only do this if TEMP isn't a register
6048 or a SUBREG of one since we'd be making the expression more
6049 complex if it was just a register. */
6050
6051 if (!REG_P (temp)
6052 && ! (GET_CODE (temp) == SUBREG
6053 && REG_P (SUBREG_REG (temp)))
6054 && is_a <scalar_int_mode> (mode, &int_mode)
6055 && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
6056 {
6057 rtx temp1 = simplify_shift_const
6058 (NULL_RTX, ASHIFTRT, int_mode,
6059 simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
6060 GET_MODE_PRECISION (int_mode) - 1 - i),
6061 GET_MODE_PRECISION (int_mode) - 1 - i);
6062
6063 /* If all we did was surround TEMP with the two shifts, we
6064 haven't improved anything, so don't use it. Otherwise,
6065 we are better off with TEMP1. */
6066 if (GET_CODE (temp1) != ASHIFTRT
6067 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
6068 || XEXP (XEXP (temp1, 0), 0) != temp)
6069 return temp1;
6070 }
6071 break;
6072
6073 case TRUNCATE:
6074 /* We can't handle truncation to a partial integer mode here
6075 because we don't know the real bitsize of the partial
6076 integer mode. */
6077 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
6078 break;
6079
6080 if (HWI_COMPUTABLE_MODE_P (mode))
6081 SUBST (XEXP (x, 0),
6082 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6083 GET_MODE_MASK (mode), 0));
6084
6085 /* We can truncate a constant value and return it. */
6086 {
6087 poly_int64 c;
6088 if (poly_int_rtx_p (XEXP (x, 0), &c))
6089 return gen_int_mode (c, mode);
6090 }
6091
6092 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
6093 whose value is a comparison can be replaced with a subreg if
6094 STORE_FLAG_VALUE permits. */
6095 if (HWI_COMPUTABLE_MODE_P (mode)
6096 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
6097 && (temp = get_last_value (XEXP (x, 0)))
6098 && COMPARISON_P (temp))
6099 return gen_lowpart (mode, XEXP (x, 0));
6100 break;
6101
6102 case CONST:
6103 /* (const (const X)) can become (const X). Do it this way rather than
6104 returning the inner CONST since CONST can be shared with a
6105 REG_EQUAL note. */
6106 if (GET_CODE (XEXP (x, 0)) == CONST)
6107 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
6108 break;
6109
6110 case LO_SUM:
6111 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
6112 can add in an offset. find_split_point will split this address up
6113 again if it doesn't match. */
6114 if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
6115 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6116 return XEXP (x, 1);
6117 break;
6118
6119 case PLUS:
6120 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
6121 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
6122 bit-field and can be replaced by either a sign_extend or a
6123 sign_extract. The `and' may be a zero_extend and the two
6124 <c>, -<c> constants may be reversed. */
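 /* A worked instance (illustrative, SImode):
 (plus (xor (and X (const_int 255)) (const_int 128)) (const_int -128))
 is ((x & 0xff) ^ 0x80) - 0x80, the usual C idiom for sign-extending
 the low byte, and is rewritten here as
 (ashiftrt (ashift X (const_int 24)) (const_int 24)). */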
6125 if (GET_CODE (XEXP (x, 0)) == XOR
6126 && is_a <scalar_int_mode> (mode, &int_mode)
6127 && CONST_INT_P (XEXP (x, 1))
6128 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
6129 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
6130 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
6131 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
6132 && HWI_COMPUTABLE_MODE_P (int_mode)
6133 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
6134 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
6135 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
6136 == (HOST_WIDE_INT_1U << (i + 1)) - 1))
6137 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
6138 && known_eq ((GET_MODE_PRECISION
6139 (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))),
6140 (unsigned int) i + 1))))
6141 return simplify_shift_const
6142 (NULL_RTX, ASHIFTRT, int_mode,
6143 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6144 XEXP (XEXP (XEXP (x, 0), 0), 0),
6145 GET_MODE_PRECISION (int_mode) - (i + 1)),
6146 GET_MODE_PRECISION (int_mode) - (i + 1));
6147
6148 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
6149 can become (ashiftrt (ashift (xor x 1) C) C) where C is
6150 the bitsize of the mode - 1. This allows simplification of
6151 "a = (b & 8) == 0;" */
6152 if (XEXP (x, 1) == constm1_rtx
6153 && !REG_P (XEXP (x, 0))
6154 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
6155 && REG_P (SUBREG_REG (XEXP (x, 0))))
6156 && is_a <scalar_int_mode> (mode, &int_mode)
6157 && nonzero_bits (XEXP (x, 0), int_mode) == 1)
6158 return simplify_shift_const
6159 (NULL_RTX, ASHIFTRT, int_mode,
6160 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6161 gen_rtx_XOR (int_mode, XEXP (x, 0),
6162 const1_rtx),
6163 GET_MODE_PRECISION (int_mode) - 1),
6164 GET_MODE_PRECISION (int_mode) - 1);
6165
6166 /* If we are adding two things that have no bits in common, convert
6167 the addition into an IOR. This will often be further simplified,
6168 for example in cases like ((a & 1) + (a & 2)), which can
6169 become a & 3. */
6170
6171 if (HWI_COMPUTABLE_MODE_P (mode)
6172 && (nonzero_bits (XEXP (x, 0), mode)
6173 & nonzero_bits (XEXP (x, 1), mode)) == 0)
6174 {
6175 /* Try to simplify the expression further. */
6176 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
6177 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
6178
6179 /* If we could, great. If not, do not go ahead with the IOR
6180 replacement, since PLUS appears in many special purpose
6181 address arithmetic instructions. */
6182 if (GET_CODE (temp) != CLOBBER
6183 && (GET_CODE (temp) != IOR
6184 || ((XEXP (temp, 0) != XEXP (x, 0)
6185 || XEXP (temp, 1) != XEXP (x, 1))
6186 && (XEXP (temp, 0) != XEXP (x, 1)
6187 || XEXP (temp, 1) != XEXP (x, 0)))))
6188 return temp;
6189 }
6190
6191 /* Canonicalize x + x into x << 1. */
6192 if (GET_MODE_CLASS (mode) == MODE_INT
6193 && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
6194 && !side_effects_p (XEXP (x, 0)))
6195 return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
6196
6197 break;
6198
6199 case MINUS:
6200 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
6201 (and <foo> (const_int pow2-1)) */
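 /* Numerically (illustrative): X - (X & -8) equals X & 7. */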
6202 if (is_a <scalar_int_mode> (mode, &int_mode)
6203 && GET_CODE (XEXP (x, 1)) == AND
6204 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
6205 && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
6206 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6207 return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
6208 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
6209 break;
6210
6211 case MULT:
6212 /* If we have (mult (plus A B) C), apply the distributive law and then
6213 the inverse distributive law to see if things simplify. This
6214 occurs mostly in addresses, often when unrolling loops. */
6215
6216 if (GET_CODE (XEXP (x, 0)) == PLUS)
6217 {
6218 rtx result = distribute_and_simplify_rtx (x, 0);
6219 if (result)
6220 return result;
6221 }
6222
6223 /* Try to simplify a*(b/c) as (a*b)/c. */
6224 if (FLOAT_MODE_P (mode) && flag_associative_math
6225 && GET_CODE (XEXP (x, 0)) == DIV)
6226 {
6227 rtx tem = simplify_binary_operation (MULT, mode,
6228 XEXP (XEXP (x, 0), 0),
6229 XEXP (x, 1));
6230 if (tem)
6231 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
6232 }
6233 break;
6234
6235 case UDIV:
6236 /* If this is a divide by a power of two, treat it as a shift if
6237 its first operand is a shift. */
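 /* E.g. (illustrative): (udiv (ashift X (const_int 2)) (const_int 16))
 is handed to simplify_shift_const as a logical right shift by 4 of
 (ashift X 2), where the two shifts can be merged. */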
6238 if (is_a <scalar_int_mode> (mode, &int_mode)
6239 && CONST_INT_P (XEXP (x, 1))
6240 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
6241 && (GET_CODE (XEXP (x, 0)) == ASHIFT
6242 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
6243 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
6244 || GET_CODE (XEXP (x, 0)) == ROTATE
6245 || GET_CODE (XEXP (x, 0)) == ROTATERT))
6246 return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
6247 XEXP (x, 0), i);
6248 break;
6249
6250 case EQ: case NE:
6251 case GT: case GTU: case GE: case GEU:
6252 case LT: case LTU: case LE: case LEU:
6253 case UNEQ: case LTGT:
6254 case UNGT: case UNGE:
6255 case UNLT: case UNLE:
6256 case UNORDERED: case ORDERED:
6257 /* If the first operand is a condition code, we can't do anything
6258 with it. */
6259 if (GET_CODE (XEXP (x, 0)) == COMPARE
6260 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
6261 && ! CC0_P (XEXP (x, 0))))
6262 {
6263 rtx op0 = XEXP (x, 0);
6264 rtx op1 = XEXP (x, 1);
6265 enum rtx_code new_code;
6266
6267 if (GET_CODE (op0) == COMPARE)
6268 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
6269
6270 /* Simplify our comparison, if possible. */
6271 new_code = simplify_comparison (code, &op0, &op1);
6272
6273 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
6274 if only the low-order bit is possibly nonzero in X (such as when
6275 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
6276 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
6277 known to be either 0 or -1, NE becomes a NEG and EQ becomes
6278 (plus X 1).
6279
6280 Remove any ZERO_EXTRACT we made when thinking this was a
6281 comparison. It may now be simpler to use, e.g., an AND. If a
6282 ZERO_EXTRACT is indeed appropriate, it will be placed back by
6283 the call to make_compound_operation in the SET case.
6284
6285 Don't apply these optimizations if the caller would
6286 prefer a comparison rather than a value.
6287 E.g., for the condition in an IF_THEN_ELSE most targets need
6288 an explicit comparison. */
6289
6290 if (in_cond)
6291 ;
6292
6293 else if (STORE_FLAG_VALUE == 1
6294 && new_code == NE
6295 && is_int_mode (mode, &int_mode)
6296 && op1 == const0_rtx
6297 && int_mode == GET_MODE (op0)
6298 && nonzero_bits (op0, int_mode) == 1)
6299 return gen_lowpart (int_mode,
6300 expand_compound_operation (op0));
6301
6302 else if (STORE_FLAG_VALUE == 1
6303 && new_code == NE
6304 && is_int_mode (mode, &int_mode)
6305 && op1 == const0_rtx
6306 && int_mode == GET_MODE (op0)
6307 && (num_sign_bit_copies (op0, int_mode)
6308 == GET_MODE_PRECISION (int_mode)))
6309 {
6310 op0 = expand_compound_operation (op0);
6311 return simplify_gen_unary (NEG, int_mode,
6312 gen_lowpart (int_mode, op0),
6313 int_mode);
6314 }
6315
6316 else if (STORE_FLAG_VALUE == 1
6317 && new_code == EQ
6318 && is_int_mode (mode, &int_mode)
6319 && op1 == const0_rtx
6320 && int_mode == GET_MODE (op0)
6321 && nonzero_bits (op0, int_mode) == 1)
6322 {
6323 op0 = expand_compound_operation (op0);
6324 return simplify_gen_binary (XOR, int_mode,
6325 gen_lowpart (int_mode, op0),
6326 const1_rtx);
6327 }
6328
6329 else if (STORE_FLAG_VALUE == 1
6330 && new_code == EQ
6331 && is_int_mode (mode, &int_mode)
6332 && op1 == const0_rtx
6333 && int_mode == GET_MODE (op0)
6334 && (num_sign_bit_copies (op0, int_mode)
6335 == GET_MODE_PRECISION (int_mode)))
6336 {
6337 op0 = expand_compound_operation (op0);
6338 return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
6339 }
6340
6341 /* If STORE_FLAG_VALUE is -1, we have cases similar to
6342 those above. */
6343 if (in_cond)
6344 ;
6345
6346 else if (STORE_FLAG_VALUE == -1
6347 && new_code == NE
6348 && is_int_mode (mode, &int_mode)
6349 && op1 == const0_rtx
6350 && int_mode == GET_MODE (op0)
6351 && (num_sign_bit_copies (op0, int_mode)
6352 == GET_MODE_PRECISION (int_mode)))
6353 return gen_lowpart (int_mode, expand_compound_operation (op0));
6354
6355 else if (STORE_FLAG_VALUE == -1
6356 && new_code == NE
6357 && is_int_mode (mode, &int_mode)
6358 && op1 == const0_rtx
6359 && int_mode == GET_MODE (op0)
6360 && nonzero_bits (op0, int_mode) == 1)
6361 {
6362 op0 = expand_compound_operation (op0);
6363 return simplify_gen_unary (NEG, int_mode,
6364 gen_lowpart (int_mode, op0),
6365 int_mode);
6366 }
6367
6368 else if (STORE_FLAG_VALUE == -1
6369 && new_code == EQ
6370 && is_int_mode (mode, &int_mode)
6371 && op1 == const0_rtx
6372 && int_mode == GET_MODE (op0)
6373 && (num_sign_bit_copies (op0, int_mode)
6374 == GET_MODE_PRECISION (int_mode)))
6375 {
6376 op0 = expand_compound_operation (op0);
6377 return simplify_gen_unary (NOT, int_mode,
6378 gen_lowpart (int_mode, op0),
6379 int_mode);
6380 }
6381
6382 /* If X is 0/1, (eq X 0) is X-1. */
6383 else if (STORE_FLAG_VALUE == -1
6384 && new_code == EQ
6385 && is_int_mode (mode, &int_mode)
6386 && op1 == const0_rtx
6387 && int_mode == GET_MODE (op0)
6388 && nonzero_bits (op0, int_mode) == 1)
6389 {
6390 op0 = expand_compound_operation (op0);
6391 return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
6392 }
6393
6394 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6395 one bit that might be nonzero, we can convert (ne x 0) to
6396 (ashift x c) where C puts the bit in the sign bit. Remove any
6397 AND with STORE_FLAG_VALUE when we are done, since we are only
6398 going to test the sign bit. */
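 /* Illustrative (SImode, STORE_FLAG_VALUE testing the sign bit): if
 only bit 2 of X can be nonzero, (ne X 0) becomes
 (ashift X (const_int 29)), moving that bit into the sign position. */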
6399 if (new_code == NE
6400 && is_int_mode (mode, &int_mode)
6401 && HWI_COMPUTABLE_MODE_P (int_mode)
6402 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
6403 && op1 == const0_rtx
6404 && int_mode == GET_MODE (op0)
6405 && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
6406 {
6407 x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6408 expand_compound_operation (op0),
6409 GET_MODE_PRECISION (int_mode) - 1 - i);
6410 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6411 return XEXP (x, 0);
6412 else
6413 return x;
6414 }
6415
6416 /* If the code changed, return a whole new comparison.
6417 We also need to avoid using SUBST in cases where
6418 simplify_comparison has widened a comparison with a CONST_INT,
6419 since in that case the wider CONST_INT may fail the sanity
6420 checks in do_SUBST. */
6421 if (new_code != code
6422 || (CONST_INT_P (op1)
6423 && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6424 && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6425 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6426
6427 /* Otherwise, keep this operation, but maybe change its operands.
6428 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
6429 SUBST (XEXP (x, 0), op0);
6430 SUBST (XEXP (x, 1), op1);
6431 }
6432 break;
6433
6434 case IF_THEN_ELSE:
6435 return simplify_if_then_else (x);
6436
6437 case ZERO_EXTRACT:
6438 case SIGN_EXTRACT:
6439 case ZERO_EXTEND:
6440 case SIGN_EXTEND:
6441 /* If we are processing SET_DEST, we are done. */
6442 if (in_dest)
6443 return x;
6444
6445 return expand_compound_operation (x);
6446
6447 case SET:
6448 return simplify_set (x);
6449
6450 case AND:
6451 case IOR:
6452 return simplify_logical (x);
6453
6454 case ASHIFT:
6455 case LSHIFTRT:
6456 case ASHIFTRT:
6457 case ROTATE:
6458 case ROTATERT:
6459 /* If this is a shift by a constant amount, simplify it. */
6460 if (CONST_INT_P (XEXP (x, 1)))
6461 return simplify_shift_const (x, code, mode, XEXP (x, 0),
6462 INTVAL (XEXP (x, 1)));
6463
6464 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6465 SUBST (XEXP (x, 1),
6466 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6467 (HOST_WIDE_INT_1U
6468 << exact_log2 (GET_MODE_UNIT_BITSIZE
6469 (GET_MODE (x))))
6470 - 1,
6471 0));
6472 break;
6473
6474 default:
6475 break;
6476 }
6477
6478 return x;
6479 }
6480 \f
6481 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
6482
6483 static rtx
6484 simplify_if_then_else (rtx x)
6485 {
6486 machine_mode mode = GET_MODE (x);
6487 rtx cond = XEXP (x, 0);
6488 rtx true_rtx = XEXP (x, 1);
6489 rtx false_rtx = XEXP (x, 2);
6490 enum rtx_code true_code = GET_CODE (cond);
6491 int comparison_p = COMPARISON_P (cond);
6492 rtx temp;
6493 int i;
6494 enum rtx_code false_code;
6495 rtx reversed;
6496 scalar_int_mode int_mode, inner_mode;
6497
6498 /* Simplify storing of the truth value. */
6499 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6500 return simplify_gen_relational (true_code, mode, VOIDmode,
6501 XEXP (cond, 0), XEXP (cond, 1));
6502
6503 /* Also when the truth value has to be reversed. */
6504 if (comparison_p
6505 && true_rtx == const0_rtx && false_rtx == const_true_rtx
6506 && (reversed = reversed_comparison (cond, mode)))
6507 return reversed;
6508
6509 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6510 in it is being compared against certain values. Get the true and false
6511 comparisons and see if that says anything about the value of each arm. */
6512
6513 if (comparison_p
6514 && ((false_code = reversed_comparison_code (cond, NULL))
6515 != UNKNOWN)
6516 && REG_P (XEXP (cond, 0)))
6517 {
6518 HOST_WIDE_INT nzb;
6519 rtx from = XEXP (cond, 0);
6520 rtx true_val = XEXP (cond, 1);
6521 rtx false_val = true_val;
6522 int swapped = 0;
6523
6524 /* If FALSE_CODE is EQ, swap the codes and arms. */
6525
6526 if (false_code == EQ)
6527 {
6528 swapped = 1, true_code = EQ, false_code = NE;
6529 std::swap (true_rtx, false_rtx);
6530 }
6531
6532 scalar_int_mode from_mode;
6533 if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
6534 {
6535 /* If we are comparing against zero and the expression being
6536 tested has only a single bit that might be nonzero, that is
6537 its value when it is not equal to zero. Similarly if it is
6538 known to be -1 or 0. */
6539 if (true_code == EQ
6540 && true_val == const0_rtx
6541 && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
6542 {
6543 false_code = EQ;
6544 false_val = gen_int_mode (nzb, from_mode);
6545 }
6546 else if (true_code == EQ
6547 && true_val == const0_rtx
6548 && (num_sign_bit_copies (from, from_mode)
6549 == GET_MODE_PRECISION (from_mode)))
6550 {
6551 false_code = EQ;
6552 false_val = constm1_rtx;
6553 }
6554 }
6555
6556 /* Now simplify an arm if we know the value of the register in the
6557 branch and it is used in the arm. Be careful due to the potential
6558 of locally-shared RTL. */
6559
6560 if (reg_mentioned_p (from, true_rtx))
6561 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6562 from, true_val),
6563 pc_rtx, pc_rtx, 0, 0, 0);
6564 if (reg_mentioned_p (from, false_rtx))
6565 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6566 from, false_val),
6567 pc_rtx, pc_rtx, 0, 0, 0);
6568
6569 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6570 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6571
6572 true_rtx = XEXP (x, 1);
6573 false_rtx = XEXP (x, 2);
6574 true_code = GET_CODE (cond);
6575 }
6576
6577 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6578 reversed, do so to avoid needing two sets of patterns for
6579 subtract-and-branch insns. Similarly if we have a constant in the true
6580 arm, the false arm is the same as the first operand of the comparison, or
6581 the false arm is more complicated than the true arm. */
6582
6583 if (comparison_p
6584 && reversed_comparison_code (cond, NULL) != UNKNOWN
6585 && (true_rtx == pc_rtx
6586 || (CONSTANT_P (true_rtx)
6587 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6588 || true_rtx == const0_rtx
6589 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6590 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6591 && !OBJECT_P (false_rtx))
6592 || reg_mentioned_p (true_rtx, false_rtx)
6593 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6594 {
6595 true_code = reversed_comparison_code (cond, NULL);
6596 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6597 SUBST (XEXP (x, 1), false_rtx);
6598 SUBST (XEXP (x, 2), true_rtx);
6599
6600 std::swap (true_rtx, false_rtx);
6601 cond = XEXP (x, 0);
6602
6603 /* It is possible that the conditional has been simplified out. */
6604 true_code = GET_CODE (cond);
6605 comparison_p = COMPARISON_P (cond);
6606 }
6607
6608 /* If the two arms are identical, we don't need the comparison. */
6609
6610 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6611 return true_rtx;
6612
6613 /* Convert a == b ? b : a to "a". */
6614 if (true_code == EQ && ! side_effects_p (cond)
6615 && !HONOR_NANS (mode)
6616 && rtx_equal_p (XEXP (cond, 0), false_rtx)
6617 && rtx_equal_p (XEXP (cond, 1), true_rtx))
6618 return false_rtx;
6619 else if (true_code == NE && ! side_effects_p (cond)
6620 && !HONOR_NANS (mode)
6621 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6622 && rtx_equal_p (XEXP (cond, 1), false_rtx))
6623 return true_rtx;
6624
6625 /* Look for cases where we have (abs x) or (neg (abs X)). */
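  /* E.g. (a sketch): (if_then_else (ge X (const_int 0)) X (neg X))
     becomes (abs X), while the LT/LE forms become (neg (abs X)).  */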
6626
6627 if (GET_MODE_CLASS (mode) == MODE_INT
6628 && comparison_p
6629 && XEXP (cond, 1) == const0_rtx
6630 && GET_CODE (false_rtx) == NEG
6631 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6632 && rtx_equal_p (true_rtx, XEXP (cond, 0))
6633 && ! side_effects_p (true_rtx))
6634 switch (true_code)
6635 {
6636 case GT:
6637 case GE:
6638 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6639 case LT:
6640 case LE:
6641 return
6642 simplify_gen_unary (NEG, mode,
6643 simplify_gen_unary (ABS, mode, true_rtx, mode),
6644 mode);
6645 default:
6646 break;
6647 }
6648
6649 /* Look for MIN or MAX. */
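  /* E.g. (a sketch): (if_then_else (gt A B) A B) becomes (smax A B),
     and (if_then_else (ltu A B) A B) becomes (umin A B).  */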
6650
6651 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6652 && comparison_p
6653 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6654 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6655 && ! side_effects_p (cond))
6656 switch (true_code)
6657 {
6658 case GE:
6659 case GT:
6660 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6661 case LE:
6662 case LT:
6663 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6664 case GEU:
6665 case GTU:
6666 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6667 case LEU:
6668 case LTU:
6669 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6670 default:
6671 break;
6672 }
6673
6674 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6675 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6676 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6677 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6678 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6679 neither 1 nor -1, but it isn't worth checking for. */
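  /* A sketch of the transformation, assuming STORE_FLAG_VALUE == 1 so the
     comparison itself evaluates to 1 or 0:
     (if_then_else (ne A 0) (plus Z (const_int 4)) Z)
     becomes (plus Z (mult (ne A 0) (const_int 4))).  */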
6680
6681 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6682 && comparison_p
6683 && is_int_mode (mode, &int_mode)
6684 && ! side_effects_p (x))
6685 {
6686 rtx t = make_compound_operation (true_rtx, SET);
6687 rtx f = make_compound_operation (false_rtx, SET);
6688 rtx cond_op0 = XEXP (cond, 0);
6689 rtx cond_op1 = XEXP (cond, 1);
6690 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6691 scalar_int_mode m = int_mode;
6692 rtx z = 0, c1 = NULL_RTX;
6693
6694 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6695 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6696 || GET_CODE (t) == ASHIFT
6697 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6698 && rtx_equal_p (XEXP (t, 0), f))
6699 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6700
6701 /* If an identity-zero op is commutative, check whether there
6702 would be a match if we swapped the operands. */
6703 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6704 || GET_CODE (t) == XOR)
6705 && rtx_equal_p (XEXP (t, 1), f))
6706 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6707 else if (GET_CODE (t) == SIGN_EXTEND
6708 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6709 && (GET_CODE (XEXP (t, 0)) == PLUS
6710 || GET_CODE (XEXP (t, 0)) == MINUS
6711 || GET_CODE (XEXP (t, 0)) == IOR
6712 || GET_CODE (XEXP (t, 0)) == XOR
6713 || GET_CODE (XEXP (t, 0)) == ASHIFT
6714 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6715 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6716 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6717 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6718 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6719 && (num_sign_bit_copies (f, GET_MODE (f))
6720 > (unsigned int)
6721 (GET_MODE_PRECISION (int_mode)
6722 - GET_MODE_PRECISION (inner_mode))))
6723 {
6724 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6725 extend_op = SIGN_EXTEND;
6726 m = inner_mode;
6727 }
6728 else if (GET_CODE (t) == SIGN_EXTEND
6729 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6730 && (GET_CODE (XEXP (t, 0)) == PLUS
6731 || GET_CODE (XEXP (t, 0)) == IOR
6732 || GET_CODE (XEXP (t, 0)) == XOR)
6733 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6734 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6735 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6736 && (num_sign_bit_copies (f, GET_MODE (f))
6737 > (unsigned int)
6738 (GET_MODE_PRECISION (int_mode)
6739 - GET_MODE_PRECISION (inner_mode))))
6740 {
6741 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6742 extend_op = SIGN_EXTEND;
6743 m = inner_mode;
6744 }
6745 else if (GET_CODE (t) == ZERO_EXTEND
6746 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6747 && (GET_CODE (XEXP (t, 0)) == PLUS
6748 || GET_CODE (XEXP (t, 0)) == MINUS
6749 || GET_CODE (XEXP (t, 0)) == IOR
6750 || GET_CODE (XEXP (t, 0)) == XOR
6751 || GET_CODE (XEXP (t, 0)) == ASHIFT
6752 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6753 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6754 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6755 && HWI_COMPUTABLE_MODE_P (int_mode)
6756 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6757 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6758 && ((nonzero_bits (f, GET_MODE (f))
6759 & ~GET_MODE_MASK (inner_mode))
6760 == 0))
6761 {
6762 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6763 extend_op = ZERO_EXTEND;
6764 m = inner_mode;
6765 }
6766 else if (GET_CODE (t) == ZERO_EXTEND
6767 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6768 && (GET_CODE (XEXP (t, 0)) == PLUS
6769 || GET_CODE (XEXP (t, 0)) == IOR
6770 || GET_CODE (XEXP (t, 0)) == XOR)
6771 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6772 && HWI_COMPUTABLE_MODE_P (int_mode)
6773 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6774 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6775 && ((nonzero_bits (f, GET_MODE (f))
6776 & ~GET_MODE_MASK (inner_mode))
6777 == 0))
6778 {
6779 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6780 extend_op = ZERO_EXTEND;
6781 m = inner_mode;
6782 }
6783
6784 if (z)
6785 {
6786 machine_mode cm = m;
6787 if ((op == ASHIFT || op == LSHIFTRT || op == ASHIFTRT)
6788 && GET_MODE (c1) != VOIDmode)
6789 cm = GET_MODE (c1);
6790 temp = subst (simplify_gen_relational (true_code, cm, VOIDmode,
6791 cond_op0, cond_op1),
6792 pc_rtx, pc_rtx, 0, 0, 0);
6793 temp = simplify_gen_binary (MULT, cm, temp,
6794 simplify_gen_binary (MULT, cm, c1,
6795 const_true_rtx));
6796 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6797 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6798
6799 if (extend_op != UNKNOWN)
6800 temp = simplify_gen_unary (extend_op, int_mode, temp, m);
6801
6802 return temp;
6803 }
6804 }
6805
6806 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6807 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6808 negation of a single bit, we can convert this operation to a shift. We
6809 can actually do this more generally, but it doesn't seem worth it. */
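  /* For instance (a sketch): if A is known to be 0 or 1,
     (if_then_else (ne A 0) (const_int 8) (const_int 0))
     becomes (ashift A (const_int 3)).  */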
6810
6811 if (true_code == NE
6812 && is_a <scalar_int_mode> (mode, &int_mode)
6813 && XEXP (cond, 1) == const0_rtx
6814 && false_rtx == const0_rtx
6815 && CONST_INT_P (true_rtx)
6816 && ((nonzero_bits (XEXP (cond, 0), int_mode) == 1
6817 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6818 || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
6819 == GET_MODE_PRECISION (int_mode))
6820 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6821 return
6822 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6823 gen_lowpart (int_mode, XEXP (cond, 0)), i);
6824
6825 /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
6826 nonzero bit in A is C1. */
6827 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6828 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6829 && is_a <scalar_int_mode> (mode, &int_mode)
6830 && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
6831 && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
6832 == nonzero_bits (XEXP (cond, 0), inner_mode)
6833 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
6834 {
6835 rtx val = XEXP (cond, 0);
6836 if (inner_mode == int_mode)
6837 return val;
6838 else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
6839 return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
6840 }
6841
6842 return x;
6843 }
6844 \f
6845 /* Simplify X, a SET expression. Return the new expression. */
6846
6847 static rtx
6848 simplify_set (rtx x)
6849 {
6850 rtx src = SET_SRC (x);
6851 rtx dest = SET_DEST (x);
6852 machine_mode mode
6853 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6854 rtx_insn *other_insn;
6855 rtx *cc_use;
6856 scalar_int_mode int_mode;
6857
6858 /* (set (pc) (return)) gets written as (return). */
6859 if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6860 return src;
6861
6862 /* Now that we know for sure which bits of SRC we are using, see if we can
6863 simplify the expression for the object knowing that we only need the
6864 low-order bits. */
6865
6866 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6867 {
6868 src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
6869 SUBST (SET_SRC (x), src);
6870 }
6871
6872 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6873 the comparison result and try to simplify it unless we already have used
6874 undobuf.other_insn. */
6875 if ((GET_MODE_CLASS (mode) == MODE_CC
6876 || GET_CODE (src) == COMPARE
6877 || CC0_P (dest))
6878 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6879 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6880 && COMPARISON_P (*cc_use)
6881 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6882 {
6883 enum rtx_code old_code = GET_CODE (*cc_use);
6884 enum rtx_code new_code;
6885 rtx op0, op1, tmp;
6886 int other_changed = 0;
6887 rtx inner_compare = NULL_RTX;
6888 machine_mode compare_mode = GET_MODE (dest);
6889
6890 if (GET_CODE (src) == COMPARE)
6891 {
6892 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6893 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6894 {
6895 inner_compare = op0;
6896 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6897 }
6898 }
6899 else
6900 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6901
6902 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6903 op0, op1);
6904 if (!tmp)
6905 new_code = old_code;
6906 else if (!CONSTANT_P (tmp))
6907 {
6908 new_code = GET_CODE (tmp);
6909 op0 = XEXP (tmp, 0);
6910 op1 = XEXP (tmp, 1);
6911 }
6912 else
6913 {
6914 rtx pat = PATTERN (other_insn);
6915 undobuf.other_insn = other_insn;
6916 SUBST (*cc_use, tmp);
6917
6918 /* Attempt to simplify CC user. */
6919 if (GET_CODE (pat) == SET)
6920 {
6921 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6922 if (new_rtx != NULL_RTX)
6923 SUBST (SET_SRC (pat), new_rtx);
6924 }
6925
6926 /* Convert X into a no-op move. */
6927 SUBST (SET_DEST (x), pc_rtx);
6928 SUBST (SET_SRC (x), pc_rtx);
6929 return x;
6930 }
6931
6932 /* Simplify our comparison, if possible. */
6933 new_code = simplify_comparison (new_code, &op0, &op1);
6934
6935 #ifdef SELECT_CC_MODE
6936 /* If this machine has CC modes other than CCmode, check to see if we
6937 need to use a different CC mode here. */
6938 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6939 compare_mode = GET_MODE (op0);
6940 else if (inner_compare
6941 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6942 && new_code == old_code
6943 && op0 == XEXP (inner_compare, 0)
6944 && op1 == XEXP (inner_compare, 1))
6945 compare_mode = GET_MODE (inner_compare);
6946 else
6947 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6948
6949 /* If the mode changed, we have to change SET_DEST, the mode in the
6950 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6951 a hard register, just build new versions with the proper mode. If it
6952 is a pseudo, we lose unless it is the only time we set the pseudo, in
6953 which case we can safely change its mode. */
6954 if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
6955 {
6956 if (can_change_dest_mode (dest, 0, compare_mode))
6957 {
6958 unsigned int regno = REGNO (dest);
6959 rtx new_dest;
6960
6961 if (regno < FIRST_PSEUDO_REGISTER)
6962 new_dest = gen_rtx_REG (compare_mode, regno);
6963 else
6964 {
6965 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6966 new_dest = regno_reg_rtx[regno];
6967 }
6968
6969 SUBST (SET_DEST (x), new_dest);
6970 SUBST (XEXP (*cc_use, 0), new_dest);
6971 other_changed = 1;
6972
6973 dest = new_dest;
6974 }
6975 }
6976 #endif /* SELECT_CC_MODE */
6977
6978 /* If the code changed, we have to build a new comparison in
6979 undobuf.other_insn. */
6980 if (new_code != old_code)
6981 {
6982 int other_changed_previously = other_changed;
6983 unsigned HOST_WIDE_INT mask;
6984 rtx old_cc_use = *cc_use;
6985
6986 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6987 dest, const0_rtx));
6988 other_changed = 1;
6989
6990 /* If the only change we made was to change an EQ into an NE or
6991 vice versa, OP0 has only one bit that might be nonzero, and OP1
6992 is zero, check if changing the user of the condition code will
6993 produce a valid insn. If it won't, we can keep the original code
6994 in that insn by surrounding our operation with an XOR. */
6995
6996 if (((old_code == NE && new_code == EQ)
6997 || (old_code == EQ && new_code == NE))
6998 && ! other_changed_previously && op1 == const0_rtx
6999 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
7000 && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
7001 {
7002 rtx pat = PATTERN (other_insn), note = 0;
7003
7004 if ((recog_for_combine (&pat, other_insn, &note) < 0
7005 && ! check_asm_operands (pat)))
7006 {
7007 *cc_use = old_cc_use;
7008 other_changed = 0;
7009
7010 op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
7011 gen_int_mode (mask,
7012 GET_MODE (op0)));
7013 }
7014 }
7015 }
7016
7017 if (other_changed)
7018 undobuf.other_insn = other_insn;
7019
7020 /* Don't generate a compare of a CC with 0; just use that CC. */
7021 if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
7022 {
7023 SUBST (SET_SRC (x), op0);
7024 src = SET_SRC (x);
7025 }
7026 /* Otherwise, if we didn't previously have the same COMPARE we
7027 want, create it from scratch. */
7028 else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
7029 || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
7030 {
7031 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
7032 src = SET_SRC (x);
7033 }
7034 }
7035 else
7036 {
7037 /* Get SET_SRC in a form where we have placed back any
7038 compound expressions. Then do the checks below. */
7039 src = make_compound_operation (src, SET);
7040 SUBST (SET_SRC (x), src);
7041 }
7042
7043 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
7044 and X being a REG or (subreg (reg)), we may be able to convert this to
7045 (set (subreg:m2 x) (op)).
7046
7047 We can always do this if M1 is narrower than M2 because that means that
7048 we only care about the low bits of the result.
7049
7050 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
7051 perform a narrower operation than requested since the high-order bits will
7052 be undefined. On machines where it is defined, this transformation is safe
7053 as long as M1 and M2 have the same number of words. */
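  /* A sketch of this transformation:
     (set (reg:SI X) (subreg:SI (plus:DI A B) 0))
     can become (set (subreg:DI (reg:SI X) 0) (plus:DI A B)).  */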
7054
7055 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
7056 && !OBJECT_P (SUBREG_REG (src))
7057 && (known_equal_after_align_up
7058 (GET_MODE_SIZE (GET_MODE (src)),
7059 GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))),
7060 UNITS_PER_WORD))
7061 && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
7062 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
7063 && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
7064 GET_MODE (SUBREG_REG (src)),
7065 GET_MODE (src)))
7066 && (REG_P (dest)
7067 || (GET_CODE (dest) == SUBREG
7068 && REG_P (SUBREG_REG (dest)))))
7069 {
7070 SUBST (SET_DEST (x),
7071 gen_lowpart (GET_MODE (SUBREG_REG (src)),
7072 dest));
7073 SUBST (SET_SRC (x), SUBREG_REG (src));
7074
7075 src = SET_SRC (x), dest = SET_DEST (x);
7076 }
7077
7078 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
7079 in SRC. */
7080 if (dest == cc0_rtx
7081 && partial_subreg_p (src)
7082 && subreg_lowpart_p (src))
7083 {
7084 rtx inner = SUBREG_REG (src);
7085 machine_mode inner_mode = GET_MODE (inner);
7086
7087 /* Here we make sure that the sign bit is known to be clear. */
7088 if (val_signbit_known_clear_p (GET_MODE (src),
7089 nonzero_bits (inner, inner_mode)))
7090 {
7091 SUBST (SET_SRC (x), inner);
7092 src = SET_SRC (x);
7093 }
7094 }
7095
7096 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
7097 would require a paradoxical subreg. Replace the subreg with a
7098 zero_extend to avoid the reload that would otherwise be required.
7099 Don't do this unless we have a scalar integer mode, otherwise the
7100 transformation is incorrect. */
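  /* For example (a sketch, assuming the target's load_extend_op for QImode
     loads is ZERO_EXTEND): a SET_SRC of (subreg:SI (mem:QI ADDR) 0)
     becomes (zero_extend:SI (mem:QI ADDR)).  */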
7101
7102 enum rtx_code extend_op;
7103 if (paradoxical_subreg_p (src)
7104 && MEM_P (SUBREG_REG (src))
7105 && SCALAR_INT_MODE_P (GET_MODE (src))
7106 && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
7107 {
7108 SUBST (SET_SRC (x),
7109 gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));
7110
7111 src = SET_SRC (x);
7112 }
7113
7114 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
7115 are comparing an item known to be 0 or -1 against 0, use a logical
7116 operation instead. Check for one of the arms being an IOR of the other
7117 arm with some value. We compute three terms to be IOR'ed together. In
7118 practice, at most two will be nonzero. Then we do the IOR's. */
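  /* E.g. (a sketch): if A is known to be 0 or -1,
     (if_then_else (ne A 0) B C) becomes
     (ior (and A B) (and (not A) C)), modulo the TERM1 cases below.  */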
7119
7120 if (GET_CODE (dest) != PC
7121 && GET_CODE (src) == IF_THEN_ELSE
7122 && is_int_mode (GET_MODE (src), &int_mode)
7123 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
7124 && XEXP (XEXP (src, 0), 1) == const0_rtx
7125 && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
7126 && (!HAVE_conditional_move
7127 || ! can_conditionally_move_p (int_mode))
7128 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
7129 == GET_MODE_PRECISION (int_mode))
7130 && ! side_effects_p (src))
7131 {
7132 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
7133 ? XEXP (src, 1) : XEXP (src, 2));
7134 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
7135 ? XEXP (src, 2) : XEXP (src, 1));
7136 rtx term1 = const0_rtx, term2, term3;
7137
7138 if (GET_CODE (true_rtx) == IOR
7139 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
7140 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
7141 else if (GET_CODE (true_rtx) == IOR
7142 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
7143 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
7144 else if (GET_CODE (false_rtx) == IOR
7145 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
7146 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
7147 else if (GET_CODE (false_rtx) == IOR
7148 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
7149 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
7150
7151 term2 = simplify_gen_binary (AND, int_mode,
7152 XEXP (XEXP (src, 0), 0), true_rtx);
7153 term3 = simplify_gen_binary (AND, int_mode,
7154 simplify_gen_unary (NOT, int_mode,
7155 XEXP (XEXP (src, 0), 0),
7156 int_mode),
7157 false_rtx);
7158
7159 SUBST (SET_SRC (x),
7160 simplify_gen_binary (IOR, int_mode,
7161 simplify_gen_binary (IOR, int_mode,
7162 term1, term2),
7163 term3));
7164
7165 src = SET_SRC (x);
7166 }
7167
7168 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
7169 whole thing fail. */
7170 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
7171 return src;
7172 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
7173 return dest;
7174 else
7175 /* Convert this into a field assignment operation, if possible. */
7176 return make_field_assignment (x);
7177 }
7178 \f
7179 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
7180 result. */
7181
7182 static rtx
7183 simplify_logical (rtx x)
7184 {
7185 rtx op0 = XEXP (x, 0);
7186 rtx op1 = XEXP (x, 1);
7187 scalar_int_mode mode;
7188
7189 switch (GET_CODE (x))
7190 {
7191 case AND:
7192 /* We can call simplify_and_const_int only if we don't lose
7193 any (sign) bits when converting INTVAL (op1) to
7194 "unsigned HOST_WIDE_INT". */
7195 if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
7196 && CONST_INT_P (op1)
7197 && (HWI_COMPUTABLE_MODE_P (mode)
7198 || INTVAL (op1) > 0))
7199 {
7200 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
7201 if (GET_CODE (x) != AND)
7202 return x;
7203
7204 op0 = XEXP (x, 0);
7205 op1 = XEXP (x, 1);
7206 }
7207
7208 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
7209 apply the distributive law and then the inverse distributive
7210 law to see if things simplify. */
7211 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
7212 {
7213 rtx result = distribute_and_simplify_rtx (x, 0);
7214 if (result)
7215 return result;
7216 }
7217 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
7218 {
7219 rtx result = distribute_and_simplify_rtx (x, 1);
7220 if (result)
7221 return result;
7222 }
7223 break;
7224
7225 case IOR:
7226 /* If we have (ior (and A B) C), apply the distributive law and then
7227 the inverse distributive law to see if things simplify. */
7228
7229 if (GET_CODE (op0) == AND)
7230 {
7231 rtx result = distribute_and_simplify_rtx (x, 0);
7232 if (result)
7233 return result;
7234 }
7235
7236 if (GET_CODE (op1) == AND)
7237 {
7238 rtx result = distribute_and_simplify_rtx (x, 1);
7239 if (result)
7240 return result;
7241 }
7242 break;
7243
7244 default:
7245 gcc_unreachable ();
7246 }
7247
7248 return x;
7249 }
7250 \f
7251 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
7252 operations" because they can be replaced with two more basic operations.
7253 ZERO_EXTEND is also considered "compound" because it can be replaced with
7254 an AND operation, which is simpler, though only one operation.
7255
7256 The function expand_compound_operation is called with an rtx expression
7257 and will convert it to the appropriate shifts and AND operations,
7258 simplifying at each stage.
7259
7260 The function make_compound_operation is called to convert an expression
7261 consisting of shifts and ANDs into the equivalent compound expression.
7262 It is the inverse of this function, loosely speaking. */
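/* As an illustration (a sketch; the exact result depends on target costs):
   (zero_extend:SI (reg:QI R)) expands to
   (and:SI (subreg:SI (reg:QI R) 0) (const_int 255)),
   and make_compound_operation maps such shift/AND forms back again.  */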
7263
7264 static rtx
7265 expand_compound_operation (rtx x)
7266 {
7267 unsigned HOST_WIDE_INT pos = 0, len;
7268 int unsignedp = 0;
7269 unsigned int modewidth;
7270 rtx tem;
7271 scalar_int_mode inner_mode;
7272
7273 switch (GET_CODE (x))
7274 {
7275 case ZERO_EXTEND:
7276 unsignedp = 1;
7277 /* FALLTHRU */
7278 case SIGN_EXTEND:
7279 /* We can't necessarily use a const_int for a multiword mode;
7280 it depends on implicitly extending the value.
7281 Since we don't know the right way to extend it,
7282 we can't tell whether the implicit way is right.
7283
7284 Even for a mode that is no wider than a const_int,
7285 we can't win, because we need to sign extend one of its bits through
7286 the rest of it, and we don't know which bit. */
7287 if (CONST_INT_P (XEXP (x, 0)))
7288 return x;
7289
7290 /* Reject modes that aren't scalar integers because turning vector
7291 or complex modes into shifts causes problems. */
7292 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7293 return x;
7294
7295 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
7296 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is safe for any MEM
7297 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
7298 reloaded. If not for that, MEM's would very rarely be safe.
7299
7300 Reject modes bigger than a word, because we might not be able
7301 to reference a two-register group starting with an arbitrary register
7302 (and currently gen_lowpart might crash for a SUBREG). */
7303
7304 if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7305 return x;
7306
7307 len = GET_MODE_PRECISION (inner_mode);
7308 /* If the inner object has VOIDmode (the only way this can happen
7309 is if it is an ASM_OPERANDS), we can't do anything since we don't
7310 know how much masking to do. */
7311 if (len == 0)
7312 return x;
7313
7314 break;
7315
7316 case ZERO_EXTRACT:
7317 unsignedp = 1;
7318
7319 /* fall through */
7320
7321 case SIGN_EXTRACT:
7322 /* If the operand is a CLOBBER, just return it. */
7323 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
7324 return XEXP (x, 0);
7325
7326 if (!CONST_INT_P (XEXP (x, 1))
7327 || !CONST_INT_P (XEXP (x, 2)))
7328 return x;
7329
7330 /* Reject modes that aren't scalar integers because turning vector
7331 or complex modes into shifts causes problems. */
7332 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7333 return x;
7334
7335 len = INTVAL (XEXP (x, 1));
7336 pos = INTVAL (XEXP (x, 2));
7337
7338 /* This should stay within the object being extracted, fail otherwise. */
7339 if (len + pos > GET_MODE_PRECISION (inner_mode))
7340 return x;
7341
7342 if (BITS_BIG_ENDIAN)
7343 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
7344
7345 break;
7346
7347 default:
7348 return x;
7349 }
7350
7351 /* We've rejected non-scalar operations by now. */
7352 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));
7353
7354 /* Convert sign extension to zero extension, if we know that the high
7355 bit is not set, as this is easier to optimize. It will be converted
7356 back to a cheaper alternative in make_extraction. */
7357 if (GET_CODE (x) == SIGN_EXTEND
7358 && HWI_COMPUTABLE_MODE_P (mode)
7359 && ((nonzero_bits (XEXP (x, 0), inner_mode)
7360 & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
7361 == 0))
7362 {
7363 rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7364 rtx temp2 = expand_compound_operation (temp);
7365
7366 /* Make sure this is a profitable operation. */
7367 if (set_src_cost (x, mode, optimize_this_for_speed_p)
7368 > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7369 return temp2;
7370 else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7371 > set_src_cost (temp, mode, optimize_this_for_speed_p))
7372 return temp;
7373 else
7374 return x;
7375 }
7376
7377 /* We can optimize some special cases of ZERO_EXTEND. */
7378 if (GET_CODE (x) == ZERO_EXTEND)
7379 {
7380 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7381 know that foo:DI didn't have any inappropriate bits
7382 set. */
7383 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7384 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7385 && HWI_COMPUTABLE_MODE_P (mode)
7386 && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
7387 & ~GET_MODE_MASK (inner_mode)) == 0)
7388 return XEXP (XEXP (x, 0), 0);
7389
7390 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7391 if (GET_CODE (XEXP (x, 0)) == SUBREG
7392 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7393 && subreg_lowpart_p (XEXP (x, 0))
7394 && HWI_COMPUTABLE_MODE_P (mode)
7395 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
7396 & ~GET_MODE_MASK (inner_mode)) == 0)
7397 return SUBREG_REG (XEXP (x, 0));
7398
7399 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7400 is a comparison and STORE_FLAG_VALUE permits. This is like
7401 the first case, but it works even when MODE is larger
7402 than HOST_WIDE_INT. */
7403 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7404 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7405 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7406 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7407 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7408 return XEXP (XEXP (x, 0), 0);
7409
7410 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7411 if (GET_CODE (XEXP (x, 0)) == SUBREG
7412 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7413 && subreg_lowpart_p (XEXP (x, 0))
7414 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7415 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7416 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7417 return SUBREG_REG (XEXP (x, 0));
7418
7419 }
7420
7421 /* If we reach here, we want to return a pair of shifts. The inner
7422 shift is a left shift of BITSIZE - POS - LEN bits. The outer
7423 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
7424 logical depending on the value of UNSIGNEDP.
7425
7426 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7427 converted into an AND of a shift.
7428
7429 We must check for the case where the left shift would have a negative
7430 count. This can happen in a case like (x >> 31) & 255 on machines
7431 that can't shift by a constant. On those machines, we would first
7432 combine the shift with the AND to produce a variable-position
7433 extraction. Then the constant of 31 would be substituted in
7434 to produce such a position. */
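  /* E.g. (a sketch, assuming BITS_BIG_ENDIAN is false):
     (sign_extract:SI X (const_int 8) (const_int 0)) becomes
     (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24)).  */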
7435
7436 modewidth = GET_MODE_PRECISION (mode);
7437 if (modewidth >= pos + len)
7438 {
7439 tem = gen_lowpart (mode, XEXP (x, 0));
7440 if (!tem || GET_CODE (tem) == CLOBBER)
7441 return x;
7442 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7443 tem, modewidth - pos - len);
7444 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7445 mode, tem, modewidth - len);
7446 }
7447 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7448 tem = simplify_and_const_int (NULL_RTX, mode,
7449 simplify_shift_const (NULL_RTX, LSHIFTRT,
7450 mode, XEXP (x, 0),
7451 pos),
7452 (HOST_WIDE_INT_1U << len) - 1);
7453 else
7454 /* We can't handle any other cases. */
7455 return x;
7456
7457 /* If we couldn't do this for some reason, return the original
7458 expression. */
7459 if (GET_CODE (tem) == CLOBBER)
7460 return x;
7461
7462 return tem;
7463 }
7464 \f
7465 /* X is a SET which contains an assignment of one object into
7466 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7467 or certain SUBREGS). If possible, convert it into a series of
7468 logical operations.
7469
7470 We half-heartedly support variable positions, but do not at all
7471 support variable lengths. */
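/* In outline (a sketch, for a constant position POS and width LEN):
   (set (zero_extract INNER LEN POS) SRC) is rewritten as
   INNER = (INNER & ~(MASK << POS)) | ((SRC & MASK) << POS),
   where MASK is (1 << LEN) - 1.  */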
7472
7473 static const_rtx
7474 expand_field_assignment (const_rtx x)
7475 {
7476 rtx inner;
7477 rtx pos; /* Always counts from low bit. */
7478 int len, inner_len;
7479 rtx mask, cleared, masked;
7480 scalar_int_mode compute_mode;
7481
7482 /* Loop until we find something we can't simplify. */
7483 while (1)
7484 {
7485 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7486 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7487 {
7488 rtx x0 = XEXP (SET_DEST (x), 0);
7489 if (!GET_MODE_PRECISION (GET_MODE (x0)).is_constant (&len))
7490 break;
7491 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7492 pos = gen_int_mode (subreg_lsb (XEXP (SET_DEST (x), 0)),
7493 MAX_MODE_INT);
7494 }
7495 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7496 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7497 {
7498 inner = XEXP (SET_DEST (x), 0);
7499 if (!GET_MODE_PRECISION (GET_MODE (inner)).is_constant (&inner_len))
7500 break;
7501
7502 len = INTVAL (XEXP (SET_DEST (x), 1));
7503 pos = XEXP (SET_DEST (x), 2);
7504
7505 /* A constant position should stay within the width of INNER. */
7506 if (CONST_INT_P (pos) && INTVAL (pos) + len > inner_len)
7507 break;
7508
7509 if (BITS_BIG_ENDIAN)
7510 {
7511 if (CONST_INT_P (pos))
7512 pos = GEN_INT (inner_len - len - INTVAL (pos));
7513 else if (GET_CODE (pos) == MINUS
7514 && CONST_INT_P (XEXP (pos, 1))
7515 && INTVAL (XEXP (pos, 1)) == inner_len - len)
7516 /* If position is ADJUST - X, new position is X. */
7517 pos = XEXP (pos, 0);
7518 else
7519 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7520 gen_int_mode (inner_len - len,
7521 GET_MODE (pos)),
7522 pos);
7523 }
7524 }
7525
7526 /* If the destination is a subreg that overwrites the whole of the inner
7527 register, we can move the subreg to the source. */
7528 else if (GET_CODE (SET_DEST (x)) == SUBREG
7529 /* We need SUBREGs to compute nonzero_bits properly. */
7530 && nonzero_sign_valid
7531 && !read_modify_subreg_p (SET_DEST (x)))
7532 {
7533 x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7534 gen_lowpart
7535 (GET_MODE (SUBREG_REG (SET_DEST (x))),
7536 SET_SRC (x)));
7537 continue;
7538 }
7539 else
7540 break;
7541
7542 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7543 inner = SUBREG_REG (inner);
7544
7545 /* Don't attempt bitwise arithmetic on non-scalar integer modes. */
7546 if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
7547 {
7548 /* Don't do anything for vector or complex integral types. */
7549 if (! FLOAT_MODE_P (GET_MODE (inner)))
7550 break;
7551
7552 /* Try to find an integral mode to pun with. */
7553 if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
7554 .exists (&compute_mode))
7555 break;
7556
7557 inner = gen_lowpart (compute_mode, inner);
7558 }
7559
7560 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7561 if (len >= HOST_BITS_PER_WIDE_INT)
7562 break;
7563
7564 /* Don't try to compute in too wide unsupported modes. */
7565 if (!targetm.scalar_mode_supported_p (compute_mode))
7566 break;
7567
7568 /* Now compute the equivalent expression. Make a copy of INNER
7569 for the SET_DEST in case it is a MEM into which we will substitute;
7570 we don't want shared RTL in that case. */
7571 mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
7572 compute_mode);
7573 cleared = simplify_gen_binary (AND, compute_mode,
7574 simplify_gen_unary (NOT, compute_mode,
7575 simplify_gen_binary (ASHIFT,
7576 compute_mode,
7577 mask, pos),
7578 compute_mode),
7579 inner);
7580 masked = simplify_gen_binary (ASHIFT, compute_mode,
7581 simplify_gen_binary (
7582 AND, compute_mode,
7583 gen_lowpart (compute_mode, SET_SRC (x)),
7584 mask),
7585 pos);
7586
7587 x = gen_rtx_SET (copy_rtx (inner),
7588 simplify_gen_binary (IOR, compute_mode,
7589 cleared, masked));
7590 }
7591
7592 return x;
7593 }
7594 \f
7595 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7596 it is an RTX that represents the (variable) starting position; otherwise,
7597 POS is the (constant) starting bit position. Both are counted from the LSB.
7598
7599 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7600
7601 IN_DEST is nonzero if this is a reference in the destination of a SET.
7602 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7603 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7604 be used.
7605
7606 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7607 ZERO_EXTRACT should be built even for bits starting at bit 0.
7608
7609 MODE is the desired mode of the result (if IN_DEST == 0).
7610
7611 The result is an RTX for the extraction or NULL_RTX if the target
7612 can't handle it. */
7613
7614 static rtx
7615 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7616 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7617 int in_dest, int in_compare)
7618 {
7619 /* This mode describes the size of the storage area
7620 to fetch the overall value from. Within that, we
7621 ignore the POS lowest bits, etc. */
7622 machine_mode is_mode = GET_MODE (inner);
7623 machine_mode inner_mode;
7624 scalar_int_mode wanted_inner_mode;
7625 scalar_int_mode wanted_inner_reg_mode = word_mode;
7626 scalar_int_mode pos_mode = word_mode;
7627 machine_mode extraction_mode = word_mode;
7628 rtx new_rtx = 0;
7629 rtx orig_pos_rtx = pos_rtx;
7630 HOST_WIDE_INT orig_pos;
7631
7632 if (pos_rtx && CONST_INT_P (pos_rtx))
7633 pos = INTVAL (pos_rtx), pos_rtx = 0;
7634
7635 if (GET_CODE (inner) == SUBREG
7636 && subreg_lowpart_p (inner)
7637 && (paradoxical_subreg_p (inner)
7638 /* If trying or potentially trying to extract
7639 bits outside of is_mode, don't look through
7640 non-paradoxical SUBREGs. See PR82192. */
7641 || (pos_rtx == NULL_RTX
7642 && known_le (pos + len, GET_MODE_PRECISION (is_mode)))))
7643 {
7644 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7645 consider just the QI as the memory to extract from.
7646 The subreg adds or removes high bits; its mode is
7647 irrelevant to the meaning of this extraction,
7648 since POS and LEN count from the lsb. */
7649 if (MEM_P (SUBREG_REG (inner)))
7650 is_mode = GET_MODE (SUBREG_REG (inner));
7651 inner = SUBREG_REG (inner);
7652 }
7653 else if (GET_CODE (inner) == ASHIFT
7654 && CONST_INT_P (XEXP (inner, 1))
7655 && pos_rtx == 0 && pos == 0
7656 && len > UINTVAL (XEXP (inner, 1)))
7657 {
7658 /* We're extracting the least significant bits of an rtx
7659 (ashift X (const_int C)), where LEN > C. Extract the
7660 least significant (LEN - C) bits of X, giving an rtx
7661 whose mode is MODE, then shift it left C times. */
7662 new_rtx = make_extraction (mode, XEXP (inner, 0),
7663 0, 0, len - INTVAL (XEXP (inner, 1)),
7664 unsignedp, in_dest, in_compare);
7665 if (new_rtx != 0)
7666 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7667 }
7668 else if (GET_CODE (inner) == TRUNCATE
7669 /* If trying or potentially trying to extract
7670 bits outside of is_mode, don't look through
7671 TRUNCATE. See PR82192. */
7672 && pos_rtx == NULL_RTX
7673 && known_le (pos + len, GET_MODE_PRECISION (is_mode)))
7674 inner = XEXP (inner, 0);
7675
7676 inner_mode = GET_MODE (inner);
7677
7678 /* See if this can be done without an extraction. We never can if the
7679 width of the field is not the same as that of some integer mode. For
7680 registers, we can only avoid the extraction if the position is at the
7681 low-order bit and this is either not in the destination or we have the
7682 appropriate STRICT_LOW_PART operation available.
7683
7684 For MEM, we can avoid an extract if the field starts on an appropriate
7685 boundary and we can change the mode of the memory reference. */
7686
7687 scalar_int_mode tmode;
7688 if (int_mode_for_size (len, 1).exists (&tmode)
7689 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7690 && !MEM_P (inner)
7691 && (pos == 0 || REG_P (inner))
7692 && (inner_mode == tmode
7693 || !REG_P (inner)
7694 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7695 || reg_truncated_to_mode (tmode, inner))
7696 && (! in_dest
7697 || (REG_P (inner)
7698 && have_insn_for (STRICT_LOW_PART, tmode))))
7699 || (MEM_P (inner) && pos_rtx == 0
7700 && (pos
7701 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7702 : BITS_PER_UNIT)) == 0
7703 /* We can't do this if we are widening INNER_MODE (it
7704 may not be aligned, for one thing). */
7705 && !paradoxical_subreg_p (tmode, inner_mode)
7706 && known_le (pos + len, GET_MODE_PRECISION (is_mode))
7707 && (inner_mode == tmode
7708 || (! mode_dependent_address_p (XEXP (inner, 0),
7709 MEM_ADDR_SPACE (inner))
7710 && ! MEM_VOLATILE_P (inner))))))
7711 {
7712 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7713 field. If the original and current modes are the same, we need not
7714 adjust the offset. Otherwise, we do so when bytes are big endian.
7715
7716 If INNER is not a MEM, get a piece consisting of just the field
7717 of interest (in this case POS % BITS_PER_WORD must be 0). */
7718
7719 if (MEM_P (inner))
7720 {
7721 poly_int64 offset;
7722
7723 /* POS counts from lsb, but make OFFSET count in memory order. */
7724 if (BYTES_BIG_ENDIAN)
7725 offset = bits_to_bytes_round_down (GET_MODE_PRECISION (is_mode)
7726 - len - pos);
7727 else
7728 offset = pos / BITS_PER_UNIT;
7729
7730 new_rtx = adjust_address_nv (inner, tmode, offset);
7731 }
7732 else if (REG_P (inner))
7733 {
7734 if (tmode != inner_mode)
7735 {
7736 /* We can't call gen_lowpart in a DEST since we
7737 always want a SUBREG (see below) and it would sometimes
7738 return a new hard register. */
7739 if (pos || in_dest)
7740 {
7741 poly_uint64 offset
7742 = subreg_offset_from_lsb (tmode, inner_mode, pos);
7743
7744 /* Avoid creating invalid subregs, for example when
7745 simplifying (x>>32)&255. */
7746 if (!validate_subreg (tmode, inner_mode, inner, offset))
7747 return NULL_RTX;
7748
7749 new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
7750 }
7751 else
7752 new_rtx = gen_lowpart (tmode, inner);
7753 }
7754 else
7755 new_rtx = inner;
7756 }
7757 else
7758 new_rtx = force_to_mode (inner, tmode,
7759 len >= HOST_BITS_PER_WIDE_INT
7760 ? HOST_WIDE_INT_M1U
7761 : (HOST_WIDE_INT_1U << len) - 1, 0);
7762
7763 /* If this extraction is going into the destination of a SET,
7764 make a STRICT_LOW_PART unless we made a MEM. */
7765
7766 if (in_dest)
7767 return (MEM_P (new_rtx) ? new_rtx
7768 : (GET_CODE (new_rtx) != SUBREG
7769 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7770 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7771
7772 if (mode == tmode)
7773 return new_rtx;
7774
7775 if (CONST_SCALAR_INT_P (new_rtx))
7776 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7777 mode, new_rtx, tmode);
7778
7779 /* If we know that no extraneous bits are set, and that the high
7780 bit is not set, convert the extraction to the cheaper of
7781 sign and zero extension, which are equivalent in these cases. */
7782 if (flag_expensive_optimizations
7783 && (HWI_COMPUTABLE_MODE_P (tmode)
7784 && ((nonzero_bits (new_rtx, tmode)
7785 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7786 == 0)))
7787 {
7788 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7789 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7790
7791 /* Prefer ZERO_EXTENSION, since it gives more information to
7792 backends. */
7793 if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7794 <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7795 return temp;
7796 return temp1;
7797 }
7798
7799 /* Otherwise, sign- or zero-extend unless we already are in the
7800 proper mode. */
7801
7802 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7803 mode, new_rtx));
7804 }
7805
7806 /* Unless this is a COMPARE or we have a funny memory reference,
7807 don't do anything with zero-extending field extracts starting at
7808 the low-order bit since they are simple AND operations. */
7809 if (pos_rtx == 0 && pos == 0 && ! in_dest
7810 && ! in_compare && unsignedp)
7811 return 0;
7812
7813 /* If INNER is a MEM, reject this if we would be spanning bytes or
7814 if the position is not a constant and the length is not 1. In all
7815 other cases, we would only be going outside our object in cases when
7816 an original shift would have been undefined. */
7817 if (MEM_P (inner)
7818 && ((pos_rtx == 0 && maybe_gt (pos + len, GET_MODE_PRECISION (is_mode)))
7819 || (pos_rtx != 0 && len != 1)))
7820 return 0;
7821
7822 enum extraction_pattern pattern = (in_dest ? EP_insv
7823 : unsignedp ? EP_extzv : EP_extv);
7824
7825 /* If INNER is not from memory, we want it to have the mode of a register
7826 extraction pattern's structure operand, or word_mode if there is no
7827 such pattern. The same applies to extraction_mode and pos_mode
7828 and their respective operands.
7829
7830 For memory, assume that the desired extraction_mode and pos_mode
7831 are the same as for a register operation, since at present we don't
7832 have named patterns for aligned memory structures. */
7833 struct extraction_insn insn;
7834 unsigned int inner_size;
7835 if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
7836 && get_best_reg_extraction_insn (&insn, pattern, inner_size, mode))
7837 {
7838 wanted_inner_reg_mode = insn.struct_mode.require ();
7839 pos_mode = insn.pos_mode;
7840 extraction_mode = insn.field_mode;
7841 }
7842
7843 /* Never narrow an object, since that might not be safe. */
7844
7845 if (mode != VOIDmode
7846 && partial_subreg_p (extraction_mode, mode))
7847 extraction_mode = mode;
7848
7849 /* Punt if len is too large for extraction_mode. */
7850 if (maybe_gt (len, GET_MODE_PRECISION (extraction_mode)))
7851 return NULL_RTX;
7852
7853 if (!MEM_P (inner))
7854 wanted_inner_mode = wanted_inner_reg_mode;
7855 else
7856 {
7857 /* Be careful not to go beyond the extracted object and maintain the
7858 natural alignment of the memory. */
7859 wanted_inner_mode = smallest_int_mode_for_size (len);
7860 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7861 > GET_MODE_BITSIZE (wanted_inner_mode))
7862 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
7863 }
7864
7865 orig_pos = pos;
7866
7867 if (BITS_BIG_ENDIAN)
7868 {
7869 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7870 BITS_BIG_ENDIAN style. If position is constant, compute new
7871 position. Otherwise, build subtraction.
7872 Note that POS is relative to the mode of the original argument.
7873 If it's a MEM we need to recompute POS relative to that.
7874 However, if we're extracting from (or inserting into) a register,
7875 we want to recompute POS relative to wanted_inner_mode. */
7876 int width;
7877 if (!MEM_P (inner))
7878 width = GET_MODE_BITSIZE (wanted_inner_mode);
7879 else if (!GET_MODE_BITSIZE (is_mode).is_constant (&width))
7880 return NULL_RTX;
7881
7882 if (pos_rtx == 0)
7883 pos = width - len - pos;
7884 else
7885 pos_rtx
7886 = gen_rtx_MINUS (GET_MODE (pos_rtx),
7887 gen_int_mode (width - len, GET_MODE (pos_rtx)),
7888 pos_rtx);
7889 /* POS may be less than 0 now, but we check for that below.
7890 Note that it can only be less than 0 if !MEM_P (inner). */
7891 }
7892
7893 /* If INNER has a wider mode, and this is a constant extraction, try to
7894 make it smaller and adjust the offset to point to the byte containing
7895 the value. */
7896 if (wanted_inner_mode != VOIDmode
7897 && inner_mode != wanted_inner_mode
7898 && ! pos_rtx
7899 && partial_subreg_p (wanted_inner_mode, is_mode)
7900 && MEM_P (inner)
7901 && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7902 && ! MEM_VOLATILE_P (inner))
7903 {
7904 poly_int64 offset = 0;
7905
7906 /* The computations below will be correct if the machine is big
7907 endian in both bits and bytes or little endian in bits and bytes.
7908 If it is mixed, we must adjust. */
7909
7910 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7911 adjust OFFSET to compensate. */
7912 if (BYTES_BIG_ENDIAN
7913 && paradoxical_subreg_p (is_mode, inner_mode))
7914 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7915
7916 /* We can now move to the desired byte. */
7917 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7918 * GET_MODE_SIZE (wanted_inner_mode);
7919 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7920
7921 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7922 && is_mode != wanted_inner_mode)
7923 offset = (GET_MODE_SIZE (is_mode)
7924 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7925
7926 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7927 }
7928
7929 /* If INNER is not memory, get it into the proper mode. If we are changing
7930 its mode, POS must be a constant and smaller than the size of the new
7931 mode. */
7932 else if (!MEM_P (inner))
7933 {
7934 /* On the LHS, don't create paradoxical subregs implicitly truncating
7935 the register unless TARGET_TRULY_NOOP_TRUNCATION. */
7936 if (in_dest
7937 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7938 wanted_inner_mode))
7939 return NULL_RTX;
7940
7941 if (GET_MODE (inner) != wanted_inner_mode
7942 && (pos_rtx != 0
7943 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7944 return NULL_RTX;
7945
7946 if (orig_pos < 0)
7947 return NULL_RTX;
7948
7949 inner = force_to_mode (inner, wanted_inner_mode,
7950 pos_rtx
7951 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7952 ? HOST_WIDE_INT_M1U
7953 : (((HOST_WIDE_INT_1U << len) - 1)
7954 << orig_pos),
7955 0);
7956 }
7957
7958 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7959 have to zero extend. Otherwise, we can just use a SUBREG.
7960
7961 We dealt with constant rtxes earlier, so pos_rtx cannot
7962 have VOIDmode at this point. */
7963 if (pos_rtx != 0
7964 && (GET_MODE_SIZE (pos_mode)
7965 > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
7966 {
7967 rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7968 GET_MODE (pos_rtx));
7969
7970 /* If we know that no extraneous bits are set, and that the high
7971 bit is not set, convert the extraction to the cheaper of
7972 SIGN_EXTENSION and ZERO_EXTENSION, which are equivalent in these
7973 cases. */
7974 if (flag_expensive_optimizations
7975 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7976 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7977 & ~(((unsigned HOST_WIDE_INT)
7978 GET_MODE_MASK (GET_MODE (pos_rtx)))
7979 >> 1))
7980 == 0)))
7981 {
7982 rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7983 GET_MODE (pos_rtx));
7984
7985 /* Prefer ZERO_EXTENSION, since it gives more information to
7986 backends. */
7987 if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7988 < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7989 temp = temp1;
7990 }
7991 pos_rtx = temp;
7992 }
7993
7994 /* Make POS_RTX unless we already have it and it is correct. If we don't
7995 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7996 be a CONST_INT. */
7997 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7998 pos_rtx = orig_pos_rtx;
7999
8000 else if (pos_rtx == 0)
8001 pos_rtx = GEN_INT (pos);
8002
8003 /* Make the required operation. See if we can use an existing rtx. */
8004 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
8005 extraction_mode, inner, GEN_INT (len), pos_rtx);
8006 if (! in_dest)
8007 new_rtx = gen_lowpart (mode, new_rtx);
8008
8009 return new_rtx;
8010 }
8011 \f
8012 /* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
8013 can be commuted with any other operations in X. Return X without
8014 that shift if so. */
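/* For example (a sketch): with COUNT == 2,
   (plus (ashift Y (const_int 3)) (const_int 4)) yields
   (plus (ashift Y (const_int 1)) (const_int 1)); shifting the result
   left by COUNT reconstructs the original value.  */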
8015
8016 static rtx
8017 extract_left_shift (scalar_int_mode mode, rtx x, int count)
8018 {
8019 enum rtx_code code = GET_CODE (x);
8020 rtx tem;
8021
8022 switch (code)
8023 {
8024 case ASHIFT:
8025 /* This is the shift itself. If it is wide enough, we will return
8026 either the value being shifted if the shift count is equal to
8027 COUNT or a shift for the difference. */
8028 if (CONST_INT_P (XEXP (x, 1))
8029 && INTVAL (XEXP (x, 1)) >= count)
8030 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
8031 INTVAL (XEXP (x, 1)) - count);
8032 break;
8033
8034 case NEG: case NOT:
8035 if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
8036 return simplify_gen_unary (code, mode, tem, mode);
8037
8038 break;
8039
8040 case PLUS: case IOR: case XOR: case AND:
8041 /* If we can safely shift this constant and we find the inner shift,
8042 make a new operation. */
8043 if (CONST_INT_P (XEXP (x, 1))
8044 && (UINTVAL (XEXP (x, 1))
8045 & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
8046 && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
8047 {
8048 HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
8049 return simplify_gen_binary (code, mode, tem,
8050 gen_int_mode (val, mode));
8051 }
8052 break;
8053
8054 default:
8055 break;
8056 }
8057
8058 return 0;
8059 }
8060 \f
8061 /* Subroutine of make_compound_operation. *X_PTR is the rtx at the current
8062 level of the expression and MODE is its mode. IN_CODE is as for
8063 make_compound_operation. *NEXT_CODE_PTR is the value of IN_CODE
8064 that should be used when recursing on operands of *X_PTR.
8065
8066 There are two possible actions:
8067
8068 - Return null. This tells the caller to recurse on *X_PTR with IN_CODE
8069 equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.
8070
8071 - Return a new rtx, which the caller returns directly. */
8072
8073 static rtx
8074 make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
8075 enum rtx_code in_code,
8076 enum rtx_code *next_code_ptr)
8077 {
8078 rtx x = *x_ptr;
8079 enum rtx_code next_code = *next_code_ptr;
8080 enum rtx_code code = GET_CODE (x);
8081 int mode_width = GET_MODE_PRECISION (mode);
8082 rtx rhs, lhs;
8083 rtx new_rtx = 0;
8084 int i;
8085 rtx tem;
8086 scalar_int_mode inner_mode;
8087 bool equality_comparison = false;
8088
8089 if (in_code == EQ)
8090 {
8091 equality_comparison = true;
8092 in_code = COMPARE;
8093 }
8094
8095 /* Process depending on the code of this operation. If NEW_RTX is set
8096 nonzero, it will be returned. */
8097
8098 switch (code)
8099 {
8100 case ASHIFT:
8101 /* Convert shifts by constants into multiplications if inside
8102 an address. */
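      /* E.g. (a sketch): inside a MEM address,
	 (ashift X (const_int 2)) becomes (mult X (const_int 4)).  */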
8103 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
8104 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8105 && INTVAL (XEXP (x, 1)) >= 0)
8106 {
8107 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
8108 HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;
8109
8110 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8111 if (GET_CODE (new_rtx) == NEG)
8112 {
8113 new_rtx = XEXP (new_rtx, 0);
8114 multval = -multval;
8115 }
8116 multval = trunc_int_for_mode (multval, mode);
8117 new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
8118 }
8119 break;
8120
8121 case PLUS:
8122 lhs = XEXP (x, 0);
8123 rhs = XEXP (x, 1);
8124 lhs = make_compound_operation (lhs, next_code);
8125 rhs = make_compound_operation (rhs, next_code);
8126 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
8127 {
8128 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
8129 XEXP (lhs, 1));
8130 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
8131 }
8132 else if (GET_CODE (lhs) == MULT
8133 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
8134 {
8135 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
8136 simplify_gen_unary (NEG, mode,
8137 XEXP (lhs, 1),
8138 mode));
8139 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
8140 }
8141 else
8142 {
8143 SUBST (XEXP (x, 0), lhs);
8144 SUBST (XEXP (x, 1), rhs);
8145 }
8146 maybe_swap_commutative_operands (x);
8147 return x;
8148
8149 case MINUS:
8150 lhs = XEXP (x, 0);
8151 rhs = XEXP (x, 1);
8152 lhs = make_compound_operation (lhs, next_code);
8153 rhs = make_compound_operation (rhs, next_code);
8154 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
8155 {
8156 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
8157 XEXP (rhs, 1));
8158 return simplify_gen_binary (PLUS, mode, tem, lhs);
8159 }
8160 else if (GET_CODE (rhs) == MULT
8161 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
8162 {
8163 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
8164 simplify_gen_unary (NEG, mode,
8165 XEXP (rhs, 1),
8166 mode));
8167 return simplify_gen_binary (PLUS, mode, tem, lhs);
8168 }
8169 else
8170 {
8171 SUBST (XEXP (x, 0), lhs);
8172 SUBST (XEXP (x, 1), rhs);
8173 return x;
8174 }
8175
8176 case AND:
8177 /* If the second operand is not a constant, we can't do anything
8178 with it. */
8179 if (!CONST_INT_P (XEXP (x, 1)))
8180 break;
8181
8182 /* If the constant is a power of two minus one and the first operand
8183 is a logical right shift, make an extraction. */
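      /* E.g. (a sketch): (and (lshiftrt X (const_int 8)) (const_int 255))
	 becomes (zero_extract X (const_int 8) (const_int 8)), i.e. an
	 extraction of 8 bits starting at bit 8.  */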
8184 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8185 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8186 {
8187 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8188 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
8189 i, 1, 0, in_code == COMPARE);
8190 }
8191
8192 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
8193 else if (GET_CODE (XEXP (x, 0)) == SUBREG
8194 && subreg_lowpart_p (XEXP (x, 0))
8195 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
8196 &inner_mode)
8197 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
8198 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8199 {
8200 rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
8201 new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
8202 new_rtx = make_extraction (inner_mode, new_rtx, 0,
8203 XEXP (inner_x0, 1),
8204 i, 1, 0, in_code == COMPARE);
8205
8206 /* If we narrowed the mode when dropping the subreg, then we lose. */
8207 if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
8208 new_rtx = NULL;
8209
8210 /* If that didn't give anything, see if the AND simplifies on
8211 its own. */
8212 if (!new_rtx && i >= 0)
8213 {
8214 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8215 new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
8216 0, in_code == COMPARE);
8217 }
8218 }
8219 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
8220 else if ((GET_CODE (XEXP (x, 0)) == XOR
8221 || GET_CODE (XEXP (x, 0)) == IOR)
8222 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
8223 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
8224 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8225 {
8226 /* Apply the distributive law, and then try to make extractions. */
8227 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
8228 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
8229 XEXP (x, 1)),
8230 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
8231 XEXP (x, 1)));
8232 new_rtx = make_compound_operation (new_rtx, in_code);
8233 }
8234
8235 /* If we have (and (rotate X C) M) and C is larger than the number
8236 of bits in M, this is an extraction. */
8237
8238 else if (GET_CODE (XEXP (x, 0)) == ROTATE
8239 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8240 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
8241 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
8242 {
8243 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8244 new_rtx = make_extraction (mode, new_rtx,
8245 (GET_MODE_PRECISION (mode)
8246 - INTVAL (XEXP (XEXP (x, 0), 1))),
8247 NULL_RTX, i, 1, 0, in_code == COMPARE);
8248 }
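/* Illustrative sketch of the ROTATE rule (values assumed, 32-bit
SImode): (and:SI (rotate:SI (reg:SI X) (const_int 28)) (const_int 15))
keeps original bits 4..7 of X, since a left rotate by 28 is a
right rotate by 4, so it typically becomes
(zero_extract:SI (reg:SI X) (const_int 4) (const_int 4)). */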
8249
8250 /* On machines without logical shifts, if the operand of the AND is
8251 a logical shift and our mask turns off all the propagated sign
8252 bits, we can replace the logical shift with an arithmetic shift. */
8253 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8254 && !have_insn_for (LSHIFTRT, mode)
8255 && have_insn_for (ASHIFTRT, mode)
8256 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8257 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8258 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8259 && mode_width <= HOST_BITS_PER_WIDE_INT)
8260 {
8261 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
8262
8263 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
8264 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
8265 SUBST (XEXP (x, 0),
8266 gen_rtx_ASHIFTRT (mode,
8267 make_compound_operation (XEXP (XEXP (x,
8268 0),
8269 0),
8270 next_code),
8271 XEXP (XEXP (x, 0), 1)));
8272 }
8273
8274 /* If the constant is one less than a power of two, this might be
8275 representable by an extraction even if no shift is present.
8276 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
8277 we are in a COMPARE. */
8278 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8279 new_rtx = make_extraction (mode,
8280 make_compound_operation (XEXP (x, 0),
8281 next_code),
8282 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
8283
8284 /* If we are in a comparison and this is an AND with a power of two,
8285 convert this into the appropriate bit extract. */
8286 else if (in_code == COMPARE
8287 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
8288 && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
8289 new_rtx = make_extraction (mode,
8290 make_compound_operation (XEXP (x, 0),
8291 next_code),
8292 i, NULL_RTX, 1, 1, 0, 1);
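/* Illustrative sketch (values assumed): within a comparison against
zero, (and:SI (reg:SI X) (const_int 8)) tests a single bit, so it
typically becomes
(zero_extract:SI (reg:SI X) (const_int 1) (const_int 3)),
extracting just bit 3. */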
8293
8294 /* If one operand is a paradoxical subreg of a register or memory and
8295 the constant (limited to the smaller mode) has only zero bits where
8296 the subexpression has known zero bits, this can be expressed as
8297 a zero_extend. */
8298 else if (GET_CODE (XEXP (x, 0)) == SUBREG)
8299 {
8300 rtx sub;
8301
8302 sub = XEXP (XEXP (x, 0), 0);
8303 machine_mode sub_mode = GET_MODE (sub);
8304 int sub_width;
8305 if ((REG_P (sub) || MEM_P (sub))
8306 && GET_MODE_PRECISION (sub_mode).is_constant (&sub_width)
8307 && sub_width < mode_width)
8308 {
8309 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
8310 unsigned HOST_WIDE_INT mask;
8311
8312 /* Original AND constant with all the known zero bits set. */
8313 mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
8314 if ((mask & mode_mask) == mode_mask)
8315 {
8316 new_rtx = make_compound_operation (sub, next_code);
8317 new_rtx = make_extraction (mode, new_rtx, 0, 0, sub_width,
8318 1, 0, in_code == COMPARE);
8319 }
8320 }
8321 }
8322
8323 break;
8324
8325 case LSHIFTRT:
8326 /* If the sign bit is known to be zero, replace this with an
8327 arithmetic shift. */
8328 if (have_insn_for (ASHIFTRT, mode)
8329 && ! have_insn_for (LSHIFTRT, mode)
8330 && mode_width <= HOST_BITS_PER_WIDE_INT
8331 && (nonzero_bits (XEXP (x, 0), mode) & (HOST_WIDE_INT_1U << (mode_width - 1))) == 0)
8332 {
8333 new_rtx = gen_rtx_ASHIFTRT (mode,
8334 make_compound_operation (XEXP (x, 0),
8335 next_code),
8336 XEXP (x, 1));
8337 break;
8338 }
8339
8340 /* fall through */
8341
8342 case ASHIFTRT:
8343 lhs = XEXP (x, 0);
8344 rhs = XEXP (x, 1);
8345
8346 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
8347 this is a SIGN_EXTRACT. */
8348 if (CONST_INT_P (rhs)
8349 && GET_CODE (lhs) == ASHIFT
8350 && CONST_INT_P (XEXP (lhs, 1))
8351 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
8352 && INTVAL (XEXP (lhs, 1)) >= 0
8353 && INTVAL (rhs) < mode_width)
8354 {
8355 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
8356 new_rtx = make_extraction (mode, new_rtx,
8357 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
8358 NULL_RTX, mode_width - INTVAL (rhs),
8359 code == LSHIFTRT, 0, in_code == COMPARE);
8360 break;
8361 }
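/* Illustrative sketch (values assumed, 32-bit SImode):
(ashiftrt:SI (ashift:SI (reg:SI X) (const_int 24)) (const_int 24))
sign-extends the low byte of X, so it typically becomes
(sign_extract:SI (reg:SI X) (const_int 8) (const_int 0)). */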
8362
8363 /* See if we have operations between an ASHIFTRT and an ASHIFT.
8364 If so, try to merge the shifts into a SIGN_EXTEND. We could
8365 also do this for some cases of SIGN_EXTRACT, but it doesn't
8366 seem worth the effort; the case checked for occurs on Alpha. */
8367
8368 if (!OBJECT_P (lhs)
8369 && ! (GET_CODE (lhs) == SUBREG
8370 && (OBJECT_P (SUBREG_REG (lhs))))
8371 && CONST_INT_P (rhs)
8372 && INTVAL (rhs) >= 0
8373 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8374 && INTVAL (rhs) < mode_width
8375 && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
8376 new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
8377 next_code),
8378 0, NULL_RTX, mode_width - INTVAL (rhs),
8379 code == LSHIFTRT, 0, in_code == COMPARE);
8380
8381 break;
8382
8383 case SUBREG:
8384 /* Call ourselves recursively on the inner expression. If we are
8385 narrowing the object and it has a different RTL code from
8386 what it originally did, do this SUBREG as a force_to_mode. */
8387 {
8388 rtx inner = SUBREG_REG (x), simplified;
8389 enum rtx_code subreg_code = in_code;
8390
8391 /* If the SUBREG is a narrowing lowpart of a logical right shift,
8392 make an extraction. */
8393 if (GET_CODE (inner) == LSHIFTRT
8394 && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
8395 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
8396 && CONST_INT_P (XEXP (inner, 1))
8397 && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
8398 && subreg_lowpart_p (x))
8399 {
8400 new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
8401 int width = GET_MODE_PRECISION (inner_mode)
8402 - INTVAL (XEXP (inner, 1));
8403 if (width > mode_width)
8404 width = mode_width;
8405 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
8406 width, 1, 0, in_code == COMPARE);
8407 break;
8408 }
8409
8410 /* If in_code is COMPARE, it isn't always safe to pass it through
8411 to the recursive make_compound_operation call. */
8412 if (subreg_code == COMPARE
8413 && (!subreg_lowpart_p (x)
8414 || GET_CODE (inner) == SUBREG
8415 /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
8416 is (const_int 0), rather than
8417 (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
8418 Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
8419 for non-equality comparisons against 0 is not equivalent
8420 to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0). */
8421 || (GET_CODE (inner) == AND
8422 && CONST_INT_P (XEXP (inner, 1))
8423 && partial_subreg_p (x)
8424 && exact_log2 (UINTVAL (XEXP (inner, 1)))
8425 >= GET_MODE_BITSIZE (mode) - 1)))
8426 subreg_code = SET;
8427
8428 tem = make_compound_operation (inner, subreg_code);
8429
8430 simplified
8431 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8432 if (simplified)
8433 tem = simplified;
8434
8435 if (GET_CODE (tem) != GET_CODE (inner)
8436 && partial_subreg_p (x)
8437 && subreg_lowpart_p (x))
8438 {
8439 rtx newer
8440 = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
8441
8442 /* If we have something other than a SUBREG, we might have
8443 done an expansion, so rerun ourselves. */
8444 if (GET_CODE (newer) != SUBREG)
8445 newer = make_compound_operation (newer, in_code);
8446
8447 /* force_to_mode can expand compounds. If it just re-expanded
8448 the compound, use gen_lowpart to convert to the desired
8449 mode. */
8450 if (rtx_equal_p (newer, x)
8451 /* Likewise if it re-expanded the compound only partially.
8452 This happens for SUBREG of ZERO_EXTRACT if they extract
8453 the same number of bits. */
8454 || (GET_CODE (newer) == SUBREG
8455 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
8456 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
8457 && GET_CODE (inner) == AND
8458 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8459 return gen_lowpart (GET_MODE (x), tem);
8460
8461 return newer;
8462 }
8463
8464 if (simplified)
8465 return tem;
8466 }
8467 break;
8468
8469 default:
8470 break;
8471 }
8472
8473 if (new_rtx)
8474 *x_ptr = gen_lowpart (mode, new_rtx);
8475 *next_code_ptr = next_code;
8476 return NULL_RTX;
8477 }
8478
8479 /* Look at the expression rooted at X. Look for expressions
8480 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
8481 Form these expressions.
8482
8483 Return the new rtx, usually just X.
8484
8485 Also, for machines like the VAX that don't have logical shift insns,
8486 try to convert logical to arithmetic shift operations in cases where
8487 they are equivalent. This undoes the canonicalizations to logical
8488 shifts done elsewhere.
8489
8490 We try, as much as possible, to re-use rtl expressions to save memory.
8491
8492 IN_CODE says what kind of expression we are processing. Normally, it is
8493 SET. In a memory address it is MEM. When processing the arguments of
8494 a comparison or a COMPARE against zero, it is COMPARE (or EQ if,
8495 more precisely, it is an equality comparison against zero). */
8496
8497 rtx
8498 make_compound_operation (rtx x, enum rtx_code in_code)
8499 {
8500 enum rtx_code code = GET_CODE (x);
8501 const char *fmt;
8502 int i, j;
8503 enum rtx_code next_code;
8504 rtx new_rtx, tem;
8505
8506 /* Select the code to be used in recursive calls. Once we are inside an
8507 address, we stay there. If we have a comparison, set to COMPARE,
8508 but once inside, go back to our default of SET. */
8509
8510 next_code = (code == MEM ? MEM
8511 : ((code == COMPARE || COMPARISON_P (x))
8512 && XEXP (x, 1) == const0_rtx) ? COMPARE
8513 : in_code == COMPARE || in_code == EQ ? SET : in_code);
8514
8515 scalar_int_mode mode;
8516 if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
8517 {
8518 rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
8519 &next_code);
8520 if (new_rtx)
8521 return new_rtx;
8522 code = GET_CODE (x);
8523 }
8524
8525 /* Now recursively process each operand of this operation. We need to
8526 handle ZERO_EXTEND specially so that we don't lose track of the
8527 inner mode. */
8528 if (code == ZERO_EXTEND)
8529 {
8530 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8531 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8532 new_rtx, GET_MODE (XEXP (x, 0)));
8533 if (tem)
8534 return tem;
8535 SUBST (XEXP (x, 0), new_rtx);
8536 return x;
8537 }
8538
8539 fmt = GET_RTX_FORMAT (code);
8540 for (i = 0; i < GET_RTX_LENGTH (code); i++)
8541 if (fmt[i] == 'e')
8542 {
8543 new_rtx = make_compound_operation (XEXP (x, i), next_code);
8544 SUBST (XEXP (x, i), new_rtx);
8545 }
8546 else if (fmt[i] == 'E')
8547 for (j = 0; j < XVECLEN (x, i); j++)
8548 {
8549 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8550 SUBST (XVECEXP (x, i, j), new_rtx);
8551 }
8552
8553 maybe_swap_commutative_operands (x);
8554 return x;
8555 }
8556 \f
8557 /* Given M, see if it is a value that would select a field of bits
8558 within an item, but not the entire word. Return -1 if not.
8559 Otherwise, return the starting position of the field, where 0 is the
8560 low-order bit.
8561
8562 *PLEN is set to the length of the field. */
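/* For example (values assumed, not taken from the code below):
get_pos_from_mask (0x0ff0, &len) returns 4 and sets len to 8,
since the mask selects an 8-bit field starting at bit 4; a
non-contiguous mask such as 0x0a0a returns -1. */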
8563
8564 static int
8565 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8566 {
8567 /* Get the bit number of the first 1 bit from the right, -1 if none. */
8568 int pos = m ? ctz_hwi (m) : -1;
8569 int len = 0;
8570
8571 if (pos >= 0)
8572 /* Now shift off the low-order zero bits and see if we have a
8573 power of two minus 1. */
8574 len = exact_log2 ((m >> pos) + 1);
8575
8576 if (len <= 0)
8577 pos = -1;
8578
8579 *plen = len;
8580 return pos;
8581 }
8582 \f
8583 /* If X refers to a register that equals REG in value, replace these
8584 references with REG. */
8585 static rtx
8586 canon_reg_for_combine (rtx x, rtx reg)
8587 {
8588 rtx op0, op1, op2;
8589 const char *fmt;
8590 int i;
8591 bool copied;
8592
8593 enum rtx_code code = GET_CODE (x);
8594 switch (GET_RTX_CLASS (code))
8595 {
8596 case RTX_UNARY:
8597 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8598 if (op0 != XEXP (x, 0))
8599 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8600 GET_MODE (reg));
8601 break;
8602
8603 case RTX_BIN_ARITH:
8604 case RTX_COMM_ARITH:
8605 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8606 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8607 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8608 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8609 break;
8610
8611 case RTX_COMPARE:
8612 case RTX_COMM_COMPARE:
8613 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8614 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8615 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8616 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8617 GET_MODE (op0), op0, op1);
8618 break;
8619
8620 case RTX_TERNARY:
8621 case RTX_BITFIELD_OPS:
8622 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8623 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8624 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8625 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8626 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8627 GET_MODE (op0), op0, op1, op2);
8628 /* FALLTHRU */
8629
8630 case RTX_OBJ:
8631 if (REG_P (x))
8632 {
8633 if (rtx_equal_p (get_last_value (reg), x)
8634 || rtx_equal_p (reg, get_last_value (x)))
8635 return reg;
8636 else
8637 break;
8638 }
8639
8640 /* fall through */
8641
8642 default:
8643 fmt = GET_RTX_FORMAT (code);
8644 copied = false;
8645 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8646 if (fmt[i] == 'e')
8647 {
8648 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8649 if (op != XEXP (x, i))
8650 {
8651 if (!copied)
8652 {
8653 copied = true;
8654 x = copy_rtx (x);
8655 }
8656 XEXP (x, i) = op;
8657 }
8658 }
8659 else if (fmt[i] == 'E')
8660 {
8661 int j;
8662 for (j = 0; j < XVECLEN (x, i); j++)
8663 {
8664 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8665 if (op != XVECEXP (x, i, j))
8666 {
8667 if (!copied)
8668 {
8669 copied = true;
8670 x = copy_rtx (x);
8671 }
8672 XVECEXP (x, i, j) = op;
8673 }
8674 }
8675 }
8676
8677 break;
8678 }
8679
8680 return x;
8681 }
8682
8683 /* Return X converted to MODE. If the value is already truncated to
8684 MODE we can just return a subreg even though in the general case we
8685 would need an explicit truncation. */
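/* For instance (assumed target behavior): narrowing a DImode value
to SImode yields (truncate:SI X) when that truncation is not a
no-op on the target, and a plain lowpart SUBREG when it is. */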
8686
8687 static rtx
8688 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8689 {
8690 if (!CONST_INT_P (x)
8691 && partial_subreg_p (mode, GET_MODE (x))
8692 && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8693 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8694 {
8695 /* Bit-cast X into an integer mode. */
8696 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8697 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
8698 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
8699 x, GET_MODE (x));
8700 }
8701
8702 return gen_lowpart (mode, x);
8703 }
8704
8705 /* See if X can be simplified knowing that we will only refer to it in
8706 MODE and will only refer to those bits that are nonzero in MASK.
8707 If other bits are being computed or if masking operations are done
8708 that select a superset of the bits in MASK, they can sometimes be
8709 ignored.
8710
8711 Return a possibly simplified expression, but always convert X to
8712 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
8713
8714 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8715 are all off in X. This is used when X will be complemented, by either
8716 NOT, NEG, or XOR. */
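/* A hedged example (values assumed): with a MASK of 0x0f,
(and X (const_int 0xff)) can be replaced by X itself, since the
caller will never look at any bit that the AND could clear. */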
8717
8718 static rtx
8719 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8720 int just_select)
8721 {
8722 enum rtx_code code = GET_CODE (x);
8723 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8724 machine_mode op_mode;
8725 unsigned HOST_WIDE_INT nonzero;
8726
8727 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
8728 code below will do the wrong thing since the mode of such an
8729 expression is VOIDmode.
8730
8731 Also do nothing if X is a CLOBBER; this can happen if X was
8732 the return value from a call to gen_lowpart. */
8733 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8734 return x;
8735
8736 /* We want to perform the operation in its present mode unless we know
8737 that the operation is valid in MODE, in which case we do the operation
8738 in MODE. */
8739 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8740 && have_insn_for (code, mode))
8741 ? mode : GET_MODE (x));
8742
8743 /* It is not valid to do a right-shift in a narrower mode
8744 than the one it came in with. */
8745 if ((code == LSHIFTRT || code == ASHIFTRT)
8746 && partial_subreg_p (mode, GET_MODE (x)))
8747 op_mode = GET_MODE (x);
8748
8749 /* Truncate MASK to fit OP_MODE. */
8750 if (op_mode)
8751 mask &= GET_MODE_MASK (op_mode);
8752
8753 /* Determine what bits of X are guaranteed to be (non)zero. */
8754 nonzero = nonzero_bits (x, mode);
8755
8756 /* If none of the bits in X are needed, return a zero. */
8757 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8758 x = const0_rtx;
8759
8760 /* If X is a CONST_INT, return a new one. Do this here since the
8761 test below will fail. */
8762 if (CONST_INT_P (x))
8763 {
8764 if (SCALAR_INT_MODE_P (mode))
8765 return gen_int_mode (INTVAL (x) & mask, mode);
8766 else
8767 {
8768 x = GEN_INT (INTVAL (x) & mask);
8769 return gen_lowpart_common (mode, x);
8770 }
8771 }
8772
8773 /* If X is narrower than MODE and we want all the bits in X's mode, just
8774 get X in the proper mode. */
8775 if (paradoxical_subreg_p (mode, GET_MODE (x))
8776 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8777 return gen_lowpart (mode, x);
8778
8779 /* We can ignore the effect of a SUBREG if it narrows the mode or
8780 if the constant masks to zero all the bits the mode doesn't have. */
8781 if (GET_CODE (x) == SUBREG
8782 && subreg_lowpart_p (x)
8783 && (partial_subreg_p (x)
8784 || (mask
8785 & GET_MODE_MASK (GET_MODE (x))
8786 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))) == 0))
8787 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8788
8789 scalar_int_mode int_mode, xmode;
8790 if (is_a <scalar_int_mode> (mode, &int_mode)
8791 && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
8792 /* OP_MODE is either MODE or XMODE, so it must be a scalar
8793 integer too. */
8794 return force_int_to_mode (x, int_mode, xmode,
8795 as_a <scalar_int_mode> (op_mode),
8796 mask, just_select);
8797
8798 return gen_lowpart_or_truncate (mode, x);
8799 }
8800
8801 /* Subroutine of force_to_mode that handles cases in which both X and
8802 the result are scalar integers. MODE is the mode of the result,
8803 XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
8804 is preferred for simplified versions of X. The other arguments
8805 are as for force_to_mode. */
8806
8807 static rtx
8808 force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
8809 scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
8810 int just_select)
8811 {
8812 enum rtx_code code = GET_CODE (x);
8813 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8814 unsigned HOST_WIDE_INT fuller_mask;
8815 rtx op0, op1, temp;
8816 poly_int64 const_op0;
8817
8818 /* When we have an arithmetic operation, or a shift whose count we
8819 do not know, we need to assume that all bits up to the highest-order
8820 bit in MASK will be needed. This is how we form such a mask. */
8821 if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
8822 fuller_mask = HOST_WIDE_INT_M1U;
8823 else
8824 fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
8825 - 1);
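/* For example (values assumed): a MASK of 0x0c yields a FULLER_MASK
of 0x0f, since carries from bits 0 and 1 of an addition can
propagate into the bits that MASK actually selects. */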
8826
8827 switch (code)
8828 {
8829 case CLOBBER:
8830 /* If X is a (clobber (const_int)), return it since we know we are
8831 generating something that won't match. */
8832 return x;
8833
8834 case SIGN_EXTEND:
8835 case ZERO_EXTEND:
8836 case ZERO_EXTRACT:
8837 case SIGN_EXTRACT:
8838 x = expand_compound_operation (x);
8839 if (GET_CODE (x) != code)
8840 return force_to_mode (x, mode, mask, next_select);
8841 break;
8842
8843 case TRUNCATE:
8844 /* Similarly for a truncate. */
8845 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8846
8847 case AND:
8848 /* If this is an AND with a constant, convert it into an AND
8849 whose constant is the AND of that constant with MASK. If it
8850 remains an AND of MASK, delete it since it is redundant. */
8851
8852 if (CONST_INT_P (XEXP (x, 1)))
8853 {
8854 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8855 mask & INTVAL (XEXP (x, 1)));
8856 xmode = op_mode;
8857
8858 /* If X is still an AND, see if it is an AND with a mask that
8859 is just some low-order bits. If so, and it is MASK, we don't
8860 need it. */
8861
8862 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8863 && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
8864 x = XEXP (x, 0);
8865
8866 /* If it remains an AND, try making another AND with the bits
8867 in the mode mask that aren't in MASK turned on. If the
8868 constant in the AND is wide enough, this might make a
8869 cheaper constant. */
8870
8871 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8872 && GET_MODE_MASK (xmode) != mask
8873 && HWI_COMPUTABLE_MODE_P (xmode))
8874 {
8875 unsigned HOST_WIDE_INT cval
8876 = UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
8877 rtx y;
8878
8879 y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
8880 gen_int_mode (cval, xmode));
8881 if (set_src_cost (y, xmode, optimize_this_for_speed_p)
8882 < set_src_cost (x, xmode, optimize_this_for_speed_p))
8883 x = y;
8884 }
8885
8886 break;
8887 }
8888
8889 goto binop;
8890
8891 case PLUS:
8892 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8893 low-order bits (as in an alignment operation) and FOO is already
8894 aligned to that boundary, mask C1 to that boundary as well.
8895 This may eliminate that PLUS and, later, the AND. */
8896
8897 {
8898 unsigned int width = GET_MODE_PRECISION (mode);
8899 unsigned HOST_WIDE_INT smask = mask;
8900
8901 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8902 number, sign extend it. */
8903
8904 if (width < HOST_BITS_PER_WIDE_INT
8905 && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8906 smask |= HOST_WIDE_INT_M1U << width;
8907
8908 if (CONST_INT_P (XEXP (x, 1))
8909 && pow2p_hwi (- smask)
8910 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8911 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8912 return force_to_mode (plus_constant (xmode, XEXP (x, 0),
8913 (INTVAL (XEXP (x, 1)) & smask)),
8914 mode, smask, next_select);
8915 }
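/* A hedged example (values assumed): with MASK 0xfffffff8 and FOO
known to be a multiple of 8, (plus FOO (const_int 9)) can have its
constant masked to (const_int 8), since the low three bits of the
sum can never reach the bits selected by MASK. */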
8916
8917 /* fall through */
8918
8919 case MULT:
8920 /* Substituting into the operands of a widening MULT is not likely to
8921 create RTL matching a machine insn. */
8922 if (code == MULT
8923 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
8924 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
8925 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
8926 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
8927 && REG_P (XEXP (XEXP (x, 0), 0))
8928 && REG_P (XEXP (XEXP (x, 1), 0)))
8929 return gen_lowpart_or_truncate (mode, x);
8930
8931 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8932 most significant bit in MASK since carries from those bits will
8933 affect the bits we are interested in. */
8934 mask = fuller_mask;
8935 goto binop;
8936
8937 case MINUS:
8938 /* If X is (minus C Y) where C's least set bit is larger than any bit
8939 in the mask, then we may replace with (neg Y). */
8940 if (poly_int_rtx_p (XEXP (x, 0), &const_op0)
8941 && known_alignment (poly_uint64 (const_op0)) > mask)
8942 {
8943 x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
8944 return force_to_mode (x, mode, mask, next_select);
8945 }
8946
8947 /* Similarly, if C contains every bit in the fuller_mask, then we may
8948 replace with (not Y). */
8949 if (CONST_INT_P (XEXP (x, 0))
8950 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8951 {
8952 x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
8953 return force_to_mode (x, mode, mask, next_select);
8954 }
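/* E.g. (minus (const_int -1) Y): the constant covers every bit of
FULLER_MASK, so the expression is rewritten as (not Y). (Values
assumed for illustration.) */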
8955
8956 mask = fuller_mask;
8957 goto binop;
8958
8959 case IOR:
8960 case XOR:
8961 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8962 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8963 operation which may be a bitfield extraction. Ensure that the
8964 constant we form is not wider than the mode of X. */
8965
8966 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8967 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8968 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8969 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8970 && CONST_INT_P (XEXP (x, 1))
8971 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8972 + floor_log2 (INTVAL (XEXP (x, 1))))
8973 < GET_MODE_PRECISION (xmode))
8974 && (UINTVAL (XEXP (x, 1))
8975 & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
8976 {
8977 temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8978 << INTVAL (XEXP (XEXP (x, 0), 1)),
8979 xmode);
8980 temp = simplify_gen_binary (GET_CODE (x), xmode,
8981 XEXP (XEXP (x, 0), 0), temp);
8982 x = simplify_gen_binary (LSHIFTRT, xmode, temp,
8983 XEXP (XEXP (x, 0), 1));
8984 return force_to_mode (x, mode, mask, next_select);
8985 }
8986
8987 binop:
8988 /* For most binary operations, just propagate into the operation and
8989 change the mode if we have an operation of that mode. */
8990
8991 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8992 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8993
8994 /* If we ended up truncating both operands, truncate the result of the
8995 operation instead. */
8996 if (GET_CODE (op0) == TRUNCATE
8997 && GET_CODE (op1) == TRUNCATE)
8998 {
8999 op0 = XEXP (op0, 0);
9000 op1 = XEXP (op1, 0);
9001 }
9002
9003 op0 = gen_lowpart_or_truncate (op_mode, op0);
9004 op1 = gen_lowpart_or_truncate (op_mode, op1);
9005
9006 if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
9007 {
9008 x = simplify_gen_binary (code, op_mode, op0, op1);
9009 xmode = op_mode;
9010 }
9011 break;
9012
9013 case ASHIFT:
9014 /* For left shifts, do the same, but just for the first operand.
9015 However, we cannot do anything with shifts where we cannot
9016 guarantee that the counts are smaller than the size of the mode
9017 because such a count will have a different meaning in a
9018 wider mode. */
9019
9020 if (! (CONST_INT_P (XEXP (x, 1))
9021 && INTVAL (XEXP (x, 1)) >= 0
9022 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
9023 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
9024 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
9025 < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
9026 break;
9027
9028 /* If the shift count is a constant and we can do arithmetic in
9029 the mode of the shift, refine which bits we need. Otherwise, use the
9030 conservative form of the mask. */
9031 if (CONST_INT_P (XEXP (x, 1))
9032 && INTVAL (XEXP (x, 1)) >= 0
9033 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
9034 && HWI_COMPUTABLE_MODE_P (op_mode))
9035 mask >>= INTVAL (XEXP (x, 1));
9036 else
9037 mask = fuller_mask;
9038
9039 op0 = gen_lowpart_or_truncate (op_mode,
9040 force_to_mode (XEXP (x, 0), mode,
9041 mask, next_select));
9042
9043 if (op_mode != xmode || op0 != XEXP (x, 0))
9044 {
9045 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
9046 xmode = op_mode;
9047 }
9048 break;
9049
9050 case LSHIFTRT:
9051 /* Here we can only do something if the shift count is a constant,
9052 this shift constant is valid for the host, and we can do arithmetic
9053 in OP_MODE. */
9054
9055 if (CONST_INT_P (XEXP (x, 1))
9056 && INTVAL (XEXP (x, 1)) >= 0
9057 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
9058 && HWI_COMPUTABLE_MODE_P (op_mode))
9059 {
9060 rtx inner = XEXP (x, 0);
9061 unsigned HOST_WIDE_INT inner_mask;
9062
9063 /* Select the mask of the bits we need for the shift operand. */
9064 inner_mask = mask << INTVAL (XEXP (x, 1));
9065
9066 /* We can only change the mode of the shift if we can do arithmetic
9067 in the mode of the shift and INNER_MASK is no wider than the
9068 width of X's mode. */
9069 if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
9070 op_mode = xmode;
9071
9072 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
9073
9074 if (xmode != op_mode || inner != XEXP (x, 0))
9075 {
9076 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
9077 xmode = op_mode;
9078 }
9079 }
9080
9081 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
9082 shift and AND produces only copies of the sign bit (C2 is one less
9083 than a power of two), we can do this with just a shift. */
9084
9085 if (GET_CODE (x) == LSHIFTRT
9086 && CONST_INT_P (XEXP (x, 1))
9087 /* The shift puts one of the sign bit copies in the least significant
9088 bit. */
9089 && ((INTVAL (XEXP (x, 1))
9090 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
9091 >= GET_MODE_PRECISION (xmode))
9092 && pow2p_hwi (mask + 1)
9093 /* Number of bits left after the shift must be at least as many as
9094 the mask needs. */
9095 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
9096 <= GET_MODE_PRECISION (xmode))
9097 /* Must be at least as many sign bit copies as the mask needs. */
9098 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
9099 >= exact_log2 (mask + 1)))
9100 {
9101 int nbits = GET_MODE_PRECISION (xmode) - exact_log2 (mask + 1);
9102 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
9103 gen_int_shift_amount (xmode, nbits));
9104 }
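/* Illustrative sketch (values assumed, 32-bit SImode): with MASK 1,
if FOO has at least four sign-bit copies, then
(lshiftrt:SI FOO (const_int 28)) puts a copy of the sign bit in
bit 0 and may be rewritten as (lshiftrt:SI FOO (const_int 31)),
after which the masking AND is redundant. */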
9105 goto shiftrt;
9106
9107 case ASHIFTRT:
9108 /* If we are just looking for the sign bit, we don't need this shift at
9109 all, even if it has a variable count. */
9110 if (val_signbit_p (xmode, mask))
9111 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9112
9113 /* If this is a shift by a constant, get a mask that contains those bits
9114 that are not copies of the sign bit. We then have two cases: If
9115 MASK only includes those bits, this can be a logical shift, which may
9116 allow simplifications. If MASK is a single-bit field not within
9117 those bits, we are requesting a copy of the sign bit and hence can
9118 shift the sign bit to the appropriate location. */
9119
9120 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
9121 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
9122 {
9123 unsigned HOST_WIDE_INT nonzero;
9124 int i;
9125
9126 /* If the considered data is wider than HOST_WIDE_INT, we can't
9127 represent a mask for all its bits in a single scalar.
9128 But we only care about the lower bits, so calculate these. */
9129
9130 if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
9131 {
9132 nonzero = HOST_WIDE_INT_M1U;
9133
9134 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
9135 is the number of bits a full-width mask would have set.
9136 We need only shift if these are fewer than nonzero can
9137 hold. If not, we must keep all bits set in nonzero. */
9138
9139 if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
9140 < HOST_BITS_PER_WIDE_INT)
9141 nonzero >>= INTVAL (XEXP (x, 1))
9142 + HOST_BITS_PER_WIDE_INT
9143 - GET_MODE_PRECISION (xmode);
9144 }
9145 else
9146 {
9147 nonzero = GET_MODE_MASK (xmode);
9148 nonzero >>= INTVAL (XEXP (x, 1));
9149 }
9150
9151 if ((mask & ~nonzero) == 0)
9152 {
9153 x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
9154 XEXP (x, 0), INTVAL (XEXP (x, 1)));
9155 if (GET_CODE (x) != ASHIFTRT)
9156 return force_to_mode (x, mode, mask, next_select);
9157 }
9158
9159 else if ((i = exact_log2 (mask)) >= 0)
9160 {
9161 x = simplify_shift_const
9162 (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
9163 GET_MODE_PRECISION (xmode) - 1 - i);
9164
9165 if (GET_CODE (x) != ASHIFTRT)
9166 return force_to_mode (x, mode, mask, next_select);
9167 }
9168 }
9169
9170 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
9171 even if the shift count isn't a constant. */
9172 if (mask == 1)
9173 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));
9174
9175 shiftrt:
9176
9177 /* If this is a zero- or sign-extension operation that just affects bits
9178 we don't care about, remove it. Be sure the call above returned
9179 something that is still a shift. */
9180
9181 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
9182 && CONST_INT_P (XEXP (x, 1))
9183 && INTVAL (XEXP (x, 1)) >= 0
9184 && (INTVAL (XEXP (x, 1))
9185 <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
9186 && GET_CODE (XEXP (x, 0)) == ASHIFT
9187 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
9188 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
9189 next_select);
9190
9191 break;
9192
9193 case ROTATE:
9194 case ROTATERT:
9195 /* If the shift count is constant and we can do computations
9196 in the mode of X, compute where the bits we care about are.
9197 Otherwise, we can't do anything. Don't change the mode of
9198 the shift or propagate MODE into the shift, though. */
9199 if (CONST_INT_P (XEXP (x, 1))
9200 && INTVAL (XEXP (x, 1)) >= 0)
9201 {
9202 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
9203 xmode, gen_int_mode (mask, xmode),
9204 XEXP (x, 1));
9205 if (temp && CONST_INT_P (temp))
9206 x = simplify_gen_binary (code, xmode,
9207 force_to_mode (XEXP (x, 0), xmode,
9208 INTVAL (temp), next_select),
9209 XEXP (x, 1));
9210 }
9211 break;
9212
9213 case NEG:
9214 /* If we just want the low-order bit, the NEG isn't needed since it
9215 won't change the low-order bit. */
9216 if (mask == 1)
9217 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
9218
9219 /* We need any bits less significant than the most significant bit in
9220 MASK since carries from those bits will affect the bits we are
9221 interested in. */
9222 mask = fuller_mask;
9223 goto unop;
9224
9225 case NOT:
9226 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
9227 same as the XOR case above. Ensure that the constant we form is not
9228 wider than the mode of X. */
9229
9230 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
9231 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9232 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
9233 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
9234 < GET_MODE_PRECISION (xmode))
9235 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
9236 {
9237 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
9238 temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
9239 x = simplify_gen_binary (LSHIFTRT, xmode,
9240 temp, XEXP (XEXP (x, 0), 1));
9241
9242 return force_to_mode (x, mode, mask, next_select);
9243 }
9244
9245 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
9246 use the full mask inside the NOT. */
9247 mask = fuller_mask;
9248
9249 unop:
9250 op0 = gen_lowpart_or_truncate (op_mode,
9251 force_to_mode (XEXP (x, 0), mode, mask,
9252 next_select));
9253 if (op_mode != xmode || op0 != XEXP (x, 0))
9254 {
9255 x = simplify_gen_unary (code, op_mode, op0, op_mode);
9256 xmode = op_mode;
9257 }
9258 break;
9259
9260 case NE:
9261 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
9262 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
9263 which is equal to STORE_FLAG_VALUE. */
9264 if ((mask & ~STORE_FLAG_VALUE) == 0
9265 && XEXP (x, 1) == const0_rtx
9266 && GET_MODE (XEXP (x, 0)) == mode
9267 && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
9268 && (nonzero_bits (XEXP (x, 0), mode)
9269 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
9270 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9271
9272 break;
9273
9274 case IF_THEN_ELSE:
9275 /* We have no way of knowing if the IF_THEN_ELSE can itself be
9276 written in a narrower mode. We play it safe and do not do so. */
9277
9278 op0 = gen_lowpart_or_truncate (xmode,
9279 force_to_mode (XEXP (x, 1), mode,
9280 mask, next_select));
9281 op1 = gen_lowpart_or_truncate (xmode,
9282 force_to_mode (XEXP (x, 2), mode,
9283 mask, next_select));
9284 if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
9285 x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
9286 GET_MODE (XEXP (x, 0)), XEXP (x, 0),
9287 op0, op1);
9288 break;
9289
9290 default:
9291 break;
9292 }
9293
9294 /* Ensure we return a value of the proper mode. */
9295 return gen_lowpart_or_truncate (mode, x);
9296 }
9297 \f
9298 /* Return nonzero if X is an expression that has one of two values depending on
9299 whether some other value is zero or nonzero. In that case, we return the
9300 value that is being tested, *PTRUE is set to the value if the rtx being
9301 returned has a nonzero value, and *PFALSE is set to the other alternative.
9302
9303 If we return zero, we set *PTRUE and *PFALSE to X. */
9304
9305 static rtx
9306 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
9307 {
9308 machine_mode mode = GET_MODE (x);
9309 enum rtx_code code = GET_CODE (x);
9310 rtx cond0, cond1, true0, true1, false0, false1;
9311 unsigned HOST_WIDE_INT nz;
9312 scalar_int_mode int_mode;
9313
9314 /* If we are comparing a value against zero, we are done. */
9315 if ((code == NE || code == EQ)
9316 && XEXP (x, 1) == const0_rtx)
9317 {
9318 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
9319 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
9320 return XEXP (x, 0);
9321 }
9322
9323 /* If this is a unary operation whose operand has one of two values, apply
9324 our opcode to compute those values. */
9325 else if (UNARY_P (x)
9326 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
9327 {
9328 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
9329 *pfalse = simplify_gen_unary (code, mode, false0,
9330 GET_MODE (XEXP (x, 0)));
9331 return cond0;
9332 }
9333
9334 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
9335 make can't possibly match and would suppress other optimizations. */
9336 else if (code == COMPARE)
9337 ;
9338
9339 /* If this is a binary operation, see if either side has only one of two
9340 values. If either one does or if both do and they are conditional on
9341 the same value, compute the new true and false values. */
9342 else if (BINARY_P (x))
9343 {
9344 rtx op0 = XEXP (x, 0);
9345 rtx op1 = XEXP (x, 1);
9346 cond0 = if_then_else_cond (op0, &true0, &false0);
9347 cond1 = if_then_else_cond (op1, &true1, &false1);
9348
9349 if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
9350 && (REG_P (op0) || REG_P (op1)))
9351 {
9352 /* Try to enable a simplification by undoing work done by
9353 if_then_else_cond if it converted a REG into something more
9354 complex. */
9355 if (REG_P (op0))
9356 {
9357 cond0 = 0;
9358 true0 = false0 = op0;
9359 }
9360 else
9361 {
9362 cond1 = 0;
9363 true1 = false1 = op1;
9364 }
9365 }
9366
9367 if ((cond0 != 0 || cond1 != 0)
9368 && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
9369 {
9370 /* If if_then_else_cond returned zero, then true/false are the
9371 same rtl. We must copy one of them to prevent invalid rtl
9372 sharing. */
9373 if (cond0 == 0)
9374 true0 = copy_rtx (true0);
9375 else if (cond1 == 0)
9376 true1 = copy_rtx (true1);
9377
9378 if (COMPARISON_P (x))
9379 {
9380 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
9381 true0, true1);
9382 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
9383 false0, false1);
9384 }
9385 else
9386 {
9387 *ptrue = simplify_gen_binary (code, mode, true0, true1);
9388 *pfalse = simplify_gen_binary (code, mode, false0, false1);
9389 }
9390
9391 return cond0 ? cond0 : cond1;
9392 }
9393
9394 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
9395 operands is zero when the other is nonzero, and vice-versa,
9396 and STORE_FLAG_VALUE is 1 or -1. */
9397
9398 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9399 && (code == PLUS || code == IOR || code == XOR || code == MINUS
9400 || code == UMAX)
9401 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9402 {
9403 rtx op0 = XEXP (XEXP (x, 0), 1);
9404 rtx op1 = XEXP (XEXP (x, 1), 1);
9405
9406 cond0 = XEXP (XEXP (x, 0), 0);
9407 cond1 = XEXP (XEXP (x, 1), 0);
9408
9409 if (COMPARISON_P (cond0)
9410 && COMPARISON_P (cond1)
9411 && SCALAR_INT_MODE_P (mode)
9412 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9413 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9414 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9415 || ((swap_condition (GET_CODE (cond0))
9416 == reversed_comparison_code (cond1, NULL))
9417 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9418 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9419 && ! side_effects_p (x))
9420 {
9421 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
9422 *pfalse = simplify_gen_binary (MULT, mode,
9423 (code == MINUS
9424 ? simplify_gen_unary (NEG, mode,
9425 op1, mode)
9426 : op1),
9427 const_true_rtx);
9428 return cond0;
9429 }
9430 }
9431
9432 /* Similarly for MULT, AND and UMIN, except that for these the result
9433 is always zero. */
9434 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9435 && (code == MULT || code == AND || code == UMIN)
9436 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9437 {
9438 cond0 = XEXP (XEXP (x, 0), 0);
9439 cond1 = XEXP (XEXP (x, 1), 0);
9440
9441 if (COMPARISON_P (cond0)
9442 && COMPARISON_P (cond1)
9443 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9444 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9445 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9446 || ((swap_condition (GET_CODE (cond0))
9447 == reversed_comparison_code (cond1, NULL))
9448 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9449 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9450 && ! side_effects_p (x))
9451 {
9452 *ptrue = *pfalse = const0_rtx;
9453 return cond0;
9454 }
9455 }
9456 }
9457
9458 else if (code == IF_THEN_ELSE)
9459 {
9460 /* If we have IF_THEN_ELSE already, extract the condition and
9461 canonicalize it if it is NE or EQ. */
9462 cond0 = XEXP (x, 0);
9463 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
9464 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
9465 return XEXP (cond0, 0);
9466 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
9467 {
9468 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
9469 return XEXP (cond0, 0);
9470 }
9471 else
9472 return cond0;
9473 }
9474
9475 /* If X is a SUBREG, we can narrow both the true and false values
9476 of the inner expression, if there is a condition. */
9477 else if (code == SUBREG
9478 && (cond0 = if_then_else_cond (SUBREG_REG (x), &true0,
9479 &false0)) != 0)
9480 {
9481 true0 = simplify_gen_subreg (mode, true0,
9482 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9483 false0 = simplify_gen_subreg (mode, false0,
9484 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9485 if (true0 && false0)
9486 {
9487 *ptrue = true0;
9488 *pfalse = false0;
9489 return cond0;
9490 }
9491 }
9492
9493 /* If X is a constant, this isn't special and will cause confusion
9494 if we treat it as such. Likewise if it is equivalent to a constant. */
9495 else if (CONSTANT_P (x)
9496 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
9497 ;
9498
9499 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9500 will be least confusing to the rest of the compiler. */
9501 else if (mode == BImode)
9502 {
9503 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
9504 return x;
9505 }
9506
9507 /* If X is known to be either 0 or -1, those are the true and
9508 false values when testing X. */
9509 else if (x == constm1_rtx || x == const0_rtx
9510 || (is_a <scalar_int_mode> (mode, &int_mode)
9511 && (num_sign_bit_copies (x, int_mode)
9512 == GET_MODE_PRECISION (int_mode))))
9513 {
9514 *ptrue = constm1_rtx, *pfalse = const0_rtx;
9515 return x;
9516 }
9517
9518 /* Likewise for 0 or a single bit. */
9519 else if (HWI_COMPUTABLE_MODE_P (mode)
9520 && pow2p_hwi (nz = nonzero_bits (x, mode)))
9521 {
9522 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
9523 return x;
9524 }
9525
9526 /* Otherwise fail; show no condition with true and false values the same. */
9527 *ptrue = *pfalse = x;
9528 return 0;
9529 }
9530 \f
9531 /* Return the value of expression X given the fact that condition COND
9532 is known to be true when applied to REG as its first operand and VAL
9533 as its second. X is known to not be shared and so can be modified in
9534 place.
9535
9536 We only handle the simplest cases, and specifically those cases that
9537 arise with IF_THEN_ELSE expressions. */
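/* For example (values assumed): if COND is GT with operands REG and
VAL, then X = (smax REG VAL) is known to equal REG, so REG is
returned in its place. */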
9538
9539 static rtx
9540 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9541 {
9542 enum rtx_code code = GET_CODE (x);
9543 const char *fmt;
9544 int i, j;
9545
9546 if (side_effects_p (x))
9547 return x;
9548
9549 /* If either operand of the condition is a floating point value,
9550 then we have to avoid collapsing an EQ comparison. */
9551 if (cond == EQ
9552 && rtx_equal_p (x, reg)
9553 && ! FLOAT_MODE_P (GET_MODE (x))
9554 && ! FLOAT_MODE_P (GET_MODE (val)))
9555 return val;
9556
9557 if (cond == UNEQ && rtx_equal_p (x, reg))
9558 return val;
9559
9560 /* If X is (abs REG) and we know something about REG's relationship
9561 with zero, we may be able to simplify this. */
9562
9563 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9564 switch (cond)
9565 {
9566 case GE: case GT: case EQ:
9567 return XEXP (x, 0);
9568 case LT: case LE:
9569 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9570 XEXP (x, 0),
9571 GET_MODE (XEXP (x, 0)));
9572 default:
9573 break;
9574 }
9575
9576 /* The only other cases we handle are MIN, MAX, and comparisons if the
9577 operands are the same as REG and VAL. */
9578
9579 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9580 {
9581 if (rtx_equal_p (XEXP (x, 0), val))
9582 {
9583 std::swap (val, reg);
9584 cond = swap_condition (cond);
9585 }
9586
9587 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9588 {
9589 if (COMPARISON_P (x))
9590 {
9591 if (comparison_dominates_p (cond, code))
9592 return VECTOR_MODE_P (GET_MODE (x)) ? x : const_true_rtx;
9593
9594 code = reversed_comparison_code (x, NULL);
9595 if (code != UNKNOWN
9596 && comparison_dominates_p (cond, code))
9597 return CONST0_RTX (GET_MODE (x));
9598 else
9599 return x;
9600 }
9601 else if (code == SMAX || code == SMIN
9602 || code == UMIN || code == UMAX)
9603 {
9604 int unsignedp = (code == UMIN || code == UMAX);
9605
9606 /* Do not reverse the condition when it is NE or EQ.
9607 This is because we cannot conclude anything about
9608 the value of 'SMAX (x, y)' when x is not equal to y,
9609 but we can when x equals y. */
9610 if ((code == SMAX || code == UMAX)
9611 && ! (cond == EQ || cond == NE))
9612 cond = reverse_condition (cond);
9613
9614 switch (cond)
9615 {
9616 case GE: case GT:
9617 return unsignedp ? x : XEXP (x, 1);
9618 case LE: case LT:
9619 return unsignedp ? x : XEXP (x, 0);
9620 case GEU: case GTU:
9621 return unsignedp ? XEXP (x, 1) : x;
9622 case LEU: case LTU:
9623 return unsignedp ? XEXP (x, 0) : x;
9624 default:
9625 break;
9626 }
9627 }
9628 }
9629 }
9630 else if (code == SUBREG)
9631 {
9632 machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9633 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9634
9635 if (SUBREG_REG (x) != r)
9636 {
9637 /* We must simplify subreg here, before we lose track of the
9638 original inner_mode. */
9639 new_rtx = simplify_subreg (GET_MODE (x), r,
9640 inner_mode, SUBREG_BYTE (x));
9641 if (new_rtx)
9642 return new_rtx;
9643 else
9644 SUBST (SUBREG_REG (x), r);
9645 }
9646
9647 return x;
9648 }
9649 /* We don't have to handle SIGN_EXTEND here, because even in the
9650 case of replacing something with a modeless CONST_INT, a
9651 CONST_INT is already (supposed to be) a valid sign extension for
9652 its narrower mode, which implies it's already properly
9653 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9654 story is different. */
9655 else if (code == ZERO_EXTEND)
9656 {
9657 machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9658 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9659
9660 if (XEXP (x, 0) != r)
9661 {
9662 /* We must simplify the zero_extend here, before we lose
9663 track of the original inner_mode. */
9664 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9665 r, inner_mode);
9666 if (new_rtx)
9667 return new_rtx;
9668 else
9669 SUBST (XEXP (x, 0), r);
9670 }
9671
9672 return x;
9673 }
9674
9675 fmt = GET_RTX_FORMAT (code);
9676 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9677 {
9678 if (fmt[i] == 'e')
9679 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9680 else if (fmt[i] == 'E')
9681 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9682 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9683 cond, reg, val));
9684 }
9685
9686 return x;
9687 }
9688 \f
9689 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9690 assignment as a field assignment. */
9691
9692 static int
9693 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9694 {
9695 if (widen_x && GET_MODE (x) != GET_MODE (y))
9696 {
9697 if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
9698 return 0;
9699 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9700 return 0;
9701 x = adjust_address_nv (x, GET_MODE (y),
9702 byte_lowpart_offset (GET_MODE (y),
9703 GET_MODE (x)));
9704 }
9705
9706 if (x == y || rtx_equal_p (x, y))
9707 return 1;
9708
9709 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9710 return 0;
9711
9712 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9713 Note that all SUBREGs of MEM are paradoxical; otherwise they
9714 would have been rewritten. */
9715 if (MEM_P (x) && GET_CODE (y) == SUBREG
9716 && MEM_P (SUBREG_REG (y))
9717 && rtx_equal_p (SUBREG_REG (y),
9718 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9719 return 1;
9720
9721 if (MEM_P (y) && GET_CODE (x) == SUBREG
9722 && MEM_P (SUBREG_REG (x))
9723 && rtx_equal_p (SUBREG_REG (x),
9724 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9725 return 1;
9726
9727 /* We used to see if get_last_value of X and Y were the same but that's
9728 not correct. In one direction, we'll cause the assignment to have
9729 the wrong destination; in the other, we'll import a register into this
9730 insn that might already have been dead. So fail if none of the
9731 above cases are true. */
9732 return 0;
9733 }
9734 \f
9735 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9736 Return that assignment if so.
9737
9738 We only handle the most common cases. */
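/* For example (an assumed shape, matching the first case below): a
SET whose source is (and (rotate (const_int -2) POS) DEST) clears
one bit of DEST and is rewritten as
(set (zero_extract DEST (const_int 1) POS) (const_int 0)). */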
9739
9740 static rtx
9741 make_field_assignment (rtx x)
9742 {
9743 rtx dest = SET_DEST (x);
9744 rtx src = SET_SRC (x);
9745 rtx assign;
9746 rtx rhs, lhs;
9747 HOST_WIDE_INT c1;
9748 HOST_WIDE_INT pos;
9749 unsigned HOST_WIDE_INT len;
9750 rtx other;
9751
9752 /* All the rules in this function are specific to scalar integers. */
9753 scalar_int_mode mode;
9754 if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
9755 return x;
9756
9757 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9758 a clear of a one-bit field. We will have changed it to
9759 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9760 for a SUBREG. */
9761
9762 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9763 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9764 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9765 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9766 {
9767 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9768 1, 1, 1, 0);
9769 if (assign != 0)
9770 return gen_rtx_SET (assign, const0_rtx);
9771 return x;
9772 }
9773
9774 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9775 && subreg_lowpart_p (XEXP (src, 0))
9776 && partial_subreg_p (XEXP (src, 0))
9777 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9778 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9779 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9780 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9781 {
9782 assign = make_extraction (VOIDmode, dest, 0,
9783 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9784 1, 1, 1, 0);
9785 if (assign != 0)
9786 return gen_rtx_SET (assign, const0_rtx);
9787 return x;
9788 }
9789
9790 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9791 one-bit field. */
9792 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9793 && XEXP (XEXP (src, 0), 0) == const1_rtx
9794 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9795 {
9796 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9797 1, 1, 1, 0);
9798 if (assign != 0)
9799 return gen_rtx_SET (assign, const1_rtx);
9800 return x;
9801 }
9802
9803 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9804 SRC is an AND with all bits of that field set, then we can discard
9805 the AND. */
9806 if (GET_CODE (dest) == ZERO_EXTRACT
9807 && CONST_INT_P (XEXP (dest, 1))
9808 && GET_CODE (src) == AND
9809 && CONST_INT_P (XEXP (src, 1)))
9810 {
9811 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9812 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9813 unsigned HOST_WIDE_INT ze_mask;
9814
9815 if (width >= HOST_BITS_PER_WIDE_INT)
9816 ze_mask = -1;
9817 else
9818 ze_mask = (HOST_WIDE_INT_1U << width) - 1;
9819
9820 /* Complete overlap. We can remove the source AND. */
9821 if ((and_mask & ze_mask) == ze_mask)
9822 return gen_rtx_SET (dest, XEXP (src, 0));
9823
9824 /* Partial overlap. We can reduce the source AND. */
9825 if ((and_mask & ze_mask) != and_mask)
9826 {
9827 src = gen_rtx_AND (mode, XEXP (src, 0),
9828 gen_int_mode (and_mask & ze_mask, mode));
9829 return gen_rtx_SET (dest, src);
9830 }
9831 }
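/* For instance (values assumed): storing (and S (const_int 255))
into an 8-bit ZERO_EXTRACT destination drops the AND completely,
since only the low eight bits are written anyway. */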
9832
9833 /* The other case we handle is assignments into a constant-position
9834 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9835 a mask that has all one bits except for a group of zero bits and
9836 OTHER is known to have zeros where C1 has ones, this is such an
9837 assignment. Compute the position and length from C1. Shift OTHER
9838 to the appropriate position, force it to the required mode, and
9839 make the extraction. Check for the AND in both operands. */
9840
9841 /* One or more SUBREGs might obscure the constant-position field
9842 assignment. The first one we are likely to encounter is an outer
9843 narrowing SUBREG, which we can just strip for the purposes of
9844 identifying the constant-field assignment. */
9845 scalar_int_mode src_mode = mode;
9846 if (GET_CODE (src) == SUBREG
9847 && subreg_lowpart_p (src)
9848 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
9849 src = SUBREG_REG (src);
9850
9851 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9852 return x;
9853
9854 rhs = expand_compound_operation (XEXP (src, 0));
9855 lhs = expand_compound_operation (XEXP (src, 1));
9856
9857 if (GET_CODE (rhs) == AND
9858 && CONST_INT_P (XEXP (rhs, 1))
9859 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9860 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9861 /* The second SUBREG that might get in the way is a paradoxical
9862 SUBREG around the first operand of the AND. We want to
9863 pretend the operand is as wide as the destination here. We
9864 do this by adjusting the MEM to wider mode for the sole
9865 purpose of the call to rtx_equal_for_field_assignment_p. Also
9866 note this trick only works for MEMs. */
9867 else if (GET_CODE (rhs) == AND
9868 && paradoxical_subreg_p (XEXP (rhs, 0))
9869 && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9870 && CONST_INT_P (XEXP (rhs, 1))
9871 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9872 dest, true))
9873 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9874 else if (GET_CODE (lhs) == AND
9875 && CONST_INT_P (XEXP (lhs, 1))
9876 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9877 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9878 /* The second SUBREG that might get in the way is a paradoxical
9879 SUBREG around the first operand of the AND. We want to
9880 pretend the operand is as wide as the destination here. We
9881 do this by adjusting the MEM to wider mode for the sole
9882 purpose of the call to rtx_equal_for_field_assignment_p. Also
9883 note this trick only works for MEMs. */
9884 else if (GET_CODE (lhs) == AND
9885 && paradoxical_subreg_p (XEXP (lhs, 0))
9886 && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9887 && CONST_INT_P (XEXP (lhs, 1))
9888 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9889 dest, true))
9890 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9891 else
9892 return x;
9893
9894 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
9895 if (pos < 0
9896 || pos + len > GET_MODE_PRECISION (mode)
9897 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
9898 || (c1 & nonzero_bits (other, mode)) != 0)
9899 return x;
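/* For example, in SImode with C1 == 0xffff00ff, the zero bits of C1
   form the single group at positions 8..15, so POS == 8 and LEN == 8;
   OTHER must additionally be known to be zero outside that field.  */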
9900
9901 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9902 if (assign == 0)
9903 return x;
9904
9905 /* The mode to use for the source is the mode of the assignment, or of
9906 what is inside a possible STRICT_LOW_PART. */
9907 machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
9908 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9909
9910 /* Shift OTHER right POS places and make it the source, restricting it
9911 to the proper length and mode. */
9912
9913 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9914 src_mode, other, pos),
9915 dest);
9916 src = force_to_mode (src, new_mode,
9917 len >= HOST_BITS_PER_WIDE_INT
9918 ? HOST_WIDE_INT_M1U
9919 : (HOST_WIDE_INT_1U << len) - 1,
9920 0);
9921
9922 /* If SRC is masked by an AND that does not make a difference in
9923 the value being stored, strip it. */
9924 if (GET_CODE (assign) == ZERO_EXTRACT
9925 && CONST_INT_P (XEXP (assign, 1))
9926 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9927 && GET_CODE (src) == AND
9928 && CONST_INT_P (XEXP (src, 1))
9929 && UINTVAL (XEXP (src, 1))
9930 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
9931 src = XEXP (src, 0);
9932
9933 return gen_rtx_SET (assign, src);
9934 }
9935 \f
9936 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9937 if so. */
9938
9939 static rtx
9940 apply_distributive_law (rtx x)
9941 {
9942 enum rtx_code code = GET_CODE (x);
9943 enum rtx_code inner_code;
9944 rtx lhs, rhs, other;
9945 rtx tem;
9946
9947 /* Distributivity is not true for floating point as it can change the
9948 value. So we don't do it unless -funsafe-math-optimizations. */
9949 if (FLOAT_MODE_P (GET_MODE (x))
9950 && ! flag_unsafe_math_optimizations)
9951 return x;
9952
9953 /* The outer operation can only be one of the following: */
9954 if (code != IOR && code != AND && code != XOR
9955 && code != PLUS && code != MINUS)
9956 return x;
9957
9958 lhs = XEXP (x, 0);
9959 rhs = XEXP (x, 1);
9960
9961 /* If either operand is a primitive we can't do anything, so get out
9962 fast. */
9963 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9964 return x;
9965
9966 lhs = expand_compound_operation (lhs);
9967 rhs = expand_compound_operation (rhs);
9968 inner_code = GET_CODE (lhs);
9969 if (inner_code != GET_CODE (rhs))
9970 return x;
9971
9972 /* See if the inner and outer operations distribute. */
9973 switch (inner_code)
9974 {
9975 case LSHIFTRT:
9976 case ASHIFTRT:
9977 case AND:
9978 case IOR:
9979 /* These all distribute except over PLUS. */
9980 if (code == PLUS || code == MINUS)
9981 return x;
9982 break;
9983
9984 case MULT:
9985 if (code != PLUS && code != MINUS)
9986 return x;
9987 break;
9988
9989 case ASHIFT:
9990 /* This is also a multiply, so it distributes over everything. */
9991 break;
9992
9993 /* This used to handle SUBREG, but this turned out to be counter-
9994 productive, since (subreg (op ...)) usually is not handled by
9995 insn patterns, and this "optimization" therefore transformed
9996 recognizable patterns into unrecognizable ones. Therefore the
9997 SUBREG case was removed from here.
9998
9999 It is possible that distributing SUBREG over arithmetic operations
10000 leads to an intermediate result that can then be optimized further,
10001 e.g. by moving the outer SUBREG to the other side of a SET as done
10002 in simplify_set. This seems to have been the original intent of
10003 handling SUBREGs here.
10004
10005 However, with current GCC this does not appear to actually happen,
10006 at least on major platforms. If some case is found where removing
10007 the SUBREG case here prevents follow-on optimizations, distributing
10008 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
10009
10010 default:
10011 return x;
10012 }
10013
10014 /* Set LHS and RHS to the inner operands (A and B in the example
10015 above) and set OTHER to the common operand (C in the example).
10016 There is only one way to do this unless the inner operation is
10017 commutative. */
10018 if (COMMUTATIVE_ARITH_P (lhs)
10019 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
10020 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
10021 else if (COMMUTATIVE_ARITH_P (lhs)
10022 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
10023 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
10024 else if (COMMUTATIVE_ARITH_P (lhs)
10025 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
10026 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
10027 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
10028 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
10029 else
10030 return x;
10031
10032 /* Form the new inner operation, seeing if it simplifies first. */
10033 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
10034
10035 /* There is one exception to the general way of distributing:
10036 (a | c) ^ (b | c) -> (a ^ b) & ~c */
10037 if (code == XOR && inner_code == IOR)
10038 {
10039 inner_code = AND;
10040 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
10041 }
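/* To see why: wherever a bit of C is 1, both (a | c) and (b | c) have
   that bit set, so their XOR has it clear, which the AND with ~C
   enforces; wherever the bit of C is 0, the XOR is simply (a ^ b).  */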
10042
10043 /* We may be able to continue distributing the result, so call
10044 ourselves recursively on the inner operation before forming the
10045 outer operation, which we return. */
10046 return simplify_gen_binary (inner_code, GET_MODE (x),
10047 apply_distributive_law (tem), other);
10048 }
10049
10050 /* See if X is of the form (* (+ A B) C), and if so convert to
10051 (+ (* A C) (* B C)) and try to simplify.
10052
10053 Most of the time, this results in no change. However, if some of
10054 the operands are the same or inverses of each other, simplifications
10055 will result.
10056
10057 For example, (and (ior A B) (not B)) can occur as the result of
10058 expanding a bit field assignment. When we apply the distributive
10059 law to this, we get (ior (and A (not B)) (and B (not B))),
10060 which then simplifies to (and A (not B)).
10061
10062 Note that no checks happen on the validity of applying the inverse
10063 distributive law here; such checks would be pointless, since the few
10064 places that call this routine can do them themselves.
10065
10066 N is the index of the term that is decomposed (the arithmetic operation,
10067 i.e. (+ A B) in the first example above). !N is the index of the term that
10068 is distributed, i.e. of C in the first example above. */
10069 static rtx
10070 distribute_and_simplify_rtx (rtx x, int n)
10071 {
10072 machine_mode mode;
10073 enum rtx_code outer_code, inner_code;
10074 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
10075
10076 /* Distributivity is not true for floating point as it can change the
10077 value. So we don't do it unless -funsafe-math-optimizations. */
10078 if (FLOAT_MODE_P (GET_MODE (x))
10079 && ! flag_unsafe_math_optimizations)
10080 return NULL_RTX;
10081
10082 decomposed = XEXP (x, n);
10083 if (!ARITHMETIC_P (decomposed))
10084 return NULL_RTX;
10085
10086 mode = GET_MODE (x);
10087 outer_code = GET_CODE (x);
10088 distributed = XEXP (x, !n);
10089
10090 inner_code = GET_CODE (decomposed);
10091 inner_op0 = XEXP (decomposed, 0);
10092 inner_op1 = XEXP (decomposed, 1);
10093
10094 /* Special case (and (xor B C) (not A)), which is equivalent to
10095 (xor (ior A B) (ior A C)) */
10096 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
10097 {
10098 distributed = XEXP (distributed, 0);
10099 outer_code = IOR;
10100 }
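/* To check the equivalence: when a bit of A is 1, both (ior A B) and
   (ior A C) have that bit set, so the XOR is 0 there, matching the
   AND with (not A); when the bit is 0, the XOR reduces to (xor B C).  */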
10101
10102 if (n == 0)
10103 {
10104 /* Distribute the second term. */
10105 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
10106 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
10107 }
10108 else
10109 {
10110 /* Distribute the first term. */
10111 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
10112 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
10113 }
10114
10115 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
10116 new_op0, new_op1));
10117 if (GET_CODE (tmp) != outer_code
10118 && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
10119 < set_src_cost (x, mode, optimize_this_for_speed_p)))
10120 return tmp;
10121
10122 return NULL_RTX;
10123 }
10124 \f
10125 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
10126 in MODE. Return an equivalent form, if different from (and VAROP
10127 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
10128
10129 static rtx
10130 simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
10131 unsigned HOST_WIDE_INT constop)
10132 {
10133 unsigned HOST_WIDE_INT nonzero;
10134 unsigned HOST_WIDE_INT orig_constop;
10135 rtx orig_varop;
10136 int i;
10137
10138 orig_varop = varop;
10139 orig_constop = constop;
10140 if (GET_CODE (varop) == CLOBBER)
10141 return NULL_RTX;
10142
10143 /* Simplify VAROP knowing that we will be only looking at some of the
10144 bits in it.
10145
10146 Note by passing in CONSTOP, we guarantee that the bits not set in
10147 CONSTOP are not significant and will never be examined. We must
10148 ensure that is the case by explicitly masking out those bits
10149 before returning. */
10150 varop = force_to_mode (varop, mode, constop, 0);
10151
10152 /* If VAROP is a CLOBBER, we will fail so return it. */
10153 if (GET_CODE (varop) == CLOBBER)
10154 return varop;
10155
10156 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
10157 to VAROP and return the new constant. */
10158 if (CONST_INT_P (varop))
10159 return gen_int_mode (INTVAL (varop) & constop, mode);
10160
10161 /* See what bits may be nonzero in VAROP. Unlike the general case of
10162 a call to nonzero_bits, here we don't care about bits outside
10163 MODE. */
10164
10165 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
10166
10167 /* Turn off all bits in the constant that are known to already be zero.
10168 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
10169 which is tested below. */
10170
10171 constop &= nonzero;
10172
10173 /* If we don't have any bits left, return zero. */
10174 if (constop == 0)
10175 return const0_rtx;
10176
10177 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
10178 a power of two, we can replace this with an ASHIFT. */
10179 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
10180 && (i = exact_log2 (constop)) >= 0)
10181 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
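/* For example, if the operand of the NEG is known to be 0 or 1, its
   negation is 0 or all ones, so (and (neg X) (const_int 8)) is 0 or 8,
   which is exactly (ashift X (const_int 3)).  */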
10182
10183 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
10184 or XOR, then try to apply the distributive law. This may eliminate
10185 operations if either branch can be simplified because of the AND.
10186 It may also make some cases more complex, but those cases probably
10187 won't match a pattern either with or without this. */
10188
10189 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
10190 {
10191 scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10192 return
10193 gen_lowpart
10194 (mode,
10195 apply_distributive_law
10196 (simplify_gen_binary (GET_CODE (varop), varop_mode,
10197 simplify_and_const_int (NULL_RTX, varop_mode,
10198 XEXP (varop, 0),
10199 constop),
10200 simplify_and_const_int (NULL_RTX, varop_mode,
10201 XEXP (varop, 1),
10202 constop))));
10203 }
10204
10205 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
10206 the AND and see if one of the operands simplifies to zero. If so, we
10207 may eliminate it. */
10208
10209 if (GET_CODE (varop) == PLUS
10210 && pow2p_hwi (constop + 1))
10211 {
10212 rtx o0, o1;
10213
10214 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
10215 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
10216 if (o0 == const0_rtx)
10217 return o1;
10218 if (o1 == const0_rtx)
10219 return o0;
10220 }
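/* For example, in (and (plus (ashift Y (const_int 4)) Z) (const_int 15))
   the shifted term contributes nothing below bit 4, so O0 above folds
   to zero and the whole expression collapses to (and Z (const_int 15)).  */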
10221
10222 /* Make a SUBREG if necessary. If we can't make it, fail. */
10223 varop = gen_lowpart (mode, varop);
10224 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10225 return NULL_RTX;
10226
10227 /* If we are only masking insignificant bits, return VAROP. */
10228 if (constop == nonzero)
10229 return varop;
10230
10231 if (varop == orig_varop && constop == orig_constop)
10232 return NULL_RTX;
10233
10234 /* Otherwise, return an AND. */
10235 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
10236 }
10237
10238
10239 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
10240 in MODE.
10241
10242 Return an equivalent form, if different from X. Otherwise, return X. If
10243 X is zero, we are to always construct the equivalent form. */
10244
10245 static rtx
10246 simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
10247 unsigned HOST_WIDE_INT constop)
10248 {
10249 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
10250 if (tem)
10251 return tem;
10252
10253 if (!x)
10254 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
10255 gen_int_mode (constop, mode));
10256 if (GET_MODE (x) != mode)
10257 x = gen_lowpart (mode, x);
10258 return x;
10259 }
10260 \f
10261 /* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
10262 We don't care about bits outside of those defined in MODE.
10263 We DO care about all the bits in MODE, even if XMODE is smaller than MODE.
10264
10265 For most X this is simply GET_MODE_MASK (MODE), but if X is
10266 a shift, AND, or zero_extract, we can do better. */
10267
10268 static rtx
10269 reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
10270 scalar_int_mode mode,
10271 unsigned HOST_WIDE_INT *nonzero)
10272 {
10273 rtx tem;
10274 reg_stat_type *rsp;
10275
10276 /* If X is a register whose nonzero bits value is current, use it.
10277 Otherwise, if X is a register whose value we can find, use that
10278 value. Otherwise, use the previously-computed global nonzero bits
10279 for this register. */
10280
10281 rsp = &reg_stat[REGNO (x)];
10282 if (rsp->last_set_value != 0
10283 && (rsp->last_set_mode == mode
10284 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10285 && GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
10286 && GET_MODE_CLASS (mode) == MODE_INT))
10287 && ((rsp->last_set_label >= label_tick_ebb_start
10288 && rsp->last_set_label < label_tick)
10289 || (rsp->last_set_label == label_tick
10290 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10291 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10292 && REGNO (x) < reg_n_sets_max
10293 && REG_N_SETS (REGNO (x)) == 1
10294 && !REGNO_REG_SET_P
10295 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10296 REGNO (x)))))
10297 {
10298 /* Note that, even if the precision of last_set_mode is lower than that
10299 of mode, record_value_for_reg invoked nonzero_bits on the register
10300 with nonzero_bits_mode (because last_set_mode is necessarily integral
10301 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
10302 are all valid, hence in mode too since nonzero_bits_mode is defined
10303 to the largest HWI_COMPUTABLE_MODE_P mode. */
10304 *nonzero &= rsp->last_set_nonzero_bits;
10305 return NULL;
10306 }
10307
10308 tem = get_last_value (x);
10309 if (tem)
10310 {
10311 if (SHORT_IMMEDIATES_SIGN_EXTEND)
10312 tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));
10313
10314 return tem;
10315 }
10316
10317 if (nonzero_sign_valid && rsp->nonzero_bits)
10318 {
10319 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
10320
10321 if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
10322 /* We don't know anything about the upper bits. */
10323 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);
10324
10325 *nonzero &= mask;
10326 }
10327
10328 return NULL;
10329 }
10330
10331 /* Given a reg X of mode XMODE, return the number of bits at the high-order
10332 end of X that are known to be equal to the sign bit. X will be used
10333 in mode MODE; the returned value will always be between 1 and the
10334 number of bits in MODE. */
10335
10336 static rtx
10337 reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
10338 scalar_int_mode mode,
10339 unsigned int *result)
10340 {
10341 rtx tem;
10342 reg_stat_type *rsp;
10343
10344 rsp = &reg_stat[REGNO (x)];
10345 if (rsp->last_set_value != 0
10346 && rsp->last_set_mode == mode
10347 && ((rsp->last_set_label >= label_tick_ebb_start
10348 && rsp->last_set_label < label_tick)
10349 || (rsp->last_set_label == label_tick
10350 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10351 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10352 && REGNO (x) < reg_n_sets_max
10353 && REG_N_SETS (REGNO (x)) == 1
10354 && !REGNO_REG_SET_P
10355 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10356 REGNO (x)))))
10357 {
10358 *result = rsp->last_set_sign_bit_copies;
10359 return NULL;
10360 }
10361
10362 tem = get_last_value (x);
10363 if (tem != 0)
10364 return tem;
10365
10366 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
10367 && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
10368 *result = rsp->sign_bit_copies;
10369
10370 return NULL;
10371 }
10372 \f
10373 /* Return the number of "extended" bits there are in X, when interpreted
10374 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
10375 unsigned quantities, this is the number of high-order zero bits.
10376 For signed quantities, this is the number of copies of the sign bit
10377 minus 1. In both cases, this function returns the number of "spare"
10378 bits. For example, if two quantities for which this function returns
10379 at least 1 are added, the addition is known not to overflow.
10380
10381 This function will always return 0 unless called during combine, which
10382 implies that it must be called from a define_split. */
10383
10384 unsigned int
10385 extended_count (const_rtx x, machine_mode mode, int unsignedp)
10386 {
10387 if (nonzero_sign_valid == 0)
10388 return 0;
10389
10390 scalar_int_mode int_mode;
10391 return (unsignedp
10392 ? (is_a <scalar_int_mode> (mode, &int_mode)
10393 && HWI_COMPUTABLE_MODE_P (int_mode)
10394 ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
10395 - floor_log2 (nonzero_bits (x, int_mode)))
10396 : 0)
10397 : num_sign_bit_copies (x, mode) - 1);
10398 }
10399
10400 /* This function is called from `simplify_shift_const' to merge two
10401 outer operations. Specifically, we have already found that we need
10402 to perform operation *POP0 with constant *PCONST0 at the outermost
10403 position. We would now like to also perform OP1 with constant CONST1
10404 (with *POP0 being done last).
10405
10406 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10407 the resulting operation. *PCOMP_P is set to 1 if we would need to
10408 complement the innermost operand, otherwise it is unchanged.
10409
10410 MODE is the mode in which the operation will be done. No bits outside
10411 the width of this mode matter. It is assumed that the width of this mode
10412 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10413
10414 If *POP0 or OP1 is UNKNOWN, it means no operation is required. Only NEG, PLUS,
10415 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
10416 result is simply *PCONST0.
10417
10418 If the resulting operation cannot be expressed as one operation, we
10419 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
10420
10421 static int
10422 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0,
                       enum rtx_code op1, HOST_WIDE_INT const1,
                       machine_mode mode, int *pcomp_p)
10423 {
10424 enum rtx_code op0 = *pop0;
10425 HOST_WIDE_INT const0 = *pconst0;
10426
10427 const0 &= GET_MODE_MASK (mode);
10428 const1 &= GET_MODE_MASK (mode);
10429
10430 /* If OP0 is an AND, clear unimportant bits in CONST1. */
10431 if (op0 == AND)
10432 const1 &= const0;
10433
10434 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
10435 if OP0 is SET. */
10436
10437 if (op1 == UNKNOWN || op0 == SET)
10438 return 1;
10439
10440 else if (op0 == UNKNOWN)
10441 op0 = op1, const0 = const1;
10442
10443 else if (op0 == op1)
10444 {
10445 switch (op0)
10446 {
10447 case AND:
10448 const0 &= const1;
10449 break;
10450 case IOR:
10451 const0 |= const1;
10452 break;
10453 case XOR:
10454 const0 ^= const1;
10455 break;
10456 case PLUS:
10457 const0 += const1;
10458 break;
10459 case NEG:
10460 op0 = UNKNOWN;
10461 break;
10462 default:
10463 break;
10464 }
10465 }
10466
10467 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
10468 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
10469 return 0;
10470
10471 /* If the two constants aren't the same, we can't do anything. The
10472 remaining six cases can all be done. */
10473 else if (const0 != const1)
10474 return 0;
10475
10476 else
10477 switch (op0)
10478 {
10479 case IOR:
10480 if (op1 == AND)
10481 /* (a & b) | b == b */
10482 op0 = SET;
10483 else /* op1 == XOR */
10484 /* (a ^ b) | b == a | b */
10485 {;}
10486 break;
10487
10488 case XOR:
10489 if (op1 == AND)
10490 /* (a & b) ^ b == (~a) & b */
10491 op0 = AND, *pcomp_p = 1;
10492 else /* op1 == IOR */
10493 /* (a | b) ^ b == a & ~b */
10494 op0 = AND, const0 = ~const0;
10495 break;
10496
10497 case AND:
10498 if (op1 == IOR)
10499 /* (a | b) & b == b */
10500 op0 = SET;
10501 else /* op1 == XOR */
10502 /* (a ^ b) & b == (~a) & b */
10503 *pcomp_p = 1;
10504 break;
10505 default:
10506 break;
10507 }
10508
10509 /* Check for NO-OP cases. */
10510 const0 &= GET_MODE_MASK (mode);
10511 if (const0 == 0
10512 && (op0 == IOR || op0 == XOR || op0 == PLUS))
10513 op0 = UNKNOWN;
10514 else if (const0 == 0 && op0 == AND)
10515 op0 = SET;
10516 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10517 && op0 == AND)
10518 op0 = UNKNOWN;
10519
10520 *pop0 = op0;
10521
10522 /* ??? Slightly redundant with the above mask, but not entirely.
10523 Moving this above means we'd have to sign-extend the mode mask
10524 for the final test. */
10525 if (op0 != UNKNOWN && op0 != NEG)
10526 *pconst0 = trunc_int_for_mode (const0, mode);
10527
10528 return 1;
10529 }
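/* For example, merging two IORs with constants 0xf0 and 0x0f yields a
   single IOR with constant 0xff, while an inner AND followed by an outer
   IOR with the same constant turns *POP0 into SET, since
   (a & b) | b == b.  */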
10530 \f
10531 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10532 the shift in. The original shift operation CODE is performed on OP in
10533 ORIG_MODE. Return the wider mode MODE if we can perform the operation
10534 in that mode. Return ORIG_MODE otherwise. We can also assume that the
10535 result of the shift is subject to operation OUTER_CODE with operand
10536 OUTER_CONST. */
10537
10538 static scalar_int_mode
10539 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10540 scalar_int_mode orig_mode, scalar_int_mode mode,
10541 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10542 {
10543 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10544
10545 /* In general we can't widen the mode for right shifts and rotates. */
10546 switch (code)
10547 {
10548 case ASHIFTRT:
10549 /* We can still widen if the bits brought in from the left are identical
10550 to the sign bit of ORIG_MODE. */
10551 if (num_sign_bit_copies (op, mode)
10552 > (unsigned) (GET_MODE_PRECISION (mode)
10553 - GET_MODE_PRECISION (orig_mode)))
10554 return mode;
10555 return orig_mode;
10556
10557 case LSHIFTRT:
10558 /* Similarly here but with zero bits. */
10559 if (HWI_COMPUTABLE_MODE_P (mode)
10560 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10561 return mode;
10562
10563 /* We can also widen if the bits brought in will be masked off. This
10564 operation is performed in ORIG_MODE. */
10565 if (outer_code == AND)
10566 {
10567 int care_bits = low_bitmask_len (orig_mode, outer_const);
10568
10569 if (care_bits >= 0
10570 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10571 return mode;
10572 }
10573 /* fall through */
10574
10575 case ROTATE:
10576 return orig_mode;
10577
10578 case ROTATERT:
10579 gcc_unreachable ();
10580
10581 default:
10582 return mode;
10583 }
10584 }
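/* For example, a QImode LSHIFTRT can be widened to SImode when
   nonzero_bits shows the operand has no bits set above bit 7, since the
   bits shifted in from the left are then zero in both modes; an ASHIFTRT
   instead needs enough sign-bit copies to cover the extra width.  */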
10585
10586 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
10587 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
10588 if we cannot simplify it. Otherwise, return a simplified value.
10589
10590 The shift is normally computed in the widest mode we find in VAROP, as
10591 long as it isn't a different number of words than RESULT_MODE. Exceptions
10592 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10593
10594 static rtx
10595 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10596 rtx varop, int orig_count)
10597 {
10598 enum rtx_code orig_code = code;
10599 rtx orig_varop = varop;
10600 int count, log2;
10601 machine_mode mode = result_mode;
10602 machine_mode shift_mode;
10603 scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
10604 /* We form (outer_op (code varop count) (outer_const)). */
10605 enum rtx_code outer_op = UNKNOWN;
10606 HOST_WIDE_INT outer_const = 0;
10607 int complement_p = 0;
10608 rtx new_rtx, x;
10609
10610 /* Make sure to truncate the "natural" shift on the way in. We don't
10611 want to do this inside the loop as it makes it more difficult to
10612 combine shifts. */
10613 if (SHIFT_COUNT_TRUNCATED)
10614 orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;
10615
10616 /* If we were given an invalid count, don't do anything except exactly
10617 what was requested. */
10618
10619 if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
10620 return NULL_RTX;
10621
10622 count = orig_count;
10623
10624 /* Unless one of the branches of the `if' in this loop does a `continue',
10625 we will `break' the loop after the `if'. */
10626
10627 while (count != 0)
10628 {
10629 /* If we have an operand of (clobber (const_int 0)), fail. */
10630 if (GET_CODE (varop) == CLOBBER)
10631 return NULL_RTX;
10632
10633 /* Convert ROTATERT to ROTATE. */
10634 if (code == ROTATERT)
10635 {
10636 unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
10637 code = ROTATE;
10638 count = bitsize - count;
10639 }
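      /* For example, in SImode a ROTATERT by 7 becomes a ROTATE by 25;
         both move every bit to the same final position.  */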
10640
10641 shift_mode = result_mode;
10642 if (shift_mode != mode)
10643 {
10644 /* We only change the modes of scalar shifts. */
10645 int_mode = as_a <scalar_int_mode> (mode);
10646 int_result_mode = as_a <scalar_int_mode> (result_mode);
10647 shift_mode = try_widen_shift_mode (code, varop, count,
10648 int_result_mode, int_mode,
10649 outer_op, outer_const);
10650 }
10651
10652 scalar_int_mode shift_unit_mode
10653 = as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));
10654
10655 /* Handle cases where the count is greater than the size of the mode
10656 minus 1. For ASHIFT, use the size minus one as the count (this can
10657 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10658 take the count modulo the size. For other shifts, the result is
10659 zero.
10660
10661 Since these shifts are being produced by the compiler by combining
10662 multiple operations, each of which is defined, we know what the
10663 result is supposed to be. */
10664
10665 if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
10666 {
10667 if (code == ASHIFTRT)
10668 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10669 else if (code == ROTATE || code == ROTATERT)
10670 count %= GET_MODE_PRECISION (shift_unit_mode);
10671 else
10672 {
10673 /* We can't simply return zero because there may be an
10674 outer op. */
10675 varop = const0_rtx;
10676 count = 0;
10677 break;
10678 }
10679 }
10680
10681 /* If we discovered we had to complement VAROP, leave. Making a NOT
10682 here would cause an infinite loop. */
10683 if (complement_p)
10684 break;
10685
10686 if (shift_mode == shift_unit_mode)
10687 {
10688 /* An arithmetic right shift of a quantity known to be -1 or 0
10689 is a no-op. */
10690 if (code == ASHIFTRT
10691 && (num_sign_bit_copies (varop, shift_unit_mode)
10692 == GET_MODE_PRECISION (shift_unit_mode)))
10693 {
10694 count = 0;
10695 break;
10696 }
10697
10698 /* If we are doing an arithmetic right shift and discarding all but
10699 the sign bit copies, this is equivalent to doing a shift by the
10700 bitsize minus one. Convert it into that shift because it will
10701 often allow other simplifications. */
10702
10703 if (code == ASHIFTRT
10704 && (count + num_sign_bit_copies (varop, shift_unit_mode)
10705 >= GET_MODE_PRECISION (shift_unit_mode)))
10706 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
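          /* For example, in SImode a VAROP with 30 sign-bit copies can
             only take the values -4 ... 3, so (ashiftrt VAROP 5) is
             already 0 or -1 and is equivalent to (ashiftrt VAROP 31).  */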
10707
10708 /* We simplify the tests below and elsewhere by converting
10709 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10710 `make_compound_operation' will convert it to an ASHIFTRT for
10711 those machines (such as VAX) that don't have an LSHIFTRT. */
10712 if (code == ASHIFTRT
10713 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10714 && val_signbit_known_clear_p (shift_unit_mode,
10715 nonzero_bits (varop,
10716 shift_unit_mode)))
10717 code = LSHIFTRT;
10718
10719 if (((code == LSHIFTRT
10720 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10721 && !(nonzero_bits (varop, shift_unit_mode) >> count))
10722 || (code == ASHIFT
10723 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10724 && !((nonzero_bits (varop, shift_unit_mode) << count)
10725 & GET_MODE_MASK (shift_unit_mode))))
10726 && !side_effects_p (varop))
10727 varop = const0_rtx;
10728 }
10729
10730 switch (GET_CODE (varop))
10731 {
10732 case SIGN_EXTEND:
10733 case ZERO_EXTEND:
10734 case SIGN_EXTRACT:
10735 case ZERO_EXTRACT:
10736 new_rtx = expand_compound_operation (varop);
10737 if (new_rtx != varop)
10738 {
10739 varop = new_rtx;
10740 continue;
10741 }
10742 break;
10743
10744 case MEM:
10745 /* The following rules apply only to scalars. */
10746 if (shift_mode != shift_unit_mode)
10747 break;
10748 int_mode = as_a <scalar_int_mode> (mode);
10749
10750 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10751 minus the width of a smaller mode, we can do this with a
10752 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10753 if ((code == ASHIFTRT || code == LSHIFTRT)
10754 && ! mode_dependent_address_p (XEXP (varop, 0),
10755 MEM_ADDR_SPACE (varop))
10756 && ! MEM_VOLATILE_P (varop)
10757 && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
10758 .exists (&tmode)))
10759 {
10760 new_rtx = adjust_address_nv (varop, tmode,
10761 BYTES_BIG_ENDIAN ? 0
10762 : count / BITS_PER_UNIT);
10763
10764 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10765 : ZERO_EXTEND, int_mode, new_rtx);
10766 count = 0;
10767 continue;
10768 }
10769 break;
10770
10771 case SUBREG:
10772 /* The following rules apply only to scalars. */
10773 if (shift_mode != shift_unit_mode)
10774 break;
10775 int_mode = as_a <scalar_int_mode> (mode);
10776 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10777
10778 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10779 the same number of words as what we've seen so far. Then store
10780 the widest mode in MODE. */
10781 if (subreg_lowpart_p (varop)
10782 && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
10783 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
10784 && (CEIL (GET_MODE_SIZE (inner_mode), UNITS_PER_WORD)
10785 == CEIL (GET_MODE_SIZE (int_mode), UNITS_PER_WORD))
10786 && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
10787 {
10788 varop = SUBREG_REG (varop);
10789 if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
10790 mode = inner_mode;
10791 continue;
10792 }
10793 break;
10794
10795 case MULT:
10796 /* Some machines use MULT instead of ASHIFT because MULT
10797 is cheaper. But it is still better on those machines to
10798 merge two shifts into one. */
10799 if (CONST_INT_P (XEXP (varop, 1))
10800 && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
10801 {
10802 rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
10803 varop = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10804 XEXP (varop, 0), log2_rtx);
10805 continue;
10806 }
10807 break;
10808
10809 case UDIV:
10810 /* Similar, for when divides are cheaper. */
10811 if (CONST_INT_P (XEXP (varop, 1))
10812 && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
10813 {
10814 rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
10815 varop = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10816 XEXP (varop, 0), log2_rtx);
10817 continue;
10818 }
10819 break;
10820
10821 case ASHIFTRT:
10822 /* If we are extracting just the sign bit of an arithmetic
10823 right shift, that shift is not needed. However, the sign
10824 bit of a wider mode may be different from what would be
10825 interpreted as the sign bit in a narrower mode, so, if
10826 the result is narrower, don't discard the shift. */
10827 if (code == LSHIFTRT
10828 && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
10829 && (GET_MODE_UNIT_BITSIZE (result_mode)
10830 >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
10831 {
10832 varop = XEXP (varop, 0);
10833 continue;
10834 }
10835
10836 /* fall through */
10837
10838 case LSHIFTRT:
10839 case ASHIFT:
10840 case ROTATE:
10841 /* The following rules apply only to scalars. */
10842 if (shift_mode != shift_unit_mode)
10843 break;
10844 int_mode = as_a <scalar_int_mode> (mode);
10845 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10846 int_result_mode = as_a <scalar_int_mode> (result_mode);
10847
10848 /* Here we have two nested shifts. The result is usually the
10849 AND of a new shift with a mask. We compute the result below. */
10850 if (CONST_INT_P (XEXP (varop, 1))
10851 && INTVAL (XEXP (varop, 1)) >= 0
10852 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
10853 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10854 && HWI_COMPUTABLE_MODE_P (int_mode))
10855 {
10856 enum rtx_code first_code = GET_CODE (varop);
10857 unsigned int first_count = INTVAL (XEXP (varop, 1));
10858 unsigned HOST_WIDE_INT mask;
10859 rtx mask_rtx;
10860
10861 /* We have one common special case. We can't do any merging if
10862 the inner code is an ASHIFTRT of a smaller mode. However, if
10863 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10864 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10865 we can convert it to
10866 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10867 This simplifies certain SIGN_EXTEND operations. */
10868 if (code == ASHIFT && first_code == ASHIFTRT
10869 && count == (GET_MODE_PRECISION (int_result_mode)
10870 - GET_MODE_PRECISION (int_varop_mode)))
10871 {
10872 /* C3 has the low-order C1 bits zero. */
10873
10874 mask = GET_MODE_MASK (int_mode)
10875 & ~((HOST_WIDE_INT_1U << first_count) - 1);
10876
10877 varop = simplify_and_const_int (NULL_RTX, int_result_mode,
10878 XEXP (varop, 0), mask);
10879 varop = simplify_shift_const (NULL_RTX, ASHIFT,
10880 int_result_mode, varop, count);
10881 count = first_count;
10882 code = ASHIFTRT;
10883 continue;
10884 }
10885
10886 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10887 than C1 high-order bits equal to the sign bit, we can convert
10888 this to either an ASHIFT or an ASHIFTRT depending on the
10889 two counts.
10890
10891 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE. */
10892
10893 if (code == ASHIFTRT && first_code == ASHIFT
10894 && int_varop_mode == shift_unit_mode
10895 && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
10896 > first_count))
10897 {
10898 varop = XEXP (varop, 0);
10899 count -= first_count;
10900 if (count < 0)
10901 {
10902 count = -count;
10903 code = ASHIFT;
10904 }
10905
10906 continue;
10907 }
10908
10909 /* There are some cases we can't do. If CODE is ASHIFTRT,
10910 we can only do this if FIRST_CODE is also ASHIFTRT.
10911
10912 We can't do the case when CODE is ROTATE and FIRST_CODE is
10913 ASHIFTRT.
10914
10915 If the mode of this shift is not the mode of the outer shift,
10916 we can't do this if either shift is a right shift or ROTATE.
10917
10918 Finally, we can't do any of these if the mode is too wide
10919 unless the codes are the same.
10920
10921 Handle the case where the shift codes are the same
10922 first. */
10923
10924 if (code == first_code)
10925 {
10926 if (int_varop_mode != int_result_mode
10927 && (code == ASHIFTRT || code == LSHIFTRT
10928 || code == ROTATE))
10929 break;
10930
10931 count += first_count;
10932 varop = XEXP (varop, 0);
10933 continue;
10934 }
10935
10936 if (code == ASHIFTRT
10937 || (code == ROTATE && first_code == ASHIFTRT)
10938 || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
10939 || (int_varop_mode != int_result_mode
10940 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10941 || first_code == ROTATE
10942 || code == ROTATE)))
10943 break;
10944
10945 /* To compute the mask to apply after the shift, shift the
10946 nonzero bits of the inner shift the same way the
10947 outer shift will. */
10948
10949 mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
10950 int_result_mode);
10951 rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
10952 mask_rtx
10953 = simplify_const_binary_operation (code, int_result_mode,
10954 mask_rtx, count_rtx);
10955
10956 /* Give up if we can't compute an outer operation to use. */
10957 if (mask_rtx == 0
10958 || !CONST_INT_P (mask_rtx)
10959 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10960 INTVAL (mask_rtx),
10961 int_result_mode, &complement_p))
10962 break;
10963
10964 /* If the shifts are in the same direction, we add the
10965 counts. Otherwise, we subtract them. */
10966 if ((code == ASHIFTRT || code == LSHIFTRT)
10967 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10968 count += first_count;
10969 else
10970 count -= first_count;
10971
10972 /* If COUNT is positive, the new shift is usually CODE,
10973 except for the two exceptions below, in which case it is
10974 FIRST_CODE. If the count is negative, FIRST_CODE should
10975 always be used.  */
10976 if (count > 0
10977 && ((first_code == ROTATE && code == ASHIFT)
10978 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10979 code = first_code;
10980 else if (count < 0)
10981 code = first_code, count = -count;
10982
10983 varop = XEXP (varop, 0);
10984 continue;
10985 }
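              /* For example, (lshiftrt:SI (ashift:SI X 3) 2) reaches
                 here with count == -1, so it is rebuilt as
                 (ashift:SI X 1) together with an outer AND that clears
                 the bits the original LSHIFTRT would have cleared.  */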
10986
10987 /* If we have (A << B << C) for any shift, we can convert this to
10988 (A << C << B). This wins if A is a constant. Only try this if
10989 B is not a constant. */
10990
10991 else if (GET_CODE (varop) == code
10992 && CONST_INT_P (XEXP (varop, 0))
10993 && !CONST_INT_P (XEXP (varop, 1)))
10994 {
10995 /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
10996 sure the result will be masked. See PR70222. */
10997 if (code == LSHIFTRT
10998 && int_mode != int_result_mode
10999 && !merge_outer_ops (&outer_op, &outer_const, AND,
11000 GET_MODE_MASK (int_result_mode)
11001 >> orig_count, int_result_mode,
11002 &complement_p))
11003 break;
11004 /* For ((int) (cstLL >> count)) >> cst2 just give up. Queuing
11005 up outer sign extension (often left and right shift) is
11006 hardly more efficient than the original. See PR70429. */
11007 if (code == ASHIFTRT && int_mode != int_result_mode)
11008 break;
11009
11010 rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
11011 rtx new_rtx = simplify_const_binary_operation (code, int_mode,
11012 XEXP (varop, 0),
11013 count_rtx);
11014 varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
11015 count = 0;
11016 continue;
11017 }
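              /* For example, (ashift (ashift (const_int 3) B) 4) becomes
                 (ashift (const_int 48) B): the constant absorbs the
                 known count, leaving only the variable shift.  */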
11018 break;
11019
11020 case NOT:
11021 /* The following rules apply only to scalars. */
11022 if (shift_mode != shift_unit_mode)
11023 break;
11024
11025 /* Make this fit the case below. */
11026 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
11027 continue;
11028
11029 case IOR:
11030 case AND:
11031 case XOR:
11032 /* The following rules apply only to scalars. */
11033 if (shift_mode != shift_unit_mode)
11034 break;
11035 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11036 int_result_mode = as_a <scalar_int_mode> (result_mode);
11037
11038 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
11039 with C the size of VAROP - 1 and the shift is logical if
11040 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11041 we have an (le X 0) operation. If we have an arithmetic shift
11042 and STORE_FLAG_VALUE is 1 or we have a logical shift with
11043 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
11044
11045 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
11046 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
11047 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11048 && (code == LSHIFTRT || code == ASHIFTRT)
11049 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11050 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11051 {
11052 count = 0;
11053 varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
11054 const0_rtx);
11055
11056 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11057 varop = gen_rtx_NEG (int_varop_mode, varop);
11058
11059 continue;
11060 }
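          /* To see why: the sign bit of (ior (plus X -1) X) is set
             exactly when X <= 0, since for X == 0 the PLUS yields -1
             and for X < 0 X itself is negative; shifting that bit down
             to bit 0 produces the (le X 0) flag.  */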
11061
11062 /* If we have (shift (logical)), move the logical to the outside
11063 to allow it to possibly combine with another logical and the
11064 shift to combine with another shift. This also canonicalizes to
11065 what a ZERO_EXTRACT looks like. Also, some machines have
11066 (and (shift)) insns. */
11067
11068 if (CONST_INT_P (XEXP (varop, 1))
11069 /* We can't do this if we have (ashiftrt (xor)) and the
11070 constant has its sign bit set in shift_unit_mode with
11071 shift_unit_mode wider than result_mode. */
11072 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
11073 && int_result_mode != shift_unit_mode
11074 && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
11075 shift_unit_mode) < 0)
11076 && (new_rtx = simplify_const_binary_operation
11077 (code, int_result_mode,
11078 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11079 gen_int_shift_amount (int_result_mode, count))) != 0
11080 && CONST_INT_P (new_rtx)
11081 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
11082 INTVAL (new_rtx), int_result_mode,
11083 &complement_p))
11084 {
11085 varop = XEXP (varop, 0);
11086 continue;
11087 }
11088
11089 /* If we can't do that, try to simplify the shift in each arm of the
11090 logical expression, make a new logical expression, and apply
11091 the inverse distributive law. This also can't be done for
11092 (ashiftrt (xor)) where we've widened the shift and the constant
11093 changes the sign bit. */
11094 if (CONST_INT_P (XEXP (varop, 1))
11095 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
11096 && int_result_mode != shift_unit_mode
11097 && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
11098 shift_unit_mode) < 0))
11099 {
11100 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
11101 XEXP (varop, 0), count);
11102 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
11103 XEXP (varop, 1), count);
11104
11105 varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
11106 lhs, rhs);
11107 varop = apply_distributive_law (varop);
11108
11109 count = 0;
11110 continue;
11111 }
11112 break;
11113
11114 case EQ:
11115 /* The following rules apply only to scalars. */
11116 if (shift_mode != shift_unit_mode)
11117 break;
11118 int_result_mode = as_a <scalar_int_mode> (result_mode);
11119
11120 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
11121 says that the sign bit can be tested, FOO has mode MODE, C is
11122 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
11123 that may be nonzero. */
11124 if (code == LSHIFTRT
11125 && XEXP (varop, 1) == const0_rtx
11126 && GET_MODE (XEXP (varop, 0)) == int_result_mode
11127 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11128 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11129 && STORE_FLAG_VALUE == -1
11130 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11131 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11132 int_result_mode, &complement_p))
11133 {
11134 varop = XEXP (varop, 0);
11135 count = 0;
11136 continue;
11137 }
11138 break;
11139
11140 case NEG:
11141 /* The following rules apply only to scalars. */
11142 if (shift_mode != shift_unit_mode)
11143 break;
11144 int_result_mode = as_a <scalar_int_mode> (result_mode);
11145
11146 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
11147 than the number of bits in the mode is equivalent to A. */
11148 if (code == LSHIFTRT
11149 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11150 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
11151 {
11152 varop = XEXP (varop, 0);
11153 count = 0;
11154 continue;
11155 }
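          /* To check: for A == 1, (neg A) is all ones and its top bit is
             1; for A == 0 it is 0; so the shifted result is just A.  */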
11156
11157 /* NEG commutes with ASHIFT since it is multiplication. Move the
11158 NEG outside to allow shifts to combine. */
11159 if (code == ASHIFT
11160 && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
11161 int_result_mode, &complement_p))
11162 {
11163 varop = XEXP (varop, 0);
11164 continue;
11165 }
11166 break;
11167
11168 case PLUS:
11169 /* The following rules apply only to scalars. */
11170 if (shift_mode != shift_unit_mode)
11171 break;
11172 int_result_mode = as_a <scalar_int_mode> (result_mode);
11173
11174 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
11175 is one less than the number of bits in the mode is
11176 equivalent to (xor A 1). */
11177 if (code == LSHIFTRT
11178 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11179 && XEXP (varop, 1) == constm1_rtx
11180 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11181 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11182 int_result_mode, &complement_p))
11183 {
11184 count = 0;
11185 varop = XEXP (varop, 0);
11186 continue;
11187 }
11188
11189 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
11190 that might be nonzero in BAR are those being shifted out and those
11191 bits are known zero in FOO, we can replace the PLUS with FOO.
11192 Similarly in the other operand order. This code occurs when
11193 we are computing the size of a variable-size array. */
11194
11195 if ((code == ASHIFTRT || code == LSHIFTRT)
11196 && count < HOST_BITS_PER_WIDE_INT
11197 && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
11198 && (nonzero_bits (XEXP (varop, 1), int_result_mode)
11199 & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
11200 {
11201 varop = XEXP (varop, 0);
11202 continue;
11203 }
11204 else if ((code == ASHIFTRT || code == LSHIFTRT)
11205 && count < HOST_BITS_PER_WIDE_INT
11206 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11207 && (nonzero_bits (XEXP (varop, 0), int_result_mode)
11208 >> count) == 0
11209 && (nonzero_bits (XEXP (varop, 0), int_result_mode)
11210 & nonzero_bits (XEXP (varop, 1), int_result_mode)) == 0)
11211 {
11212 varop = XEXP (varop, 1);
11213 continue;
11214 }
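          /* This typically arises when computing array sizes, e.g.
             (lshiftrt (plus (ashift N 3) (const_int 7)) 3): the 7 lives
             entirely in the three bits shifted out, where the shifted N
             is known zero, so the PLUS reduces to its first operand.  */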
11215
11216 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
11217 if (code == ASHIFT
11218 && CONST_INT_P (XEXP (varop, 1))
11219 && (new_rtx = simplify_const_binary_operation
11220 (ASHIFT, int_result_mode,
11221 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11222 gen_int_shift_amount (int_result_mode, count))) != 0
11223 && CONST_INT_P (new_rtx)
11224 && merge_outer_ops (&outer_op, &outer_const, PLUS,
11225 INTVAL (new_rtx), int_result_mode,
11226 &complement_p))
11227 {
11228 varop = XEXP (varop, 0);
11229 continue;
11230 }
11231
11232 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
11233 signbit', and attempt to change the PLUS to an XOR and move it to
11234 the outer operation, as is done above in the AND/IOR/XOR case for
11235 logical shifts. See the logical handling above for the
11236 reasoning. */
11237 if (code == LSHIFTRT
11238 && CONST_INT_P (XEXP (varop, 1))
11239 && mode_signbit_p (int_result_mode, XEXP (varop, 1))
11240 && (new_rtx = simplify_const_binary_operation
11241 (code, int_result_mode,
11242 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11243 gen_int_shift_amount (int_result_mode, count))) != 0
11244 && CONST_INT_P (new_rtx)
11245 && merge_outer_ops (&outer_op, &outer_const, XOR,
11246 INTVAL (new_rtx), int_result_mode,
11247 &complement_p))
11248 {
11249 varop = XEXP (varop, 0);
11250 continue;
11251 }
11252
11253 break;
11254
11255 case MINUS:
11256 /* The following rules apply only to scalars. */
11257 if (shift_mode != shift_unit_mode)
11258 break;
11259 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11260
11261 /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
11262 with C the size of VAROP - 1 and the shift is logical if
11263 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11264 we have a (gt X 0) operation. If the shift is arithmetic with
11265 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
11266 we have a (neg (gt X 0)) operation. */
11267
11268 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11269 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
11270 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11271 && (code == LSHIFTRT || code == ASHIFTRT)
11272 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11273 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
11274 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11275 {
11276 count = 0;
11277 varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
11278 const0_rtx);
11279
11280 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11281 varop = gen_rtx_NEG (int_varop_mode, varop);
11282
11283 continue;
11284 }
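          /* To check: (ashiftrt X C) is 0 for X >= 0 and -1 for X < 0,
             so the MINUS computes -X or -(X + 1) respectively; its sign
             bit is set exactly when X > 0, and the final shift extracts
             it as the (gt X 0) flag.  */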
11285 break;
11286
11287 case TRUNCATE:
11288 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
11289 if the truncate does not affect the value. */
11290 if (code == LSHIFTRT
11291 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
11292 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11293 && (INTVAL (XEXP (XEXP (varop, 0), 1))
11294 >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
11295 - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
11296 {
11297 rtx varop_inner = XEXP (varop, 0);
11298 int new_count = count + INTVAL (XEXP (varop_inner, 1));
11299 rtx new_count_rtx = gen_int_shift_amount (GET_MODE (varop_inner),
11300 new_count);
11301 varop_inner = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
11302 XEXP (varop_inner, 0),
11303 new_count_rtx);
11304 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
11305 count = 0;
11306 continue;
11307 }
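          /* For example, (lshiftrt:SI (truncate:SI (lshiftrt:DI X 32)) 3)
             becomes (truncate:SI (lshiftrt:DI X 35)): the inner shift
             already discarded the bits the TRUNCATE would drop.  */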
11308 break;
11309
11310 default:
11311 break;
11312 }
11313
11314 break;
11315 }
11316
11317 shift_mode = result_mode;
11318 if (shift_mode != mode)
11319 {
11320 /* We only change the modes of scalar shifts. */
11321 int_mode = as_a <scalar_int_mode> (mode);
11322 int_result_mode = as_a <scalar_int_mode> (result_mode);
11323 shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
11324 int_mode, outer_op, outer_const);
11325 }
11326
11327 /* We have now finished analyzing the shift. The result should be
11328 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
11329 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
11330 to the result of the shift. OUTER_CONST is the relevant constant,
11331 but we must turn off all bits turned off in the shift. */
11332
11333 if (outer_op == UNKNOWN
11334 && orig_code == code && orig_count == count
11335 && varop == orig_varop
11336 && shift_mode == GET_MODE (varop))
11337 return NULL_RTX;
11338
11339 /* Make a SUBREG if necessary. If we can't make it, fail. */
11340 varop = gen_lowpart (shift_mode, varop);
11341 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
11342 return NULL_RTX;
11343
11344 /* If we have an outer operation and we just made a shift, it is
11345 possible that we could have simplified the shift were it not
11346 for the outer operation. So try to do the simplification
11347 recursively. */
11348
11349 if (outer_op != UNKNOWN)
11350 x = simplify_shift_const_1 (code, shift_mode, varop, count);
11351 else
11352 x = NULL_RTX;
11353
11354 if (x == NULL_RTX)
11355 x = simplify_gen_binary (code, shift_mode, varop,
11356 gen_int_shift_amount (shift_mode, count));
11357
11358 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
11359 turn off all the bits that the shift would have turned off. */
11360 if (orig_code == LSHIFTRT && result_mode != shift_mode)
11361 /* We only change the modes of scalar shifts. */
11362 x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
11363 x, GET_MODE_MASK (result_mode) >> orig_count);
11364
11365 /* Do the remainder of the processing in RESULT_MODE. */
11366 x = gen_lowpart_or_truncate (result_mode, x);
11367
11368 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
11369 operation. */
11370 if (complement_p)
11371 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
11372
11373 if (outer_op != UNKNOWN)
11374 {
11375 int_result_mode = as_a <scalar_int_mode> (result_mode);
11376
11377 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
11378 && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
11379 outer_const = trunc_int_for_mode (outer_const, int_result_mode);
11380
11381 if (outer_op == AND)
11382 x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
11383 else if (outer_op == SET)
11384 {
11385 /* This means that we have determined that the result is
11386 equivalent to a constant. This should be rare. */
11387 if (!side_effects_p (x))
11388 x = GEN_INT (outer_const);
11389 }
11390 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
11391 x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
11392 else
11393 x = simplify_gen_binary (outer_op, int_result_mode, x,
11394 GEN_INT (outer_const));
11395 }
11396
11397 return x;
11398 }
11399
11400 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
11401 The result of the shift is RESULT_MODE. If we cannot simplify it,
11402 return X or, if it is NULL, synthesize the expression with
11403 simplify_gen_binary. Otherwise, return a simplified value.
11404
11405 The shift is normally computed in the widest mode we find in VAROP, as
11406 long as it isn't a different number of words than RESULT_MODE. Exceptions
11407 are ASHIFTRT and ROTATE, which are always done in their original mode. */
11408
11409 static rtx
11410 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
11411 rtx varop, int count)
11412 {
11413 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
11414 if (tem)
11415 return tem;
11416
11417 if (!x)
11418 x = simplify_gen_binary (code, GET_MODE (varop), varop,
11419 gen_int_shift_amount (GET_MODE (varop), count));
11420 if (GET_MODE (x) != result_mode)
11421 x = gen_lowpart (result_mode, x);
11422 return x;
11423 }
11424
11425 \f
11426 /* A subroutine of recog_for_combine. See there for arguments and
11427 return value. */
11428
11429 static int
11430 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11431 {
11432 rtx pat = *pnewpat;
11433 rtx pat_without_clobbers;
11434 int insn_code_number;
11435 int num_clobbers_to_add = 0;
11436 int i;
11437 rtx notes = NULL_RTX;
11438 rtx old_notes, old_pat;
11439 int old_icode;
11440
11441 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
11442 we use to indicate that something didn't match. If we find such a
11443 thing, force rejection. */
11444 if (GET_CODE (pat) == PARALLEL)
11445 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
11446 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
11447 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
11448 return -1;
11449
11450 old_pat = PATTERN (insn);
11451 old_notes = REG_NOTES (insn);
11452 PATTERN (insn) = pat;
11453 REG_NOTES (insn) = NULL_RTX;
11454
11455 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11456 if (dump_file && (dump_flags & TDF_DETAILS))
11457 {
11458 if (insn_code_number < 0)
11459 fputs ("Failed to match this instruction:\n", dump_file);
11460 else
11461 fputs ("Successfully matched this instruction:\n", dump_file);
11462 print_rtl_single (dump_file, pat);
11463 }
11464
11465 /* If the pattern wasn't recognized, we may previously have had an insn
11466 that clobbered some register as a side effect, but the combined
11467 insn doesn't need to do that. So try once more without the clobbers
11468 unless this represents an ASM insn. */
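/* For instance, a tentative (parallel [(set (reg:SI 0) (plus:SI (reg:SI 1)
   (reg:SI 2))) (clobber (scratch:SI))]) might fail to match as a whole,
   while the bare SET that remains after stripping the CLOBBER matches.  */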
11469
11470 if (insn_code_number < 0 && ! check_asm_operands (pat)
11471 && GET_CODE (pat) == PARALLEL)
11472 {
11473 int pos;
11474
11475 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
11476 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
11477 {
11478 if (i != pos)
11479 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
11480 pos++;
11481 }
11482
11483 SUBST_INT (XVECLEN (pat, 0), pos);
11484
11485 if (pos == 1)
11486 pat = XVECEXP (pat, 0, 0);
11487
11488 PATTERN (insn) = pat;
11489 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11490 if (dump_file && (dump_flags & TDF_DETAILS))
11491 {
11492 if (insn_code_number < 0)
11493 fputs ("Failed to match this instruction:\n", dump_file);
11494 else
11495 fputs ("Successfully matched this instruction:\n", dump_file);
11496 print_rtl_single (dump_file, pat);
11497 }
11498 }
11499
11500 pat_without_clobbers = pat;
11501
11502 PATTERN (insn) = old_pat;
11503 REG_NOTES (insn) = old_notes;
11504
11505 /* Recognize all noop sets; these will be killed by a follow-up pass.  */
11506 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
11507 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
11508
11509 /* If we had any clobbers to add, make a new pattern that contains
11510 them. Then check to make sure that all of them are dead. */
11511 if (num_clobbers_to_add)
11512 {
11513 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
11514 rtvec_alloc (GET_CODE (pat) == PARALLEL
11515 ? (XVECLEN (pat, 0)
11516 + num_clobbers_to_add)
11517 : num_clobbers_to_add + 1));
11518
11519 if (GET_CODE (pat) == PARALLEL)
11520 for (i = 0; i < XVECLEN (pat, 0); i++)
11521 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
11522 else
11523 XVECEXP (newpat, 0, 0) = pat;
11524
11525 add_clobbers (newpat, insn_code_number);
11526
11527 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
11528 i < XVECLEN (newpat, 0); i++)
11529 {
11530 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
11531 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
11532 return -1;
11533 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
11534 {
11535 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
11536 notes = alloc_reg_note (REG_UNUSED,
11537 XEXP (XVECEXP (newpat, 0, i), 0), notes);
11538 }
11539 }
11540 pat = newpat;
11541 }
11542
11543 if (insn_code_number >= 0
11544 && insn_code_number != NOOP_MOVE_INSN_CODE)
11545 {
11546 old_pat = PATTERN (insn);
11547 old_notes = REG_NOTES (insn);
11548 old_icode = INSN_CODE (insn);
11549 PATTERN (insn) = pat;
11550 REG_NOTES (insn) = notes;
11551 INSN_CODE (insn) = insn_code_number;
11552
11553 /* Allow targets to reject combined insn. */
11554 if (!targetm.legitimate_combined_insn (insn))
11555 {
11556 if (dump_file && (dump_flags & TDF_DETAILS))
11557 fputs ("Instruction not appropriate for target.",
11558 dump_file);
11559
11560 /* Callers expect recog_for_combine to strip
11561 clobbers from the pattern on failure. */
11562 pat = pat_without_clobbers;
11563 notes = NULL_RTX;
11564
11565 insn_code_number = -1;
11566 }
11567
11568 PATTERN (insn) = old_pat;
11569 REG_NOTES (insn) = old_notes;
11570 INSN_CODE (insn) = old_icode;
11571 }
11572
11573 *pnewpat = pat;
11574 *pnotes = notes;
11575
11576 return insn_code_number;
11577 }
11578
11579 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11580 expressed as an AND and maybe an LSHIFTRT, to that formulation.
11581 Return whether anything was so changed. */
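/* For example, on a little-endian target with BITS_BIG_ENDIAN clear,
   (zero_extract:SI (reg:SI R) (const_int 8) (const_int 4)) becomes
   (and:SI (lshiftrt:SI (reg:SI R) (const_int 4)) (const_int 255)), and
   (zero_extend:SI (subreg:QI (reg:SI R) 0)) becomes
   (and:SI (reg:SI R) (const_int 255)).  */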
11582
11583 static bool
11584 change_zero_ext (rtx pat)
11585 {
11586 bool changed = false;
11587 rtx *src = &SET_SRC (pat);
11588
11589 subrtx_ptr_iterator::array_type array;
11590 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11591 {
11592 rtx x = **iter;
11593 scalar_int_mode mode, inner_mode;
11594 if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
11595 continue;
11596 int size;
11597
11598 if (GET_CODE (x) == ZERO_EXTRACT
11599 && CONST_INT_P (XEXP (x, 1))
11600 && CONST_INT_P (XEXP (x, 2))
11601 && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
11602 && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
11603 {
11604 size = INTVAL (XEXP (x, 1));
11605
11606 int start = INTVAL (XEXP (x, 2));
11607 if (BITS_BIG_ENDIAN)
11608 start = GET_MODE_PRECISION (inner_mode) - size - start;
11609
11610 if (start != 0)
11611 x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0),
11612 gen_int_shift_amount (inner_mode, start));
11613 else
11614 x = XEXP (x, 0);
11615
11616 if (mode != inner_mode)
11617 {
11618 if (REG_P (x) && HARD_REGISTER_P (x)
11619 && !can_change_dest_mode (x, 0, mode))
11620 continue;
11621
11622 x = gen_lowpart_SUBREG (mode, x);
11623 }
11624 }
11625 else if (GET_CODE (x) == ZERO_EXTEND
11626 && GET_CODE (XEXP (x, 0)) == SUBREG
11627 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
11628 && !paradoxical_subreg_p (XEXP (x, 0))
11629 && subreg_lowpart_p (XEXP (x, 0)))
11630 {
11631 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11632 size = GET_MODE_PRECISION (inner_mode);
11633 x = SUBREG_REG (XEXP (x, 0));
11634 if (GET_MODE (x) != mode)
11635 {
11636 if (REG_P (x) && HARD_REGISTER_P (x)
11637 && !can_change_dest_mode (x, 0, mode))
11638 continue;
11639
11640 x = gen_lowpart_SUBREG (mode, x);
11641 }
11642 }
11643 else if (GET_CODE (x) == ZERO_EXTEND
11644 && REG_P (XEXP (x, 0))
11645 && HARD_REGISTER_P (XEXP (x, 0))
11646 && can_change_dest_mode (XEXP (x, 0), 0, mode))
11647 {
11648 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11649 size = GET_MODE_PRECISION (inner_mode);
11650 x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
11651 }
11652 else
11653 continue;
11654
11655 if (!(GET_CODE (x) == LSHIFTRT
11656 && CONST_INT_P (XEXP (x, 1))
11657 && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
11658 {
11659 wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
11660 x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
11661 }
11662
11663 SUBST (**iter, x);
11664 changed = true;
11665 }
11666
11667 if (changed)
11668 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11669 maybe_swap_commutative_operands (**iter);
11670
11671 rtx *dst = &SET_DEST (pat);
11672 scalar_int_mode mode;
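/* A ZERO_EXTRACT destination is rewritten as a read-modify-write of the
   whole register: keep the bits outside the field and IOR in the shifted,
   masked source.  E.g. (with BITS_BIG_ENDIAN clear) storing SRC into
   (zero_extract:SI (reg:SI R) (const_int 8) (const_int 4)) becomes
   R = (ior:SI (and:SI R (const_int -4081))
               (and:SI (ashift:SI SRC (const_int 4)) (const_int 4080))).  */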
11673 if (GET_CODE (*dst) == ZERO_EXTRACT
11674 && REG_P (XEXP (*dst, 0))
11675 && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
11676 && CONST_INT_P (XEXP (*dst, 1))
11677 && CONST_INT_P (XEXP (*dst, 2)))
11678 {
11679 rtx reg = XEXP (*dst, 0);
11680 int width = INTVAL (XEXP (*dst, 1));
11681 int offset = INTVAL (XEXP (*dst, 2));
11682 int reg_width = GET_MODE_PRECISION (mode);
11683 if (BITS_BIG_ENDIAN)
11684 offset = reg_width - width - offset;
11685
11686 rtx x, y, z, w;
11687 wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
11688 wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
11689 x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
11690 if (offset)
11691 y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
11692 else
11693 y = SET_SRC (pat);
11694 z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
11695 w = gen_rtx_IOR (mode, x, z);
11696 SUBST (SET_DEST (pat), reg);
11697 SUBST (SET_SRC (pat), w);
11698
11699 changed = true;
11700 }
11701
11702 return changed;
11703 }
11704
11705 /* Like recog, but we receive the address of a pointer to a new pattern.
11706 We try to match the rtx that the pointer points to.
11707 If that fails, we may try to modify or replace the pattern,
11708 storing the replacement into the same pointer object.
11709
11710 Modifications include deletion or addition of CLOBBERs. If the
11711 instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11712 to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11713 (and undo if that fails).
11714
11715 PNOTES is a pointer to a location where any REG_UNUSED notes added for
11716 the CLOBBERs are placed.
11717
11718 The value is the final insn code from the pattern ultimately matched,
11719 or -1. */
11720
11721 static int
11722 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11723 {
11724 rtx pat = *pnewpat;
11725 int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11726 if (insn_code_number >= 0 || check_asm_operands (pat))
11727 return insn_code_number;
11728
11729 void *marker = get_undo_marker ();
11730 bool changed = false;
11731
11732 if (GET_CODE (pat) == SET)
11733 changed = change_zero_ext (pat);
11734 else if (GET_CODE (pat) == PARALLEL)
11735 {
11736 int i;
11737 for (i = 0; i < XVECLEN (pat, 0); i++)
11738 {
11739 rtx set = XVECEXP (pat, 0, i);
11740 if (GET_CODE (set) == SET)
11741 changed |= change_zero_ext (set);
11742 }
11743 }
11744
11745 if (changed)
11746 {
11747 insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11748
11749 if (insn_code_number < 0)
11750 undo_to_marker (marker);
11751 }
11752
11753 return insn_code_number;
11754 }
11755 \f
11756 /* Like gen_lowpart_general but for use by combine. In combine it
11757 is not possible to create any new pseudoregs. However, it is
11758 safe to create invalid memory addresses, because combine will
11759 try to recognize them and all they will do is make the combine
11760 attempt fail.
11761
11762 If for some reason this cannot do its job, an rtx
11763 (clobber (const_int 0)) is returned.
11764 An insn containing that will not be recognized. */
11765
11766 static rtx
11767 gen_lowpart_for_combine (machine_mode omode, rtx x)
11768 {
11769 machine_mode imode = GET_MODE (x);
11770 rtx result;
11771
11772 if (omode == imode)
11773 return x;
11774
11775 /* We can only support OMODE being wider than a word if X is a
11776 constant integer or has a mode of the same size.  */
11777 if (maybe_gt (GET_MODE_SIZE (omode), UNITS_PER_WORD)
11778 && ! (CONST_SCALAR_INT_P (x)
11779 || known_eq (GET_MODE_SIZE (imode), GET_MODE_SIZE (omode))))
11780 goto fail;
11781
11782 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
11783 won't know what to do. So we will strip off the SUBREG here and
11784 process normally. */
11785 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11786 {
11787 x = SUBREG_REG (x);
11788
11789 /* In case we fall through to the address adjustments further
11790 below, update the known mode of X (imode), since we just
11791 changed X.  */
11792 imode = GET_MODE (x);
11793
11794 if (imode == omode)
11795 return x;
11796 }
11797
11798 result = gen_lowpart_common (omode, x);
11799
11800 if (result)
11801 return result;
11802
11803 if (MEM_P (x))
11804 {
11805 /* Refuse to work on a volatile memory ref or one with a mode-dependent
11806 address. */
11807 if (MEM_VOLATILE_P (x)
11808 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11809 goto fail;
11810
11811 /* If we want to refer to something bigger than the original memref,
11812 generate a paradoxical subreg instead. That will force a reload
11813 of the original memref X. */
11814 if (paradoxical_subreg_p (omode, imode))
11815 return gen_rtx_SUBREG (omode, x, 0);
11816
11817 poly_int64 offset = byte_lowpart_offset (omode, imode);
11818 return adjust_address_nv (x, omode, offset);
11819 }
11820
11821 /* If X is a comparison operator, rewrite it in a new mode. This
11822 probably won't match, but may allow further simplifications. */
11823 else if (COMPARISON_P (x))
11824 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11825
11826 /* If we couldn't simplify X any other way, just enclose it in a
11827 SUBREG. Normally, this SUBREG won't match, but some patterns may
11828 include an explicit SUBREG or we may simplify it further in combine. */
11829 else
11830 {
11831 rtx res;
11832
11833 if (imode == VOIDmode)
11834 {
11835 imode = int_mode_for_mode (omode).require ();
11836 x = gen_lowpart_common (imode, x);
11837 if (x == NULL)
11838 goto fail;
11839 }
11840 res = lowpart_subreg (omode, x, imode);
11841 if (res)
11842 return res;
11843 }
11844
11845 fail:
11846 return gen_rtx_CLOBBER (omode, const0_rtx);
11847 }
11848 \f
11849 /* Try to simplify a comparison between OP0 and a constant OP1,
11850 where CODE is the comparison code that will be tested, into a
11851 (CODE OP0 const0_rtx) form.
11852
11853 The result is a possibly different comparison code to use.
11854 *POP1 may be updated. */
11855
11856 static enum rtx_code
11857 simplify_compare_const (enum rtx_code code, machine_mode mode,
11858 rtx op0, rtx *pop1)
11859 {
11860 scalar_int_mode int_mode;
11861 HOST_WIDE_INT const_op = INTVAL (*pop1);
11862
11863 /* Get the constant we are comparing against and turn off all bits
11864 not on in our mode. */
11865 if (mode != VOIDmode)
11866 const_op = trunc_int_for_mode (const_op, mode);
11867
11868 /* If we are comparing against a constant power of two and the value
11869 being compared can only have that single bit nonzero (e.g., it was
11870 `and'ed with that bit), we can replace this with a comparison
11871 with zero. */
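/* For example, if OP0 is (and X (const_int 8)), then
   (eq OP0 (const_int 8)) becomes (ne OP0 (const_int 0)),
   since the AND can only yield 0 or 8.  */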
11872 if (const_op
11873 && (code == EQ || code == NE || code == GE || code == GEU
11874 || code == LT || code == LTU)
11875 && is_a <scalar_int_mode> (mode, &int_mode)
11876 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11877 && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
11878 && (nonzero_bits (op0, int_mode)
11879 == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
11880 {
11881 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11882 const_op = 0;
11883 }
11884
11885 /* Similarly, if we are comparing a value known to be either -1 or
11886 0 with -1, change it to the opposite comparison against zero. */
11887 if (const_op == -1
11888 && (code == EQ || code == NE || code == GT || code == LE
11889 || code == GEU || code == LTU)
11890 && is_a <scalar_int_mode> (mode, &int_mode)
11891 && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
11892 {
11893 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11894 const_op = 0;
11895 }
11896
11897 /* Do some canonicalizations based on the comparison code. We prefer
11898 comparisons against zero and then prefer equality comparisons.
11899 If we can reduce the size of a constant, we will do that too. */
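/* E.g. (lt X (const_int 1)) becomes (le X (const_int 0)), and
   (ltu X (const_int 1)) becomes (leu X (const_int 0)) and then
   (eq X (const_int 0)) via the fallthrough cases below.  */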
11900 switch (code)
11901 {
11902 case LT:
11903 /* < C is equivalent to <= (C - 1) */
11904 if (const_op > 0)
11905 {
11906 const_op -= 1;
11907 code = LE;
11908 /* ... fall through to LE case below. */
11909 gcc_fallthrough ();
11910 }
11911 else
11912 break;
11913
11914 case LE:
11915 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
11916 if (const_op < 0)
11917 {
11918 const_op += 1;
11919 code = LT;
11920 }
11921
11922 /* If we are doing a <= 0 comparison on a value known to have
11923 a zero sign bit, we can replace this with == 0. */
11924 else if (const_op == 0
11925 && is_a <scalar_int_mode> (mode, &int_mode)
11926 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11927 && (nonzero_bits (op0, int_mode)
11928 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11929 == 0)
11930 code = EQ;
11931 break;
11932
11933 case GE:
11934 /* >= C is equivalent to > (C - 1). */
11935 if (const_op > 0)
11936 {
11937 const_op -= 1;
11938 code = GT;
11939 /* ... fall through to GT below. */
11940 gcc_fallthrough ();
11941 }
11942 else
11943 break;
11944
11945 case GT:
11946 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11947 if (const_op < 0)
11948 {
11949 const_op += 1;
11950 code = GE;
11951 }
11952
11953 /* If we are doing a > 0 comparison on a value known to have
11954 a zero sign bit, we can replace this with != 0. */
11955 else if (const_op == 0
11956 && is_a <scalar_int_mode> (mode, &int_mode)
11957 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11958 && (nonzero_bits (op0, int_mode)
11959 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11960 == 0)
11961 code = NE;
11962 break;
11963
11964 case LTU:
11965 /* < C is equivalent to <= (C - 1). */
11966 if (const_op > 0)
11967 {
11968 const_op -= 1;
11969 code = LEU;
11970 /* ... fall through ... */
11971 gcc_fallthrough ();
11972 }
11973 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11974 else if (is_a <scalar_int_mode> (mode, &int_mode)
11975 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11976 && ((unsigned HOST_WIDE_INT) const_op
11977 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11978 {
11979 const_op = 0;
11980 code = GE;
11981 break;
11982 }
11983 else
11984 break;
11985
11986 case LEU:
11987 /* unsigned <= 0 is equivalent to == 0 */
11988 if (const_op == 0)
11989 code = EQ;
11990 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11991 else if (is_a <scalar_int_mode> (mode, &int_mode)
11992 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11993 && ((unsigned HOST_WIDE_INT) const_op
11994 == ((HOST_WIDE_INT_1U
11995 << (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
11996 {
11997 const_op = 0;
11998 code = GE;
11999 }
12000 break;
12001
12002 case GEU:
12003 /* >= C is equivalent to > (C - 1). */
12004 if (const_op > 1)
12005 {
12006 const_op -= 1;
12007 code = GTU;
12008 /* ... fall through ... */
12009 gcc_fallthrough ();
12010 }
12011
12012 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
12013 else if (is_a <scalar_int_mode> (mode, &int_mode)
12014 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
12015 && ((unsigned HOST_WIDE_INT) const_op
12016 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
12017 {
12018 const_op = 0;
12019 code = LT;
12020 break;
12021 }
12022 else
12023 break;
12024
12025 case GTU:
12026 /* unsigned > 0 is equivalent to != 0 */
12027 if (const_op == 0)
12028 code = NE;
12029 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
12030 else if (is_a <scalar_int_mode> (mode, &int_mode)
12031 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
12032 && ((unsigned HOST_WIDE_INT) const_op
12033 == (HOST_WIDE_INT_1U
12034 << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
12035 {
12036 const_op = 0;
12037 code = LT;
12038 }
12039 break;
12040
12041 default:
12042 break;
12043 }
12044
12045 *pop1 = GEN_INT (const_op);
12046 return code;
12047 }
12048 \f
12049 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
12050 comparison code that will be tested.
12051
12052 The result is a possibly different comparison code to use. *POP0 and
12053 *POP1 may be updated.
12054
12055 It is possible that we might detect that a comparison is either always
12056 true or always false. However, we do not perform general constant
12057 folding in combine, so this knowledge isn't useful. Such tautologies
12058 should have been detected earlier. Hence we ignore all such cases. */
12059
12060 static enum rtx_code
12061 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
12062 {
12063 rtx op0 = *pop0;
12064 rtx op1 = *pop1;
12065 rtx tem, tem1;
12066 int i;
12067 scalar_int_mode mode, inner_mode, tmode;
12068 opt_scalar_int_mode tmode_iter;
12069
12070 /* Try a few ways of applying the same transformation to both operands. */
12071 while (1)
12072 {
12073 /* If !WORD_REGISTER_OPERATIONS, the test below this one won't handle
12074 SIGN_EXTENDs expressed as shift pairs of SUBREGs, so check specially.  */
12075 if (!WORD_REGISTER_OPERATIONS
12076 && code != GTU && code != GEU && code != LTU && code != LEU
12077 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
12078 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12079 && GET_CODE (XEXP (op1, 0)) == ASHIFT
12080 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
12081 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
12082 && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
12083 && (is_a <scalar_int_mode>
12084 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
12085 && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
12086 && CONST_INT_P (XEXP (op0, 1))
12087 && XEXP (op0, 1) == XEXP (op1, 1)
12088 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12089 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
12090 && (INTVAL (XEXP (op0, 1))
12091 == (GET_MODE_PRECISION (mode)
12092 - GET_MODE_PRECISION (inner_mode))))
12093 {
12094 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
12095 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
12096 }
12097
12098 /* If both operands are the same constant shift, see if we can ignore the
12099 shift. We can if the shift is a rotate or if the bits shifted out of
12100 this shift are known to be zero for both inputs and if the type of
12101 comparison is compatible with the shift. */
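/* For example, (eq (lshiftrt:SI A (const_int 2)) (lshiftrt:SI B
   (const_int 2))) can become (eq A B) when the low two bits of both
   A and B are known to be zero.  */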
12102 if (GET_CODE (op0) == GET_CODE (op1)
12103 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
12104 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
12105 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
12106 && (code != GT && code != LT && code != GE && code != LE))
12107 || (GET_CODE (op0) == ASHIFTRT
12108 && (code != GTU && code != LTU
12109 && code != GEU && code != LEU)))
12110 && CONST_INT_P (XEXP (op0, 1))
12111 && INTVAL (XEXP (op0, 1)) >= 0
12112 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12113 && XEXP (op0, 1) == XEXP (op1, 1))
12114 {
12115 machine_mode mode = GET_MODE (op0);
12116 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12117 int shift_count = INTVAL (XEXP (op0, 1));
12118
12119 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
12120 mask &= (mask >> shift_count) << shift_count;
12121 else if (GET_CODE (op0) == ASHIFT)
12122 mask = (mask & (mask << shift_count)) >> shift_count;
12123
12124 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
12125 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
12126 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
12127 else
12128 break;
12129 }
12130
12131 /* If both operands are AND's of a paradoxical SUBREG by constant, the
12132 SUBREGs are of the same mode, and, in both cases, the AND would
12133 be redundant if the comparison was done in the narrower mode,
12134 do the comparison in the narrower mode (e.g., we are AND'ing with 1
12135 and the operand's possibly nonzero bits are 0xffffff01; in that case
12136 if we only care about QImode, we don't need the AND). This case
12137 occurs if the output mode of an scc insn is not SImode and
12138 STORE_FLAG_VALUE == 1 (e.g., the 386).
12139
12140 Similarly, check for a case where the AND's are ZERO_EXTEND
12141 operations from some narrower mode even though a SUBREG is not
12142 present. */
12143
12144 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
12145 && CONST_INT_P (XEXP (op0, 1))
12146 && CONST_INT_P (XEXP (op1, 1)))
12147 {
12148 rtx inner_op0 = XEXP (op0, 0);
12149 rtx inner_op1 = XEXP (op1, 0);
12150 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
12151 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
12152 int changed = 0;
12153
12154 if (paradoxical_subreg_p (inner_op0)
12155 && GET_CODE (inner_op1) == SUBREG
12156 && HWI_COMPUTABLE_MODE_P (GET_MODE (SUBREG_REG (inner_op0)))
12157 && (GET_MODE (SUBREG_REG (inner_op0))
12158 == GET_MODE (SUBREG_REG (inner_op1)))
12159 && ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
12160 GET_MODE (SUBREG_REG (inner_op0)))) == 0
12161 && ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
12162 GET_MODE (SUBREG_REG (inner_op1)))) == 0)
12163 {
12164 op0 = SUBREG_REG (inner_op0);
12165 op1 = SUBREG_REG (inner_op1);
12166
12167 /* The resulting comparison is always unsigned since we masked
12168 off the original sign bit. */
12169 code = unsigned_condition (code);
12170
12171 changed = 1;
12172 }
12173
12174 else if (c0 == c1)
12175 FOR_EACH_MODE_UNTIL (tmode,
12176 as_a <scalar_int_mode> (GET_MODE (op0)))
12177 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
12178 {
12179 op0 = gen_lowpart_or_truncate (tmode, inner_op0);
12180 op1 = gen_lowpart_or_truncate (tmode, inner_op1);
12181 code = unsigned_condition (code);
12182 changed = 1;
12183 break;
12184 }
12185
12186 if (! changed)
12187 break;
12188 }
12189
12190 /* If both operands are NOT, we can strip off the outer operation
12191 and adjust the comparison code for swapped operands; similarly for
12192 NEG, except that this must be an equality comparison. */
12193 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
12194 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
12195 && (code == EQ || code == NE)))
12196 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
12197
12198 else
12199 break;
12200 }
12201
12202 /* If the first operand is a constant, swap the operands and adjust the
12203 comparison code appropriately, but don't do this if the second operand
12204 is already a constant integer. */
12205 if (swap_commutative_operands_p (op0, op1))
12206 {
12207 std::swap (op0, op1);
12208 code = swap_condition (code);
12209 }
12210
12211 /* We now enter a loop during which we will try to simplify the comparison.
12212 For the most part, we are only concerned with comparisons with zero,
12213 but some things may really be comparisons with zero but not start
12214 out looking that way. */
12215
12216 while (CONST_INT_P (op1))
12217 {
12218 machine_mode raw_mode = GET_MODE (op0);
12219 scalar_int_mode int_mode;
12220 int equality_comparison_p;
12221 int sign_bit_comparison_p;
12222 int unsigned_comparison_p;
12223 HOST_WIDE_INT const_op;
12224
12225 /* We only want to handle integral modes. This catches VOIDmode,
12226 CCmode, and the floating-point modes. An exception is that we
12227 can handle VOIDmode if OP0 is a COMPARE or a comparison
12228 operation. */
12229
12230 if (GET_MODE_CLASS (raw_mode) != MODE_INT
12231 && ! (raw_mode == VOIDmode
12232 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
12233 break;
12234
12235 /* Try to simplify the compare to constant, possibly changing the
12236 comparison op, and/or changing op1 to zero. */
12237 code = simplify_compare_const (code, raw_mode, op0, &op1);
12238 const_op = INTVAL (op1);
12239
12240 /* Compute some predicates to simplify code below. */
12241
12242 equality_comparison_p = (code == EQ || code == NE);
12243 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
12244 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
12245 || code == GEU);
12246
12247 /* If this is a sign bit comparison and we can do arithmetic in
12248 MODE, say that we will only be needing the sign bit of OP0. */
12249 if (sign_bit_comparison_p
12250 && is_a <scalar_int_mode> (raw_mode, &int_mode)
12251 && HWI_COMPUTABLE_MODE_P (int_mode))
12252 op0 = force_to_mode (op0, int_mode,
12253 HOST_WIDE_INT_1U
12254 << (GET_MODE_PRECISION (int_mode) - 1),
12255 0);
12256
12257 if (COMPARISON_P (op0))
12258 {
12259 /* We can't do anything if OP0 is a condition code value, rather
12260 than an actual data value. */
12261 if (const_op != 0
12262 || CC0_P (XEXP (op0, 0))
12263 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
12264 break;
12265
12266 /* Get the two operands being compared. */
12267 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
12268 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
12269 else
12270 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
12271
12272 /* Check for the cases where we simply want the result of the
12273 earlier test or the opposite of that result. */
12274 if (code == NE || code == EQ
12275 || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
12276 && (code == LT || code == GE)))
12277 {
12278 enum rtx_code new_code;
12279 if (code == LT || code == NE)
12280 new_code = GET_CODE (op0);
12281 else
12282 new_code = reversed_comparison_code (op0, NULL);
12283
12284 if (new_code != UNKNOWN)
12285 {
12286 code = new_code;
12287 op0 = tem;
12288 op1 = tem1;
12289 continue;
12290 }
12291 }
12292 break;
12293 }
12294
12295 if (raw_mode == VOIDmode)
12296 break;
12297 scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
12298
12299 /* Now try cases based on the opcode of OP0. If none of the cases
12300 does a "continue", we exit this loop immediately after the
12301 switch. */
12302
12303 unsigned int mode_width = GET_MODE_PRECISION (mode);
12304 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12305 switch (GET_CODE (op0))
12306 {
12307 case ZERO_EXTRACT:
12308 /* If we are extracting a single bit from a variable position in
12309 a constant that has only a single bit set and are comparing it
12310 with zero, we can convert this into an equality comparison
12311 between the position and the location of the single bit. */
12312 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
12313 have already reduced the shift count modulo the word size. */
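/* E.g. with BITS_BIG_ENDIAN clear, (eq (zero_extract (const_int 4)
   (const_int 1) POS) (const_int 0)) becomes (ne POS (const_int 2)),
   since only bit 2 of the constant is set.  */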
12314 if (!SHIFT_COUNT_TRUNCATED
12315 && CONST_INT_P (XEXP (op0, 0))
12316 && XEXP (op0, 1) == const1_rtx
12317 && equality_comparison_p && const_op == 0
12318 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
12319 {
12320 if (BITS_BIG_ENDIAN)
12321 i = BITS_PER_WORD - 1 - i;
12322
12323 op0 = XEXP (op0, 2);
12324 op1 = GEN_INT (i);
12325 const_op = i;
12326
12327 /* Result is nonzero iff shift count is equal to I. */
12328 code = reverse_condition (code);
12329 continue;
12330 }
12331
12332 /* fall through */
12333
12334 case SIGN_EXTRACT:
12335 tem = expand_compound_operation (op0);
12336 if (tem != op0)
12337 {
12338 op0 = tem;
12339 continue;
12340 }
12341 break;
12342
12343 case NOT:
12344 /* If testing for equality, we can take the NOT of the constant. */
12345 if (equality_comparison_p
12346 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
12347 {
12348 op0 = XEXP (op0, 0);
12349 op1 = tem;
12350 continue;
12351 }
12352
12353 /* If just looking at the sign bit, reverse the sense of the
12354 comparison. */
12355 if (sign_bit_comparison_p)
12356 {
12357 op0 = XEXP (op0, 0);
12358 code = (code == GE ? LT : GE);
12359 continue;
12360 }
12361 break;
12362
12363 case NEG:
12364 /* If testing for equality, we can take the NEG of the constant. */
12365 if (equality_comparison_p
12366 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
12367 {
12368 op0 = XEXP (op0, 0);
12369 op1 = tem;
12370 continue;
12371 }
12372
12373 /* The remaining cases only apply to comparisons with zero. */
12374 if (const_op != 0)
12375 break;
12376
12377 /* When X is ABS or is known positive,
12378 (neg X) is < 0 if and only if X != 0. */
12379
12380 if (sign_bit_comparison_p
12381 && (GET_CODE (XEXP (op0, 0)) == ABS
12382 || (mode_width <= HOST_BITS_PER_WIDE_INT
12383 && (nonzero_bits (XEXP (op0, 0), mode)
12384 & (HOST_WIDE_INT_1U << (mode_width - 1)))
12385 == 0)))
12386 {
12387 op0 = XEXP (op0, 0);
12388 code = (code == LT ? NE : EQ);
12389 continue;
12390 }
12391
12392 /* If we have NEG of something whose two high-order bits are the
12393 same, we know that "(-a) < 0" is equivalent to "a > 0". */
12394 if (num_sign_bit_copies (op0, mode) >= 2)
12395 {
12396 op0 = XEXP (op0, 0);
12397 code = swap_condition (code);
12398 continue;
12399 }
12400 break;
12401
12402 case ROTATE:
12403 /* If we are testing equality and our count is a constant, we
12404 can perform the inverse operation on our RHS. */
12405 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
12406 && (tem = simplify_binary_operation (ROTATERT, mode,
12407 op1, XEXP (op0, 1))) != 0)
12408 {
12409 op0 = XEXP (op0, 0);
12410 op1 = tem;
12411 continue;
12412 }
12413
12414 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
12415 a particular bit.  Convert it to an AND with a constant that has
12416 only that bit set.  This will be converted into a ZERO_EXTRACT.  */
12417 if (const_op == 0 && sign_bit_comparison_p
12418 && CONST_INT_P (XEXP (op0, 1))
12419 && mode_width <= HOST_BITS_PER_WIDE_INT)
12420 {
12421 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12422 (HOST_WIDE_INT_1U
12423 << (mode_width - 1
12424 - INTVAL (XEXP (op0, 1)))));
12425 code = (code == LT ? NE : EQ);
12426 continue;
12427 }
12428
12429 /* Fall through. */
12430
12431 case ABS:
12432 /* ABS is ignorable inside an equality comparison with zero. */
12433 if (const_op == 0 && equality_comparison_p)
12434 {
12435 op0 = XEXP (op0, 0);
12436 continue;
12437 }
12438 break;
12439
12440 case SIGN_EXTEND:
12441 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
12442 (compare FOO CONST) if CONST fits in FOO's mode and we
12443 are either testing inequality or have an unsigned
12444 comparison with ZERO_EXTEND or a signed comparison with
12445 SIGN_EXTEND. But don't do it if we don't have a compare
12446 insn of the given mode, since we'd have to revert it
12447 later on, and then we wouldn't know whether to sign- or
12448 zero-extend. */
12449 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12450 && ! unsigned_comparison_p
12451 && HWI_COMPUTABLE_MODE_P (mode)
12452 && trunc_int_for_mode (const_op, mode) == const_op
12453 && have_insn_for (COMPARE, mode))
12454 {
12455 op0 = XEXP (op0, 0);
12456 continue;
12457 }
12458 break;
12459
12460 case SUBREG:
12461 /* Check for the case where we are comparing A - C1 with C2, that is
12462
12463 (subreg:MODE (plus (A) (-C1))) op (C2)
12464
12465 with C1 a constant, and try to lift the SUBREG, i.e. to do the
12466 comparison in the wider mode. One of the following two conditions
12467 must be true in order for this to be valid:
12468
12469 1. The mode extension results in the same bit pattern being added
12470 on both sides and the comparison is equality or unsigned. As
12471 C2 has been truncated to fit in MODE, the pattern can only be
12472 all 0s or all 1s.
12473
12474 2. The mode extension results in the sign bit being copied on
12475 each side.
12476
12477 The difficulty here is that we have predicates for A but not for
12478 (A - C1) so we need to check that C1 is within proper bounds so
12479 as to perturb A as little as possible.  */
12480
12481 if (mode_width <= HOST_BITS_PER_WIDE_INT
12482 && subreg_lowpart_p (op0)
12483 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
12484 &inner_mode)
12485 && GET_MODE_PRECISION (inner_mode) > mode_width
12486 && GET_CODE (SUBREG_REG (op0)) == PLUS
12487 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
12488 {
12489 rtx a = XEXP (SUBREG_REG (op0), 0);
12490 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
12491
12492 if ((c1 > 0
12493 && (unsigned HOST_WIDE_INT) c1
12494 < HOST_WIDE_INT_1U << (mode_width - 1)
12495 && (equality_comparison_p || unsigned_comparison_p)
12496 /* (A - C1) zero-extends if it is positive and sign-extends
12497 if it is negative, C2 both zero- and sign-extends. */
12498 && (((nonzero_bits (a, inner_mode)
12499 & ~GET_MODE_MASK (mode)) == 0
12500 && const_op >= 0)
12501 /* (A - C1) sign-extends if it is positive and 1-extends
12502 if it is negative, C2 both sign- and 1-extends. */
12503 || (num_sign_bit_copies (a, inner_mode)
12504 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12505 - mode_width)
12506 && const_op < 0)))
12507 || ((unsigned HOST_WIDE_INT) c1
12508 < HOST_WIDE_INT_1U << (mode_width - 2)
12509 /* (A - C1) always sign-extends, like C2. */
12510 && num_sign_bit_copies (a, inner_mode)
12511 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12512 - (mode_width - 1))))
12513 {
12514 op0 = SUBREG_REG (op0);
12515 continue;
12516 }
12517 }
12518
12519 /* If the inner mode is narrower and we are extracting the low part,
12520 we can treat the SUBREG as if it were a ZERO_EXTEND. */
12521 if (paradoxical_subreg_p (op0))
12522 ;
12523 else if (subreg_lowpart_p (op0)
12524 && GET_MODE_CLASS (mode) == MODE_INT
12525 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12526 && (code == NE || code == EQ)
12527 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12528 && !paradoxical_subreg_p (op0)
12529 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12530 & ~GET_MODE_MASK (mode)) == 0)
12531 {
12532 /* Remove outer subregs that don't do anything. */
12533 tem = gen_lowpart (inner_mode, op1);
12534
12535 if ((nonzero_bits (tem, inner_mode)
12536 & ~GET_MODE_MASK (mode)) == 0)
12537 {
12538 op0 = SUBREG_REG (op0);
12539 op1 = tem;
12540 continue;
12541 }
12542 break;
12543 }
12544 else
12545 break;
12546
12547 /* FALLTHROUGH */
12548
12549 case ZERO_EXTEND:
12550 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12551 && (unsigned_comparison_p || equality_comparison_p)
12552 && HWI_COMPUTABLE_MODE_P (mode)
12553 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
12554 && const_op >= 0
12555 && have_insn_for (COMPARE, mode))
12556 {
12557 op0 = XEXP (op0, 0);
12558 continue;
12559 }
12560 break;
12561
12562 case PLUS:
12563 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
12564 this for equality comparisons due to pathological cases involving
12565 overflows. */
12566 if (equality_comparison_p
12567 && (tem = simplify_binary_operation (MINUS, mode,
12568 op1, XEXP (op0, 1))) != 0)
12569 {
12570 op0 = XEXP (op0, 0);
12571 op1 = tem;
12572 continue;
12573 }
12574
12575 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
12576 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
12577 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
12578 {
12579 op0 = XEXP (XEXP (op0, 0), 0);
12580 code = (code == LT ? EQ : NE);
12581 continue;
12582 }
12583 break;
12584
12585 case MINUS:
12586 /* We used to optimize signed comparisons against zero, but that
12587 was incorrect.  Unsigned comparisons against zero either arrive
12588 here as equality comparisons (GTU, LEU) or have already been
12589 optimized away (GEU, LTU).  No need to special-case them.  */
12590
12591 /* (eq (minus A B) C) -> (eq A (plus B C)) or
12592 (eq B (minus A C)), whichever simplifies. We can only do
12593 this for equality comparisons due to pathological cases involving
12594 overflows. */
12595 if (equality_comparison_p
12596 && (tem = simplify_binary_operation (PLUS, mode,
12597 XEXP (op0, 1), op1)) != 0)
12598 {
12599 op0 = XEXP (op0, 0);
12600 op1 = tem;
12601 continue;
12602 }
12603
12604 if (equality_comparison_p
12605 && (tem = simplify_binary_operation (MINUS, mode,
12606 XEXP (op0, 0), op1)) != 0)
12607 {
12608 op0 = XEXP (op0, 1);
12609 op1 = tem;
12610 continue;
12611 }
12612
12613 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
12614 of bits in X minus 1, is one iff X > 0. */
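/* This is because (ashiftrt X C) is 0 when X >= 0 and -1 when X < 0,
   so the difference is -X or -1-X respectively; its sign bit is set
   exactly when X > 0.  */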
12615 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
12616 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12617 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
12618 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12619 {
12620 op0 = XEXP (op0, 1);
12621 code = (code == GE ? LE : GT);
12622 continue;
12623 }
12624 break;
12625
12626 case XOR:
12627 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
12628 if C is zero or B is a constant. */
12629 if (equality_comparison_p
12630 && (tem = simplify_binary_operation (XOR, mode,
12631 XEXP (op0, 1), op1)) != 0)
12632 {
12633 op0 = XEXP (op0, 0);
12634 op1 = tem;
12635 continue;
12636 }
12637 break;
12638
12639
12640 case IOR:
12641 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
12642 iff X <= 0. */
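/* This is because when X > 0 both X and X-1 are nonnegative, so the
   IOR has a clear sign bit; when X <= 0 either X is negative or X-1
   is -1, so the sign bit is set.  */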
12643 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
12644 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
12645 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12646 {
12647 op0 = XEXP (op0, 1);
12648 code = (code == GE ? GT : LE);
12649 continue;
12650 }
12651 break;
12652
12653 case AND:
12654 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
12655 will be converted to a ZERO_EXTRACT later. */
12656 if (const_op == 0 && equality_comparison_p
12657 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12658 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
12659 {
12660 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
12661 XEXP (XEXP (op0, 0), 1));
12662 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12663 continue;
12664 }
12665
12666 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
12667 zero and X is a comparison and C1 and C2 describe only bits set
12668 in STORE_FLAG_VALUE, we can compare with X. */
12669 if (const_op == 0 && equality_comparison_p
12670 && mode_width <= HOST_BITS_PER_WIDE_INT
12671 && CONST_INT_P (XEXP (op0, 1))
12672 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12673 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12674 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12675 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12676 {
12677 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12678 << INTVAL (XEXP (XEXP (op0, 0), 1)));
12679 if ((~STORE_FLAG_VALUE & mask) == 0
12680 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12681 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12682 && COMPARISON_P (tem))))
12683 {
12684 op0 = XEXP (XEXP (op0, 0), 0);
12685 continue;
12686 }
12687 }
12688
12689 /* If we are doing an equality comparison of an AND of a bit equal
12690 to the sign bit, replace this with a LT or GE comparison of
12691 the underlying value. */
12692 if (equality_comparison_p
12693 && const_op == 0
12694 && CONST_INT_P (XEXP (op0, 1))
12695 && mode_width <= HOST_BITS_PER_WIDE_INT
12696 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12697 == HOST_WIDE_INT_1U << (mode_width - 1)))
12698 {
12699 op0 = XEXP (op0, 0);
12700 code = (code == EQ ? GE : LT);
12701 continue;
12702 }
12703
12704 /* If this AND operation is really a ZERO_EXTEND from a narrower
12705 mode, the constant fits within that mode, and this is either an
12706 equality or unsigned comparison, try to do this comparison in
12707 the narrower mode.
12708
12709 Note that in:
12710
12711 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12712 -> (ne:DI (reg:SI 4) (const_int 0))
12713
12714 the transformation is invalid unless TARGET_TRULY_NOOP_TRUNCATION
12715 allows it or the register is known to hold a value of the
12716 required mode.  */
12717 if ((equality_comparison_p || unsigned_comparison_p)
12718 && CONST_INT_P (XEXP (op0, 1))
12719 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12720 & GET_MODE_MASK (mode))
12721 + 1)) >= 0
12722 && const_op >> i == 0
12723 && int_mode_for_size (i, 1).exists (&tmode))
12724 {
12725 op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12726 continue;
12727 }
12728
12729 /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12730 fits in both M1 and M2 and the SUBREG is either paradoxical
12731 or represents the low part, permute the SUBREG and the AND
12732 and try again. */
12733 if (GET_CODE (XEXP (op0, 0)) == SUBREG
12734 && CONST_INT_P (XEXP (op0, 1)))
12735 {
12736 unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12737 /* Require an integral mode, to avoid creating something like
12738 (AND:SF ...). */
12739 if ((is_a <scalar_int_mode>
12740 (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
12741 /* It is unsafe to commute the AND into the SUBREG if the
12742 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12743 not defined. As originally written the upper bits
12744 have a defined value due to the AND operation.
12745 However, if we commute the AND inside the SUBREG then
12746 they no longer have defined values and the meaning of
12747 the code has been changed.
12748 Also C1 should not change value in the smaller mode,
12749 see PR67028 (a positive C1 can become negative in the
12750 smaller mode, so that the AND no longer masks the
12751 upper bits). */
12752 && ((WORD_REGISTER_OPERATIONS
12753 && mode_width > GET_MODE_PRECISION (tmode)
12754 && mode_width <= BITS_PER_WORD
12755 && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12756 || (mode_width <= GET_MODE_PRECISION (tmode)
12757 && subreg_lowpart_p (XEXP (op0, 0))))
12758 && mode_width <= HOST_BITS_PER_WIDE_INT
12759 && HWI_COMPUTABLE_MODE_P (tmode)
12760 && (c1 & ~mask) == 0
12761 && (c1 & ~GET_MODE_MASK (tmode)) == 0
12762 && c1 != mask
12763 && c1 != GET_MODE_MASK (tmode))
12764 {
12765 op0 = simplify_gen_binary (AND, tmode,
12766 SUBREG_REG (XEXP (op0, 0)),
12767 gen_int_mode (c1, tmode));
12768 op0 = gen_lowpart (mode, op0);
12769 continue;
12770 }
12771 }
12772
12773 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
12774 if (const_op == 0 && equality_comparison_p
12775 && XEXP (op0, 1) == const1_rtx
12776 && GET_CODE (XEXP (op0, 0)) == NOT)
12777 {
12778 op0 = simplify_and_const_int (NULL_RTX, mode,
12779 XEXP (XEXP (op0, 0), 0), 1);
12780 code = (code == NE ? EQ : NE);
12781 continue;
12782 }
12783
12784 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12785 (eq (and (lshiftrt X) 1) 0).
12786 Also handle the case where (not X) is expressed using xor. */
12787 if (const_op == 0 && equality_comparison_p
12788 && XEXP (op0, 1) == const1_rtx
12789 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12790 {
12791 rtx shift_op = XEXP (XEXP (op0, 0), 0);
12792 rtx shift_count = XEXP (XEXP (op0, 0), 1);
12793
12794 if (GET_CODE (shift_op) == NOT
12795 || (GET_CODE (shift_op) == XOR
12796 && CONST_INT_P (XEXP (shift_op, 1))
12797 && CONST_INT_P (shift_count)
12798 && HWI_COMPUTABLE_MODE_P (mode)
12799 && (UINTVAL (XEXP (shift_op, 1))
12800 == HOST_WIDE_INT_1U
12801 << INTVAL (shift_count))))
12802 {
12803 op0
12804 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12805 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12806 code = (code == NE ? EQ : NE);
12807 continue;
12808 }
12809 }
12810 break;
12811
12812 case ASHIFT:
12813 /* If we have (compare (ashift FOO N) (const_int C)) and
12814 the high order N bits of FOO (N+1 if an inequality comparison)
12815 are known to be zero, we can do this by comparing FOO with C
12816 shifted right N bits so long as the low-order N bits of C are
12817 zero. */
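/* For example, (eq (ashift:SI FOO (const_int 2)) (const_int 20))
   becomes (eq FOO (const_int 5)) when the two high order bits of FOO
   are known to be zero: the shift then loses no information and the
   low two bits of 20 are zero, so 20 >> 2 == 5 is exact.  */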
12818 if (CONST_INT_P (XEXP (op0, 1))
12819 && INTVAL (XEXP (op0, 1)) >= 0
12820 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12821 < HOST_BITS_PER_WIDE_INT)
12822 && (((unsigned HOST_WIDE_INT) const_op
12823 & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
12824 - 1)) == 0)
12825 && mode_width <= HOST_BITS_PER_WIDE_INT
12826 && (nonzero_bits (XEXP (op0, 0), mode)
12827 & ~(mask >> (INTVAL (XEXP (op0, 1))
12828 + ! equality_comparison_p))) == 0)
12829 {
12830 /* We must perform a logical shift, not an arithmetic one,
12831 as we want the top N bits of C to be zero. */
12832 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12833
12834 temp >>= INTVAL (XEXP (op0, 1));
12835 op1 = gen_int_mode (temp, mode);
12836 op0 = XEXP (op0, 0);
12837 continue;
12838 }
12839
12840 /* If we are doing a sign bit comparison, it means we are testing
12841 a particular bit. Convert it to the appropriate AND. */
12842 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12843 && mode_width <= HOST_BITS_PER_WIDE_INT)
12844 {
12845 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12846 (HOST_WIDE_INT_1U
12847 << (mode_width - 1
12848 - INTVAL (XEXP (op0, 1)))));
12849 code = (code == LT ? NE : EQ);
12850 continue;
12851 }
12852
12853 /* If this is an equality comparison with zero and we are shifting
12854 the low bit to the sign bit, we can convert this to an AND of the
12855 low-order bit. */
12856 if (const_op == 0 && equality_comparison_p
12857 && CONST_INT_P (XEXP (op0, 1))
12858 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12859 {
12860 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12861 continue;
12862 }
12863 break;
12864
12865 case ASHIFTRT:
12866 /* If this is an equality comparison with zero, we can do this
12867 as a logical shift, which might be much simpler. */
12868 if (equality_comparison_p && const_op == 0
12869 && CONST_INT_P (XEXP (op0, 1)))
12870 {
12871 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12872 XEXP (op0, 0),
12873 INTVAL (XEXP (op0, 1)));
12874 continue;
12875 }
12876
12877 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12878 do the comparison in a narrower mode. */
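/* For example, in SImode the pair (ashiftrt (ashift X 24) 24) acts as
   a sign extension of the low byte of X, so (lt (ashiftrt:SI
   (ashift:SI X (const_int 24)) (const_int 24)) (const_int 5)) can be
   done as a QImode comparison of the lowpart of X against 5, since 5
   fits in signed QImode.  */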
12879 if (! unsigned_comparison_p
12880 && CONST_INT_P (XEXP (op0, 1))
12881 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12882 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12883 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12884 .exists (&tmode))
12885 && (((unsigned HOST_WIDE_INT) const_op
12886 + (GET_MODE_MASK (tmode) >> 1) + 1)
12887 <= GET_MODE_MASK (tmode)))
12888 {
12889 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12890 continue;
12891 }
12892
12893 /* Likewise if OP0 is a PLUS of a sign extension with a
12894 constant, which is usually represented with the PLUS
12895 between the shifts. */
12896 if (! unsigned_comparison_p
12897 && CONST_INT_P (XEXP (op0, 1))
12898 && GET_CODE (XEXP (op0, 0)) == PLUS
12899 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12900 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12901 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12902 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12903 .exists (&tmode))
12904 && (((unsigned HOST_WIDE_INT) const_op
12905 + (GET_MODE_MASK (tmode) >> 1) + 1)
12906 <= GET_MODE_MASK (tmode)))
12907 {
12908 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12909 rtx add_const = XEXP (XEXP (op0, 0), 1);
12910 rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
12911 add_const, XEXP (op0, 1));
12912
12913 op0 = simplify_gen_binary (PLUS, tmode,
12914 gen_lowpart (tmode, inner),
12915 new_const);
12916 continue;
12917 }
12918
12919 /* FALLTHROUGH */
12920 case LSHIFTRT:
12921 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12922 the low order N bits of FOO are known to be zero, we can do this
12923 by comparing FOO with C shifted left N bits so long as no
12924 overflow occurs.  Even if the low order N bits of FOO aren't known
12925 to be zero, we can still use this optimization: directly when the
12926 comparison is >= or <, and after setting all the low order N bits
12927 in the comparison constant when it is > or <=.  */
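/* For example, (ltu (lshiftrt:SI FOO (const_int 2)) (const_int 5))
   becomes (ltu FOO (const_int 20)), while for GTU, when the low bits
   of FOO may be nonzero, they are set in the constant:
   (gtu (lshiftrt:SI FOO (const_int 2)) (const_int 4)) becomes
   (gtu FOO (const_int 19)).  */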
12928 if (CONST_INT_P (XEXP (op0, 1))
12929 && INTVAL (XEXP (op0, 1)) > 0
12930 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12931 && mode_width <= HOST_BITS_PER_WIDE_INT
12932 && (((unsigned HOST_WIDE_INT) const_op
12933 + (GET_CODE (op0) != LSHIFTRT
12934 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12935 + 1)
12936 : 0))
12937 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12938 {
12939 unsigned HOST_WIDE_INT low_bits
12940 = (nonzero_bits (XEXP (op0, 0), mode)
12941 & ((HOST_WIDE_INT_1U
12942 << INTVAL (XEXP (op0, 1))) - 1));
12943 if (low_bits == 0 || !equality_comparison_p)
12944 {
12945 /* If the shift was logical, then we must make the condition
12946 unsigned. */
12947 if (GET_CODE (op0) == LSHIFTRT)
12948 code = unsigned_condition (code);
12949
12950 const_op = (unsigned HOST_WIDE_INT) const_op
12951 << INTVAL (XEXP (op0, 1));
12952 if (low_bits != 0
12953 && (code == GT || code == GTU
12954 || code == LE || code == LEU))
12955 const_op
12956 |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
12957 op1 = GEN_INT (const_op);
12958 op0 = XEXP (op0, 0);
12959 continue;
12960 }
12961 }
12962
12963 /* If we are using this shift to extract just the sign bit, we
12964 can replace this with an LT or GE comparison. */
12965 if (const_op == 0
12966 && (equality_comparison_p || sign_bit_comparison_p)
12967 && CONST_INT_P (XEXP (op0, 1))
12968 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12969 {
12970 op0 = XEXP (op0, 0);
12971 code = (code == NE || code == GT ? LT : GE);
12972 continue;
12973 }
12974 break;
12975
12976 default:
12977 break;
12978 }
12979
12980 break;
12981 }
12982
12983 /* Now make any compound operations involved in this comparison. Then,
12984 check for an outermost SUBREG on OP0 that is not doing anything or is
12985 paradoxical. The latter transformation must only be performed when
12986 it is known that the "extra" bits will be the same in op0 and op1 or
12987 that they don't matter. There are three cases to consider:
12988
12989 1. SUBREG_REG (op0) is a register. In this case the bits are don't
12990 care bits and we can assume they have any convenient value. So
12991 making the transformation is safe.
12992
12993 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
12994 In this case the upper bits of op0 are undefined. We should not make
12995 the simplification in that case as we do not know the contents of
12996 those bits.
12997
12998 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
12999 In that case we know those bits are zeros or ones. We must also be
13000 sure that they are the same as the upper bits of op1.
13001
13002 We can never remove a SUBREG for a non-equality comparison because
13003 the sign bit is in a different place in the underlying object. */
13004
13005 rtx_code op0_mco_code = SET;
13006 if (op1 == const0_rtx)
13007 op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;
13008
13009 op0 = make_compound_operation (op0, op0_mco_code);
13010 op1 = make_compound_operation (op1, SET);
13011
13012 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
13013 && is_int_mode (GET_MODE (op0), &mode)
13014 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
13015 && (code == NE || code == EQ))
13016 {
13017 if (paradoxical_subreg_p (op0))
13018 {
13019 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
13020 implemented. */
13021 if (REG_P (SUBREG_REG (op0)))
13022 {
13023 op0 = SUBREG_REG (op0);
13024 op1 = gen_lowpart (inner_mode, op1);
13025 }
13026 }
13027 else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
13028 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
13029 & ~GET_MODE_MASK (mode)) == 0)
13030 {
13031 tem = gen_lowpart (inner_mode, op1);
13032
13033 if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
13034 op0 = SUBREG_REG (op0), op1 = tem;
13035 }
13036 }
13037
13038 /* We now do the opposite procedure: Some machines don't have compare
13039 insns in all modes. If OP0's mode is an integer mode smaller than a
13040 word and we can't do a compare in that mode, see if there is a larger
13041 mode for which we can do the compare. There are a number of cases in
13042 which we can use the wider mode. */
13043
13044 if (is_int_mode (GET_MODE (op0), &mode)
13045 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
13046 && ! have_insn_for (COMPARE, mode))
13047 FOR_EACH_WIDER_MODE (tmode_iter, mode)
13048 {
13049 tmode = tmode_iter.require ();
13050 if (!HWI_COMPUTABLE_MODE_P (tmode))
13051 break;
13052 if (have_insn_for (COMPARE, tmode))
13053 {
13054 int zero_extended;
13055
13056 /* If this is a test for negative, we can make an explicit
13057 test of the sign bit. Test this first so we can use
13058 a paradoxical subreg to extend OP0. */
13059
13060 if (op1 == const0_rtx && (code == LT || code == GE)
13061 && HWI_COMPUTABLE_MODE_P (mode))
13062 {
13063 unsigned HOST_WIDE_INT sign
13064 = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
13065 op0 = simplify_gen_binary (AND, tmode,
13066 gen_lowpart (tmode, op0),
13067 gen_int_mode (sign, tmode));
13068 code = (code == LT) ? NE : EQ;
13069 break;
13070 }
13071
13072 /* If the only nonzero bits in OP0 and OP1 are those in the
13073 narrower mode and this is an equality or unsigned comparison,
13074 we can use the wider mode. Similarly for sign-extended
13075 values, in which case it is true for all comparisons. */
13076 zero_extended = ((code == EQ || code == NE
13077 || code == GEU || code == GTU
13078 || code == LEU || code == LTU)
13079 && (nonzero_bits (op0, tmode)
13080 & ~GET_MODE_MASK (mode)) == 0
13081 && ((CONST_INT_P (op1)
13082 || (nonzero_bits (op1, tmode)
13083 & ~GET_MODE_MASK (mode)) == 0)));
13084
13085 if (zero_extended
13086 || ((num_sign_bit_copies (op0, tmode)
13087 > (unsigned int) (GET_MODE_PRECISION (tmode)
13088 - GET_MODE_PRECISION (mode)))
13089 && (num_sign_bit_copies (op1, tmode)
13090 > (unsigned int) (GET_MODE_PRECISION (tmode)
13091 - GET_MODE_PRECISION (mode)))))
13092 {
13093 /* If OP0 is an AND and we don't have an AND in MODE either,
13094 make a new AND in the proper mode. */
13095 if (GET_CODE (op0) == AND
13096 && !have_insn_for (AND, mode))
13097 op0 = simplify_gen_binary (AND, tmode,
13098 gen_lowpart (tmode,
13099 XEXP (op0, 0)),
13100 gen_lowpart (tmode,
13101 XEXP (op0, 1)));
13102 else
13103 {
13104 if (zero_extended)
13105 {
13106 op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
13107 op0, mode);
13108 op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
13109 op1, mode);
13110 }
13111 else
13112 {
13113 op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
13114 op0, mode);
13115 op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
13116 op1, mode);
13117 }
13118 break;
13119 }
13120 }
13121 }
13122 }
13123
13124 /* We may have changed the comparison operands. Re-canonicalize. */
13125 if (swap_commutative_operands_p (op0, op1))
13126 {
13127 std::swap (op0, op1);
13128 code = swap_condition (code);
13129 }
13130
13131 /* If this machine only supports a subset of valid comparisons, see if we
13132 can convert an unsupported one into a supported one. */
13133 target_canonicalize_comparison (&code, &op0, &op1, 0);
13134
13135 *pop0 = op0;
13136 *pop1 = op1;
13137
13138 return code;
13139 }
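
/* An illustrative sketch of the widening above; the insn, modes and
   target are hypothetical and not taken from any machine description.
   On a machine with SImode compares but no QImode compare pattern, a
   signed test

     (LT (reg:QI 100) (const_int 0))

   becomes an explicit sign-bit test in the wider mode,

     (NE (and:SI (subreg:SI (reg:QI 100) 0) (const_int 128))
         (const_int 0))

   while equality and unsigned comparisons of suitably zero-extended
   operands are simply performed on ZERO_EXTENDed copies in SImode.  */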
13140 \f
13141 /* Utility function for record_value_for_reg. Count number of
13142 rtxs in X. */
13143 static int
13144 count_rtxs (rtx x)
13145 {
13146 enum rtx_code code = GET_CODE (x);
13147 const char *fmt;
13148 int i, j, ret = 1;
13149
13150 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
13151 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
13152 {
13153 rtx x0 = XEXP (x, 0);
13154 rtx x1 = XEXP (x, 1);
13155
13156 if (x0 == x1)
13157 return 1 + 2 * count_rtxs (x0);
13158
13159 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
13160 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
13161 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13162 return 2 + 2 * count_rtxs (x0)
13163 + count_rtxs (x0 == XEXP (x1, 0)
13164 ? XEXP (x1, 1) : XEXP (x1, 0));
13165
13166 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
13167 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
13168 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13169 return 2 + 2 * count_rtxs (x1)
13170 + count_rtxs (x1 == XEXP (x0, 0)
13171 ? XEXP (x0, 1) : XEXP (x0, 0));
13172 }
13173
13174 fmt = GET_RTX_FORMAT (code);
13175 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13176 if (fmt[i] == 'e')
13177 ret += count_rtxs (XEXP (x, i));
13178 else if (fmt[i] == 'E')
13179 for (j = 0; j < XVECLEN (x, i); j++)
13180 ret += count_rtxs (XVECEXP (x, i, j));
13181
13182 return ret;
13183 }
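
/* Purely illustrative example: for X = (plus:SI (reg:SI 100)
   (reg:SI 100)) where both operands share one rtx, the code above
   returns 1 + 2 * count_rtxs (x0) = 3.  Shared operands are charged
   twice on purpose, so a value that keeps duplicating a subexpression
   is priced at its full expanded size when checked against
   MAX_LAST_VALUE_RTL.  */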
13184 \f
13185 /* Utility function for the following routine. Called when X is part of a value
13186 being stored into last_set_value. Sets last_set_table_tick
13187 for each register mentioned. Similar to mention_regs in cse.c */
13188
13189 static void
13190 update_table_tick (rtx x)
13191 {
13192 enum rtx_code code = GET_CODE (x);
13193 const char *fmt = GET_RTX_FORMAT (code);
13194 int i, j;
13195
13196 if (code == REG)
13197 {
13198 unsigned int regno = REGNO (x);
13199 unsigned int endregno = END_REGNO (x);
13200 unsigned int r;
13201
13202 for (r = regno; r < endregno; r++)
13203 {
13204 reg_stat_type *rsp = &reg_stat[r];
13205 rsp->last_set_table_tick = label_tick;
13206 }
13207
13208 return;
13209 }
13210
13211 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13212 if (fmt[i] == 'e')
13213 {
13214 /* Check for identical subexpressions. If x contains
13215 identical subexpressions we only have to traverse one of
13216 them. */
13217 if (i == 0 && ARITHMETIC_P (x))
13218 {
13219 /* Note that at this point x1 has already been
13220 processed. */
13221 rtx x0 = XEXP (x, 0);
13222 rtx x1 = XEXP (x, 1);
13223
13224 /* If x0 and x1 are identical then there is no need to
13225 process x0. */
13226 if (x0 == x1)
13227 break;
13228
13229 /* If x0 is identical to a subexpression of x1 then while
13230 processing x1, x0 has already been processed. Thus we
13231 are done with x. */
13232 if (ARITHMETIC_P (x1)
13233 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13234 break;
13235
13236 /* If x1 is identical to a subexpression of x0 then we
13237 still have to process the rest of x0. */
13238 if (ARITHMETIC_P (x0)
13239 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13240 {
13241 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
13242 break;
13243 }
13244 }
13245
13246 update_table_tick (XEXP (x, i));
13247 }
13248 else if (fmt[i] == 'E')
13249 for (j = 0; j < XVECLEN (x, i); j++)
13250 update_table_tick (XVECEXP (x, i, j));
13251 }
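
/* For instance (example registers only), recording the value
   (plus:SI (reg:SI 100) (reg:SI 101)) sets last_set_table_tick to the
   current label_tick for registers 100 and 101, marking both as
   mentioned in a recorded value within this basic block.  */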
13252
13253 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
13254 are saying that the register is clobbered and we no longer know its
13255 value. If INSN is zero, don't update reg_stat[].last_set; this is
13256 only permitted with VALUE also zero and is used to invalidate the
13257 register. */
13258
13259 static void
13260 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
13261 {
13262 unsigned int regno = REGNO (reg);
13263 unsigned int endregno = END_REGNO (reg);
13264 unsigned int i;
13265 reg_stat_type *rsp;
13266
13267 /* If VALUE contains REG and we have a previous value for REG, substitute
13268 the previous value. */
13269 if (value && insn && reg_overlap_mentioned_p (reg, value))
13270 {
13271 rtx tem;
13272
13273 /* Set things up so get_last_value is allowed to see anything set up to
13274 our insn. */
13275 subst_low_luid = DF_INSN_LUID (insn);
13276 tem = get_last_value (reg);
13277
13278 /* If TEM is simply a binary operation with two CLOBBERs as operands,
13279 it isn't going to be useful and will take a lot of time to process,
13280 so just use the CLOBBER. */
13281
13282 if (tem)
13283 {
13284 if (ARITHMETIC_P (tem)
13285 && GET_CODE (XEXP (tem, 0)) == CLOBBER
13286 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
13287 tem = XEXP (tem, 0);
13288 else if (count_occurrences (value, reg, 1) >= 2)
13289 {
13290 /* If there are two or more occurrences of REG in VALUE,
13291 prevent the value from growing too much. */
13292 if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
13293 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
13294 }
13295
13296 value = replace_rtx (copy_rtx (value), reg, tem);
13297 }
13298 }
13299
13300 /* For each register modified, show we don't know its value, that
13301 we don't know about its bitwise content, that its value has been
13302 updated, and that we don't know the location of the death of the
13303 register. */
13304 for (i = regno; i < endregno; i++)
13305 {
13306 rsp = &reg_stat[i];
13307
13308 if (insn)
13309 rsp->last_set = insn;
13310
13311 rsp->last_set_value = 0;
13312 rsp->last_set_mode = VOIDmode;
13313 rsp->last_set_nonzero_bits = 0;
13314 rsp->last_set_sign_bit_copies = 0;
13315 rsp->last_death = 0;
13316 rsp->truncated_to_mode = VOIDmode;
13317 }
13318
13319 /* Mark registers that are being referenced in this value. */
13320 if (value)
13321 update_table_tick (value);
13322
13323 /* Now update the status of each register being set.
13324 If someone is using this register in this block, set this register
13325 to invalid since we will get confused between the two lives in this
13326 basic block. This makes using this register always invalid. In cse, we
13327 scan the table to invalidate all entries using this register, but this
13328 is too much work for us. */
13329
13330 for (i = regno; i < endregno; i++)
13331 {
13332 rsp = &reg_stat[i];
13333 rsp->last_set_label = label_tick;
13334 if (!insn
13335 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
13336 rsp->last_set_invalid = 1;
13337 else
13338 rsp->last_set_invalid = 0;
13339 }
13340
13341 /* The value being assigned might refer to X (like in "x++;"). In that
13342 case, we must replace it with (clobber (const_int 0)) to prevent
13343 infinite loops. */
13344 rsp = &reg_stat[regno];
13345 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
13346 {
13347 value = copy_rtx (value);
13348 if (!get_last_value_validate (&value, insn, label_tick, 1))
13349 value = 0;
13350 }
13351
13352 /* For the main register being modified, update the value, the mode, the
13353 nonzero bits, and the number of sign bit copies. */
13354
13355 rsp->last_set_value = value;
13356
13357 if (value)
13358 {
13359 machine_mode mode = GET_MODE (reg);
13360 subst_low_luid = DF_INSN_LUID (insn);
13361 rsp->last_set_mode = mode;
13362 if (GET_MODE_CLASS (mode) == MODE_INT
13363 && HWI_COMPUTABLE_MODE_P (mode))
13364 mode = nonzero_bits_mode;
13365 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
13366 rsp->last_set_sign_bit_copies
13367 = num_sign_bit_copies (value, GET_MODE (reg));
13368 }
13369 }
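
/* A sketch of the self-referential case handled above: for the insn

     (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 1)))

   VALUE mentions REG, so the previously recorded value of (reg:SI 100)
   -- or a (clobber (const_int 0)) when none is usable -- is substituted
   into VALUE before it is recorded.  Without that, each recording would
   nest one level deeper and the value would grow without bound.  */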
13370
13371 /* Called via note_stores from record_dead_and_set_regs to handle one
13372 SET or CLOBBER in an insn. DATA is the instruction in which the
13373 set is occurring. */
13374
13375 static void
13376 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
13377 {
13378 rtx_insn *record_dead_insn = (rtx_insn *) data;
13379
13380 if (GET_CODE (dest) == SUBREG)
13381 dest = SUBREG_REG (dest);
13382
13383 if (!record_dead_insn)
13384 {
13385 if (REG_P (dest))
13386 record_value_for_reg (dest, NULL, NULL_RTX);
13387 return;
13388 }
13389
13390 if (REG_P (dest))
13391 {
13392 /* If we are setting the whole register, we know its value. Otherwise
13393 show that we don't know the value. We can handle a SUBREG if it's
13394 the low part, but we must be careful with paradoxical SUBREGs on
13395 RISC architectures because we cannot strip e.g. an extension around
13396 a load and record the naked load since the RTL middle-end considers
13397 that the upper bits are defined according to LOAD_EXTEND_OP. */
13398 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
13399 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
13400 else if (GET_CODE (setter) == SET
13401 && GET_CODE (SET_DEST (setter)) == SUBREG
13402 && SUBREG_REG (SET_DEST (setter)) == dest
13403 && known_le (GET_MODE_PRECISION (GET_MODE (dest)),
13404 BITS_PER_WORD)
13405 && subreg_lowpart_p (SET_DEST (setter)))
13406 record_value_for_reg (dest, record_dead_insn,
13407 WORD_REGISTER_OPERATIONS
13408 && word_register_operation_p (SET_SRC (setter))
13409 && paradoxical_subreg_p (SET_DEST (setter))
13410 ? SET_SRC (setter)
13411 : gen_lowpart (GET_MODE (dest),
13412 SET_SRC (setter)));
13413 else if (GET_CODE (setter) == CLOBBER_HIGH)
13414 {
13415 reg_stat_type *rsp = &reg_stat[REGNO (dest)];
13416 if (rsp->last_set_value
13417 && reg_is_clobbered_by_clobber_high
13418 (REGNO (dest), GET_MODE (rsp->last_set_value),
13419 XEXP (setter, 0)))
13420 record_value_for_reg (dest, NULL, NULL_RTX);
13421 }
13422 else
13423 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
13424 }
13425 else if (MEM_P (dest)
13426 /* Ignore pushes, they clobber nothing. */
13427 && ! push_operand (dest, GET_MODE (dest)))
13428 mem_last_set = DF_INSN_LUID (record_dead_insn);
13429 }
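
/* Hypothetical example of the SUBREG case above, on a 64-bit target
   with 64-bit words:

     (set (subreg:SI (reg:DI 100) 0) (reg:SI 101))

   is a lowpart store of no more than a word, so the value recorded for
   (reg:DI 100) is gen_lowpart (DImode, (reg:SI 101)); other partial
   stores simply invalidate the register.  */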
13430
13431 /* Update the records of when each REG was most recently set or killed
13432 for the things done by INSN. This is the last thing done in processing
13433 INSN in the combiner loop.
13434
13435 We update reg_stat[], in particular fields last_set, last_set_value,
13436 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
13437 last_death, and also the similar information mem_last_set (which insn
13438 most recently modified memory) and last_call_luid (which insn was the
13439 most recent subroutine call). */
13440
13441 static void
13442 record_dead_and_set_regs (rtx_insn *insn)
13443 {
13444 rtx link;
13445 unsigned int i;
13446
13447 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
13448 {
13449 if (REG_NOTE_KIND (link) == REG_DEAD
13450 && REG_P (XEXP (link, 0)))
13451 {
13452 unsigned int regno = REGNO (XEXP (link, 0));
13453 unsigned int endregno = END_REGNO (XEXP (link, 0));
13454
13455 for (i = regno; i < endregno; i++)
13456 {
13457 reg_stat_type *rsp;
13458
13459 rsp = &reg_stat[i];
13460 rsp->last_death = insn;
13461 }
13462 }
13463 else if (REG_NOTE_KIND (link) == REG_INC)
13464 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
13465 }
13466
13467 if (CALL_P (insn))
13468 {
13469 hard_reg_set_iterator hrsi;
13470 EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
13471 {
13472 reg_stat_type *rsp;
13473
13474 rsp = &reg_stat[i];
13475 rsp->last_set_invalid = 1;
13476 rsp->last_set = insn;
13477 rsp->last_set_value = 0;
13478 rsp->last_set_mode = VOIDmode;
13479 rsp->last_set_nonzero_bits = 0;
13480 rsp->last_set_sign_bit_copies = 0;
13481 rsp->last_death = 0;
13482 rsp->truncated_to_mode = VOIDmode;
13483 }
13484
13485 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
13486
13487 /* We can't combine into a call pattern. Remember, though, that
13488 the return value register is set at this LUID. We could
13489 still replace a register with the return value from the
13490 wrong subroutine call! */
13491 note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
13492 }
13493 else
13494 note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
13495 }
13496
13497 /* If a SUBREG has the promoted bit set, it is in fact a property of the
13498 register present in the SUBREG, so for each such SUBREG go back and
13499 adjust nonzero and sign bit information of the registers that are
13500 known to have some zero/sign bits set.
13501
13502 This is needed because when combine blows the SUBREGs away, the
13503 information on zero/sign bits is lost and further combines can be
13504 missed because of that. */
13505
13506 static void
13507 record_promoted_value (rtx_insn *insn, rtx subreg)
13508 {
13509 struct insn_link *links;
13510 rtx set;
13511 unsigned int regno = REGNO (SUBREG_REG (subreg));
13512 machine_mode mode = GET_MODE (subreg);
13513
13514 if (!HWI_COMPUTABLE_MODE_P (mode))
13515 return;
13516
13517 for (links = LOG_LINKS (insn); links;)
13518 {
13519 reg_stat_type *rsp;
13520
13521 insn = links->insn;
13522 set = single_set (insn);
13523
13524 if (! set || !REG_P (SET_DEST (set))
13525 || REGNO (SET_DEST (set)) != regno
13526 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
13527 {
13528 links = links->next;
13529 continue;
13530 }
13531
13532 rsp = &reg_stat[regno];
13533 if (rsp->last_set == insn)
13534 {
13535 if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
13536 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
13537 }
13538
13539 if (REG_P (SET_SRC (set)))
13540 {
13541 regno = REGNO (SET_SRC (set));
13542 links = LOG_LINKS (insn);
13543 }
13544 else
13545 break;
13546 }
13547 }
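
/* Illustrative case: if (subreg:SI (reg:DI 100) 0) is marked
   SUBREG_PROMOTED_VAR_P with unsigned promotion, the upper bits of
   (reg:DI 100) are known zero, so last_set_nonzero_bits is masked down
   to GET_MODE_MASK (SImode).  That knowledge survives even after later
   combines strip the SUBREG itself away.  */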
13548
13549 /* Check if X, a register, is known to contain a value already
13550 truncated to MODE. In this case we can use a subreg to refer to
13551 the truncated value even though in the generic case we would need
13552 an explicit truncation. */
13553
13554 static bool
13555 reg_truncated_to_mode (machine_mode mode, const_rtx x)
13556 {
13557 reg_stat_type *rsp = &reg_stat[REGNO (x)];
13558 machine_mode truncated = rsp->truncated_to_mode;
13559
13560 if (truncated == 0
13561 || rsp->truncation_label < label_tick_ebb_start)
13562 return false;
13563 if (!partial_subreg_p (mode, truncated))
13564 return true;
13565 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
13566 return true;
13567 return false;
13568 }
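
/* For instance, on a hypothetical target where DImode-to-SImode
   truncation is a no-op: once (reg:DI 100) has been recorded as
   truncated to SImode in this EBB, reg_truncated_to_mode (SImode, x)
   is true and a later (truncate:SI (reg:DI 100)) may be rewritten as
   (subreg:SI (reg:DI 100) 0).  */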
13569
13570 /* If X is a hard reg or a subreg record the mode that the register is
13571 accessed in. For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
13572 able to turn a truncate into a subreg using this information. Return true
13573 if traversing X is complete. */
13574
13575 static bool
13576 record_truncated_value (rtx x)
13577 {
13578 machine_mode truncated_mode;
13579 reg_stat_type *rsp;
13580
13581 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
13582 {
13583 machine_mode original_mode = GET_MODE (SUBREG_REG (x));
13584 truncated_mode = GET_MODE (x);
13585
13586 if (!partial_subreg_p (truncated_mode, original_mode))
13587 return true;
13588
13590 if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
13591 return true;
13592
13593 x = SUBREG_REG (x);
13594 }
13595 /* ??? For hard-regs we now record everything. We might be able to
13596 optimize this using last_set_mode. */
13597 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
13598 truncated_mode = GET_MODE (x);
13599 else
13600 return false;
13601
13602 rsp = &reg_stat[REGNO (x)];
13603 if (rsp->truncated_to_mode == 0
13604 || rsp->truncation_label < label_tick_ebb_start
13605 || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
13606 {
13607 rsp->truncated_to_mode = truncated_mode;
13608 rsp->truncation_label = label_tick;
13609 }
13610
13611 return true;
13612 }
13613
13614 /* Callback for note_uses. Find hardregs and subregs of pseudos and
13615 the modes they are used in. This can help turn TRUNCATEs into
13616 SUBREGs. */
13617
13618 static void
13619 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
13620 {
13621 subrtx_var_iterator::array_type array;
13622 FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
13623 if (record_truncated_value (*iter))
13624 iter.skip_subrtxes ();
13625 }
13626
13627 /* Scan X for promoted SUBREGs. For each one found,
13628 note what it implies to the registers used in it. */
13629
13630 static void
13631 check_promoted_subreg (rtx_insn *insn, rtx x)
13632 {
13633 if (GET_CODE (x) == SUBREG
13634 && SUBREG_PROMOTED_VAR_P (x)
13635 && REG_P (SUBREG_REG (x)))
13636 record_promoted_value (insn, x);
13637 else
13638 {
13639 const char *format = GET_RTX_FORMAT (GET_CODE (x));
13640 int i, j;
13641
13642 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
13643 switch (format[i])
13644 {
13645 case 'e':
13646 check_promoted_subreg (insn, XEXP (x, i));
13647 break;
13648 case 'V':
13649 case 'E':
13650 if (XVEC (x, i) != 0)
13651 for (j = 0; j < XVECLEN (x, i); j++)
13652 check_promoted_subreg (insn, XVECEXP (x, i, j));
13653 break;
13654 }
13655 }
13656 }
13657 \f
13658 /* Verify that all the registers and memory references mentioned in *LOC are
13659 still valid. *LOC was part of a value set in INSN when label_tick was
13660 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
13661 the invalid references with (clobber (const_int 0)) and return 1. This
13662 replacement is useful because we often can get useful information about
13663 the form of a value (e.g., if it was produced by a shift that always
13664 produces -1 or 0) even though we don't know exactly what registers it
13665 was produced from. */
13666
13667 static int
13668 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
13669 {
13670 rtx x = *loc;
13671 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
13672 int len = GET_RTX_LENGTH (GET_CODE (x));
13673 int i, j;
13674
13675 if (REG_P (x))
13676 {
13677 unsigned int regno = REGNO (x);
13678 unsigned int endregno = END_REGNO (x);
13679 unsigned int j;
13680
13681 for (j = regno; j < endregno; j++)
13682 {
13683 reg_stat_type *rsp = &reg_stat[j];
13684 if (rsp->last_set_invalid
13685 /* If this is a pseudo-register that was only set once and not
13686 live at the beginning of the function, it is always valid. */
13687 || (! (regno >= FIRST_PSEUDO_REGISTER
13688 && regno < reg_n_sets_max
13689 && REG_N_SETS (regno) == 1
13690 && (!REGNO_REG_SET_P
13691 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
13692 regno)))
13693 && rsp->last_set_label > tick))
13694 {
13695 if (replace)
13696 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13697 return replace;
13698 }
13699 }
13700
13701 return 1;
13702 }
13703 /* If this is a memory reference, make sure that there were no stores after
13704 it that might have clobbered the value. We don't have alias info, so we
13705 assume any store invalidates it. Moreover, we only have local UIDs, so
13706 we also assume that there were stores in the intervening basic blocks. */
13707 else if (MEM_P (x) && !MEM_READONLY_P (x)
13708 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
13709 {
13710 if (replace)
13711 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13712 return replace;
13713 }
13714
13715 for (i = 0; i < len; i++)
13716 {
13717 if (fmt[i] == 'e')
13718 {
13719 /* Check for identical subexpressions. If x contains
13720 identical subexpressions we only have to traverse one of
13721 them. */
13722 if (i == 1 && ARITHMETIC_P (x))
13723 {
13724 /* Note that at this point x0 has already been checked
13725 and found valid. */
13726 rtx x0 = XEXP (x, 0);
13727 rtx x1 = XEXP (x, 1);
13728
13729 /* If x0 and x1 are identical then x is also valid. */
13730 if (x0 == x1)
13731 return 1;
13732
13733 /* If x1 is identical to a subexpression of x0 then
13734 while checking x0, x1 has already been checked. Thus
13735 it is valid, and so is x. */
13736 if (ARITHMETIC_P (x0)
13737 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13738 return 1;
13739
13740 /* If x0 is identical to a subexpression of x1 then x is
13741 valid iff the rest of x1 is valid. */
13742 if (ARITHMETIC_P (x1)
13743 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13744 return
13745 get_last_value_validate (&XEXP (x1,
13746 x0 == XEXP (x1, 0) ? 1 : 0),
13747 insn, tick, replace);
13748 }
13749
13750 if (get_last_value_validate (&XEXP (x, i), insn, tick,
13751 replace) == 0)
13752 return 0;
13753 }
13754 else if (fmt[i] == 'E')
13755 for (j = 0; j < XVECLEN (x, i); j++)
13756 if (get_last_value_validate (&XVECEXP (x, i, j),
13757 insn, tick, replace) == 0)
13758 return 0;
13759 }
13760
13761 /* If we haven't found a reason for it to be invalid, it is valid. */
13762 return 1;
13763 }
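
/* A sketch of the REPLACE behaviour: suppose the recorded value is
   (ashiftrt:SI (reg:SI 100) (const_int 31)) and register 100 has been
   set again since.  With REPLACE nonzero the stale reference becomes

     (ashiftrt:SI (clobber (const_int 0)) (const_int 31))

   which no longer names a usable register but still records that the
   result must be -1 or 0 -- the "form" information mentioned above.  */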
13764
13765 /* Get the last value assigned to X, if known. Some registers
13766 in the value may be replaced with (clobber (const_int 0)) if their value
13767 is no longer known reliably. */
13768
13769 static rtx
13770 get_last_value (const_rtx x)
13771 {
13772 unsigned int regno;
13773 rtx value;
13774 reg_stat_type *rsp;
13775
13776 /* If this is a non-paradoxical SUBREG, get the value of its operand and
13777 then convert it to the desired mode. If this is a paradoxical SUBREG,
13778 we cannot predict what values the "extra" bits might have. */
13779 if (GET_CODE (x) == SUBREG
13780 && subreg_lowpart_p (x)
13781 && !paradoxical_subreg_p (x)
13782 && (value = get_last_value (SUBREG_REG (x))) != 0)
13783 return gen_lowpart (GET_MODE (x), value);
13784
13785 if (!REG_P (x))
13786 return 0;
13787
13788 regno = REGNO (x);
13789 rsp = &reg_stat[regno];
13790 value = rsp->last_set_value;
13791
13792 /* If we don't have a value, or if it isn't for this basic block and
13793 it's either a hard register, set more than once, or it's live
13794 at the beginning of the function, return 0.
13795
13796 Because if it's not live at the beginning of the function then the reg
13797 is always set before being used (is never used without being set).
13798 And, if it's set only once, and it's always set before use, then all
13799 uses must have the same last value, even if it's not from this basic
13800 block. */
13801
13802 if (value == 0
13803 || (rsp->last_set_label < label_tick_ebb_start
13804 && (regno < FIRST_PSEUDO_REGISTER
13805 || regno >= reg_n_sets_max
13806 || REG_N_SETS (regno) != 1
13807 || REGNO_REG_SET_P
13808 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
13809 return 0;
13810
13811 /* If the value was set in a later insn than the ones we are processing,
13812 we can't use it even if the register was only set once. */
13813 if (rsp->last_set_label == label_tick
13814 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
13815 return 0;
13816
13817 /* If fewer bits were set than what we are asked for now, we cannot use
13818 the value. */
13819 if (maybe_lt (GET_MODE_PRECISION (rsp->last_set_mode),
13820 GET_MODE_PRECISION (GET_MODE (x))))
13821 return 0;
13822
13823 /* If the value has all its registers valid, return it. */
13824 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
13825 return value;
13826
13827 /* Otherwise, make a copy and replace any invalid register with
13828 (clobber (const_int 0)). If that fails for some reason, return 0. */
13829
13830 value = copy_rtx (value);
13831 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
13832 return value;
13833
13834 return 0;
13835 }
13836 \f
13837 /* Define the variables used for communication between the following
13838 routines. */
13839
13840 static unsigned int reg_dead_regno, reg_dead_endregno;
13841 static int reg_dead_flag;
13842 static rtx reg_dead_reg;
13843
13844 /* Function called via note_stores from reg_dead_at_p.
13845
13846 If DEST is within [reg_dead_regno, reg_dead_endregno), set
13847 reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET. */
13848
13849 static void
13850 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
13851 {
13852 unsigned int regno, endregno;
13853
13854 if (!REG_P (dest))
13855 return;
13856
13857 if (GET_CODE (x) == CLOBBER_HIGH
13858 && !reg_is_clobbered_by_clobber_high (reg_dead_reg, XEXP (x, 0)))
13859 return;
13860
13861 regno = REGNO (dest);
13862 endregno = END_REGNO (dest);
13863 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
13864 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
13865 }
13866
13867 /* Return nonzero if REG is known to be dead at INSN.
13868
13869 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
13870 referencing REG, it is dead. If we hit a SET referencing REG, it is
13871 live. Otherwise, see if it is live or dead at the start of the basic
13872 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
13873 must be assumed to be always live. */
13874
13875 static int
13876 reg_dead_at_p (rtx reg, rtx_insn *insn)
13877 {
13878 basic_block block;
13879 unsigned int i;
13880
13881 /* Set variables for reg_dead_at_p_1. */
13882 reg_dead_regno = REGNO (reg);
13883 reg_dead_endregno = END_REGNO (reg);
13884 reg_dead_reg = reg;
13885
13886 reg_dead_flag = 0;
13887
13888 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
13889 we allow the machine description to decide whether use-and-clobber
13890 patterns are OK. */
13891 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
13892 {
13893 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13894 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13895 return 0;
13896 }
13897
13898 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
13899 beginning of basic block. */
13900 block = BLOCK_FOR_INSN (insn);
13901 for (;;)
13902 {
13903 if (INSN_P (insn))
13904 {
13905 if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13906 return 1;
13907
13908 note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
13909 if (reg_dead_flag)
13910 return reg_dead_flag == 1 ? 1 : 0;
13911
13912 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13913 return 1;
13914 }
13915
13916 if (insn == BB_HEAD (block))
13917 break;
13918
13919 insn = PREV_INSN (insn);
13920 }
13921
13922 /* Look at live-in sets for the basic block that we were in. */
13923 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13924 if (REGNO_REG_SET_P (df_get_live_in (block), i))
13925 return 0;
13926
13927 return 1;
13928 }
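
/* Typical use, as a sketch: when recog_for_combine has to add a
   (clobber (reg ...)) of a hard register to make a combined pattern
   match, the clobber is only acceptable if reg_dead_at_p confirms the
   register is dead at that insn; otherwise the added clobber would
   destroy a live value.  */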
13929 \f
13930 /* Note hard registers in X that are used. */
13931
13932 static void
13933 mark_used_regs_combine (rtx x)
13934 {
13935 RTX_CODE code = GET_CODE (x);
13936 unsigned int regno;
13937 int i;
13938
13939 switch (code)
13940 {
13941 case LABEL_REF:
13942 case SYMBOL_REF:
13943 case CONST:
13944 CASE_CONST_ANY:
13945 case PC:
13946 case ADDR_VEC:
13947 case ADDR_DIFF_VEC:
13948 case ASM_INPUT:
13949 /* CC0 must die in the insn after it is set, so we don't need to take
13950 special note of it here. */
13951 case CC0:
13952 return;
13953
13954 case CLOBBER:
13955 /* If we are clobbering a MEM, mark any hard registers inside the
13956 address as used. */
13957 if (MEM_P (XEXP (x, 0)))
13958 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13959 return;
13960
13961 case REG:
13962 regno = REGNO (x);
13963 /* A hard reg in a wide mode may really be multiple registers.
13964 If so, mark all of them just like the first. */
13965 if (regno < FIRST_PSEUDO_REGISTER)
13966 {
13967 /* None of this applies to the stack, frame or arg pointers. */
13968 if (regno == STACK_POINTER_REGNUM
13969 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
13970 && regno == HARD_FRAME_POINTER_REGNUM)
13971 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13972 && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13973 || regno == FRAME_POINTER_REGNUM)
13974 return;
13975
13976 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13977 }
13978 return;
13979
13980 case SET:
13981 {
13982 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13983 the address. */
13984 rtx testreg = SET_DEST (x);
13985
13986 while (GET_CODE (testreg) == SUBREG
13987 || GET_CODE (testreg) == ZERO_EXTRACT
13988 || GET_CODE (testreg) == STRICT_LOW_PART)
13989 testreg = XEXP (testreg, 0);
13990
13991 if (MEM_P (testreg))
13992 mark_used_regs_combine (XEXP (testreg, 0));
13993
13994 mark_used_regs_combine (SET_SRC (x));
13995 }
13996 return;
13997
13998 default:
13999 break;
14000 }
14001
14002 /* Recursively scan the operands of this expression. */
14003
14004 {
14005 const char *fmt = GET_RTX_FORMAT (code);
14006
14007 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
14008 {
14009 if (fmt[i] == 'e')
14010 mark_used_regs_combine (XEXP (x, i));
14011 else if (fmt[i] == 'E')
14012 {
14013 int j;
14014
14015 for (j = 0; j < XVECLEN (x, i); j++)
14016 mark_used_regs_combine (XVECEXP (x, i, j));
14017 }
14018 }
14019 }
14020 }
14021 \f
14022 /* Remove register number REGNO from the dead registers list of INSN.
14023
14024 Return the note used to record the death, if there was one. */
14025
14026 rtx
14027 remove_death (unsigned int regno, rtx_insn *insn)
14028 {
14029 rtx note = find_regno_note (insn, REG_DEAD, regno);
14030
14031 if (note)
14032 remove_note (insn, note);
14033
14034 return note;
14035 }
14036
14037 /* For each register (hardware or pseudo) used within expression X, if its
14038 death is in an instruction with luid between FROM_LUID (inclusive) and
14039 the luid of TO_INSN (exclusive), put a REG_DEAD note for that register in the
14040 list headed by PNOTES.
14041
14042 That said, don't move registers killed by maybe_kill_insn.
14043
14044 This is done when X is being merged by combination into TO_INSN. These
14045 notes will then be distributed as needed. */
14046
14047 static void
14048 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
14049 rtx *pnotes)
14050 {
14051 const char *fmt;
14052 int len, i;
14053 enum rtx_code code = GET_CODE (x);
14054
14055 if (code == REG)
14056 {
14057 unsigned int regno = REGNO (x);
14058 rtx_insn *where_dead = reg_stat[regno].last_death;
14059
14060 /* If we do not know where the register died, it may still die between
14061 FROM_LUID and TO_INSN. If so, find it. This is PR83304. */
14062 if (!where_dead || DF_INSN_LUID (where_dead) >= DF_INSN_LUID (to_insn))
14063 {
14064 rtx_insn *insn = prev_real_nondebug_insn (to_insn);
14065 while (insn
14066 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (to_insn)
14067 && DF_INSN_LUID (insn) >= from_luid)
14068 {
14069 if (dead_or_set_regno_p (insn, regno))
14070 {
14071 if (find_regno_note (insn, REG_DEAD, regno))
14072 where_dead = insn;
14073 break;
14074 }
14075
14076 insn = prev_real_nondebug_insn (insn);
14077 }
14078 }
14079
14080 /* Don't move the register if it gets killed in between from and to. */
14081 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
14082 && ! reg_referenced_p (x, maybe_kill_insn))
14083 return;
14084
14085 if (where_dead
14086 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
14087 && DF_INSN_LUID (where_dead) >= from_luid
14088 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
14089 {
14090 rtx note = remove_death (regno, where_dead);
14091
14092 /* It is possible for the call above to return 0. This can occur
14093 when last_death points to I2 or I1 that we combined with.
14094 In that case make a new note.
14095
14096 We must also check for the case where X is a hard register
14097 and NOTE is a death note for a range of hard registers
14098 including X. In that case, we must put REG_DEAD notes for
14099 the remaining registers in place of NOTE. */
14100
14101 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
14102 && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
14103 {
14104 unsigned int deadregno = REGNO (XEXP (note, 0));
14105 unsigned int deadend = END_REGNO (XEXP (note, 0));
14106 unsigned int ourend = END_REGNO (x);
14107 unsigned int i;
14108
14109 for (i = deadregno; i < deadend; i++)
14110 if (i < regno || i >= ourend)
14111 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
14112 }
14113
14114 /* If we didn't find any note, or if we found a REG_DEAD note that
14115 covers only part of the given reg, and we have a multi-reg hard
14116 register, then to be safe we must check for REG_DEAD notes
14117 for each register other than the first. They could have
14118 their own REG_DEAD notes lying around. */
14119 else if ((note == 0
14120 || (note != 0
14121 && partial_subreg_p (GET_MODE (XEXP (note, 0)),
14122 GET_MODE (x))))
14123 && regno < FIRST_PSEUDO_REGISTER
14124 && REG_NREGS (x) > 1)
14125 {
14126 unsigned int ourend = END_REGNO (x);
14127 unsigned int i, offset;
14128 rtx oldnotes = 0;
14129
14130 if (note)
14131 offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
14132 else
14133 offset = 1;
14134
14135 for (i = regno + offset; i < ourend; i++)
14136 move_deaths (regno_reg_rtx[i],
14137 maybe_kill_insn, from_luid, to_insn, &oldnotes);
14138 }
14139
14140 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
14141 {
14142 XEXP (note, 1) = *pnotes;
14143 *pnotes = note;
14144 }
14145 else
14146 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
14147 }
14148
14149 return;
14150 }
14151
14152 else if (GET_CODE (x) == SET)
14153 {
14154 rtx dest = SET_DEST (x);
14155
14156 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
14157
14158 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
14159 that accesses one word of a multi-word item, some
14160 piece of every register in the expression is used by
14161 this insn, so remove any old death. */
14162 /* ??? So why do we test for equality of the sizes? */
14163
14164 if (GET_CODE (dest) == ZERO_EXTRACT
14165 || GET_CODE (dest) == STRICT_LOW_PART
14166 || (GET_CODE (dest) == SUBREG
14167 && !read_modify_subreg_p (dest)))
14168 {
14169 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
14170 return;
14171 }
14172
14173 /* If this is some other SUBREG, we know it replaces the entire
14174 value, so use that as the destination. */
14175 if (GET_CODE (dest) == SUBREG)
14176 dest = SUBREG_REG (dest);
14177
14178 /* If this is a MEM, adjust deaths of anything used in the address.
14179 For a REG (the only other possibility), the entire value is
14180 being replaced so the old value is not used in this insn. */
14181
14182 if (MEM_P (dest))
14183 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
14184 to_insn, pnotes);
14185 return;
14186 }
14187
14188 else if (GET_CODE (x) == CLOBBER)
14189 return;
14190
14191 len = GET_RTX_LENGTH (code);
14192 fmt = GET_RTX_FORMAT (code);
14193
14194 for (i = 0; i < len; i++)
14195 {
14196 if (fmt[i] == 'E')
14197 {
14198 int j;
14199 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
14200 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
14201 to_insn, pnotes);
14202 }
14203 else if (fmt[i] == 'e')
14204 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
14205 }
14206 }
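
/* Illustrative multi-word case, assuming a 32-bit target: if X is
   (reg:DI 0) but the note found covers only (reg:SI 0), the code above
   recurses on regno_reg_rtx[1] so that the upper half either finds its
   own death note or gets a fresh REG_DEAD, instead of the second hard
   register silently losing its note.  */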
14207 \f
14208 /* Return 1 if X is the target of a bit-field assignment in BODY, the
14209 pattern of an insn. X must be a REG. */
14210
14211 static int
14212 reg_bitfield_target_p (rtx x, rtx body)
14213 {
14214 int i;
14215
14216 if (GET_CODE (body) == SET)
14217 {
14218 rtx dest = SET_DEST (body);
14219 rtx target;
14220 unsigned int regno, tregno, endregno, endtregno;
14221
14222 if (GET_CODE (dest) == ZERO_EXTRACT)
14223 target = XEXP (dest, 0);
14224 else if (GET_CODE (dest) == STRICT_LOW_PART)
14225 target = SUBREG_REG (XEXP (dest, 0));
14226 else
14227 return 0;
14228
14229 if (GET_CODE (target) == SUBREG)
14230 target = SUBREG_REG (target);
14231
14232 if (!REG_P (target))
14233 return 0;
14234
14235 tregno = REGNO (target), regno = REGNO (x);
14236 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
14237 return target == x;
14238
14239 endtregno = end_hard_regno (GET_MODE (target), tregno);
14240 endregno = end_hard_regno (GET_MODE (x), regno);
14241
14242 return endregno > tregno && regno < endtregno;
14243 }
14244
14245 else if (GET_CODE (body) == PARALLEL)
14246 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
14247 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
14248 return 1;
14249
14250 return 0;
14251 }
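
/* E.g. for BODY = (set (zero_extract:SI (reg:SI 100) (const_int 8)
   (const_int 0)) (reg:SI 101)), this returns 1 when X is (reg:SI 100):
   only a field of the register is written, so the register stays partly
   live and must not receive a REG_DEAD note on that insn.  (Operand
   values are illustrative.)  */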
14252 \f
14253 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
14254 as appropriate. I3 and I2 are the insns resulting from the combination
14255 insns including FROM (I2 may be zero).
14256
14257 ELIM_I2 and ELIM_I1 are either zero or registers that we know will
14258 not need REG_DEAD notes because they are being substituted for. This
14259 saves searching in the most common cases.
14260
14261 Each note in the list is either ignored or placed on some insns, depending
14262 on the type of note. */
14263
14264 static void
14265 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
14266 rtx elim_i2, rtx elim_i1, rtx elim_i0)
14267 {
14268 rtx note, next_note;
14269 rtx tem_note;
14270 rtx_insn *tem_insn;
14271
14272 for (note = notes; note; note = next_note)
14273 {
14274 rtx_insn *place = 0, *place2 = 0;
14275
14276 next_note = XEXP (note, 1);
14277 switch (REG_NOTE_KIND (note))
14278 {
14279 case REG_BR_PROB:
14280 case REG_BR_PRED:
14281 /* Doesn't matter much where we put this, as long as it's somewhere.
14282 It is preferable to keep these notes on branches, which is most
14283 likely to be i3. */
14284 place = i3;
14285 break;
14286
14287 case REG_NON_LOCAL_GOTO:
14288 if (JUMP_P (i3))
14289 place = i3;
14290 else
14291 {
14292 gcc_assert (i2 && JUMP_P (i2));
14293 place = i2;
14294 }
14295 break;
14296
14297 case REG_EH_REGION:
14298 /* These notes must remain with the call or trapping instruction. */
14299 if (CALL_P (i3))
14300 place = i3;
14301 else if (i2 && CALL_P (i2))
14302 place = i2;
14303 else
14304 {
14305 gcc_assert (cfun->can_throw_non_call_exceptions);
14306 if (may_trap_p (i3))
14307 place = i3;
14308 else if (i2 && may_trap_p (i2))
14309 place = i2;
14310 /* ??? Otherwise assume we've combined things such that we
14311 can now prove that the instructions can't trap. Drop the
14312 note in this case. */
14313 }
14314 break;
14315
14316 case REG_ARGS_SIZE:
14317 /* ??? How to distribute between i3-i1. Assume i3 contains the
14318 entire adjustment. Assert i3 contains at least some adjust. */
14319 if (!noop_move_p (i3))
14320 {
14321 poly_int64 old_size, args_size = get_args_size (note);
14322 /* fixup_args_size_notes looks at REG_NORETURN note,
14323 so ensure the note is placed there first. */
14324 if (CALL_P (i3))
14325 {
14326 rtx *np;
14327 for (np = &next_note; *np; np = &XEXP (*np, 1))
14328 if (REG_NOTE_KIND (*np) == REG_NORETURN)
14329 {
14330 rtx n = *np;
14331 *np = XEXP (n, 1);
14332 XEXP (n, 1) = REG_NOTES (i3);
14333 REG_NOTES (i3) = n;
14334 break;
14335 }
14336 }
14337 old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
14338 /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
14339 REG_ARGS_SIZE note to all noreturn calls, allow that here. */
14340 gcc_assert (maybe_ne (old_size, args_size)
14341 || (CALL_P (i3)
14342 && !ACCUMULATE_OUTGOING_ARGS
14343 && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
14344 }
14345 break;
14346
14347 case REG_NORETURN:
14348 case REG_SETJMP:
14349 case REG_TM:
14350 case REG_CALL_DECL:
14351 case REG_CALL_NOCF_CHECK:
14352 /* These notes must remain with the call. It should not be
14353 possible for both I2 and I3 to be a call. */
14354 if (CALL_P (i3))
14355 place = i3;
14356 else
14357 {
14358 gcc_assert (i2 && CALL_P (i2));
14359 place = i2;
14360 }
14361 break;
14362
14363 case REG_UNUSED:
14364 /* Any clobbers for i3 may still exist, and so we must process
14365 REG_UNUSED notes from that insn.
14366
14367 Any clobbers from i2 or i1 can only exist if they were added by
14368 recog_for_combine. In that case, recog_for_combine created the
14369 necessary REG_UNUSED notes. Trying to keep any original
14370 REG_UNUSED notes from these insns can cause incorrect output
14371 if it is for the same register as the original i3 dest.
14372 In that case, we will notice that the register is set in i3,
14373 and then add a REG_UNUSED note for the destination of i3, which
14374 is wrong. However, it is possible to have REG_UNUSED notes from
14375 i2 or i1 for registers which were both used and clobbered, so
14376 we keep notes from i2 or i1 if they will turn into REG_DEAD
14377 notes. */
14378
14379 /* If this register is set or clobbered in I3, put the note there
14380 unless there is one already. */
14381 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
14382 {
14383 if (from_insn != i3)
14384 break;
14385
14386 if (! (REG_P (XEXP (note, 0))
14387 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
14388 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
14389 place = i3;
14390 }
14391 /* Otherwise, if this register is used by I3, then this register
14392 now dies here, so we must put a REG_DEAD note here unless there
14393 is one already. */
14394 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
14395 && ! (REG_P (XEXP (note, 0))
14396 ? find_regno_note (i3, REG_DEAD,
14397 REGNO (XEXP (note, 0)))
14398 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
14399 {
14400 PUT_REG_NOTE_KIND (note, REG_DEAD);
14401 place = i3;
14402 }
14403
14404 /* A SET or CLOBBER of the REG_UNUSED reg has been removed,
14405 but we can't tell which at this point. We must reset any
14406 expectations we had about the value that was previously
14407 stored in the reg. ??? Ideally, we'd adjust REG_N_SETS
14408 and, if appropriate, restore its previous value, but we
14409 don't have enough information for that at this point. */
14410 else
14411 {
14412 record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14413
14414 /* Otherwise, if this register is now referenced in i2
14415 then the register used to be modified in one of the
14416 original insns. If it was i3 (say, in an unused
14417 parallel), it's now completely gone, so the note can
14418 be discarded. But if it was modified in i2, i1 or i0
14419 and we still reference it in i2, then we're
14420 referencing the previous value, and since the
14421 register was modified and REG_UNUSED, we know that
14422 the previous value is now dead. So, if we only
14423 reference the register in i2, we change the note to
14424 REG_DEAD, to reflect the previous value. However, if
14425 we're also setting or clobbering the register as
14426 scratch, we know (because the register was not
14427 referenced in i3) that it's unused, just as it was
14428 unused before, and we place the note in i2. */
14429 if (from_insn != i3 && i2 && INSN_P (i2)
14430 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14431 {
14432 if (!reg_set_p (XEXP (note, 0), PATTERN (i2)))
14433 PUT_REG_NOTE_KIND (note, REG_DEAD);
14434 if (! (REG_P (XEXP (note, 0))
14435 ? find_regno_note (i2, REG_NOTE_KIND (note),
14436 REGNO (XEXP (note, 0)))
14437 : find_reg_note (i2, REG_NOTE_KIND (note),
14438 XEXP (note, 0))))
14439 place = i2;
14440 }
14441 }
14442
14443 break;
14444
14445 case REG_EQUAL:
14446 case REG_EQUIV:
14447 case REG_NOALIAS:
14448 /* These notes say something about results of an insn. We can
14449 only support them if they used to be on I3 in which case they
14450 remain on I3. Otherwise they are ignored.
14451
14452 If the note refers to an expression that is not a constant, we
14453 must also ignore the note since we cannot tell whether the
14454 equivalence is still true. It might be possible to do
14455 slightly better than this (we only have a problem if I2DEST
14456 or I1DEST is present in the expression), but it doesn't
14457 seem worth the trouble. */
14458
14459 if (from_insn == i3
14460 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
14461 place = i3;
14462 break;
14463
14464 case REG_INC:
14465 /* These notes say something about how a register is used. They must
14466 be present on any use of the register in I2 or I3. */
14467 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
14468 place = i3;
14469
14470 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
14471 {
14472 if (place)
14473 place2 = i2;
14474 else
14475 place = i2;
14476 }
14477 break;
14478
14479 case REG_LABEL_TARGET:
14480 case REG_LABEL_OPERAND:
14481 /* This can show up in several ways -- either directly in the
14482 pattern, or hidden off in the constant pool with (or without?)
14483 a REG_EQUAL note. */
14484 /* ??? Ignore the without-reg_equal-note problem for now. */
14485 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
14486 || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
14487 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14488 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
14489 place = i3;
14490
14491 if (i2
14492 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
14493 || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
14494 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14495 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
14496 {
14497 if (place)
14498 place2 = i2;
14499 else
14500 place = i2;
14501 }
14502
14503 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
14504 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
14505 there. */
14506 if (place && JUMP_P (place)
14507 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14508 && (JUMP_LABEL (place) == NULL
14509 || JUMP_LABEL (place) == XEXP (note, 0)))
14510 {
14511 rtx label = JUMP_LABEL (place);
14512
14513 if (!label)
14514 JUMP_LABEL (place) = XEXP (note, 0);
14515 else if (LABEL_P (label))
14516 LABEL_NUSES (label)--;
14517 }
14518
14519 if (place2 && JUMP_P (place2)
14520 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14521 && (JUMP_LABEL (place2) == NULL
14522 || JUMP_LABEL (place2) == XEXP (note, 0)))
14523 {
14524 rtx label = JUMP_LABEL (place2);
14525
14526 if (!label)
14527 JUMP_LABEL (place2) = XEXP (note, 0);
14528 else if (LABEL_P (label))
14529 LABEL_NUSES (label)--;
14530 place2 = 0;
14531 }
14532 break;
14533
14534 case REG_NONNEG:
14535 /* This note says something about the value of a register prior
14536 to the execution of an insn. It is too much trouble to see
14537 if the note is still correct in all situations. It is better
14538 to simply delete it. */
14539 break;
14540
14541 case REG_DEAD:
14542 /* If we replaced the right hand side of FROM_INSN with a
14543 REG_EQUAL note, the original use of the dying register
14544 will not have been combined into I3 and I2. In such cases,
14545 FROM_INSN is guaranteed to be the first of the combined
14546 instructions, so we simply need to search back before
14547 FROM_INSN for the previous use or set of this register,
14548 then alter the notes there appropriately.
14549
14550 If the register is used as an input in I3, it dies there.
14551 Similarly for I2, if it is nonzero and adjacent to I3.
14552
14553 If the register is not used as an input in either I3 or I2
14554 and it is not one of the registers we were supposed to eliminate,
14555 there are two possibilities. We might have a non-adjacent I2
14556 or we might have somehow eliminated an additional register
14557 from a computation. For example, we might have had A & B where
14558 we discover that B will always be zero. In this case we will
14559 eliminate the reference to A.
14560
14561 In both cases, we must search to see if we can find a previous
14562 use of A and put the death note there. */
14563
14564 if (from_insn
14565 && from_insn == i2mod
14566 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
14567 tem_insn = from_insn;
14568 else
14569 {
14570 if (from_insn
14571 && CALL_P (from_insn)
14572 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
14573 place = from_insn;
14574 else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
14575 {
14576 /* If the new I2 sets the same register that is marked
14577 dead in the note, we do not in general know where to
14578 put the note. One important case we _can_ handle is
14579 when the note comes from I3. */
14580 if (from_insn == i3)
14581 place = i3;
14582 else
14583 break;
14584 }
14585 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
14586 place = i3;
14587 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
14588 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14589 place = i2;
14590 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
14591 && !(i2mod
14592 && reg_overlap_mentioned_p (XEXP (note, 0),
14593 i2mod_old_rhs)))
14594 || rtx_equal_p (XEXP (note, 0), elim_i1)
14595 || rtx_equal_p (XEXP (note, 0), elim_i0))
14596 break;
14597 tem_insn = i3;
14598 }
14599
14600 if (place == 0)
14601 {
14602 basic_block bb = this_basic_block;
14603
14604 for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
14605 {
14606 if (!NONDEBUG_INSN_P (tem_insn))
14607 {
14608 if (tem_insn == BB_HEAD (bb))
14609 break;
14610 continue;
14611 }
14612
14613 /* If the register is being set at TEM_INSN, see if that is all
14614 TEM_INSN is doing. If so, delete TEM_INSN. Otherwise, make this
14615 into a REG_UNUSED note instead. Don't delete sets to
14616 global register vars. */
14617 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
14618 || !global_regs[REGNO (XEXP (note, 0))])
14619 && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
14620 {
14621 rtx set = single_set (tem_insn);
14622 rtx inner_dest = 0;
14623 rtx_insn *cc0_setter = NULL;
14624
14625 if (set != 0)
14626 for (inner_dest = SET_DEST (set);
14627 (GET_CODE (inner_dest) == STRICT_LOW_PART
14628 || GET_CODE (inner_dest) == SUBREG
14629 || GET_CODE (inner_dest) == ZERO_EXTRACT);
14630 inner_dest = XEXP (inner_dest, 0))
14631 ;
14632
14633 /* Verify that it was the set, and not a clobber that
14634 modified the register.
14635
14636 CC0 targets must be careful to maintain setter/user
14637 pairs. If we cannot delete the setter due to side
14638 effects, mark the user with an UNUSED note instead
14639 of deleting it. */
14640
14641 if (set != 0 && ! side_effects_p (SET_SRC (set))
14642 && rtx_equal_p (XEXP (note, 0), inner_dest)
14643 && (!HAVE_cc0
14644 || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
14645 || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
14646 && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
14647 {
14648 /* Move the notes and links of TEM_INSN elsewhere.
14649 This might delete other dead insns recursively.
14650 First set the pattern to something that won't use
14651 any register. */
14652 rtx old_notes = REG_NOTES (tem_insn);
14653
14654 PATTERN (tem_insn) = pc_rtx;
14655 REG_NOTES (tem_insn) = NULL;
14656
14657 distribute_notes (old_notes, tem_insn, tem_insn, NULL,
14658 NULL_RTX, NULL_RTX, NULL_RTX);
14659 distribute_links (LOG_LINKS (tem_insn));
14660
14661 unsigned int regno = REGNO (XEXP (note, 0));
14662 reg_stat_type *rsp = &reg_stat[regno];
14663 if (rsp->last_set == tem_insn)
14664 record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14665
14666 SET_INSN_DELETED (tem_insn);
14667 if (tem_insn == i2)
14668 i2 = NULL;
14669
14670 /* Delete the setter too. */
14671 if (cc0_setter)
14672 {
14673 PATTERN (cc0_setter) = pc_rtx;
14674 old_notes = REG_NOTES (cc0_setter);
14675 REG_NOTES (cc0_setter) = NULL;
14676
14677 distribute_notes (old_notes, cc0_setter,
14678 cc0_setter, NULL,
14679 NULL_RTX, NULL_RTX, NULL_RTX);
14680 distribute_links (LOG_LINKS (cc0_setter));
14681
14682 SET_INSN_DELETED (cc0_setter);
14683 if (cc0_setter == i2)
14684 i2 = NULL;
14685 }
14686 }
14687 else
14688 {
14689 PUT_REG_NOTE_KIND (note, REG_UNUSED);
14690
14691 /* If there isn't already a REG_UNUSED note, put one
14692 here. Do not place a REG_DEAD note, even if
14693 the register is also used here; that would not
14694 match the algorithm used in lifetime analysis
14695 and can cause the consistency check in the
14696 scheduler to fail. */
14697 if (! find_regno_note (tem_insn, REG_UNUSED,
14698 REGNO (XEXP (note, 0))))
14699 place = tem_insn;
14700 break;
14701 }
14702 }
14703 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
14704 || (CALL_P (tem_insn)
14705 && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
14706 {
14707 place = tem_insn;
14708
14709 /* If we are doing a 3->2 combination, and we have a
14710 register which formerly died in i3 and was not used
14711 by i2, which now no longer dies in i3 and is used in
14712 i2 but does not die in i2, and place is between i2
14713 and i3, then we may need to move a link from place to
14714 i2. */
14715 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
14716 && from_insn
14717 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
14718 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14719 {
14720 struct insn_link *links = LOG_LINKS (place);
14721 LOG_LINKS (place) = NULL;
14722 distribute_links (links);
14723 }
14724 break;
14725 }
14726
14727 if (tem_insn == BB_HEAD (bb))
14728 break;
14729 }
14730
14731 }
14732
14733 /* If the register is set or already dead at PLACE, we needn't do
14734 anything with this note if it is still a REG_DEAD note.
14735 We check here if it is set at all, not if it is totally replaced,
14736 which is what `dead_or_set_p' checks, so also check for it being
14737 set partially. */
14738
14739 if (place && REG_NOTE_KIND (note) == REG_DEAD)
14740 {
14741 unsigned int regno = REGNO (XEXP (note, 0));
14742 reg_stat_type *rsp = &reg_stat[regno];
14743
14744 if (dead_or_set_p (place, XEXP (note, 0))
14745 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
14746 {
14747 /* Unless the register previously died in PLACE, clear
14748 last_death. [I no longer understand why this is
14749 being done.] */
14750 if (rsp->last_death != place)
14751 rsp->last_death = 0;
14752 place = 0;
14753 }
14754 else
14755 rsp->last_death = place;
14756
14757 /* If this is a death note for a hard reg that is occupying
14758 multiple registers, ensure that we are still using all
14759 parts of the object. If we find a piece of the object
14760 that is unused, we must arrange for an appropriate REG_DEAD
14761 note to be added for it. However, we can't just emit a USE
14762 and tag the note to it, since the register might actually
14763 be dead; so we recurse, and the recursive call then finds
14764 the previous insn that used this register. */
14765
14766 if (place && REG_NREGS (XEXP (note, 0)) > 1)
14767 {
14768 unsigned int endregno = END_REGNO (XEXP (note, 0));
14769 bool all_used = true;
14770 unsigned int i;
14771
14772 for (i = regno; i < endregno; i++)
14773 if ((! refers_to_regno_p (i, PATTERN (place))
14774 && ! find_regno_fusage (place, USE, i))
14775 || dead_or_set_regno_p (place, i))
14776 {
14777 all_used = false;
14778 break;
14779 }
14780
14781 if (! all_used)
14782 {
14783 /* Put only REG_DEAD notes for pieces that are
14784 not already dead or set. */
14785
14786 for (i = regno; i < endregno;
14787 i += hard_regno_nregs (i, reg_raw_mode[i]))
14788 {
14789 rtx piece = regno_reg_rtx[i];
14790 basic_block bb = this_basic_block;
14791
14792 if (! dead_or_set_p (place, piece)
14793 && ! reg_bitfield_target_p (piece,
14794 PATTERN (place)))
14795 {
14796 rtx new_note = alloc_reg_note (REG_DEAD, piece,
14797 NULL_RTX);
14798
14799 distribute_notes (new_note, place, place,
14800 NULL, NULL_RTX, NULL_RTX,
14801 NULL_RTX);
14802 }
14803 else if (! refers_to_regno_p (i, PATTERN (place))
14804 && ! find_regno_fusage (place, USE, i))
14805 for (tem_insn = PREV_INSN (place); ;
14806 tem_insn = PREV_INSN (tem_insn))
14807 {
14808 if (!NONDEBUG_INSN_P (tem_insn))
14809 {
14810 if (tem_insn == BB_HEAD (bb))
14811 break;
14812 continue;
14813 }
14814 if (dead_or_set_p (tem_insn, piece)
14815 || reg_bitfield_target_p (piece,
14816 PATTERN (tem_insn)))
14817 {
14818 add_reg_note (tem_insn, REG_UNUSED, piece);
14819 break;
14820 }
14821 }
14822 }
14823
14824 place = 0;
14825 }
14826 }
14827 }
14828 break;
14829
14830 default:
14831 /* Any other notes should not be present at this point in the
14832 compilation. */
14833 gcc_unreachable ();
14834 }
14835
14836 if (place)
14837 {
14838 XEXP (note, 1) = REG_NOTES (place);
14839 REG_NOTES (place) = note;
14840
14841 /* Set added_notes_insn to the earliest insn we added a note to. */
14842 if (added_notes_insn == 0
14843 || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place))
14844 added_notes_insn = place;
14845 }
14846
14847 if (place2)
14848 {
14849 add_shallow_copy_of_reg_note (place2, note);
14850
14851 /* Set added_notes_insn to the earliest insn we added a note to. */
14852 if (added_notes_insn == 0
14853 || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place2))
14854 added_notes_insn = place2;
14855 }
14856 }
14857 }
\f
/* Similarly to above, distribute the LOG_LINKS that used to be present on
   I3, I2, and I1 to new locations. This is also called to add a link
   pointing at I3 when I3's destination is changed. */

static void
distribute_links (struct insn_link *links)
{
  struct insn_link *link, *next_link;

  for (link = links; link; link = next_link)
    {
      rtx_insn *place = 0;
      rtx_insn *insn;
      rtx set, reg;

      next_link = link->next;

      /* If the insn that this link points to is a NOTE, ignore it. */
      if (NOTE_P (link->insn))
        continue;

      set = 0;
      rtx pat = PATTERN (link->insn);
      if (GET_CODE (pat) == SET)
        set = pat;
      else if (GET_CODE (pat) == PARALLEL)
        {
          int i;
          for (i = 0; i < XVECLEN (pat, 0); i++)
            {
              set = XVECEXP (pat, 0, i);
              if (GET_CODE (set) != SET)
                continue;

              reg = SET_DEST (set);
              while (GET_CODE (reg) == ZERO_EXTRACT
                     || GET_CODE (reg) == STRICT_LOW_PART
                     || GET_CODE (reg) == SUBREG)
                reg = XEXP (reg, 0);

              if (!REG_P (reg))
                continue;

              if (REGNO (reg) == link->regno)
                break;
            }
          if (i == XVECLEN (pat, 0))
            continue;
        }
      else
        continue;

      reg = SET_DEST (set);

      while (GET_CODE (reg) == ZERO_EXTRACT
             || GET_CODE (reg) == STRICT_LOW_PART
             || GET_CODE (reg) == SUBREG)
        reg = XEXP (reg, 0);

      if (reg == pc_rtx)
        continue;

      /* A LOG_LINK is defined as being placed on the first insn that uses
         a register and points to the insn that sets the register. Start
         searching at the next insn after the target of the link and stop
         when we reach a set of the register or the end of the basic block.

         Note that this correctly handles the link that used to point from
         I3 to I2. Also note that not much searching is typically done here
         since most links don't point very far away. */

      for (insn = NEXT_INSN (link->insn);
           (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
                     || BB_HEAD (this_basic_block->next_bb) != insn));
           insn = NEXT_INSN (insn))
        if (DEBUG_INSN_P (insn))
          continue;
        else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
          {
            if (reg_referenced_p (reg, PATTERN (insn)))
              place = insn;
            break;
          }
        else if (CALL_P (insn)
                 && find_reg_fusage (insn, USE, reg))
          {
            place = insn;
            break;
          }
        else if (INSN_P (insn) && reg_set_p (reg, insn))
          break;

      /* If we found a place to put the link, place it there unless there
         is already a link to the same insn as LINK at that point. */

      if (place)
        {
          struct insn_link *link2;

          FOR_EACH_LOG_LINK (link2, place)
            if (link2->insn == link->insn && link2->regno == link->regno)
              break;

          if (link2 == NULL)
            {
              link->next = LOG_LINKS (place);
              LOG_LINKS (place) = link;

              /* Set added_links_insn to the earliest insn we added a
                 link to. */
              if (added_links_insn == 0
                  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
                added_links_insn = place;
            }
        }
    }
}
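/* Editor's illustration, not combine.c code: the link-placement search in
   miniature. A LOG_LINK belongs on the first insn after the setter that
   uses the register, and the scan gives up once the register is set again
   or the block ends. struct toy_insn and its two flags are hypothetical
   stand-ins for NEXT_INSN, reg_referenced_p and reg_set_p. */
#if 0
#include <stdbool.h>
#include <stddef.h>

struct toy_insn
{
  struct toy_insn *next;  /* NEXT_INSN analogue, NULL at block end. */
  bool uses_reg;          /* reg_referenced_p analogue. */
  bool sets_reg;          /* reg_set_p analogue. */
};

/* Return the insn that should carry the link for the register set by
   SETTER, or NULL if the register is overwritten or the block ends
   before any use is seen. */
static struct toy_insn *
find_link_place (struct toy_insn *setter)
{
  for (struct toy_insn *insn = setter->next; insn; insn = insn->next)
    {
      if (insn->uses_reg)
        return insn;    /* First use: the link belongs here. */
      if (insn->sets_reg)
        return NULL;    /* Register reset: stop searching. */
    }
  return NULL;          /* Fell off the end of the basic block. */
}
#endif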
\f
/* Check for any register or memory mentioned in EQUIV that is not
   mentioned in EXPR. This is used to restrict EQUIV to "specializations"
   of EXPR where some registers may have been replaced by constants. */

static bool
unmentioned_reg_p (rtx equiv, rtx expr)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
    {
      const_rtx x = *iter;
      if ((REG_P (x) || MEM_P (x))
          && !reg_mentioned_p (x, expr))
        return true;
    }
  return false;
}
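/* Editor's illustration, not combine.c code: the containment rule that
   unmentioned_reg_p enforces, restated over plain strings. An EQUIV such
   as "reg100 const4" can specialize an EXPR "reg100 reg101" (every
   register of EQUIV appears in EXPR; constants are always allowed),
   whereas an EQUIV naming a fresh "reg102" is flagged. The string
   matching below is a crude hypothetical stand-in for walking
   sub-rtxes. */
#if 0
#include <stdbool.h>
#include <string.h>

/* True if some space-separated register token of EQUIV does not occur
   in EXPR. EQUIV is modified in place by strtok. */
static bool
toy_unmentioned_p (char *equiv, const char *expr)
{
  for (char *tok = strtok (equiv, " "); tok; tok = strtok (NULL, " "))
    if (strncmp (tok, "const", 5) != 0   /* Constants are always fine. */
        && strstr (expr, tok) == NULL)
      return true;
  return false;
}
#endif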
\f
DEBUG_FUNCTION void
dump_combine_stats (FILE *file)
{
  fprintf
    (file,
     ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
     combine_attempts, combine_merges, combine_extras, combine_successes);
}

void
dump_combine_total_stats (FILE *file)
{
  fprintf
    (file,
     "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
     total_attempts, total_merges, total_extras, total_successes);
}
\f
/* Make pseudo-to-pseudo copies after every hard-reg-to-pseudo copy, because
   the reg-to-reg copy can usefully combine with later instructions, but we
   do not want to combine the hard reg into later instructions, since that
   restricts register allocation. */
static void
make_more_copies (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *insn;

      FOR_BB_INSNS (bb, insn)
        {
          if (!NONDEBUG_INSN_P (insn))
            continue;

          rtx set = single_set (insn);
          if (!set)
            continue;

          rtx dest = SET_DEST (set);
          if (!(REG_P (dest) && !HARD_REGISTER_P (dest)))
            continue;

          rtx src = SET_SRC (set);
          if (!(REG_P (src) && HARD_REGISTER_P (src)))
            continue;
          if (TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src)))
            continue;

          rtx new_reg = gen_reg_rtx (GET_MODE (dest));
          rtx_insn *new_insn = gen_move_insn (new_reg, src);
          SET_SRC (set) = new_reg;
          emit_insn_before (new_insn, insn);
          df_insn_rescan (insn);
        }
    }
}
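/* Editor's illustration, not combine.c code: the transformation above in
   before/after form, with made-up register numbers. A copy

     (set (reg:SI 100) (reg:SI 0 ax))

   becomes the pair

     (set (reg:SI 101) (reg:SI 0 ax))
     (set (reg:SI 100) (reg:SI 101))

   so the hard register feeds only the fresh pseudo 101, and combine may
   fold the 101->100 copy into later users without propagating the hard
   register itself. The toy below restates just the splitting step; the
   insn record and register numbering are hypothetical. */
#if 0
#include <stdio.h>

struct toy_set { int dest, src; };  /* DEST = SRC, regs named by number. */

/* Split one hard-reg-to-pseudo copy into two copies through a fresh
   pseudo; the caller emits the returned copy first. */
static struct toy_set
split_copy (struct toy_set *insn, int *next_pseudo)
{
  struct toy_set before = { (*next_pseudo)++, insn->src };
  insn->src = before.dest;  /* Original insn now reads the new pseudo. */
  return before;
}

int
main (void)
{
  int next_pseudo = 101;
  struct toy_set insn = { 100, 0 };  /* (set (reg 100) (reg 0 ax)) */
  struct toy_set pre = split_copy (&insn, &next_pseudo);
  printf ("(set (reg %d) (reg %d))\n", pre.dest, pre.src);
  printf ("(set (reg %d) (reg %d))\n", insn.dest, insn.src);
  return 0;
}
#endif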

/* Try combining insns through substitution. */
static unsigned int
rest_of_handle_combine (void)
{
  make_more_copies ();

  df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
  df_note_add_problem ();
  df_analyze ();

  regstat_init_n_sets_and_refs ();
  reg_n_sets_max = max_reg_num ();

  int rebuild_jump_labels_after_combine
    = combine_instructions (get_insns (), max_reg_num ());

  /* Combining insns may have turned an indirect jump into a
     direct jump. Rebuild the JUMP_LABEL fields of jumping
     instructions. */
  if (rebuild_jump_labels_after_combine)
    {
      if (dom_info_available_p (CDI_DOMINATORS))
        free_dominance_info (CDI_DOMINATORS);
      timevar_push (TV_JUMP);
      rebuild_jump_labels (get_insns ());
      cleanup_cfg (0);
      timevar_pop (TV_JUMP);
    }

  regstat_free_n_sets_and_refs ();
  return 0;
}

namespace {

const pass_data pass_data_combine =
{
  RTL_PASS, /* type */
  "combine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_COMBINE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_combine : public rtl_opt_pass
{
public:
  pass_combine (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_combine, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return (optimize > 0); }
  virtual unsigned int execute (function *)
    {
      return rest_of_handle_combine ();
    }

}; // class pass_combine

} // anon namespace

rtl_opt_pass *
make_pass_combine (gcc::context *ctxt)
{
  return new pass_combine (ctxt);
}
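
/* Editor's usage note: make_pass_combine is not called by hand; the pass
   manager instantiates it from the pass list in gcc/passes.def. The
   excerpt below shows the shape of that reference (surrounding pipeline
   context abbreviated; consult passes.def for the exact position). */
#if 0
      /* gcc/passes.def (excerpt): */
      NEXT_PASS (pass_combine);
      NEXT_PASS (pass_if_after_combine);
#endif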