1 /* Convert tree expression to rtl instructions, for GNU compiler.
2 Copyright (C) 1988-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "tm_p.h"
31 #include "ssa.h"
32 #include "expmed.h"
33 #include "optabs.h"
34 #include "regs.h"
35 #include "emit-rtl.h"
36 #include "recog.h"
37 #include "cgraph.h"
38 #include "diagnostic.h"
39 #include "alias.h"
40 #include "fold-const.h"
41 #include "stor-layout.h"
42 #include "attribs.h"
43 #include "varasm.h"
44 #include "except.h"
45 #include "insn-attr.h"
46 #include "dojump.h"
47 #include "explow.h"
48 #include "calls.h"
49 #include "stmt.h"
50 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
51 #include "expr.h"
52 #include "optabs-tree.h"
53 #include "libfuncs.h"
54 #include "reload.h"
55 #include "langhooks.h"
56 #include "common/common-target.h"
57 #include "tree-dfa.h"
58 #include "tree-ssa-live.h"
59 #include "tree-outof-ssa.h"
60 #include "tree-ssa-address.h"
61 #include "builtins.h"
62 #include "ccmp.h"
63 #include "gimple-fold.h"
64 #include "rtx-vector-builder.h"
65
66
67 /* If this is nonzero, we do not bother generating VOLATILE
68 around volatile memory references, and we are willing to
69 output indirect addresses. If cse is to follow, we reject
70 indirect addresses so a useful potential cse is generated;
71 if it is used only once, instruction combination will produce
72 the same indirect address eventually. */
73 int cse_not_expected;
74
75 static bool block_move_libcall_safe_for_call_parm (void);
76 static bool emit_block_move_via_cpymem (rtx, rtx, rtx, unsigned, unsigned, HOST_WIDE_INT,
77 unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
78 unsigned HOST_WIDE_INT);
79 static void emit_block_move_via_loop (rtx, rtx, rtx, unsigned);
80 static void clear_by_pieces (rtx, unsigned HOST_WIDE_INT, unsigned int);
81 static rtx_insn *compress_float_constant (rtx, rtx);
82 static rtx get_subtarget (rtx);
83 static void store_constructor (tree, rtx, int, poly_int64, bool);
84 static rtx store_field (rtx, poly_int64, poly_int64, poly_uint64, poly_uint64,
85 machine_mode, tree, alias_set_type, bool, bool);
86
87 static unsigned HOST_WIDE_INT highest_pow2_factor_for_target (const_tree, const_tree);
88
89 static int is_aligning_offset (const_tree, const_tree);
90 static rtx reduce_to_bit_field_precision (rtx, rtx, tree);
91 static rtx do_store_flag (sepops, rtx, machine_mode);
92 #ifdef PUSH_ROUNDING
93 static void emit_single_push_insn (machine_mode, rtx, tree);
94 #endif
95 static void do_tablejump (rtx, machine_mode, rtx, rtx, rtx,
96 profile_probability);
97 static rtx const_vector_from_tree (tree);
98 static rtx const_scalar_mask_from_tree (scalar_int_mode, tree);
99 static tree tree_expr_size (const_tree);
100 static HOST_WIDE_INT int_expr_size (tree);
101 static void convert_mode_scalar (rtx, rtx, int);
102
103 \f
104 /* This is run to set up which modes can be used
105 directly in memory and to initialize the block move optab. It is run
106 at the beginning of compilation and when the target is reinitialized. */
107
108 void
109 init_expr_target (void)
110 {
111 rtx pat;
112 int num_clobbers;
113 rtx mem, mem1;
114 rtx reg;
115
116 /* Try indexing by frame ptr and try by stack ptr.
117 It is known that on the Convex the stack ptr isn't a valid index.
118 With luck, one or the other is valid on any machine. */
119 mem = gen_rtx_MEM (word_mode, stack_pointer_rtx);
120 mem1 = gen_rtx_MEM (word_mode, frame_pointer_rtx);
121
122 /* A scratch register we can modify in-place below to avoid
123 useless RTL allocations. */
124 reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
125
126 rtx_insn *insn = as_a<rtx_insn *> (rtx_alloc (INSN));
127 pat = gen_rtx_SET (NULL_RTX, NULL_RTX);
128 PATTERN (insn) = pat;
129
130 for (machine_mode mode = VOIDmode; (int) mode < NUM_MACHINE_MODES;
131 mode = (machine_mode) ((int) mode + 1))
132 {
133 int regno;
134
135 direct_load[(int) mode] = direct_store[(int) mode] = 0;
136 PUT_MODE (mem, mode);
137 PUT_MODE (mem1, mode);
138
139 /* See if there is some register that can be used in this mode and
140 directly loaded or stored from memory. */
141
142 if (mode != VOIDmode && mode != BLKmode)
143 for (regno = 0; regno < FIRST_PSEUDO_REGISTER
144 && (direct_load[(int) mode] == 0 || direct_store[(int) mode] == 0);
145 regno++)
146 {
147 if (!targetm.hard_regno_mode_ok (regno, mode))
148 continue;
149
150 set_mode_and_regno (reg, mode, regno);
151
152 SET_SRC (pat) = mem;
153 SET_DEST (pat) = reg;
154 if (recog (pat, insn, &num_clobbers) >= 0)
155 direct_load[(int) mode] = 1;
156
157 SET_SRC (pat) = mem1;
158 SET_DEST (pat) = reg;
159 if (recog (pat, insn, &num_clobbers) >= 0)
160 direct_load[(int) mode] = 1;
161
162 SET_SRC (pat) = reg;
163 SET_DEST (pat) = mem;
164 if (recog (pat, insn, &num_clobbers) >= 0)
165 direct_store[(int) mode] = 1;
166
167 SET_SRC (pat) = reg;
168 SET_DEST (pat) = mem1;
169 if (recog (pat, insn, &num_clobbers) >= 0)
170 direct_store[(int) mode] = 1;
171 }
172 }
173
174 mem = gen_rtx_MEM (VOIDmode, gen_raw_REG (Pmode, LAST_VIRTUAL_REGISTER + 1));
175
176 opt_scalar_float_mode mode_iter;
177 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_FLOAT)
178 {
179 scalar_float_mode mode = mode_iter.require ();
180 scalar_float_mode srcmode;
181 FOR_EACH_MODE_UNTIL (srcmode, mode)
182 {
183 enum insn_code ic;
184
185 ic = can_extend_p (mode, srcmode, 0);
186 if (ic == CODE_FOR_nothing)
187 continue;
188
189 PUT_MODE (mem, srcmode);
190
191 if (insn_operand_matches (ic, 1, mem))
192 float_extend_from_mem[mode][srcmode] = true;
193 }
194 }
195 }
196
197 /* This is run at the start of compiling a function. */
198
199 void
200 init_expr (void)
201 {
202 memset (&crtl->expr, 0, sizeof (crtl->expr));
203 }
204 \f
205 /* Copy data from FROM to TO, where the machine modes are not the same.
206 Both modes may be integer, or both may be floating, or both may be
207 fixed-point.
208 UNSIGNEDP should be nonzero if FROM is an unsigned type.
209 This causes zero-extension instead of sign-extension. */
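/* Illustrative example (the exact insns depend on the target): extending
   a QImode register into an SImode register with UNSIGNEDP nonzero
   typically ends up emitting something like

       (set (reg:SI 101) (zero_extend:SI (reg:QI 100)))

   while UNSIGNEDP == 0 uses sign_extend instead; same-mode copies are
   handled by a plain emit_move_insn.  */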
210
211 void
212 convert_move (rtx to, rtx from, int unsignedp)
213 {
214 machine_mode to_mode = GET_MODE (to);
215 machine_mode from_mode = GET_MODE (from);
216
217 gcc_assert (to_mode != BLKmode);
218 gcc_assert (from_mode != BLKmode);
219
220 /* If the source and destination are already the same, then there's
221 nothing to do. */
222 if (to == from)
223 return;
224
225 /* If FROM is a SUBREG that indicates that we have already done at least
226 the required extension, strip it. We don't handle such SUBREGs as
227 TO here. */
228
229 scalar_int_mode to_int_mode;
230 if (GET_CODE (from) == SUBREG
231 && SUBREG_PROMOTED_VAR_P (from)
232 && is_a <scalar_int_mode> (to_mode, &to_int_mode)
233 && (GET_MODE_PRECISION (subreg_promoted_mode (from))
234 >= GET_MODE_PRECISION (to_int_mode))
235 && SUBREG_CHECK_PROMOTED_SIGN (from, unsignedp))
236 {
237 from = gen_lowpart (to_int_mode, SUBREG_REG (from));
238 from_mode = to_int_mode;
239 }
240
241 gcc_assert (GET_CODE (to) != SUBREG || !SUBREG_PROMOTED_VAR_P (to));
242
243 if (to_mode == from_mode
244 || (from_mode == VOIDmode && CONSTANT_P (from)))
245 {
246 emit_move_insn (to, from);
247 return;
248 }
249
250 if (VECTOR_MODE_P (to_mode) || VECTOR_MODE_P (from_mode))
251 {
252 gcc_assert (known_eq (GET_MODE_BITSIZE (from_mode),
253 GET_MODE_BITSIZE (to_mode)));
254
255 if (VECTOR_MODE_P (to_mode))
256 from = simplify_gen_subreg (to_mode, from, GET_MODE (from), 0);
257 else
258 to = simplify_gen_subreg (from_mode, to, GET_MODE (to), 0);
259
260 emit_move_insn (to, from);
261 return;
262 }
263
264 if (GET_CODE (to) == CONCAT && GET_CODE (from) == CONCAT)
265 {
266 convert_move (XEXP (to, 0), XEXP (from, 0), unsignedp);
267 convert_move (XEXP (to, 1), XEXP (from, 1), unsignedp);
268 return;
269 }
270
271 convert_mode_scalar (to, from, unsignedp);
272 }
273
274 /* Like convert_move, but deals only with scalar modes. */
275
276 static void
277 convert_mode_scalar (rtx to, rtx from, int unsignedp)
278 {
279 /* Both modes should be scalar types. */
280 scalar_mode from_mode = as_a <scalar_mode> (GET_MODE (from));
281 scalar_mode to_mode = as_a <scalar_mode> (GET_MODE (to));
282 bool to_real = SCALAR_FLOAT_MODE_P (to_mode);
283 bool from_real = SCALAR_FLOAT_MODE_P (from_mode);
284 enum insn_code code;
285 rtx libcall;
286
287 gcc_assert (to_real == from_real);
288
289 /* rtx code for making an equivalent value. */
290 enum rtx_code equiv_code = (unsignedp < 0 ? UNKNOWN
291 : (unsignedp ? ZERO_EXTEND : SIGN_EXTEND));
292
293 if (to_real)
294 {
295 rtx value;
296 rtx_insn *insns;
297 convert_optab tab;
298
299 gcc_assert ((GET_MODE_PRECISION (from_mode)
300 != GET_MODE_PRECISION (to_mode))
301 || (DECIMAL_FLOAT_MODE_P (from_mode)
302 != DECIMAL_FLOAT_MODE_P (to_mode)));
303
304 if (GET_MODE_PRECISION (from_mode) == GET_MODE_PRECISION (to_mode))
305 /* Conversion between decimal float and binary float, same size. */
306 tab = DECIMAL_FLOAT_MODE_P (from_mode) ? trunc_optab : sext_optab;
307 else if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode))
308 tab = sext_optab;
309 else
310 tab = trunc_optab;
311
312 /* Try converting directly if the insn is supported. */
313
314 code = convert_optab_handler (tab, to_mode, from_mode);
315 if (code != CODE_FOR_nothing)
316 {
317 emit_unop_insn (code, to, from,
318 tab == sext_optab ? FLOAT_EXTEND : FLOAT_TRUNCATE);
319 return;
320 }
321
322 /* Otherwise use a libcall. */
323 libcall = convert_optab_libfunc (tab, to_mode, from_mode);
324
325 /* Is this conversion implemented yet? */
326 gcc_assert (libcall);
327
328 start_sequence ();
329 value = emit_library_call_value (libcall, NULL_RTX, LCT_CONST, to_mode,
330 from, from_mode);
331 insns = get_insns ();
332 end_sequence ();
333 emit_libcall_block (insns, to, value,
334 tab == trunc_optab ? gen_rtx_FLOAT_TRUNCATE (to_mode,
335 from)
336 : gen_rtx_FLOAT_EXTEND (to_mode, from));
337 return;
338 }
339
340 /* Handle pointer conversion. */ /* SPEE 900220. */
341 /* If the target has a converter from FROM_MODE to TO_MODE, use it. */
342 {
343 convert_optab ctab;
344
345 if (GET_MODE_PRECISION (from_mode) > GET_MODE_PRECISION (to_mode))
346 ctab = trunc_optab;
347 else if (unsignedp)
348 ctab = zext_optab;
349 else
350 ctab = sext_optab;
351
352 if (convert_optab_handler (ctab, to_mode, from_mode)
353 != CODE_FOR_nothing)
354 {
355 emit_unop_insn (convert_optab_handler (ctab, to_mode, from_mode),
356 to, from, UNKNOWN);
357 return;
358 }
359 }
360
361 /* Targets are expected to provide conversion insns between PxImode and
362 xImode for all MODE_PARTIAL_INT modes they use, but no others. */
363 if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT)
364 {
365 scalar_int_mode full_mode
366 = smallest_int_mode_for_size (GET_MODE_BITSIZE (to_mode));
367
368 gcc_assert (convert_optab_handler (trunc_optab, to_mode, full_mode)
369 != CODE_FOR_nothing);
370
371 if (full_mode != from_mode)
372 from = convert_to_mode (full_mode, from, unsignedp);
373 emit_unop_insn (convert_optab_handler (trunc_optab, to_mode, full_mode),
374 to, from, UNKNOWN);
375 return;
376 }
377 if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT)
378 {
379 rtx new_from;
380 scalar_int_mode full_mode
381 = smallest_int_mode_for_size (GET_MODE_BITSIZE (from_mode));
382 convert_optab ctab = unsignedp ? zext_optab : sext_optab;
383 enum insn_code icode;
384
385 icode = convert_optab_handler (ctab, full_mode, from_mode);
386 gcc_assert (icode != CODE_FOR_nothing);
387
388 if (to_mode == full_mode)
389 {
390 emit_unop_insn (icode, to, from, UNKNOWN);
391 return;
392 }
393
394 new_from = gen_reg_rtx (full_mode);
395 emit_unop_insn (icode, new_from, from, UNKNOWN);
396
397 /* else proceed to integer conversions below. */
398 from_mode = full_mode;
399 from = new_from;
400 }
401
402 /* Make sure both are fixed-point modes or both are not. */
403 gcc_assert (ALL_SCALAR_FIXED_POINT_MODE_P (from_mode) ==
404 ALL_SCALAR_FIXED_POINT_MODE_P (to_mode));
405 if (ALL_SCALAR_FIXED_POINT_MODE_P (from_mode))
406 {
407 /* If we widen from_mode to to_mode and they are in the same class,
408 we won't saturate the result.
409 Otherwise, always saturate the result to play safe. */
410 if (GET_MODE_CLASS (from_mode) == GET_MODE_CLASS (to_mode)
411 && GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
412 expand_fixed_convert (to, from, 0, 0);
413 else
414 expand_fixed_convert (to, from, 0, 1);
415 return;
416 }
417
418 /* Now both modes are integers. */
419
420 /* Handle expanding beyond a word. */
421 if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode)
422 && GET_MODE_PRECISION (to_mode) > BITS_PER_WORD)
423 {
424 rtx_insn *insns;
425 rtx lowpart;
426 rtx fill_value;
427 rtx lowfrom;
428 int i;
429 scalar_mode lowpart_mode;
430 int nwords = CEIL (GET_MODE_SIZE (to_mode), UNITS_PER_WORD);
431
432 /* Try converting directly if the insn is supported. */
433 if ((code = can_extend_p (to_mode, from_mode, unsignedp))
434 != CODE_FOR_nothing)
435 {
436 /* If FROM is a SUBREG, put it into a register. Do this
437 so that we always generate the same set of insns for
438 better cse'ing; if an intermediate assignment occurred,
439 we won't be doing the operation directly on the SUBREG. */
440 if (optimize > 0 && GET_CODE (from) == SUBREG)
441 from = force_reg (from_mode, from);
442 emit_unop_insn (code, to, from, equiv_code);
443 return;
444 }
445 /* Next, try converting via full word. */
446 else if (GET_MODE_PRECISION (from_mode) < BITS_PER_WORD
447 && ((code = can_extend_p (to_mode, word_mode, unsignedp))
448 != CODE_FOR_nothing))
449 {
450 rtx word_to = gen_reg_rtx (word_mode);
451 if (REG_P (to))
452 {
453 if (reg_overlap_mentioned_p (to, from))
454 from = force_reg (from_mode, from);
455 emit_clobber (to);
456 }
457 convert_move (word_to, from, unsignedp);
458 emit_unop_insn (code, to, word_to, equiv_code);
459 return;
460 }
461
462 /* No special multiword conversion insn; do it by hand. */
463 start_sequence ();
464
465 /* Since we will turn this into a no conflict block, we must ensure
 466 	     the source does not overlap the target, so force it into an isolated
 467 	     register when it might.  Likewise for any MEM input, since the
468 conversion sequence might require several references to it and we
469 must ensure we're getting the same value every time. */
470
471 if (MEM_P (from) || reg_overlap_mentioned_p (to, from))
472 from = force_reg (from_mode, from);
473
474 /* Get a copy of FROM widened to a word, if necessary. */
475 if (GET_MODE_PRECISION (from_mode) < BITS_PER_WORD)
476 lowpart_mode = word_mode;
477 else
478 lowpart_mode = from_mode;
479
480 lowfrom = convert_to_mode (lowpart_mode, from, unsignedp);
481
482 lowpart = gen_lowpart (lowpart_mode, to);
483 emit_move_insn (lowpart, lowfrom);
484
485 /* Compute the value to put in each remaining word. */
486 if (unsignedp)
487 fill_value = const0_rtx;
488 else
489 fill_value = emit_store_flag_force (gen_reg_rtx (word_mode),
490 LT, lowfrom, const0_rtx,
491 lowpart_mode, 0, -1);
492
493 /* Fill the remaining words. */
494 for (i = GET_MODE_SIZE (lowpart_mode) / UNITS_PER_WORD; i < nwords; i++)
495 {
496 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
497 rtx subword = operand_subword (to, index, 1, to_mode);
498
499 gcc_assert (subword);
500
501 if (fill_value != subword)
502 emit_move_insn (subword, fill_value);
503 }
504
505 insns = get_insns ();
506 end_sequence ();
507
508 emit_insn (insns);
509 return;
510 }
511
512 /* Truncating multi-word to a word or less. */
513 if (GET_MODE_PRECISION (from_mode) > BITS_PER_WORD
514 && GET_MODE_PRECISION (to_mode) <= BITS_PER_WORD)
515 {
516 if (!((MEM_P (from)
517 && ! MEM_VOLATILE_P (from)
518 && direct_load[(int) to_mode]
519 && ! mode_dependent_address_p (XEXP (from, 0),
520 MEM_ADDR_SPACE (from)))
521 || REG_P (from)
522 || GET_CODE (from) == SUBREG))
523 from = force_reg (from_mode, from);
524 convert_move (to, gen_lowpart (word_mode, from), 0);
525 return;
526 }
527
528 /* Now follow all the conversions between integers
529 no more than a word long. */
530
531 /* For truncation, usually we can just refer to FROM in a narrower mode. */
532 if (GET_MODE_BITSIZE (to_mode) < GET_MODE_BITSIZE (from_mode)
533 && TRULY_NOOP_TRUNCATION_MODES_P (to_mode, from_mode))
534 {
535 if (!((MEM_P (from)
536 && ! MEM_VOLATILE_P (from)
537 && direct_load[(int) to_mode]
538 && ! mode_dependent_address_p (XEXP (from, 0),
539 MEM_ADDR_SPACE (from)))
540 || REG_P (from)
541 || GET_CODE (from) == SUBREG))
542 from = force_reg (from_mode, from);
543 if (REG_P (from) && REGNO (from) < FIRST_PSEUDO_REGISTER
544 && !targetm.hard_regno_mode_ok (REGNO (from), to_mode))
545 from = copy_to_reg (from);
546 emit_move_insn (to, gen_lowpart (to_mode, from));
547 return;
548 }
549
550 /* Handle extension. */
551 if (GET_MODE_PRECISION (to_mode) > GET_MODE_PRECISION (from_mode))
552 {
553 /* Convert directly if that works. */
554 if ((code = can_extend_p (to_mode, from_mode, unsignedp))
555 != CODE_FOR_nothing)
556 {
557 emit_unop_insn (code, to, from, equiv_code);
558 return;
559 }
560 else
561 {
562 scalar_mode intermediate;
563 rtx tmp;
564 int shift_amount;
565
566 /* Search for a mode to convert via. */
567 opt_scalar_mode intermediate_iter;
568 FOR_EACH_MODE_FROM (intermediate_iter, from_mode)
569 {
570 scalar_mode intermediate = intermediate_iter.require ();
571 if (((can_extend_p (to_mode, intermediate, unsignedp)
572 != CODE_FOR_nothing)
573 || (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (intermediate)
574 && TRULY_NOOP_TRUNCATION_MODES_P (to_mode,
575 intermediate)))
576 && (can_extend_p (intermediate, from_mode, unsignedp)
577 != CODE_FOR_nothing))
578 {
579 convert_move (to, convert_to_mode (intermediate, from,
580 unsignedp), unsignedp);
581 return;
582 }
583 }
584
585 /* No suitable intermediate mode.
586 Generate what we need with shifts. */
587 shift_amount = (GET_MODE_PRECISION (to_mode)
588 - GET_MODE_PRECISION (from_mode));
589 from = gen_lowpart (to_mode, force_reg (from_mode, from));
590 tmp = expand_shift (LSHIFT_EXPR, to_mode, from, shift_amount,
591 to, unsignedp);
592 tmp = expand_shift (RSHIFT_EXPR, to_mode, tmp, shift_amount,
593 to, unsignedp);
594 if (tmp != to)
595 emit_move_insn (to, tmp);
596 return;
597 }
598 }
599
600 /* Support special truncate insns for certain modes. */
601 if (convert_optab_handler (trunc_optab, to_mode,
602 from_mode) != CODE_FOR_nothing)
603 {
604 emit_unop_insn (convert_optab_handler (trunc_optab, to_mode, from_mode),
605 to, from, UNKNOWN);
606 return;
607 }
608
609 /* Handle truncation of volatile memrefs, and so on;
610 the things that couldn't be truncated directly,
611 and for which there was no special instruction.
612
613 ??? Code above formerly short-circuited this, for most integer
614 mode pairs, with a force_reg in from_mode followed by a recursive
615 call to this routine. Appears always to have been wrong. */
616 if (GET_MODE_PRECISION (to_mode) < GET_MODE_PRECISION (from_mode))
617 {
618 rtx temp = force_reg (to_mode, gen_lowpart (to_mode, from));
619 emit_move_insn (to, temp);
620 return;
621 }
622
623 /* Mode combination is not recognized. */
624 gcc_unreachable ();
625 }
626
627 /* Return an rtx for a value that would result
628 from converting X to mode MODE.
629 Both X and MODE may be floating, or both integer.
630 UNSIGNEDP is nonzero if X is an unsigned value.
631 This can be done by referring to a part of X in place
632 or by copying to a new temporary with conversion. */
633
634 rtx
635 convert_to_mode (machine_mode mode, rtx x, int unsignedp)
636 {
637 return convert_modes (mode, VOIDmode, x, unsignedp);
638 }
639
640 /* Return an rtx for a value that would result
641 from converting X from mode OLDMODE to mode MODE.
642 Both modes may be floating, or both integer.
643 UNSIGNEDP is nonzero if X is an unsigned value.
644
645 This can be done by referring to a part of X in place
646 or by copying to a new temporary with conversion.
647
648 You can give VOIDmode for OLDMODE, if you are sure X has a nonvoid mode. */
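/* Hypothetical usage sketch: a caller holding a QImode value X that
   needs it zero-extended to SImode could write

       rtx wide = convert_modes (SImode, QImode, x, 1);

   For a constant X this folds directly to a suitably extended immediate;
   otherwise it emits a converting move into a fresh pseudo register and
   returns that.  */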
649
650 rtx
651 convert_modes (machine_mode mode, machine_mode oldmode, rtx x, int unsignedp)
652 {
653 rtx temp;
654 scalar_int_mode int_mode;
655
656 /* If FROM is a SUBREG that indicates that we have already done at least
657 the required extension, strip it. */
658
659 if (GET_CODE (x) == SUBREG
660 && SUBREG_PROMOTED_VAR_P (x)
661 && is_a <scalar_int_mode> (mode, &int_mode)
662 && (GET_MODE_PRECISION (subreg_promoted_mode (x))
663 >= GET_MODE_PRECISION (int_mode))
664 && SUBREG_CHECK_PROMOTED_SIGN (x, unsignedp))
665 x = gen_lowpart (int_mode, SUBREG_REG (x));
666
667 if (GET_MODE (x) != VOIDmode)
668 oldmode = GET_MODE (x);
669
670 if (mode == oldmode)
671 return x;
672
673 if (CONST_SCALAR_INT_P (x)
674 && is_int_mode (mode, &int_mode))
675 {
676 /* If the caller did not tell us the old mode, then there is not
677 much to do with respect to canonicalization. We have to
678 assume that all the bits are significant. */
679 if (GET_MODE_CLASS (oldmode) != MODE_INT)
680 oldmode = MAX_MODE_INT;
681 wide_int w = wide_int::from (rtx_mode_t (x, oldmode),
682 GET_MODE_PRECISION (int_mode),
683 unsignedp ? UNSIGNED : SIGNED);
684 return immed_wide_int_const (w, int_mode);
685 }
686
687 /* We can do this with a gen_lowpart if both desired and current modes
688 are integer, and this is either a constant integer, a register, or a
689 non-volatile MEM. */
690 scalar_int_mode int_oldmode;
691 if (is_int_mode (mode, &int_mode)
692 && is_int_mode (oldmode, &int_oldmode)
693 && GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (int_oldmode)
694 && ((MEM_P (x) && !MEM_VOLATILE_P (x) && direct_load[(int) int_mode])
695 || CONST_POLY_INT_P (x)
696 || (REG_P (x)
697 && (!HARD_REGISTER_P (x)
698 || targetm.hard_regno_mode_ok (REGNO (x), int_mode))
699 && TRULY_NOOP_TRUNCATION_MODES_P (int_mode, GET_MODE (x)))))
700 return gen_lowpart (int_mode, x);
701
 702 	  /* Converting from an integer constant into MODE is always equivalent to a
 703 	     subreg operation.  */
704 if (VECTOR_MODE_P (mode) && GET_MODE (x) == VOIDmode)
705 {
706 gcc_assert (known_eq (GET_MODE_BITSIZE (mode),
707 GET_MODE_BITSIZE (oldmode)));
708 return simplify_gen_subreg (mode, x, oldmode, 0);
709 }
710
711 temp = gen_reg_rtx (mode);
712 convert_move (temp, x, unsignedp);
713 return temp;
714 }
715 \f
716 /* Return the largest alignment we can use for doing a move (or store)
717 of MAX_PIECES. ALIGN is the largest alignment we could use. */
718
719 static unsigned int
720 alignment_for_piecewise_move (unsigned int max_pieces, unsigned int align)
721 {
722 scalar_int_mode tmode
723 = int_mode_for_size (max_pieces * BITS_PER_UNIT, 1).require ();
724
725 if (align >= GET_MODE_ALIGNMENT (tmode))
726 align = GET_MODE_ALIGNMENT (tmode);
727 else
728 {
729 scalar_int_mode xmode = NARROWEST_INT_MODE;
730 opt_scalar_int_mode mode_iter;
731 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
732 {
733 tmode = mode_iter.require ();
734 if (GET_MODE_SIZE (tmode) > max_pieces
735 || targetm.slow_unaligned_access (tmode, align))
736 break;
737 xmode = tmode;
738 }
739
740 align = MAX (align, GET_MODE_ALIGNMENT (xmode));
741 }
742
743 return align;
744 }
745
746 /* Return the widest integer mode that is narrower than SIZE bytes. */
747
748 static scalar_int_mode
749 widest_int_mode_for_size (unsigned int size)
750 {
751 scalar_int_mode result = NARROWEST_INT_MODE;
752
753 gcc_checking_assert (size > 1);
754
755 opt_scalar_int_mode tmode;
756 FOR_EACH_MODE_IN_CLASS (tmode, MODE_INT)
757 if (GET_MODE_SIZE (tmode.require ()) < size)
758 result = tmode.require ();
759
760 return result;
761 }
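/* For example, assuming the usual QImode/HImode/SImode/DImode integer
   modes of 1/2/4/8 bytes: widest_int_mode_for_size (8) yields SImode
   (the widest integer mode strictly narrower than 8 bytes),
   widest_int_mode_for_size (5) also yields SImode, and
   widest_int_mode_for_size (2) yields QImode.  */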
762
763 /* Determine whether an operation OP on LEN bytes with alignment ALIGN can
764 and should be performed piecewise. */
765
766 static bool
767 can_do_by_pieces (unsigned HOST_WIDE_INT len, unsigned int align,
768 enum by_pieces_operation op)
769 {
770 return targetm.use_by_pieces_infrastructure_p (len, align, op,
771 optimize_insn_for_speed_p ());
772 }
773
774 /* Determine whether the LEN bytes can be moved by using several move
775 instructions. Return nonzero if a call to move_by_pieces should
776 succeed. */
777
778 bool
779 can_move_by_pieces (unsigned HOST_WIDE_INT len, unsigned int align)
780 {
781 return can_do_by_pieces (len, align, MOVE_BY_PIECES);
782 }
783
784 /* Return number of insns required to perform operation OP by pieces
785 for L bytes. ALIGN (in bits) is maximum alignment we can assume. */
786
787 unsigned HOST_WIDE_INT
788 by_pieces_ninsns (unsigned HOST_WIDE_INT l, unsigned int align,
789 unsigned int max_size, by_pieces_operation op)
790 {
791 unsigned HOST_WIDE_INT n_insns = 0;
792
793 align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
794
795 while (max_size > 1 && l > 0)
796 {
797 scalar_int_mode mode = widest_int_mode_for_size (max_size);
798 enum insn_code icode;
799
800 unsigned int modesize = GET_MODE_SIZE (mode);
801
802 icode = optab_handler (mov_optab, mode);
803 if (icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode))
804 {
805 unsigned HOST_WIDE_INT n_pieces = l / modesize;
806 l %= modesize;
807 switch (op)
808 {
809 default:
810 n_insns += n_pieces;
811 break;
812
813 case COMPARE_BY_PIECES:
814 int batch = targetm.compare_by_pieces_branch_ratio (mode);
815 int batch_ops = 4 * batch - 1;
816 unsigned HOST_WIDE_INT full = n_pieces / batch;
817 n_insns += full * batch_ops;
818 if (n_pieces % batch != 0)
819 n_insns++;
820 break;
821
822 }
823 }
824 max_size = modesize;
825 }
826
827 gcc_assert (!l);
828 return n_insns;
829 }
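/* Worked example (assuming QI/HI/SI/DI modes of 1/2/4/8 bytes, all with
   move patterns usable at the given alignment): for L == 15 and
   MAX_SIZE == 16 the loop above uses one DImode piece (7 bytes left),
   one SImode piece (3 left), one HImode piece (1 left) and one QImode
   piece, so a MOVE_BY_PIECES operation counts as 4 insns.  */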
830
831 /* Used when performing piecewise block operations, holds information
832 about one of the memory objects involved. The member functions
833 can be used to generate code for loading from the object and
834 updating the address when iterating. */
835
836 class pieces_addr
837 {
838 /* The object being referenced, a MEM. Can be NULL_RTX to indicate
839 stack pushes. */
840 rtx m_obj;
841 /* The address of the object. Can differ from that seen in the
842 MEM rtx if we copied the address to a register. */
843 rtx m_addr;
844 /* Nonzero if the address on the object has an autoincrement already,
845 signifies whether that was an increment or decrement. */
846 signed char m_addr_inc;
847 /* Nonzero if we intend to use autoinc without the address already
848 having autoinc form. We will insert add insns around each memory
849 reference, expecting later passes to form autoinc addressing modes.
850 The only supported options are predecrement and postincrement. */
851 signed char m_explicit_inc;
852 /* True if we have either of the two possible cases of using
853 autoincrement. */
854 bool m_auto;
855 /* True if this is an address to be used for load operations rather
856 than stores. */
857 bool m_is_load;
858
859 /* Optionally, a function to obtain constants for any given offset into
860 the objects, and data associated with it. */
861 by_pieces_constfn m_constfn;
862 void *m_cfndata;
863 public:
864 pieces_addr (rtx, bool, by_pieces_constfn, void *);
865 rtx adjust (scalar_int_mode, HOST_WIDE_INT);
866 void increment_address (HOST_WIDE_INT);
867 void maybe_predec (HOST_WIDE_INT);
868 void maybe_postinc (HOST_WIDE_INT);
869 void decide_autoinc (machine_mode, bool, HOST_WIDE_INT);
870 int get_addr_inc ()
871 {
872 return m_addr_inc;
873 }
874 };
875
876 /* Initialize a pieces_addr structure from an object OBJ. IS_LOAD is
877 true if the operation to be performed on this object is a load
878 rather than a store. For stores, OBJ can be NULL, in which case we
879 assume the operation is a stack push. For loads, the optional
880 CONSTFN and its associated CFNDATA can be used in place of the
881 memory load. */
882
883 pieces_addr::pieces_addr (rtx obj, bool is_load, by_pieces_constfn constfn,
884 void *cfndata)
885 : m_obj (obj), m_is_load (is_load), m_constfn (constfn), m_cfndata (cfndata)
886 {
887 m_addr_inc = 0;
888 m_auto = false;
889 if (obj)
890 {
891 rtx addr = XEXP (obj, 0);
892 rtx_code code = GET_CODE (addr);
893 m_addr = addr;
894 bool dec = code == PRE_DEC || code == POST_DEC;
895 bool inc = code == PRE_INC || code == POST_INC;
896 m_auto = inc || dec;
897 if (m_auto)
898 m_addr_inc = dec ? -1 : 1;
899
900 /* While we have always looked for these codes here, the code
901 implementing the memory operation has never handled them.
902 Support could be added later if necessary or beneficial. */
903 gcc_assert (code != PRE_INC && code != POST_DEC);
904 }
905 else
906 {
907 m_addr = NULL_RTX;
908 if (!is_load)
909 {
910 m_auto = true;
911 if (STACK_GROWS_DOWNWARD)
912 m_addr_inc = -1;
913 else
914 m_addr_inc = 1;
915 }
916 else
917 gcc_assert (constfn != NULL);
918 }
919 m_explicit_inc = 0;
920 if (constfn)
921 gcc_assert (is_load);
922 }
923
924 /* Decide whether to use autoinc for an address involved in a memory op.
925 MODE is the mode of the accesses, REVERSE is true if we've decided to
926 perform the operation starting from the end, and LEN is the length of
927 the operation. Don't override an earlier decision to set m_auto. */
928
929 void
930 pieces_addr::decide_autoinc (machine_mode ARG_UNUSED (mode), bool reverse,
931 HOST_WIDE_INT len)
932 {
933 if (m_auto || m_obj == NULL_RTX)
934 return;
935
936 bool use_predec = (m_is_load
937 ? USE_LOAD_PRE_DECREMENT (mode)
938 : USE_STORE_PRE_DECREMENT (mode));
939 bool use_postinc = (m_is_load
940 ? USE_LOAD_POST_INCREMENT (mode)
941 : USE_STORE_POST_INCREMENT (mode));
942 machine_mode addr_mode = get_address_mode (m_obj);
943
944 if (use_predec && reverse)
945 {
946 m_addr = copy_to_mode_reg (addr_mode,
947 plus_constant (addr_mode,
948 m_addr, len));
949 m_auto = true;
950 m_explicit_inc = -1;
951 }
952 else if (use_postinc && !reverse)
953 {
954 m_addr = copy_to_mode_reg (addr_mode, m_addr);
955 m_auto = true;
956 m_explicit_inc = 1;
957 }
958 else if (CONSTANT_P (m_addr))
959 m_addr = copy_to_mode_reg (addr_mode, m_addr);
960 }
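/* Note that when m_explicit_inc is set, the memory references themselves
   keep plain (mem (reg)) addresses; maybe_predec and maybe_postinc then
   emit an explicit add such as

       (set (reg:DI addr) (plus:DI (reg:DI addr) (const_int 8)))

   around each access (mode and constant shown only for illustration),
   relying on later passes to recombine these into auto-increment
   addresses where the target supports them.  */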
961
962 /* Adjust the address to refer to the data at OFFSET in MODE. If we
963 are using autoincrement for this address, we don't add the offset,
964 but we still modify the MEM's properties. */
965
966 rtx
967 pieces_addr::adjust (scalar_int_mode mode, HOST_WIDE_INT offset)
968 {
969 if (m_constfn)
970 return m_constfn (m_cfndata, offset, mode);
971 if (m_obj == NULL_RTX)
972 return NULL_RTX;
973 if (m_auto)
974 return adjust_automodify_address (m_obj, mode, m_addr, offset);
975 else
976 return adjust_address (m_obj, mode, offset);
977 }
978
979 /* Emit an add instruction to increment the address by SIZE. */
980
981 void
982 pieces_addr::increment_address (HOST_WIDE_INT size)
983 {
984 rtx amount = gen_int_mode (size, GET_MODE (m_addr));
985 emit_insn (gen_add2_insn (m_addr, amount));
986 }
987
988 /* If we are supposed to decrement the address after each access, emit code
989 to do so now. Increment by SIZE (which has should have the correct sign
990 already). */
991
992 void
993 pieces_addr::maybe_predec (HOST_WIDE_INT size)
994 {
995 if (m_explicit_inc >= 0)
996 return;
997 gcc_assert (HAVE_PRE_DECREMENT);
998 increment_address (size);
999 }
1000
1001 	/* If we are supposed to increment the address after each access, emit code
1002 to do so now. Increment by SIZE. */
1003
1004 void
1005 pieces_addr::maybe_postinc (HOST_WIDE_INT size)
1006 {
1007 if (m_explicit_inc <= 0)
1008 return;
1009 gcc_assert (HAVE_POST_INCREMENT);
1010 increment_address (size);
1011 }
1012
1013 /* This structure is used by do_op_by_pieces to describe the operation
1014 to be performed. */
1015
1016 class op_by_pieces_d
1017 {
1018 protected:
1019 pieces_addr m_to, m_from;
1020 unsigned HOST_WIDE_INT m_len;
1021 HOST_WIDE_INT m_offset;
1022 unsigned int m_align;
1023 unsigned int m_max_size;
1024 bool m_reverse;
1025
1026 	  /* Virtual functions, overridden by derived classes for the specific
1027 operation. */
1028 virtual void generate (rtx, rtx, machine_mode) = 0;
1029 virtual bool prepare_mode (machine_mode, unsigned int) = 0;
1030 virtual void finish_mode (machine_mode)
1031 {
1032 }
1033
1034 public:
1035 op_by_pieces_d (rtx, bool, rtx, bool, by_pieces_constfn, void *,
1036 unsigned HOST_WIDE_INT, unsigned int);
1037 void run ();
1038 };
1039
1040 /* The constructor for an op_by_pieces_d structure. We require two
1041 objects named TO and FROM, which are identified as loads or stores
1042 by TO_LOAD and FROM_LOAD. If FROM is a load, the optional FROM_CFN
1043 and its associated FROM_CFN_DATA can be used to replace loads with
1044 constant values. LEN describes the length of the operation. */
1045
1046 op_by_pieces_d::op_by_pieces_d (rtx to, bool to_load,
1047 rtx from, bool from_load,
1048 by_pieces_constfn from_cfn,
1049 void *from_cfn_data,
1050 unsigned HOST_WIDE_INT len,
1051 unsigned int align)
1052 : m_to (to, to_load, NULL, NULL),
1053 m_from (from, from_load, from_cfn, from_cfn_data),
1054 m_len (len), m_max_size (MOVE_MAX_PIECES + 1)
1055 {
1056 int toi = m_to.get_addr_inc ();
1057 int fromi = m_from.get_addr_inc ();
1058 if (toi >= 0 && fromi >= 0)
1059 m_reverse = false;
1060 else if (toi <= 0 && fromi <= 0)
1061 m_reverse = true;
1062 else
1063 gcc_unreachable ();
1064
1065 m_offset = m_reverse ? len : 0;
1066 align = MIN (to ? MEM_ALIGN (to) : align,
1067 from ? MEM_ALIGN (from) : align);
1068
1069 /* If copying requires more than two move insns,
1070 copy addresses to registers (to make displacements shorter)
1071 and use post-increment if available. */
1072 if (by_pieces_ninsns (len, align, m_max_size, MOVE_BY_PIECES) > 2)
1073 {
1074 	      /* Find the widest mode that might be used for any piece.  */
1075 scalar_int_mode mode = widest_int_mode_for_size (m_max_size);
1076
1077 m_from.decide_autoinc (mode, m_reverse, len);
1078 m_to.decide_autoinc (mode, m_reverse, len);
1079 }
1080
1081 align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
1082 m_align = align;
1083 }
1084
1085 /* This function contains the main loop used for expanding a block
1086 operation. First move what we can in the largest integer mode,
1087 	   then go to successively smaller modes.  For every access, call
1088 	   the virtual function generate () with the two operands and the mode.  */
1089
1090 void
1091 op_by_pieces_d::run ()
1092 {
1093 while (m_max_size > 1 && m_len > 0)
1094 {
1095 scalar_int_mode mode = widest_int_mode_for_size (m_max_size);
1096
1097 if (prepare_mode (mode, m_align))
1098 {
1099 unsigned int size = GET_MODE_SIZE (mode);
1100 rtx to1 = NULL_RTX, from1;
1101
1102 while (m_len >= size)
1103 {
1104 if (m_reverse)
1105 m_offset -= size;
1106
1107 to1 = m_to.adjust (mode, m_offset);
1108 from1 = m_from.adjust (mode, m_offset);
1109
1110 m_to.maybe_predec (-(HOST_WIDE_INT)size);
1111 m_from.maybe_predec (-(HOST_WIDE_INT)size);
1112
1113 generate (to1, from1, mode);
1114
1115 m_to.maybe_postinc (size);
1116 m_from.maybe_postinc (size);
1117
1118 if (!m_reverse)
1119 m_offset += size;
1120
1121 m_len -= size;
1122 }
1123
1124 finish_mode (mode);
1125 }
1126
1127 m_max_size = GET_MODE_SIZE (mode);
1128 }
1129
1130 /* The code above should have handled everything. */
1131 gcc_assert (!m_len);
1132 }
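/* Worked example (assuming MOVE_MAX_PIECES == 8 and QI/HI/SI/DI modes of
   1/2/4/8 bytes, each accepted by prepare_mode): for m_len == 11 and
   m_reverse false, the loop above performs one DImode access at offset 0,
   one HImode access at offset 8 and one QImode access at offset 10,
   calling generate () once per access until m_len reaches 0.  */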
1133
1134 /* Derived class from op_by_pieces_d, providing support for block move
1135 operations. */
1136
1137 class move_by_pieces_d : public op_by_pieces_d
1138 {
1139 insn_gen_fn m_gen_fun;
1140 void generate (rtx, rtx, machine_mode);
1141 bool prepare_mode (machine_mode, unsigned int);
1142
1143 public:
1144 move_by_pieces_d (rtx to, rtx from, unsigned HOST_WIDE_INT len,
1145 unsigned int align)
1146 : op_by_pieces_d (to, false, from, true, NULL, NULL, len, align)
1147 {
1148 }
1149 rtx finish_retmode (memop_ret);
1150 };
1151
1152 /* Return true if MODE can be used for a set of copies, given an
1153 alignment ALIGN. Prepare whatever data is necessary for later
1154 calls to generate. */
1155
1156 bool
1157 move_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align)
1158 {
1159 insn_code icode = optab_handler (mov_optab, mode);
1160 m_gen_fun = GEN_FCN (icode);
1161 return icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode);
1162 }
1163
1164 	/* A callback used when iterating for a move_by_pieces operation.
1165 	   OP0 is the destination and OP1 the source of one piece of the move,
1166 	   both in MODE.  If OP0 is NULL, this means we should generate a
1167 	   push; otherwise the move is emitted using the insn gen function
1168 	   selected earlier by prepare_mode.  */
1169
1170 void
1171 move_by_pieces_d::generate (rtx op0, rtx op1,
1172 machine_mode mode ATTRIBUTE_UNUSED)
1173 {
1174 #ifdef PUSH_ROUNDING
1175 if (op0 == NULL_RTX)
1176 {
1177 emit_single_push_insn (mode, op1, NULL);
1178 return;
1179 }
1180 #endif
1181 emit_insn (m_gen_fun (op0, op1));
1182 }
1183
1184 /* Perform the final adjustment at the end of a string to obtain the
1185 correct return value for the block operation.
1186 Return value is based on RETMODE argument. */
1187
1188 rtx
1189 move_by_pieces_d::finish_retmode (memop_ret retmode)
1190 {
1191 gcc_assert (!m_reverse);
1192 if (retmode == RETURN_END_MINUS_ONE)
1193 {
1194 m_to.maybe_postinc (-1);
1195 --m_offset;
1196 }
1197 return m_to.adjust (QImode, m_offset);
1198 }
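/* The two non-RETURN_BEGIN conventions differ by one byte: RETURN_END is
   understood as the address just past the last byte written
   (mempcpy-style), while RETURN_END_MINUS_ONE is the address of the last
   byte written itself (stpcpy-style), hence the adjustment by one
   above.  */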
1199
1200 /* Generate several move instructions to copy LEN bytes from block FROM to
1201 block TO. (These are MEM rtx's with BLKmode).
1202
1203 If PUSH_ROUNDING is defined and TO is NULL, emit_single_push_insn is
1204 used to push FROM to the stack.
1205
1206 ALIGN is maximum stack alignment we can assume.
1207
1208 Return value is based on RETMODE argument. */
1209
1210 rtx
1211 move_by_pieces (rtx to, rtx from, unsigned HOST_WIDE_INT len,
1212 unsigned int align, memop_ret retmode)
1213 {
1214 #ifndef PUSH_ROUNDING
1215 if (to == NULL)
1216 gcc_unreachable ();
1217 #endif
1218
1219 move_by_pieces_d data (to, from, len, align);
1220
1221 data.run ();
1222
1223 if (retmode != RETURN_BEGIN)
1224 return data.finish_retmode (retmode);
1225 else
1226 return to;
1227 }
1228
1229 	/* Derived class from op_by_pieces_d, providing support for block store
1230 operations. */
1231
1232 class store_by_pieces_d : public op_by_pieces_d
1233 {
1234 insn_gen_fn m_gen_fun;
1235 void generate (rtx, rtx, machine_mode);
1236 bool prepare_mode (machine_mode, unsigned int);
1237
1238 public:
1239 store_by_pieces_d (rtx to, by_pieces_constfn cfn, void *cfn_data,
1240 unsigned HOST_WIDE_INT len, unsigned int align)
1241 : op_by_pieces_d (to, false, NULL_RTX, true, cfn, cfn_data, len, align)
1242 {
1243 }
1244 rtx finish_retmode (memop_ret);
1245 };
1246
1247 /* Return true if MODE can be used for a set of stores, given an
1248 alignment ALIGN. Prepare whatever data is necessary for later
1249 calls to generate. */
1250
1251 bool
1252 store_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align)
1253 {
1254 insn_code icode = optab_handler (mov_optab, mode);
1255 m_gen_fun = GEN_FCN (icode);
1256 return icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode);
1257 }
1258
1259 	/* A callback used when iterating for a store_by_pieces operation.
1260 	   OP0 is the destination and OP1 the value to be stored, both in
1261 	   MODE.  OP1 comes from the constfn supplied to the constructor, and
1262 	   the store is emitted using the insn gen function selected earlier
1263 	   by prepare_mode.  */
1264
1265 void
1266 store_by_pieces_d::generate (rtx op0, rtx op1, machine_mode)
1267 {
1268 emit_insn (m_gen_fun (op0, op1));
1269 }
1270
1271 /* Perform the final adjustment at the end of a string to obtain the
1272 correct return value for the block operation.
1273 Return value is based on RETMODE argument. */
1274
1275 rtx
1276 store_by_pieces_d::finish_retmode (memop_ret retmode)
1277 {
1278 gcc_assert (!m_reverse);
1279 if (retmode == RETURN_END_MINUS_ONE)
1280 {
1281 m_to.maybe_postinc (-1);
1282 --m_offset;
1283 }
1284 return m_to.adjust (QImode, m_offset);
1285 }
1286
1287 /* Determine whether the LEN bytes generated by CONSTFUN can be
1288 stored to memory using several move instructions. CONSTFUNDATA is
1289 a pointer which will be passed as argument in every CONSTFUN call.
1290 ALIGN is maximum alignment we can assume. MEMSETP is true if this is
1291 a memset operation and false if it's a copy of a constant string.
1292 Return nonzero if a call to store_by_pieces should succeed. */
1293
1294 int
1295 can_store_by_pieces (unsigned HOST_WIDE_INT len,
1296 rtx (*constfun) (void *, HOST_WIDE_INT, scalar_int_mode),
1297 void *constfundata, unsigned int align, bool memsetp)
1298 {
1299 unsigned HOST_WIDE_INT l;
1300 unsigned int max_size;
1301 HOST_WIDE_INT offset = 0;
1302 enum insn_code icode;
1303 int reverse;
1304 /* cst is set but not used if LEGITIMATE_CONSTANT doesn't use it. */
1305 rtx cst ATTRIBUTE_UNUSED;
1306
1307 if (len == 0)
1308 return 1;
1309
1310 if (!targetm.use_by_pieces_infrastructure_p (len, align,
1311 memsetp
1312 ? SET_BY_PIECES
1313 : STORE_BY_PIECES,
1314 optimize_insn_for_speed_p ()))
1315 return 0;
1316
1317 align = alignment_for_piecewise_move (STORE_MAX_PIECES, align);
1318
1319 /* We would first store what we can in the largest integer mode, then go to
1320 successively smaller modes. */
1321
1322 for (reverse = 0;
1323 reverse <= (HAVE_PRE_DECREMENT || HAVE_POST_DECREMENT);
1324 reverse++)
1325 {
1326 l = len;
1327 max_size = STORE_MAX_PIECES + 1;
1328 while (max_size > 1 && l > 0)
1329 {
1330 scalar_int_mode mode = widest_int_mode_for_size (max_size);
1331
1332 icode = optab_handler (mov_optab, mode);
1333 if (icode != CODE_FOR_nothing
1334 && align >= GET_MODE_ALIGNMENT (mode))
1335 {
1336 unsigned int size = GET_MODE_SIZE (mode);
1337
1338 while (l >= size)
1339 {
1340 if (reverse)
1341 offset -= size;
1342
1343 cst = (*constfun) (constfundata, offset, mode);
1344 if (!targetm.legitimate_constant_p (mode, cst))
1345 return 0;
1346
1347 if (!reverse)
1348 offset += size;
1349
1350 l -= size;
1351 }
1352 }
1353
1354 max_size = GET_MODE_SIZE (mode);
1355 }
1356
1357 /* The code above should have handled everything. */
1358 gcc_assert (!l);
1359 }
1360
1361 return 1;
1362 }
1363
1364 /* Generate several move instructions to store LEN bytes generated by
1365 CONSTFUN to block TO. (A MEM rtx with BLKmode). CONSTFUNDATA is a
1366 pointer which will be passed as argument in every CONSTFUN call.
1367 ALIGN is maximum alignment we can assume. MEMSETP is true if this is
1368 a memset operation and false if it's a copy of a constant string.
1369 Return value is based on RETMODE argument. */
1370
1371 rtx
1372 store_by_pieces (rtx to, unsigned HOST_WIDE_INT len,
1373 rtx (*constfun) (void *, HOST_WIDE_INT, scalar_int_mode),
1374 void *constfundata, unsigned int align, bool memsetp,
1375 memop_ret retmode)
1376 {
1377 if (len == 0)
1378 {
1379 gcc_assert (retmode != RETURN_END_MINUS_ONE);
1380 return to;
1381 }
1382
1383 gcc_assert (targetm.use_by_pieces_infrastructure_p
1384 (len, align,
1385 memsetp ? SET_BY_PIECES : STORE_BY_PIECES,
1386 optimize_insn_for_speed_p ()));
1387
1388 store_by_pieces_d data (to, constfun, constfundata, len, align);
1389 data.run ();
1390
1391 if (retmode != RETURN_BEGIN)
1392 return data.finish_retmode (retmode);
1393 else
1394 return to;
1395 }
1396
1397 /* Callback routine for clear_by_pieces.
1398 Return const0_rtx unconditionally. */
1399
1400 static rtx
1401 clear_by_pieces_1 (void *, HOST_WIDE_INT, scalar_int_mode)
1402 {
1403 return const0_rtx;
1404 }
1405
1406 /* Generate several move instructions to clear LEN bytes of block TO. (A MEM
1407 rtx with BLKmode). ALIGN is maximum alignment we can assume. */
1408
1409 static void
1410 clear_by_pieces (rtx to, unsigned HOST_WIDE_INT len, unsigned int align)
1411 {
1412 if (len == 0)
1413 return;
1414
1415 store_by_pieces_d data (to, clear_by_pieces_1, NULL, len, align);
1416 data.run ();
1417 }
1418
1419 	/* Context used by compare_by_pieces_d.  It stores the fail label
1420 to jump to in case of miscomparison, and for branch ratios greater than 1,
1421 it stores an accumulator and the current and maximum counts before
1422 emitting another branch. */
1423
1424 class compare_by_pieces_d : public op_by_pieces_d
1425 {
1426 rtx_code_label *m_fail_label;
1427 rtx m_accumulator;
1428 int m_count, m_batch;
1429
1430 void generate (rtx, rtx, machine_mode);
1431 bool prepare_mode (machine_mode, unsigned int);
1432 void finish_mode (machine_mode);
1433 public:
1434 compare_by_pieces_d (rtx op0, rtx op1, by_pieces_constfn op1_cfn,
1435 void *op1_cfn_data, HOST_WIDE_INT len, int align,
1436 rtx_code_label *fail_label)
1437 : op_by_pieces_d (op0, true, op1, true, op1_cfn, op1_cfn_data, len, align)
1438 {
1439 m_fail_label = fail_label;
1440 }
1441 };
1442
1443 	/* A callback used when iterating for a compare_by_pieces operation.
1444 	   OP0 and OP1 are the values that have been loaded and should be
1445 	   compared in MODE; the accumulator, batch counts and fail label are
1446 	   held in this compare_by_pieces_d object.  */
1447
1448 void
1449 compare_by_pieces_d::generate (rtx op0, rtx op1, machine_mode mode)
1450 {
1451 if (m_batch > 1)
1452 {
1453 rtx temp = expand_binop (mode, sub_optab, op0, op1, NULL_RTX,
1454 true, OPTAB_LIB_WIDEN);
1455 if (m_count != 0)
1456 temp = expand_binop (mode, ior_optab, m_accumulator, temp, temp,
1457 true, OPTAB_LIB_WIDEN);
1458 m_accumulator = temp;
1459
1460 if (++m_count < m_batch)
1461 return;
1462
1463 m_count = 0;
1464 op0 = m_accumulator;
1465 op1 = const0_rtx;
1466 m_accumulator = NULL_RTX;
1467 }
1468 do_compare_rtx_and_jump (op0, op1, NE, true, mode, NULL_RTX, NULL,
1469 m_fail_label, profile_probability::uninitialized ());
1470 }
1471
1472 /* Return true if MODE can be used for a set of moves and comparisons,
1473 given an alignment ALIGN. Prepare whatever data is necessary for
1474 later calls to generate. */
1475
1476 bool
1477 compare_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align)
1478 {
1479 insn_code icode = optab_handler (mov_optab, mode);
1480 if (icode == CODE_FOR_nothing
1481 || align < GET_MODE_ALIGNMENT (mode)
1482 || !can_compare_p (EQ, mode, ccp_jump))
1483 return false;
1484 m_batch = targetm.compare_by_pieces_branch_ratio (mode);
1485 if (m_batch < 0)
1486 return false;
1487 m_accumulator = NULL_RTX;
1488 m_count = 0;
1489 return true;
1490 }
1491
1492 /* Called after expanding a series of comparisons in MODE. If we have
1493 accumulated results for which we haven't emitted a branch yet, do
1494 so now. */
1495
1496 void
1497 compare_by_pieces_d::finish_mode (machine_mode mode)
1498 {
1499 if (m_accumulator != NULL_RTX)
1500 do_compare_rtx_and_jump (m_accumulator, const0_rtx, NE, true, mode,
1501 NULL_RTX, NULL, m_fail_label,
1502 profile_probability::uninitialized ());
1503 }
1504
1505 	/* Generate several move instructions to compare LEN bytes from blocks
1506 	   ARG0 and ARG1.  (These are MEM rtx's with BLKmode).
1507 	
1508 	   TARGET, if it is a pseudo register, is used to hold the result
1509 	   (0 if the blocks are equal, 1 otherwise); otherwise a new pseudo is used.
1510 	
1511 	   ALIGN is the maximum alignment we can assume for both blocks.
1512 	
1513 	   Optionally, the caller can pass a constfn and associated data in A1_CFN
1514 	   and A1_CFN_DATA, describing that the second operand being compared is a
1515 	   known constant and how to obtain its data.  */
1516
1517 static rtx
1518 compare_by_pieces (rtx arg0, rtx arg1, unsigned HOST_WIDE_INT len,
1519 rtx target, unsigned int align,
1520 by_pieces_constfn a1_cfn, void *a1_cfn_data)
1521 {
1522 rtx_code_label *fail_label = gen_label_rtx ();
1523 rtx_code_label *end_label = gen_label_rtx ();
1524
1525 if (target == NULL_RTX
1526 || !REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
1527 target = gen_reg_rtx (TYPE_MODE (integer_type_node));
1528
1529 compare_by_pieces_d data (arg0, arg1, a1_cfn, a1_cfn_data, len, align,
1530 fail_label);
1531
1532 data.run ();
1533
1534 emit_move_insn (target, const0_rtx);
1535 emit_jump (end_label);
1536 emit_barrier ();
1537 emit_label (fail_label);
1538 emit_move_insn (target, const1_rtx);
1539 emit_label (end_label);
1540
1541 return target;
1542 }
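/* Sketch of the control flow produced above (C-like pseudocode, for
   illustration only):

       if (piece_0 != piece_0') goto fail;
       ...
       if (piece_n != piece_n') goto fail;
       target = 0;  goto end;
     fail:
       target = 1;
     end:

   so TARGET ends up 0 when the blocks are equal and 1 otherwise.  */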
1543 \f
1544 /* Emit code to move a block Y to a block X. This may be done with
1545 string-move instructions, with multiple scalar move instructions,
1546 or with a library call.
1547
1548 Both X and Y must be MEM rtx's (perhaps inside VOLATILE) with mode BLKmode.
1549 SIZE is an rtx that says how long they are.
1550 ALIGN is the maximum alignment we can assume they have.
1551 METHOD describes what kind of copy this is, and what mechanisms may be used.
1552 	   MIN_SIZE is the minimal size of the block to move.
1553 	   MAX_SIZE is the maximal size of the block to move; if it cannot be
1554 	   represented in unsigned HOST_WIDE_INT, it is a mask of all ones.
1555
1556 Return the address of the new block, if memcpy is called and returns it,
1557 0 otherwise. */
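/* In outline, the code below first tries move_by_pieces for small
   constant sizes, then a target cpymem expander, then a memcpy libcall
   when METHOD permits one, and finally an explicit byte-copy loop.  */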
1558
1559 rtx
1560 emit_block_move_hints (rtx x, rtx y, rtx size, enum block_op_methods method,
1561 unsigned int expected_align, HOST_WIDE_INT expected_size,
1562 unsigned HOST_WIDE_INT min_size,
1563 unsigned HOST_WIDE_INT max_size,
1564 unsigned HOST_WIDE_INT probable_max_size,
1565 bool bail_out_libcall, bool *is_move_done)
1566 {
1567 int may_use_call;
1568 rtx retval = 0;
1569 unsigned int align;
1570
1571 if (is_move_done)
1572 *is_move_done = true;
1573
1574 gcc_assert (size);
1575 if (CONST_INT_P (size) && INTVAL (size) == 0)
1576 return 0;
1577
1578 switch (method)
1579 {
1580 case BLOCK_OP_NORMAL:
1581 case BLOCK_OP_TAILCALL:
1582 may_use_call = 1;
1583 break;
1584
1585 case BLOCK_OP_CALL_PARM:
1586 may_use_call = block_move_libcall_safe_for_call_parm ();
1587
1588 /* Make inhibit_defer_pop nonzero around the library call
1589 to force it to pop the arguments right away. */
1590 NO_DEFER_POP;
1591 break;
1592
1593 case BLOCK_OP_NO_LIBCALL:
1594 may_use_call = 0;
1595 break;
1596
1597 case BLOCK_OP_NO_LIBCALL_RET:
1598 may_use_call = -1;
1599 break;
1600
1601 default:
1602 gcc_unreachable ();
1603 }
1604
1605 gcc_assert (MEM_P (x) && MEM_P (y));
1606 align = MIN (MEM_ALIGN (x), MEM_ALIGN (y));
1607 gcc_assert (align >= BITS_PER_UNIT);
1608
1609 /* Make sure we've got BLKmode addresses; store_one_arg can decide that
1610 block copy is more efficient for other large modes, e.g. DCmode. */
1611 x = adjust_address (x, BLKmode, 0);
1612 y = adjust_address (y, BLKmode, 0);
1613
1614 /* Set MEM_SIZE as appropriate for this block copy. The main place this
1615 can be incorrect is coming from __builtin_memcpy. */
1616 poly_int64 const_size;
1617 if (poly_int_rtx_p (size, &const_size))
1618 {
1619 x = shallow_copy_rtx (x);
1620 y = shallow_copy_rtx (y);
1621 set_mem_size (x, const_size);
1622 set_mem_size (y, const_size);
1623 }
1624
1625 if (CONST_INT_P (size) && can_move_by_pieces (INTVAL (size), align))
1626 move_by_pieces (x, y, INTVAL (size), align, RETURN_BEGIN);
1627 else if (emit_block_move_via_cpymem (x, y, size, align,
1628 expected_align, expected_size,
1629 min_size, max_size, probable_max_size))
1630 ;
1631 else if (may_use_call
1632 && ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (x))
1633 && ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (y)))
1634 {
1635 if (bail_out_libcall)
1636 {
1637 if (is_move_done)
1638 *is_move_done = false;
1639 return retval;
1640 }
1641
1642 if (may_use_call < 0)
1643 return pc_rtx;
1644
1645 retval = emit_block_copy_via_libcall (x, y, size,
1646 method == BLOCK_OP_TAILCALL);
1647 }
1648
1649 else
1650 emit_block_move_via_loop (x, y, size, align);
1651
1652 if (method == BLOCK_OP_CALL_PARM)
1653 OK_DEFER_POP;
1654
1655 return retval;
1656 }
1657
1658 rtx
1659 emit_block_move (rtx x, rtx y, rtx size, enum block_op_methods method)
1660 {
1661 unsigned HOST_WIDE_INT max, min = 0;
1662 if (GET_CODE (size) == CONST_INT)
1663 min = max = UINTVAL (size);
1664 else
1665 max = GET_MODE_MASK (GET_MODE (size));
1666 return emit_block_move_hints (x, y, size, method, 0, -1,
1667 min, max, max);
1668 }
1669
1670 /* A subroutine of emit_block_move. Returns true if calling the
1671 block move libcall will not clobber any parameters which may have
1672 already been placed on the stack. */
1673
1674 static bool
1675 block_move_libcall_safe_for_call_parm (void)
1676 {
1677 #if defined (REG_PARM_STACK_SPACE)
1678 tree fn;
1679 #endif
1680
1681 /* If arguments are pushed on the stack, then they're safe. */
1682 if (PUSH_ARGS)
1683 return true;
1684
1685 /* If registers go on the stack anyway, any argument is sure to clobber
1686 an outgoing argument. */
1687 #if defined (REG_PARM_STACK_SPACE)
1688 fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
1689 /* Avoid set but not used warning if *REG_PARM_STACK_SPACE doesn't
1690 depend on its argument. */
1691 (void) fn;
1692 if (OUTGOING_REG_PARM_STACK_SPACE ((!fn ? NULL_TREE : TREE_TYPE (fn)))
1693 && REG_PARM_STACK_SPACE (fn) != 0)
1694 return false;
1695 #endif
1696
1697 /* If any argument goes in memory, then it might clobber an outgoing
1698 argument. */
1699 {
1700 CUMULATIVE_ARGS args_so_far_v;
1701 cumulative_args_t args_so_far;
1702 tree fn, arg;
1703
1704 fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
1705 INIT_CUMULATIVE_ARGS (args_so_far_v, TREE_TYPE (fn), NULL_RTX, 0, 3);
1706 args_so_far = pack_cumulative_args (&args_so_far_v);
1707
1708 arg = TYPE_ARG_TYPES (TREE_TYPE (fn));
1709 for ( ; arg != void_list_node ; arg = TREE_CHAIN (arg))
1710 {
1711 machine_mode mode = TYPE_MODE (TREE_VALUE (arg));
1712 function_arg_info arg_info (mode, /*named=*/true);
1713 rtx tmp = targetm.calls.function_arg (args_so_far, arg_info);
1714 if (!tmp || !REG_P (tmp))
1715 return false;
1716 if (targetm.calls.arg_partial_bytes (args_so_far, arg_info))
1717 return false;
1718 targetm.calls.function_arg_advance (args_so_far, arg_info);
1719 }
1720 }
1721 return true;
1722 }
1723
1724 /* A subroutine of emit_block_move. Expand a cpymem pattern;
1725 return true if successful. */
1726
1727 static bool
1728 emit_block_move_via_cpymem (rtx x, rtx y, rtx size, unsigned int align,
1729 unsigned int expected_align, HOST_WIDE_INT expected_size,
1730 unsigned HOST_WIDE_INT min_size,
1731 unsigned HOST_WIDE_INT max_size,
1732 unsigned HOST_WIDE_INT probable_max_size)
1733 {
1734 if (expected_align < align)
1735 expected_align = align;
1736 if (expected_size != -1)
1737 {
1738 if ((unsigned HOST_WIDE_INT)expected_size > probable_max_size)
1739 expected_size = probable_max_size;
1740 if ((unsigned HOST_WIDE_INT)expected_size < min_size)
1741 expected_size = min_size;
1742 }
1743
1744 /* Since this is a move insn, we don't care about volatility. */
1745 temporary_volatile_ok v (true);
1746
1747 /* Try the most limited insn first, because there's no point
1748 including more than one in the machine description unless
1749 the more limited one has some advantage. */
1750
1751 opt_scalar_int_mode mode_iter;
1752 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
1753 {
1754 scalar_int_mode mode = mode_iter.require ();
1755 enum insn_code code = direct_optab_handler (cpymem_optab, mode);
1756
1757 if (code != CODE_FOR_nothing
1758 /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
1759 here because if SIZE is less than the mode mask, as it is
1760 returned by the macro, it will definitely be less than the
1761 actual mode mask. Since SIZE is within the Pmode address
1762 space, we limit MODE to Pmode. */
1763 && ((CONST_INT_P (size)
1764 && ((unsigned HOST_WIDE_INT) INTVAL (size)
1765 <= (GET_MODE_MASK (mode) >> 1)))
1766 || max_size <= (GET_MODE_MASK (mode) >> 1)
1767 || GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode)))
1768 {
1769 class expand_operand ops[9];
1770 unsigned int nops;
1771
1772 /* ??? When called via emit_block_move_for_call, it'd be
1773 nice if there were some way to inform the backend, so
1774 that it doesn't fail the expansion because it thinks
1775 emitting the libcall would be more efficient. */
1776 nops = insn_data[(int) code].n_generator_args;
1777 gcc_assert (nops == 4 || nops == 6 || nops == 8 || nops == 9);
1778
1779 create_fixed_operand (&ops[0], x);
1780 create_fixed_operand (&ops[1], y);
1781 /* The check above guarantees that this size conversion is valid. */
1782 create_convert_operand_to (&ops[2], size, mode, true);
1783 create_integer_operand (&ops[3], align / BITS_PER_UNIT);
1784 if (nops >= 6)
1785 {
1786 create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
1787 create_integer_operand (&ops[5], expected_size);
1788 }
1789 if (nops >= 8)
1790 {
1791 create_integer_operand (&ops[6], min_size);
1792 /* If we cannot represent the maximal size,
1793 make parameter NULL. */
1794 if ((HOST_WIDE_INT) max_size != -1)
1795 create_integer_operand (&ops[7], max_size);
1796 else
1797 create_fixed_operand (&ops[7], NULL);
1798 }
1799 if (nops == 9)
1800 {
1801 /* If we cannot represent the maximal size,
1802 make parameter NULL. */
1803 if ((HOST_WIDE_INT) probable_max_size != -1)
1804 create_integer_operand (&ops[8], probable_max_size);
1805 else
1806 create_fixed_operand (&ops[8], NULL);
1807 }
1808 if (maybe_expand_insn (code, nops, ops))
1809 return true;
1810 }
1811 }
1812
1813 return false;
1814 }
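/* Illustrative sketch (not a caller in this file): a user of the expander
   above that knows only the common alignment and a constant byte count
   could pass the same constant for all three size hints and -1 for the
   profile-based EXPECTED_SIZE:

     emit_block_move_via_cpymem (x, y, size, align, align, -1,
                                 INTVAL (size), INTVAL (size),
                                 INTVAL (size));

   The in-tree caller derives these hints from profile data and known
   size ranges where available. */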
1815
1816 /* A subroutine of emit_block_move. Copy the data via an explicit
1817 loop. This is used only when libcalls are forbidden. */
1818 /* ??? It'd be nice to copy in hunks larger than QImode. */
1819
1820 static void
1821 emit_block_move_via_loop (rtx x, rtx y, rtx size,
1822 unsigned int align ATTRIBUTE_UNUSED)
1823 {
1824 rtx_code_label *cmp_label, *top_label;
1825 rtx iter, x_addr, y_addr, tmp;
1826 machine_mode x_addr_mode = get_address_mode (x);
1827 machine_mode y_addr_mode = get_address_mode (y);
1828 machine_mode iter_mode;
1829
1830 iter_mode = GET_MODE (size);
1831 if (iter_mode == VOIDmode)
1832 iter_mode = word_mode;
1833
1834 top_label = gen_label_rtx ();
1835 cmp_label = gen_label_rtx ();
1836 iter = gen_reg_rtx (iter_mode);
1837
1838 emit_move_insn (iter, const0_rtx);
1839
1840 x_addr = force_operand (XEXP (x, 0), NULL_RTX);
1841 y_addr = force_operand (XEXP (y, 0), NULL_RTX);
1842 do_pending_stack_adjust ();
1843
1844 emit_jump (cmp_label);
1845 emit_label (top_label);
1846
1847 tmp = convert_modes (x_addr_mode, iter_mode, iter, true);
1848 x_addr = simplify_gen_binary (PLUS, x_addr_mode, x_addr, tmp);
1849
1850 if (x_addr_mode != y_addr_mode)
1851 tmp = convert_modes (y_addr_mode, iter_mode, iter, true);
1852 y_addr = simplify_gen_binary (PLUS, y_addr_mode, y_addr, tmp);
1853
1854 x = change_address (x, QImode, x_addr);
1855 y = change_address (y, QImode, y_addr);
1856
1857 emit_move_insn (x, y);
1858
1859 tmp = expand_simple_binop (iter_mode, PLUS, iter, const1_rtx, iter,
1860 true, OPTAB_LIB_WIDEN);
1861 if (tmp != iter)
1862 emit_move_insn (iter, tmp);
1863
1864 emit_label (cmp_label);
1865
1866 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
1867 true, top_label,
1868 profile_probability::guessed_always ()
1869 .apply_scale (9, 10));
1870 }
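/* Conceptually (an illustrative sketch, not compiled code), the RTL
   emitted above behaves like this byte-copy loop:

     unsigned HOST_WIDE_INT i = 0;
     goto cmp;
   top:
     ((unsigned char *) x_addr)[i] = ((unsigned char *) y_addr)[i];
     i++;
   cmp:
     if (i < size)
       goto top;

   with the backward branch predicted taken with probability 9/10. */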
1871 \f
1872 /* Expand a call to memcpy or memmove or memcmp, and return the result.
1873 TAILCALL is true if this is a tail call. */
1874
1875 rtx
1876 emit_block_op_via_libcall (enum built_in_function fncode, rtx dst, rtx src,
1877 rtx size, bool tailcall)
1878 {
1879 rtx dst_addr, src_addr;
1880 tree call_expr, dst_tree, src_tree, size_tree;
1881 machine_mode size_mode;
1882
1883 /* Since dst and src are passed to a libcall, mark the corresponding
1884 tree EXPR as addressable. */
1885 tree dst_expr = MEM_EXPR (dst);
1886 tree src_expr = MEM_EXPR (src);
1887 if (dst_expr)
1888 mark_addressable (dst_expr);
1889 if (src_expr)
1890 mark_addressable (src_expr);
1891
1892 dst_addr = copy_addr_to_reg (XEXP (dst, 0));
1893 dst_addr = convert_memory_address (ptr_mode, dst_addr);
1894 dst_tree = make_tree (ptr_type_node, dst_addr);
1895
1896 src_addr = copy_addr_to_reg (XEXP (src, 0));
1897 src_addr = convert_memory_address (ptr_mode, src_addr);
1898 src_tree = make_tree (ptr_type_node, src_addr);
1899
1900 size_mode = TYPE_MODE (sizetype);
1901 size = convert_to_mode (size_mode, size, 1);
1902 size = copy_to_mode_reg (size_mode, size);
1903 size_tree = make_tree (sizetype, size);
1904
1905 /* It is incorrect to use the libcall calling conventions for calls to
1906 memcpy/memmove/memcmp because they can be provided by the user. */
1907 tree fn = builtin_decl_implicit (fncode);
1908 call_expr = build_call_expr (fn, 3, dst_tree, src_tree, size_tree);
1909 CALL_EXPR_TAILCALL (call_expr) = tailcall;
1910
1911 return expand_call (call_expr, NULL_RTX, false);
1912 }
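/* Example use (a sketch, with assumed surrounding context): expanding a
   block copy of SIZE bytes as an ordinary memcpy call, not a tail call:

     rtx retval = emit_block_op_via_libcall (BUILT_IN_MEMCPY, dst, src,
                                             size, false);

   DST and SRC must be MEM rtx's; RETVAL holds memcpy's return value. */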
1913
1914 /* Try to expand cmpstrn or cmpmem operation ICODE with the given operands.
1915 ARG3_TYPE is the type of ARG3_RTX. Return the result rtx on success,
1916 otherwise return null. */
1917
1918 rtx
1919 expand_cmpstrn_or_cmpmem (insn_code icode, rtx target, rtx arg1_rtx,
1920 rtx arg2_rtx, tree arg3_type, rtx arg3_rtx,
1921 HOST_WIDE_INT align)
1922 {
1923 machine_mode insn_mode = insn_data[icode].operand[0].mode;
1924
1925 if (target && (!REG_P (target) || HARD_REGISTER_P (target)))
1926 target = NULL_RTX;
1927
1928 class expand_operand ops[5];
1929 create_output_operand (&ops[0], target, insn_mode);
1930 create_fixed_operand (&ops[1], arg1_rtx);
1931 create_fixed_operand (&ops[2], arg2_rtx);
1932 create_convert_operand_from (&ops[3], arg3_rtx, TYPE_MODE (arg3_type),
1933 TYPE_UNSIGNED (arg3_type));
1934 create_integer_operand (&ops[4], align);
1935 if (maybe_expand_insn (icode, 5, ops))
1936 return ops[0].value;
1937 return NULL_RTX;
1938 }
1939
1940 /* Expand a block compare between X and Y with length LEN using the
1941 cmpmem optab, placing the result in TARGET. LEN_TYPE is the type
1942 of the expression that was used to calculate the length. ALIGN
1943 gives the known minimum common alignment. */
1944
1945 static rtx
1946 emit_block_cmp_via_cmpmem (rtx x, rtx y, rtx len, tree len_type, rtx target,
1947 unsigned align)
1948 {
1949 /* Note: The cmpstrnsi pattern, if it exists, is not suitable for
1950 implementing memcmp because it will stop if it encounters two
1951 zero bytes. */
1952 insn_code icode = direct_optab_handler (cmpmem_optab, SImode);
1953
1954 if (icode == CODE_FOR_nothing)
1955 return NULL_RTX;
1956
1957 return expand_cmpstrn_or_cmpmem (icode, target, x, y, len_type, len, align);
1958 }
1959
1960 /* Emit code to compare a block Y to a block X. This may be done with
1961 string-compare instructions, with multiple scalar instructions,
1962 or with a library call.
1963
1964 Both X and Y must be MEM rtx's. LEN is an rtx that says how long
1965 they are. LEN_TYPE is the type of the expression that was used to
1966 calculate it.
1967
1968 If EQUALITY_ONLY is true, it means we don't have to return the tri-state
1969 value of a normal memcmp call, instead we can just compare for equality.
1970 If the comparison cannot be expanded inline, NULL_RTX is returned and
1971 the caller is responsible for falling back to a memcmp libcall.
1972
1973 Optionally, the caller can pass a constfn and associated data in Y_CFN
1974 and Y_CFN_DATA, describing that the second operand being compared is a
1975 known constant and how to obtain its data.
1976 Return the result of the comparison, or NULL_RTX if we failed to
1977 perform the operation. */
1978
1979 rtx
1980 emit_block_cmp_hints (rtx x, rtx y, rtx len, tree len_type, rtx target,
1981 bool equality_only, by_pieces_constfn y_cfn,
1982 void *y_cfndata)
1983 {
1984 rtx result = 0;
1985
1986 if (CONST_INT_P (len) && INTVAL (len) == 0)
1987 return const0_rtx;
1988
1989 gcc_assert (MEM_P (x) && MEM_P (y));
1990 unsigned int align = MIN (MEM_ALIGN (x), MEM_ALIGN (y));
1991 gcc_assert (align >= BITS_PER_UNIT);
1992
1993 x = adjust_address (x, BLKmode, 0);
1994 y = adjust_address (y, BLKmode, 0);
1995
1996 if (equality_only
1997 && CONST_INT_P (len)
1998 && can_do_by_pieces (INTVAL (len), align, COMPARE_BY_PIECES))
1999 result = compare_by_pieces (x, y, INTVAL (len), target, align,
2000 y_cfn, y_cfndata);
2001 else
2002 result = emit_block_cmp_via_cmpmem (x, y, len, len_type, target, align);
2003
2004 return result;
2005 }
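/* Illustrative call (the caller and LEN_TYPE are assumptions, not taken
   from this file): comparing two 16-byte blocks for equality only, with
   no constant data known for the second operand:

     rtx res = emit_block_cmp_hints (x, y, GEN_INT (16), size_type_node,
                                     target, true, NULL, NULL);

   A NULL_RTX result tells the caller to fall back to a memcmp call. */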
2006 \f
2007 /* Copy all or part of a value X into registers starting at REGNO.
2008 The number of registers to be filled is NREGS. */
2009
2010 void
2011 move_block_to_reg (int regno, rtx x, int nregs, machine_mode mode)
2012 {
2013 if (nregs == 0)
2014 return;
2015
2016 if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x))
2017 x = validize_mem (force_const_mem (mode, x));
2018
2019 /* See if the machine can do this with a load multiple insn. */
2020 if (targetm.have_load_multiple ())
2021 {
2022 rtx_insn *last = get_last_insn ();
2023 rtx first = gen_rtx_REG (word_mode, regno);
2024 if (rtx_insn *pat = targetm.gen_load_multiple (first, x,
2025 GEN_INT (nregs)))
2026 {
2027 emit_insn (pat);
2028 return;
2029 }
2030 else
2031 delete_insns_since (last);
2032 }
2033
2034 for (int i = 0; i < nregs; i++)
2035 emit_move_insn (gen_rtx_REG (word_mode, regno + i),
2036 operand_subword_force (x, i, mode));
2037 }
2038
2039 /* Copy all or part of a BLKmode value X out of registers starting at REGNO.
2040 The number of registers to be filled is NREGS. */
2041
2042 void
2043 move_block_from_reg (int regno, rtx x, int nregs)
2044 {
2045 if (nregs == 0)
2046 return;
2047
2048 /* See if the machine can do this with a store multiple insn. */
2049 if (targetm.have_store_multiple ())
2050 {
2051 rtx_insn *last = get_last_insn ();
2052 rtx first = gen_rtx_REG (word_mode, regno);
2053 if (rtx_insn *pat = targetm.gen_store_multiple (x, first,
2054 GEN_INT (nregs)))
2055 {
2056 emit_insn (pat);
2057 return;
2058 }
2059 else
2060 delete_insns_since (last);
2061 }
2062
2063 for (int i = 0; i < nregs; i++)
2064 {
2065 rtx tem = operand_subword (x, i, 1, BLKmode);
2066
2067 gcc_assert (tem);
2068
2069 emit_move_insn (tem, gen_rtx_REG (word_mode, regno + i));
2070 }
2071 }
2072
2073 /* Generate a PARALLEL rtx for a new non-consecutive group of registers from
2074 ORIG, where ORIG is a non-consecutive group of registers represented by
2075 a PARALLEL. The clone is identical to the original except in that the
2076 original set of registers is replaced by a new set of pseudo registers.
2077 The new set has the same modes as the original set. */
2078
2079 rtx
2080 gen_group_rtx (rtx orig)
2081 {
2082 int i, length;
2083 rtx *tmps;
2084
2085 gcc_assert (GET_CODE (orig) == PARALLEL);
2086
2087 length = XVECLEN (orig, 0);
2088 tmps = XALLOCAVEC (rtx, length);
2089
2090 /* Skip a NULL entry in first slot. */
2091 i = XEXP (XVECEXP (orig, 0, 0), 0) ? 0 : 1;
2092
2093 if (i)
2094 tmps[0] = 0;
2095
2096 for (; i < length; i++)
2097 {
2098 machine_mode mode = GET_MODE (XEXP (XVECEXP (orig, 0, i), 0));
2099 rtx offset = XEXP (XVECEXP (orig, 0, i), 1);
2100
2101 tmps[i] = gen_rtx_EXPR_LIST (VOIDmode, gen_reg_rtx (mode), offset);
2102 }
2103
2104 return gen_rtx_PARALLEL (GET_MODE (orig), gen_rtvec_v (length, tmps));
2105 }
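/* For illustration (register numbers are made up), a value passed in two
   DImode registers at byte offsets 0 and 8 might be described by

     (parallel [(expr_list (reg:DI 3) (const_int 0))
                (expr_list (reg:DI 4) (const_int 8))])

   gen_group_rtx returns the same shape with regs 3 and 4 replaced by
   fresh DImode pseudos, keeping the byte offsets unchanged. */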
2106
2107 /* A subroutine of emit_group_load. Arguments as for emit_group_load,
2108 except that values are placed in TMPS[i], and must later be moved
2109 into corresponding XEXP (XVECEXP (DST, 0, i), 0) element. */
2110
2111 static void
2112 emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type,
2113 poly_int64 ssize)
2114 {
2115 rtx src;
2116 int start, i;
2117 machine_mode m = GET_MODE (orig_src);
2118
2119 gcc_assert (GET_CODE (dst) == PARALLEL);
2120
2121 if (m != VOIDmode
2122 && !SCALAR_INT_MODE_P (m)
2123 && !MEM_P (orig_src)
2124 && GET_CODE (orig_src) != CONCAT)
2125 {
2126 scalar_int_mode imode;
2127 if (int_mode_for_mode (GET_MODE (orig_src)).exists (&imode))
2128 {
2129 src = gen_reg_rtx (imode);
2130 emit_move_insn (gen_lowpart (GET_MODE (orig_src), src), orig_src);
2131 }
2132 else
2133 {
2134 src = assign_stack_temp (GET_MODE (orig_src), ssize);
2135 emit_move_insn (src, orig_src);
2136 }
2137 emit_group_load_1 (tmps, dst, src, type, ssize);
2138 return;
2139 }
2140
2141 /* Check for a NULL entry, used to indicate that the parameter goes
2142 both on the stack and in registers. */
2143 if (XEXP (XVECEXP (dst, 0, 0), 0))
2144 start = 0;
2145 else
2146 start = 1;
2147
2148 /* Process the pieces. */
2149 for (i = start; i < XVECLEN (dst, 0); i++)
2150 {
2151 machine_mode mode = GET_MODE (XEXP (XVECEXP (dst, 0, i), 0));
2152 poly_int64 bytepos = rtx_to_poly_int64 (XEXP (XVECEXP (dst, 0, i), 1));
2153 poly_int64 bytelen = GET_MODE_SIZE (mode);
2154 poly_int64 shift = 0;
2155
2156 /* Handle trailing fragments that run over the size of the struct.
2157 It's the target's responsibility to make sure that the fragment
2158 cannot be strictly smaller in some cases and strictly larger
2159 in others. */
2160 gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
2161 if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
2162 {
2163 /* Arrange to shift the fragment to where it belongs.
2164 extract_bit_field loads to the lsb of the reg. */
2165 if (
2166 #ifdef BLOCK_REG_PADDING
2167 BLOCK_REG_PADDING (GET_MODE (orig_src), type, i == start)
2168 == (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD)
2169 #else
2170 BYTES_BIG_ENDIAN
2171 #endif
2172 )
2173 shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
2174 bytelen = ssize - bytepos;
2175 gcc_assert (maybe_gt (bytelen, 0));
2176 }
2177
2178 /* If we won't be loading directly from memory, protect the real source
2179 from strange tricks we might play; but make sure that the source can
2180 be loaded directly into the destination. */
2181 src = orig_src;
2182 if (!MEM_P (orig_src)
2183 && (!CONSTANT_P (orig_src)
2184 || (GET_MODE (orig_src) != mode
2185 && GET_MODE (orig_src) != VOIDmode)))
2186 {
2187 if (GET_MODE (orig_src) == VOIDmode)
2188 src = gen_reg_rtx (mode);
2189 else
2190 src = gen_reg_rtx (GET_MODE (orig_src));
2191
2192 emit_move_insn (src, orig_src);
2193 }
2194
2195 /* Optimize the access just a bit. */
2196 if (MEM_P (src)
2197 && (! targetm.slow_unaligned_access (mode, MEM_ALIGN (src))
2198 || MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode))
2199 && multiple_p (bytepos * BITS_PER_UNIT, GET_MODE_ALIGNMENT (mode))
2200 && known_eq (bytelen, GET_MODE_SIZE (mode)))
2201 {
2202 tmps[i] = gen_reg_rtx (mode);
2203 emit_move_insn (tmps[i], adjust_address (src, mode, bytepos));
2204 }
2205 else if (COMPLEX_MODE_P (mode)
2206 && GET_MODE (src) == mode
2207 && known_eq (bytelen, GET_MODE_SIZE (mode)))
2208 /* Let emit_move_complex do the bulk of the work. */
2209 tmps[i] = src;
2210 else if (GET_CODE (src) == CONCAT)
2211 {
2212 poly_int64 slen = GET_MODE_SIZE (GET_MODE (src));
2213 poly_int64 slen0 = GET_MODE_SIZE (GET_MODE (XEXP (src, 0)));
2214 unsigned int elt;
2215 poly_int64 subpos;
2216
2217 if (can_div_trunc_p (bytepos, slen0, &elt, &subpos)
2218 && known_le (subpos + bytelen, slen0))
2219 {
2220 /* The following assumes that the concatenated objects all
2221 have the same size. In this case, a simple calculation
2222 can be used to determine the object and the bit field
2223 to be extracted. */
2224 tmps[i] = XEXP (src, elt);
2225 if (maybe_ne (subpos, 0)
2226 || maybe_ne (subpos + bytelen, slen0)
2227 || (!CONSTANT_P (tmps[i])
2228 && (!REG_P (tmps[i]) || GET_MODE (tmps[i]) != mode)))
2229 tmps[i] = extract_bit_field (tmps[i], bytelen * BITS_PER_UNIT,
2230 subpos * BITS_PER_UNIT,
2231 1, NULL_RTX, mode, mode, false,
2232 NULL);
2233 }
2234 else
2235 {
2236 rtx mem;
2237
2238 gcc_assert (known_eq (bytepos, 0));
2239 mem = assign_stack_temp (GET_MODE (src), slen);
2240 emit_move_insn (mem, src);
2241 tmps[i] = extract_bit_field (mem, bytelen * BITS_PER_UNIT,
2242 0, 1, NULL_RTX, mode, mode, false,
2243 NULL);
2244 }
2245 }
2246 /* FIXME: A SIMD parallel will eventually lead to a subreg of a
2247 SIMD register, which is currently broken. Until we get GCC
2248 to emit proper RTL for these cases, let's dump to memory. */
2249 else if (VECTOR_MODE_P (GET_MODE (dst))
2250 && REG_P (src))
2251 {
2252 poly_uint64 slen = GET_MODE_SIZE (GET_MODE (src));
2253 rtx mem;
2254
2255 mem = assign_stack_temp (GET_MODE (src), slen);
2256 emit_move_insn (mem, src);
2257 tmps[i] = adjust_address (mem, mode, bytepos);
2258 }
2259 else if (CONSTANT_P (src) && GET_MODE (dst) != BLKmode
2260 && XVECLEN (dst, 0) > 1)
2261 tmps[i] = simplify_gen_subreg (mode, src, GET_MODE (dst), bytepos);
2262 else if (CONSTANT_P (src))
2263 {
2264 if (known_eq (bytelen, ssize))
2265 tmps[i] = src;
2266 else
2267 {
2268 rtx first, second;
2269
2270 /* TODO: const_wide_int can have sizes other than this... */
2271 gcc_assert (known_eq (2 * bytelen, ssize));
2272 split_double (src, &first, &second);
2273 if (i)
2274 tmps[i] = second;
2275 else
2276 tmps[i] = first;
2277 }
2278 }
2279 else if (REG_P (src) && GET_MODE (src) == mode)
2280 tmps[i] = src;
2281 else
2282 tmps[i] = extract_bit_field (src, bytelen * BITS_PER_UNIT,
2283 bytepos * BITS_PER_UNIT, 1, NULL_RTX,
2284 mode, mode, false, NULL);
2285
2286 if (maybe_ne (shift, 0))
2287 tmps[i] = expand_shift (LSHIFT_EXPR, mode, tmps[i],
2288 shift, tmps[i], 0);
2289 }
2290 }
2291
2292 /* Emit code to move a block SRC of type TYPE to a block DST,
2293 where DST is non-consecutive registers represented by a PARALLEL.
2294 SSIZE represents the total size of block SRC in bytes, or -1
2295 if not known. */
2296
2297 void
2298 emit_group_load (rtx dst, rtx src, tree type, poly_int64 ssize)
2299 {
2300 rtx *tmps;
2301 int i;
2302
2303 tmps = XALLOCAVEC (rtx, XVECLEN (dst, 0));
2304 emit_group_load_1 (tmps, dst, src, type, ssize);
2305
2306 /* Copy the extracted pieces into the proper (probable) hard regs. */
2307 for (i = 0; i < XVECLEN (dst, 0); i++)
2308 {
2309 rtx d = XEXP (XVECEXP (dst, 0, i), 0);
2310 if (d == NULL)
2311 continue;
2312 emit_move_insn (d, tmps[i]);
2313 }
2314 }
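/* Sketch of a typical use (PARM, PARM_MEM and PARALLEL_DST are
   assumptions for illustration): scattering a BLKmode argument from
   memory into the registers named by its PARALLEL descriptor:

     emit_group_load (parallel_dst, parm_mem, TREE_TYPE (parm),
                      int_size_in_bytes (TREE_TYPE (parm)));
*/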
2315
2316 /* Similar, but load SRC into new pseudos in a format that looks like
2317 PARALLEL. This can later be fed to emit_group_move to get things
2318 in the right place. */
2319
2320 rtx
2321 emit_group_load_into_temps (rtx parallel, rtx src, tree type, poly_int64 ssize)
2322 {
2323 rtvec vec;
2324 int i;
2325
2326 vec = rtvec_alloc (XVECLEN (parallel, 0));
2327 emit_group_load_1 (&RTVEC_ELT (vec, 0), parallel, src, type, ssize);
2328
2329 /* Convert the vector to look just like the original PARALLEL, except
2330 with the computed values. */
2331 for (i = 0; i < XVECLEN (parallel, 0); i++)
2332 {
2333 rtx e = XVECEXP (parallel, 0, i);
2334 rtx d = XEXP (e, 0);
2335
2336 if (d)
2337 {
2338 d = force_reg (GET_MODE (d), RTVEC_ELT (vec, i));
2339 e = alloc_EXPR_LIST (REG_NOTE_KIND (e), d, XEXP (e, 1));
2340 }
2341 RTVEC_ELT (vec, i) = e;
2342 }
2343
2344 return gen_rtx_PARALLEL (GET_MODE (parallel), vec);
2345 }
2346
2347 /* Emit code to move a block SRC to block DST, where SRC and DST are
2348 non-consecutive groups of registers, each represented by a PARALLEL. */
2349
2350 void
2351 emit_group_move (rtx dst, rtx src)
2352 {
2353 int i;
2354
2355 gcc_assert (GET_CODE (src) == PARALLEL
2356 && GET_CODE (dst) == PARALLEL
2357 && XVECLEN (src, 0) == XVECLEN (dst, 0));
2358
2359 /* Skip first entry if NULL. */
2360 for (i = XEXP (XVECEXP (src, 0, 0), 0) ? 0 : 1; i < XVECLEN (src, 0); i++)
2361 emit_move_insn (XEXP (XVECEXP (dst, 0, i), 0),
2362 XEXP (XVECEXP (src, 0, i), 0));
2363 }
2364
2365 /* Move a group of registers represented by a PARALLEL into pseudos. */
2366
2367 rtx
2368 emit_group_move_into_temps (rtx src)
2369 {
2370 rtvec vec = rtvec_alloc (XVECLEN (src, 0));
2371 int i;
2372
2373 for (i = 0; i < XVECLEN (src, 0); i++)
2374 {
2375 rtx e = XVECEXP (src, 0, i);
2376 rtx d = XEXP (e, 0);
2377
2378 if (d)
2379 e = alloc_EXPR_LIST (REG_NOTE_KIND (e), copy_to_reg (d), XEXP (e, 1));
2380 RTVEC_ELT (vec, i) = e;
2381 }
2382
2383 return gen_rtx_PARALLEL (GET_MODE (src), vec);
2384 }
2385
2386 /* Emit code to move a block SRC to a block ORIG_DST of type TYPE,
2387 where SRC is non-consecutive registers represented by a PARALLEL.
2388 SSIZE represents the total size of block ORIG_DST, or -1 if not
2389 known. */
2390
2391 void
2392 emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED,
2393 poly_int64 ssize)
2394 {
2395 rtx *tmps, dst;
2396 int start, finish, i;
2397 machine_mode m = GET_MODE (orig_dst);
2398
2399 gcc_assert (GET_CODE (src) == PARALLEL);
2400
2401 if (!SCALAR_INT_MODE_P (m)
2402 && !MEM_P (orig_dst) && GET_CODE (orig_dst) != CONCAT)
2403 {
2404 scalar_int_mode imode;
2405 if (int_mode_for_mode (GET_MODE (orig_dst)).exists (&imode))
2406 {
2407 dst = gen_reg_rtx (imode);
2408 emit_group_store (dst, src, type, ssize);
2409 dst = gen_lowpart (GET_MODE (orig_dst), dst);
2410 }
2411 else
2412 {
2413 dst = assign_stack_temp (GET_MODE (orig_dst), ssize);
2414 emit_group_store (dst, src, type, ssize);
2415 }
2416 emit_move_insn (orig_dst, dst);
2417 return;
2418 }
2419
2420 /* Check for a NULL entry, used to indicate that the parameter goes
2421 both on the stack and in registers. */
2422 if (XEXP (XVECEXP (src, 0, 0), 0))
2423 start = 0;
2424 else
2425 start = 1;
2426 finish = XVECLEN (src, 0);
2427
2428 tmps = XALLOCAVEC (rtx, finish);
2429
2430 /* Copy the (probable) hard regs into pseudos. */
2431 for (i = start; i < finish; i++)
2432 {
2433 rtx reg = XEXP (XVECEXP (src, 0, i), 0);
2434 if (!REG_P (reg) || REGNO (reg) < FIRST_PSEUDO_REGISTER)
2435 {
2436 tmps[i] = gen_reg_rtx (GET_MODE (reg));
2437 emit_move_insn (tmps[i], reg);
2438 }
2439 else
2440 tmps[i] = reg;
2441 }
2442
2443 /* If we won't be storing directly into memory, protect the real destination
2444 from strange tricks we might play. */
2445 dst = orig_dst;
2446 if (GET_CODE (dst) == PARALLEL)
2447 {
2448 rtx temp;
2449
2450 /* We can get a PARALLEL dst if there is a conditional expression in
2451 a return statement. In that case, the dst and src are the same,
2452 so no action is necessary. */
2453 if (rtx_equal_p (dst, src))
2454 return;
2455
2456 /* It is unclear if we can ever reach here, but we may as well handle
2457 it. Allocate a temporary, and split this into a store/load to/from
2458 the temporary. */
2459 temp = assign_stack_temp (GET_MODE (dst), ssize);
2460 emit_group_store (temp, src, type, ssize);
2461 emit_group_load (dst, temp, type, ssize);
2462 return;
2463 }
2464 else if (!MEM_P (dst) && GET_CODE (dst) != CONCAT)
2465 {
2466 machine_mode outer = GET_MODE (dst);
2467 machine_mode inner;
2468 poly_int64 bytepos;
2469 bool done = false;
2470 rtx temp;
2471
2472 if (!REG_P (dst) || REGNO (dst) < FIRST_PSEUDO_REGISTER)
2473 dst = gen_reg_rtx (outer);
2474
2475 /* Make life a bit easier for combine. */
2476 /* If the first element of the vector is the low part
2477 of the destination mode, use a paradoxical subreg to
2478 initialize the destination. */
2479 if (start < finish)
2480 {
2481 inner = GET_MODE (tmps[start]);
2482 bytepos = subreg_lowpart_offset (inner, outer);
2483 if (known_eq (rtx_to_poly_int64 (XEXP (XVECEXP (src, 0, start), 1)),
2484 bytepos))
2485 {
2486 temp = simplify_gen_subreg (outer, tmps[start],
2487 inner, 0);
2488 if (temp)
2489 {
2490 emit_move_insn (dst, temp);
2491 done = true;
2492 start++;
2493 }
2494 }
2495 }
2496
2497 /* If the first element wasn't the low part, try the last. */
2498 if (!done
2499 && start < finish - 1)
2500 {
2501 inner = GET_MODE (tmps[finish - 1]);
2502 bytepos = subreg_lowpart_offset (inner, outer);
2503 if (known_eq (rtx_to_poly_int64 (XEXP (XVECEXP (src, 0,
2504 finish - 1), 1)),
2505 bytepos))
2506 {
2507 temp = simplify_gen_subreg (outer, tmps[finish - 1],
2508 inner, 0);
2509 if (temp)
2510 {
2511 emit_move_insn (dst, temp);
2512 done = true;
2513 finish--;
2514 }
2515 }
2516 }
2517
2518 /* Otherwise, simply initialize the result to zero. */
2519 if (!done)
2520 emit_move_insn (dst, CONST0_RTX (outer));
2521 }
2522
2523 /* Process the pieces. */
2524 for (i = start; i < finish; i++)
2525 {
2526 poly_int64 bytepos = rtx_to_poly_int64 (XEXP (XVECEXP (src, 0, i), 1));
2527 machine_mode mode = GET_MODE (tmps[i]);
2528 poly_int64 bytelen = GET_MODE_SIZE (mode);
2529 poly_uint64 adj_bytelen;
2530 rtx dest = dst;
2531
2532 /* Handle trailing fragments that run over the size of the struct.
2533 It's the target's responsibility to make sure that the fragment
2534 cannot be strictly smaller in some cases and strictly larger
2535 in others. */
2536 gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
2537 if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
2538 adj_bytelen = ssize - bytepos;
2539 else
2540 adj_bytelen = bytelen;
2541
2542 if (GET_CODE (dst) == CONCAT)
2543 {
2544 if (known_le (bytepos + adj_bytelen,
2545 GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
2546 dest = XEXP (dst, 0);
2547 else if (known_ge (bytepos, GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
2548 {
2549 bytepos -= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)));
2550 dest = XEXP (dst, 1);
2551 }
2552 else
2553 {
2554 machine_mode dest_mode = GET_MODE (dest);
2555 machine_mode tmp_mode = GET_MODE (tmps[i]);
2556
2557 gcc_assert (known_eq (bytepos, 0) && XVECLEN (src, 0));
2558
2559 if (GET_MODE_ALIGNMENT (dest_mode)
2560 >= GET_MODE_ALIGNMENT (tmp_mode))
2561 {
2562 dest = assign_stack_temp (dest_mode,
2563 GET_MODE_SIZE (dest_mode));
2564 emit_move_insn (adjust_address (dest,
2565 tmp_mode,
2566 bytepos),
2567 tmps[i]);
2568 dst = dest;
2569 }
2570 else
2571 {
2572 dest = assign_stack_temp (tmp_mode,
2573 GET_MODE_SIZE (tmp_mode));
2574 emit_move_insn (dest, tmps[i]);
2575 dst = adjust_address (dest, dest_mode, bytepos);
2576 }
2577 break;
2578 }
2579 }
2580
2581 /* Handle trailing fragments that run over the size of the struct. */
2582 if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
2583 {
2584 /* store_bit_field always takes its value from the lsb.
2585 Move the fragment to the lsb if it's not already there. */
2586 if (
2587 #ifdef BLOCK_REG_PADDING
2588 BLOCK_REG_PADDING (GET_MODE (orig_dst), type, i == start)
2589 == (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD)
2590 #else
2591 BYTES_BIG_ENDIAN
2592 #endif
2593 )
2594 {
2595 poly_int64 shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
2596 tmps[i] = expand_shift (RSHIFT_EXPR, mode, tmps[i],
2597 shift, tmps[i], 0);
2598 }
2599
2600 /* Make sure not to write past the end of the struct. */
2601 store_bit_field (dest,
2602 adj_bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT,
2603 bytepos * BITS_PER_UNIT, ssize * BITS_PER_UNIT - 1,
2604 VOIDmode, tmps[i], false);
2605 }
2606
2607 /* Optimize the access just a bit. */
2608 else if (MEM_P (dest)
2609 && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (dest))
2610 || MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode))
2611 && multiple_p (bytepos * BITS_PER_UNIT,
2612 GET_MODE_ALIGNMENT (mode))
2613 && known_eq (bytelen, GET_MODE_SIZE (mode)))
2614 emit_move_insn (adjust_address (dest, mode, bytepos), tmps[i]);
2615
2616 else
2617 store_bit_field (dest, bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT,
2618 0, 0, mode, tmps[i], false);
2619 }
2620
2621 /* Copy from the pseudo into the (probable) hard reg. */
2622 if (orig_dst != dst)
2623 emit_move_insn (orig_dst, dst);
2624 }
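/* Sketch of a typical use (the temporary is an assumption for
   illustration): gathering a PARALLEL-described value into a stack slot
   so it can be accessed as an ordinary BLKmode object:

     rtx slot = assign_stack_temp (BLKmode, int_size_in_bytes (type));
     emit_group_store (slot, parallel_src, type,
                       int_size_in_bytes (type));
*/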
2625
2626 /* Return a form of X that does not use a PARALLEL. TYPE is the type
2627 of the value stored in X. */
2628
2629 rtx
2630 maybe_emit_group_store (rtx x, tree type)
2631 {
2632 machine_mode mode = TYPE_MODE (type);
2633 gcc_checking_assert (GET_MODE (x) == VOIDmode || GET_MODE (x) == mode);
2634 if (GET_CODE (x) == PARALLEL)
2635 {
2636 rtx result = gen_reg_rtx (mode);
2637 emit_group_store (result, x, type, int_size_in_bytes (type));
2638 return result;
2639 }
2640 return x;
2641 }
2642
2643 /* Copy a BLKmode object of TYPE out of a register SRCREG into TARGET.
2644
2645 This is used on targets that return BLKmode values in registers. */
2646
2647 static void
2648 copy_blkmode_from_reg (rtx target, rtx srcreg, tree type)
2649 {
2650 unsigned HOST_WIDE_INT bytes = int_size_in_bytes (type);
2651 rtx src = NULL, dst = NULL;
2652 unsigned HOST_WIDE_INT bitsize = MIN (TYPE_ALIGN (type), BITS_PER_WORD);
2653 unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0;
2654 /* No current ABI uses variable-sized modes to pass a BLKmode type. */
2655 fixed_size_mode mode = as_a <fixed_size_mode> (GET_MODE (srcreg));
2656 fixed_size_mode tmode = as_a <fixed_size_mode> (GET_MODE (target));
2657 fixed_size_mode copy_mode;
2658
2659 /* BLKmode registers created in the back-end shouldn't have survived. */
2660 gcc_assert (mode != BLKmode);
2661
2662 /* If the structure doesn't take up a whole number of words, see whether
2663 SRCREG is padded on the left or on the right. If it's on the left,
2664 set PADDING_CORRECTION to the number of bits to skip.
2665
2666 In most ABIs, the structure will be returned at the least significant end of
2667 the register, which translates to right padding on little-endian
2668 targets and left padding on big-endian targets. The opposite
2669 holds if the structure is returned at the most significant
2670 end of the register. */
2671 if (bytes % UNITS_PER_WORD != 0
2672 && (targetm.calls.return_in_msb (type)
2673 ? !BYTES_BIG_ENDIAN
2674 : BYTES_BIG_ENDIAN))
2675 padding_correction
2676 = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD) * BITS_PER_UNIT));
2677
2678 /* We can use a single move if we have an exact mode for the size. */
2679 else if (MEM_P (target)
2680 && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (target))
2681 || MEM_ALIGN (target) >= GET_MODE_ALIGNMENT (mode))
2682 && bytes == GET_MODE_SIZE (mode))
2683 {
2684 emit_move_insn (adjust_address (target, mode, 0), srcreg);
2685 return;
2686 }
2687
2688 /* And if we additionally have the same mode for a register. */
2689 else if (REG_P (target)
2690 && GET_MODE (target) == mode
2691 && bytes == GET_MODE_SIZE (mode))
2692 {
2693 emit_move_insn (target, srcreg);
2694 return;
2695 }
2696
2697 /* This code assumes srcreg is at least a full word. If it isn't, copy it
2698 into a new pseudo which is a full word. */
2699 if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
2700 {
2701 srcreg = convert_to_mode (word_mode, srcreg, TYPE_UNSIGNED (type));
2702 mode = word_mode;
2703 }
2704
2705 /* Copy the structure BITSIZE bits at a time. If the target lives in
2706 memory, take care of not reading/writing past its end by selecting
2707 a copy mode suited to BITSIZE. This should always be possible given
2708 how it is computed.
2709
2710 If the target lives in register, make sure not to select a copy mode
2711 larger than the mode of the register.
2712
2713 We could probably emit more efficient code for machines which do not use
2714 strict alignment, but it doesn't seem worth the effort at the current
2715 time. */
2716
2717 copy_mode = word_mode;
2718 if (MEM_P (target))
2719 {
2720 opt_scalar_int_mode mem_mode = int_mode_for_size (bitsize, 1);
2721 if (mem_mode.exists ())
2722 copy_mode = mem_mode.require ();
2723 }
2724 else if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
2725 copy_mode = tmode;
2726
2727 for (bitpos = 0, xbitpos = padding_correction;
2728 bitpos < bytes * BITS_PER_UNIT;
2729 bitpos += bitsize, xbitpos += bitsize)
2730 {
2731 /* We need a new source operand each time xbitpos is on a
2732 word boundary and when xbitpos == padding_correction
2733 (the first time through). */
2734 if (xbitpos % BITS_PER_WORD == 0 || xbitpos == padding_correction)
2735 src = operand_subword_force (srcreg, xbitpos / BITS_PER_WORD, mode);
2736
2737 /* We need a new destination operand each time bitpos is on
2738 a word boundary. */
2739 if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
2740 dst = target;
2741 else if (bitpos % BITS_PER_WORD == 0)
2742 dst = operand_subword (target, bitpos / BITS_PER_WORD, 1, tmode);
2743
2744 /* Use xbitpos for the source extraction (right justified) and
2745 bitpos for the destination store (left justified). */
2746 store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, 0, 0, copy_mode,
2747 extract_bit_field (src, bitsize,
2748 xbitpos % BITS_PER_WORD, 1,
2749 NULL_RTX, copy_mode, copy_mode,
2750 false, NULL),
2751 false);
2752 }
2753 }
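/* Worked example (illustrative): a 6-byte structure on a 32-bit
   big-endian target that does not return values in the most significant
   end gives bytes % UNITS_PER_WORD == 2, so

     padding_correction = 32 - 2 * BITS_PER_UNIT = 16

   and the first 16 bits of SRCREG are skipped before the BITSIZE-wide
   chunks are copied into TARGET. */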
2754
2755 /* Copy BLKmode value SRC into a register of mode MODE_IN. Return the
2756 register if it contains any data, otherwise return null.
2757
2758 This is used on targets that return BLKmode values in registers. */
2759
2760 rtx
2761 copy_blkmode_to_reg (machine_mode mode_in, tree src)
2762 {
2763 int i, n_regs;
2764 unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0, bytes;
2765 unsigned int bitsize;
2766 rtx *dst_words, dst, x, src_word = NULL_RTX, dst_word = NULL_RTX;
2767 /* No current ABI uses variable-sized modes to pass a BLKmode type. */
2768 fixed_size_mode mode = as_a <fixed_size_mode> (mode_in);
2769 fixed_size_mode dst_mode;
2770 scalar_int_mode min_mode;
2771
2772 gcc_assert (TYPE_MODE (TREE_TYPE (src)) == BLKmode);
2773
2774 x = expand_normal (src);
2775
2776 bytes = arg_int_size_in_bytes (TREE_TYPE (src));
2777 if (bytes == 0)
2778 return NULL_RTX;
2779
2780 /* If the structure doesn't take up a whole number of words, see
2781 whether the register value should be padded on the left or on
2782 the right. Set PADDING_CORRECTION to the number of padding
2783 bits needed on the left side.
2784
2785 In most ABIs, the structure will be returned at the least significant end of
2786 the register, which translates to right padding on little-endian
2787 targets and left padding on big-endian targets. The opposite
2788 holds if the structure is returned at the most significant
2789 end of the register. */
2790 if (bytes % UNITS_PER_WORD != 0
2791 && (targetm.calls.return_in_msb (TREE_TYPE (src))
2792 ? !BYTES_BIG_ENDIAN
2793 : BYTES_BIG_ENDIAN))
2794 padding_correction = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD)
2795 * BITS_PER_UNIT));
2796
2797 n_regs = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2798 dst_words = XALLOCAVEC (rtx, n_regs);
2799 bitsize = MIN (TYPE_ALIGN (TREE_TYPE (src)), BITS_PER_WORD);
2800 min_mode = smallest_int_mode_for_size (bitsize);
2801
2802 /* Copy the structure BITSIZE bits at a time. */
2803 for (bitpos = 0, xbitpos = padding_correction;
2804 bitpos < bytes * BITS_PER_UNIT;
2805 bitpos += bitsize, xbitpos += bitsize)
2806 {
2807 /* We need a new destination pseudo each time xbitpos is
2808 on a word boundary and when xbitpos == padding_correction
2809 (the first time through). */
2810 if (xbitpos % BITS_PER_WORD == 0
2811 || xbitpos == padding_correction)
2812 {
2813 /* Generate an appropriate register. */
2814 dst_word = gen_reg_rtx (word_mode);
2815 dst_words[xbitpos / BITS_PER_WORD] = dst_word;
2816
2817 /* Clear the destination before we move anything into it. */
2818 emit_move_insn (dst_word, CONST0_RTX (word_mode));
2819 }
2820
2821 /* Find the largest integer mode that can be used to copy all or as
2822 many bits as possible of the structure if the target supports larger
2823 copies. There are too many corner cases here w.r.t. alignment of
2824 the reads/writes, so if there is any padding just use single-byte
2825 operations. */
2826 opt_scalar_int_mode mode_iter;
2827 if (padding_correction == 0 && !STRICT_ALIGNMENT)
2828 {
2829 FOR_EACH_MODE_FROM (mode_iter, min_mode)
2830 {
2831 unsigned int msize = GET_MODE_BITSIZE (mode_iter.require ());
2832 if (msize <= ((bytes * BITS_PER_UNIT) - bitpos)
2833 && msize <= BITS_PER_WORD)
2834 bitsize = msize;
2835 else
2836 break;
2837 }
2838 }
2839
2840 /* We need a new source operand each time bitpos is on a word
2841 boundary. */
2842 if (bitpos % BITS_PER_WORD == 0)
2843 src_word = operand_subword_force (x, bitpos / BITS_PER_WORD, BLKmode);
2844
2845 /* Use bitpos for the source extraction (left justified) and
2846 xbitpos for the destination store (right justified). */
2847 store_bit_field (dst_word, bitsize, xbitpos % BITS_PER_WORD,
2848 0, 0, word_mode,
2849 extract_bit_field (src_word, bitsize,
2850 bitpos % BITS_PER_WORD, 1,
2851 NULL_RTX, word_mode, word_mode,
2852 false, NULL),
2853 false);
2854 }
2855
2856 if (mode == BLKmode)
2857 {
2858 /* Find the smallest integer mode large enough to hold the
2859 entire structure. */
2860 opt_scalar_int_mode mode_iter;
2861 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
2862 if (GET_MODE_SIZE (mode_iter.require ()) >= bytes)
2863 break;
2864
2865 /* A suitable mode should have been found. */
2866 mode = mode_iter.require ();
2867 }
2868
2869 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (word_mode))
2870 dst_mode = word_mode;
2871 else
2872 dst_mode = mode;
2873 dst = gen_reg_rtx (dst_mode);
2874
2875 for (i = 0; i < n_regs; i++)
2876 emit_move_insn (operand_subword (dst, i, 0, dst_mode), dst_words[i]);
2877
2878 if (mode != dst_mode)
2879 dst = gen_lowpart (mode, dst);
2880
2881 return dst;
2882 }
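/* Typical use (a sketch; the surrounding names are assumptions): when
   expanding `return retval;' for a small BLKmode type that the ABI
   returns in registers, the expander can do roughly

     rtx val = copy_blkmode_to_reg (mode_of_return_reg, retval);
     if (val)
       emit_move_insn (return_reg, val);

   where a NULL result means the type has zero size. */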
2883
2884 /* Add a USE expression for REG to the (possibly empty) list pointed
2885 to by CALL_FUSAGE. REG must denote a hard register. */
2886
2887 void
2888 use_reg_mode (rtx *call_fusage, rtx reg, machine_mode mode)
2889 {
2890 gcc_assert (REG_P (reg));
2891
2892 if (!HARD_REGISTER_P (reg))
2893 return;
2894
2895 *call_fusage
2896 = gen_rtx_EXPR_LIST (mode, gen_rtx_USE (VOIDmode, reg), *call_fusage);
2897 }
2898
2899 /* Add a CLOBBER expression for REG to the (possibly empty) list pointed
2900 to by CALL_FUSAGE. REG must denote a hard register. */
2901
2902 void
2903 clobber_reg_mode (rtx *call_fusage, rtx reg, machine_mode mode)
2904 {
2905 gcc_assert (REG_P (reg) && REGNO (reg) < FIRST_PSEUDO_REGISTER);
2906
2907 *call_fusage
2908 = gen_rtx_EXPR_LIST (mode, gen_rtx_CLOBBER (VOIDmode, reg), *call_fusage);
2909 }
2910
2911 /* Add USE expressions to *CALL_FUSAGE for each of NREGS consecutive regs,
2912 starting at REGNO. All of these registers must be hard registers. */
2913
2914 void
2915 use_regs (rtx *call_fusage, int regno, int nregs)
2916 {
2917 int i;
2918
2919 gcc_assert (regno + nregs <= FIRST_PSEUDO_REGISTER);
2920
2921 for (i = 0; i < nregs; i++)
2922 use_reg (call_fusage, regno_reg_rtx[regno + i]);
2923 }
2924
2925 /* Add USE expressions to *CALL_FUSAGE for each REG contained in the
2926 PARALLEL REGS. This is for calls that pass values in multiple
2927 non-contiguous locations. The Irix 6 ABI has examples of this. */
2928
2929 void
2930 use_group_regs (rtx *call_fusage, rtx regs)
2931 {
2932 int i;
2933
2934 for (i = 0; i < XVECLEN (regs, 0); i++)
2935 {
2936 rtx reg = XEXP (XVECEXP (regs, 0, i), 0);
2937
2938 /* A NULL entry means the parameter goes both on the stack and in
2939 registers. This can also be a MEM for targets that pass values
2940 partially on the stack and partially in registers. */
2941 if (reg != 0 && REG_P (reg))
2942 use_reg (call_fusage, reg);
2943 }
2944 }
2945
2946 /* Return the defining gimple statement for SSA_NAME NAME if it is an
2947 assignment and the code of the expression on the RHS is CODE. Return
2948 NULL otherwise. */
2949
2950 static gimple *
2951 get_def_for_expr (tree name, enum tree_code code)
2952 {
2953 gimple *def_stmt;
2954
2955 if (TREE_CODE (name) != SSA_NAME)
2956 return NULL;
2957
2958 def_stmt = get_gimple_for_ssa_name (name);
2959 if (!def_stmt
2960 || gimple_assign_rhs_code (def_stmt) != code)
2961 return NULL;
2962
2963 return def_stmt;
2964 }
2965
2966 /* Return the defining gimple statement for SSA_NAME NAME if it is an
2967 assignment and the class of the expression on the RHS is CLASS. Return
2968 NULL otherwise. */
2969
2970 static gimple *
2971 get_def_for_expr_class (tree name, enum tree_code_class tclass)
2972 {
2973 gimple *def_stmt;
2974
2975 if (TREE_CODE (name) != SSA_NAME)
2976 return NULL;
2977
2978 def_stmt = get_gimple_for_ssa_name (name);
2979 if (!def_stmt
2980 || TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt)) != tclass)
2981 return NULL;
2982
2983 return def_stmt;
2984 }
2985 \f
2986 /* Write zeros through the storage of OBJECT. If OBJECT has BLKmode, SIZE is
2987 its length in bytes. */
2988
2989 rtx
2990 clear_storage_hints (rtx object, rtx size, enum block_op_methods method,
2991 unsigned int expected_align, HOST_WIDE_INT expected_size,
2992 unsigned HOST_WIDE_INT min_size,
2993 unsigned HOST_WIDE_INT max_size,
2994 unsigned HOST_WIDE_INT probable_max_size)
2995 {
2996 machine_mode mode = GET_MODE (object);
2997 unsigned int align;
2998
2999 gcc_assert (method == BLOCK_OP_NORMAL || method == BLOCK_OP_TAILCALL);
3000
3001 /* If OBJECT is not BLKmode and SIZE is the same size as its mode,
3002 just move a zero. Otherwise, do this a piece at a time. */
3003 poly_int64 size_val;
3004 if (mode != BLKmode
3005 && poly_int_rtx_p (size, &size_val)
3006 && known_eq (size_val, GET_MODE_SIZE (mode)))
3007 {
3008 rtx zero = CONST0_RTX (mode);
3009 if (zero != NULL)
3010 {
3011 emit_move_insn (object, zero);
3012 return NULL;
3013 }
3014
3015 if (COMPLEX_MODE_P (mode))
3016 {
3017 zero = CONST0_RTX (GET_MODE_INNER (mode));
3018 if (zero != NULL)
3019 {
3020 write_complex_part (object, zero, 0);
3021 write_complex_part (object, zero, 1);
3022 return NULL;
3023 }
3024 }
3025 }
3026
3027 if (size == const0_rtx)
3028 return NULL;
3029
3030 align = MEM_ALIGN (object);
3031
3032 if (CONST_INT_P (size)
3033 && targetm.use_by_pieces_infrastructure_p (INTVAL (size), align,
3034 CLEAR_BY_PIECES,
3035 optimize_insn_for_speed_p ()))
3036 clear_by_pieces (object, INTVAL (size), align);
3037 else if (set_storage_via_setmem (object, size, const0_rtx, align,
3038 expected_align, expected_size,
3039 min_size, max_size, probable_max_size))
3040 ;
3041 else if (ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (object)))
3042 return set_storage_via_libcall (object, size, const0_rtx,
3043 method == BLOCK_OP_TAILCALL);
3044 else
3045 gcc_unreachable ();
3046
3047 return NULL;
3048 }
3049
3050 rtx
3051 clear_storage (rtx object, rtx size, enum block_op_methods method)
3052 {
3053 unsigned HOST_WIDE_INT max, min = 0;
3054 if (GET_CODE (size) == CONST_INT)
3055 min = max = UINTVAL (size);
3056 else
3057 max = GET_MODE_MASK (GET_MODE (size));
3058 return clear_storage_hints (object, size, method, 0, -1, min, max, max);
3059 }
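/* Example (illustrative): zeroing a 64-byte BLKmode object, letting the
   code above pick between by-pieces clearing, a setmem pattern, and a
   memset libcall:

     clear_storage (object, GEN_INT (64), BLOCK_OP_NORMAL);
*/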
3060
3061
3062 /* A subroutine of clear_storage. Expand a call to memset.
3063 Return the return value of memset, 0 otherwise. */
3064
3065 rtx
3066 set_storage_via_libcall (rtx object, rtx size, rtx val, bool tailcall)
3067 {
3068 tree call_expr, fn, object_tree, size_tree, val_tree;
3069 machine_mode size_mode;
3070
3071 object = copy_addr_to_reg (XEXP (object, 0));
3072 object_tree = make_tree (ptr_type_node, object);
3073
3074 if (!CONST_INT_P (val))
3075 val = convert_to_mode (TYPE_MODE (integer_type_node), val, 1);
3076 val_tree = make_tree (integer_type_node, val);
3077
3078 size_mode = TYPE_MODE (sizetype);
3079 size = convert_to_mode (size_mode, size, 1);
3080 size = copy_to_mode_reg (size_mode, size);
3081 size_tree = make_tree (sizetype, size);
3082
3083 /* It is incorrect to use the libcall calling conventions for calls to
3084 memset because it can be provided by the user. */
3085 fn = builtin_decl_implicit (BUILT_IN_MEMSET);
3086 call_expr = build_call_expr (fn, 3, object_tree, val_tree, size_tree);
3087 CALL_EXPR_TAILCALL (call_expr) = tailcall;
3088
3089 return expand_call (call_expr, NULL_RTX, false);
3090 }
3091 \f
3092 /* Expand a setmem pattern; return true if successful. */
3093
3094 bool
3095 set_storage_via_setmem (rtx object, rtx size, rtx val, unsigned int align,
3096 unsigned int expected_align, HOST_WIDE_INT expected_size,
3097 unsigned HOST_WIDE_INT min_size,
3098 unsigned HOST_WIDE_INT max_size,
3099 unsigned HOST_WIDE_INT probable_max_size)
3100 {
3101 /* Try the most limited insn first, because there's no point
3102 including more than one in the machine description unless
3103 the more limited one has some advantage. */
3104
3105 if (expected_align < align)
3106 expected_align = align;
3107 if (expected_size != -1)
3108 {
3109 if ((unsigned HOST_WIDE_INT)expected_size > max_size)
3110 expected_size = max_size;
3111 if ((unsigned HOST_WIDE_INT)expected_size < min_size)
3112 expected_size = min_size;
3113 }
3114
3115 opt_scalar_int_mode mode_iter;
3116 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
3117 {
3118 scalar_int_mode mode = mode_iter.require ();
3119 enum insn_code code = direct_optab_handler (setmem_optab, mode);
3120
3121 if (code != CODE_FOR_nothing
3122 /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
3123 here because if SIZE is less than the mode mask, as it is
3124 returned by the macro, it will definitely be less than the
3125 actual mode mask. Since SIZE is within the Pmode address
3126 space, we limit MODE to Pmode. */
3127 && ((CONST_INT_P (size)
3128 && ((unsigned HOST_WIDE_INT) INTVAL (size)
3129 <= (GET_MODE_MASK (mode) >> 1)))
3130 || max_size <= (GET_MODE_MASK (mode) >> 1)
3131 || GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode)))
3132 {
3133 class expand_operand ops[9];
3134 unsigned int nops;
3135
3136 nops = insn_data[(int) code].n_generator_args;
3137 gcc_assert (nops == 4 || nops == 6 || nops == 8 || nops == 9);
3138
3139 create_fixed_operand (&ops[0], object);
3140 /* The check above guarantees that this size conversion is valid. */
3141 create_convert_operand_to (&ops[1], size, mode, true);
3142 create_convert_operand_from (&ops[2], val, byte_mode, true);
3143 create_integer_operand (&ops[3], align / BITS_PER_UNIT);
3144 if (nops >= 6)
3145 {
3146 create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
3147 create_integer_operand (&ops[5], expected_size);
3148 }
3149 if (nops >= 8)
3150 {
3151 create_integer_operand (&ops[6], min_size);
3152 /* If we cannot represent the maximal size,
3153 make parameter NULL. */
3154 if ((HOST_WIDE_INT) max_size != -1)
3155 create_integer_operand (&ops[7], max_size);
3156 else
3157 create_fixed_operand (&ops[7], NULL);
3158 }
3159 if (nops == 9)
3160 {
3161 /* If we cannot represent the maximal size,
3162 make parameter NULL. */
3163 if ((HOST_WIDE_INT) probable_max_size != -1)
3164 create_integer_operand (&ops[8], probable_max_size);
3165 else
3166 create_fixed_operand (&ops[8], NULL);
3167 }
3168 if (maybe_expand_insn (code, nops, ops))
3169 return true;
3170 }
3171 }
3172
3173 return false;
3174 }
3175
3176 \f
3177 /* Write to one of the components of the complex value CPLX. Write VAL to
3178 the real part if IMAG_P is false, and the imaginary part if it's true. */
3179
3180 void
3181 write_complex_part (rtx cplx, rtx val, bool imag_p)
3182 {
3183 machine_mode cmode;
3184 scalar_mode imode;
3185 unsigned ibitsize;
3186
3187 if (GET_CODE (cplx) == CONCAT)
3188 {
3189 emit_move_insn (XEXP (cplx, imag_p), val);
3190 return;
3191 }
3192
3193 cmode = GET_MODE (cplx);
3194 imode = GET_MODE_INNER (cmode);
3195 ibitsize = GET_MODE_BITSIZE (imode);
3196
3197 /* For MEMs simplify_gen_subreg may generate an invalid new address
3198 because, e.g., the original address is considered mode-dependent
3199 by the target, which restricts simplify_subreg from invoking
3200 adjust_address_nv. Instead of preparing fallback support for an
3201 invalid address, we call adjust_address_nv directly. */
3202 if (MEM_P (cplx))
3203 {
3204 emit_move_insn (adjust_address_nv (cplx, imode,
3205 imag_p ? GET_MODE_SIZE (imode) : 0),
3206 val);
3207 return;
3208 }
3209
3210 /* If the sub-object is at least word sized, then we know that subregging
3211 will work. This special case is important, since store_bit_field
3212 wants to operate on integer modes, and there's rarely an OImode to
3213 correspond to TCmode. */
3214 if (ibitsize >= BITS_PER_WORD
3215 /* For hard regs we have exact predicates. Assume we can split
3216 the original object if it spans an even number of hard regs.
3217 This special case is important for SCmode on 64-bit platforms
3218 where the natural size of floating-point regs is 32-bit. */
3219 || (REG_P (cplx)
3220 && REGNO (cplx) < FIRST_PSEUDO_REGISTER
3221 && REG_NREGS (cplx) % 2 == 0))
3222 {
3223 rtx part = simplify_gen_subreg (imode, cplx, cmode,
3224 imag_p ? GET_MODE_SIZE (imode) : 0);
3225 if (part)
3226 {
3227 emit_move_insn (part, val);
3228 return;
3229 }
3230 else
3231 /* simplify_gen_subreg may fail for sub-word MEMs. */
3232 gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
3233 }
3234
3235 store_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0, 0, 0, imode, val,
3236 false);
3237 }
3238
3239 /* Extract one of the components of the complex value CPLX. Extract the
3240 real part if IMAG_P is false, and the imaginary part if it's true. */
3241
3242 rtx
3243 read_complex_part (rtx cplx, bool imag_p)
3244 {
3245 machine_mode cmode;
3246 scalar_mode imode;
3247 unsigned ibitsize;
3248
3249 if (GET_CODE (cplx) == CONCAT)
3250 return XEXP (cplx, imag_p);
3251
3252 cmode = GET_MODE (cplx);
3253 imode = GET_MODE_INNER (cmode);
3254 ibitsize = GET_MODE_BITSIZE (imode);
3255
3256 /* Special case reads from complex constants that got spilled to memory. */
3257 if (MEM_P (cplx) && GET_CODE (XEXP (cplx, 0)) == SYMBOL_REF)
3258 {
3259 tree decl = SYMBOL_REF_DECL (XEXP (cplx, 0));
3260 if (decl && TREE_CODE (decl) == COMPLEX_CST)
3261 {
3262 tree part = imag_p ? TREE_IMAGPART (decl) : TREE_REALPART (decl);
3263 if (CONSTANT_CLASS_P (part))
3264 return expand_expr (part, NULL_RTX, imode, EXPAND_NORMAL);
3265 }
3266 }
3267
3268 /* For MEMs simplify_gen_subreg may generate an invalid new address
3269 because, e.g., the original address is considered mode-dependent
3270 by the target, which restricts simplify_subreg from invoking
3271 adjust_address_nv. Instead of preparing fallback support for an
3272 invalid address, we call adjust_address_nv directly. */
3273 if (MEM_P (cplx))
3274 return adjust_address_nv (cplx, imode,
3275 imag_p ? GET_MODE_SIZE (imode) : 0);
3276
3277 /* If the sub-object is at least word sized, then we know that subregging
3278 will work. This special case is important, since extract_bit_field
3279 wants to operate on integer modes, and there's rarely an OImode to
3280 correspond to TCmode. */
3281 if (ibitsize >= BITS_PER_WORD
3282 /* For hard regs we have exact predicates. Assume we can split
3283 the original object if it spans an even number of hard regs.
3284 This special case is important for SCmode on 64-bit platforms
3285 where the natural size of floating-point regs is 32-bit. */
3286 || (REG_P (cplx)
3287 && REGNO (cplx) < FIRST_PSEUDO_REGISTER
3288 && REG_NREGS (cplx) % 2 == 0))
3289 {
3290 rtx ret = simplify_gen_subreg (imode, cplx, cmode,
3291 imag_p ? GET_MODE_SIZE (imode) : 0);
3292 if (ret)
3293 return ret;
3294 else
3295 /* simplify_gen_subreg may fail for sub-word MEMs. */
3296 gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
3297 }
3298
3299 return extract_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0,
3300 true, NULL_RTX, imode, imode, false, NULL);
3301 }
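/* Illustrative use of the two helpers above (assumed caller code):
   swapping the parts of a complex value CPLX into TARGET:

     rtx re = read_complex_part (cplx, false);
     rtx im = read_complex_part (cplx, true);
     write_complex_part (target, im, false);
     write_complex_part (target, re, true);
*/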
3302 \f
3303 /* A subroutine of emit_move_insn_1. Yet another lowpart generator.
3304 NEW_MODE and OLD_MODE are the same size. Return NULL if X cannot be
3305 represented in NEW_MODE. If FORCE is true, this will never happen, as
3306 we'll force-create a SUBREG if needed. */
3307
3308 static rtx
3309 emit_move_change_mode (machine_mode new_mode,
3310 machine_mode old_mode, rtx x, bool force)
3311 {
3312 rtx ret;
3313
3314 if (push_operand (x, GET_MODE (x)))
3315 {
3316 ret = gen_rtx_MEM (new_mode, XEXP (x, 0));
3317 MEM_COPY_ATTRIBUTES (ret, x);
3318 }
3319 else if (MEM_P (x))
3320 {
3321 /* We don't have to worry about changing the address since the
3322 size in bytes is supposed to be the same. */
3323 if (reload_in_progress)
3324 {
3325 /* Copy the MEM to change the mode and move any
3326 substitutions from the old MEM to the new one. */
3327 ret = adjust_address_nv (x, new_mode, 0);
3328 copy_replacements (x, ret);
3329 }
3330 else
3331 ret = adjust_address (x, new_mode, 0);
3332 }
3333 else
3334 {
3335 /* Note that we do want simplify_subreg's behavior of validating
3336 that the new mode is ok for a hard register. If we were to use
3337 simplify_gen_subreg, we would create the subreg, but would
3338 probably run into the target not being able to implement it. */
3339 /* Except, of course, when FORCE is true, when this is exactly what
3340 we want. Which is needed for CCmodes on some targets. */
3341 if (force)
3342 ret = simplify_gen_subreg (new_mode, x, old_mode, 0);
3343 else
3344 ret = simplify_subreg (new_mode, x, old_mode, 0);
3345 }
3346
3347 return ret;
3348 }
3349
3350 /* A subroutine of emit_move_insn_1. Generate a move from Y into X using
3351 an integer mode of the same size as MODE. Returns the instruction
3352 emitted, or NULL if such a move could not be generated. */
3353
3354 static rtx_insn *
3355 emit_move_via_integer (machine_mode mode, rtx x, rtx y, bool force)
3356 {
3357 scalar_int_mode imode;
3358 enum insn_code code;
3359
3360 /* There must exist a mode of the exact size we require. */
3361 if (!int_mode_for_mode (mode).exists (&imode))
3362 return NULL;
3363
3364 /* The target must support moves in this mode. */
3365 code = optab_handler (mov_optab, imode);
3366 if (code == CODE_FOR_nothing)
3367 return NULL;
3368
3369 x = emit_move_change_mode (imode, mode, x, force);
3370 if (x == NULL_RTX)
3371 return NULL;
3372 y = emit_move_change_mode (imode, mode, y, force);
3373 if (y == NULL_RTX)
3374 return NULL;
3375 return emit_insn (GEN_FCN (code) (x, y));
3376 }
3377
3378 /* A subroutine of emit_move_insn_1. X is a push_operand in MODE.
3379 Return an equivalent MEM that does not use an auto-increment. */
3380
3381 rtx
3382 emit_move_resolve_push (machine_mode mode, rtx x)
3383 {
3384 enum rtx_code code = GET_CODE (XEXP (x, 0));
3385 rtx temp;
3386
3387 poly_int64 adjust = GET_MODE_SIZE (mode);
3388 #ifdef PUSH_ROUNDING
3389 adjust = PUSH_ROUNDING (adjust);
3390 #endif
3391 if (code == PRE_DEC || code == POST_DEC)
3392 adjust = -adjust;
3393 else if (code == PRE_MODIFY || code == POST_MODIFY)
3394 {
3395 rtx expr = XEXP (XEXP (x, 0), 1);
3396
3397 gcc_assert (GET_CODE (expr) == PLUS || GET_CODE (expr) == MINUS);
3398 poly_int64 val = rtx_to_poly_int64 (XEXP (expr, 1));
3399 if (GET_CODE (expr) == MINUS)
3400 val = -val;
3401 gcc_assert (known_eq (adjust, val) || known_eq (adjust, -val));
3402 adjust = val;
3403 }
3404
3405 /* Do not use anti_adjust_stack, since we don't want to update
3406 stack_pointer_delta. */
3407 temp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
3408 gen_int_mode (adjust, Pmode), stack_pointer_rtx,
3409 0, OPTAB_LIB_WIDEN);
3410 if (temp != stack_pointer_rtx)
3411 emit_move_insn (stack_pointer_rtx, temp);
3412
3413 switch (code)
3414 {
3415 case PRE_INC:
3416 case PRE_DEC:
3417 case PRE_MODIFY:
3418 temp = stack_pointer_rtx;
3419 break;
3420 case POST_INC:
3421 case POST_DEC:
3422 case POST_MODIFY:
3423 temp = plus_constant (Pmode, stack_pointer_rtx, -adjust);
3424 break;
3425 default:
3426 gcc_unreachable ();
3427 }
3428
3429 return replace_equiv_address (x, temp);
3430 }
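/* Example (illustrative, assuming PUSH_ROUNDING is the identity): for
   X = (mem:DI (pre_dec (reg sp))) the code above emits sp = sp - 8 and
   returns (mem:DI (reg sp)), an equivalent MEM whose address has no
   side effect. */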
3431
3432 /* A subroutine of emit_move_complex. Generate a move from Y into X.
3433 X is known to satisfy push_operand, and MODE is known to be complex.
3434 Returns the last instruction emitted. */
3435
3436 rtx_insn *
3437 emit_move_complex_push (machine_mode mode, rtx x, rtx y)
3438 {
3439 scalar_mode submode = GET_MODE_INNER (mode);
3440 bool imag_first;
3441
3442 #ifdef PUSH_ROUNDING
3443 poly_int64 submodesize = GET_MODE_SIZE (submode);
3444
3445 /* In case we output to the stack, but the size is smaller than the
3446 machine can push exactly, we need to use move instructions. */
3447 if (maybe_ne (PUSH_ROUNDING (submodesize), submodesize))
3448 {
3449 x = emit_move_resolve_push (mode, x);
3450 return emit_move_insn (x, y);
3451 }
3452 #endif
3453
3454 /* Note that the real part always precedes the imag part in memory
3455 regardless of machine's endianness. */
3456 switch (GET_CODE (XEXP (x, 0)))
3457 {
3458 case PRE_DEC:
3459 case POST_DEC:
3460 imag_first = true;
3461 break;
3462 case PRE_INC:
3463 case POST_INC:
3464 imag_first = false;
3465 break;
3466 default:
3467 gcc_unreachable ();
3468 }
3469
3470 emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
3471 read_complex_part (y, imag_first));
3472 return emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
3473 read_complex_part (y, !imag_first));
3474 }
3475
3476 /* A subroutine of emit_move_complex. Perform the move from Y to X
3477 via two moves of the parts. Returns the last instruction emitted. */
3478
3479 rtx_insn *
3480 emit_move_complex_parts (rtx x, rtx y)
3481 {
3482 /* Show the output dies here. This is necessary for SUBREGs
3483 of pseudos since we cannot track their lifetimes correctly;
3484 hard regs shouldn't appear here except as return values. */
3485 if (!reload_completed && !reload_in_progress
3486 && REG_P (x) && !reg_overlap_mentioned_p (x, y))
3487 emit_clobber (x);
3488
3489 write_complex_part (x, read_complex_part (y, false), false);
3490 write_complex_part (x, read_complex_part (y, true), true);
3491
3492 return get_last_insn ();
3493 }
3494
3495 /* A subroutine of emit_move_insn_1. Generate a move from Y into X.
3496 MODE is known to be complex. Returns the last instruction emitted. */
3497
3498 static rtx_insn *
3499 emit_move_complex (machine_mode mode, rtx x, rtx y)
3500 {
3501 bool try_int;
3502
3503 /* Need to take special care for pushes, to maintain proper ordering
3504 of the data, and possibly extra padding. */
3505 if (push_operand (x, mode))
3506 return emit_move_complex_push (mode, x, y);
3507
3508 /* See if we can coerce the target into moving both values at once, except
3509 for floating point where we favor moving as parts if this is easy. */
3510 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3511 && optab_handler (mov_optab, GET_MODE_INNER (mode)) != CODE_FOR_nothing
3512 && !(REG_P (x)
3513 && HARD_REGISTER_P (x)
3514 && REG_NREGS (x) == 1)
3515 && !(REG_P (y)
3516 && HARD_REGISTER_P (y)
3517 && REG_NREGS (y) == 1))
3518 try_int = false;
3519 /* Not possible if the values are inherently not adjacent. */
3520 else if (GET_CODE (x) == CONCAT || GET_CODE (y) == CONCAT)
3521 try_int = false;
3522 /* Is possible if both are registers (or subregs of registers). */
3523 else if (register_operand (x, mode) && register_operand (y, mode))
3524 try_int = true;
3525 /* If one of the operands is a memory, and alignment constraints
3526 are friendly enough, we may be able to do combined memory operations.
3527 We do not attempt this if Y is a constant because that combination is
3528 usually handled better by the move-by-parts code below. */
3529 else if ((MEM_P (x) ? !CONSTANT_P (y) : MEM_P (y))
3530 && (!STRICT_ALIGNMENT
3531 || get_mode_alignment (mode) == BIGGEST_ALIGNMENT))
3532 try_int = true;
3533 else
3534 try_int = false;
3535
3536 if (try_int)
3537 {
3538 rtx_insn *ret;
3539
3540 /* For memory to memory moves, optimal behavior can be had with the
3541 existing block move logic. */
3542 if (MEM_P (x) && MEM_P (y))
3543 {
3544 emit_block_move (x, y, gen_int_mode (GET_MODE_SIZE (mode), Pmode),
3545 BLOCK_OP_NO_LIBCALL);
3546 return get_last_insn ();
3547 }
3548
3549 ret = emit_move_via_integer (mode, x, y, true);
3550 if (ret)
3551 return ret;
3552 }
3553
3554 return emit_move_complex_parts (x, y);
3555 }
3556
3557 /* A subroutine of emit_move_insn_1. Generate a move from Y into X.
3558 MODE is known to be MODE_CC. Returns the last instruction emitted. */
3559
3560 static rtx_insn *
3561 emit_move_ccmode (machine_mode mode, rtx x, rtx y)
3562 {
3563 rtx_insn *ret;
3564
3565 /* Assume all MODE_CC modes are equivalent; if we have movcc, use it. */
3566 if (mode != CCmode)
3567 {
3568 enum insn_code code = optab_handler (mov_optab, CCmode);
3569 if (code != CODE_FOR_nothing)
3570 {
3571 x = emit_move_change_mode (CCmode, mode, x, true);
3572 y = emit_move_change_mode (CCmode, mode, y, true);
3573 return emit_insn (GEN_FCN (code) (x, y));
3574 }
3575 }
3576
3577 /* Otherwise, find the MODE_INT mode of the same width. */
3578 ret = emit_move_via_integer (mode, x, y, false);
3579 gcc_assert (ret != NULL);
3580 return ret;
3581 }
3582
3583 /* Return true if word I of OP lies entirely in the
3584 undefined bits of a paradoxical subreg. */
3585
3586 static bool
3587 undefined_operand_subword_p (const_rtx op, int i)
3588 {
3589 if (GET_CODE (op) != SUBREG)
3590 return false;
3591 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3592 poly_int64 offset = i * UNITS_PER_WORD + subreg_memory_offset (op);
3593 return (known_ge (offset, GET_MODE_SIZE (innermostmode))
3594 || known_le (offset, -UNITS_PER_WORD));
3595 }
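/* For example, on a 64-bit target (subreg:TI (reg:DI ...) 0) is a
 paradoxical subreg: only one of the two 8-byte words of the TImode
 value comes from the DImode register, and the other lies entirely in
 the undefined padding (before or after the inner value, depending on
 endianness), so this function returns true for that word. */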
3596
3597 /* A subroutine of emit_move_insn_1. Generate a move from Y into X.
3598 MODE is any multi-word or full-word mode that lacks a move_insn
3599 pattern. Note that you will get better code if you define such
3600 patterns, even if they must turn into multiple assembler instructions. */
3601
3602 static rtx_insn *
3603 emit_move_multi_word (machine_mode mode, rtx x, rtx y)
3604 {
3605 rtx_insn *last_insn = 0;
3606 rtx_insn *seq;
3607 rtx inner;
3608 bool need_clobber;
3609 int i, mode_size;
3610
3611 /* This function can only handle cases where the number of words is
3612 known at compile time. */
3613 mode_size = GET_MODE_SIZE (mode).to_constant ();
3614 gcc_assert (mode_size >= UNITS_PER_WORD);
3615
3616 /* If X is a push on the stack, do the push now and replace
3617 X with a reference to the stack pointer. */
3618 if (push_operand (x, mode))
3619 x = emit_move_resolve_push (mode, x);
3620
3621 /* If we are in reload, see if either operand is a MEM whose address
3622 is scheduled for replacement. */
3623 if (reload_in_progress && MEM_P (x)
3624 && (inner = find_replacement (&XEXP (x, 0))) != XEXP (x, 0))
3625 x = replace_equiv_address_nv (x, inner);
3626 if (reload_in_progress && MEM_P (y)
3627 && (inner = find_replacement (&XEXP (y, 0))) != XEXP (y, 0))
3628 y = replace_equiv_address_nv (y, inner);
3629
3630 start_sequence ();
3631
3632 need_clobber = false;
3633 for (i = 0; i < CEIL (mode_size, UNITS_PER_WORD); i++)
3634 {
3635 rtx xpart = operand_subword (x, i, 1, mode);
3636 rtx ypart;
3637
3638 /* Do not generate code for a move if it would come entirely
3639 from the undefined bits of a paradoxical subreg. */
3640 if (undefined_operand_subword_p (y, i))
3641 continue;
3642
3643 ypart = operand_subword (y, i, 1, mode);
3644
3645 /* If we can't get a part of Y, put Y into memory if it is a
3646 constant. Otherwise, force it into a register. Then we must
3647 be able to get a part of Y. */
3648 if (ypart == 0 && CONSTANT_P (y))
3649 {
3650 y = use_anchored_address (force_const_mem (mode, y));
3651 ypart = operand_subword (y, i, 1, mode);
3652 }
3653 else if (ypart == 0)
3654 ypart = operand_subword_force (y, i, mode);
3655
3656 gcc_assert (xpart && ypart);
3657
3658 need_clobber |= (GET_CODE (xpart) == SUBREG);
3659
3660 last_insn = emit_move_insn (xpart, ypart);
3661 }
3662
3663 seq = get_insns ();
3664 end_sequence ();
3665
3666 /* Show the output dies here. This is necessary for SUBREGs
3667 of pseudos since we cannot track their lifetimes correctly;
3668 hard regs shouldn't appear here except as return values.
3669 We never want to emit such a clobber after reload. */
3670 if (x != y
3671 && ! (reload_in_progress || reload_completed)
3672 && need_clobber != 0)
3673 emit_clobber (x);
3674
3675 emit_insn (seq);
3676
3677 return last_insn;
3678 }
3679
3680 /* Low level part of emit_move_insn.
3681 Called just like emit_move_insn, but assumes X and Y
3682 are basically valid. */
3683
3684 rtx_insn *
3685 emit_move_insn_1 (rtx x, rtx y)
3686 {
3687 machine_mode mode = GET_MODE (x);
3688 enum insn_code code;
3689
3690 gcc_assert ((unsigned int) mode < (unsigned int) MAX_MACHINE_MODE);
3691
3692 code = optab_handler (mov_optab, mode);
3693 if (code != CODE_FOR_nothing)
3694 return emit_insn (GEN_FCN (code) (x, y));
3695
3696 /* Expand complex moves by moving real part and imag part. */
3697 if (COMPLEX_MODE_P (mode))
3698 return emit_move_complex (mode, x, y);
3699
3700 if (GET_MODE_CLASS (mode) == MODE_DECIMAL_FLOAT
3701 || ALL_FIXED_POINT_MODE_P (mode))
3702 {
3703 rtx_insn *result = emit_move_via_integer (mode, x, y, true);
3704
3705 /* If we can't find an integer mode, use multi words. */
3706 if (result)
3707 return result;
3708 else
3709 return emit_move_multi_word (mode, x, y);
3710 }
3711
3712 if (GET_MODE_CLASS (mode) == MODE_CC)
3713 return emit_move_ccmode (mode, x, y);
3714
3715 /* Try using a move pattern for the corresponding integer mode. This is
3716 only safe when simplify_subreg can convert MODE constants into integer
3717 constants. At present, it can only do this reliably if the value
3718 fits within a HOST_WIDE_INT. */
3719 if (!CONSTANT_P (y)
3720 || known_le (GET_MODE_BITSIZE (mode), HOST_BITS_PER_WIDE_INT))
3721 {
3722 rtx_insn *ret = emit_move_via_integer (mode, x, y, lra_in_progress);
3723
3724 if (ret)
3725 {
3726 if (! lra_in_progress || recog (PATTERN (ret), ret, 0) >= 0)
3727 return ret;
3728 }
3729 }
3730
3731 return emit_move_multi_word (mode, x, y);
3732 }
3733
3734 /* Generate code to copy Y into X.
3735 Both Y and X must have the same mode, except that
3736 Y can be a constant with VOIDmode.
3737 This mode cannot be BLKmode; use emit_block_move for that.
3738
3739 Return the last instruction emitted. */
3740
3741 rtx_insn *
3742 emit_move_insn (rtx x, rtx y)
3743 {
3744 machine_mode mode = GET_MODE (x);
3745 rtx y_cst = NULL_RTX;
3746 rtx_insn *last_insn;
3747 rtx set;
3748
3749 gcc_assert (mode != BLKmode
3750 && (GET_MODE (y) == mode || GET_MODE (y) == VOIDmode));
3751
3752 if (CONSTANT_P (y))
3753 {
3754 if (optimize
3755 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
3756 && (last_insn = compress_float_constant (x, y)))
3757 return last_insn;
3758
3759 y_cst = y;
3760
3761 if (!targetm.legitimate_constant_p (mode, y))
3762 {
3763 y = force_const_mem (mode, y);
3764
3765 /* If the target's cannot_force_const_mem prevented the spill,
3766 assume that the target's move expanders will also take care
3767 of the non-legitimate constant. */
3768 if (!y)
3769 y = y_cst;
3770 else
3771 y = use_anchored_address (y);
3772 }
3773 }
3774
3775 /* If X or Y are memory references, verify that their addresses are valid
3776 for the machine. */
3777 if (MEM_P (x)
3778 && (! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
3779 MEM_ADDR_SPACE (x))
3780 && ! push_operand (x, GET_MODE (x))))
3781 x = validize_mem (x);
3782
3783 if (MEM_P (y)
3784 && ! memory_address_addr_space_p (GET_MODE (y), XEXP (y, 0),
3785 MEM_ADDR_SPACE (y)))
3786 y = validize_mem (y);
3787
3788 gcc_assert (mode != BLKmode);
3789
3790 last_insn = emit_move_insn_1 (x, y);
3791
3792 if (y_cst && REG_P (x)
3793 && (set = single_set (last_insn)) != NULL_RTX
3794 && SET_DEST (set) == x
3795 && ! rtx_equal_p (y_cst, SET_SRC (set)))
3796 set_unique_reg_note (last_insn, REG_EQUAL, copy_rtx (y_cst));
3797
3798 return last_insn;
3799 }
3800
3801 /* Generate the body of an instruction to copy Y into X.
3802 It may be a list of insns, if one insn isn't enough. */
3803
3804 rtx_insn *
3805 gen_move_insn (rtx x, rtx y)
3806 {
3807 rtx_insn *seq;
3808
3809 start_sequence ();
3810 emit_move_insn_1 (x, y);
3811 seq = get_insns ();
3812 end_sequence ();
3813 return seq;
3814 }
3815
3816 /* If Y is representable exactly in a narrower mode, and the target can
3817 perform the extension directly from constant or memory, then emit the
3818 move as an extension. */
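/* For example, a DFmode constant such as 1.5 is exactly representable
 in SFmode, so on a target that can extend directly from an SFmode
 constant or memory operand (e.g. via extendsfdf2) the move can be
 emitted as a load of the smaller constant plus a FLOAT_EXTEND,
 provided the rtx costs computed below say this is no more expensive. */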
3819
3820 static rtx_insn *
3821 compress_float_constant (rtx x, rtx y)
3822 {
3823 machine_mode dstmode = GET_MODE (x);
3824 machine_mode orig_srcmode = GET_MODE (y);
3825 machine_mode srcmode;
3826 const REAL_VALUE_TYPE *r;
3827 int oldcost, newcost;
3828 bool speed = optimize_insn_for_speed_p ();
3829
3830 r = CONST_DOUBLE_REAL_VALUE (y);
3831
3832 if (targetm.legitimate_constant_p (dstmode, y))
3833 oldcost = set_src_cost (y, orig_srcmode, speed);
3834 else
3835 oldcost = set_src_cost (force_const_mem (dstmode, y), dstmode, speed);
3836
3837 FOR_EACH_MODE_UNTIL (srcmode, orig_srcmode)
3838 {
3839 enum insn_code ic;
3840 rtx trunc_y;
3841 rtx_insn *last_insn;
3842
3843 /* Skip if the target can't extend this way. */
3844 ic = can_extend_p (dstmode, srcmode, 0);
3845 if (ic == CODE_FOR_nothing)
3846 continue;
3847
3848 /* Skip if the narrowed value isn't exact. */
3849 if (! exact_real_truncate (srcmode, r))
3850 continue;
3851
3852 trunc_y = const_double_from_real_value (*r, srcmode);
3853
3854 if (targetm.legitimate_constant_p (srcmode, trunc_y))
3855 {
3856 /* Skip if the target needs extra instructions to perform
3857 the extension. */
3858 if (!insn_operand_matches (ic, 1, trunc_y))
3859 continue;
3860 /* This is valid, but may not be cheaper than the original. */
3861 newcost = set_src_cost (gen_rtx_FLOAT_EXTEND (dstmode, trunc_y),
3862 dstmode, speed);
3863 if (oldcost < newcost)
3864 continue;
3865 }
3866 else if (float_extend_from_mem[dstmode][srcmode])
3867 {
3868 trunc_y = force_const_mem (srcmode, trunc_y);
3869 /* This is valid, but may not be cheaper than the original. */
3870 newcost = set_src_cost (gen_rtx_FLOAT_EXTEND (dstmode, trunc_y),
3871 dstmode, speed);
3872 if (oldcost < newcost)
3873 continue;
3874 trunc_y = validize_mem (trunc_y);
3875 }
3876 else
3877 continue;
3878
3879 /* For CSE's benefit, force the compressed constant pool entry
3880 into a new pseudo. This constant may be used in different modes,
3881 and if not, combine will put things back together for us. */
3882 trunc_y = force_reg (srcmode, trunc_y);
3883
3884 /* If x is a hard register, perform the extension into a pseudo,
3885 so that e.g. stack realignment code is aware of it. */
3886 rtx target = x;
3887 if (REG_P (x) && HARD_REGISTER_P (x))
3888 target = gen_reg_rtx (dstmode);
3889
3890 emit_unop_insn (ic, target, trunc_y, UNKNOWN);
3891 last_insn = get_last_insn ();
3892
3893 if (REG_P (target))
3894 set_unique_reg_note (last_insn, REG_EQUAL, y);
3895
3896 if (target != x)
3897 return emit_move_insn (x, target);
3898 return last_insn;
3899 }
3900
3901 return NULL;
3902 }
3903 \f
3904 /* Pushing data onto the stack. */
3905
3906 /* Push a block of length SIZE (perhaps variable)
3907 and return an rtx to address the beginning of the block.
3908 The value may be virtual_outgoing_args_rtx.
3909
3910 EXTRA is the number of bytes of padding to push in addition to SIZE.
3911 BELOW nonzero means this padding comes at low addresses;
3912 otherwise, the padding comes at high addresses. */
3913
3914 rtx
3915 push_block (rtx size, poly_int64 extra, int below)
3916 {
3917 rtx temp;
3918
3919 size = convert_modes (Pmode, ptr_mode, size, 1);
3920 if (CONSTANT_P (size))
3921 anti_adjust_stack (plus_constant (Pmode, size, extra));
3922 else if (REG_P (size) && known_eq (extra, 0))
3923 anti_adjust_stack (size);
3924 else
3925 {
3926 temp = copy_to_mode_reg (Pmode, size);
3927 if (maybe_ne (extra, 0))
3928 temp = expand_binop (Pmode, add_optab, temp,
3929 gen_int_mode (extra, Pmode),
3930 temp, 0, OPTAB_LIB_WIDEN);
3931 anti_adjust_stack (temp);
3932 }
3933
3934 if (STACK_GROWS_DOWNWARD)
3935 {
3936 temp = virtual_outgoing_args_rtx;
3937 if (maybe_ne (extra, 0) && below)
3938 temp = plus_constant (Pmode, temp, extra);
3939 }
3940 else
3941 {
3942 poly_int64 csize;
3943 if (poly_int_rtx_p (size, &csize))
3944 temp = plus_constant (Pmode, virtual_outgoing_args_rtx,
3945 -csize - (below ? 0 : extra));
3946 else if (maybe_ne (extra, 0) && !below)
3947 temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
3948 negate_rtx (Pmode, plus_constant (Pmode, size,
3949 extra)));
3950 else
3951 temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
3952 negate_rtx (Pmode, size));
3953 }
3954
3955 return memory_address (NARROWEST_INT_MODE, temp);
3956 }
3957
3958 /* A utility routine that returns the base of an auto-inc memory, or NULL. */
3959
3960 static rtx
3961 mem_autoinc_base (rtx mem)
3962 {
3963 if (MEM_P (mem))
3964 {
3965 rtx addr = XEXP (mem, 0);
3966 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
3967 return XEXP (addr, 0);
3968 }
3969 return NULL;
3970 }
3971
3972 /* A utility routine used here, in reload, and in try_split. The insns
3973 after PREV up to and including LAST are known to adjust the stack,
3974 with a final value of END_ARGS_SIZE. Iterate backward from LAST
3975 placing notes as appropriate. PREV may be NULL, indicating the
3976 entire insn sequence prior to LAST should be scanned.
3977
3978 The set of allowed stack pointer modifications is small:
3979 (1) One or more auto-inc style memory references (aka pushes),
3980 (2) One or more addition/subtraction with the SP as destination,
3981 (3) A single move insn with the SP as destination,
3982 (4) A call_pop insn,
3983 (5) Noreturn call insns if !ACCUMULATE_OUTGOING_ARGS.
3984
3985 Insns in the sequence that do not modify the SP are ignored,
3986 except for noreturn calls.
3987
3988 The return value is the amount of adjustment that can be trivially
3989 verified, via immediate operand or auto-inc. If the adjustment
3990 cannot be trivially extracted, the return value is HOST_WIDE_INT_MIN. */
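/* For instance, on a 32-bit STACK_GROWS_DOWNWARD target a push such as

 (set (mem:SI (pre_dec:SI (reg:SI sp))) (reg:SI 0))

 is recognized via the auto-inc case below and yields -4, and an
 explicit adjustment

 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -16)))

 yields -16, whereas an SP modification whose amount cannot be read off
 the pattern (say, adding a register) yields HOST_WIDE_INT_MIN. */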
3991
3992 poly_int64
3993 find_args_size_adjust (rtx_insn *insn)
3994 {
3995 rtx dest, set, pat;
3996 int i;
3997
3998 pat = PATTERN (insn);
3999 set = NULL;
4000
4001 /* Look for a call_pop pattern. */
4002 if (CALL_P (insn))
4003 {
4004 /* We have to allow non-call_pop patterns for the case
4005 of emit_single_push_insn of a TLS address. */
4006 if (GET_CODE (pat) != PARALLEL)
4007 return 0;
4008
4009 /* All call_pop have a stack pointer adjust in the parallel.
4010 The call itself is always first, and the stack adjust is
4011 usually last, so search from the end. */
4012 for (i = XVECLEN (pat, 0) - 1; i > 0; --i)
4013 {
4014 set = XVECEXP (pat, 0, i);
4015 if (GET_CODE (set) != SET)
4016 continue;
4017 dest = SET_DEST (set);
4018 if (dest == stack_pointer_rtx)
4019 break;
4020 }
4021 /* We'd better have found the stack pointer adjust. */
4022 if (i == 0)
4023 return 0;
4024 /* Fall through to process the extracted SET and DEST
4025 as if it were a standalone insn. */
4026 }
4027 else if (GET_CODE (pat) == SET)
4028 set = pat;
4029 else if ((set = single_set (insn)) != NULL)
4030 ;
4031 else if (GET_CODE (pat) == PARALLEL)
4032 {
4033 /* ??? Some older ports use a parallel with a stack adjust
4034 and a store for a PUSH_ROUNDING pattern, rather than a
4035 PRE/POST_MODIFY rtx. Don't force them to update yet... */
4036 /* ??? See h8300 and m68k, pushqi1. */
4037 for (i = XVECLEN (pat, 0) - 1; i >= 0; --i)
4038 {
4039 set = XVECEXP (pat, 0, i);
4040 if (GET_CODE (set) != SET)
4041 continue;
4042 dest = SET_DEST (set);
4043 if (dest == stack_pointer_rtx)
4044 break;
4045
4046 /* We do not expect an auto-inc of the sp in the parallel. */
4047 gcc_checking_assert (mem_autoinc_base (dest) != stack_pointer_rtx);
4048 gcc_checking_assert (mem_autoinc_base (SET_SRC (set))
4049 != stack_pointer_rtx);
4050 }
4051 if (i < 0)
4052 return 0;
4053 }
4054 else
4055 return 0;
4056
4057 dest = SET_DEST (set);
4058
4059 /* Look for direct modifications of the stack pointer. */
4060 if (REG_P (dest) && REGNO (dest) == STACK_POINTER_REGNUM)
4061 {
4062 /* Look for a trivial adjustment, otherwise assume nothing. */
4063 /* Note that the SPU restore_stack_block pattern refers to
4064 the stack pointer in V4SImode. Consider that non-trivial. */
4065 poly_int64 offset;
4066 if (SCALAR_INT_MODE_P (GET_MODE (dest))
4067 && strip_offset (SET_SRC (set), &offset) == stack_pointer_rtx)
4068 return offset;
4069 /* ??? Reload can generate no-op moves, which will be cleaned
4070 up later. Recognize it and continue searching. */
4071 else if (rtx_equal_p (dest, SET_SRC (set)))
4072 return 0;
4073 else
4074 return HOST_WIDE_INT_MIN;
4075 }
4076 else
4077 {
4078 rtx mem, addr;
4079
4080 /* Otherwise only think about autoinc patterns. */
4081 if (mem_autoinc_base (dest) == stack_pointer_rtx)
4082 {
4083 mem = dest;
4084 gcc_checking_assert (mem_autoinc_base (SET_SRC (set))
4085 != stack_pointer_rtx);
4086 }
4087 else if (mem_autoinc_base (SET_SRC (set)) == stack_pointer_rtx)
4088 mem = SET_SRC (set);
4089 else
4090 return 0;
4091
4092 addr = XEXP (mem, 0);
4093 switch (GET_CODE (addr))
4094 {
4095 case PRE_INC:
4096 case POST_INC:
4097 return GET_MODE_SIZE (GET_MODE (mem));
4098 case PRE_DEC:
4099 case POST_DEC:
4100 return -GET_MODE_SIZE (GET_MODE (mem));
4101 case PRE_MODIFY:
4102 case POST_MODIFY:
4103 addr = XEXP (addr, 1);
4104 gcc_assert (GET_CODE (addr) == PLUS);
4105 gcc_assert (XEXP (addr, 0) == stack_pointer_rtx);
4106 return rtx_to_poly_int64 (XEXP (addr, 1));
4107 default:
4108 gcc_unreachable ();
4109 }
4110 }
4111 }
4112
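/* Add REG_ARGS_SIZE notes to the nondebug insns between PREV (exclusive)
 and LAST (inclusive); see the comment above find_args_size_adjust for
 the constraints on that sequence. END_ARGS_SIZE is the args-size state
 after LAST; roughly, the value returned is the args-size state at the
 start of the sequence, or HOST_WIDE_INT_MIN if some adjustment could
 not be identified. */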
4113 poly_int64
4114 fixup_args_size_notes (rtx_insn *prev, rtx_insn *last,
4115 poly_int64 end_args_size)
4116 {
4117 poly_int64 args_size = end_args_size;
4118 bool saw_unknown = false;
4119 rtx_insn *insn;
4120
4121 for (insn = last; insn != prev; insn = PREV_INSN (insn))
4122 {
4123 if (!NONDEBUG_INSN_P (insn))
4124 continue;
4125
4126 /* We might have existing REG_ARGS_SIZE notes, e.g. when pushing
4127 a call argument containing a TLS address that itself requires
4128 a call to __tls_get_addr. The handling of stack_pointer_delta
4129 in emit_single_push_insn is supposed to ensure that any such
4130 notes are already correct. */
4131 rtx note = find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX);
4132 gcc_assert (!note || known_eq (args_size, get_args_size (note)));
4133
4134 poly_int64 this_delta = find_args_size_adjust (insn);
4135 if (known_eq (this_delta, 0))
4136 {
4137 if (!CALL_P (insn)
4138 || ACCUMULATE_OUTGOING_ARGS
4139 || find_reg_note (insn, REG_NORETURN, NULL_RTX) == NULL_RTX)
4140 continue;
4141 }
4142
4143 gcc_assert (!saw_unknown);
4144 if (known_eq (this_delta, HOST_WIDE_INT_MIN))
4145 saw_unknown = true;
4146
4147 if (!note)
4148 add_args_size_note (insn, args_size);
4149 if (STACK_GROWS_DOWNWARD)
4150 this_delta = -poly_uint64 (this_delta);
4151
4152 if (saw_unknown)
4153 args_size = HOST_WIDE_INT_MIN;
4154 else
4155 args_size -= this_delta;
4156 }
4157
4158 return args_size;
4159 }
4160
4161 #ifdef PUSH_ROUNDING
4162 /* Emit single push insn. */
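/* Roughly, the strategies tried below, in order, are: a target-provided
 push pattern (push_optab); a plain STACK_PUSH_CODE address when the
 mode's size equals its PUSH_ROUNDING size; an explicit stack-pointer
 adjustment followed by a store at an offset when the argument is
 padded downward; and otherwise a PRE_MODIFY address that adjusts the
 stack pointer by the rounded size. */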
4163
4164 static void
4165 emit_single_push_insn_1 (machine_mode mode, rtx x, tree type)
4166 {
4167 rtx dest_addr;
4168 poly_int64 rounded_size = PUSH_ROUNDING (GET_MODE_SIZE (mode));
4169 rtx dest;
4170 enum insn_code icode;
4171
4172 /* If there is push pattern, use it. Otherwise try old way of throwing
4173 MEM representing push operation to move expander. */
4174 icode = optab_handler (push_optab, mode);
4175 if (icode != CODE_FOR_nothing)
4176 {
4177 class expand_operand ops[1];
4178
4179 create_input_operand (&ops[0], x, mode);
4180 if (maybe_expand_insn (icode, 1, ops))
4181 return;
4182 }
4183 if (known_eq (GET_MODE_SIZE (mode), rounded_size))
4184 dest_addr = gen_rtx_fmt_e (STACK_PUSH_CODE, Pmode, stack_pointer_rtx);
4185 /* If we are to pad downward, adjust the stack pointer first and
4186 then store X into the stack location using an offset. This is
4187 because emit_move_insn does not know how to pad; it does not have
4188 access to type. */
4189 else if (targetm.calls.function_arg_padding (mode, type) == PAD_DOWNWARD)
4190 {
4191 emit_move_insn (stack_pointer_rtx,
4192 expand_binop (Pmode,
4193 STACK_GROWS_DOWNWARD ? sub_optab
4194 : add_optab,
4195 stack_pointer_rtx,
4196 gen_int_mode (rounded_size, Pmode),
4197 NULL_RTX, 0, OPTAB_LIB_WIDEN));
4198
4199 poly_int64 offset = rounded_size - GET_MODE_SIZE (mode);
4200 if (STACK_GROWS_DOWNWARD && STACK_PUSH_CODE == POST_DEC)
4201 /* We have already decremented the stack pointer, so get the
4202 previous value. */
4203 offset += rounded_size;
4204
4205 if (!STACK_GROWS_DOWNWARD && STACK_PUSH_CODE == POST_INC)
4206 /* We have already incremented the stack pointer, so get the
4207 previous value. */
4208 offset -= rounded_size;
4209
4210 dest_addr = plus_constant (Pmode, stack_pointer_rtx, offset);
4211 }
4212 else
4213 {
4214 if (STACK_GROWS_DOWNWARD)
4215 /* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC. */
4216 dest_addr = plus_constant (Pmode, stack_pointer_rtx, -rounded_size);
4217 else
4218 /* ??? This seems wrong if STACK_PUSH_CODE == POST_INC. */
4219 dest_addr = plus_constant (Pmode, stack_pointer_rtx, rounded_size);
4220
4221 dest_addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, dest_addr);
4222 }
4223
4224 dest = gen_rtx_MEM (mode, dest_addr);
4225
4226 if (type != 0)
4227 {
4228 set_mem_attributes (dest, type, 1);
4229
4230 if (cfun->tail_call_marked)
4231 /* Function incoming arguments may overlap with sibling call
4232 outgoing arguments and we cannot allow reordering of reads
4233 from function arguments with stores to outgoing arguments
4234 of sibling calls. */
4235 set_mem_alias_set (dest, 0);
4236 }
4237 emit_move_insn (dest, x);
4238 }
4239
4240 /* Emit and annotate a single push insn. */
4241
4242 static void
4243 emit_single_push_insn (machine_mode mode, rtx x, tree type)
4244 {
4245 poly_int64 delta, old_delta = stack_pointer_delta;
4246 rtx_insn *prev = get_last_insn ();
4247 rtx_insn *last;
4248
4249 emit_single_push_insn_1 (mode, x, type);
4250
4251 /* Adjust stack_pointer_delta to describe the situation after the push
4252 we just performed. Note that we must do this after the push rather
4253 than before the push in case calculating X needs pushes and pops of
4254 its own (e.g. if calling __tls_get_addr). The REG_ARGS_SIZE notes
4255 for such pushes and pops must not include the effect of the future
4256 push of X. */
4257 stack_pointer_delta += PUSH_ROUNDING (GET_MODE_SIZE (mode));
4258
4259 last = get_last_insn ();
4260
4261 /* Notice the common case where we emitted exactly one insn. */
4262 if (PREV_INSN (last) == prev)
4263 {
4264 add_args_size_note (last, stack_pointer_delta);
4265 return;
4266 }
4267
4268 delta = fixup_args_size_notes (prev, last, stack_pointer_delta);
4269 gcc_assert (known_eq (delta, HOST_WIDE_INT_MIN)
4270 || known_eq (delta, old_delta));
4271 }
4272 #endif
4273
4274 /* If reading SIZE bytes from X will end up reading from
4275 Y, return the number of bytes that overlap. Return -1
4276 if there is no overlap, or -2 if we cannot determine whether they overlap
4277 (for example when X and Y have different base registers). */
4278
4279 static int
4280 memory_load_overlap (rtx x, rtx y, HOST_WIDE_INT size)
4281 {
4282 rtx tmp = plus_constant (Pmode, x, size);
4283 rtx sub = simplify_gen_binary (MINUS, Pmode, tmp, y);
4284
4285 if (!CONST_INT_P (sub))
4286 return -2;
4287
4288 HOST_WIDE_INT val = INTVAL (sub);
4289
4290 return IN_RANGE (val, 1, size) ? val : -1;
4291 }
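/* For example, with X = sp, Y = sp + 8 and SIZE = 16, TMP is sp + 16 and
 SUB simplifies to (const_int 8), which lies in [1, 16], so 8 bytes of
 the read from X overlap Y; if SUB did not simplify to a CONST_INT
 (e.g. X and Y use different base registers), -2 is returned instead. */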
4292
4293 /* Generate code to push X onto the stack, assuming it has mode MODE and
4294 type TYPE.
4295 MODE is redundant except when X is a CONST_INT (since they don't
4296 carry mode info).
4297 SIZE is an rtx for the size of data to be copied (in bytes),
4298 needed only if X is BLKmode.
4299 Return true if successful. May return false if asked to push a
4300 partial argument during a sibcall optimization (as specified by
4301 SIBCALL_P) and the incoming and outgoing pointers cannot be shown
4302 to not overlap.
4303
4304 ALIGN (in bits) is maximum alignment we can assume.
4305
4306 If PARTIAL and REG are both nonzero, then copy the first PARTIAL
4307 bytes of X into registers starting with REG, and push the rest of X.
4308 The amount of space pushed is decreased by PARTIAL bytes.
4309 REG must be a hard register in this case.
4310 If REG is zero but PARTIAL is not, take all other actions for an
4311 argument partially in registers, but do not actually load any
4312 registers.
4313
4314 EXTRA is the amount in bytes of extra space to leave next to this arg.
4315 This is ignored if an argument block has already been allocated.
4316
4317 On a machine that lacks real push insns, ARGS_ADDR is the address of
4318 the bottom of the argument block for this call. We use indexing off there
4319 to store the arg. On machines with push insns, ARGS_ADDR is 0 when an
4320 argument block has not been preallocated.
4321
4322 ARGS_SO_FAR is the size of args previously pushed for this call.
4323
4324 REG_PARM_STACK_SPACE is nonzero if functions require stack space
4325 for arguments passed in registers. If nonzero, it will be the number
4326 of bytes required. */
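/* As an illustration, a 16-byte BLKmode argument with PARTIAL == 8 and
 REG a hard register has its first 8 bytes passed in registers starting
 at REG (they are loaded at the end of this function, after the stack
 stores) and only the remaining 8 bytes copied to the stack. */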
4327
4328 bool
4329 emit_push_insn (rtx x, machine_mode mode, tree type, rtx size,
4330 unsigned int align, int partial, rtx reg, poly_int64 extra,
4331 rtx args_addr, rtx args_so_far, int reg_parm_stack_space,
4332 rtx alignment_pad, bool sibcall_p)
4333 {
4334 rtx xinner;
4335 pad_direction stack_direction
4336 = STACK_GROWS_DOWNWARD ? PAD_DOWNWARD : PAD_UPWARD;
4337
4338 /* Decide where to pad the argument: PAD_DOWNWARD for below,
4339 PAD_UPWARD for above, or PAD_NONE for don't pad it.
4340 Default is below for small data on big-endian machines; else above. */
4341 pad_direction where_pad = targetm.calls.function_arg_padding (mode, type);
4342
4343 /* Invert direction if stack is post-decrement.
4344 FIXME: why? */
4345 if (STACK_PUSH_CODE == POST_DEC)
4346 if (where_pad != PAD_NONE)
4347 where_pad = (where_pad == PAD_DOWNWARD ? PAD_UPWARD : PAD_DOWNWARD);
4348
4349 xinner = x;
4350
4351 int nregs = partial / UNITS_PER_WORD;
4352 rtx *tmp_regs = NULL;
4353 int overlapping = 0;
4354
4355 if (mode == BLKmode
4356 || (STRICT_ALIGNMENT && align < GET_MODE_ALIGNMENT (mode)))
4357 {
4358 /* Copy a block into the stack, entirely or partially. */
4359
4360 rtx temp;
4361 int used;
4362 int offset;
4363 int skip;
4364
4365 offset = partial % (PARM_BOUNDARY / BITS_PER_UNIT);
4366 used = partial - offset;
4367
4368 if (mode != BLKmode)
4369 {
4370 /* A value is to be stored in an insufficiently aligned
4371 stack slot; copy via a suitably aligned slot if
4372 necessary. */
4373 size = gen_int_mode (GET_MODE_SIZE (mode), Pmode);
4374 if (!MEM_P (xinner))
4375 {
4376 temp = assign_temp (type, 1, 1);
4377 emit_move_insn (temp, xinner);
4378 xinner = temp;
4379 }
4380 }
4381
4382 gcc_assert (size);
4383
4384 /* USED is now the # of bytes we need not copy to the stack
4385 because registers will take care of them. */
4386
4387 if (partial != 0)
4388 xinner = adjust_address (xinner, BLKmode, used);
4389
4390 /* If the partial register-part of the arg counts in its stack size,
4391 skip the part of stack space corresponding to the registers.
4392 Otherwise, start copying to the beginning of the stack space,
4393 by setting SKIP to 0. */
4394 skip = (reg_parm_stack_space == 0) ? 0 : used;
4395
4396 #ifdef PUSH_ROUNDING
4397 /* Do it with several push insns if that doesn't take lots of insns
4398 and if there is no difficulty with push insns that skip bytes
4399 on the stack for alignment purposes. */
4400 if (args_addr == 0
4401 && PUSH_ARGS
4402 && CONST_INT_P (size)
4403 && skip == 0
4404 && MEM_ALIGN (xinner) >= align
4405 && can_move_by_pieces ((unsigned) INTVAL (size) - used, align)
4406 /* Here we avoid the case of a structure whose weak alignment
4407 forces many pushes of a small amount of data,
4408 and such small pushes do rounding that causes trouble. */
4409 && ((!targetm.slow_unaligned_access (word_mode, align))
4410 || align >= BIGGEST_ALIGNMENT
4411 || known_eq (PUSH_ROUNDING (align / BITS_PER_UNIT),
4412 align / BITS_PER_UNIT))
4413 && known_eq (PUSH_ROUNDING (INTVAL (size)), INTVAL (size)))
4414 {
4415 /* Push padding now if padding above and stack grows down,
4416 or if padding below and stack grows up.
4417 But if space already allocated, this has already been done. */
4418 if (maybe_ne (extra, 0)
4419 && args_addr == 0
4420 && where_pad != PAD_NONE
4421 && where_pad != stack_direction)
4422 anti_adjust_stack (gen_int_mode (extra, Pmode));
4423
4424 move_by_pieces (NULL, xinner, INTVAL (size) - used, align,
4425 RETURN_BEGIN);
4426 }
4427 else
4428 #endif /* PUSH_ROUNDING */
4429 {
4430 rtx target;
4431
4432 /* Otherwise make space on the stack and copy the data
4433 to the address of that space. */
4434
4435 /* Deduct words put into registers from the size we must copy. */
4436 if (partial != 0)
4437 {
4438 if (CONST_INT_P (size))
4439 size = GEN_INT (INTVAL (size) - used);
4440 else
4441 size = expand_binop (GET_MODE (size), sub_optab, size,
4442 gen_int_mode (used, GET_MODE (size)),
4443 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4444 }
4445
4446 /* Get the address of the stack space.
4447 In this case, we do not deal with EXTRA separately.
4448 A single stack adjust will do. */
4449 poly_int64 offset;
4450 if (! args_addr)
4451 {
4452 temp = push_block (size, extra, where_pad == PAD_DOWNWARD);
4453 extra = 0;
4454 }
4455 else if (poly_int_rtx_p (args_so_far, &offset))
4456 temp = memory_address (BLKmode,
4457 plus_constant (Pmode, args_addr,
4458 skip + offset));
4459 else
4460 temp = memory_address (BLKmode,
4461 plus_constant (Pmode,
4462 gen_rtx_PLUS (Pmode,
4463 args_addr,
4464 args_so_far),
4465 skip));
4466
4467 if (!ACCUMULATE_OUTGOING_ARGS)
4468 {
4469 /* If the source is referenced relative to the stack pointer,
4470 copy it to another register to stabilize it. We do not need
4471 to do this if we know that we won't be changing sp. */
4472
4473 if (reg_mentioned_p (virtual_stack_dynamic_rtx, temp)
4474 || reg_mentioned_p (virtual_outgoing_args_rtx, temp))
4475 temp = copy_to_reg (temp);
4476 }
4477
4478 target = gen_rtx_MEM (BLKmode, temp);
4479
4480 /* We do *not* set_mem_attributes here, because incoming arguments
4481 may overlap with sibling call outgoing arguments and we cannot
4482 allow reordering of reads from function arguments with stores
4483 to outgoing arguments of sibling calls. We do, however, want
4484 to record the alignment of the stack slot. */
4485 /* ALIGN may well be better aligned than TYPE, e.g. due to
4486 PARM_BOUNDARY. Assume the caller isn't lying. */
4487 set_mem_align (target, align);
4488
4489 /* If part should go in registers and pushing to that part would
4490 overwrite some of the values that need to go into regs, load the
4491 overlapping values into temporary pseudos to be moved into the hard
4492 regs at the end after the stack pushing has completed.
4493 We cannot load them directly into the hard regs here because
4494 they can be clobbered by the block move expansions.
4495 See PR 65358. */
4496
4497 if (partial > 0 && reg != 0 && mode == BLKmode
4498 && GET_CODE (reg) != PARALLEL)
4499 {
4500 overlapping = memory_load_overlap (XEXP (x, 0), temp, partial);
4501 if (overlapping > 0)
4502 {
4503 gcc_assert (overlapping % UNITS_PER_WORD == 0);
4504 overlapping /= UNITS_PER_WORD;
4505
4506 tmp_regs = XALLOCAVEC (rtx, overlapping);
4507
4508 for (int i = 0; i < overlapping; i++)
4509 tmp_regs[i] = gen_reg_rtx (word_mode);
4510
4511 for (int i = 0; i < overlapping; i++)
4512 emit_move_insn (tmp_regs[i],
4513 operand_subword_force (target, i, mode));
4514 }
4515 else if (overlapping == -1)
4516 overlapping = 0;
4517 /* Could not determine whether there is overlap.
4518 Fail the sibcall. */
4519 else
4520 {
4521 overlapping = 0;
4522 if (sibcall_p)
4523 return false;
4524 }
4525 }
4526 emit_block_move (target, xinner, size, BLOCK_OP_CALL_PARM);
4527 }
4528 }
4529 else if (partial > 0)
4530 {
4531 /* Scalar partly in registers. This case is only supported
4532 for fixed-width modes. */
4533 int size = GET_MODE_SIZE (mode).to_constant ();
4534 size /= UNITS_PER_WORD;
4535 int i;
4536 int not_stack;
4537 /* # bytes of start of argument
4538 that we must make space for but need not store. */
4539 int offset = partial % (PARM_BOUNDARY / BITS_PER_UNIT);
4540 int args_offset = INTVAL (args_so_far);
4541 int skip;
4542
4543 /* Push padding now if padding above and stack grows down,
4544 or if padding below and stack grows up.
4545 But if space already allocated, this has already been done. */
4546 if (maybe_ne (extra, 0)
4547 && args_addr == 0
4548 && where_pad != PAD_NONE
4549 && where_pad != stack_direction)
4550 anti_adjust_stack (gen_int_mode (extra, Pmode));
4551
4552 /* If we make space by pushing it, we might as well push
4553 the real data. Otherwise, we can leave OFFSET nonzero
4554 and leave the space uninitialized. */
4555 if (args_addr == 0)
4556 offset = 0;
4557
4558 /* Now NOT_STACK gets the number of words that we don't need to
4559 allocate on the stack. Convert OFFSET to words too. */
4560 not_stack = (partial - offset) / UNITS_PER_WORD;
4561 offset /= UNITS_PER_WORD;
4562
4563 /* If the partial register-part of the arg counts in its stack size,
4564 skip the part of stack space corresponding to the registers.
4565 Otherwise, start copying to the beginning of the stack space,
4566 by setting SKIP to 0. */
4567 skip = (reg_parm_stack_space == 0) ? 0 : not_stack;
4568
4569 if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x))
4570 x = validize_mem (force_const_mem (mode, x));
4571
4572 /* If X is a hard register in a non-integer mode, copy it into a pseudo;
4573 SUBREGs of such registers are not allowed. */
4574 if ((REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
4575 && GET_MODE_CLASS (GET_MODE (x)) != MODE_INT))
4576 x = copy_to_reg (x);
4577
4578 /* Loop over all the words allocated on the stack for this arg. */
4579 /* We can do it by words, because any scalar bigger than a word
4580 has a size that is a multiple of a word. */
4581 for (i = size - 1; i >= not_stack; i--)
4582 if (i >= not_stack + offset)
4583 if (!emit_push_insn (operand_subword_force (x, i, mode),
4584 word_mode, NULL_TREE, NULL_RTX, align, 0, NULL_RTX,
4585 0, args_addr,
4586 GEN_INT (args_offset + ((i - not_stack + skip)
4587 * UNITS_PER_WORD)),
4588 reg_parm_stack_space, alignment_pad, sibcall_p))
4589 return false;
4590 }
4591 else
4592 {
4593 rtx addr;
4594 rtx dest;
4595
4596 /* Push padding now if padding above and stack grows down,
4597 or if padding below and stack grows up.
4598 But if space already allocated, this has already been done. */
4599 if (maybe_ne (extra, 0)
4600 && args_addr == 0
4601 && where_pad != PAD_NONE
4602 && where_pad != stack_direction)
4603 anti_adjust_stack (gen_int_mode (extra, Pmode));
4604
4605 #ifdef PUSH_ROUNDING
4606 if (args_addr == 0 && PUSH_ARGS)
4607 emit_single_push_insn (mode, x, type);
4608 else
4609 #endif
4610 {
4611 addr = simplify_gen_binary (PLUS, Pmode, args_addr, args_so_far);
4612 dest = gen_rtx_MEM (mode, memory_address (mode, addr));
4613
4614 /* We do *not* set_mem_attributes here, because incoming arguments
4615 may overlap with sibling call outgoing arguments and we cannot
4616 allow reordering of reads from function arguments with stores
4617 to outgoing arguments of sibling calls. We do, however, want
4618 to record the alignment of the stack slot. */
4619 /* ALIGN may well be better aligned than TYPE, e.g. due to
4620 PARM_BOUNDARY. Assume the caller isn't lying. */
4621 set_mem_align (dest, align);
4622
4623 emit_move_insn (dest, x);
4624 }
4625 }
4626
4627 /* Move the partial arguments into the registers and any overlapping
4628 values that we moved into the pseudos in tmp_regs. */
4629 if (partial > 0 && reg != 0)
4630 {
4631 /* Handle calls that pass values in multiple non-contiguous locations.
4632 The Irix 6 ABI has examples of this. */
4633 if (GET_CODE (reg) == PARALLEL)
4634 emit_group_load (reg, x, type, -1);
4635 else
4636 {
4637 gcc_assert (partial % UNITS_PER_WORD == 0);
4638 move_block_to_reg (REGNO (reg), x, nregs - overlapping, mode);
4639
4640 for (int i = 0; i < overlapping; i++)
4641 emit_move_insn (gen_rtx_REG (word_mode, REGNO (reg)
4642 + nregs - overlapping + i),
4643 tmp_regs[i]);
4644
4645 }
4646 }
4647
4648 if (maybe_ne (extra, 0) && args_addr == 0 && where_pad == stack_direction)
4649 anti_adjust_stack (gen_int_mode (extra, Pmode));
4650
4651 if (alignment_pad && args_addr == 0)
4652 anti_adjust_stack (alignment_pad);
4653
4654 return true;
4655 }
4656 \f
4657 /* Return X if X can be used as a subtarget in a sequence of arithmetic
4658 operations. */
4659
4660 static rtx
4661 get_subtarget (rtx x)
4662 {
4663 return (optimize
4664 || x == 0
4665 /* Only registers can be subtargets. */
4666 || !REG_P (x)
4667 /* Don't use hard regs to avoid extending their life. */
4668 || REGNO (x) < FIRST_PSEUDO_REGISTER
4669 ? 0 : x);
4670 }
4671
4672 /* A subroutine of expand_assignment. Optimize FIELD op= VAL, where
4673 FIELD is a bitfield. Returns true if the optimization was successful,
4674 and there's nothing else to do. */
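/* Roughly, this targets assignments whose right-hand side is a single
 binary operation on the field itself, e.g.

 struct S { unsigned f : 1; } *p;
 ... p->f ^= 1; ...

 where, when the operation reaches expansion in the field's own type,
 the store can become one xor/ior/add of the word containing the field
 instead of an extract/modify/insert sequence. */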
4675
4676 static bool
4677 optimize_bitfield_assignment_op (poly_uint64 pbitsize,
4678 poly_uint64 pbitpos,
4679 poly_uint64 pbitregion_start,
4680 poly_uint64 pbitregion_end,
4681 machine_mode mode1, rtx str_rtx,
4682 tree to, tree src, bool reverse)
4683 {
4684 /* str_mode is not guaranteed to be a scalar type. */
4685 machine_mode str_mode = GET_MODE (str_rtx);
4686 unsigned int str_bitsize;
4687 tree op0, op1;
4688 rtx value, result;
4689 optab binop;
4690 gimple *srcstmt;
4691 enum tree_code code;
4692
4693 unsigned HOST_WIDE_INT bitsize, bitpos, bitregion_start, bitregion_end;
4694 if (mode1 != VOIDmode
4695 || !pbitsize.is_constant (&bitsize)
4696 || !pbitpos.is_constant (&bitpos)
4697 || !pbitregion_start.is_constant (&bitregion_start)
4698 || !pbitregion_end.is_constant (&bitregion_end)
4699 || bitsize >= BITS_PER_WORD
4700 || !GET_MODE_BITSIZE (str_mode).is_constant (&str_bitsize)
4701 || str_bitsize > BITS_PER_WORD
4702 || TREE_SIDE_EFFECTS (to)
4703 || TREE_THIS_VOLATILE (to))
4704 return false;
4705
4706 STRIP_NOPS (src);
4707 if (TREE_CODE (src) != SSA_NAME)
4708 return false;
4709 if (TREE_CODE (TREE_TYPE (src)) != INTEGER_TYPE)
4710 return false;
4711
4712 srcstmt = get_gimple_for_ssa_name (src);
4713 if (!srcstmt
4714 || TREE_CODE_CLASS (gimple_assign_rhs_code (srcstmt)) != tcc_binary)
4715 return false;
4716
4717 code = gimple_assign_rhs_code (srcstmt);
4718
4719 op0 = gimple_assign_rhs1 (srcstmt);
4720
4721 /* If OP0 is an SSA_NAME, then we want to walk the use-def chain
4722 to find its initialization. Hopefully the initialization will
4723 be from a bitfield load. */
4724 if (TREE_CODE (op0) == SSA_NAME)
4725 {
4726 gimple *op0stmt = get_gimple_for_ssa_name (op0);
4727
4728 /* We want to eventually have OP0 be the same as TO, which
4729 should be a bitfield. */
4730 if (!op0stmt
4731 || !is_gimple_assign (op0stmt)
4732 || gimple_assign_rhs_code (op0stmt) != TREE_CODE (to))
4733 return false;
4734 op0 = gimple_assign_rhs1 (op0stmt);
4735 }
4736
4737 op1 = gimple_assign_rhs2 (srcstmt);
4738
4739 if (!operand_equal_p (to, op0, 0))
4740 return false;
4741
4742 if (MEM_P (str_rtx))
4743 {
4744 unsigned HOST_WIDE_INT offset1;
4745
4746 if (str_bitsize == 0 || str_bitsize > BITS_PER_WORD)
4747 str_bitsize = BITS_PER_WORD;
4748
4749 scalar_int_mode best_mode;
4750 if (!get_best_mode (bitsize, bitpos, bitregion_start, bitregion_end,
4751 MEM_ALIGN (str_rtx), str_bitsize, false, &best_mode))
4752 return false;
4753 str_mode = best_mode;
4754 str_bitsize = GET_MODE_BITSIZE (best_mode);
4755
4756 offset1 = bitpos;
4757 bitpos %= str_bitsize;
4758 offset1 = (offset1 - bitpos) / BITS_PER_UNIT;
4759 str_rtx = adjust_address (str_rtx, str_mode, offset1);
4760 }
4761 else if (!REG_P (str_rtx) && GET_CODE (str_rtx) != SUBREG)
4762 return false;
4763
4764 /* If the bit field covers the whole REG/MEM, store_field
4765 will likely generate better code. */
4766 if (bitsize >= str_bitsize)
4767 return false;
4768
4769 /* We can't handle fields split across multiple entities. */
4770 if (bitpos + bitsize > str_bitsize)
4771 return false;
4772
4773 if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
4774 bitpos = str_bitsize - bitpos - bitsize;
4775
4776 switch (code)
4777 {
4778 case PLUS_EXPR:
4779 case MINUS_EXPR:
4780 /* For now, just optimize the case of the topmost bitfield
4781 where we don't need to do any masking and also
4782 1 bit bitfields where xor can be used.
4783 We might win by one instruction for the other bitfields
4784 too if insv/extv instructions aren't used, so that
4785 can be added later. */
4786 if ((reverse || bitpos + bitsize != str_bitsize)
4787 && (bitsize != 1 || TREE_CODE (op1) != INTEGER_CST))
4788 break;
4789
4790 value = expand_expr (op1, NULL_RTX, str_mode, EXPAND_NORMAL);
4791 value = convert_modes (str_mode,
4792 TYPE_MODE (TREE_TYPE (op1)), value,
4793 TYPE_UNSIGNED (TREE_TYPE (op1)));
4794
4795 /* We may be accessing data outside the field, which means
4796 we can alias adjacent data. */
4797 if (MEM_P (str_rtx))
4798 {
4799 str_rtx = shallow_copy_rtx (str_rtx);
4800 set_mem_alias_set (str_rtx, 0);
4801 set_mem_expr (str_rtx, 0);
4802 }
4803
4804 if (bitsize == 1 && (reverse || bitpos + bitsize != str_bitsize))
4805 {
4806 value = expand_and (str_mode, value, const1_rtx, NULL);
4807 binop = xor_optab;
4808 }
4809 else
4810 binop = code == PLUS_EXPR ? add_optab : sub_optab;
4811
4812 value = expand_shift (LSHIFT_EXPR, str_mode, value, bitpos, NULL_RTX, 1);
4813 if (reverse)
4814 value = flip_storage_order (str_mode, value);
4815 result = expand_binop (str_mode, binop, str_rtx,
4816 value, str_rtx, 1, OPTAB_WIDEN);
4817 if (result != str_rtx)
4818 emit_move_insn (str_rtx, result);
4819 return true;
4820
4821 case BIT_IOR_EXPR:
4822 case BIT_XOR_EXPR:
4823 if (TREE_CODE (op1) != INTEGER_CST)
4824 break;
4825 value = expand_expr (op1, NULL_RTX, str_mode, EXPAND_NORMAL);
4826 value = convert_modes (str_mode,
4827 TYPE_MODE (TREE_TYPE (op1)), value,
4828 TYPE_UNSIGNED (TREE_TYPE (op1)));
4829
4830 /* We may be accessing data outside the field, which means
4831 we can alias adjacent data. */
4832 if (MEM_P (str_rtx))
4833 {
4834 str_rtx = shallow_copy_rtx (str_rtx);
4835 set_mem_alias_set (str_rtx, 0);
4836 set_mem_expr (str_rtx, 0);
4837 }
4838
4839 binop = code == BIT_IOR_EXPR ? ior_optab : xor_optab;
4840 if (bitpos + bitsize != str_bitsize)
4841 {
4842 rtx mask = gen_int_mode ((HOST_WIDE_INT_1U << bitsize) - 1,
4843 str_mode);
4844 value = expand_and (str_mode, value, mask, NULL_RTX);
4845 }
4846 value = expand_shift (LSHIFT_EXPR, str_mode, value, bitpos, NULL_RTX, 1);
4847 if (reverse)
4848 value = flip_storage_order (str_mode, value);
4849 result = expand_binop (str_mode, binop, str_rtx,
4850 value, str_rtx, 1, OPTAB_WIDEN);
4851 if (result != str_rtx)
4852 emit_move_insn (str_rtx, result);
4853 return true;
4854
4855 default:
4856 break;
4857 }
4858
4859 return false;
4860 }
4861
4862 /* In the C++ memory model, consecutive bit fields in a structure are
4863 considered one memory location.
4864
4865 Given a COMPONENT_REF EXP at position (BITPOS, OFFSET), this function
4866 returns the bit range of consecutive bits in which this COMPONENT_REF
4867 belongs. The values are returned in *BITSTART and *BITEND. *BITPOS
4868 and *OFFSET may be adjusted in the process.
4869
4870 If the access does not need to be restricted, 0 is returned in both
4871 *BITSTART and *BITEND. */
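/* For example, in

 struct S { int a : 3; int b : 5; char c; };

 the bit-fields a and b typically share one representative and hence
 one memory location, so a store to a may read and rewrite the bits of
 b but must not touch c; the [*BITSTART, *BITEND] range returned here
 (in the same coordinates as *BITPOS) expresses that restriction. */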
4872
4873 void
4874 get_bit_range (poly_uint64_pod *bitstart, poly_uint64_pod *bitend, tree exp,
4875 poly_int64_pod *bitpos, tree *offset)
4876 {
4877 poly_int64 bitoffset;
4878 tree field, repr;
4879
4880 gcc_assert (TREE_CODE (exp) == COMPONENT_REF);
4881
4882 field = TREE_OPERAND (exp, 1);
4883 repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
4884 /* If we do not have a DECL_BIT_FIELD_REPRESENTATIVE there is no
4885 need to limit the range we can access. */
4886 if (!repr)
4887 {
4888 *bitstart = *bitend = 0;
4889 return;
4890 }
4891
4892 /* If we have a DECL_BIT_FIELD_REPRESENTATIVE but the enclosing record is
4893 part of a larger bit field, then the representative does not serve any
4894 useful purpose. This can occur in Ada. */
4895 if (handled_component_p (TREE_OPERAND (exp, 0)))
4896 {
4897 machine_mode rmode;
4898 poly_int64 rbitsize, rbitpos;
4899 tree roffset;
4900 int unsignedp, reversep, volatilep = 0;
4901 get_inner_reference (TREE_OPERAND (exp, 0), &rbitsize, &rbitpos,
4902 &roffset, &rmode, &unsignedp, &reversep,
4903 &volatilep);
4904 if (!multiple_p (rbitpos, BITS_PER_UNIT))
4905 {
4906 *bitstart = *bitend = 0;
4907 return;
4908 }
4909 }
4910
4911 /* Compute the adjustment to bitpos from the offset of the field
4912 relative to the representative. DECL_FIELD_OFFSET of field and
4913 repr are the same by construction if they are not constants,
4914 see finish_bitfield_layout. */
4915 poly_uint64 field_offset, repr_offset;
4916 if (poly_int_tree_p (DECL_FIELD_OFFSET (field), &field_offset)
4917 && poly_int_tree_p (DECL_FIELD_OFFSET (repr), &repr_offset))
4918 bitoffset = (field_offset - repr_offset) * BITS_PER_UNIT;
4919 else
4920 bitoffset = 0;
4921 bitoffset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
4922 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
4923
4924 /* If the adjustment is larger than bitpos, we would have a negative bit
4925 position for the lower bound and this may wreak havoc later. Adjust
4926 offset and bitpos to make the lower bound non-negative in that case. */
4927 if (maybe_gt (bitoffset, *bitpos))
4928 {
4929 poly_int64 adjust_bits = upper_bound (bitoffset, *bitpos) - *bitpos;
4930 poly_int64 adjust_bytes = exact_div (adjust_bits, BITS_PER_UNIT);
4931
4932 *bitpos += adjust_bits;
4933 if (*offset == NULL_TREE)
4934 *offset = size_int (-adjust_bytes);
4935 else
4936 *offset = size_binop (MINUS_EXPR, *offset, size_int (adjust_bytes));
4937 *bitstart = 0;
4938 }
4939 else
4940 *bitstart = *bitpos - bitoffset;
4941
4942 *bitend = *bitstart + tree_to_poly_uint64 (DECL_SIZE (repr)) - 1;
4943 }
4944
4945 /* Returns true if BASE is a DECL that does not reside in memory and
4946 has non-BLKmode, i.e. its DECL_RTL is not a MEM. If
4947 DECL_RTL has not been set yet, return false. */
4948
4949 static inline bool
4950 non_mem_decl_p (tree base)
4951 {
4952 if (!DECL_P (base)
4953 || TREE_ADDRESSABLE (base)
4954 || DECL_MODE (base) == BLKmode)
4955 return false;
4956
4957 if (!DECL_RTL_SET_P (base))
4958 return false;
4959
4960 return (!MEM_P (DECL_RTL (base)));
4961 }
4962
4963 /* Returns true if REF refers to an object that does not
4964 reside in memory and has non-BLKmode. */
4965
4966 static inline bool
4967 mem_ref_refers_to_non_mem_p (tree ref)
4968 {
4969 tree base;
4970
4971 if (TREE_CODE (ref) == MEM_REF
4972 || TREE_CODE (ref) == TARGET_MEM_REF)
4973 {
4974 tree addr = TREE_OPERAND (ref, 0);
4975
4976 if (TREE_CODE (addr) != ADDR_EXPR)
4977 return false;
4978
4979 base = TREE_OPERAND (addr, 0);
4980 }
4981 else
4982 base = ref;
4983
4984 return non_mem_decl_p (base);
4985 }
4986
4987 /* Expand an assignment that stores the value of FROM into TO. If NONTEMPORAL
4988 is true, try generating a nontemporal store. */
4989
4990 void
4991 expand_assignment (tree to, tree from, bool nontemporal)
4992 {
4993 rtx to_rtx = 0;
4994 rtx result;
4995 machine_mode mode;
4996 unsigned int align;
4997 enum insn_code icode;
4998
4999 /* Don't crash if the lhs of the assignment was erroneous. */
5000 if (TREE_CODE (to) == ERROR_MARK)
5001 {
5002 expand_normal (from);
5003 return;
5004 }
5005
5006 /* Optimize away no-op moves without side-effects. */
5007 if (operand_equal_p (to, from, 0))
5008 return;
5009
5010 /* Handle misaligned stores. */
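/* For example, a store through a pointer to a member of a packed
 structure, on a target that either provides a movmisalign<mode>
 pattern or reports slow_unaligned_access, goes through that pattern
 or store_bit_field below instead of a plain move. */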
5011 mode = TYPE_MODE (TREE_TYPE (to));
5012 if ((TREE_CODE (to) == MEM_REF
5013 || TREE_CODE (to) == TARGET_MEM_REF
5014 || DECL_P (to))
5015 && mode != BLKmode
5016 && !mem_ref_refers_to_non_mem_p (to)
5017 && ((align = get_object_alignment (to))
5018 < GET_MODE_ALIGNMENT (mode))
5019 && (((icode = optab_handler (movmisalign_optab, mode))
5020 != CODE_FOR_nothing)
5021 || targetm.slow_unaligned_access (mode, align)))
5022 {
5023 rtx reg, mem;
5024
5025 reg = expand_expr (from, NULL_RTX, VOIDmode, EXPAND_NORMAL);
5026 reg = force_not_mem (reg);
5027 mem = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
5028 if (TREE_CODE (to) == MEM_REF && REF_REVERSE_STORAGE_ORDER (to))
5029 reg = flip_storage_order (mode, reg);
5030
5031 if (icode != CODE_FOR_nothing)
5032 {
5033 class expand_operand ops[2];
5034
5035 create_fixed_operand (&ops[0], mem);
5036 create_input_operand (&ops[1], reg, mode);
5037 /* The movmisalign<mode> pattern cannot fail, else the assignment
5038 would silently be omitted. */
5039 expand_insn (icode, 2, ops);
5040 }
5041 else
5042 store_bit_field (mem, GET_MODE_BITSIZE (mode), 0, 0, 0, mode, reg,
5043 false);
5044 return;
5045 }
5046
5047 /* Assignment of a structure component needs special treatment
5048 if the structure component's rtx is not simply a MEM.
5049 Assignment of an array element at a constant index, and assignment of
5050 an array element in an unaligned packed structure field, has the same
5051 problem. Same for (partially) storing into a non-memory object. */
5052 if (handled_component_p (to)
5053 || (TREE_CODE (to) == MEM_REF
5054 && (REF_REVERSE_STORAGE_ORDER (to)
5055 || mem_ref_refers_to_non_mem_p (to)))
5056 || TREE_CODE (TREE_TYPE (to)) == ARRAY_TYPE)
5057 {
5058 machine_mode mode1;
5059 poly_int64 bitsize, bitpos;
5060 poly_uint64 bitregion_start = 0;
5061 poly_uint64 bitregion_end = 0;
5062 tree offset;
5063 int unsignedp, reversep, volatilep = 0;
5064 tree tem;
5065
5066 push_temp_slots ();
5067 tem = get_inner_reference (to, &bitsize, &bitpos, &offset, &mode1,
5068 &unsignedp, &reversep, &volatilep);
5069
5070 /* Make sure bitpos is not negative, it can wreak havoc later. */
5071 if (maybe_lt (bitpos, 0))
5072 {
5073 gcc_assert (offset == NULL_TREE);
5074 offset = size_int (bits_to_bytes_round_down (bitpos));
5075 bitpos = num_trailing_bits (bitpos);
5076 }
5077
5078 if (TREE_CODE (to) == COMPONENT_REF
5079 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (to, 1)))
5080 get_bit_range (&bitregion_start, &bitregion_end, to, &bitpos, &offset);
5081 /* The C++ memory model naturally applies to byte-aligned fields.
5082 However, if we do not have a DECL_BIT_FIELD_TYPE but BITPOS or
5083 BITSIZE are not byte-aligned, there is no need to limit the range
5084 we can access. This can occur with packed structures in Ada. */
5085 else if (maybe_gt (bitsize, 0)
5086 && multiple_p (bitsize, BITS_PER_UNIT)
5087 && multiple_p (bitpos, BITS_PER_UNIT))
5088 {
5089 bitregion_start = bitpos;
5090 bitregion_end = bitpos + bitsize - 1;
5091 }
5092
5093 to_rtx = expand_expr (tem, NULL_RTX, VOIDmode, EXPAND_WRITE);
5094
5095 /* If the field has a mode, we want to access it in the
5096 field's mode, not the computed mode.
5097 If a MEM has VOIDmode (external with incomplete type),
5098 use BLKmode for it instead. */
5099 if (MEM_P (to_rtx))
5100 {
5101 if (mode1 != VOIDmode)
5102 to_rtx = adjust_address (to_rtx, mode1, 0);
5103 else if (GET_MODE (to_rtx) == VOIDmode)
5104 to_rtx = adjust_address (to_rtx, BLKmode, 0);
5105 }
5106
5107 if (offset != 0)
5108 {
5109 machine_mode address_mode;
5110 rtx offset_rtx;
5111
5112 if (!MEM_P (to_rtx))
5113 {
5114 /* We can get constant negative offsets into arrays with broken
5115 user code. Translate this to a trap instead of ICEing. */
5116 gcc_assert (TREE_CODE (offset) == INTEGER_CST);
5117 expand_builtin_trap ();
5118 to_rtx = gen_rtx_MEM (BLKmode, const0_rtx);
5119 }
5120
5121 offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, EXPAND_SUM);
5122 address_mode = get_address_mode (to_rtx);
5123 if (GET_MODE (offset_rtx) != address_mode)
5124 {
5125 /* We cannot be sure that the RTL in offset_rtx is valid outside
5126 of a memory address context, so force it into a register
5127 before attempting to convert it to the desired mode. */
5128 offset_rtx = force_operand (offset_rtx, NULL_RTX);
5129 offset_rtx = convert_to_mode (address_mode, offset_rtx, 0);
5130 }
5131
5132 /* If we have an expression in OFFSET_RTX and a non-zero
5133 byte offset in BITPOS, adding the byte offset before the
5134 OFFSET_RTX results in better intermediate code, which makes
5135 later rtl optimization passes perform better.
5136
5137 We prefer intermediate code like this:
5138
5139 r124:DI=r123:DI+0x18
5140 [r124:DI]=r121:DI
5141
5142 ... instead of ...
5143
5144 r124:DI=r123:DI+0x10
5145 [r124:DI+0x8]=r121:DI
5146
5147 This is only done for aligned data values, as these can
5148 be expected to result in single move instructions. */
5149 poly_int64 bytepos;
5150 if (mode1 != VOIDmode
5151 && maybe_ne (bitpos, 0)
5152 && maybe_gt (bitsize, 0)
5153 && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
5154 && multiple_p (bitpos, bitsize)
5155 && multiple_p (bitsize, GET_MODE_ALIGNMENT (mode1))
5156 && MEM_ALIGN (to_rtx) >= GET_MODE_ALIGNMENT (mode1))
5157 {
5158 to_rtx = adjust_address (to_rtx, mode1, bytepos);
5159 bitregion_start = 0;
5160 if (known_ge (bitregion_end, poly_uint64 (bitpos)))
5161 bitregion_end -= bitpos;
5162 bitpos = 0;
5163 }
5164
5165 to_rtx = offset_address (to_rtx, offset_rtx,
5166 highest_pow2_factor_for_target (to,
5167 offset));
5168 }
5169
5170 /* No action is needed if the target is not a memory and the field
5171 lies completely outside that target. This can occur if the source
5172 code contains an out-of-bounds access to a small array. */
5173 if (!MEM_P (to_rtx)
5174 && GET_MODE (to_rtx) != BLKmode
5175 && known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (to_rtx))))
5176 {
5177 expand_normal (from);
5178 result = NULL;
5179 }
5180 /* Handle expand_expr of a complex value returning a CONCAT. */
5181 else if (GET_CODE (to_rtx) == CONCAT)
5182 {
5183 machine_mode to_mode = GET_MODE (to_rtx);
5184 gcc_checking_assert (COMPLEX_MODE_P (to_mode));
5185 poly_int64 mode_bitsize = GET_MODE_BITSIZE (to_mode);
5186 unsigned short inner_bitsize = GET_MODE_UNIT_BITSIZE (to_mode);
5187 if (TYPE_MODE (TREE_TYPE (from)) == to_mode
5188 && known_eq (bitpos, 0)
5189 && known_eq (bitsize, mode_bitsize))
5190 result = store_expr (from, to_rtx, false, nontemporal, reversep);
5191 else if (TYPE_MODE (TREE_TYPE (from)) == GET_MODE_INNER (to_mode)
5192 && known_eq (bitsize, inner_bitsize)
5193 && (known_eq (bitpos, 0)
5194 || known_eq (bitpos, inner_bitsize)))
5195 result = store_expr (from, XEXP (to_rtx, maybe_ne (bitpos, 0)),
5196 false, nontemporal, reversep);
5197 else if (known_le (bitpos + bitsize, inner_bitsize))
5198 result = store_field (XEXP (to_rtx, 0), bitsize, bitpos,
5199 bitregion_start, bitregion_end,
5200 mode1, from, get_alias_set (to),
5201 nontemporal, reversep);
5202 else if (known_ge (bitpos, inner_bitsize))
5203 result = store_field (XEXP (to_rtx, 1), bitsize,
5204 bitpos - inner_bitsize,
5205 bitregion_start, bitregion_end,
5206 mode1, from, get_alias_set (to),
5207 nontemporal, reversep);
5208 else if (known_eq (bitpos, 0) && known_eq (bitsize, mode_bitsize))
5209 {
5210 result = expand_normal (from);
5211 if (GET_CODE (result) == CONCAT)
5212 {
5213 to_mode = GET_MODE_INNER (to_mode);
5214 machine_mode from_mode = GET_MODE_INNER (GET_MODE (result));
5215 rtx from_real
5216 = simplify_gen_subreg (to_mode, XEXP (result, 0),
5217 from_mode, 0);
5218 rtx from_imag
5219 = simplify_gen_subreg (to_mode, XEXP (result, 1),
5220 from_mode, 0);
5221 if (!from_real || !from_imag)
5222 goto concat_store_slow;
5223 emit_move_insn (XEXP (to_rtx, 0), from_real);
5224 emit_move_insn (XEXP (to_rtx, 1), from_imag);
5225 }
5226 else
5227 {
5228 rtx from_rtx;
5229 if (MEM_P (result))
5230 from_rtx = change_address (result, to_mode, NULL_RTX);
5231 else
5232 from_rtx
5233 = simplify_gen_subreg (to_mode, result,
5234 TYPE_MODE (TREE_TYPE (from)), 0);
5235 if (from_rtx)
5236 {
5237 emit_move_insn (XEXP (to_rtx, 0),
5238 read_complex_part (from_rtx, false));
5239 emit_move_insn (XEXP (to_rtx, 1),
5240 read_complex_part (from_rtx, true));
5241 }
5242 else
5243 {
5244 machine_mode to_mode
5245 = GET_MODE_INNER (GET_MODE (to_rtx));
5246 rtx from_real
5247 = simplify_gen_subreg (to_mode, result,
5248 TYPE_MODE (TREE_TYPE (from)),
5249 0);
5250 rtx from_imag
5251 = simplify_gen_subreg (to_mode, result,
5252 TYPE_MODE (TREE_TYPE (from)),
5253 GET_MODE_SIZE (to_mode));
5254 if (!from_real || !from_imag)
5255 goto concat_store_slow;
5256 emit_move_insn (XEXP (to_rtx, 0), from_real);
5257 emit_move_insn (XEXP (to_rtx, 1), from_imag);
5258 }
5259 }
5260 }
5261 else
5262 {
5263 concat_store_slow:;
5264 rtx temp = assign_stack_temp (to_mode,
5265 GET_MODE_SIZE (GET_MODE (to_rtx)));
5266 write_complex_part (temp, XEXP (to_rtx, 0), false);
5267 write_complex_part (temp, XEXP (to_rtx, 1), true);
5268 result = store_field (temp, bitsize, bitpos,
5269 bitregion_start, bitregion_end,
5270 mode1, from, get_alias_set (to),
5271 nontemporal, reversep);
5272 emit_move_insn (XEXP (to_rtx, 0), read_complex_part (temp, false));
5273 emit_move_insn (XEXP (to_rtx, 1), read_complex_part (temp, true));
5274 }
5275 }
5276 /* For calls to functions returning variable length structures, if TO_RTX
5277 is not a MEM, go through a MEM because we must not create temporaries
5278 of the VLA type. */
5279 else if (!MEM_P (to_rtx)
5280 && TREE_CODE (from) == CALL_EXPR
5281 && COMPLETE_TYPE_P (TREE_TYPE (from))
5282 && TREE_CODE (TYPE_SIZE (TREE_TYPE (from))) != INTEGER_CST)
5283 {
5284 rtx temp = assign_stack_temp (GET_MODE (to_rtx),
5285 GET_MODE_SIZE (GET_MODE (to_rtx)));
5286 result = store_field (temp, bitsize, bitpos, bitregion_start,
5287 bitregion_end, mode1, from, get_alias_set (to),
5288 nontemporal, reversep);
5289 emit_move_insn (to_rtx, temp);
5290 }
5291 else
5292 {
5293 if (MEM_P (to_rtx))
5294 {
5295 /* If the field is at offset zero, we could have been given the
5296 DECL_RTL of the parent struct. Don't munge it. */
5297 to_rtx = shallow_copy_rtx (to_rtx);
5298 set_mem_attributes_minus_bitpos (to_rtx, to, 0, bitpos);
5299 if (volatilep)
5300 MEM_VOLATILE_P (to_rtx) = 1;
5301 }
5302
5303 gcc_checking_assert (known_ge (bitpos, 0));
5304 if (optimize_bitfield_assignment_op (bitsize, bitpos,
5305 bitregion_start, bitregion_end,
5306 mode1, to_rtx, to, from,
5307 reversep))
5308 result = NULL;
5309 else
5310 result = store_field (to_rtx, bitsize, bitpos,
5311 bitregion_start, bitregion_end,
5312 mode1, from, get_alias_set (to),
5313 nontemporal, reversep);
5314 }
5315
5316 if (result)
5317 preserve_temp_slots (result);
5318 pop_temp_slots ();
5319 return;
5320 }
5321
5322 /* If the rhs is a function call and its value is not an aggregate,
5323 call the function before we start to compute the lhs.
5324 This is needed for correct code for cases such as
5325 val = setjmp (buf) on machines where reference to val
5326 requires loading up part of an address in a separate insn.
5327
5328 Don't do this if TO is a VAR_DECL or PARM_DECL whose DECL_RTL is REG
5329 since it might be a promoted variable where the zero- or sign- extension
5330 needs to be done. Handling this in the normal way is safe because no
5331 computation is done before the call. The same is true for SSA names. */
5332 if (TREE_CODE (from) == CALL_EXPR && ! aggregate_value_p (from, from)
5333 && COMPLETE_TYPE_P (TREE_TYPE (from))
5334 && TREE_CODE (TYPE_SIZE (TREE_TYPE (from))) == INTEGER_CST
5335 && ! (((VAR_P (to)
5336 || TREE_CODE (to) == PARM_DECL
5337 || TREE_CODE (to) == RESULT_DECL)
5338 && REG_P (DECL_RTL (to)))
5339 || TREE_CODE (to) == SSA_NAME))
5340 {
5341 rtx value;
5342
5343 push_temp_slots ();
5344 value = expand_normal (from);
5345
5346 if (to_rtx == 0)
5347 to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
5348
5349 /* Handle calls that return values in multiple non-contiguous locations.
5350 The Irix 6 ABI has examples of this. */
5351 if (GET_CODE (to_rtx) == PARALLEL)
5352 {
5353 if (GET_CODE (value) == PARALLEL)
5354 emit_group_move (to_rtx, value);
5355 else
5356 emit_group_load (to_rtx, value, TREE_TYPE (from),
5357 int_size_in_bytes (TREE_TYPE (from)));
5358 }
5359 else if (GET_CODE (value) == PARALLEL)
5360 emit_group_store (to_rtx, value, TREE_TYPE (from),
5361 int_size_in_bytes (TREE_TYPE (from)));
5362 else if (GET_MODE (to_rtx) == BLKmode)
5363 {
5364 /* Handle calls that return BLKmode values in registers. */
5365 if (REG_P (value))
5366 copy_blkmode_from_reg (to_rtx, value, TREE_TYPE (from));
5367 else
5368 emit_block_move (to_rtx, value, expr_size (from), BLOCK_OP_NORMAL);
5369 }
5370 else
5371 {
5372 if (POINTER_TYPE_P (TREE_TYPE (to)))
5373 value = convert_memory_address_addr_space
5374 (as_a <scalar_int_mode> (GET_MODE (to_rtx)), value,
5375 TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (to))));
5376
5377 emit_move_insn (to_rtx, value);
5378 }
5379
5380 preserve_temp_slots (to_rtx);
5381 pop_temp_slots ();
5382 return;
5383 }
5384
5385 /* Ordinary treatment. Expand TO to get a REG or MEM rtx. */
5386 to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
5387
5388 /* Don't move directly into a return register. */
5389 if (TREE_CODE (to) == RESULT_DECL
5390 && (REG_P (to_rtx) || GET_CODE (to_rtx) == PARALLEL))
5391 {
5392 rtx temp;
5393
5394 push_temp_slots ();
5395
5396 /* If the source is itself a return value, it still is in a pseudo at
5397 this point so we can move it back to the return register directly. */
5398 if (REG_P (to_rtx)
5399 && TYPE_MODE (TREE_TYPE (from)) == BLKmode
5400 && TREE_CODE (from) != CALL_EXPR)
5401 temp = copy_blkmode_to_reg (GET_MODE (to_rtx), from);
5402 else
5403 temp = expand_expr (from, NULL_RTX, GET_MODE (to_rtx), EXPAND_NORMAL);
5404
5405 /* Handle calls that return values in multiple non-contiguous locations.
5406 The Irix 6 ABI has examples of this. */
5407 if (GET_CODE (to_rtx) == PARALLEL)
5408 {
5409 if (GET_CODE (temp) == PARALLEL)
5410 emit_group_move (to_rtx, temp);
5411 else
5412 emit_group_load (to_rtx, temp, TREE_TYPE (from),
5413 int_size_in_bytes (TREE_TYPE (from)));
5414 }
5415 else if (temp)
5416 emit_move_insn (to_rtx, temp);
5417
5418 preserve_temp_slots (to_rtx);
5419 pop_temp_slots ();
5420 return;
5421 }
5422
5423 /* In case we are returning the contents of an object which overlaps
5424 the place the value is being stored, use a safe function when copying
5425 a value through a pointer into a structure value return block. */
5426 if (TREE_CODE (to) == RESULT_DECL
5427 && TREE_CODE (from) == INDIRECT_REF
5428 && ADDR_SPACE_GENERIC_P
5429 (TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (from, 0)))))
5430 && refs_may_alias_p (to, from)
5431 && cfun->returns_struct
5432 && !cfun->returns_pcc_struct)
5433 {
5434 rtx from_rtx, size;
5435
5436 push_temp_slots ();
5437 size = expr_size (from);
5438 from_rtx = expand_normal (from);
5439
5440 emit_block_move_via_libcall (XEXP (to_rtx, 0), XEXP (from_rtx, 0), size);
5441
5442 preserve_temp_slots (to_rtx);
5443 pop_temp_slots ();
5444 return;
5445 }
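  /* For illustration (one possible source of the situation handled above,
     under the assumption that the struct is returned in memory):

         struct S { int a[32]; };

         struct S
         ret_deref (struct S *p)
         {
           return *p;
         }

     Here *P may alias the caller-supplied return block, so the copy is
     performed with emit_block_move_via_libcall (a memmove-like routine)
     rather than an ordinary block move.  */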
5446
5447 /* Compute FROM and store the value in the rtx we got. */
5448
5449 push_temp_slots ();
5450 result = store_expr (from, to_rtx, 0, nontemporal, false);
5451 preserve_temp_slots (result);
5452 pop_temp_slots ();
5453 return;
5454 }
5455
5456 /* Emits nontemporal store insn that moves FROM to TO. Returns true if this
5457 succeeded, false otherwise. */
5458
5459 bool
5460 emit_storent_insn (rtx to, rtx from)
5461 {
5462 class expand_operand ops[2];
5463 machine_mode mode = GET_MODE (to);
5464 enum insn_code code = optab_handler (storent_optab, mode);
5465
5466 if (code == CODE_FOR_nothing)
5467 return false;
5468
5469 create_fixed_operand (&ops[0], to);
5470 create_input_operand (&ops[1], from, mode);
5471 return maybe_expand_insn (code, 2, ops);
5472 }
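/* A typical caller pattern, as used later in store_expr, simply falls back
   to an ordinary move when no nontemporal store insn is available:

       if (nontemporal && emit_storent_insn (target, temp))
         ;
       else
         emit_move_insn (target, temp);

   This is only a sketch; the real caller below also handles PARALLELs,
   BLKmode values and reverse storage order first.  */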
5473
5474 /* Helper function for store_expr storing of STRING_CST. */
5475
5476 static rtx
5477 string_cst_read_str (void *data, HOST_WIDE_INT offset, scalar_int_mode mode)
5478 {
5479 tree str = (tree) data;
5480
5481 gcc_assert (offset >= 0);
5482 if (offset >= TREE_STRING_LENGTH (str))
5483 return const0_rtx;
5484
5485 if ((unsigned HOST_WIDE_INT) offset + GET_MODE_SIZE (mode)
5486 > (unsigned HOST_WIDE_INT) TREE_STRING_LENGTH (str))
5487 {
5488 char *p = XALLOCAVEC (char, GET_MODE_SIZE (mode));
5489 size_t l = TREE_STRING_LENGTH (str) - offset;
5490 memcpy (p, TREE_STRING_POINTER (str) + offset, l);
5491 memset (p + l, '\0', GET_MODE_SIZE (mode) - l);
5492 return c_readstr (p, mode, false);
5493 }
5494
5495 return c_readstr (TREE_STRING_POINTER (str) + offset, mode, false);
5496 }
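/* For illustration: for a C string constant "abc" (TREE_STRING_LENGTH is 4,
   counting the terminating NUL) and a read in a 4-byte SImode at offset 2,
   only the bytes "c" and the NUL remain, so the helper copies those two
   bytes into a local buffer, zero-fills the remaining two, and hands the
   buffer to c_readstr.  A read starting at or beyond the string length
   simply yields const0_rtx.  (Byte sizes here assume a 4-byte SImode.)  */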
5497
5498 /* Generate code for computing expression EXP,
5499 and storing the value into TARGET.
5500
5501 If the mode is BLKmode then we may return TARGET itself.
5502 It turns out that in BLKmode it doesn't cause a problem,
5503 because C has no operators that could combine two different
5504 assignments into the same BLKmode object with different values
5505 with no sequence point. Will other languages need this to
5506 be more thorough?
5507
5508 If CALL_PARAM_P is nonzero, this is a store into a call param on the
5509 stack, and block moves may need to be treated specially.
5510
5511 If NONTEMPORAL is true, try using a nontemporal store instruction.
5512
5513 If REVERSE is true, the store is to be done in reverse order. */
5514
5515 rtx
5516 store_expr (tree exp, rtx target, int call_param_p,
5517 bool nontemporal, bool reverse)
5518 {
5519 rtx temp;
5520 rtx alt_rtl = NULL_RTX;
5521 location_t loc = curr_insn_location ();
5522
5523 if (VOID_TYPE_P (TREE_TYPE (exp)))
5524 {
5525 /* C++ can generate ?: expressions with a throw expression in one
5526 branch and an rvalue in the other. Here, we resolve attempts to
5527 store the throw expression's nonexistent result. */
5528 gcc_assert (!call_param_p);
5529 expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL);
5530 return NULL_RTX;
5531 }
5532 if (TREE_CODE (exp) == COMPOUND_EXPR)
5533 {
5534 /* Perform first part of compound expression, then assign from second
5535 part. */
5536 expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode,
5537 call_param_p ? EXPAND_STACK_PARM : EXPAND_NORMAL);
5538 return store_expr (TREE_OPERAND (exp, 1), target,
5539 call_param_p, nontemporal, reverse);
5540 }
5541 else if (TREE_CODE (exp) == COND_EXPR && GET_MODE (target) == BLKmode)
5542 {
5543 /* For conditional expression, get safe form of the target. Then
5544 test the condition, doing the appropriate assignment on either
5545 side. This avoids the creation of unnecessary temporaries.
5546 For non-BLKmode, it is more efficient not to do this. */
5547
5548 rtx_code_label *lab1 = gen_label_rtx (), *lab2 = gen_label_rtx ();
5549
5550 do_pending_stack_adjust ();
5551 NO_DEFER_POP;
5552 jumpifnot (TREE_OPERAND (exp, 0), lab1,
5553 profile_probability::uninitialized ());
5554 store_expr (TREE_OPERAND (exp, 1), target, call_param_p,
5555 nontemporal, reverse);
5556 emit_jump_insn (targetm.gen_jump (lab2));
5557 emit_barrier ();
5558 emit_label (lab1);
5559 store_expr (TREE_OPERAND (exp, 2), target, call_param_p,
5560 nontemporal, reverse);
5561 emit_label (lab2);
5562 OK_DEFER_POP;
5563
5564 return NULL_RTX;
5565 }
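  /* Roughly, for a BLKmode target the branch above expands
     TARGET = COND ? A : B into the following shape:

          jumpifnot (COND) -> lab1
          store_expr (A, TARGET)
          jump lab2
       lab1:
          store_expr (B, TARGET)
       lab2:

     so no temporary aggregate is ever materialized for the unselected
     arm.  */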
5566 else if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
5567 /* If this is a scalar in a register that is stored in a wider mode
5568 than the declared mode, compute the result into its declared mode
5569 and then convert to the wider mode. Our value is the computed
5570 expression. */
5571 {
5572 rtx inner_target = 0;
5573 scalar_int_mode outer_mode = subreg_unpromoted_mode (target);
5574 scalar_int_mode inner_mode = subreg_promoted_mode (target);
5575
5576 /* We can do the conversion inside EXP, which will often result
5577 in some optimizations. Do the conversion in two steps: first
5578 change the signedness, if needed, then the extend. But don't
5579 do this if the type of EXP is a subtype of something else
5580 since then the conversion might involve more than just
5581 converting modes. */
5582 if (INTEGRAL_TYPE_P (TREE_TYPE (exp))
5583 && TREE_TYPE (TREE_TYPE (exp)) == 0
5584 && GET_MODE_PRECISION (outer_mode)
5585 == TYPE_PRECISION (TREE_TYPE (exp)))
5586 {
5587 if (!SUBREG_CHECK_PROMOTED_SIGN (target,
5588 TYPE_UNSIGNED (TREE_TYPE (exp))))
5589 {
5590 /* Some types, e.g. Fortran's logical*4, won't have a signed
5591 version, so use the mode instead. */
5592 tree ntype
5593 = (signed_or_unsigned_type_for
5594 (SUBREG_PROMOTED_SIGN (target), TREE_TYPE (exp)));
5595 if (ntype == NULL)
5596 ntype = lang_hooks.types.type_for_mode
5597 (TYPE_MODE (TREE_TYPE (exp)),
5598 SUBREG_PROMOTED_SIGN (target));
5599
5600 exp = fold_convert_loc (loc, ntype, exp);
5601 }
5602
5603 exp = fold_convert_loc (loc, lang_hooks.types.type_for_mode
5604 (inner_mode, SUBREG_PROMOTED_SIGN (target)),
5605 exp);
5606
5607 inner_target = SUBREG_REG (target);
5608 }
5609
5610 temp = expand_expr (exp, inner_target, VOIDmode,
5611 call_param_p ? EXPAND_STACK_PARM : EXPAND_NORMAL);
5612
5613
5614 /* If TEMP is a VOIDmode constant, use convert_modes to make
5615 sure that we properly convert it. */
5616 if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode)
5617 {
5618 temp = convert_modes (outer_mode, TYPE_MODE (TREE_TYPE (exp)),
5619 temp, SUBREG_PROMOTED_SIGN (target));
5620 temp = convert_modes (inner_mode, outer_mode, temp,
5621 SUBREG_PROMOTED_SIGN (target));
5622 }
5623
5624 convert_move (SUBREG_REG (target), temp,
5625 SUBREG_PROMOTED_SIGN (target));
5626
5627 return NULL_RTX;
5628 }
5629 else if ((TREE_CODE (exp) == STRING_CST
5630 || (TREE_CODE (exp) == MEM_REF
5631 && TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
5632 && TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
5633 == STRING_CST
5634 && integer_zerop (TREE_OPERAND (exp, 1))))
5635 && !nontemporal && !call_param_p
5636 && MEM_P (target))
5637 {
5638 /* Optimize initialization of an array with a STRING_CST. */
5639 HOST_WIDE_INT exp_len, str_copy_len;
5640 rtx dest_mem;
5641 tree str = TREE_CODE (exp) == STRING_CST
5642 ? exp : TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
5643
5644 exp_len = int_expr_size (exp);
5645 if (exp_len <= 0)
5646 goto normal_expr;
5647
5648 if (TREE_STRING_LENGTH (str) <= 0)
5649 goto normal_expr;
5650
5651 if (can_store_by_pieces (exp_len, string_cst_read_str, (void *) str,
5652 MEM_ALIGN (target), false))
5653 {
5654 store_by_pieces (target, exp_len, string_cst_read_str, (void *) str,
5655 MEM_ALIGN (target), false, RETURN_BEGIN);
5656 return NULL_RTX;
5657 }
5658
5659 str_copy_len = TREE_STRING_LENGTH (str);
5660 if ((STORE_MAX_PIECES & (STORE_MAX_PIECES - 1)) == 0)
5661 {
5662 str_copy_len += STORE_MAX_PIECES - 1;
5663 str_copy_len &= ~(STORE_MAX_PIECES - 1);
5664 }
5665 if (str_copy_len >= exp_len)
5666 goto normal_expr;
5667
5668 if (!can_store_by_pieces (str_copy_len, string_cst_read_str,
5669 (void *) str, MEM_ALIGN (target), false))
5670 goto normal_expr;
5671
5672 dest_mem = store_by_pieces (target, str_copy_len, string_cst_read_str,
5673 (void *) str, MEM_ALIGN (target), false,
5674 RETURN_END);
5675 clear_storage (adjust_address_1 (dest_mem, BLKmode, 0, 1, 1, 0,
5676 exp_len - str_copy_len),
5677 GEN_INT (exp_len - str_copy_len), BLOCK_OP_NORMAL);
5678 return NULL_RTX;
5679 }
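  /* Illustration of the branch above: for

         char buf[100] = "abc";

     EXP_LEN is 100 while the STRING_CST only provides 4 bytes.  If all
     100 bytes can be stored by pieces (string_cst_read_str supplies zeros
     past the string), that is done directly; otherwise a prefix rounded up
     to a multiple of STORE_MAX_PIECES is emitted with store_by_pieces and
     the remaining tail of BUF is cleared with clear_storage.  Which path
     applies depends on MEM_ALIGN (target) and the target's
     STORE_MAX_PIECES, so this is only a sketch of the common case.  */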
5680 else
5681 {
5682 rtx tmp_target;
5683
5684 normal_expr:
5685 /* If we want to use a nontemporal or a reverse order store, force the
5686 value into a register first. */
5687 tmp_target = nontemporal || reverse ? NULL_RTX : target;
5688 temp = expand_expr_real (exp, tmp_target, GET_MODE (target),
5689 (call_param_p
5690 ? EXPAND_STACK_PARM : EXPAND_NORMAL),
5691 &alt_rtl, false);
5692 }
5693
5694 /* If TEMP is a VOIDmode constant and the mode of the type of EXP is not
5695 the same as that of TARGET, adjust the constant. This is needed, for
5696 example, in case it is a CONST_DOUBLE or CONST_WIDE_INT and we want
5697 only a word-sized value. */
5698 if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode
5699 && TREE_CODE (exp) != ERROR_MARK
5700 && GET_MODE (target) != TYPE_MODE (TREE_TYPE (exp)))
5701 {
5702 if (GET_MODE_CLASS (GET_MODE (target))
5703 != GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (exp)))
5704 && known_eq (GET_MODE_BITSIZE (GET_MODE (target)),
5705 GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (exp)))))
5706 {
5707 rtx t = simplify_gen_subreg (GET_MODE (target), temp,
5708 TYPE_MODE (TREE_TYPE (exp)), 0);
5709 if (t)
5710 temp = t;
5711 }
5712 if (GET_MODE (temp) == VOIDmode)
5713 temp = convert_modes (GET_MODE (target), TYPE_MODE (TREE_TYPE (exp)),
5714 temp, TYPE_UNSIGNED (TREE_TYPE (exp)));
5715 }
5716
5717 /* If value was not generated in the target, store it there.
5718 Convert the value to TARGET's type first if necessary and emit the
5719 pending incrementations that have been queued when expanding EXP.
5720 Note that we cannot emit the whole queue blindly because this will
5721 effectively disable the POST_INC optimization later.
5722
5723 If TEMP and TARGET compare equal according to rtx_equal_p, but
5724 one or both of them are volatile memory refs, we have to distinguish
5725 two cases:
5726 - expand_expr has used TARGET. In this case, we must not generate
5727 another copy. This can be detected by TARGET being equal according
5728 to == .
5729 - expand_expr has not used TARGET - that means that the source just
5730 happens to have the same RTX form. Since temp will have been created
5731 by expand_expr, it will compare unequal according to == .
5732 We must generate a copy in this case, to reach the correct number
5733 of volatile memory references. */
5734
5735 if ((! rtx_equal_p (temp, target)
5736 || (temp != target && (side_effects_p (temp)
5737 || side_effects_p (target))))
5738 && TREE_CODE (exp) != ERROR_MARK
5739 /* If store_expr stores a DECL whose DECL_RTL(exp) == TARGET,
5740 but TARGET is not valid memory reference, TEMP will differ
5741 from TARGET although it is really the same location. */
5742 && !(alt_rtl
5743 && rtx_equal_p (alt_rtl, target)
5744 && !side_effects_p (alt_rtl)
5745 && !side_effects_p (target))
5746 /* If there's nothing to copy, don't bother. Don't call
5747 expr_size unless necessary, because the expr_size hook of some
5748 front ends (C++) must not be given objects that are not
5749 supposed to be bit-copied or bit-initialized. */
5750 && expr_size (exp) != const0_rtx)
5751 {
5752 if (GET_MODE (temp) != GET_MODE (target) && GET_MODE (temp) != VOIDmode)
5753 {
5754 if (GET_MODE (target) == BLKmode)
5755 {
5756 /* Handle calls that return BLKmode values in registers. */
5757 if (REG_P (temp) && TREE_CODE (exp) == CALL_EXPR)
5758 copy_blkmode_from_reg (target, temp, TREE_TYPE (exp));
5759 else
5760 store_bit_field (target,
5761 INTVAL (expr_size (exp)) * BITS_PER_UNIT,
5762 0, 0, 0, GET_MODE (temp), temp, reverse);
5763 }
5764 else
5765 convert_move (target, temp, TYPE_UNSIGNED (TREE_TYPE (exp)));
5766 }
5767
5768 else if (GET_MODE (temp) == BLKmode && TREE_CODE (exp) == STRING_CST)
5769 {
5770 /* Handle copying a string constant into an array. The string
5771 constant may be shorter than the array. So copy just the string's
5772 actual length, and clear the rest. First get the size of the data
5773 type of the string, which is actually the size of the target. */
5774 rtx size = expr_size (exp);
5775
5776 if (CONST_INT_P (size)
5777 && INTVAL (size) < TREE_STRING_LENGTH (exp))
5778 emit_block_move (target, temp, size,
5779 (call_param_p
5780 ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
5781 else
5782 {
5783 machine_mode pointer_mode
5784 = targetm.addr_space.pointer_mode (MEM_ADDR_SPACE (target));
5785 machine_mode address_mode = get_address_mode (target);
5786
5787 /* Compute the size of the data to copy from the string. */
5788 tree copy_size
5789 = size_binop_loc (loc, MIN_EXPR,
5790 make_tree (sizetype, size),
5791 size_int (TREE_STRING_LENGTH (exp)));
5792 rtx copy_size_rtx
5793 = expand_expr (copy_size, NULL_RTX, VOIDmode,
5794 (call_param_p
5795 ? EXPAND_STACK_PARM : EXPAND_NORMAL));
5796 rtx_code_label *label = 0;
5797
5798 /* Copy that much. */
5799 copy_size_rtx = convert_to_mode (pointer_mode, copy_size_rtx,
5800 TYPE_UNSIGNED (sizetype));
5801 emit_block_move (target, temp, copy_size_rtx,
5802 (call_param_p
5803 ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
5804
5805 /* Figure out how much is left in TARGET that we have to clear.
5806 Do all calculations in pointer_mode. */
5807 poly_int64 const_copy_size;
5808 if (poly_int_rtx_p (copy_size_rtx, &const_copy_size))
5809 {
5810 size = plus_constant (address_mode, size, -const_copy_size);
5811 target = adjust_address (target, BLKmode, const_copy_size);
5812 }
5813 else
5814 {
5815 size = expand_binop (TYPE_MODE (sizetype), sub_optab, size,
5816 copy_size_rtx, NULL_RTX, 0,
5817 OPTAB_LIB_WIDEN);
5818
5819 if (GET_MODE (copy_size_rtx) != address_mode)
5820 copy_size_rtx = convert_to_mode (address_mode,
5821 copy_size_rtx,
5822 TYPE_UNSIGNED (sizetype));
5823
5824 target = offset_address (target, copy_size_rtx,
5825 highest_pow2_factor (copy_size));
5826 label = gen_label_rtx ();
5827 emit_cmp_and_jump_insns (size, const0_rtx, LT, NULL_RTX,
5828 GET_MODE (size), 0, label);
5829 }
5830
5831 if (size != const0_rtx)
5832 clear_storage (target, size, BLOCK_OP_NORMAL);
5833
5834 if (label)
5835 emit_label (label);
5836 }
5837 }
5838 /* Handle calls that return values in multiple non-contiguous locations.
5839 The Irix 6 ABI has examples of this. */
5840 else if (GET_CODE (target) == PARALLEL)
5841 {
5842 if (GET_CODE (temp) == PARALLEL)
5843 emit_group_move (target, temp);
5844 else
5845 emit_group_load (target, temp, TREE_TYPE (exp),
5846 int_size_in_bytes (TREE_TYPE (exp)));
5847 }
5848 else if (GET_CODE (temp) == PARALLEL)
5849 emit_group_store (target, temp, TREE_TYPE (exp),
5850 int_size_in_bytes (TREE_TYPE (exp)));
5851 else if (GET_MODE (temp) == BLKmode)
5852 emit_block_move (target, temp, expr_size (exp),
5853 (call_param_p
5854 ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
5855 /* If we emit a nontemporal store, there is nothing else to do. */
5856 else if (nontemporal && emit_storent_insn (target, temp))
5857 ;
5858 else
5859 {
5860 if (reverse)
5861 temp = flip_storage_order (GET_MODE (target), temp);
5862 temp = force_operand (temp, target);
5863 if (temp != target)
5864 emit_move_insn (target, temp);
5865 }
5866 }
5867
5868 return NULL_RTX;
5869 }
5870 \f
5871 /* Return true if field F of structure TYPE is a flexible array. */
5872
5873 static bool
5874 flexible_array_member_p (const_tree f, const_tree type)
5875 {
5876 const_tree tf;
5877
5878 tf = TREE_TYPE (f);
5879 return (DECL_CHAIN (f) == NULL
5880 && TREE_CODE (tf) == ARRAY_TYPE
5881 && TYPE_DOMAIN (tf)
5882 && TYPE_MIN_VALUE (TYPE_DOMAIN (tf))
5883 && integer_zerop (TYPE_MIN_VALUE (TYPE_DOMAIN (tf)))
5884 && !TYPE_MAX_VALUE (TYPE_DOMAIN (tf))
5885 && int_size_in_bytes (type) >= 0);
5886 }
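/* For example, DATA below is a flexible array member: it is the last
   field, its array type has a zero lower bound and no upper bound, and
   the enclosing struct still has a known size:

       struct packet
       {
         int len;
         char data[];
       };
*/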
5887
5888 /* If FOR_CTOR_P, return the number of top-level elements that a constructor
5889 must have in order for it to completely initialize a value of type TYPE.
5890 Return -1 if the number isn't known.
5891
5892 If !FOR_CTOR_P, return an estimate of the number of scalars in TYPE. */
5893
5894 static HOST_WIDE_INT
5895 count_type_elements (const_tree type, bool for_ctor_p)
5896 {
5897 switch (TREE_CODE (type))
5898 {
5899 case ARRAY_TYPE:
5900 {
5901 tree nelts;
5902
5903 nelts = array_type_nelts (type);
5904 if (nelts && tree_fits_uhwi_p (nelts))
5905 {
5906 unsigned HOST_WIDE_INT n;
5907
5908 n = tree_to_uhwi (nelts) + 1;
5909 if (n == 0 || for_ctor_p)
5910 return n;
5911 else
5912 return n * count_type_elements (TREE_TYPE (type), false);
5913 }
5914 return for_ctor_p ? -1 : 1;
5915 }
5916
5917 case RECORD_TYPE:
5918 {
5919 unsigned HOST_WIDE_INT n;
5920 tree f;
5921
5922 n = 0;
5923 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
5924 if (TREE_CODE (f) == FIELD_DECL)
5925 {
5926 if (!for_ctor_p)
5927 n += count_type_elements (TREE_TYPE (f), false);
5928 else if (!flexible_array_member_p (f, type))
5929 /* Don't count flexible arrays, which are not supposed
5930 to be initialized. */
5931 n += 1;
5932 }
5933
5934 return n;
5935 }
5936
5937 case UNION_TYPE:
5938 case QUAL_UNION_TYPE:
5939 {
5940 tree f;
5941 HOST_WIDE_INT n, m;
5942
5943 gcc_assert (!for_ctor_p);
5944 /* Estimate the number of scalars in each field and pick the
5945 maximum. Other estimates would do instead; the idea is simply
5946 to make sure that the estimate is not sensitive to the ordering
5947 of the fields. */
5948 n = 1;
5949 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
5950 if (TREE_CODE (f) == FIELD_DECL)
5951 {
5952 m = count_type_elements (TREE_TYPE (f), false);
5953 /* If the field doesn't span the whole union, add an extra
5954 scalar for the rest. */
5955 if (simple_cst_equal (TYPE_SIZE (TREE_TYPE (f)),
5956 TYPE_SIZE (type)) != 1)
5957 m++;
5958 if (n < m)
5959 n = m;
5960 }
5961 return n;
5962 }
5963
5964 case COMPLEX_TYPE:
5965 return 2;
5966
5967 case VECTOR_TYPE:
5968 {
5969 unsigned HOST_WIDE_INT nelts;
5970 if (TYPE_VECTOR_SUBPARTS (type).is_constant (&nelts))
5971 return nelts;
5972 else
5973 return -1;
5974 }
5975
5976 case INTEGER_TYPE:
5977 case REAL_TYPE:
5978 case FIXED_POINT_TYPE:
5979 case ENUMERAL_TYPE:
5980 case BOOLEAN_TYPE:
5981 case POINTER_TYPE:
5982 case OFFSET_TYPE:
5983 case REFERENCE_TYPE:
5984 case NULLPTR_TYPE:
5985 return 1;
5986
5987 case ERROR_MARK:
5988 return 0;
5989
5990 case VOID_TYPE:
5991 case METHOD_TYPE:
5992 case FUNCTION_TYPE:
5993 case LANG_TYPE:
5994 default:
5995 gcc_unreachable ();
5996 }
5997 }
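/* Worked example (illustrative): for

       struct pair { int key; short val[3]; };

   count_type_elements (TYPE, false) returns 4, one scalar for KEY plus
   3 * 1 for the array elements, while count_type_elements (TYPE, true)
   returns 2, since a constructor needs one top-level element per
   FIELD_DECL in order to be complete.  */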
5998
5999 /* Helper for categorize_ctor_elements. Identical interface. */
6000
6001 static bool
6002 categorize_ctor_elements_1 (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
6003 HOST_WIDE_INT *p_unique_nz_elts,
6004 HOST_WIDE_INT *p_init_elts, bool *p_complete)
6005 {
6006 unsigned HOST_WIDE_INT idx;
6007 HOST_WIDE_INT nz_elts, unique_nz_elts, init_elts, num_fields;
6008 tree value, purpose, elt_type;
6009
6010 /* Whether CTOR is a valid constant initializer, in accordance with what
6011 initializer_constant_valid_p does. If inferred from the constructor
6012 elements, true until proven otherwise. */
6013 bool const_from_elts_p = constructor_static_from_elts_p (ctor);
6014 bool const_p = const_from_elts_p ? true : TREE_STATIC (ctor);
6015
6016 nz_elts = 0;
6017 unique_nz_elts = 0;
6018 init_elts = 0;
6019 num_fields = 0;
6020 elt_type = NULL_TREE;
6021
6022 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), idx, purpose, value)
6023 {
6024 HOST_WIDE_INT mult = 1;
6025
6026 if (purpose && TREE_CODE (purpose) == RANGE_EXPR)
6027 {
6028 tree lo_index = TREE_OPERAND (purpose, 0);
6029 tree hi_index = TREE_OPERAND (purpose, 1);
6030
6031 if (tree_fits_uhwi_p (lo_index) && tree_fits_uhwi_p (hi_index))
6032 mult = (tree_to_uhwi (hi_index)
6033 - tree_to_uhwi (lo_index) + 1);
6034 }
6035 num_fields += mult;
6036 elt_type = TREE_TYPE (value);
6037
6038 switch (TREE_CODE (value))
6039 {
6040 case CONSTRUCTOR:
6041 {
6042 HOST_WIDE_INT nz = 0, unz = 0, ic = 0;
6043
6044 bool const_elt_p = categorize_ctor_elements_1 (value, &nz, &unz,
6045 &ic, p_complete);
6046
6047 nz_elts += mult * nz;
6048 unique_nz_elts += unz;
6049 init_elts += mult * ic;
6050
6051 if (const_from_elts_p && const_p)
6052 const_p = const_elt_p;
6053 }
6054 break;
6055
6056 case INTEGER_CST:
6057 case REAL_CST:
6058 case FIXED_CST:
6059 if (!initializer_zerop (value))
6060 {
6061 nz_elts += mult;
6062 unique_nz_elts++;
6063 }
6064 init_elts += mult;
6065 break;
6066
6067 case STRING_CST:
6068 nz_elts += mult * TREE_STRING_LENGTH (value);
6069 unique_nz_elts += TREE_STRING_LENGTH (value);
6070 init_elts += mult * TREE_STRING_LENGTH (value);
6071 break;
6072
6073 case COMPLEX_CST:
6074 if (!initializer_zerop (TREE_REALPART (value)))
6075 {
6076 nz_elts += mult;
6077 unique_nz_elts++;
6078 }
6079 if (!initializer_zerop (TREE_IMAGPART (value)))
6080 {
6081 nz_elts += mult;
6082 unique_nz_elts++;
6083 }
6084 init_elts += 2 * mult;
6085 break;
6086
6087 case VECTOR_CST:
6088 {
6089 /* We can only construct constant-length vectors using
6090 CONSTRUCTOR. */
6091 unsigned int nunits = VECTOR_CST_NELTS (value).to_constant ();
6092 for (unsigned int i = 0; i < nunits; ++i)
6093 {
6094 tree v = VECTOR_CST_ELT (value, i);
6095 if (!initializer_zerop (v))
6096 {
6097 nz_elts += mult;
6098 unique_nz_elts++;
6099 }
6100 init_elts += mult;
6101 }
6102 }
6103 break;
6104
6105 default:
6106 {
6107 HOST_WIDE_INT tc = count_type_elements (elt_type, false);
6108 nz_elts += mult * tc;
6109 unique_nz_elts += tc;
6110 init_elts += mult * tc;
6111
6112 if (const_from_elts_p && const_p)
6113 const_p
6114 = initializer_constant_valid_p (value,
6115 elt_type,
6116 TYPE_REVERSE_STORAGE_ORDER
6117 (TREE_TYPE (ctor)))
6118 != NULL_TREE;
6119 }
6120 break;
6121 }
6122 }
6123
6124 if (*p_complete && !complete_ctor_at_level_p (TREE_TYPE (ctor),
6125 num_fields, elt_type))
6126 *p_complete = false;
6127
6128 *p_nz_elts += nz_elts;
6129 *p_unique_nz_elts += unique_nz_elts;
6130 *p_init_elts += init_elts;
6131
6132 return const_p;
6133 }
6134
6135 /* Examine CTOR to discover:
6136 * how many scalar fields are set to nonzero values,
6137 and place it in *P_NZ_ELTS;
6138 * the same, but counting RANGE_EXPRs as multiplier of 1 instead of
6139 high - low + 1 (this can be useful for callers to determine ctors
6140 that could be cheaply initialized with - perhaps nested - loops
6141 rather than copied from huge read-only data),
6142 and place it in *P_UNIQUE_NZ_ELTS;
6143 * how many scalar fields in total are in CTOR,
6144 and place it in *P_INIT_ELTS.
6145 * whether the constructor is complete -- in the sense that every
6146 meaningful byte is explicitly given a value --
6147 and place it in *P_COMPLETE.
6148
6149 Return whether or not CTOR is a valid static constant initializer, the same
6150 as "initializer_constant_valid_p (CTOR, TREE_TYPE (CTOR)) != 0". */
6151
6152 bool
6153 categorize_ctor_elements (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
6154 HOST_WIDE_INT *p_unique_nz_elts,
6155 HOST_WIDE_INT *p_init_elts, bool *p_complete)
6156 {
6157 *p_nz_elts = 0;
6158 *p_unique_nz_elts = 0;
6159 *p_init_elts = 0;
6160 *p_complete = true;
6161
6162 return categorize_ctor_elements_1 (ctor, p_nz_elts, p_unique_nz_elts,
6163 p_init_elts, p_complete);
6164 }
6165
6166 /* TYPE is initialized by a constructor with NUM_ELTS elements, the last
6167 of which had type LAST_TYPE. Each element was itself a complete
6168 initializer, in the sense that every meaningful byte was explicitly
6169 given a value. Return true if the same is true for the constructor
6170 as a whole. */
6171
6172 bool
6173 complete_ctor_at_level_p (const_tree type, HOST_WIDE_INT num_elts,
6174 const_tree last_type)
6175 {
6176 if (TREE_CODE (type) == UNION_TYPE
6177 || TREE_CODE (type) == QUAL_UNION_TYPE)
6178 {
6179 if (num_elts == 0)
6180 return false;
6181
6182 gcc_assert (num_elts == 1 && last_type);
6183
6184 /* ??? We could look at each element of the union, and find the
6185 largest element, which would avoid comparing the size of the
6186 initialized element against any tail padding in the union.
6187 Doesn't seem worth the effort... */
6188 return simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (last_type)) == 1;
6189 }
6190
6191 return count_type_elements (type, true) == num_elts;
6192 }
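/* For instance, a union initializer counts as complete here only when its
   single initialized member is as large as the union itself (assuming an
   int wider than a char below):

       union u { char c; int i; };
       union u a = { .i = 1 };    complete
       union u b = { .c = 1 };    incomplete, tail bytes not covered
*/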
6193
6194 /* Return 1 if EXP contains mostly (3/4) zeros. */
6195
6196 static int
6197 mostly_zeros_p (const_tree exp)
6198 {
6199 if (TREE_CODE (exp) == CONSTRUCTOR)
6200 {
6201 HOST_WIDE_INT nz_elts, unz_elts, init_elts;
6202 bool complete_p;
6203
6204 categorize_ctor_elements (exp, &nz_elts, &unz_elts, &init_elts,
6205 &complete_p);
6206 return !complete_p || nz_elts < init_elts / 4;
6207 }
6208
6209 return initializer_zerop (exp);
6210 }
6211
6212 /* Return 1 if EXP contains all zeros. */
6213
6214 static int
6215 all_zeros_p (const_tree exp)
6216 {
6217 if (TREE_CODE (exp) == CONSTRUCTOR)
6218 {
6219 HOST_WIDE_INT nz_elts, unz_elts, init_elts;
6220 bool complete_p;
6221
6222 categorize_ctor_elements (exp, &nz_elts, &unz_elts, &init_elts,
6223 &complete_p);
6224 return nz_elts == 0;
6225 }
6226
6227 return initializer_zerop (exp);
6228 }
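/* Worked example (illustrative, for constructors of an int a[8]):

       { 0, 0, 0, 0, 0, 0, 0, 1 }   nz_elts == 1, init_elts == 8
         -> mostly_zeros_p: 1 < 8/4, so true; all_zeros_p: false
       { 0, 0, 0, 0, 0, 0, 0, 0 }   nz_elts == 0
         -> both mostly_zeros_p and all_zeros_p return true
       { 1, 2, 3, 4, 5, 6, 7, 8 }   nz_elts == 8
         -> both return false (the ctor is complete and 3/4 nonzero).  */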
6229 \f
6230 /* Helper function for store_constructor.
6231 TARGET, BITSIZE, BITPOS, MODE, EXP are as for store_field.
6232 CLEARED is as for store_constructor.
6233 ALIAS_SET is the alias set to use for any stores.
6234 If REVERSE is true, the store is to be done in reverse order.
6235
6236 This provides a recursive shortcut back to store_constructor when it isn't
6237 necessary to go through store_field. This is so that we can pass through
6238 the cleared field to let store_constructor know that we may not have to
6239 clear a substructure if the outer structure has already been cleared. */
6240
6241 static void
6242 store_constructor_field (rtx target, poly_uint64 bitsize, poly_int64 bitpos,
6243 poly_uint64 bitregion_start,
6244 poly_uint64 bitregion_end,
6245 machine_mode mode,
6246 tree exp, int cleared,
6247 alias_set_type alias_set, bool reverse)
6248 {
6249 poly_int64 bytepos;
6250 poly_uint64 bytesize;
6251 if (TREE_CODE (exp) == CONSTRUCTOR
6252 /* We can only call store_constructor recursively if the size and
6253 bit position are on a byte boundary. */
6254 && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
6255 && maybe_ne (bitsize, 0U)
6256 && multiple_p (bitsize, BITS_PER_UNIT, &bytesize)
6257 /* If we have a nonzero bitpos for a register target, then we just
6258 let store_field do the bitfield handling. This is unlikely to
6259 generate unnecessary clear instructions anyway. */
6260 && (known_eq (bitpos, 0) || MEM_P (target)))
6261 {
6262 if (MEM_P (target))
6263 {
6264 machine_mode target_mode = GET_MODE (target);
6265 if (target_mode != BLKmode
6266 && !multiple_p (bitpos, GET_MODE_ALIGNMENT (target_mode)))
6267 target_mode = BLKmode;
6268 target = adjust_address (target, target_mode, bytepos);
6269 }
6270
6271
6272 /* Update the alias set, if required. */
6273 if (MEM_P (target) && ! MEM_KEEP_ALIAS_SET_P (target)
6274 && MEM_ALIAS_SET (target) != 0)
6275 {
6276 target = copy_rtx (target);
6277 set_mem_alias_set (target, alias_set);
6278 }
6279
6280 store_constructor (exp, target, cleared, bytesize, reverse);
6281 }
6282 else
6283 store_field (target, bitsize, bitpos, bitregion_start, bitregion_end, mode,
6284 exp, alias_set, false, reverse);
6285 }
6286
6287
6288 /* Returns the number of FIELD_DECLs in TYPE. */
6289
6290 static int
6291 fields_length (const_tree type)
6292 {
6293 tree t = TYPE_FIELDS (type);
6294 int count = 0;
6295
6296 for (; t; t = DECL_CHAIN (t))
6297 if (TREE_CODE (t) == FIELD_DECL)
6298 ++count;
6299
6300 return count;
6301 }
6302
6303
6304 /* Store the value of constructor EXP into the rtx TARGET.
6305 TARGET is either a REG or a MEM; we know it cannot conflict, since
6306 safe_from_p has been called.
6307 CLEARED is true if TARGET is known to have been zero'd.
6308 SIZE is the number of bytes of TARGET we are allowed to modify: this
6309 may not be the same as the size of EXP if we are assigning to a field
6310 which has been packed to exclude padding bits.
6311 If REVERSE is true, the store is to be done in reverse order. */
6312
6313 static void
6314 store_constructor (tree exp, rtx target, int cleared, poly_int64 size,
6315 bool reverse)
6316 {
6317 tree type = TREE_TYPE (exp);
6318 HOST_WIDE_INT exp_size = int_size_in_bytes (type);
6319 poly_int64 bitregion_end = known_gt (size, 0) ? size * BITS_PER_UNIT - 1 : 0;
6320
6321 switch (TREE_CODE (type))
6322 {
6323 case RECORD_TYPE:
6324 case UNION_TYPE:
6325 case QUAL_UNION_TYPE:
6326 {
6327 unsigned HOST_WIDE_INT idx;
6328 tree field, value;
6329
6330 /* The storage order is specified for every aggregate type. */
6331 reverse = TYPE_REVERSE_STORAGE_ORDER (type);
6332
6333 /* If size is zero or the target is already cleared, do nothing. */
6334 if (known_eq (size, 0) || cleared)
6335 cleared = 1;
6336 /* We either clear the aggregate or indicate the value is dead. */
6337 else if ((TREE_CODE (type) == UNION_TYPE
6338 || TREE_CODE (type) == QUAL_UNION_TYPE)
6339 && ! CONSTRUCTOR_ELTS (exp))
6340 /* If the constructor is empty, clear the union. */
6341 {
6342 clear_storage (target, expr_size (exp), BLOCK_OP_NORMAL);
6343 cleared = 1;
6344 }
6345
6346 /* If we are building a static constructor into a register,
6347 set the initial value as zero so we can fold the value into
6348 a constant. But if more than one register is involved,
6349 this probably loses. */
6350 else if (REG_P (target) && TREE_STATIC (exp)
6351 && known_le (GET_MODE_SIZE (GET_MODE (target)),
6352 REGMODE_NATURAL_SIZE (GET_MODE (target))))
6353 {
6354 emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
6355 cleared = 1;
6356 }
6357
6358 /* If the constructor has fewer fields than the structure or
6359 if we are initializing the structure to mostly zeros, clear
6360 the whole structure first. Don't do this if TARGET is a
6361 register whose mode size isn't equal to SIZE since
6362 clear_storage can't handle this case. */
6363 else if (known_size_p (size)
6364 && (((int) CONSTRUCTOR_NELTS (exp) != fields_length (type))
6365 || mostly_zeros_p (exp))
6366 && (!REG_P (target)
6367 || known_eq (GET_MODE_SIZE (GET_MODE (target)), size)))
6368 {
6369 clear_storage (target, gen_int_mode (size, Pmode),
6370 BLOCK_OP_NORMAL);
6371 cleared = 1;
6372 }
6373
6374 if (REG_P (target) && !cleared)
6375 emit_clobber (target);
6376
6377 /* Store each element of the constructor into the
6378 corresponding field of TARGET. */
6379 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), idx, field, value)
6380 {
6381 machine_mode mode;
6382 HOST_WIDE_INT bitsize;
6383 HOST_WIDE_INT bitpos = 0;
6384 tree offset;
6385 rtx to_rtx = target;
6386
6387 /* Just ignore missing fields. We cleared the whole
6388 structure, above, if any fields are missing. */
6389 if (field == 0)
6390 continue;
6391
6392 if (cleared && initializer_zerop (value))
6393 continue;
6394
6395 if (tree_fits_uhwi_p (DECL_SIZE (field)))
6396 bitsize = tree_to_uhwi (DECL_SIZE (field));
6397 else
6398 gcc_unreachable ();
6399
6400 mode = DECL_MODE (field);
6401 if (DECL_BIT_FIELD (field))
6402 mode = VOIDmode;
6403
6404 offset = DECL_FIELD_OFFSET (field);
6405 if (tree_fits_shwi_p (offset)
6406 && tree_fits_shwi_p (bit_position (field)))
6407 {
6408 bitpos = int_bit_position (field);
6409 offset = NULL_TREE;
6410 }
6411 else
6412 gcc_unreachable ();
6413
6414 /* If this initializes a field that is smaller than a
6415 word, at the start of a word, try to widen it to a full
6416 word. This special case allows us to output C++ member
6417 function initializations in a form that the optimizers
6418 can understand. */
6419 if (WORD_REGISTER_OPERATIONS
6420 && REG_P (target)
6421 && bitsize < BITS_PER_WORD
6422 && bitpos % BITS_PER_WORD == 0
6423 && GET_MODE_CLASS (mode) == MODE_INT
6424 && TREE_CODE (value) == INTEGER_CST
6425 && exp_size >= 0
6426 && bitpos + BITS_PER_WORD <= exp_size * BITS_PER_UNIT)
6427 {
6428 tree type = TREE_TYPE (value);
6429
6430 if (TYPE_PRECISION (type) < BITS_PER_WORD)
6431 {
6432 type = lang_hooks.types.type_for_mode
6433 (word_mode, TYPE_UNSIGNED (type));
6434 value = fold_convert (type, value);
6435 /* Make sure the bits beyond the original bitsize are zero
6436 so that we can correctly avoid extra zeroing stores in
6437 later constructor elements. */
6438 tree bitsize_mask
6439 = wide_int_to_tree (type, wi::mask (bitsize, false,
6440 BITS_PER_WORD));
6441 value = fold_build2 (BIT_AND_EXPR, type, value, bitsize_mask);
6442 }
6443
6444 if (BYTES_BIG_ENDIAN)
6445 value
6446 = fold_build2 (LSHIFT_EXPR, type, value,
6447 build_int_cst (type,
6448 BITS_PER_WORD - bitsize));
6449 bitsize = BITS_PER_WORD;
6450 mode = word_mode;
6451 }
6452
6453 if (MEM_P (to_rtx) && !MEM_KEEP_ALIAS_SET_P (to_rtx)
6454 && DECL_NONADDRESSABLE_P (field))
6455 {
6456 to_rtx = copy_rtx (to_rtx);
6457 MEM_KEEP_ALIAS_SET_P (to_rtx) = 1;
6458 }
6459
6460 store_constructor_field (to_rtx, bitsize, bitpos,
6461 0, bitregion_end, mode,
6462 value, cleared,
6463 get_alias_set (TREE_TYPE (field)),
6464 reverse);
6465 }
6466 break;
6467 }
6468 case ARRAY_TYPE:
6469 {
6470 tree value, index;
6471 unsigned HOST_WIDE_INT i;
6472 int need_to_clear;
6473 tree domain;
6474 tree elttype = TREE_TYPE (type);
6475 int const_bounds_p;
6476 HOST_WIDE_INT minelt = 0;
6477 HOST_WIDE_INT maxelt = 0;
6478
6479 /* The storage order is specified for every aggregate type. */
6480 reverse = TYPE_REVERSE_STORAGE_ORDER (type);
6481
6482 domain = TYPE_DOMAIN (type);
6483 const_bounds_p = (TYPE_MIN_VALUE (domain)
6484 && TYPE_MAX_VALUE (domain)
6485 && tree_fits_shwi_p (TYPE_MIN_VALUE (domain))
6486 && tree_fits_shwi_p (TYPE_MAX_VALUE (domain)));
6487
6488 /* If we have constant bounds for the range of the type, get them. */
6489 if (const_bounds_p)
6490 {
6491 minelt = tree_to_shwi (TYPE_MIN_VALUE (domain));
6492 maxelt = tree_to_shwi (TYPE_MAX_VALUE (domain));
6493 }
6494
6495 /* If the constructor has fewer elements than the array, clear
6496 the whole array first. Similarly if this is a static
6497 constructor of a non-BLKmode object. */
6498 if (cleared)
6499 need_to_clear = 0;
6500 else if (REG_P (target) && TREE_STATIC (exp))
6501 need_to_clear = 1;
6502 else
6503 {
6504 unsigned HOST_WIDE_INT idx;
6505 tree index, value;
6506 HOST_WIDE_INT count = 0, zero_count = 0;
6507 need_to_clear = ! const_bounds_p;
6508
6509 /* This loop is a more accurate version of the loop in
6510 mostly_zeros_p (it handles RANGE_EXPR in an index). It
6511 is also needed to check for missing elements. */
6512 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), idx, index, value)
6513 {
6514 HOST_WIDE_INT this_node_count;
6515
6516 if (need_to_clear)
6517 break;
6518
6519 if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR)
6520 {
6521 tree lo_index = TREE_OPERAND (index, 0);
6522 tree hi_index = TREE_OPERAND (index, 1);
6523
6524 if (! tree_fits_uhwi_p (lo_index)
6525 || ! tree_fits_uhwi_p (hi_index))
6526 {
6527 need_to_clear = 1;
6528 break;
6529 }
6530
6531 this_node_count = (tree_to_uhwi (hi_index)
6532 - tree_to_uhwi (lo_index) + 1);
6533 }
6534 else
6535 this_node_count = 1;
6536
6537 count += this_node_count;
6538 if (mostly_zeros_p (value))
6539 zero_count += this_node_count;
6540 }
6541
6542 /* Clear the entire array first if there are any missing
6543 elements, or if the incidence of zero elements is >=
6544 75%. */
6545 if (! need_to_clear
6546 && (count < maxelt - minelt + 1
6547 || 4 * zero_count >= 3 * count))
6548 need_to_clear = 1;
6549 }
6550
6551 if (need_to_clear && maybe_gt (size, 0))
6552 {
6553 if (REG_P (target))
6554 emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
6555 else
6556 clear_storage (target, gen_int_mode (size, Pmode),
6557 BLOCK_OP_NORMAL);
6558 cleared = 1;
6559 }
6560
6561 if (!cleared && REG_P (target))
6562 /* Inform later passes that the old value is dead. */
6563 emit_clobber (target);
6564
6565 /* Store each element of the constructor into the
6566 corresponding element of TARGET, determined by counting the
6567 elements. */
6568 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), i, index, value)
6569 {
6570 machine_mode mode;
6571 poly_int64 bitsize;
6572 HOST_WIDE_INT bitpos;
6573 rtx xtarget = target;
6574
6575 if (cleared && initializer_zerop (value))
6576 continue;
6577
6578 mode = TYPE_MODE (elttype);
6579 if (mode != BLKmode)
6580 bitsize = GET_MODE_BITSIZE (mode);
6581 else if (!poly_int_tree_p (TYPE_SIZE (elttype), &bitsize))
6582 bitsize = -1;
6583
6584 if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR)
6585 {
6586 tree lo_index = TREE_OPERAND (index, 0);
6587 tree hi_index = TREE_OPERAND (index, 1);
6588 rtx index_r, pos_rtx;
6589 HOST_WIDE_INT lo, hi, count;
6590 tree position;
6591
6592 /* If the range is constant and "small", unroll the loop. */
6593 if (const_bounds_p
6594 && tree_fits_shwi_p (lo_index)
6595 && tree_fits_shwi_p (hi_index)
6596 && (lo = tree_to_shwi (lo_index),
6597 hi = tree_to_shwi (hi_index),
6598 count = hi - lo + 1,
6599 (!MEM_P (target)
6600 || count <= 2
6601 || (tree_fits_uhwi_p (TYPE_SIZE (elttype))
6602 && (tree_to_uhwi (TYPE_SIZE (elttype)) * count
6603 <= 40 * 8)))))
6604 {
6605 lo -= minelt; hi -= minelt;
6606 for (; lo <= hi; lo++)
6607 {
6608 bitpos = lo * tree_to_shwi (TYPE_SIZE (elttype));
6609
6610 if (MEM_P (target)
6611 && !MEM_KEEP_ALIAS_SET_P (target)
6612 && TREE_CODE (type) == ARRAY_TYPE
6613 && TYPE_NONALIASED_COMPONENT (type))
6614 {
6615 target = copy_rtx (target);
6616 MEM_KEEP_ALIAS_SET_P (target) = 1;
6617 }
6618
6619 store_constructor_field
6620 (target, bitsize, bitpos, 0, bitregion_end,
6621 mode, value, cleared,
6622 get_alias_set (elttype), reverse);
6623 }
6624 }
6625 else
6626 {
6627 rtx_code_label *loop_start = gen_label_rtx ();
6628 rtx_code_label *loop_end = gen_label_rtx ();
6629 tree exit_cond;
6630
6631 expand_normal (hi_index);
6632
6633 index = build_decl (EXPR_LOCATION (exp),
6634 VAR_DECL, NULL_TREE, domain);
6635 index_r = gen_reg_rtx (promote_decl_mode (index, NULL));
6636 SET_DECL_RTL (index, index_r);
6637 store_expr (lo_index, index_r, 0, false, reverse);
6638
6639 /* Build the head of the loop. */
6640 do_pending_stack_adjust ();
6641 emit_label (loop_start);
6642
6643 /* Assign value to element index. */
6644 position =
6645 fold_convert (ssizetype,
6646 fold_build2 (MINUS_EXPR,
6647 TREE_TYPE (index),
6648 index,
6649 TYPE_MIN_VALUE (domain)));
6650
6651 position =
6652 size_binop (MULT_EXPR, position,
6653 fold_convert (ssizetype,
6654 TYPE_SIZE_UNIT (elttype)));
6655
6656 pos_rtx = expand_normal (position);
6657 xtarget = offset_address (target, pos_rtx,
6658 highest_pow2_factor (position));
6659 xtarget = adjust_address (xtarget, mode, 0);
6660 if (TREE_CODE (value) == CONSTRUCTOR)
6661 store_constructor (value, xtarget, cleared,
6662 exact_div (bitsize, BITS_PER_UNIT),
6663 reverse);
6664 else
6665 store_expr (value, xtarget, 0, false, reverse);
6666
6667 /* Generate a conditional jump to exit the loop. */
6668 exit_cond = build2 (LT_EXPR, integer_type_node,
6669 index, hi_index);
6670 jumpif (exit_cond, loop_end,
6671 profile_probability::uninitialized ());
6672
6673 /* Update the loop counter, and jump to the head of
6674 the loop. */
6675 expand_assignment (index,
6676 build2 (PLUS_EXPR, TREE_TYPE (index),
6677 index, integer_one_node),
6678 false);
6679
6680 emit_jump (loop_start);
6681
6682 /* Build the end of the loop. */
6683 emit_label (loop_end);
6684 }
6685 }
6686 else if ((index != 0 && ! tree_fits_shwi_p (index))
6687 || ! tree_fits_uhwi_p (TYPE_SIZE (elttype)))
6688 {
6689 tree position;
6690
6691 if (index == 0)
6692 index = ssize_int (1);
6693
6694 if (minelt)
6695 index = fold_convert (ssizetype,
6696 fold_build2 (MINUS_EXPR,
6697 TREE_TYPE (index),
6698 index,
6699 TYPE_MIN_VALUE (domain)));
6700
6701 position =
6702 size_binop (MULT_EXPR, index,
6703 fold_convert (ssizetype,
6704 TYPE_SIZE_UNIT (elttype)));
6705 xtarget = offset_address (target,
6706 expand_normal (position),
6707 highest_pow2_factor (position));
6708 xtarget = adjust_address (xtarget, mode, 0);
6709 store_expr (value, xtarget, 0, false, reverse);
6710 }
6711 else
6712 {
6713 if (index != 0)
6714 bitpos = ((tree_to_shwi (index) - minelt)
6715 * tree_to_uhwi (TYPE_SIZE (elttype)));
6716 else
6717 bitpos = (i * tree_to_uhwi (TYPE_SIZE (elttype)));
6718
6719 if (MEM_P (target) && !MEM_KEEP_ALIAS_SET_P (target)
6720 && TREE_CODE (type) == ARRAY_TYPE
6721 && TYPE_NONALIASED_COMPONENT (type))
6722 {
6723 target = copy_rtx (target);
6724 MEM_KEEP_ALIAS_SET_P (target) = 1;
6725 }
6726 store_constructor_field (target, bitsize, bitpos, 0,
6727 bitregion_end, mode, value,
6728 cleared, get_alias_set (elttype),
6729 reverse);
6730 }
6731 }
6732 break;
6733 }
6734
6735 case VECTOR_TYPE:
6736 {
6737 unsigned HOST_WIDE_INT idx;
6738 constructor_elt *ce;
6739 int i;
6740 int need_to_clear;
6741 insn_code icode = CODE_FOR_nothing;
6742 tree elt;
6743 tree elttype = TREE_TYPE (type);
6744 int elt_size = tree_to_uhwi (TYPE_SIZE (elttype));
6745 machine_mode eltmode = TYPE_MODE (elttype);
6746 HOST_WIDE_INT bitsize;
6747 HOST_WIDE_INT bitpos;
6748 rtvec vector = NULL;
6749 poly_uint64 n_elts;
6750 unsigned HOST_WIDE_INT const_n_elts;
6751 alias_set_type alias;
6752 bool vec_vec_init_p = false;
6753 machine_mode mode = GET_MODE (target);
6754
6755 gcc_assert (eltmode != BLKmode);
6756
6757 /* Try using vec_duplicate_optab for uniform vectors. */
6758 if (!TREE_SIDE_EFFECTS (exp)
6759 && VECTOR_MODE_P (mode)
6760 && eltmode == GET_MODE_INNER (mode)
6761 && ((icode = optab_handler (vec_duplicate_optab, mode))
6762 != CODE_FOR_nothing)
6763 && (elt = uniform_vector_p (exp)))
6764 {
6765 class expand_operand ops[2];
6766 create_output_operand (&ops[0], target, mode);
6767 create_input_operand (&ops[1], expand_normal (elt), eltmode);
6768 expand_insn (icode, 2, ops);
6769 if (!rtx_equal_p (target, ops[0].value))
6770 emit_move_insn (target, ops[0].value);
6771 break;
6772 }
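        /* The vec_duplicate path above would cover, e.g., a GNU C vector
           constructor that repeats a single non-constant value:

               typedef int v4si __attribute__ ((vector_size (16)));

               v4si
               splat (int x)
               {
                 return (v4si) { x, x, x, x };
               }

           provided the target implements vec_duplicate_optab for V4SImode
           (an assumption; otherwise the generic element-by-element code
           below is used).  */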
6773
6774 n_elts = TYPE_VECTOR_SUBPARTS (type);
6775 if (REG_P (target)
6776 && VECTOR_MODE_P (mode)
6777 && n_elts.is_constant (&const_n_elts))
6778 {
6779 machine_mode emode = eltmode;
6780
6781 if (CONSTRUCTOR_NELTS (exp)
6782 && (TREE_CODE (TREE_TYPE (CONSTRUCTOR_ELT (exp, 0)->value))
6783 == VECTOR_TYPE))
6784 {
6785 tree etype = TREE_TYPE (CONSTRUCTOR_ELT (exp, 0)->value);
6786 gcc_assert (known_eq (CONSTRUCTOR_NELTS (exp)
6787 * TYPE_VECTOR_SUBPARTS (etype),
6788 n_elts));
6789 emode = TYPE_MODE (etype);
6790 }
6791 icode = convert_optab_handler (vec_init_optab, mode, emode);
6792 if (icode != CODE_FOR_nothing)
6793 {
6794 unsigned int i, n = const_n_elts;
6795
6796 if (emode != eltmode)
6797 {
6798 n = CONSTRUCTOR_NELTS (exp);
6799 vec_vec_init_p = true;
6800 }
6801 vector = rtvec_alloc (n);
6802 for (i = 0; i < n; i++)
6803 RTVEC_ELT (vector, i) = CONST0_RTX (emode);
6804 }
6805 }
6806
6807 /* If the constructor has fewer elements than the vector,
6808 clear the whole vector first. Similarly if this is a static
6809 constructor of a non-BLKmode object. */
6810 if (cleared)
6811 need_to_clear = 0;
6812 else if (REG_P (target) && TREE_STATIC (exp))
6813 need_to_clear = 1;
6814 else
6815 {
6816 unsigned HOST_WIDE_INT count = 0, zero_count = 0;
6817 tree value;
6818
6819 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), idx, value)
6820 {
6821 tree sz = TYPE_SIZE (TREE_TYPE (value));
6822 int n_elts_here
6823 = tree_to_uhwi (int_const_binop (TRUNC_DIV_EXPR, sz,
6824 TYPE_SIZE (elttype)));
6825
6826 count += n_elts_here;
6827 if (mostly_zeros_p (value))
6828 zero_count += n_elts_here;
6829 }
6830
6831 /* Clear the entire vector first if there are any missing elements,
6832 or if the incidence of zero elements is >= 75%. */
6833 need_to_clear = (maybe_lt (count, n_elts)
6834 || 4 * zero_count >= 3 * count);
6835 }
6836
6837 if (need_to_clear && maybe_gt (size, 0) && !vector)
6838 {
6839 if (REG_P (target))
6840 emit_move_insn (target, CONST0_RTX (mode));
6841 else
6842 clear_storage (target, gen_int_mode (size, Pmode),
6843 BLOCK_OP_NORMAL);
6844 cleared = 1;
6845 }
6846
6847 /* Inform later passes that the old value is dead. */
6848 if (!cleared && !vector && REG_P (target))
6849 emit_move_insn (target, CONST0_RTX (mode));
6850
6851 if (MEM_P (target))
6852 alias = MEM_ALIAS_SET (target);
6853 else
6854 alias = get_alias_set (elttype);
6855
6856 /* Store each element of the constructor into the corresponding
6857 element of TARGET, determined by counting the elements. */
6858 for (idx = 0, i = 0;
6859 vec_safe_iterate (CONSTRUCTOR_ELTS (exp), idx, &ce);
6860 idx++, i += bitsize / elt_size)
6861 {
6862 HOST_WIDE_INT eltpos;
6863 tree value = ce->value;
6864
6865 bitsize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (value)));
6866 if (cleared && initializer_zerop (value))
6867 continue;
6868
6869 if (ce->index)
6870 eltpos = tree_to_uhwi (ce->index);
6871 else
6872 eltpos = i;
6873
6874 if (vector)
6875 {
6876 if (vec_vec_init_p)
6877 {
6878 gcc_assert (ce->index == NULL_TREE);
6879 gcc_assert (TREE_CODE (TREE_TYPE (value)) == VECTOR_TYPE);
6880 eltpos = idx;
6881 }
6882 else
6883 gcc_assert (TREE_CODE (TREE_TYPE (value)) != VECTOR_TYPE);
6884 RTVEC_ELT (vector, eltpos) = expand_normal (value);
6885 }
6886 else
6887 {
6888 machine_mode value_mode
6889 = (TREE_CODE (TREE_TYPE (value)) == VECTOR_TYPE
6890 ? TYPE_MODE (TREE_TYPE (value)) : eltmode);
6891 bitpos = eltpos * elt_size;
6892 store_constructor_field (target, bitsize, bitpos, 0,
6893 bitregion_end, value_mode,
6894 value, cleared, alias, reverse);
6895 }
6896 }
6897
6898 if (vector)
6899 emit_insn (GEN_FCN (icode) (target,
6900 gen_rtx_PARALLEL (mode, vector)));
6901 break;
6902 }
6903
6904 default:
6905 gcc_unreachable ();
6906 }
6907 }
6908
6909 /* Store the value of EXP (an expression tree)
6910 into a subfield of TARGET which has mode MODE and occupies
6911 BITSIZE bits, starting BITPOS bits from the start of TARGET.
6912 If MODE is VOIDmode, it means that we are storing into a bit-field.
6913
6914 BITREGION_START is the bitpos of the first bitfield in this region.
6915 BITREGION_END is the bitpos of the ending bitfield in this region.
6916 These two fields are 0, if the C++ memory model does not apply,
6917 or we are not interested in keeping track of bitfield regions.
6918
6919 Always return const0_rtx unless we have something particular to
6920 return.
6921
6922 ALIAS_SET is the alias set for the destination. This value will
6923 (in general) be different from that for TARGET, since TARGET is a
6924 reference to the containing structure.
6925
6926 If NONTEMPORAL is true, try generating a nontemporal store.
6927
6928 If REVERSE is true, the store is to be done in reverse order. */
6929
6930 static rtx
6931 store_field (rtx target, poly_int64 bitsize, poly_int64 bitpos,
6932 poly_uint64 bitregion_start, poly_uint64 bitregion_end,
6933 machine_mode mode, tree exp,
6934 alias_set_type alias_set, bool nontemporal, bool reverse)
6935 {
6936 if (TREE_CODE (exp) == ERROR_MARK)
6937 return const0_rtx;
6938
6939 /* If we have nothing to store, do nothing unless the expression has
6940 side-effects. Don't do that for zero sized addressable lhs of
6941 calls. */
6942 if (known_eq (bitsize, 0)
6943 && (!TREE_ADDRESSABLE (TREE_TYPE (exp))
6944 || TREE_CODE (exp) != CALL_EXPR))
6945 return expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL);
6946
6947 if (GET_CODE (target) == CONCAT)
6948 {
6949 /* We're storing into a struct containing a single __complex. */
6950
6951 gcc_assert (known_eq (bitpos, 0));
6952 return store_expr (exp, target, 0, nontemporal, reverse);
6953 }
6954
6955 /* If the structure is in a register or if the component
6956 is a bit field, we cannot use addressing to access it.
6957 Use bit-field techniques or SUBREG to store in it. */
6958
6959 poly_int64 decl_bitsize;
6960 if (mode == VOIDmode
6961 || (mode != BLKmode && ! direct_store[(int) mode]
6962 && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
6963 && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
6964 || REG_P (target)
6965 || GET_CODE (target) == SUBREG
6966 /* If the field isn't aligned enough to store as an ordinary memref,
6967 store it as a bit field. */
6968 || (mode != BLKmode
6969 && ((((MEM_ALIGN (target) < GET_MODE_ALIGNMENT (mode))
6970 || !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode)))
6971 && targetm.slow_unaligned_access (mode, MEM_ALIGN (target)))
6972 || !multiple_p (bitpos, BITS_PER_UNIT)))
6973 || (known_size_p (bitsize)
6974 && mode != BLKmode
6975 && maybe_gt (GET_MODE_BITSIZE (mode), bitsize))
6976 /* If the RHS and field are a constant size and the size of the
6977 RHS isn't the same size as the bitfield, we must use bitfield
6978 operations. */
6979 || (known_size_p (bitsize)
6980 && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (exp)))
6981 && maybe_ne (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (exp))),
6982 bitsize)
6983 /* Except for initialization of full bytes from a CONSTRUCTOR, which
6984 we will handle specially below. */
6985 && !(TREE_CODE (exp) == CONSTRUCTOR
6986 && multiple_p (bitsize, BITS_PER_UNIT))
6987 /* And except for bitwise copying of TREE_ADDRESSABLE types,
6988 where the FIELD_DECL has the right bitsize, but TREE_TYPE (exp)
6989 includes some extra padding. store_expr / expand_expr will in
6990 that case call get_inner_reference that will have the bitsize
6991 we check here and thus the block move will not clobber the
6992 padding that shouldn't be clobbered. In the future we could
6993 replace the TREE_ADDRESSABLE check with a check that
6994 get_base_address needs to live in memory. */
6995 && (!TREE_ADDRESSABLE (TREE_TYPE (exp))
6996 || TREE_CODE (exp) != COMPONENT_REF
6997 || !multiple_p (bitsize, BITS_PER_UNIT)
6998 || !multiple_p (bitpos, BITS_PER_UNIT)
6999 || !poly_int_tree_p (DECL_SIZE (TREE_OPERAND (exp, 1)),
7000 &decl_bitsize)
7001 || maybe_ne (decl_bitsize, bitsize)))
7002 /* If we are expanding a MEM_REF of a non-BLKmode non-addressable
7003 decl we must use bitfield operations. */
7004 || (known_size_p (bitsize)
7005 && TREE_CODE (exp) == MEM_REF
7006 && TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
7007 && DECL_P (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
7008 && !TREE_ADDRESSABLE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
7009 && DECL_MODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)) != BLKmode))
7010 {
7011 rtx temp;
7012 gimple *nop_def;
7013
7014 /* If EXP is a NOP_EXPR of precision less than its mode, then that
7015 implies a mask operation. If the precision is the same size as
7016 the field we're storing into, that mask is redundant. This is
7017 particularly common with bit field assignments generated by the
7018 C front end. */
7019 nop_def = get_def_for_expr (exp, NOP_EXPR);
7020 if (nop_def)
7021 {
7022 tree type = TREE_TYPE (exp);
7023 if (INTEGRAL_TYPE_P (type)
7024 && maybe_ne (TYPE_PRECISION (type),
7025 GET_MODE_BITSIZE (TYPE_MODE (type)))
7026 && known_eq (bitsize, TYPE_PRECISION (type)))
7027 {
7028 tree op = gimple_assign_rhs1 (nop_def);
7029 type = TREE_TYPE (op);
7030 if (INTEGRAL_TYPE_P (type)
7031 && known_ge (TYPE_PRECISION (type), bitsize))
7032 exp = op;
7033 }
7034 }
7035
7036 temp = expand_normal (exp);
7037
7038 /* We don't support variable-sized BLKmode bitfields, since our
7039 handling of BLKmode is bound up with the ability to break
7040 things into words. */
7041 gcc_assert (mode != BLKmode || bitsize.is_constant ());
7042
7043 /* Handle calls that return values in multiple non-contiguous locations.
7044 The Irix 6 ABI has examples of this. */
7045 if (GET_CODE (temp) == PARALLEL)
7046 {
7047 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
7048 machine_mode temp_mode = GET_MODE (temp);
7049 if (temp_mode == BLKmode || temp_mode == VOIDmode)
7050 temp_mode = smallest_int_mode_for_size (size * BITS_PER_UNIT);
7051 rtx temp_target = gen_reg_rtx (temp_mode);
7052 emit_group_store (temp_target, temp, TREE_TYPE (exp), size);
7053 temp = temp_target;
7054 }
7055
7056 /* Handle calls that return BLKmode values in registers. */
7057 else if (mode == BLKmode && REG_P (temp) && TREE_CODE (exp) == CALL_EXPR)
7058 {
7059 rtx temp_target = gen_reg_rtx (GET_MODE (temp));
7060 copy_blkmode_from_reg (temp_target, temp, TREE_TYPE (exp));
7061 temp = temp_target;
7062 }
7063
7064 /* If the value has aggregate type and an integral mode then, if BITSIZE
7065 is narrower than this mode and this is for big-endian data, we first
7066 need to put the value into the low-order bits for store_bit_field,
7067 except when MODE is BLKmode and BITSIZE larger than the word size
7068 (see the handling of fields larger than a word in store_bit_field).
7069 Moreover, the field may be not aligned on a byte boundary; in this
7070 case, if it has reverse storage order, it needs to be accessed as a
7071 scalar field with reverse storage order and we must first put the
7072 value into target order. */
7073 scalar_int_mode temp_mode;
7074 if (AGGREGATE_TYPE_P (TREE_TYPE (exp))
7075 && is_int_mode (GET_MODE (temp), &temp_mode))
7076 {
7077 HOST_WIDE_INT size = GET_MODE_BITSIZE (temp_mode);
7078
7079 reverse = TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (exp));
7080
7081 if (reverse)
7082 temp = flip_storage_order (temp_mode, temp);
7083
7084 gcc_checking_assert (known_le (bitsize, size));
7085 if (maybe_lt (bitsize, size)
7086 && reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN
7087 /* Use of to_constant for BLKmode was checked above. */
7088 && !(mode == BLKmode && bitsize.to_constant () > BITS_PER_WORD))
7089 temp = expand_shift (RSHIFT_EXPR, temp_mode, temp,
7090 size - bitsize, NULL_RTX, 1);
7091 }
7092
7093 /* Unless MODE is VOIDmode or BLKmode, convert TEMP to MODE. */
7094 if (mode != VOIDmode && mode != BLKmode
7095 && mode != TYPE_MODE (TREE_TYPE (exp)))
7096 temp = convert_modes (mode, TYPE_MODE (TREE_TYPE (exp)), temp, 1);
7097
7098 /* If the mode of TEMP and TARGET is BLKmode, both must be in memory
7099 and BITPOS must be aligned on a byte boundary. If so, we simply do
7100 a block copy. Likewise for a BLKmode-like TARGET. */
7101 if (GET_MODE (temp) == BLKmode
7102 && (GET_MODE (target) == BLKmode
7103 || (MEM_P (target)
7104 && GET_MODE_CLASS (GET_MODE (target)) == MODE_INT
7105 && multiple_p (bitpos, BITS_PER_UNIT)
7106 && multiple_p (bitsize, BITS_PER_UNIT))))
7107 {
7108 gcc_assert (MEM_P (target) && MEM_P (temp));
7109 poly_int64 bytepos = exact_div (bitpos, BITS_PER_UNIT);
7110 poly_int64 bytesize = bits_to_bytes_round_up (bitsize);
7111
7112 target = adjust_address (target, VOIDmode, bytepos);
7113 emit_block_move (target, temp,
7114 gen_int_mode (bytesize, Pmode),
7115 BLOCK_OP_NORMAL);
7116
7117 return const0_rtx;
7118 }
7119
7120 /* If the mode of TEMP is still BLKmode and BITSIZE not larger than the
7121 word size, we need to load the value (see again store_bit_field). */
7122 if (GET_MODE (temp) == BLKmode && known_le (bitsize, BITS_PER_WORD))
7123 {
7124 scalar_int_mode temp_mode = smallest_int_mode_for_size (bitsize);
7125 temp = extract_bit_field (temp, bitsize, 0, 1, NULL_RTX, temp_mode,
7126 temp_mode, false, NULL);
7127 }
7128
7129 /* Store the value in the bitfield. */
7130 gcc_checking_assert (known_ge (bitpos, 0));
7131 store_bit_field (target, bitsize, bitpos,
7132 bitregion_start, bitregion_end,
7133 mode, temp, reverse);
7134
7135 return const0_rtx;
7136 }
7137 else
7138 {
7139 /* Now build a reference to just the desired component. */
7140 rtx to_rtx = adjust_address (target, mode,
7141 exact_div (bitpos, BITS_PER_UNIT));
7142
7143 if (to_rtx == target)
7144 to_rtx = copy_rtx (to_rtx);
7145
7146 if (!MEM_KEEP_ALIAS_SET_P (to_rtx) && MEM_ALIAS_SET (to_rtx) != 0)
7147 set_mem_alias_set (to_rtx, alias_set);
7148
7149 /* Above we avoided using bitfield operations for storing a CONSTRUCTOR
7150 into a target smaller than its type; handle that case now. */
7151 if (TREE_CODE (exp) == CONSTRUCTOR && known_size_p (bitsize))
7152 {
7153 poly_int64 bytesize = exact_div (bitsize, BITS_PER_UNIT);
7154 store_constructor (exp, to_rtx, 0, bytesize, reverse);
7155 return to_rtx;
7156 }
7157
7158 return store_expr (exp, to_rtx, 0, nontemporal, reverse);
7159 }
7160 }
7161 \f
7162 /* Given an expression EXP that may be a COMPONENT_REF, a BIT_FIELD_REF,
7163 an ARRAY_REF, or an ARRAY_RANGE_REF, look for nested operations of these
7164 codes and find the ultimate containing object, which we return.
7165
7166 We set *PBITSIZE to the size in bits that we want, *PBITPOS to the
7167 bit position, *PUNSIGNEDP to the signedness and *PREVERSEP to the
7168 storage order of the field.
7169 If the position of the field is variable, we store a tree
7170 giving the variable offset (in units) in *POFFSET.
7171 This offset is in addition to the bit position.
7172 If the position is not variable, we store 0 in *POFFSET.
7173
7174 If any of the extraction expressions is volatile,
7175 we store 1 in *PVOLATILEP. Otherwise we don't change that.
7176
7177 If the field is a non-BLKmode bit-field, *PMODE is set to VOIDmode.
7178 Otherwise, it is a mode that can be used to access the field.
7179
7180 If the field describes a variable-sized object, *PMODE is set to
7181 BLKmode and *PBITSIZE is set to -1. An access cannot be made in
7182 this case, but the address of the object can be found. */
7183
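/* As an illustration (the types are only an example, assuming a
   4-byte int):  for a reference p->y[i] with

     struct P { int x; int y[4]; } *p;

   the loop below peels the ARRAY_REF, adding i * 4 units to the
   variable offset, then the COMPONENT_REF for Y, adding the field's
   constant byte and bit offsets, and finally returns the MEM_REF for
   *P as the ultimate containing object, with *PBITSIZE set to the
   size of one element.  */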
7184 tree
7185 get_inner_reference (tree exp, poly_int64_pod *pbitsize,
7186 poly_int64_pod *pbitpos, tree *poffset,
7187 machine_mode *pmode, int *punsignedp,
7188 int *preversep, int *pvolatilep)
7189 {
7190 tree size_tree = 0;
7191 machine_mode mode = VOIDmode;
7192 bool blkmode_bitfield = false;
7193 tree offset = size_zero_node;
7194 poly_offset_int bit_offset = 0;
7195
7196 /* First get the mode, signedness, storage order and size. We do this from
7197 just the outermost expression. */
7198 *pbitsize = -1;
7199 if (TREE_CODE (exp) == COMPONENT_REF)
7200 {
7201 tree field = TREE_OPERAND (exp, 1);
7202 size_tree = DECL_SIZE (field);
7203 if (flag_strict_volatile_bitfields > 0
7204 && TREE_THIS_VOLATILE (exp)
7205 && DECL_BIT_FIELD_TYPE (field)
7206 && DECL_MODE (field) != BLKmode)
7207 /* Volatile bitfields should be accessed in the mode of the
7208 field's type, not the mode computed based on the bit
7209 size. */
7210 mode = TYPE_MODE (DECL_BIT_FIELD_TYPE (field));
7211 else if (!DECL_BIT_FIELD (field))
7212 {
7213 mode = DECL_MODE (field);
7214 /* For vector fields re-check the target flags, as DECL_MODE
7215 could have been set with different target flags than
7216 the current function has. */
7217 if (mode == BLKmode
7218 && VECTOR_TYPE_P (TREE_TYPE (field))
7219 && VECTOR_MODE_P (TYPE_MODE_RAW (TREE_TYPE (field))))
7220 mode = TYPE_MODE (TREE_TYPE (field));
7221 }
7222 else if (DECL_MODE (field) == BLKmode)
7223 blkmode_bitfield = true;
7224
7225 *punsignedp = DECL_UNSIGNED (field);
7226 }
7227 else if (TREE_CODE (exp) == BIT_FIELD_REF)
7228 {
7229 size_tree = TREE_OPERAND (exp, 1);
7230 *punsignedp = (! INTEGRAL_TYPE_P (TREE_TYPE (exp))
7231 || TYPE_UNSIGNED (TREE_TYPE (exp)));
7232
7233 /* For vector types, with the correct size of access, use the mode of
7234 the inner type. */
7235 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 0))) == VECTOR_TYPE
7236 && TREE_TYPE (exp) == TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0)))
7237 && tree_int_cst_equal (size_tree, TYPE_SIZE (TREE_TYPE (exp))))
7238 mode = TYPE_MODE (TREE_TYPE (exp));
7239 }
7240 else
7241 {
7242 mode = TYPE_MODE (TREE_TYPE (exp));
7243 *punsignedp = TYPE_UNSIGNED (TREE_TYPE (exp));
7244
7245 if (mode == BLKmode)
7246 size_tree = TYPE_SIZE (TREE_TYPE (exp));
7247 else
7248 *pbitsize = GET_MODE_BITSIZE (mode);
7249 }
7250
7251 if (size_tree != 0)
7252 {
7253 if (! tree_fits_uhwi_p (size_tree))
7254 mode = BLKmode, *pbitsize = -1;
7255 else
7256 *pbitsize = tree_to_uhwi (size_tree);
7257 }
7258
7259 *preversep = reverse_storage_order_for_component_p (exp);
7260
7261 /* Compute cumulative bit-offset for nested component-refs and array-refs,
7262 and find the ultimate containing object. */
7263 while (1)
7264 {
7265 switch (TREE_CODE (exp))
7266 {
7267 case BIT_FIELD_REF:
7268 bit_offset += wi::to_poly_offset (TREE_OPERAND (exp, 2));
7269 break;
7270
7271 case COMPONENT_REF:
7272 {
7273 tree field = TREE_OPERAND (exp, 1);
7274 tree this_offset = component_ref_field_offset (exp);
7275
7276 /* If this field hasn't been filled in yet, don't go past it.
7277 This should only happen when folding expressions made during
7278 type construction. */
7279 if (this_offset == 0)
7280 break;
7281
7282 offset = size_binop (PLUS_EXPR, offset, this_offset);
7283 bit_offset += wi::to_poly_offset (DECL_FIELD_BIT_OFFSET (field));
7284
7285 /* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */
7286 }
7287 break;
7288
7289 case ARRAY_REF:
7290 case ARRAY_RANGE_REF:
7291 {
7292 tree index = TREE_OPERAND (exp, 1);
7293 tree low_bound = array_ref_low_bound (exp);
7294 tree unit_size = array_ref_element_size (exp);
7295
7296 /* We assume all arrays have sizes that are a multiple of a byte.
7297 First subtract the lower bound, if any, in the type of the
7298 index, then convert to sizetype and multiply by the size of
7299 the array element. */
7300 if (! integer_zerop (low_bound))
7301 index = fold_build2 (MINUS_EXPR, TREE_TYPE (index),
7302 index, low_bound);
7303
7304 offset = size_binop (PLUS_EXPR, offset,
7305 size_binop (MULT_EXPR,
7306 fold_convert (sizetype, index),
7307 unit_size));
7308 }
7309 break;
7310
7311 case REALPART_EXPR:
7312 break;
7313
7314 case IMAGPART_EXPR:
7315 bit_offset += *pbitsize;
7316 break;
7317
7318 case VIEW_CONVERT_EXPR:
7319 break;
7320
7321 case MEM_REF:
7322 /* Hand back the decl for MEM[&decl, off]. */
7323 if (TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR)
7324 {
7325 tree off = TREE_OPERAND (exp, 1);
7326 if (!integer_zerop (off))
7327 {
7328 poly_offset_int boff = mem_ref_offset (exp);
7329 boff <<= LOG2_BITS_PER_UNIT;
7330 bit_offset += boff;
7331 }
7332 exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
7333 }
7334 goto done;
7335
7336 default:
7337 goto done;
7338 }
7339
7340 /* If any reference in the chain is volatile, the effect is volatile. */
7341 if (TREE_THIS_VOLATILE (exp))
7342 *pvolatilep = 1;
7343
7344 exp = TREE_OPERAND (exp, 0);
7345 }
7346 done:
7347
7348 /* If OFFSET is constant, see if we can return the whole thing as a
7349 constant bit position. Make sure to handle overflow during
7350 this conversion. */
7351 if (poly_int_tree_p (offset))
7352 {
7353 poly_offset_int tem = wi::sext (wi::to_poly_offset (offset),
7354 TYPE_PRECISION (sizetype));
7355 tem <<= LOG2_BITS_PER_UNIT;
7356 tem += bit_offset;
7357 if (tem.to_shwi (pbitpos))
7358 *poffset = offset = NULL_TREE;
7359 }
7360
7361 /* Otherwise, split it up. */
7362 if (offset)
7363 {
7364 /* Avoid returning a negative bitpos as this may wreak havoc later. */
7365 if (!bit_offset.to_shwi (pbitpos) || maybe_lt (*pbitpos, 0))
7366 {
7367 *pbitpos = num_trailing_bits (bit_offset.force_shwi ());
7368 poly_offset_int bytes = bits_to_bytes_round_down (bit_offset);
7369 offset = size_binop (PLUS_EXPR, offset,
7370 build_int_cst (sizetype, bytes.force_shwi ()));
7371 }
7372
7373 *poffset = offset;
7374 }
7375
7376 /* We can use BLKmode for a byte-aligned BLKmode bitfield. */
7377 if (mode == VOIDmode
7378 && blkmode_bitfield
7379 && multiple_p (*pbitpos, BITS_PER_UNIT)
7380 && multiple_p (*pbitsize, BITS_PER_UNIT))
7381 *pmode = BLKmode;
7382 else
7383 *pmode = mode;
7384
7385 return exp;
7386 }
7387
7388 /* Alignment in bits the TARGET of an assignment may be assumed to have. */
7389
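/* E.g. (illustrative):  for a target A.B where the type of A is
   aligned to 128 bits but field B only has a DECL_ALIGN of 32 bits,
   the COMPONENT_REF case below yields MIN (32, 128), i.e. 32 bits of
   assumed alignment.  */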
7390 static unsigned HOST_WIDE_INT
7391 target_align (const_tree target)
7392 {
7393 /* We might have a chain of nested references with intermediate misaligning
7394 bit-field components, so we need to recurse to find out. */
7395
7396 unsigned HOST_WIDE_INT this_align, outer_align;
7397
7398 switch (TREE_CODE (target))
7399 {
7400 case BIT_FIELD_REF:
7401 return 1;
7402
7403 case COMPONENT_REF:
7404 this_align = DECL_ALIGN (TREE_OPERAND (target, 1));
7405 outer_align = target_align (TREE_OPERAND (target, 0));
7406 return MIN (this_align, outer_align);
7407
7408 case ARRAY_REF:
7409 case ARRAY_RANGE_REF:
7410 this_align = TYPE_ALIGN (TREE_TYPE (target));
7411 outer_align = target_align (TREE_OPERAND (target, 0));
7412 return MIN (this_align, outer_align);
7413
7414 CASE_CONVERT:
7415 case NON_LVALUE_EXPR:
7416 case VIEW_CONVERT_EXPR:
7417 this_align = TYPE_ALIGN (TREE_TYPE (target));
7418 outer_align = target_align (TREE_OPERAND (target, 0));
7419 return MAX (this_align, outer_align);
7420
7421 default:
7422 return TYPE_ALIGN (TREE_TYPE (target));
7423 }
7424 }
7425
7426 \f
7427 /* Given an rtx VALUE that may contain additions and multiplications, return
7428 an equivalent value that just refers to a register, memory, or constant.
7429 This is done by generating instructions to perform the arithmetic and
7430 returning a pseudo-register containing the value.
7431
7432 The returned value may be a REG, SUBREG, MEM or constant. */
7433
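/* A rough illustration:  for a VALUE such as

     (plus:SI (mult:SI (reg:SI 100) (const_int 4)) (const_int 8))

   the code below emits the multiply and the addition as real insns
   and hands back a pseudo register holding the sum, which the caller
   can then use where only a REG, MEM or constant is acceptable.  */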
7434 rtx
7435 force_operand (rtx value, rtx target)
7436 {
7437 rtx op1, op2;
7438 /* Use subtarget as the target for operand 0 of a binary operation. */
7439 rtx subtarget = get_subtarget (target);
7440 enum rtx_code code = GET_CODE (value);
7441
7442 /* Check for subreg applied to an expression produced by loop optimizer. */
7443 if (code == SUBREG
7444 && !REG_P (SUBREG_REG (value))
7445 && !MEM_P (SUBREG_REG (value)))
7446 {
7447 value
7448 = simplify_gen_subreg (GET_MODE (value),
7449 force_reg (GET_MODE (SUBREG_REG (value)),
7450 force_operand (SUBREG_REG (value),
7451 NULL_RTX)),
7452 GET_MODE (SUBREG_REG (value)),
7453 SUBREG_BYTE (value));
7454 code = GET_CODE (value);
7455 }
7456
7457 /* Check for a PIC address load. */
7458 if ((code == PLUS || code == MINUS)
7459 && XEXP (value, 0) == pic_offset_table_rtx
7460 && (GET_CODE (XEXP (value, 1)) == SYMBOL_REF
7461 || GET_CODE (XEXP (value, 1)) == LABEL_REF
7462 || GET_CODE (XEXP (value, 1)) == CONST))
7463 {
7464 if (!subtarget)
7465 subtarget = gen_reg_rtx (GET_MODE (value));
7466 emit_move_insn (subtarget, value);
7467 return subtarget;
7468 }
7469
7470 if (ARITHMETIC_P (value))
7471 {
7472 op2 = XEXP (value, 1);
7473 if (!CONSTANT_P (op2) && !(REG_P (op2) && op2 != subtarget))
7474 subtarget = 0;
7475 if (code == MINUS && CONST_INT_P (op2))
7476 {
7477 code = PLUS;
7478 op2 = negate_rtx (GET_MODE (value), op2);
7479 }
7480
7481 /* Check for an addition with OP2 a constant integer and our first
7482 operand a PLUS of a virtual register and something else. In that
7483 case, we want to emit the sum of the virtual register and the
7484 constant first and then add the other value. This allows virtual
7485 register instantiation to simply modify the constant rather than
7486 creating another one around this addition. */
7487 if (code == PLUS && CONST_INT_P (op2)
7488 && GET_CODE (XEXP (value, 0)) == PLUS
7489 && REG_P (XEXP (XEXP (value, 0), 0))
7490 && REGNO (XEXP (XEXP (value, 0), 0)) >= FIRST_VIRTUAL_REGISTER
7491 && REGNO (XEXP (XEXP (value, 0), 0)) <= LAST_VIRTUAL_REGISTER)
7492 {
7493 rtx temp = expand_simple_binop (GET_MODE (value), code,
7494 XEXP (XEXP (value, 0), 0), op2,
7495 subtarget, 0, OPTAB_LIB_WIDEN);
7496 return expand_simple_binop (GET_MODE (value), code, temp,
7497 force_operand (XEXP (XEXP (value,
7498 0), 1), 0),
7499 target, 0, OPTAB_LIB_WIDEN);
7500 }
7501
7502 op1 = force_operand (XEXP (value, 0), subtarget);
7503 op2 = force_operand (op2, NULL_RTX);
7504 switch (code)
7505 {
7506 case MULT:
7507 return expand_mult (GET_MODE (value), op1, op2, target, 1);
7508 case DIV:
7509 if (!INTEGRAL_MODE_P (GET_MODE (value)))
7510 return expand_simple_binop (GET_MODE (value), code, op1, op2,
7511 target, 1, OPTAB_LIB_WIDEN);
7512 else
7513 return expand_divmod (0,
7514 FLOAT_MODE_P (GET_MODE (value))
7515 ? RDIV_EXPR : TRUNC_DIV_EXPR,
7516 GET_MODE (value), op1, op2, target, 0);
7517 case MOD:
7518 return expand_divmod (1, TRUNC_MOD_EXPR, GET_MODE (value), op1, op2,
7519 target, 0);
7520 case UDIV:
7521 return expand_divmod (0, TRUNC_DIV_EXPR, GET_MODE (value), op1, op2,
7522 target, 1);
7523 case UMOD:
7524 return expand_divmod (1, TRUNC_MOD_EXPR, GET_MODE (value), op1, op2,
7525 target, 1);
7526 case ASHIFTRT:
7527 return expand_simple_binop (GET_MODE (value), code, op1, op2,
7528 target, 0, OPTAB_LIB_WIDEN);
7529 default:
7530 return expand_simple_binop (GET_MODE (value), code, op1, op2,
7531 target, 1, OPTAB_LIB_WIDEN);
7532 }
7533 }
7534 if (UNARY_P (value))
7535 {
7536 if (!target)
7537 target = gen_reg_rtx (GET_MODE (value));
7538 op1 = force_operand (XEXP (value, 0), NULL_RTX);
7539 switch (code)
7540 {
7541 case ZERO_EXTEND:
7542 case SIGN_EXTEND:
7543 case TRUNCATE:
7544 case FLOAT_EXTEND:
7545 case FLOAT_TRUNCATE:
7546 convert_move (target, op1, code == ZERO_EXTEND);
7547 return target;
7548
7549 case FIX:
7550 case UNSIGNED_FIX:
7551 expand_fix (target, op1, code == UNSIGNED_FIX);
7552 return target;
7553
7554 case FLOAT:
7555 case UNSIGNED_FLOAT:
7556 expand_float (target, op1, code == UNSIGNED_FLOAT);
7557 return target;
7558
7559 default:
7560 return expand_simple_unop (GET_MODE (value), code, op1, target, 0);
7561 }
7562 }
7563
7564 #ifdef INSN_SCHEDULING
7565 /* On machines that have insn scheduling, we want all memory references to be
7566 explicit, so we need to deal with such paradoxical SUBREGs. */
7567 if (paradoxical_subreg_p (value) && MEM_P (SUBREG_REG (value)))
7568 value
7569 = simplify_gen_subreg (GET_MODE (value),
7570 force_reg (GET_MODE (SUBREG_REG (value)),
7571 force_operand (SUBREG_REG (value),
7572 NULL_RTX)),
7573 GET_MODE (SUBREG_REG (value)),
7574 SUBREG_BYTE (value));
7575 #endif
7576
7577 return value;
7578 }
7579 \f
7580 /* Subroutine of expand_expr: return nonzero iff there is no way that
7581 EXP can reference X, which is being modified. TOP_P is nonzero if this
7582 call is going to be used to determine whether we need a temporary
7583 for EXP, as opposed to a recursive call to this function.
7584
7585 It is always safe for this routine to return zero since it merely
7586 searches for optimization opportunities. */
7587
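/* For example (illustrative):  when expanding an assignment such as

     x = f () + 1;

   with X living in memory, the CALL_EXPR is assumed to clobber all of
   memory, so safe_from_p returns 0 for it and callers such as
   expand_operands avoid using X's rtx as the target for intermediate
   results.  */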
7588 int
7589 safe_from_p (const_rtx x, tree exp, int top_p)
7590 {
7591 rtx exp_rtl = 0;
7592 int i, nops;
7593
7594 if (x == 0
7595 /* If EXP has varying size, we MUST use a target since we currently
7596 have no way of allocating temporaries of variable size
7597 (except for arrays that have TYPE_ARRAY_MAX_SIZE set).
7598 So we assume here that something at a higher level has prevented a
7599 clash. This is somewhat bogus, but the best we can do. Only
7600 do this when X is BLKmode and when we are at the top level. */
7601 || (top_p && TREE_TYPE (exp) != 0 && COMPLETE_TYPE_P (TREE_TYPE (exp))
7602 && TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) != INTEGER_CST
7603 && (TREE_CODE (TREE_TYPE (exp)) != ARRAY_TYPE
7604 || TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp)) == NULL_TREE
7605 || TREE_CODE (TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp)))
7606 != INTEGER_CST)
7607 && GET_MODE (x) == BLKmode)
7608 /* If X is in the outgoing argument area, it is always safe. */
7609 || (MEM_P (x)
7610 && (XEXP (x, 0) == virtual_outgoing_args_rtx
7611 || (GET_CODE (XEXP (x, 0)) == PLUS
7612 && XEXP (XEXP (x, 0), 0) == virtual_outgoing_args_rtx))))
7613 return 1;
7614
7615 /* If this is a subreg of a hard register, declare it unsafe, otherwise,
7616 find the underlying pseudo. */
7617 if (GET_CODE (x) == SUBREG)
7618 {
7619 x = SUBREG_REG (x);
7620 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
7621 return 0;
7622 }
7623
7624 /* Now look at our tree code and possibly recurse. */
7625 switch (TREE_CODE_CLASS (TREE_CODE (exp)))
7626 {
7627 case tcc_declaration:
7628 exp_rtl = DECL_RTL_IF_SET (exp);
7629 break;
7630
7631 case tcc_constant:
7632 return 1;
7633
7634 case tcc_exceptional:
7635 if (TREE_CODE (exp) == TREE_LIST)
7636 {
7637 while (1)
7638 {
7639 if (TREE_VALUE (exp) && !safe_from_p (x, TREE_VALUE (exp), 0))
7640 return 0;
7641 exp = TREE_CHAIN (exp);
7642 if (!exp)
7643 return 1;
7644 if (TREE_CODE (exp) != TREE_LIST)
7645 return safe_from_p (x, exp, 0);
7646 }
7647 }
7648 else if (TREE_CODE (exp) == CONSTRUCTOR)
7649 {
7650 constructor_elt *ce;
7651 unsigned HOST_WIDE_INT idx;
7652
7653 FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (exp), idx, ce)
7654 if ((ce->index != NULL_TREE && !safe_from_p (x, ce->index, 0))
7655 || !safe_from_p (x, ce->value, 0))
7656 return 0;
7657 return 1;
7658 }
7659 else if (TREE_CODE (exp) == ERROR_MARK)
7660 return 1; /* An already-visited SAVE_EXPR? */
7661 else
7662 return 0;
7663
7664 case tcc_statement:
7665 /* The only case we look at here is the DECL_INITIAL inside a
7666 DECL_EXPR. */
7667 return (TREE_CODE (exp) != DECL_EXPR
7668 || TREE_CODE (DECL_EXPR_DECL (exp)) != VAR_DECL
7669 || !DECL_INITIAL (DECL_EXPR_DECL (exp))
7670 || safe_from_p (x, DECL_INITIAL (DECL_EXPR_DECL (exp)), 0));
7671
7672 case tcc_binary:
7673 case tcc_comparison:
7674 if (!safe_from_p (x, TREE_OPERAND (exp, 1), 0))
7675 return 0;
7676 /* Fall through. */
7677
7678 case tcc_unary:
7679 return safe_from_p (x, TREE_OPERAND (exp, 0), 0);
7680
7681 case tcc_expression:
7682 case tcc_reference:
7683 case tcc_vl_exp:
7684 /* Now do code-specific tests. EXP_RTL is set to any rtx we find in
7685 the expression. If it is set, we conflict iff we are that rtx or
7686 both are in memory. Otherwise, we check all operands of the
7687 expression recursively. */
7688
7689 switch (TREE_CODE (exp))
7690 {
7691 case ADDR_EXPR:
7692 /* If the operand is static or we are static, we can't conflict.
7693 Likewise if we don't conflict with the operand at all. */
7694 if (staticp (TREE_OPERAND (exp, 0))
7695 || TREE_STATIC (exp)
7696 || safe_from_p (x, TREE_OPERAND (exp, 0), 0))
7697 return 1;
7698
7699 /* Otherwise, the only way this can conflict is if we are taking
7700 the address of a DECL whose address is part of X, which is
7701 very rare. */
7702 exp = TREE_OPERAND (exp, 0);
7703 if (DECL_P (exp))
7704 {
7705 if (!DECL_RTL_SET_P (exp)
7706 || !MEM_P (DECL_RTL (exp)))
7707 return 0;
7708 else
7709 exp_rtl = XEXP (DECL_RTL (exp), 0);
7710 }
7711 break;
7712
7713 case MEM_REF:
7714 if (MEM_P (x)
7715 && alias_sets_conflict_p (MEM_ALIAS_SET (x),
7716 get_alias_set (exp)))
7717 return 0;
7718 break;
7719
7720 case CALL_EXPR:
7721 /* Assume that the call will clobber all hard registers and
7722 all of memory. */
7723 if ((REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
7724 || MEM_P (x))
7725 return 0;
7726 break;
7727
7728 case WITH_CLEANUP_EXPR:
7729 case CLEANUP_POINT_EXPR:
7730 /* Lowered by gimplify.c. */
7731 gcc_unreachable ();
7732
7733 case SAVE_EXPR:
7734 return safe_from_p (x, TREE_OPERAND (exp, 0), 0);
7735
7736 default:
7737 break;
7738 }
7739
7740 /* If we have an rtx, we do not need to scan our operands. */
7741 if (exp_rtl)
7742 break;
7743
7744 nops = TREE_OPERAND_LENGTH (exp);
7745 for (i = 0; i < nops; i++)
7746 if (TREE_OPERAND (exp, i) != 0
7747 && ! safe_from_p (x, TREE_OPERAND (exp, i), 0))
7748 return 0;
7749
7750 break;
7751
7752 case tcc_type:
7753 /* Should never get a type here. */
7754 gcc_unreachable ();
7755 }
7756
7757 /* If we have an rtl, find any enclosed object. Then see if we conflict
7758 with it. */
7759 if (exp_rtl)
7760 {
7761 if (GET_CODE (exp_rtl) == SUBREG)
7762 {
7763 exp_rtl = SUBREG_REG (exp_rtl);
7764 if (REG_P (exp_rtl)
7765 && REGNO (exp_rtl) < FIRST_PSEUDO_REGISTER)
7766 return 0;
7767 }
7768
7769 /* If the rtl is X, then it is not safe. Otherwise, it is unless both
7770 are memory and they conflict. */
7771 return ! (rtx_equal_p (x, exp_rtl)
7772 || (MEM_P (x) && MEM_P (exp_rtl)
7773 && true_dependence (exp_rtl, VOIDmode, x)));
7774 }
7775
7776 /* If we reach here, it is safe. */
7777 return 1;
7778 }
7779
7780 \f
7781 /* Return the highest power of two that EXP is known to be a multiple of.
7782 This is used in updating alignment of MEMs in array references. */
7783
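/* E.g. (illustrative):  for EXP equal to the integer constant 24,
   tree_ctz returns 3 and the result is 8; for an EXP with no known
   trailing zero bits the result is 1, and anything larger than
   BIGGEST_ALIGNMENT is capped to BIGGEST_ALIGNMENT.  */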
7784 unsigned HOST_WIDE_INT
7785 highest_pow2_factor (const_tree exp)
7786 {
7787 unsigned HOST_WIDE_INT ret;
7788 int trailing_zeros = tree_ctz (exp);
7789 if (trailing_zeros >= HOST_BITS_PER_WIDE_INT)
7790 return BIGGEST_ALIGNMENT;
7791 ret = HOST_WIDE_INT_1U << trailing_zeros;
7792 if (ret > BIGGEST_ALIGNMENT)
7793 return BIGGEST_ALIGNMENT;
7794 return ret;
7795 }
7796
7797 /* Similar, except that the alignment requirements of TARGET are
7798 taken into account. Assume it is at least as aligned as its
7799 type, unless it is a COMPONENT_REF in which case the layout of
7800 the structure gives the alignment. */
7801
7802 static unsigned HOST_WIDE_INT
7803 highest_pow2_factor_for_target (const_tree target, const_tree exp)
7804 {
7805 unsigned HOST_WIDE_INT talign = target_align (target) / BITS_PER_UNIT;
7806 unsigned HOST_WIDE_INT factor = highest_pow2_factor (exp);
7807
7808 return MAX (factor, talign);
7809 }
7810 \f
7811 /* Convert the tree comparison code TCODE to the rtl one where the
7812 signedness is UNSIGNEDP. */
7813
7814 static enum rtx_code
7815 convert_tree_comp_to_rtx (enum tree_code tcode, int unsignedp)
7816 {
7817 enum rtx_code code;
7818 switch (tcode)
7819 {
7820 case EQ_EXPR:
7821 code = EQ;
7822 break;
7823 case NE_EXPR:
7824 code = NE;
7825 break;
7826 case LT_EXPR:
7827 code = unsignedp ? LTU : LT;
7828 break;
7829 case LE_EXPR:
7830 code = unsignedp ? LEU : LE;
7831 break;
7832 case GT_EXPR:
7833 code = unsignedp ? GTU : GT;
7834 break;
7835 case GE_EXPR:
7836 code = unsignedp ? GEU : GE;
7837 break;
7838 case UNORDERED_EXPR:
7839 code = UNORDERED;
7840 break;
7841 case ORDERED_EXPR:
7842 code = ORDERED;
7843 break;
7844 case UNLT_EXPR:
7845 code = UNLT;
7846 break;
7847 case UNLE_EXPR:
7848 code = UNLE;
7849 break;
7850 case UNGT_EXPR:
7851 code = UNGT;
7852 break;
7853 case UNGE_EXPR:
7854 code = UNGE;
7855 break;
7856 case UNEQ_EXPR:
7857 code = UNEQ;
7858 break;
7859 case LTGT_EXPR:
7860 code = LTGT;
7861 break;
7862
7863 default:
7864 gcc_unreachable ();
7865 }
7866 return code;
7867 }
7868
7869 /* Subroutine of expand_expr. Expand the two operands of a binary
7870 expression EXP0 and EXP1 placing the results in OP0 and OP1.
7871 The value may be stored in TARGET if TARGET is nonzero. The
7872 MODIFIER argument is as documented by expand_expr. */
7873
7874 void
7875 expand_operands (tree exp0, tree exp1, rtx target, rtx *op0, rtx *op1,
7876 enum expand_modifier modifier)
7877 {
7878 if (! safe_from_p (target, exp1, 1))
7879 target = 0;
7880 if (operand_equal_p (exp0, exp1, 0))
7881 {
7882 *op0 = expand_expr (exp0, target, VOIDmode, modifier);
7883 *op1 = copy_rtx (*op0);
7884 }
7885 else
7886 {
7887 *op0 = expand_expr (exp0, target, VOIDmode, modifier);
7888 *op1 = expand_expr (exp1, NULL_RTX, VOIDmode, modifier);
7889 }
7890 }
7891
7892 \f
7893 /* Return a MEM that contains constant EXP. DEFER is as for
7894 output_constant_def and MODIFIER is as for expand_expr. */
7895
7896 static rtx
7897 expand_expr_constant (tree exp, int defer, enum expand_modifier modifier)
7898 {
7899 rtx mem;
7900
7901 mem = output_constant_def (exp, defer);
7902 if (modifier != EXPAND_INITIALIZER)
7903 mem = use_anchored_address (mem);
7904 return mem;
7905 }
7906
7907 /* A subroutine of expand_expr_addr_expr. Evaluate the address of EXP.
7908 The TARGET, TMODE and MODIFIER arguments are as for expand_expr. */
7909
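/* As an illustration:  for the address &a.b[2], the default case
   below uses get_inner_reference to reduce the COMPONENT_REF /
   ARRAY_REF chain to the base object A plus a constant bit position;
   the recursive call yields the address of A, and the byte offset is
   then folded in with plus_constant (or expand_simple_binop when a
   variable offset is involved).  */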
7910 static rtx
7911 expand_expr_addr_expr_1 (tree exp, rtx target, scalar_int_mode tmode,
7912 enum expand_modifier modifier, addr_space_t as)
7913 {
7914 rtx result, subtarget;
7915 tree inner, offset;
7916 poly_int64 bitsize, bitpos;
7917 int unsignedp, reversep, volatilep = 0;
7918 machine_mode mode1;
7919
7920 /* If we are taking the address of a constant and are at the top level,
7921 we have to use output_constant_def since we can't call force_const_mem
7922 at top level. */
7923 /* ??? This should be considered a front-end bug. We should not be
7924 generating ADDR_EXPR of something that isn't an LVALUE. The only
7925 exception here is STRING_CST. */
7926 if (CONSTANT_CLASS_P (exp))
7927 {
7928 result = XEXP (expand_expr_constant (exp, 0, modifier), 0);
7929 if (modifier < EXPAND_SUM)
7930 result = force_operand (result, target);
7931 return result;
7932 }
7933
7934 /* Everything must be something allowed by is_gimple_addressable. */
7935 switch (TREE_CODE (exp))
7936 {
7937 case INDIRECT_REF:
7938 /* This case will happen via recursion for &a->b. */
7939 return expand_expr (TREE_OPERAND (exp, 0), target, tmode, modifier);
7940
7941 case MEM_REF:
7942 {
7943 tree tem = TREE_OPERAND (exp, 0);
7944 if (!integer_zerop (TREE_OPERAND (exp, 1)))
7945 tem = fold_build_pointer_plus (tem, TREE_OPERAND (exp, 1));
7946 return expand_expr (tem, target, tmode, modifier);
7947 }
7948
7949 case TARGET_MEM_REF:
7950 return addr_for_mem_ref (exp, as, true);
7951
7952 case CONST_DECL:
7953 /* Expand the initializer like constants above. */
7954 result = XEXP (expand_expr_constant (DECL_INITIAL (exp),
7955 0, modifier), 0);
7956 if (modifier < EXPAND_SUM)
7957 result = force_operand (result, target);
7958 return result;
7959
7960 case REALPART_EXPR:
7961 /* The real part of the complex number is always first, therefore
7962 the address is the same as the address of the parent object. */
7963 offset = 0;
7964 bitpos = 0;
7965 inner = TREE_OPERAND (exp, 0);
7966 break;
7967
7968 case IMAGPART_EXPR:
7969 /* The imaginary part of the complex number is always second.
7970 The expression is therefore always offset by the size of the
7971 scalar type. */
7972 offset = 0;
7973 bitpos = GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (exp)));
7974 inner = TREE_OPERAND (exp, 0);
7975 break;
7976
7977 case COMPOUND_LITERAL_EXPR:
7978 /* Allow COMPOUND_LITERAL_EXPR in initializers or coming from
7979 initializers, if e.g. rtl_for_decl_init is called on DECL_INITIAL
7980 with COMPOUND_LITERAL_EXPRs in it, or ARRAY_REF on a const static
7981 array with address of COMPOUND_LITERAL_EXPR in DECL_INITIAL;
7982 the initializers aren't gimplified. */
7983 if (COMPOUND_LITERAL_EXPR_DECL (exp)
7984 && TREE_STATIC (COMPOUND_LITERAL_EXPR_DECL (exp)))
7985 return expand_expr_addr_expr_1 (COMPOUND_LITERAL_EXPR_DECL (exp),
7986 target, tmode, modifier, as);
7987 /* FALLTHRU */
7988 default:
7989 /* If the object is a DECL, then expand it for its rtl. Don't bypass
7990 expand_expr, as that can have various side effects; LABEL_DECLs for
7991 example, may not have their DECL_RTL set yet. Expand the rtl of
7992 CONSTRUCTORs too, which should yield a memory reference for the
7993 constructor's contents. Assume language specific tree nodes can
7994 be expanded in some interesting way. */
7995 gcc_assert (TREE_CODE (exp) < LAST_AND_UNUSED_TREE_CODE);
7996 if (DECL_P (exp)
7997 || TREE_CODE (exp) == CONSTRUCTOR
7998 || TREE_CODE (exp) == COMPOUND_LITERAL_EXPR)
7999 {
8000 result = expand_expr (exp, target, tmode,
8001 modifier == EXPAND_INITIALIZER
8002 ? EXPAND_INITIALIZER : EXPAND_CONST_ADDRESS);
8003
8004 /* If the DECL isn't in memory, then the DECL wasn't properly
8005 marked TREE_ADDRESSABLE, which will be either a front-end
8006 or a tree optimizer bug. */
8007
8008 gcc_assert (MEM_P (result));
8009 result = XEXP (result, 0);
8010
8011 /* ??? Is this needed anymore? */
8012 if (DECL_P (exp))
8013 TREE_USED (exp) = 1;
8014
8015 if (modifier != EXPAND_INITIALIZER
8016 && modifier != EXPAND_CONST_ADDRESS
8017 && modifier != EXPAND_SUM)
8018 result = force_operand (result, target);
8019 return result;
8020 }
8021
8022 /* Pass FALSE as the last argument to get_inner_reference although
8023 we are expanding to RTL. The rationale is that we know how to
8024 handle "aligning nodes" here: we can just bypass them because
8025 they won't change the final object whose address will be returned
8026 (they actually exist only for that purpose). */
8027 inner = get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode1,
8028 &unsignedp, &reversep, &volatilep);
8029 break;
8030 }
8031
8032 /* We must have made progress. */
8033 gcc_assert (inner != exp);
8034
8035 subtarget = offset || maybe_ne (bitpos, 0) ? NULL_RTX : target;
8036 /* For VIEW_CONVERT_EXPR, where the outer alignment is bigger than
8037 inner alignment, force the inner to be sufficiently aligned. */
8038 if (CONSTANT_CLASS_P (inner)
8039 && TYPE_ALIGN (TREE_TYPE (inner)) < TYPE_ALIGN (TREE_TYPE (exp)))
8040 {
8041 inner = copy_node (inner);
8042 TREE_TYPE (inner) = copy_node (TREE_TYPE (inner));
8043 SET_TYPE_ALIGN (TREE_TYPE (inner), TYPE_ALIGN (TREE_TYPE (exp)));
8044 TYPE_USER_ALIGN (TREE_TYPE (inner)) = 1;
8045 }
8046 result = expand_expr_addr_expr_1 (inner, subtarget, tmode, modifier, as);
8047
8048 if (offset)
8049 {
8050 rtx tmp;
8051
8052 if (modifier != EXPAND_NORMAL)
8053 result = force_operand (result, NULL);
8054 tmp = expand_expr (offset, NULL_RTX, tmode,
8055 modifier == EXPAND_INITIALIZER
8056 ? EXPAND_INITIALIZER : EXPAND_NORMAL);
8057
8058 /* expand_expr is allowed to return an object in a mode other
8059 than TMODE. If it did, we need to convert. */
8060 if (GET_MODE (tmp) != VOIDmode && tmode != GET_MODE (tmp))
8061 tmp = convert_modes (tmode, GET_MODE (tmp),
8062 tmp, TYPE_UNSIGNED (TREE_TYPE (offset)));
8063 result = convert_memory_address_addr_space (tmode, result, as);
8064 tmp = convert_memory_address_addr_space (tmode, tmp, as);
8065
8066 if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
8067 result = simplify_gen_binary (PLUS, tmode, result, tmp);
8068 else
8069 {
8070 subtarget = maybe_ne (bitpos, 0) ? NULL_RTX : target;
8071 result = expand_simple_binop (tmode, PLUS, result, tmp, subtarget,
8072 1, OPTAB_LIB_WIDEN);
8073 }
8074 }
8075
8076 if (maybe_ne (bitpos, 0))
8077 {
8078 /* Someone beforehand should have rejected taking the address
8079 of an object that isn't byte-aligned. */
8080 poly_int64 bytepos = exact_div (bitpos, BITS_PER_UNIT);
8081 result = convert_memory_address_addr_space (tmode, result, as);
8082 result = plus_constant (tmode, result, bytepos);
8083 if (modifier < EXPAND_SUM)
8084 result = force_operand (result, target);
8085 }
8086
8087 return result;
8088 }
8089
8090 /* A subroutine of expand_expr. Evaluate EXP, which is an ADDR_EXPR.
8091 The TARGET, TMODE and MODIFIER arguments are as for expand_expr. */
8092
8093 static rtx
8094 expand_expr_addr_expr (tree exp, rtx target, machine_mode tmode,
8095 enum expand_modifier modifier)
8096 {
8097 addr_space_t as = ADDR_SPACE_GENERIC;
8098 scalar_int_mode address_mode = Pmode;
8099 scalar_int_mode pointer_mode = ptr_mode;
8100 machine_mode rmode;
8101 rtx result;
8102
8103 /* Target mode of VOIDmode says "whatever's natural". */
8104 if (tmode == VOIDmode)
8105 tmode = TYPE_MODE (TREE_TYPE (exp));
8106
8107 if (POINTER_TYPE_P (TREE_TYPE (exp)))
8108 {
8109 as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp)));
8110 address_mode = targetm.addr_space.address_mode (as);
8111 pointer_mode = targetm.addr_space.pointer_mode (as);
8112 }
8113
8114 /* We can get called with some Weird Things if the user does silliness
8115 like "(short) &a". In that case, convert_memory_address won't do
8116 the right thing, so ignore the given target mode. */
8117 scalar_int_mode new_tmode = (tmode == pointer_mode
8118 ? pointer_mode
8119 : address_mode);
8120
8121 result = expand_expr_addr_expr_1 (TREE_OPERAND (exp, 0), target,
8122 new_tmode, modifier, as);
8123
8124 /* Despite expand_expr's claims about ignoring TMODE when not
8125 strictly convenient, stuff breaks if we don't honor it. Note
8126 that combined with the above, we only do this for pointer modes. */
8127 rmode = GET_MODE (result);
8128 if (rmode == VOIDmode)
8129 rmode = new_tmode;
8130 if (rmode != new_tmode)
8131 result = convert_memory_address_addr_space (new_tmode, result, as);
8132
8133 return result;
8134 }
8135
8136 /* Generate code for computing CONSTRUCTOR EXP.
8137 An rtx for the computed value is returned. If AVOID_TEMP_MEM
8138 is TRUE, instead of creating a temporary variable in memory,
8139 NULL is returned and the caller needs to handle it differently. */
8140
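/* For example (the initializer is only illustrative):  a local
   aggregate initialization such as

     struct S s = { 0, 0, 0 };

   where S has BLKmode and the CONSTRUCTOR is all zeros can be
   expanded as a single clear_storage of the target, whereas a
   CONSTRUCTOR of arbitrary constants may instead be emitted into the
   constant pool by expand_expr_constant and copied from there.  */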
8141 static rtx
8142 expand_constructor (tree exp, rtx target, enum expand_modifier modifier,
8143 bool avoid_temp_mem)
8144 {
8145 tree type = TREE_TYPE (exp);
8146 machine_mode mode = TYPE_MODE (type);
8147
8148 /* Try to avoid creating a temporary at all. This is possible
8149 if all of the initializer is zero.
8150 FIXME: try to handle all [0..255] initializers we can handle
8151 with memset. */
8152 if (TREE_STATIC (exp)
8153 && !TREE_ADDRESSABLE (exp)
8154 && target != 0 && mode == BLKmode
8155 && all_zeros_p (exp))
8156 {
8157 clear_storage (target, expr_size (exp), BLOCK_OP_NORMAL);
8158 return target;
8159 }
8160
8161 /* All elts simple constants => refer to a constant in memory. But
8162 if this is a non-BLKmode mode, let it store a field at a time
8163 since that should make a CONST_INT, CONST_WIDE_INT or
8164 CONST_DOUBLE when we fold. Likewise, if we have a target we can
8165 use, it is best to store directly into the target unless the type
8166 is large enough that memcpy will be used. If we are making an
8167 initializer and all operands are constant, put it in memory as
8168 well.
8169
8170 FIXME: Avoid trying to fill vector constructors piece-meal.
8171 Output them with output_constant_def below unless we're sure
8172 they're zeros. This should go away when vector initializers
8173 are treated like VECTOR_CST instead of arrays. */
8174 if ((TREE_STATIC (exp)
8175 && ((mode == BLKmode
8176 && ! (target != 0 && safe_from_p (target, exp, 1)))
8177 || TREE_ADDRESSABLE (exp)
8178 || (tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
8179 && (! can_move_by_pieces
8180 (tree_to_uhwi (TYPE_SIZE_UNIT (type)),
8181 TYPE_ALIGN (type)))
8182 && ! mostly_zeros_p (exp))))
8183 || ((modifier == EXPAND_INITIALIZER || modifier == EXPAND_CONST_ADDRESS)
8184 && TREE_CONSTANT (exp)))
8185 {
8186 rtx constructor;
8187
8188 if (avoid_temp_mem)
8189 return NULL_RTX;
8190
8191 constructor = expand_expr_constant (exp, 1, modifier);
8192
8193 if (modifier != EXPAND_CONST_ADDRESS
8194 && modifier != EXPAND_INITIALIZER
8195 && modifier != EXPAND_SUM)
8196 constructor = validize_mem (constructor);
8197
8198 return constructor;
8199 }
8200
8201 /* Handle calls that pass values in multiple non-contiguous
8202 locations. The Irix 6 ABI has examples of this. */
8203 if (target == 0 || ! safe_from_p (target, exp, 1)
8204 || GET_CODE (target) == PARALLEL || modifier == EXPAND_STACK_PARM)
8205 {
8206 if (avoid_temp_mem)
8207 return NULL_RTX;
8208
8209 target = assign_temp (type, TREE_ADDRESSABLE (exp), 1);
8210 }
8211
8212 store_constructor (exp, target, 0, int_expr_size (exp), false);
8213 return target;
8214 }
8215
8216
8217 /* expand_expr: generate code for computing expression EXP.
8218 An rtx for the computed value is returned. The value is never null.
8219 In the case of a void EXP, const0_rtx is returned.
8220
8221 The value may be stored in TARGET if TARGET is nonzero.
8222 TARGET is just a suggestion; callers must assume that
8223 the rtx returned may not be the same as TARGET.
8224
8225 If TARGET is CONST0_RTX, it means that the value will be ignored.
8226
8227 If TMODE is not VOIDmode, it suggests generating the
8228 result in mode TMODE. But this is done only when convenient.
8229 Otherwise, TMODE is ignored and the value is generated in its natural mode.
8230 TMODE is just a suggestion; callers must assume that
8231 the rtx returned may not have mode TMODE.
8232
8233 Note that TARGET may have neither TMODE nor MODE. In that case, it
8234 probably will not be used.
8235
8236 If MODIFIER is EXPAND_SUM then when EXP is an addition
8237 we can return an rtx of the form (MULT (REG ...) (CONST_INT ...))
8238 or a nest of (PLUS ...) and (MINUS ...) where the terms are
8239 products as above, or REG or MEM, or constant.
8240 Ordinarily in such cases we would output mul or add instructions
8241 and then return a pseudo reg containing the sum.
8242
8243 EXPAND_INITIALIZER is much like EXPAND_SUM except that
8244 it also marks a label as absolutely required (it can't be dead).
8245 It also makes a ZERO_EXTEND or SIGN_EXTEND instead of emitting extend insns.
8246 This is used for outputting expressions used in initializers.
8247
8248 EXPAND_CONST_ADDRESS says that it is okay to return a MEM
8249 with a constant address even if that address is not normally legitimate.
8250 EXPAND_INITIALIZER and EXPAND_SUM also have this effect.
8251
8252 EXPAND_STACK_PARM is used when expanding to a TARGET on the stack for
8253 a call parameter. Such targets require special care as we haven't yet
8254 marked TARGET so that it's safe from being trashed by libcalls. We
8255 don't want to use TARGET for anything but the final result;
8256 Intermediate values must go elsewhere. Additionally, calls to
8257 emit_block_move will be flagged with BLOCK_OP_CALL_PARM.
8258
8259 If EXP is a VAR_DECL whose DECL_RTL was a MEM with an invalid
8260 address, and ALT_RTL is non-NULL, then *ALT_RTL is set to the
8261 DECL_RTL of the VAR_DECL. *ALT_RTL is also set if EXP is a
8262 COMPOUND_EXPR whose second argument is such a VAR_DECL, and so on
8263 recursively.
8264
8265 If INNER_REFERENCE_P is true, we are expanding an inner reference.
8266 In this case, we don't adjust a returned MEM rtx that wouldn't be
8267 sufficiently aligned for its mode; instead, it's up to the caller
8268 to deal with it afterwards. This is used to make sure that unaligned
8269 base objects for which out-of-bounds accesses are supported, for
8270 example record types with trailing arrays, aren't realigned behind
8271 the back of the caller.
8272 The normal operating mode is to pass FALSE for this parameter. */
8273
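/* As a usage sketch:  most callers go through the expand_expr and
   expand_normal wrappers in expr.h, e.g.

     rtx op0 = expand_expr (treeop0, NULL_RTX, VOIDmode, EXPAND_NORMAL);

   and only a few places pass EXPAND_SUM, EXPAND_INITIALIZER or
   EXPAND_CONST_ADDRESS to obtain the looser forms described above.  */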
8274 rtx
8275 expand_expr_real (tree exp, rtx target, machine_mode tmode,
8276 enum expand_modifier modifier, rtx *alt_rtl,
8277 bool inner_reference_p)
8278 {
8279 rtx ret;
8280
8281 /* Handle ERROR_MARK before anybody tries to access its type. */
8282 if (TREE_CODE (exp) == ERROR_MARK
8283 || (TREE_CODE (TREE_TYPE (exp)) == ERROR_MARK))
8284 {
8285 ret = CONST0_RTX (tmode);
8286 return ret ? ret : const0_rtx;
8287 }
8288
8289 ret = expand_expr_real_1 (exp, target, tmode, modifier, alt_rtl,
8290 inner_reference_p);
8291 return ret;
8292 }
8293
8294 /* Try to expand the conditional expression which is represented by
8295 TREEOP0 ? TREEOP1 : TREEOP2 using conditional moves. If it succeeds,
8296 return the rtl reg which represents the result. Otherwise return
8297 NULL_RTX. */
8298
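/* For instance (illustrative):  a gimple assignment like

     x_3 = a_1 < b_2 ? c_4 : d_5;

   can be expanded here as a compare followed by a conditional move
   when the target supports one for the mode of X, avoiding the
   branch-based fallback used by expand_expr_real_2.  */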
8299 static rtx
8300 expand_cond_expr_using_cmove (tree treeop0 ATTRIBUTE_UNUSED,
8301 tree treeop1 ATTRIBUTE_UNUSED,
8302 tree treeop2 ATTRIBUTE_UNUSED)
8303 {
8304 rtx insn;
8305 rtx op00, op01, op1, op2;
8306 enum rtx_code comparison_code;
8307 machine_mode comparison_mode;
8308 gimple *srcstmt;
8309 rtx temp;
8310 tree type = TREE_TYPE (treeop1);
8311 int unsignedp = TYPE_UNSIGNED (type);
8312 machine_mode mode = TYPE_MODE (type);
8313 machine_mode orig_mode = mode;
8314 static bool expanding_cond_expr_using_cmove = false;
8315
8316 /* Conditional move expansion can end up TERing two operands; when
8317 these recursively contain conditional expressions, this can result in
8318 exponential behavior if the cmove expansion ultimately fails.
8319 It's hardly profitable to TER a cmove into a cmove so avoid doing
8320 that by failing early if we end up recursing. */
8321 if (expanding_cond_expr_using_cmove)
8322 return NULL_RTX;
8323
8324 /* If we cannot do a conditional move on the mode, try doing it
8325 with the promoted mode. */
8326 if (!can_conditionally_move_p (mode))
8327 {
8328 mode = promote_mode (type, mode, &unsignedp);
8329 if (!can_conditionally_move_p (mode))
8330 return NULL_RTX;
8331 temp = assign_temp (type, 0, 0); /* Use promoted mode for temp. */
8332 }
8333 else
8334 temp = assign_temp (type, 0, 1);
8335
8336 expanding_cond_expr_using_cmove = true;
8337 start_sequence ();
8338 expand_operands (treeop1, treeop2,
8339 temp, &op1, &op2, EXPAND_NORMAL);
8340
8341 if (TREE_CODE (treeop0) == SSA_NAME
8342 && (srcstmt = get_def_for_expr_class (treeop0, tcc_comparison)))
8343 {
8344 tree type = TREE_TYPE (gimple_assign_rhs1 (srcstmt));
8345 enum tree_code cmpcode = gimple_assign_rhs_code (srcstmt);
8346 op00 = expand_normal (gimple_assign_rhs1 (srcstmt));
8347 op01 = expand_normal (gimple_assign_rhs2 (srcstmt));
8348 comparison_mode = TYPE_MODE (type);
8349 unsignedp = TYPE_UNSIGNED (type);
8350 comparison_code = convert_tree_comp_to_rtx (cmpcode, unsignedp);
8351 }
8352 else if (COMPARISON_CLASS_P (treeop0))
8353 {
8354 tree type = TREE_TYPE (TREE_OPERAND (treeop0, 0));
8355 enum tree_code cmpcode = TREE_CODE (treeop0);
8356 op00 = expand_normal (TREE_OPERAND (treeop0, 0));
8357 op01 = expand_normal (TREE_OPERAND (treeop0, 1));
8358 unsignedp = TYPE_UNSIGNED (type);
8359 comparison_mode = TYPE_MODE (type);
8360 comparison_code = convert_tree_comp_to_rtx (cmpcode, unsignedp);
8361 }
8362 else
8363 {
8364 op00 = expand_normal (treeop0);
8365 op01 = const0_rtx;
8366 comparison_code = NE;
8367 comparison_mode = GET_MODE (op00);
8368 if (comparison_mode == VOIDmode)
8369 comparison_mode = TYPE_MODE (TREE_TYPE (treeop0));
8370 }
8371 expanding_cond_expr_using_cmove = false;
8372
8373 if (GET_MODE (op1) != mode)
8374 op1 = gen_lowpart (mode, op1);
8375
8376 if (GET_MODE (op2) != mode)
8377 op2 = gen_lowpart (mode, op2);
8378
8379 /* Try to emit the conditional move. */
8380 insn = emit_conditional_move (temp, comparison_code,
8381 op00, op01, comparison_mode,
8382 op1, op2, mode,
8383 unsignedp);
8384
8385 /* If we could do the conditional move, emit the sequence,
8386 and return. */
8387 if (insn)
8388 {
8389 rtx_insn *seq = get_insns ();
8390 end_sequence ();
8391 emit_insn (seq);
8392 return convert_modes (orig_mode, mode, temp, 0);
8393 }
8394
8395 /* Otherwise discard the sequence and fall back to code with
8396 branches. */
8397 end_sequence ();
8398 return NULL_RTX;
8399 }
8400
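/* Expand the unary, binary or ternary operation described by OPS, a
   separate_ops structure giving the tree code, type, location and
   operands; these are exactly the rhs codes valid in a gimple
   assignment that are not GIMPLE_SINGLE_RHS.  TARGET, TMODE and
   MODIFIER are as for expand_expr.  */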
8401 rtx
8402 expand_expr_real_2 (sepops ops, rtx target, machine_mode tmode,
8403 enum expand_modifier modifier)
8404 {
8405 rtx op0, op1, op2, temp;
8406 rtx_code_label *lab;
8407 tree type;
8408 int unsignedp;
8409 machine_mode mode;
8410 scalar_int_mode int_mode;
8411 enum tree_code code = ops->code;
8412 optab this_optab;
8413 rtx subtarget, original_target;
8414 int ignore;
8415 bool reduce_bit_field;
8416 location_t loc = ops->location;
8417 tree treeop0, treeop1, treeop2;
8418 #define REDUCE_BIT_FIELD(expr) (reduce_bit_field \
8419 ? reduce_to_bit_field_precision ((expr), \
8420 target, \
8421 type) \
8422 : (expr))
8423
8424 type = ops->type;
8425 mode = TYPE_MODE (type);
8426 unsignedp = TYPE_UNSIGNED (type);
8427
8428 treeop0 = ops->op0;
8429 treeop1 = ops->op1;
8430 treeop2 = ops->op2;
8431
8432 /* We should be called only on simple (binary or unary) expressions,
8433 exactly those that are valid in gimple expressions that aren't
8434 GIMPLE_SINGLE_RHS (or invalid). */
8435 gcc_assert (get_gimple_rhs_class (code) == GIMPLE_UNARY_RHS
8436 || get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS
8437 || get_gimple_rhs_class (code) == GIMPLE_TERNARY_RHS);
8438
8439 ignore = (target == const0_rtx
8440 || ((CONVERT_EXPR_CODE_P (code)
8441 || code == COND_EXPR || code == VIEW_CONVERT_EXPR)
8442 && TREE_CODE (type) == VOID_TYPE));
8443
8444 /* We should be called only if we need the result. */
8445 gcc_assert (!ignore);
8446
8447 /* An operation in what may be a bit-field type needs the
8448 result to be reduced to the precision of the bit-field type,
8449 which is narrower than that of the type's mode. */
8450 reduce_bit_field = (INTEGRAL_TYPE_P (type)
8451 && !type_has_mode_precision_p (type));
8452
8453 if (reduce_bit_field && modifier == EXPAND_STACK_PARM)
8454 target = 0;
8455
8456 /* Use subtarget as the target for operand 0 of a binary operation. */
8457 subtarget = get_subtarget (target);
8458 original_target = target;
8459
8460 switch (code)
8461 {
8462 case NON_LVALUE_EXPR:
8463 case PAREN_EXPR:
8464 CASE_CONVERT:
8465 if (treeop0 == error_mark_node)
8466 return const0_rtx;
8467
8468 if (TREE_CODE (type) == UNION_TYPE)
8469 {
8470 tree valtype = TREE_TYPE (treeop0);
8471
8472 /* If both input and output are BLKmode, this conversion isn't doing
8473 anything except possibly changing memory attributes. */
8474 if (mode == BLKmode && TYPE_MODE (valtype) == BLKmode)
8475 {
8476 rtx result = expand_expr (treeop0, target, tmode,
8477 modifier);
8478
8479 result = copy_rtx (result);
8480 set_mem_attributes (result, type, 0);
8481 return result;
8482 }
8483
8484 if (target == 0)
8485 {
8486 if (TYPE_MODE (type) != BLKmode)
8487 target = gen_reg_rtx (TYPE_MODE (type));
8488 else
8489 target = assign_temp (type, 1, 1);
8490 }
8491
8492 if (MEM_P (target))
8493 /* Store data into beginning of memory target. */
8494 store_expr (treeop0,
8495 adjust_address (target, TYPE_MODE (valtype), 0),
8496 modifier == EXPAND_STACK_PARM,
8497 false, TYPE_REVERSE_STORAGE_ORDER (type));
8498
8499 else
8500 {
8501 gcc_assert (REG_P (target)
8502 && !TYPE_REVERSE_STORAGE_ORDER (type));
8503
8504 /* Store this field into a union of the proper type. */
8505 poly_uint64 op0_size
8506 = tree_to_poly_uint64 (TYPE_SIZE (TREE_TYPE (treeop0)));
8507 poly_uint64 union_size = GET_MODE_BITSIZE (mode);
8508 store_field (target,
8509 /* The conversion must be constructed so that
8510 we know at compile time how many bits
8511 to preserve. */
8512 ordered_min (op0_size, union_size),
8513 0, 0, 0, TYPE_MODE (valtype), treeop0, 0,
8514 false, false);
8515 }
8516
8517 /* Return the entire union. */
8518 return target;
8519 }
8520
8521 if (mode == TYPE_MODE (TREE_TYPE (treeop0)))
8522 {
8523 op0 = expand_expr (treeop0, target, VOIDmode,
8524 modifier);
8525
8526 /* If the signedness of the conversion differs and OP0 is
8527 a promoted SUBREG, clear that indication since we now
8528 have to do the proper extension. */
8529 if (TYPE_UNSIGNED (TREE_TYPE (treeop0)) != unsignedp
8530 && GET_CODE (op0) == SUBREG)
8531 SUBREG_PROMOTED_VAR_P (op0) = 0;
8532
8533 return REDUCE_BIT_FIELD (op0);
8534 }
8535
8536 op0 = expand_expr (treeop0, NULL_RTX, mode,
8537 modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier);
8538 if (GET_MODE (op0) == mode)
8539 ;
8540
8541 /* If OP0 is a constant, just convert it into the proper mode. */
8542 else if (CONSTANT_P (op0))
8543 {
8544 tree inner_type = TREE_TYPE (treeop0);
8545 machine_mode inner_mode = GET_MODE (op0);
8546
8547 if (inner_mode == VOIDmode)
8548 inner_mode = TYPE_MODE (inner_type);
8549
8550 if (modifier == EXPAND_INITIALIZER)
8551 op0 = lowpart_subreg (mode, op0, inner_mode);
8552 else
8553 op0 = convert_modes (mode, inner_mode, op0,
8554 TYPE_UNSIGNED (inner_type));
8555 }
8556
8557 else if (modifier == EXPAND_INITIALIZER)
8558 op0 = gen_rtx_fmt_e (TYPE_UNSIGNED (TREE_TYPE (treeop0))
8559 ? ZERO_EXTEND : SIGN_EXTEND, mode, op0);
8560
8561 else if (target == 0)
8562 op0 = convert_to_mode (mode, op0,
8563 TYPE_UNSIGNED (TREE_TYPE
8564 (treeop0)));
8565 else
8566 {
8567 convert_move (target, op0,
8568 TYPE_UNSIGNED (TREE_TYPE (treeop0)));
8569 op0 = target;
8570 }
8571
8572 return REDUCE_BIT_FIELD (op0);
8573
8574 case ADDR_SPACE_CONVERT_EXPR:
8575 {
8576 tree treeop0_type = TREE_TYPE (treeop0);
8577
8578 gcc_assert (POINTER_TYPE_P (type));
8579 gcc_assert (POINTER_TYPE_P (treeop0_type));
8580
8581 addr_space_t as_to = TYPE_ADDR_SPACE (TREE_TYPE (type));
8582 addr_space_t as_from = TYPE_ADDR_SPACE (TREE_TYPE (treeop0_type));
8583
8584 /* Conversions between pointers to the same address space should
8585 have been implemented via CONVERT_EXPR / NOP_EXPR. */
8586 gcc_assert (as_to != as_from);
8587
8588 op0 = expand_expr (treeop0, NULL_RTX, VOIDmode, modifier);
8589
8590 /* Ask target code to handle conversion between pointers
8591 to overlapping address spaces. */
8592 if (targetm.addr_space.subset_p (as_to, as_from)
8593 || targetm.addr_space.subset_p (as_from, as_to))
8594 {
8595 op0 = targetm.addr_space.convert (op0, treeop0_type, type);
8596 }
8597 else
8598 {
8599 /* For disjoint address spaces, converting anything but a null
8600 pointer invokes undefined behavior. We truncate or extend the
8601 value as if we'd converted via integers, which handles 0 as
8602 required, and all others as the programmer likely expects. */
8603 #ifndef POINTERS_EXTEND_UNSIGNED
8604 const int POINTERS_EXTEND_UNSIGNED = 1;
8605 #endif
8606 op0 = convert_modes (mode, TYPE_MODE (treeop0_type),
8607 op0, POINTERS_EXTEND_UNSIGNED);
8608 }
8609 gcc_assert (op0);
8610 return op0;
8611 }
8612
8613 case POINTER_PLUS_EXPR:
8614 /* Even though the sizetype mode and the pointer's mode can be different,
8615 expand is able to handle this correctly and get the correct result out
8616 of the PLUS_EXPR code. */
8617 /* Make sure to sign-extend the sizetype offset in a POINTER_PLUS_EXPR
8618 if sizetype precision is smaller than pointer precision. */
8619 if (TYPE_PRECISION (sizetype) < TYPE_PRECISION (type))
8620 treeop1 = fold_convert_loc (loc, type,
8621 fold_convert_loc (loc, ssizetype,
8622 treeop1));
8623 /* If sizetype precision is larger than pointer precision, truncate the
8624 offset to have matching modes. */
8625 else if (TYPE_PRECISION (sizetype) > TYPE_PRECISION (type))
8626 treeop1 = fold_convert_loc (loc, type, treeop1);
8627 /* FALLTHRU */
8628
8629 case PLUS_EXPR:
8630 /* If we are adding a constant, a VAR_DECL that is sp, fp, or ap, and
8631 something else, make sure we add the register to the constant and
8632 then to the other thing. This case can occur during strength
8633 reduction and doing it this way will produce better code if the
8634 frame pointer or argument pointer is eliminated.
8635
8636 fold-const.c will ensure that the constant is always in the inner
8637 PLUS_EXPR, so the only case we need to do anything about is if
8638 sp, ap, or fp is our second argument, in which case we must swap
8639 the innermost first argument and our second argument. */
8640
8641 if (TREE_CODE (treeop0) == PLUS_EXPR
8642 && TREE_CODE (TREE_OPERAND (treeop0, 1)) == INTEGER_CST
8643 && VAR_P (treeop1)
8644 && (DECL_RTL (treeop1) == frame_pointer_rtx
8645 || DECL_RTL (treeop1) == stack_pointer_rtx
8646 || DECL_RTL (treeop1) == arg_pointer_rtx))
8647 {
8648 gcc_unreachable ();
8649 }
8650
8651 /* If the result is to be ptr_mode and we are adding an integer to
8652 something, we might be forming a constant. So try to use
8653 plus_constant. If it produces a sum and we can't accept it,
8654 use force_operand. This allows P = &ARR[const] to generate
8655 efficient code on machines where a SYMBOL_REF is not a valid
8656 address.
8657
8658 If this is an EXPAND_SUM call, always return the sum. */
8659 if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER
8660 || (mode == ptr_mode && (unsignedp || ! flag_trapv)))
8661 {
8662 if (modifier == EXPAND_STACK_PARM)
8663 target = 0;
8664 if (TREE_CODE (treeop0) == INTEGER_CST
8665 && HWI_COMPUTABLE_MODE_P (mode)
8666 && TREE_CONSTANT (treeop1))
8667 {
8668 rtx constant_part;
8669 HOST_WIDE_INT wc;
8670 machine_mode wmode = TYPE_MODE (TREE_TYPE (treeop1));
8671
8672 op1 = expand_expr (treeop1, subtarget, VOIDmode,
8673 EXPAND_SUM);
8674 /* Use wi::shwi to ensure that the constant is
8675 truncated according to the mode of OP1, then sign extended
8676 to a HOST_WIDE_INT. Using the constant directly can result
8677 in non-canonical RTL in a 64x32 cross compile. */
8678 wc = TREE_INT_CST_LOW (treeop0);
8679 constant_part
8680 = immed_wide_int_const (wi::shwi (wc, wmode), wmode);
8681 op1 = plus_constant (mode, op1, INTVAL (constant_part));
8682 if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
8683 op1 = force_operand (op1, target);
8684 return REDUCE_BIT_FIELD (op1);
8685 }
8686
8687 else if (TREE_CODE (treeop1) == INTEGER_CST
8688 && HWI_COMPUTABLE_MODE_P (mode)
8689 && TREE_CONSTANT (treeop0))
8690 {
8691 rtx constant_part;
8692 HOST_WIDE_INT wc;
8693 machine_mode wmode = TYPE_MODE (TREE_TYPE (treeop0));
8694
8695 op0 = expand_expr (treeop0, subtarget, VOIDmode,
8696 (modifier == EXPAND_INITIALIZER
8697 ? EXPAND_INITIALIZER : EXPAND_SUM));
8698 if (! CONSTANT_P (op0))
8699 {
8700 op1 = expand_expr (treeop1, NULL_RTX,
8701 VOIDmode, modifier);
8702 /* Return a PLUS if modifier says it's OK. */
8703 if (modifier == EXPAND_SUM
8704 || modifier == EXPAND_INITIALIZER)
8705 return simplify_gen_binary (PLUS, mode, op0, op1);
8706 goto binop2;
8707 }
8708 /* Use wi::shwi to ensure that the constant is
8709 truncated according to the mode of OP0, then sign extended
8710 to a HOST_WIDE_INT. Using the constant directly can result
8711 in non-canonical RTL in a 64x32 cross compile. */
8712 wc = TREE_INT_CST_LOW (treeop1);
8713 constant_part
8714 = immed_wide_int_const (wi::shwi (wc, wmode), wmode);
8715 op0 = plus_constant (mode, op0, INTVAL (constant_part));
8716 if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
8717 op0 = force_operand (op0, target);
8718 return REDUCE_BIT_FIELD (op0);
8719 }
8720 }
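	/* Illustrative sketch, not from the GCC sources: expanding
	   P = &arr[10] with EXPAND_SUM, where arr is a static int array on
	   a 32-bit target, takes the second branch above and folds the
	   offset with plus_constant, yielding roughly

	     (plus:SI (symbol_ref:SI ("arr")) (const_int 40))

	   With EXPAND_SUM the bare sum is returned as is; other modifiers
	   would first run it through force_operand.  */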
8721
8722 /* Use TER to expand pointer addition of a negated value
8723 as pointer subtraction. */
8724 if ((POINTER_TYPE_P (TREE_TYPE (treeop0))
8725 || (TREE_CODE (TREE_TYPE (treeop0)) == VECTOR_TYPE
8726 && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (treeop0)))))
8727 && TREE_CODE (treeop1) == SSA_NAME
8728 && TYPE_MODE (TREE_TYPE (treeop0))
8729 == TYPE_MODE (TREE_TYPE (treeop1)))
8730 {
8731 gimple *def = get_def_for_expr (treeop1, NEGATE_EXPR);
8732 if (def)
8733 {
8734 treeop1 = gimple_assign_rhs1 (def);
8735 code = MINUS_EXPR;
8736 goto do_minus;
8737 }
8738 }
8739
8740 /* No sense saving up arithmetic to be done
8741 if it's all in the wrong mode to form part of an address.
8742 And force_operand won't know whether to sign-extend or
8743 zero-extend. */
8744 if (modifier != EXPAND_INITIALIZER
8745 && (modifier != EXPAND_SUM || mode != ptr_mode))
8746 {
8747 expand_operands (treeop0, treeop1,
8748 subtarget, &op0, &op1, modifier);
8749 if (op0 == const0_rtx)
8750 return op1;
8751 if (op1 == const0_rtx)
8752 return op0;
8753 goto binop2;
8754 }
8755
8756 expand_operands (treeop0, treeop1,
8757 subtarget, &op0, &op1, modifier);
8758 return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS, mode, op0, op1));
8759
8760 case MINUS_EXPR:
8761 case POINTER_DIFF_EXPR:
8762 do_minus:
8763 /* For initializers, we are allowed to return a MINUS of two
8764 symbolic constants. Here we handle all cases when both operands
8765 are constant. */
8766 /* Handle difference of two symbolic constants,
8767 for the sake of an initializer. */
8768 if ((modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
8769 && really_constant_p (treeop0)
8770 && really_constant_p (treeop1))
8771 {
8772 expand_operands (treeop0, treeop1,
8773 NULL_RTX, &op0, &op1, modifier);
8774 return simplify_gen_binary (MINUS, mode, op0, op1);
8775 }
8776
8777 /* No sense saving up arithmetic to be done
8778 if it's all in the wrong mode to form part of an address.
8779 And force_operand won't know whether to sign-extend or
8780 zero-extend. */
8781 if (modifier != EXPAND_INITIALIZER
8782 && (modifier != EXPAND_SUM || mode != ptr_mode))
8783 goto binop;
8784
8785 expand_operands (treeop0, treeop1,
8786 subtarget, &op0, &op1, modifier);
8787
8788 /* Convert A - const to A + (-const). */
8789 if (CONST_INT_P (op1))
8790 {
8791 op1 = negate_rtx (mode, op1);
8792 return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS, mode, op0, op1));
8793 }
8794
8795 goto binop2;
8796
8797 case WIDEN_MULT_PLUS_EXPR:
8798 case WIDEN_MULT_MINUS_EXPR:
8799 expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
8800 op2 = expand_normal (treeop2);
8801 target = expand_widen_pattern_expr (ops, op0, op1, op2,
8802 target, unsignedp);
8803 return target;
8804
8805 case WIDEN_MULT_EXPR:
8806 /* If first operand is constant, swap them.
8807 Thus the following special case checks need only
8808 check the second operand. */
8809 if (TREE_CODE (treeop0) == INTEGER_CST)
8810 std::swap (treeop0, treeop1);
8811
8812 /* First, check if we have a multiplication of one signed and one
8813 unsigned operand. */
8814 if (TREE_CODE (treeop1) != INTEGER_CST
8815 && (TYPE_UNSIGNED (TREE_TYPE (treeop0))
8816 != TYPE_UNSIGNED (TREE_TYPE (treeop1))))
8817 {
8818 machine_mode innermode = TYPE_MODE (TREE_TYPE (treeop0));
8819 this_optab = usmul_widen_optab;
8820 if (find_widening_optab_handler (this_optab, mode, innermode)
8821 != CODE_FOR_nothing)
8822 {
8823 if (TYPE_UNSIGNED (TREE_TYPE (treeop0)))
8824 expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1,
8825 EXPAND_NORMAL);
8826 else
8827 expand_operands (treeop0, treeop1, NULL_RTX, &op1, &op0,
8828 EXPAND_NORMAL);
8829 /* op0 and op1 might still be constant, despite the above
8830 != INTEGER_CST check. Handle it. */
8831 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
8832 {
8833 op0 = convert_modes (mode, innermode, op0, true);
8834 op1 = convert_modes (mode, innermode, op1, false);
8835 return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1,
8836 target, unsignedp));
8837 }
8838 goto binop3;
8839 }
8840 }
8841 /* Check for a multiplication with matching signedness. */
8842 else if ((TREE_CODE (treeop1) == INTEGER_CST
8843 && int_fits_type_p (treeop1, TREE_TYPE (treeop0)))
8844 || (TYPE_UNSIGNED (TREE_TYPE (treeop1))
8845 == TYPE_UNSIGNED (TREE_TYPE (treeop0))))
8846 {
8847 tree op0type = TREE_TYPE (treeop0);
8848 machine_mode innermode = TYPE_MODE (op0type);
8849 bool zextend_p = TYPE_UNSIGNED (op0type);
8850 optab other_optab = zextend_p ? smul_widen_optab : umul_widen_optab;
8851 this_optab = zextend_p ? umul_widen_optab : smul_widen_optab;
8852
8853 if (TREE_CODE (treeop0) != INTEGER_CST)
8854 {
8855 if (find_widening_optab_handler (this_optab, mode, innermode)
8856 != CODE_FOR_nothing)
8857 {
8858 expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1,
8859 EXPAND_NORMAL);
8860 /* op0 and op1 might still be constant, despite the above
8861 != INTEGER_CST check. Handle it. */
8862 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
8863 {
8864 widen_mult_const:
8865 op0 = convert_modes (mode, innermode, op0, zextend_p);
8866 op1
8867 = convert_modes (mode, innermode, op1,
8868 TYPE_UNSIGNED (TREE_TYPE (treeop1)));
8869 return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1,
8870 target,
8871 unsignedp));
8872 }
8873 temp = expand_widening_mult (mode, op0, op1, target,
8874 unsignedp, this_optab);
8875 return REDUCE_BIT_FIELD (temp);
8876 }
8877 if (find_widening_optab_handler (other_optab, mode, innermode)
8878 != CODE_FOR_nothing
8879 && innermode == word_mode)
8880 {
8881 rtx htem, hipart;
8882 op0 = expand_normal (treeop0);
8883 op1 = expand_normal (treeop1);
8884 /* op0 and op1 might be constants, despite the above
8885 != INTEGER_CST check. Handle it. */
8886 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
8887 goto widen_mult_const;
8888 if (TREE_CODE (treeop1) == INTEGER_CST)
8889 op1 = convert_modes (mode, word_mode, op1,
8890 TYPE_UNSIGNED (TREE_TYPE (treeop1)));
8891 temp = expand_binop (mode, other_optab, op0, op1, target,
8892 unsignedp, OPTAB_LIB_WIDEN);
8893 hipart = gen_highpart (word_mode, temp);
8894 htem = expand_mult_highpart_adjust (word_mode, hipart,
8895 op0, op1, hipart,
8896 zextend_p);
8897 if (htem != hipart)
8898 emit_move_insn (hipart, htem);
8899 return REDUCE_BIT_FIELD (temp);
8900 }
8901 }
8902 }
8903 treeop0 = fold_build1 (CONVERT_EXPR, type, treeop0);
8904 treeop1 = fold_build1 (CONVERT_EXPR, type, treeop1);
8905 expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
8906 return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1, target, unsignedp));
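	/* Illustrative sketch, not from the GCC sources: for

	     long long r = (long long) a * (long long) b;   // a, b: int

	   on a 32-bit target, the WIDEN_MULT_EXPR above is expanded through
	   smul_widen_optab (e.g. a mulsidi3 pattern) when one exists.  If
	   only the opposite-signedness widening multiply is available and
	   the inner mode is word_mode, that one is used instead and
	   expand_mult_highpart_adjust corrects the high word.  */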
8907
8908 case MULT_EXPR:
8909 /* If this is a fixed-point operation, then we cannot use the code
8910 below because "expand_mult" doesn't support sat/no-sat fixed-point
8911 multiplications. */
8912 if (ALL_FIXED_POINT_MODE_P (mode))
8913 goto binop;
8914
8915 /* If first operand is constant, swap them.
8916 Thus the following special case checks need only
8917 check the second operand. */
8918 if (TREE_CODE (treeop0) == INTEGER_CST)
8919 std::swap (treeop0, treeop1);
8920
8921 /* Attempt to return something suitable for generating an
8922 indexed address, for machines that support that. */
8923
8924 if (modifier == EXPAND_SUM && mode == ptr_mode
8925 && tree_fits_shwi_p (treeop1))
8926 {
8927 tree exp1 = treeop1;
8928
8929 op0 = expand_expr (treeop0, subtarget, VOIDmode,
8930 EXPAND_SUM);
8931
8932 if (!REG_P (op0))
8933 op0 = force_operand (op0, NULL_RTX);
8934 if (!REG_P (op0))
8935 op0 = copy_to_mode_reg (mode, op0);
8936
8937 return REDUCE_BIT_FIELD (gen_rtx_MULT (mode, op0,
8938 gen_int_mode (tree_to_shwi (exp1),
8939 TYPE_MODE (TREE_TYPE (exp1)))));
8940 }
8941
8942 if (modifier == EXPAND_STACK_PARM)
8943 target = 0;
8944
8945 expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
8946 return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1, target, unsignedp));
8947
8948 case TRUNC_MOD_EXPR:
8949 case FLOOR_MOD_EXPR:
8950 case CEIL_MOD_EXPR:
8951 case ROUND_MOD_EXPR:
8952
8953 case TRUNC_DIV_EXPR:
8954 case FLOOR_DIV_EXPR:
8955 case CEIL_DIV_EXPR:
8956 case ROUND_DIV_EXPR:
8957 case EXACT_DIV_EXPR:
8958 {
8959 /* If this is a fixed-point operation, then we cannot use the code
8960 below because "expand_divmod" doesn't support sat/no-sat fixed-point
8961 divisions. */
8962 if (ALL_FIXED_POINT_MODE_P (mode))
8963 goto binop;
8964
8965 if (modifier == EXPAND_STACK_PARM)
8966 target = 0;
8967 /* Possible optimization: compute the dividend with EXPAND_SUM;
8968 then, if the divisor is constant, we can optimize the case
8969 where some terms of the dividend have coefficients divisible by it. */
8970 expand_operands (treeop0, treeop1,
8971 subtarget, &op0, &op1, EXPAND_NORMAL);
8972 bool mod_p = code == TRUNC_MOD_EXPR || code == FLOOR_MOD_EXPR
8973 || code == CEIL_MOD_EXPR || code == ROUND_MOD_EXPR;
8974 if (SCALAR_INT_MODE_P (mode)
8975 && optimize >= 2
8976 && get_range_pos_neg (treeop0) == 1
8977 && get_range_pos_neg (treeop1) == 1)
8978 {
8979 /* If both arguments are known to be positive when interpreted
8980 as signed, we can expand it as both signed and unsigned
8981 division or modulo. Choose the cheaper sequence in that case. */
8982 bool speed_p = optimize_insn_for_speed_p ();
8983 do_pending_stack_adjust ();
8984 start_sequence ();
8985 rtx uns_ret = expand_divmod (mod_p, code, mode, op0, op1, target, 1);
8986 rtx_insn *uns_insns = get_insns ();
8987 end_sequence ();
8988 start_sequence ();
8989 rtx sgn_ret = expand_divmod (mod_p, code, mode, op0, op1, target, 0);
8990 rtx_insn *sgn_insns = get_insns ();
8991 end_sequence ();
8992 unsigned uns_cost = seq_cost (uns_insns, speed_p);
8993 unsigned sgn_cost = seq_cost (sgn_insns, speed_p);
8994
8995 /* If costs are the same then use the other cost metric
8996 (size vs. speed) as a tie breaker. */
8997 if (uns_cost == sgn_cost)
8998 {
8999 uns_cost = seq_cost (uns_insns, !speed_p);
9000 sgn_cost = seq_cost (sgn_insns, !speed_p);
9001 }
9002
9003 if (uns_cost < sgn_cost || (uns_cost == sgn_cost && unsignedp))
9004 {
9005 emit_insn (uns_insns);
9006 return uns_ret;
9007 }
9008 emit_insn (sgn_insns);
9009 return sgn_ret;
9010 }
9011 return expand_divmod (mod_p, code, mode, op0, op1, target, unsignedp);
9012 }
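	/* Illustrative sketch, not from the GCC sources: for

	     int q = x / 2;   // range info proves x >= 0

	   at -O2, both a signed and an unsigned division sequence are
	   generated in throw-away sequences above; the unsigned one is a
	   single logical shift while the signed one needs an extra
	   adjustment for negative dividends, so seq_cost selects the
	   unsigned expansion.  */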
9013 case RDIV_EXPR:
9014 goto binop;
9015
9016 case MULT_HIGHPART_EXPR:
9017 expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
9018 temp = expand_mult_highpart (mode, op0, op1, target, unsignedp);
9019 gcc_assert (temp);
9020 return temp;
9021
9022 case FIXED_CONVERT_EXPR:
9023 op0 = expand_normal (treeop0);
9024 if (target == 0 || modifier == EXPAND_STACK_PARM)
9025 target = gen_reg_rtx (mode);
9026
9027 if ((TREE_CODE (TREE_TYPE (treeop0)) == INTEGER_TYPE
9028 && TYPE_UNSIGNED (TREE_TYPE (treeop0)))
9029 || (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type)))
9030 expand_fixed_convert (target, op0, 1, TYPE_SATURATING (type));
9031 else
9032 expand_fixed_convert (target, op0, 0, TYPE_SATURATING (type));
9033 return target;
9034
9035 case FIX_TRUNC_EXPR:
9036 op0 = expand_normal (treeop0);
9037 if (target == 0 || modifier == EXPAND_STACK_PARM)
9038 target = gen_reg_rtx (mode);
9039 expand_fix (target, op0, unsignedp);
9040 return target;
9041
9042 case FLOAT_EXPR:
9043 op0 = expand_normal (treeop0);
9044 if (target == 0 || modifier == EXPAND_STACK_PARM)
9045 target = gen_reg_rtx (mode);
9046 /* expand_float can't figure out what to do if FROM has VOIDmode.
9047 So give it the correct mode. With -O, cse will optimize this. */
9048 if (GET_MODE (op0) == VOIDmode)
9049 op0 = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (treeop0)),
9050 op0);
9051 expand_float (target, op0,
9052 TYPE_UNSIGNED (TREE_TYPE (treeop0)));
9053 return target;
9054
9055 case NEGATE_EXPR:
9056 op0 = expand_expr (treeop0, subtarget,
9057 VOIDmode, EXPAND_NORMAL);
9058 if (modifier == EXPAND_STACK_PARM)
9059 target = 0;
9060 temp = expand_unop (mode,
9061 optab_for_tree_code (NEGATE_EXPR, type,
9062 optab_default),
9063 op0, target, 0);
9064 gcc_assert (temp);
9065 return REDUCE_BIT_FIELD (temp);
9066
9067 case ABS_EXPR:
9068 case ABSU_EXPR:
9069 op0 = expand_expr (treeop0, subtarget,
9070 VOIDmode, EXPAND_NORMAL);
9071 if (modifier == EXPAND_STACK_PARM)
9072 target = 0;
9073
9074 /* ABS_EXPR is not valid for complex arguments. */
9075 gcc_assert (GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
9076 && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT);
9077
9078 /* Unsigned abs is simply the operand. Testing here means we don't
9079 risk generating incorrect code below. */
9080 if (TYPE_UNSIGNED (TREE_TYPE (treeop0)))
9081 return op0;
9082
9083 return expand_abs (mode, op0, target, unsignedp,
9084 safe_from_p (target, treeop0, 1));
9085
9086 case MAX_EXPR:
9087 case MIN_EXPR:
9088 target = original_target;
9089 if (target == 0
9090 || modifier == EXPAND_STACK_PARM
9091 || (MEM_P (target) && MEM_VOLATILE_P (target))
9092 || GET_MODE (target) != mode
9093 || (REG_P (target)
9094 && REGNO (target) < FIRST_PSEUDO_REGISTER))
9095 target = gen_reg_rtx (mode);
9096 expand_operands (treeop0, treeop1,
9097 target, &op0, &op1, EXPAND_NORMAL);
9098
9099 /* First try to do it with a special MIN or MAX instruction.
9100 If that does not win, use a conditional jump to select the proper
9101 value. */
9102 this_optab = optab_for_tree_code (code, type, optab_default);
9103 temp = expand_binop (mode, this_optab, op0, op1, target, unsignedp,
9104 OPTAB_WIDEN);
9105 if (temp != 0)
9106 return temp;
9107
9108 /* For vector MIN <x, y>, expand it as VEC_COND_EXPR <x <= y, x, y>
9109 and similarly for MAX <x, y>. */
9110 if (VECTOR_TYPE_P (type))
9111 {
9112 tree t0 = make_tree (type, op0);
9113 tree t1 = make_tree (type, op1);
9114 tree comparison = build2 (code == MIN_EXPR ? LE_EXPR : GE_EXPR,
9115 type, t0, t1);
9116 return expand_vec_cond_expr (type, comparison, t0, t1,
9117 original_target);
9118 }
9119
9120 /* At this point, a MEM target is no longer useful; we will get better
9121 code without it. */
9122
9123 if (! REG_P (target))
9124 target = gen_reg_rtx (mode);
9125
9126 /* If op1 was placed in target, swap op0 and op1. */
9127 if (target != op0 && target == op1)
9128 std::swap (op0, op1);
9129
9130 /* We generate better code and avoid problems with op1 mentioning
9131 target by forcing op1 into a pseudo if it isn't a constant. */
9132 if (! CONSTANT_P (op1))
9133 op1 = force_reg (mode, op1);
9134
9135 {
9136 enum rtx_code comparison_code;
9137 rtx cmpop1 = op1;
9138
9139 if (code == MAX_EXPR)
9140 comparison_code = unsignedp ? GEU : GE;
9141 else
9142 comparison_code = unsignedp ? LEU : LE;
9143
9144 /* Canonicalize to comparisons against 0. */
9145 if (op1 == const1_rtx)
9146 {
9147 /* Converting (a >= 1 ? a : 1) into (a > 0 ? a : 1)
9148 or (a != 0 ? a : 1) for unsigned.
9149 For MIN we are safe converting (a <= 1 ? a : 1)
9150 into (a <= 0 ? a : 1) */
9151 cmpop1 = const0_rtx;
9152 if (code == MAX_EXPR)
9153 comparison_code = unsignedp ? NE : GT;
9154 }
9155 if (op1 == constm1_rtx && !unsignedp)
9156 {
9157 /* Converting (a >= -1 ? a : -1) into (a >= 0 ? a : -1)
9158 and (a <= -1 ? a : -1) into (a < 0 ? a : -1) */
9159 cmpop1 = const0_rtx;
9160 if (code == MIN_EXPR)
9161 comparison_code = LT;
9162 }
9163
9164 /* Use a conditional move if possible. */
9165 if (can_conditionally_move_p (mode))
9166 {
9167 rtx insn;
9168
9169 start_sequence ();
9170
9171 /* Try to emit the conditional move. */
9172 insn = emit_conditional_move (target, comparison_code,
9173 op0, cmpop1, mode,
9174 op0, op1, mode,
9175 unsignedp);
9176
9177 /* If we could do the conditional move, emit the sequence,
9178 and return. */
9179 if (insn)
9180 {
9181 rtx_insn *seq = get_insns ();
9182 end_sequence ();
9183 emit_insn (seq);
9184 return target;
9185 }
9186
9187 /* Otherwise discard the sequence and fall back to code with
9188 branches. */
9189 end_sequence ();
9190 }
9191
9192 if (target != op0)
9193 emit_move_insn (target, op0);
9194
9195 lab = gen_label_rtx ();
9196 do_compare_rtx_and_jump (target, cmpop1, comparison_code,
9197 unsignedp, mode, NULL_RTX, NULL, lab,
9198 profile_probability::uninitialized ());
9199 }
9200 emit_move_insn (target, op1);
9201 emit_label (lab);
9202 return target;
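	/* Illustrative sketch, not from the GCC sources: when neither a
	   MIN/MAX instruction nor a conditional move is available, e.g. for

	     int m = a < b ? a : b;   // MIN_EXPR

	   the fallback above emits roughly

	     target = a;
	     if (target <= b) goto lab;   // LE, or LEU if unsigned
	     target = b;
	   lab:

	   with the comparison canonicalized against 0 when b is the
	   constant 1 or -1.  */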
9203
9204 case BIT_NOT_EXPR:
9205 op0 = expand_expr (treeop0, subtarget,
9206 VOIDmode, EXPAND_NORMAL);
9207 if (modifier == EXPAND_STACK_PARM)
9208 target = 0;
9209 /* In case we have to reduce the result to bitfield precision
9210 for an unsigned bitfield, expand this as an XOR with a proper
9211 constant instead. */
9212 if (reduce_bit_field && TYPE_UNSIGNED (type))
9213 {
9214 int_mode = SCALAR_INT_TYPE_MODE (type);
9215 wide_int mask = wi::mask (TYPE_PRECISION (type),
9216 false, GET_MODE_PRECISION (int_mode));
9217
9218 temp = expand_binop (int_mode, xor_optab, op0,
9219 immed_wide_int_const (mask, int_mode),
9220 target, 1, OPTAB_LIB_WIDEN);
9221 }
9222 else
9223 temp = expand_unop (mode, one_cmpl_optab, op0, target, 1);
9224 gcc_assert (temp);
9225 return temp;
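	/* Illustrative sketch, not from the GCC sources: for an unsigned
	   bit-field of precision 3, e.g.

	     struct s { unsigned x : 3; } v;
	     v.x = ~v.x;

	   reduce_bit_field is set and the BIT_NOT_EXPR above is expanded as
	   v.x ^ 7, so the bits above the field's precision stay zero
	   without a separate truncation step.  */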
9226
9227 /* ??? Can optimize bitwise operations with one arg constant.
9228 Can optimize (a bitwise1 n) bitwise2 (a bitwise3 b)
9229 and (a bitwise1 b) bitwise2 b (etc)
9230 but that is probably not worthwhile. */
9231
9232 case BIT_AND_EXPR:
9233 case BIT_IOR_EXPR:
9234 case BIT_XOR_EXPR:
9235 goto binop;
9236
9237 case LROTATE_EXPR:
9238 case RROTATE_EXPR:
9239 gcc_assert (VECTOR_MODE_P (TYPE_MODE (type))
9240 || type_has_mode_precision_p (type));
9241 /* fall through */
9242
9243 case LSHIFT_EXPR:
9244 case RSHIFT_EXPR:
9245 {
9246 /* If this is a fixed-point operation, then we cannot use the code
9247 below because "expand_shift" doesn't support sat/no-sat fixed-point
9248 shifts. */
9249 if (ALL_FIXED_POINT_MODE_P (mode))
9250 goto binop;
9251
9252 if (! safe_from_p (subtarget, treeop1, 1))
9253 subtarget = 0;
9254 if (modifier == EXPAND_STACK_PARM)
9255 target = 0;
9256 op0 = expand_expr (treeop0, subtarget,
9257 VOIDmode, EXPAND_NORMAL);
9258
9259 /* Left shift optimization when shifting across word_size boundary.
9260
9261 If mode == GET_MODE_WIDER_MODE (word_mode), then there is
9262 normally no native instruction to support a left shift in this
9263 wide mode. Given the scenario below:
9264
9265 Type A = (Type) B << C
9266
9267 |< T >|
9268 | dest_high | dest_low |
9269
9270 | word_size |
9271
9272 If the shift amount C causes B to be shifted across the word
9273 size boundary, i.e. part of B is shifted into the high half of
9274 the destination register and part of B remains in the low
9275 half, then GCC will use the following left shift expansion
9276 logic:
9277
9278 1. Initialize dest_low to B.
9279 2. Initialize every bit of dest_high to the sign bit of B.
9280 3. Logically left shift dest_low by C bits to finalize dest_low.
9281 The value of dest_low before this shift is kept in a temp D.
9282 4. Logically left shift dest_high by C.
9283 5. Logically right shift D by (word_size - C).
9284 6. Or the result of 4 and 5 to finalize dest_high.
9285
9286 However, by checking the gimple statements, if operand B
9287 comes from a sign extension, then we can simplify the above
9288 expansion logic to:
9289
9290 1. dest_high = src_low >> (word_size - C).
9291 2. dest_low = src_low << C.
9292
9293 One arithmetic right shift accomplishes steps 2, 4, 5 and 6,
9294 thus reducing the number of steps needed from 6 to 2.
9296
9297 The case is similar for zero extension, except that we
9298 initialize dest_high to zero rather than copies of the sign
9299 bit from B. Furthermore, we need to use a logical right shift
9300 in this case.
9301
9302 The choice of sign-extension versus zero-extension is
9303 determined entirely by whether or not B is signed and is
9304 independent of the current setting of unsignedp. */
9305
9306 temp = NULL_RTX;
9307 if (code == LSHIFT_EXPR
9308 && target
9309 && REG_P (target)
9310 && GET_MODE_2XWIDER_MODE (word_mode).exists (&int_mode)
9311 && mode == int_mode
9312 && TREE_CONSTANT (treeop1)
9313 && TREE_CODE (treeop0) == SSA_NAME)
9314 {
9315 gimple *def = SSA_NAME_DEF_STMT (treeop0);
9316 if (is_gimple_assign (def)
9317 && gimple_assign_rhs_code (def) == NOP_EXPR)
9318 {
9319 scalar_int_mode rmode = SCALAR_INT_TYPE_MODE
9320 (TREE_TYPE (gimple_assign_rhs1 (def)));
9321
9322 if (GET_MODE_SIZE (rmode) < GET_MODE_SIZE (int_mode)
9323 && TREE_INT_CST_LOW (treeop1) < GET_MODE_BITSIZE (word_mode)
9324 && ((TREE_INT_CST_LOW (treeop1) + GET_MODE_BITSIZE (rmode))
9325 >= GET_MODE_BITSIZE (word_mode)))
9326 {
9327 rtx_insn *seq, *seq_old;
9328 poly_uint64 high_off = subreg_highpart_offset (word_mode,
9329 int_mode);
9330 bool extend_unsigned
9331 = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def)));
9332 rtx low = lowpart_subreg (word_mode, op0, int_mode);
9333 rtx dest_low = lowpart_subreg (word_mode, target, int_mode);
9334 rtx dest_high = simplify_gen_subreg (word_mode, target,
9335 int_mode, high_off);
9336 HOST_WIDE_INT ramount = (BITS_PER_WORD
9337 - TREE_INT_CST_LOW (treeop1));
9338 tree rshift = build_int_cst (TREE_TYPE (treeop1), ramount);
9339
9340 start_sequence ();
9341 /* dest_high = src_low >> (word_size - C). */
9342 temp = expand_variable_shift (RSHIFT_EXPR, word_mode, low,
9343 rshift, dest_high,
9344 extend_unsigned);
9345 if (temp != dest_high)
9346 emit_move_insn (dest_high, temp);
9347
9348 /* dest_low = src_low << C. */
9349 temp = expand_variable_shift (LSHIFT_EXPR, word_mode, low,
9350 treeop1, dest_low, unsignedp);
9351 if (temp != dest_low)
9352 emit_move_insn (dest_low, temp);
9353
9354 seq = get_insns ();
9355 end_sequence ();
9356 temp = target;
9357
9358 if (have_insn_for (ASHIFT, int_mode))
9359 {
9360 bool speed_p = optimize_insn_for_speed_p ();
9361 start_sequence ();
9362 rtx ret_old = expand_variable_shift (code, int_mode,
9363 op0, treeop1,
9364 target,
9365 unsignedp);
9366
9367 seq_old = get_insns ();
9368 end_sequence ();
9369 if (seq_cost (seq, speed_p)
9370 >= seq_cost (seq_old, speed_p))
9371 {
9372 seq = seq_old;
9373 temp = ret_old;
9374 }
9375 }
9376 emit_insn (seq);
9377 }
9378 }
9379 }
9380
9381 if (temp == NULL_RTX)
9382 temp = expand_variable_shift (code, mode, op0, treeop1, target,
9383 unsignedp);
9384 if (code == LSHIFT_EXPR)
9385 temp = REDUCE_BIT_FIELD (temp);
9386 return temp;
9387 }
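	/* Illustrative sketch, not from the GCC sources: on a 64-bit target
	   (word_mode == DImode, int_mode == TImode), for

	     __int128 r = (__int128) x << 40;   // x: 32-bit signed int

	   the shift crosses the word boundary (40 + 32 >= 64), so the block
	   above emits

	     dest_high = (long long) x >> 24;   // arithmetic, 64 - 40
	     dest_low  = (long long) x << 40;

	   and keeps that two-shift sequence only if it is cheaper than the
	   target's native TImode shift sequence, when one exists.  */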
9388
9389 /* Could determine the answer when only additive constants differ. Also,
9390 the addition of one can be handled by changing the condition. */
9391 case LT_EXPR:
9392 case LE_EXPR:
9393 case GT_EXPR:
9394 case GE_EXPR:
9395 case EQ_EXPR:
9396 case NE_EXPR:
9397 case UNORDERED_EXPR:
9398 case ORDERED_EXPR:
9399 case UNLT_EXPR:
9400 case UNLE_EXPR:
9401 case UNGT_EXPR:
9402 case UNGE_EXPR:
9403 case UNEQ_EXPR:
9404 case LTGT_EXPR:
9405 {
9406 temp = do_store_flag (ops,
9407 modifier != EXPAND_STACK_PARM ? target : NULL_RTX,
9408 tmode != VOIDmode ? tmode : mode);
9409 if (temp)
9410 return temp;
9411
9412 /* Use a compare and a jump for BLKmode comparisons, or for function
9413 type comparisons if have_canonicalize_funcptr_for_compare. */
9414
9415 if ((target == 0
9416 || modifier == EXPAND_STACK_PARM
9417 || ! safe_from_p (target, treeop0, 1)
9418 || ! safe_from_p (target, treeop1, 1)
9419 /* Make sure we don't have a hard reg (such as function's return
9420 value) live across basic blocks, if not optimizing. */
9421 || (!optimize && REG_P (target)
9422 && REGNO (target) < FIRST_PSEUDO_REGISTER)))
9423 target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
9424
9425 emit_move_insn (target, const0_rtx);
9426
9427 rtx_code_label *lab1 = gen_label_rtx ();
9428 jumpifnot_1 (code, treeop0, treeop1, lab1,
9429 profile_probability::uninitialized ());
9430
9431 if (TYPE_PRECISION (type) == 1 && !TYPE_UNSIGNED (type))
9432 emit_move_insn (target, constm1_rtx);
9433 else
9434 emit_move_insn (target, const1_rtx);
9435
9436 emit_label (lab1);
9437 return target;
9438 }
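	/* Illustrative sketch, not from the GCC sources: if do_store_flag
	   cannot use a set-on-condition instruction for, say, c = a < b,
	   the fallback above emits roughly

	     target = 0;
	     if (!(a < b)) goto lab1;
	     target = 1;   // or -1 for a signed 1-bit type
	   lab1:

	   leaving the boolean result in TARGET.  */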
9439 case COMPLEX_EXPR:
9440 /* Get the rtx code of the operands. */
9441 op0 = expand_normal (treeop0);
9442 op1 = expand_normal (treeop1);
9443
9444 if (!target)
9445 target = gen_reg_rtx (TYPE_MODE (type));
9446 else
9447 /* If target overlaps with op1, then either we need to force
9448 op1 into a pseudo (if target also overlaps with op0),
9449 or write the complex parts in reverse order. */
9450 switch (GET_CODE (target))
9451 {
9452 case CONCAT:
9453 if (reg_overlap_mentioned_p (XEXP (target, 0), op1))
9454 {
9455 if (reg_overlap_mentioned_p (XEXP (target, 1), op0))
9456 {
9457 complex_expr_force_op1:
9458 temp = gen_reg_rtx (GET_MODE_INNER (GET_MODE (target)));
9459 emit_move_insn (temp, op1);
9460 op1 = temp;
9461 break;
9462 }
9463 complex_expr_swap_order:
9464 /* Move the imaginary (op1) and real (op0) parts to their
9465 location. */
9466 write_complex_part (target, op1, true);
9467 write_complex_part (target, op0, false);
9468
9469 return target;
9470 }
9471 break;
9472 case MEM:
9473 temp = adjust_address_nv (target,
9474 GET_MODE_INNER (GET_MODE (target)), 0);
9475 if (reg_overlap_mentioned_p (temp, op1))
9476 {
9477 scalar_mode imode = GET_MODE_INNER (GET_MODE (target));
9478 temp = adjust_address_nv (target, imode,
9479 GET_MODE_SIZE (imode));
9480 if (reg_overlap_mentioned_p (temp, op0))
9481 goto complex_expr_force_op1;
9482 goto complex_expr_swap_order;
9483 }
9484 break;
9485 default:
9486 if (reg_overlap_mentioned_p (target, op1))
9487 {
9488 if (reg_overlap_mentioned_p (target, op0))
9489 goto complex_expr_force_op1;
9490 goto complex_expr_swap_order;
9491 }
9492 break;
9493 }
9494
9495 /* Move the real (op0) and imaginary (op1) parts to their location. */
9496 write_complex_part (target, op0, false);
9497 write_complex_part (target, op1, true);
9498
9499 return target;
9500
9501 case WIDEN_SUM_EXPR:
9502 {
9503 tree oprnd0 = treeop0;
9504 tree oprnd1 = treeop1;
9505
9506 expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9507 target = expand_widen_pattern_expr (ops, op0, NULL_RTX, op1,
9508 target, unsignedp);
9509 return target;
9510 }
9511
9512 case VEC_UNPACK_HI_EXPR:
9513 case VEC_UNPACK_LO_EXPR:
9514 case VEC_UNPACK_FIX_TRUNC_HI_EXPR:
9515 case VEC_UNPACK_FIX_TRUNC_LO_EXPR:
9516 {
9517 op0 = expand_normal (treeop0);
9518 temp = expand_widen_pattern_expr (ops, op0, NULL_RTX, NULL_RTX,
9519 target, unsignedp);
9520 gcc_assert (temp);
9521 return temp;
9522 }
9523
9524 case VEC_UNPACK_FLOAT_HI_EXPR:
9525 case VEC_UNPACK_FLOAT_LO_EXPR:
9526 {
9527 op0 = expand_normal (treeop0);
9528 /* The signedness is determined from input operand. */
9529 temp = expand_widen_pattern_expr
9530 (ops, op0, NULL_RTX, NULL_RTX,
9531 target, TYPE_UNSIGNED (TREE_TYPE (treeop0)));
9532
9533 gcc_assert (temp);
9534 return temp;
9535 }
9536
9537 case VEC_WIDEN_MULT_HI_EXPR:
9538 case VEC_WIDEN_MULT_LO_EXPR:
9539 case VEC_WIDEN_MULT_EVEN_EXPR:
9540 case VEC_WIDEN_MULT_ODD_EXPR:
9541 case VEC_WIDEN_LSHIFT_HI_EXPR:
9542 case VEC_WIDEN_LSHIFT_LO_EXPR:
9543 expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9544 target = expand_widen_pattern_expr (ops, op0, op1, NULL_RTX,
9545 target, unsignedp);
9546 gcc_assert (target);
9547 return target;
9548
9549 case VEC_PACK_SAT_EXPR:
9550 case VEC_PACK_FIX_TRUNC_EXPR:
9551 mode = TYPE_MODE (TREE_TYPE (treeop0));
9552 goto binop;
9553
9554 case VEC_PACK_TRUNC_EXPR:
9555 if (VECTOR_BOOLEAN_TYPE_P (type)
9556 && VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (treeop0))
9557 && mode == TYPE_MODE (TREE_TYPE (treeop0))
9558 && SCALAR_INT_MODE_P (mode))
9559 {
9560 class expand_operand eops[4];
9561 machine_mode imode = TYPE_MODE (TREE_TYPE (treeop0));
9562 expand_operands (treeop0, treeop1,
9563 subtarget, &op0, &op1, EXPAND_NORMAL);
9564 this_optab = vec_pack_sbool_trunc_optab;
9565 enum insn_code icode = optab_handler (this_optab, imode);
9566 create_output_operand (&eops[0], target, mode);
9567 create_convert_operand_from (&eops[1], op0, imode, false);
9568 create_convert_operand_from (&eops[2], op1, imode, false);
9569 temp = GEN_INT (TYPE_VECTOR_SUBPARTS (type).to_constant ());
9570 create_input_operand (&eops[3], temp, imode);
9571 expand_insn (icode, 4, eops);
9572 return eops[0].value;
9573 }
9574 mode = TYPE_MODE (TREE_TYPE (treeop0));
9575 goto binop;
9576
9577 case VEC_PACK_FLOAT_EXPR:
9578 mode = TYPE_MODE (TREE_TYPE (treeop0));
9579 expand_operands (treeop0, treeop1,
9580 subtarget, &op0, &op1, EXPAND_NORMAL);
9581 this_optab = optab_for_tree_code (code, TREE_TYPE (treeop0),
9582 optab_default);
9583 target = expand_binop (mode, this_optab, op0, op1, target,
9584 TYPE_UNSIGNED (TREE_TYPE (treeop0)),
9585 OPTAB_LIB_WIDEN);
9586 gcc_assert (target);
9587 return target;
9588
9589 case VEC_PERM_EXPR:
9590 {
9591 expand_operands (treeop0, treeop1, target, &op0, &op1, EXPAND_NORMAL);
9592 vec_perm_builder sel;
9593 if (TREE_CODE (treeop2) == VECTOR_CST
9594 && tree_to_vec_perm_builder (&sel, treeop2))
9595 {
9596 machine_mode sel_mode = TYPE_MODE (TREE_TYPE (treeop2));
9597 temp = expand_vec_perm_const (mode, op0, op1, sel,
9598 sel_mode, target);
9599 }
9600 else
9601 {
9602 op2 = expand_normal (treeop2);
9603 temp = expand_vec_perm_var (mode, op0, op1, op2, target);
9604 }
9605 gcc_assert (temp);
9606 return temp;
9607 }
9608
9609 case DOT_PROD_EXPR:
9610 {
9611 tree oprnd0 = treeop0;
9612 tree oprnd1 = treeop1;
9613 tree oprnd2 = treeop2;
9614 rtx op2;
9615
9616 expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9617 op2 = expand_normal (oprnd2);
9618 target = expand_widen_pattern_expr (ops, op0, op1, op2,
9619 target, unsignedp);
9620 return target;
9621 }
9622
9623 case SAD_EXPR:
9624 {
9625 tree oprnd0 = treeop0;
9626 tree oprnd1 = treeop1;
9627 tree oprnd2 = treeop2;
9628 rtx op2;
9629
9630 expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9631 op2 = expand_normal (oprnd2);
9632 target = expand_widen_pattern_expr (ops, op0, op1, op2,
9633 target, unsignedp);
9634 return target;
9635 }
9636
9637 case REALIGN_LOAD_EXPR:
9638 {
9639 tree oprnd0 = treeop0;
9640 tree oprnd1 = treeop1;
9641 tree oprnd2 = treeop2;
9642 rtx op2;
9643
9644 this_optab = optab_for_tree_code (code, type, optab_default);
9645 expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9646 op2 = expand_normal (oprnd2);
9647 temp = expand_ternary_op (mode, this_optab, op0, op1, op2,
9648 target, unsignedp);
9649 gcc_assert (temp);
9650 return temp;
9651 }
9652
9653 case COND_EXPR:
9654 {
9655 /* A COND_EXPR with its type being VOID_TYPE represents a
9656 conditional jump and is handled in
9657 expand_gimple_cond_expr. */
9658 gcc_assert (!VOID_TYPE_P (type));
9659
9660 /* Note that COND_EXPRs whose type is a structure or union
9661 are required to be constructed to contain assignments of
9662 a temporary variable, so that we can evaluate them here
9663 for side effect only. If type is void, we must do likewise. */
9664
9665 gcc_assert (!TREE_ADDRESSABLE (type)
9666 && !ignore
9667 && TREE_TYPE (treeop1) != void_type_node
9668 && TREE_TYPE (treeop2) != void_type_node);
9669
9670 temp = expand_cond_expr_using_cmove (treeop0, treeop1, treeop2);
9671 if (temp)
9672 return temp;
9673
9674 /* If we are not to produce a result, we have no target. Otherwise,
9675 if a target was specified use it; it will not be used as an
9676 intermediate target unless it is safe. If no target, use a
9677 temporary. */
9678
9679 if (modifier != EXPAND_STACK_PARM
9680 && original_target
9681 && safe_from_p (original_target, treeop0, 1)
9682 && GET_MODE (original_target) == mode
9683 && !MEM_P (original_target))
9684 temp = original_target;
9685 else
9686 temp = assign_temp (type, 0, 1);
9687
9688 do_pending_stack_adjust ();
9689 NO_DEFER_POP;
9690 rtx_code_label *lab0 = gen_label_rtx ();
9691 rtx_code_label *lab1 = gen_label_rtx ();
9692 jumpifnot (treeop0, lab0,
9693 profile_probability::uninitialized ());
9694 store_expr (treeop1, temp,
9695 modifier == EXPAND_STACK_PARM,
9696 false, false);
9697
9698 emit_jump_insn (targetm.gen_jump (lab1));
9699 emit_barrier ();
9700 emit_label (lab0);
9701 store_expr (treeop2, temp,
9702 modifier == EXPAND_STACK_PARM,
9703 false, false);
9704
9705 emit_label (lab1);
9706 OK_DEFER_POP;
9707 return temp;
9708 }
9709
9710 case VEC_COND_EXPR:
9711 target = expand_vec_cond_expr (type, treeop0, treeop1, treeop2, target);
9712 return target;
9713
9714 case VEC_DUPLICATE_EXPR:
9715 op0 = expand_expr (treeop0, NULL_RTX, VOIDmode, modifier);
9716 target = expand_vector_broadcast (mode, op0);
9717 gcc_assert (target);
9718 return target;
9719
9720 case VEC_SERIES_EXPR:
9721 expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, modifier);
9722 return expand_vec_series_expr (mode, op0, op1, target);
9723
9724 case BIT_INSERT_EXPR:
9725 {
9726 unsigned bitpos = tree_to_uhwi (treeop2);
9727 unsigned bitsize;
9728 if (INTEGRAL_TYPE_P (TREE_TYPE (treeop1)))
9729 bitsize = TYPE_PRECISION (TREE_TYPE (treeop1));
9730 else
9731 bitsize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (treeop1)));
9732 rtx op0 = expand_normal (treeop0);
9733 rtx op1 = expand_normal (treeop1);
9734 rtx dst = gen_reg_rtx (mode);
9735 emit_move_insn (dst, op0);
9736 store_bit_field (dst, bitsize, bitpos, 0, 0,
9737 TYPE_MODE (TREE_TYPE (treeop1)), op1, false);
9738 return dst;
9739 }
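	/* Illustrative sketch, not from the GCC sources: for a GIMPLE
	   statement of roughly the form

	     r_2 = BIT_INSERT_EXPR <w_1, b_3, 16>;   // b_3 is an 8-bit value

	   the code above copies w_1 into a fresh pseudo and stores b_3 into
	   bits 16..23 of it with store_bit_field, leaving the remaining 24
	   bits of a 32-bit w_1 unchanged.  */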
9740
9741 default:
9742 gcc_unreachable ();
9743 }
9744
9745 /* Here to do an ordinary binary operator. */
9746 binop:
9747 expand_operands (treeop0, treeop1,
9748 subtarget, &op0, &op1, EXPAND_NORMAL);
9749 binop2:
9750 this_optab = optab_for_tree_code (code, type, optab_default);
9751 binop3:
9752 if (modifier == EXPAND_STACK_PARM)
9753 target = 0;
9754 temp = expand_binop (mode, this_optab, op0, op1, target,
9755 unsignedp, OPTAB_LIB_WIDEN);
9756 gcc_assert (temp);
9757 /* Bitwise operations do not need bitfield reduction as we expect
9758 their operands to be properly truncated. */
9759 if (code == BIT_XOR_EXPR
9760 || code == BIT_AND_EXPR
9761 || code == BIT_IOR_EXPR)
9762 return temp;
9763 return REDUCE_BIT_FIELD (temp);
9764 }
9765 #undef REDUCE_BIT_FIELD
9766
9767
9768 /* Return TRUE if expression STMT is suitable for replacement.
9769 Never consider memory loads as replaceable, because those don't ever lead
9770 into constant expressions. */
9771
9772 static bool
9773 stmt_is_replaceable_p (gimple *stmt)
9774 {
9775 if (ssa_is_replaceable_p (stmt))
9776 {
9777 /* Don't move around loads. */
9778 if (!gimple_assign_single_p (stmt)
9779 || is_gimple_val (gimple_assign_rhs1 (stmt)))
9780 return true;
9781 }
9782 return false;
9783 }
9784
9785 rtx
9786 expand_expr_real_1 (tree exp, rtx target, machine_mode tmode,
9787 enum expand_modifier modifier, rtx *alt_rtl,
9788 bool inner_reference_p)
9789 {
9790 rtx op0, op1, temp, decl_rtl;
9791 tree type;
9792 int unsignedp;
9793 machine_mode mode, dmode;
9794 enum tree_code code = TREE_CODE (exp);
9795 rtx subtarget, original_target;
9796 int ignore;
9797 tree context;
9798 bool reduce_bit_field;
9799 location_t loc = EXPR_LOCATION (exp);
9800 struct separate_ops ops;
9801 tree treeop0, treeop1, treeop2;
9802 tree ssa_name = NULL_TREE;
9803 gimple *g;
9804
9805 type = TREE_TYPE (exp);
9806 mode = TYPE_MODE (type);
9807 unsignedp = TYPE_UNSIGNED (type);
9808
9809 treeop0 = treeop1 = treeop2 = NULL_TREE;
9810 if (!VL_EXP_CLASS_P (exp))
9811 switch (TREE_CODE_LENGTH (code))
9812 {
9813 default:
9814 case 3: treeop2 = TREE_OPERAND (exp, 2); /* FALLTHRU */
9815 case 2: treeop1 = TREE_OPERAND (exp, 1); /* FALLTHRU */
9816 case 1: treeop0 = TREE_OPERAND (exp, 0); /* FALLTHRU */
9817 case 0: break;
9818 }
9819 ops.code = code;
9820 ops.type = type;
9821 ops.op0 = treeop0;
9822 ops.op1 = treeop1;
9823 ops.op2 = treeop2;
9824 ops.location = loc;
9825
9826 ignore = (target == const0_rtx
9827 || ((CONVERT_EXPR_CODE_P (code)
9828 || code == COND_EXPR || code == VIEW_CONVERT_EXPR)
9829 && TREE_CODE (type) == VOID_TYPE));
9830
9831 /* An operation in what may be a bit-field type needs the
9832 result to be reduced to the precision of the bit-field type,
9833 which is narrower than that of the type's mode. */
9834 reduce_bit_field = (!ignore
9835 && INTEGRAL_TYPE_P (type)
9836 && !type_has_mode_precision_p (type));
9837
9838 /* If we are going to ignore this result, we need only do something
9839 if there is a side-effect somewhere in the expression. If there
9840 is, short-circuit the most common cases here. Note that we must
9841 not call expand_expr with anything but const0_rtx in case this
9842 is an initial expansion of a size that contains a PLACEHOLDER_EXPR. */
9843
9844 if (ignore)
9845 {
9846 if (! TREE_SIDE_EFFECTS (exp))
9847 return const0_rtx;
9848
9849 /* Ensure we reference a volatile object even if value is ignored, but
9850 don't do this if all we are doing is taking its address. */
9851 if (TREE_THIS_VOLATILE (exp)
9852 && TREE_CODE (exp) != FUNCTION_DECL
9853 && mode != VOIDmode && mode != BLKmode
9854 && modifier != EXPAND_CONST_ADDRESS)
9855 {
9856 temp = expand_expr (exp, NULL_RTX, VOIDmode, modifier);
9857 if (MEM_P (temp))
9858 copy_to_reg (temp);
9859 return const0_rtx;
9860 }
9861
9862 if (TREE_CODE_CLASS (code) == tcc_unary
9863 || code == BIT_FIELD_REF
9864 || code == COMPONENT_REF
9865 || code == INDIRECT_REF)
9866 return expand_expr (treeop0, const0_rtx, VOIDmode,
9867 modifier);
9868
9869 else if (TREE_CODE_CLASS (code) == tcc_binary
9870 || TREE_CODE_CLASS (code) == tcc_comparison
9871 || code == ARRAY_REF || code == ARRAY_RANGE_REF)
9872 {
9873 expand_expr (treeop0, const0_rtx, VOIDmode, modifier);
9874 expand_expr (treeop1, const0_rtx, VOIDmode, modifier);
9875 return const0_rtx;
9876 }
9877
9878 target = 0;
9879 }
9880
9881 if (reduce_bit_field && modifier == EXPAND_STACK_PARM)
9882 target = 0;
9883
9884 /* Use subtarget as the target for operand 0 of a binary operation. */
9885 subtarget = get_subtarget (target);
9886 original_target = target;
9887
9888 switch (code)
9889 {
9890 case LABEL_DECL:
9891 {
9892 tree function = decl_function_context (exp);
9893
9894 temp = label_rtx (exp);
9895 temp = gen_rtx_LABEL_REF (Pmode, temp);
9896
9897 if (function != current_function_decl
9898 && function != 0)
9899 LABEL_REF_NONLOCAL_P (temp) = 1;
9900
9901 temp = gen_rtx_MEM (FUNCTION_MODE, temp);
9902 return temp;
9903 }
9904
9905 case SSA_NAME:
9906 /* ??? ivopts calls the expander without any preparation from
9907 out-of-ssa. So fake instructions as if this were an access to
9908 the base variable. This unnecessarily allocates a pseudo; see how
9909 we can reuse it, if partition base vars have it set already. */
9910 if (!currently_expanding_to_rtl)
9911 {
9912 tree var = SSA_NAME_VAR (exp);
9913 if (var && DECL_RTL_SET_P (var))
9914 return DECL_RTL (var);
9915 return gen_raw_REG (TYPE_MODE (TREE_TYPE (exp)),
9916 LAST_VIRTUAL_REGISTER + 1);
9917 }
9918
9919 g = get_gimple_for_ssa_name (exp);
9920 /* For EXPAND_INITIALIZER try harder to get something simpler. */
9921 if (g == NULL
9922 && modifier == EXPAND_INITIALIZER
9923 && !SSA_NAME_IS_DEFAULT_DEF (exp)
9924 && (optimize || !SSA_NAME_VAR (exp)
9925 || DECL_IGNORED_P (SSA_NAME_VAR (exp)))
9926 && stmt_is_replaceable_p (SSA_NAME_DEF_STMT (exp)))
9927 g = SSA_NAME_DEF_STMT (exp);
9928 if (g)
9929 {
9930 rtx r;
9931 location_t saved_loc = curr_insn_location ();
9932 location_t loc = gimple_location (g);
9933 if (loc != UNKNOWN_LOCATION)
9934 set_curr_insn_location (loc);
9935 ops.code = gimple_assign_rhs_code (g);
9936 switch (get_gimple_rhs_class (ops.code))
9937 {
9938 case GIMPLE_TERNARY_RHS:
9939 ops.op2 = gimple_assign_rhs3 (g);
9940 /* Fallthru */
9941 case GIMPLE_BINARY_RHS:
9942 ops.op1 = gimple_assign_rhs2 (g);
9943
9944 /* Try to expand a conditional compare. */
9945 if (targetm.gen_ccmp_first)
9946 {
9947 gcc_checking_assert (targetm.gen_ccmp_next != NULL);
9948 r = expand_ccmp_expr (g, mode);
9949 if (r)
9950 break;
9951 }
9952 /* Fallthru */
9953 case GIMPLE_UNARY_RHS:
9954 ops.op0 = gimple_assign_rhs1 (g);
9955 ops.type = TREE_TYPE (gimple_assign_lhs (g));
9956 ops.location = loc;
9957 r = expand_expr_real_2 (&ops, target, tmode, modifier);
9958 break;
9959 case GIMPLE_SINGLE_RHS:
9960 {
9961 r = expand_expr_real (gimple_assign_rhs1 (g), target,
9962 tmode, modifier, alt_rtl,
9963 inner_reference_p);
9964 break;
9965 }
9966 default:
9967 gcc_unreachable ();
9968 }
9969 set_curr_insn_location (saved_loc);
9970 if (REG_P (r) && !REG_EXPR (r))
9971 set_reg_attrs_for_decl_rtl (SSA_NAME_VAR (exp), r);
9972 return r;
9973 }
9974
9975 ssa_name = exp;
9976 decl_rtl = get_rtx_for_ssa_name (ssa_name);
9977 exp = SSA_NAME_VAR (ssa_name);
9978 goto expand_decl_rtl;
9979
9980 case PARM_DECL:
9981 case VAR_DECL:
9982 /* If a static var's type was incomplete when the decl was written,
9983 but the type is complete now, lay out the decl now. */
9984 if (DECL_SIZE (exp) == 0
9985 && COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (TREE_TYPE (exp))
9986 && (TREE_STATIC (exp) || DECL_EXTERNAL (exp)))
9987 layout_decl (exp, 0);
9988
9989 /* fall through */
9990
9991 case FUNCTION_DECL:
9992 case RESULT_DECL:
9993 decl_rtl = DECL_RTL (exp);
9994 expand_decl_rtl:
9995 gcc_assert (decl_rtl);
9996
9997 /* DECL_MODE might change when TYPE_MODE depends on attribute target
9998 settings for VECTOR_TYPE_P that might switch for the function. */
9999 if (currently_expanding_to_rtl
10000 && code == VAR_DECL && MEM_P (decl_rtl)
10001 && VECTOR_TYPE_P (type) && exp && DECL_MODE (exp) != mode)
10002 decl_rtl = change_address (decl_rtl, TYPE_MODE (type), 0);
10003 else
10004 decl_rtl = copy_rtx (decl_rtl);
10005
10006 /* Record writes to register variables. */
10007 if (modifier == EXPAND_WRITE
10008 && REG_P (decl_rtl)
10009 && HARD_REGISTER_P (decl_rtl))
10010 add_to_hard_reg_set (&crtl->asm_clobbers,
10011 GET_MODE (decl_rtl), REGNO (decl_rtl));
10012
10013 /* Ensure the variable is marked as used even if it doesn't go
10014 through a parser. If it hasn't been used yet, write out an
10015 external definition. */
10016 if (exp)
10017 TREE_USED (exp) = 1;
10018
10019 /* Show we haven't gotten RTL for this yet. */
10020 temp = 0;
10021
10022 /* Variables inherited from containing functions should have
10023 been lowered by this point. */
10024 if (exp)
10025 context = decl_function_context (exp);
10026 gcc_assert (!exp
10027 || SCOPE_FILE_SCOPE_P (context)
10028 || context == current_function_decl
10029 || TREE_STATIC (exp)
10030 || DECL_EXTERNAL (exp)
10031 /* ??? C++ creates functions that are not TREE_STATIC. */
10032 || TREE_CODE (exp) == FUNCTION_DECL);
10033
10034 /* This is the case of an array whose size is to be determined
10035 from its initializer, while the initializer is still being parsed.
10036 ??? We aren't parsing while expanding anymore. */
10037
10038 if (MEM_P (decl_rtl) && REG_P (XEXP (decl_rtl, 0)))
10039 temp = validize_mem (decl_rtl);
10040
10041 /* If DECL_RTL is memory, we are in the normal case; if the
10042 address is not valid, get the address into a register. */
10043
10044 else if (MEM_P (decl_rtl) && modifier != EXPAND_INITIALIZER)
10045 {
10046 if (alt_rtl)
10047 *alt_rtl = decl_rtl;
10048 decl_rtl = use_anchored_address (decl_rtl);
10049 if (modifier != EXPAND_CONST_ADDRESS
10050 && modifier != EXPAND_SUM
10051 && !memory_address_addr_space_p (exp ? DECL_MODE (exp)
10052 : GET_MODE (decl_rtl),
10053 XEXP (decl_rtl, 0),
10054 MEM_ADDR_SPACE (decl_rtl)))
10055 temp = replace_equiv_address (decl_rtl,
10056 copy_rtx (XEXP (decl_rtl, 0)));
10057 }
10058
10059 /* If we got something, return it. But first, set the alignment
10060 if the address is a register. */
10061 if (temp != 0)
10062 {
10063 if (exp && MEM_P (temp) && REG_P (XEXP (temp, 0)))
10064 mark_reg_pointer (XEXP (temp, 0), DECL_ALIGN (exp));
10065
10066 return temp;
10067 }
10068
10069 if (exp)
10070 dmode = DECL_MODE (exp);
10071 else
10072 dmode = TYPE_MODE (TREE_TYPE (ssa_name));
10073
10074 /* If the mode of DECL_RTL does not match that of the decl,
10075 there are two cases: we are dealing with a BLKmode value
10076 that is returned in a register, or we are dealing with
10077 a promoted value. In the latter case, return a SUBREG
10078 of the wanted mode, but mark it so that we know that it
10079 was already extended. */
10080 if (REG_P (decl_rtl)
10081 && dmode != BLKmode
10082 && GET_MODE (decl_rtl) != dmode)
10083 {
10084 machine_mode pmode;
10085
10086 /* Get the signedness to be used for this variable. Ensure we get
10087 the same mode we got when the variable was declared. */
10088 if (code != SSA_NAME)
10089 pmode = promote_decl_mode (exp, &unsignedp);
10090 else if ((g = SSA_NAME_DEF_STMT (ssa_name))
10091 && gimple_code (g) == GIMPLE_CALL
10092 && !gimple_call_internal_p (g))
10093 pmode = promote_function_mode (type, mode, &unsignedp,
10094 gimple_call_fntype (g),
10095 2);
10096 else
10097 pmode = promote_ssa_mode (ssa_name, &unsignedp);
10098 gcc_assert (GET_MODE (decl_rtl) == pmode);
10099
10100 temp = gen_lowpart_SUBREG (mode, decl_rtl);
10101 SUBREG_PROMOTED_VAR_P (temp) = 1;
10102 SUBREG_PROMOTED_SET (temp, unsignedp);
10103 return temp;
10104 }
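      /* Illustrative sketch, not from the GCC sources: on a target that
	 promotes subword arguments (as many RISC targets do), for

	   short f (short a) { return a; }

	 DECL_RTL of A is an SImode register while DECL_MODE (A) is HImode,
	 so the block above returns (subreg:HI (reg:SI ...) 0) with
	 SUBREG_PROMOTED_VAR_P set, letting later code skip redundant
	 extensions.  */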
10105
10106 return decl_rtl;
10107
10108 case INTEGER_CST:
10109 {
10110 /* Given that TYPE_PRECISION (type) is not always equal to
10111 GET_MODE_PRECISION (TYPE_MODE (type)), we need to extend from
10112 the former to the latter according to the signedness of the
10113 type. */
10114 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
10115 temp = immed_wide_int_const
10116 (wi::to_wide (exp, GET_MODE_PRECISION (mode)), mode);
10117 return temp;
10118 }
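	/* Illustrative sketch, not from the GCC sources: for a signed
	   bit-field type of precision 3 (mode QImode), e.g.

	     struct s { int x : 3; } v;   // v.x == -1

	   the constant -1 is the 3-bit pattern 0b111; wi::to_wide with the
	   mode precision sign-extends it, so the constant emitted above is
	   (const_int -1) rather than 7.  */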
10119
10120 case VECTOR_CST:
10121 {
10122 tree tmp = NULL_TREE;
10123 if (VECTOR_MODE_P (mode))
10124 return const_vector_from_tree (exp);
10125 scalar_int_mode int_mode;
10126 if (is_int_mode (mode, &int_mode))
10127 {
10128 if (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (exp)))
10129 return const_scalar_mask_from_tree (int_mode, exp);
10130 else
10131 {
10132 tree type_for_mode
10133 = lang_hooks.types.type_for_mode (int_mode, 1);
10134 if (type_for_mode)
10135 tmp = fold_unary_loc (loc, VIEW_CONVERT_EXPR,
10136 type_for_mode, exp);
10137 }
10138 }
10139 if (!tmp)
10140 {
10141 vec<constructor_elt, va_gc> *v;
10142 /* Constructors need to be fixed-length. FIXME. */
10143 unsigned int nunits = VECTOR_CST_NELTS (exp).to_constant ();
10144 vec_alloc (v, nunits);
10145 for (unsigned int i = 0; i < nunits; ++i)
10146 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, VECTOR_CST_ELT (exp, i));
10147 tmp = build_constructor (type, v);
10148 }
10149 return expand_expr (tmp, ignore ? const0_rtx : target,
10150 tmode, modifier);
10151 }
10152
10153 case CONST_DECL:
10154 if (modifier == EXPAND_WRITE)
10155 {
10156 /* Writing into CONST_DECL is always invalid, but handle it
10157 gracefully. */
10158 addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (exp));
10159 scalar_int_mode address_mode = targetm.addr_space.address_mode (as);
10160 op0 = expand_expr_addr_expr_1 (exp, NULL_RTX, address_mode,
10161 EXPAND_NORMAL, as);
10162 op0 = memory_address_addr_space (mode, op0, as);
10163 temp = gen_rtx_MEM (mode, op0);
10164 set_mem_addr_space (temp, as);
10165 return temp;
10166 }
10167 return expand_expr (DECL_INITIAL (exp), target, VOIDmode, modifier);
10168
10169 case REAL_CST:
10170 /* If optimized, generate immediate CONST_DOUBLE
10171 which will be turned into memory by reload if necessary.
10172
10173 We used to force a register so that loop.c could see it. But
10174 this does not allow gen_* patterns to perform optimizations with
10175 the constants. It also produces two insns in cases like "x = 1.0;".
10176 On most machines, floating-point constants are not permitted in
10177 many insns, so we'd end up copying it to a register in any case.
10178
10179 Now, we do the copying in expand_binop, if appropriate. */
10180 return const_double_from_real_value (TREE_REAL_CST (exp),
10181 TYPE_MODE (TREE_TYPE (exp)));
10182
10183 case FIXED_CST:
10184 return CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (exp),
10185 TYPE_MODE (TREE_TYPE (exp)));
10186
10187 case COMPLEX_CST:
10188 /* Handle evaluating a complex constant in a CONCAT target. */
10189 if (original_target && GET_CODE (original_target) == CONCAT)
10190 {
10191 machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (exp)));
10192 rtx rtarg, itarg;
10193
10194 rtarg = XEXP (original_target, 0);
10195 itarg = XEXP (original_target, 1);
10196
10197 /* Move the real and imaginary parts separately. */
10198 op0 = expand_expr (TREE_REALPART (exp), rtarg, mode, EXPAND_NORMAL);
10199 op1 = expand_expr (TREE_IMAGPART (exp), itarg, mode, EXPAND_NORMAL);
10200
10201 if (op0 != rtarg)
10202 emit_move_insn (rtarg, op0);
10203 if (op1 != itarg)
10204 emit_move_insn (itarg, op1);
10205
10206 return original_target;
10207 }
10208
10209 /* fall through */
10210
10211 case STRING_CST:
10212 temp = expand_expr_constant (exp, 1, modifier);
10213
10214 /* temp contains a constant address.
10215 On RISC machines where a constant address isn't valid,
10216 make some insns to get that address into a register. */
10217 if (modifier != EXPAND_CONST_ADDRESS
10218 && modifier != EXPAND_INITIALIZER
10219 && modifier != EXPAND_SUM
10220 && ! memory_address_addr_space_p (mode, XEXP (temp, 0),
10221 MEM_ADDR_SPACE (temp)))
10222 return replace_equiv_address (temp,
10223 copy_rtx (XEXP (temp, 0)));
10224 return temp;
10225
10226 case POLY_INT_CST:
10227 return immed_wide_int_const (poly_int_cst_value (exp), mode);
10228
10229 case SAVE_EXPR:
10230 {
10231 tree val = treeop0;
10232 rtx ret = expand_expr_real_1 (val, target, tmode, modifier, alt_rtl,
10233 inner_reference_p);
10234
10235 if (!SAVE_EXPR_RESOLVED_P (exp))
10236 {
10237 /* We can indeed still hit this case, typically via builtin
10238 expanders calling save_expr immediately before expanding
10239 something. Assume this means that we only have to deal
10240 with non-BLKmode values. */
10241 gcc_assert (GET_MODE (ret) != BLKmode);
10242
10243 val = build_decl (curr_insn_location (),
10244 VAR_DECL, NULL, TREE_TYPE (exp));
10245 DECL_ARTIFICIAL (val) = 1;
10246 DECL_IGNORED_P (val) = 1;
10247 treeop0 = val;
10248 TREE_OPERAND (exp, 0) = treeop0;
10249 SAVE_EXPR_RESOLVED_P (exp) = 1;
10250
10251 if (!CONSTANT_P (ret))
10252 ret = copy_to_reg (ret);
10253 SET_DECL_RTL (val, ret);
10254 }
10255
10256 return ret;
10257 }
10258
10259
10260 case CONSTRUCTOR:
10261 /* If we don't need the result, just ensure we evaluate any
10262 subexpressions. */
10263 if (ignore)
10264 {
10265 unsigned HOST_WIDE_INT idx;
10266 tree value;
10267
10268 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), idx, value)
10269 expand_expr (value, const0_rtx, VOIDmode, EXPAND_NORMAL);
10270
10271 return const0_rtx;
10272 }
10273
10274 return expand_constructor (exp, target, modifier, false);
10275
10276 case TARGET_MEM_REF:
10277 {
10278 addr_space_t as
10279 = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
10280 enum insn_code icode;
10281 unsigned int align;
10282
10283 op0 = addr_for_mem_ref (exp, as, true);
10284 op0 = memory_address_addr_space (mode, op0, as);
10285 temp = gen_rtx_MEM (mode, op0);
10286 set_mem_attributes (temp, exp, 0);
10287 set_mem_addr_space (temp, as);
10288 align = get_object_alignment (exp);
10289 if (modifier != EXPAND_WRITE
10290 && modifier != EXPAND_MEMORY
10291 && mode != BLKmode
10292 && align < GET_MODE_ALIGNMENT (mode)
10293 /* If the target does not have special handling for unaligned
10294 loads of this mode, then it can use regular moves for them. */
10295 && ((icode = optab_handler (movmisalign_optab, mode))
10296 != CODE_FOR_nothing))
10297 {
10298 class expand_operand ops[2];
10299
10300 /* We've already validated the memory, and we're creating a
10301 new pseudo destination. The predicates really can't fail,
10302 nor can the generator. */
10303 create_output_operand (&ops[0], NULL_RTX, mode);
10304 create_fixed_operand (&ops[1], temp);
10305 expand_insn (icode, 2, ops);
10306 temp = ops[0].value;
10307 }
10308 return temp;
10309 }
10310
10311 case MEM_REF:
10312 {
10313 const bool reverse = REF_REVERSE_STORAGE_ORDER (exp);
10314 addr_space_t as
10315 = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
10316 machine_mode address_mode;
10317 tree base = TREE_OPERAND (exp, 0);
10318 gimple *def_stmt;
10319 enum insn_code icode;
10320 unsigned align;
10321 /* Handle expansion of non-aliased memory with non-BLKmode. That
10322 might end up in a register. */
10323 if (mem_ref_refers_to_non_mem_p (exp))
10324 {
10325 poly_int64 offset = mem_ref_offset (exp).force_shwi ();
10326 base = TREE_OPERAND (base, 0);
10327 poly_uint64 type_size;
10328 if (known_eq (offset, 0)
10329 && !reverse
10330 && poly_int_tree_p (TYPE_SIZE (type), &type_size)
10331 && known_eq (GET_MODE_BITSIZE (DECL_MODE (base)), type_size))
10332 return expand_expr (build1 (VIEW_CONVERT_EXPR, type, base),
10333 target, tmode, modifier);
10334 if (TYPE_MODE (type) == BLKmode)
10335 {
10336 temp = assign_stack_temp (DECL_MODE (base),
10337 GET_MODE_SIZE (DECL_MODE (base)));
10338 store_expr (base, temp, 0, false, false);
10339 temp = adjust_address (temp, BLKmode, offset);
10340 set_mem_size (temp, int_size_in_bytes (type));
10341 return temp;
10342 }
10343 exp = build3 (BIT_FIELD_REF, type, base, TYPE_SIZE (type),
10344 bitsize_int (offset * BITS_PER_UNIT));
10345 REF_REVERSE_STORAGE_ORDER (exp) = reverse;
10346 return expand_expr (exp, target, tmode, modifier);
10347 }
10348 address_mode = targetm.addr_space.address_mode (as);
10349 base = TREE_OPERAND (exp, 0);
10350 if ((def_stmt = get_def_for_expr (base, BIT_AND_EXPR)))
10351 {
10352 tree mask = gimple_assign_rhs2 (def_stmt);
10353 base = build2 (BIT_AND_EXPR, TREE_TYPE (base),
10354 gimple_assign_rhs1 (def_stmt), mask);
10355 TREE_OPERAND (exp, 0) = base;
10356 }
10357 align = get_object_alignment (exp);
10358 op0 = expand_expr (base, NULL_RTX, VOIDmode, EXPAND_SUM);
10359 op0 = memory_address_addr_space (mode, op0, as);
10360 if (!integer_zerop (TREE_OPERAND (exp, 1)))
10361 {
10362 rtx off = immed_wide_int_const (mem_ref_offset (exp), address_mode);
10363 op0 = simplify_gen_binary (PLUS, address_mode, op0, off);
10364 op0 = memory_address_addr_space (mode, op0, as);
10365 }
10366 temp = gen_rtx_MEM (mode, op0);
10367 set_mem_attributes (temp, exp, 0);
10368 set_mem_addr_space (temp, as);
10369 if (TREE_THIS_VOLATILE (exp))
10370 MEM_VOLATILE_P (temp) = 1;
10371 if (modifier != EXPAND_WRITE
10372 && modifier != EXPAND_MEMORY
10373 && !inner_reference_p
10374 && mode != BLKmode
10375 && align < GET_MODE_ALIGNMENT (mode))
10376 {
10377 if ((icode = optab_handler (movmisalign_optab, mode))
10378 != CODE_FOR_nothing)
10379 {
10380 class expand_operand ops[2];
10381
10382 /* We've already validated the memory, and we're creating a
10383 new pseudo destination. The predicates really can't fail,
10384 nor can the generator. */
10385 create_output_operand (&ops[0], NULL_RTX, mode);
10386 create_fixed_operand (&ops[1], temp);
10387 expand_insn (icode, 2, ops);
10388 temp = ops[0].value;
10389 }
10390 else if (targetm.slow_unaligned_access (mode, align))
10391 temp = extract_bit_field (temp, GET_MODE_BITSIZE (mode),
10392 0, TYPE_UNSIGNED (TREE_TYPE (exp)),
10393 (modifier == EXPAND_STACK_PARM
10394 ? NULL_RTX : target),
10395 mode, mode, false, alt_rtl);
10396 }
10397 if (reverse
10398 && modifier != EXPAND_MEMORY
10399 && modifier != EXPAND_WRITE)
10400 temp = flip_storage_order (mode, temp);
10401 return temp;
10402 }
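/* A sketch of one typical way to reach the misaligned-load handling
   above on a strict-alignment target (declarations hypothetical):

     struct __attribute__ ((packed)) s { char c; int i; };

     int
     load_packed_int (struct s *p)
     {
       int x;
       __builtin_memcpy (&x, &p->i, sizeof x);  /* folded into a MEM_REF */
       return x;                                /* with alignment 1.  */
     }

   If the target provides a movmisalign<mode> pattern it is used;
   otherwise, when targetm.slow_unaligned_access says such moves are
   slow, the value is assembled with extract_bit_field.  */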
10403
10404 case ARRAY_REF:
10405
10406 {
10407 tree array = treeop0;
10408 tree index = treeop1;
10409 tree init;
10410
10411 /* Fold an expression like: "foo"[2].
10412 This is not done in fold so it won't happen inside &.
10413 Don't fold if this is for wide characters since it's too
10414 difficult to do correctly and this is a very rare case. */
10415
10416 if (modifier != EXPAND_CONST_ADDRESS
10417 && modifier != EXPAND_INITIALIZER
10418 && modifier != EXPAND_MEMORY)
10419 {
10420 tree t = fold_read_from_constant_string (exp);
10421
10422 if (t)
10423 return expand_expr (t, target, tmode, modifier);
10424 }
10425
10426 /* If this is a constant index into a constant array,
10427 just get the value from the array. Handle both cases: when
10428 we have an explicit constructor and when our operand is a variable
10429 that was declared const. */
10430
10431 if (modifier != EXPAND_CONST_ADDRESS
10432 && modifier != EXPAND_INITIALIZER
10433 && modifier != EXPAND_MEMORY
10434 && TREE_CODE (array) == CONSTRUCTOR
10435 && ! TREE_SIDE_EFFECTS (array)
10436 && TREE_CODE (index) == INTEGER_CST)
10437 {
10438 unsigned HOST_WIDE_INT ix;
10439 tree field, value;
10440
10441 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (array), ix,
10442 field, value)
10443 if (tree_int_cst_equal (field, index))
10444 {
10445 if (!TREE_SIDE_EFFECTS (value))
10446 return expand_expr (fold (value), target, tmode, modifier);
10447 break;
10448 }
10449 }
10450
10451 else if (optimize >= 1
10452 && modifier != EXPAND_CONST_ADDRESS
10453 && modifier != EXPAND_INITIALIZER
10454 && modifier != EXPAND_MEMORY
10455 && TREE_READONLY (array) && ! TREE_SIDE_EFFECTS (array)
10456 && TREE_CODE (index) == INTEGER_CST
10457 && (VAR_P (array) || TREE_CODE (array) == CONST_DECL)
10458 && (init = ctor_for_folding (array)) != error_mark_node)
10459 {
10460 if (init == NULL_TREE)
10461 {
10462 tree value = build_zero_cst (type);
10463 if (TREE_CODE (value) == CONSTRUCTOR)
10464 {
10465 /* If VALUE is a CONSTRUCTOR, this optimization is only
10466 useful if this doesn't store the CONSTRUCTOR into
10467 memory. If it does, it is more efficient to just
10468 load the data from the array directly. */
10469 rtx ret = expand_constructor (value, target,
10470 modifier, true);
10471 if (ret == NULL_RTX)
10472 value = NULL_TREE;
10473 }
10474
10475 if (value)
10476 return expand_expr (value, target, tmode, modifier);
10477 }
10478 else if (TREE_CODE (init) == CONSTRUCTOR)
10479 {
10480 unsigned HOST_WIDE_INT ix;
10481 tree field, value;
10482
10483 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (init), ix,
10484 field, value)
10485 if (tree_int_cst_equal (field, index))
10486 {
10487 if (TREE_SIDE_EFFECTS (value))
10488 break;
10489
10490 if (TREE_CODE (value) == CONSTRUCTOR)
10491 {
10492 /* If VALUE is a CONSTRUCTOR, this
10493 optimization is only useful if
10494 this doesn't store the CONSTRUCTOR
10495 into memory. If it does, it is more
10496 efficient to just load the data from
10497 the array directly. */
10498 rtx ret = expand_constructor (value, target,
10499 modifier, true);
10500 if (ret == NULL_RTX)
10501 break;
10502 }
10503
10504 return
10505 expand_expr (fold (value), target, tmode, modifier);
10506 }
10507 }
10508 else if (TREE_CODE (init) == STRING_CST)
10509 {
10510 tree low_bound = array_ref_low_bound (exp);
10511 tree index1 = fold_convert_loc (loc, sizetype, treeop1);
10512
10513 /* Optimize the special case of a zero lower bound.
10514
10515 We convert the lower bound to sizetype to avoid problems
10516 with constant folding. E.g. suppose the lower bound is
10517 1 and its mode is QI. Without the conversion
10518 (ARRAY + (INDEX - (unsigned char)1))
10519 becomes
10520 (ARRAY + (-(unsigned char)1) + INDEX)
10521 which becomes
10522 (ARRAY + 255 + INDEX). Oops! */
10523 if (!integer_zerop (low_bound))
10524 index1 = size_diffop_loc (loc, index1,
10525 fold_convert_loc (loc, sizetype,
10526 low_bound));
10527
10528 if (tree_fits_uhwi_p (index1)
10529 && compare_tree_int (index1, TREE_STRING_LENGTH (init)) < 0)
10530 {
10531 tree type = TREE_TYPE (TREE_TYPE (init));
10532 scalar_int_mode mode;
10533
10534 if (is_int_mode (TYPE_MODE (type), &mode)
10535 && GET_MODE_SIZE (mode) == 1)
10536 return gen_int_mode (TREE_STRING_POINTER (init)
10537 [TREE_INT_CST_LOW (index1)],
10538 mode);
10539 }
10540 }
10541 }
10542 }
10543 goto normal_inner_ref;
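/* Source-level sketches (hypothetical declarations) of the constant
   foldings above:

     static const int tbl[3] = { 10, 20, 30 };
     int  f (void) { return tbl[1]; }    // folded to 20 (at -O1 and up)
     char g (void) { return "foo"[2]; }  // folded to 'o'

   Wide-character strings, variable indices and constructors with side
   effects fall through to normal_inner_ref.  */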
10544
10545 case COMPONENT_REF:
10546 /* If the operand is a CONSTRUCTOR, we can just extract the
10547 appropriate field if it is present. */
10548 if (TREE_CODE (treeop0) == CONSTRUCTOR)
10549 {
10550 unsigned HOST_WIDE_INT idx;
10551 tree field, value;
10552 scalar_int_mode field_mode;
10553
10554 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (treeop0),
10555 idx, field, value)
10556 if (field == treeop1
10557 /* We can normally use the value of the field in the
10558 CONSTRUCTOR. However, if this is a bitfield in
10559 an integral mode that we can fit in a HOST_WIDE_INT,
10560 we must mask only the number of bits in the bitfield,
10561 since this is done implicitly by the constructor. If
10562 the bitfield does not meet either of those conditions,
10563 we can't do this optimization. */
10564 && (! DECL_BIT_FIELD (field)
10565 || (is_int_mode (DECL_MODE (field), &field_mode)
10566 && (GET_MODE_PRECISION (field_mode)
10567 <= HOST_BITS_PER_WIDE_INT))))
10568 {
10569 if (DECL_BIT_FIELD (field)
10570 && modifier == EXPAND_STACK_PARM)
10571 target = 0;
10572 op0 = expand_expr (value, target, tmode, modifier);
10573 if (DECL_BIT_FIELD (field))
10574 {
10575 HOST_WIDE_INT bitsize = TREE_INT_CST_LOW (DECL_SIZE (field));
10576 scalar_int_mode imode
10577 = SCALAR_INT_TYPE_MODE (TREE_TYPE (field));
10578
10579 if (TYPE_UNSIGNED (TREE_TYPE (field)))
10580 {
10581 op1 = gen_int_mode ((HOST_WIDE_INT_1 << bitsize) - 1,
10582 imode);
10583 op0 = expand_and (imode, op0, op1, target);
10584 }
10585 else
10586 {
10587 int count = GET_MODE_PRECISION (imode) - bitsize;
10588
10589 op0 = expand_shift (LSHIFT_EXPR, imode, op0, count,
10590 target, 0);
10591 op0 = expand_shift (RSHIFT_EXPR, imode, op0, count,
10592 target, 0);
10593 }
10594 }
10595
10596 return op0;
10597 }
10598 }
10599 goto normal_inner_ref;
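/* A worked sketch of the bit-field adjustment above (names hypothetical,
   assuming a 32-bit integral mode, an 8-bit field, and GCC's modular
   conversions and arithmetic right shift of signed values):

     unsigned mask_unsigned_field (unsigned v)
     { return v & ((1u << 8) - 1); }

     int sign_extend_field (int v)
     { return (int) ((unsigned) v << 24) >> 24; }

   i.e. an unsigned field is masked down to its width, while a signed
   field is shifted left and back right to sign-extend from its width,
   matching the expand_and / expand_shift pair emitted above.  */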
10600
10601 case BIT_FIELD_REF:
10602 case ARRAY_RANGE_REF:
10603 normal_inner_ref:
10604 {
10605 machine_mode mode1, mode2;
10606 poly_int64 bitsize, bitpos, bytepos;
10607 tree offset;
10608 int reversep, volatilep = 0, must_force_mem;
10609 tree tem
10610 = get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode1,
10611 &unsignedp, &reversep, &volatilep);
10612 rtx orig_op0, memloc;
10613 bool clear_mem_expr = false;
10614
10615 /* If we got back the original object, something is wrong. Perhaps
10616 we are evaluating an expression too early. In any event, don't
10617 infinitely recurse. */
10618 gcc_assert (tem != exp);
10619
10620 /* If TEM's type is a union of variable size, pass TARGET to the inner
10621 computation, since it will need a temporary and TARGET is known
10622 to be suitable. This occurs in unchecked conversions in Ada. */
10623 orig_op0 = op0
10624 = expand_expr_real (tem,
10625 (TREE_CODE (TREE_TYPE (tem)) == UNION_TYPE
10626 && COMPLETE_TYPE_P (TREE_TYPE (tem))
10627 && (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem)))
10628 != INTEGER_CST)
10629 && modifier != EXPAND_STACK_PARM
10630 ? target : NULL_RTX),
10631 VOIDmode,
10632 modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier,
10633 NULL, true);
10634
10635 /* If the field has a mode, we want to access it in the
10636 field's mode, not the computed mode.
10637 If a MEM has VOIDmode (external with incomplete type),
10638 use BLKmode for it instead. */
10639 if (MEM_P (op0))
10640 {
10641 if (mode1 != VOIDmode)
10642 op0 = adjust_address (op0, mode1, 0);
10643 else if (GET_MODE (op0) == VOIDmode)
10644 op0 = adjust_address (op0, BLKmode, 0);
10645 }
10646
10647 mode2
10648 = CONSTANT_P (op0) ? TYPE_MODE (TREE_TYPE (tem)) : GET_MODE (op0);
10649
10650 /* Make sure bitpos is not negative; it can wreak havoc later. */
10651 if (maybe_lt (bitpos, 0))
10652 {
10653 gcc_checking_assert (offset == NULL_TREE);
10654 offset = size_int (bits_to_bytes_round_down (bitpos));
10655 bitpos = num_trailing_bits (bitpos);
10656 }
10657
10658 /* If we have either an offset, a BLKmode result, or a reference
10659 outside the underlying object, we must force it to memory.
10660 Such a case can occur in Ada if we have unchecked conversion
10661 of an expression from a scalar type to an aggregate type or
10662 for an ARRAY_RANGE_REF whose type is BLKmode, or if we were
10663 passed a partially uninitialized object or a view-conversion
10664 to a larger size. */
10665 must_force_mem = (offset
10666 || mode1 == BLKmode
10667 || (mode == BLKmode
10668 && !int_mode_for_size (bitsize, 1).exists ())
10669 || maybe_gt (bitpos + bitsize,
10670 GET_MODE_BITSIZE (mode2)));
10671
10672 /* Handle CONCAT first. */
10673 if (GET_CODE (op0) == CONCAT && !must_force_mem)
10674 {
10675 if (known_eq (bitpos, 0)
10676 && known_eq (bitsize, GET_MODE_BITSIZE (GET_MODE (op0)))
10677 && COMPLEX_MODE_P (mode1)
10678 && COMPLEX_MODE_P (GET_MODE (op0))
10679 && (GET_MODE_PRECISION (GET_MODE_INNER (mode1))
10680 == GET_MODE_PRECISION (GET_MODE_INNER (GET_MODE (op0)))))
10681 {
10682 if (reversep)
10683 op0 = flip_storage_order (GET_MODE (op0), op0);
10684 if (mode1 != GET_MODE (op0))
10685 {
10686 rtx parts[2];
10687 for (int i = 0; i < 2; i++)
10688 {
10689 rtx op = read_complex_part (op0, i != 0);
10690 if (GET_CODE (op) == SUBREG)
10691 op = force_reg (GET_MODE (op), op);
10692 rtx temp = gen_lowpart_common (GET_MODE_INNER (mode1),
10693 op);
10694 if (temp)
10695 op = temp;
10696 else
10697 {
10698 if (!REG_P (op) && !MEM_P (op))
10699 op = force_reg (GET_MODE (op), op);
10700 op = gen_lowpart (GET_MODE_INNER (mode1), op);
10701 }
10702 parts[i] = op;
10703 }
10704 op0 = gen_rtx_CONCAT (mode1, parts[0], parts[1]);
10705 }
10706 return op0;
10707 }
10708 if (known_eq (bitpos, 0)
10709 && known_eq (bitsize,
10710 GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0))))
10711 && maybe_ne (bitsize, 0))
10712 {
10713 op0 = XEXP (op0, 0);
10714 mode2 = GET_MODE (op0);
10715 }
10716 else if (known_eq (bitpos,
10717 GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0))))
10718 && known_eq (bitsize,
10719 GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 1))))
10720 && maybe_ne (bitpos, 0)
10721 && maybe_ne (bitsize, 0))
10722 {
10723 op0 = XEXP (op0, 1);
10724 bitpos = 0;
10725 mode2 = GET_MODE (op0);
10726 }
10727 else
10728 /* Otherwise force into memory. */
10729 must_force_mem = 1;
10730 }
10731
10732 /* If this is a constant, put it in a register if it is a legitimate
10733 constant and we don't need a memory reference. */
10734 if (CONSTANT_P (op0)
10735 && mode2 != BLKmode
10736 && targetm.legitimate_constant_p (mode2, op0)
10737 && !must_force_mem)
10738 op0 = force_reg (mode2, op0);
10739
10740 /* Otherwise, if this is a constant, try to force it to the constant
10741 pool. Note that back-ends, e.g. MIPS, may refuse to do so if it
10742 is a legitimate constant. */
10743 else if (CONSTANT_P (op0) && (memloc = force_const_mem (mode2, op0)))
10744 op0 = validize_mem (memloc);
10745
10746 /* Otherwise, if this is a constant, or if the object is not in memory
10747 and needs to be, put it there. */
10748 else if (CONSTANT_P (op0) || (!MEM_P (op0) && must_force_mem))
10749 {
10750 memloc = assign_temp (TREE_TYPE (tem), 1, 1);
10751 emit_move_insn (memloc, op0);
10752 op0 = memloc;
10753 clear_mem_expr = true;
10754 }
10755
10756 if (offset)
10757 {
10758 machine_mode address_mode;
10759 rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode,
10760 EXPAND_SUM);
10761
10762 gcc_assert (MEM_P (op0));
10763
10764 address_mode = get_address_mode (op0);
10765 if (GET_MODE (offset_rtx) != address_mode)
10766 {
10767 /* We cannot be sure that the RTL in offset_rtx is valid outside
10768 of a memory address context, so force it into a register
10769 before attempting to convert it to the desired mode. */
10770 offset_rtx = force_operand (offset_rtx, NULL_RTX);
10771 offset_rtx = convert_to_mode (address_mode, offset_rtx, 0);
10772 }
10773
10774 /* See the comment in expand_assignment for the rationale. */
10775 if (mode1 != VOIDmode
10776 && maybe_ne (bitpos, 0)
10777 && maybe_gt (bitsize, 0)
10778 && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
10779 && multiple_p (bitpos, bitsize)
10780 && multiple_p (bitsize, GET_MODE_ALIGNMENT (mode1))
10781 && MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode1))
10782 {
10783 op0 = adjust_address (op0, mode1, bytepos);
10784 bitpos = 0;
10785 }
10786
10787 op0 = offset_address (op0, offset_rtx,
10788 highest_pow2_factor (offset));
10789 }
10790
10791 /* If OFFSET is making OP0 more aligned than BIGGEST_ALIGNMENT,
10792 record its alignment as BIGGEST_ALIGNMENT. */
10793 if (MEM_P (op0)
10794 && known_eq (bitpos, 0)
10795 && offset != 0
10796 && is_aligning_offset (offset, tem))
10797 set_mem_align (op0, BIGGEST_ALIGNMENT);
10798
10799 /* Don't forget about volatility even if this is a bitfield. */
10800 if (MEM_P (op0) && volatilep && ! MEM_VOLATILE_P (op0))
10801 {
10802 if (op0 == orig_op0)
10803 op0 = copy_rtx (op0);
10804
10805 MEM_VOLATILE_P (op0) = 1;
10806 }
10807
10808 if (MEM_P (op0) && TREE_CODE (tem) == FUNCTION_DECL)
10809 {
10810 if (op0 == orig_op0)
10811 op0 = copy_rtx (op0);
10812
10813 set_mem_align (op0, BITS_PER_UNIT);
10814 }
10815
10816 /* In cases where an aligned union has an unaligned object
10817 as a field, we might be extracting a BLKmode value from
10818 an integer-mode (e.g., SImode) object. Handle this case
10819 by doing the extract into an object as wide as the field
10820 (which we know to be the width of a basic mode), then
10821 storing into memory, and changing the mode to BLKmode. */
10822 if (mode1 == VOIDmode
10823 || REG_P (op0) || GET_CODE (op0) == SUBREG
10824 || (mode1 != BLKmode && ! direct_load[(int) mode1]
10825 && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
10826 && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT
10827 && modifier != EXPAND_CONST_ADDRESS
10828 && modifier != EXPAND_INITIALIZER
10829 && modifier != EXPAND_MEMORY)
10830 /* If the bitfield is volatile and the bitsize
10831 is narrower than the access size of the bitfield,
10832 we need to extract bitfields from the access. */
10833 || (volatilep && TREE_CODE (exp) == COMPONENT_REF
10834 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (exp, 1))
10835 && mode1 != BLKmode
10836 && maybe_lt (bitsize, GET_MODE_SIZE (mode1) * BITS_PER_UNIT))
10837 /* If the field isn't aligned enough to fetch as a memref,
10838 fetch it as a bit field. */
10839 || (mode1 != BLKmode
10840 && (((MEM_P (op0)
10841 ? MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode1)
10842 || !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode1))
10843 : TYPE_ALIGN (TREE_TYPE (tem)) < GET_MODE_ALIGNMENT (mode)
10844 || !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode)))
10845 && modifier != EXPAND_MEMORY
10846 && ((modifier == EXPAND_CONST_ADDRESS
10847 || modifier == EXPAND_INITIALIZER)
10848 ? STRICT_ALIGNMENT
10849 : targetm.slow_unaligned_access (mode1,
10850 MEM_ALIGN (op0))))
10851 || !multiple_p (bitpos, BITS_PER_UNIT)))
10852 /* If the type and the field are a constant size and the
10853 size of the type isn't the same size as the bitfield,
10854 we must use bitfield operations. */
10855 || (known_size_p (bitsize)
10856 && TYPE_SIZE (TREE_TYPE (exp))
10857 && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (exp)))
10858 && maybe_ne (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (exp))),
10859 bitsize)))
10860 {
10861 machine_mode ext_mode = mode;
10862
10863 if (ext_mode == BLKmode
10864 && ! (target != 0 && MEM_P (op0)
10865 && MEM_P (target)
10866 && multiple_p (bitpos, BITS_PER_UNIT)))
10867 ext_mode = int_mode_for_size (bitsize, 1).else_blk ();
10868
10869 if (ext_mode == BLKmode)
10870 {
10871 if (target == 0)
10872 target = assign_temp (type, 1, 1);
10873
10874 /* ??? Unlike the similar test a few lines below, this one is
10875 very likely obsolete. */
10876 if (known_eq (bitsize, 0))
10877 return target;
10878
10879 /* In this case, BITPOS must start at a byte boundary and
10880 TARGET, if specified, must be a MEM. */
10881 gcc_assert (MEM_P (op0)
10882 && (!target || MEM_P (target)));
10883
10884 bytepos = exact_div (bitpos, BITS_PER_UNIT);
10885 poly_int64 bytesize = bits_to_bytes_round_up (bitsize);
10886 emit_block_move (target,
10887 adjust_address (op0, VOIDmode, bytepos),
10888 gen_int_mode (bytesize, Pmode),
10889 (modifier == EXPAND_STACK_PARM
10890 ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
10891
10892 return target;
10893 }
10894
10895 /* If we have nothing to extract, the result will be 0 for targets
10896 with SHIFT_COUNT_TRUNCATED == 0 and garbage otherwise. Always
10897 return 0 for the sake of consistency, as reading a zero-sized
10898 bitfield is valid in Ada and the value is fully specified. */
10899 if (known_eq (bitsize, 0))
10900 return const0_rtx;
10901
10902 op0 = validize_mem (op0);
10903
10904 if (MEM_P (op0) && REG_P (XEXP (op0, 0)))
10905 mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
10906
10907 /* If the result has aggregate type and the extraction is done in
10908 an integral mode, then the field may be not aligned on a byte
10909 boundary; in this case, if it has reverse storage order, it
10910 needs to be extracted as a scalar field with reverse storage
10911 order and put back into memory order afterwards. */
10912 if (AGGREGATE_TYPE_P (type)
10913 && GET_MODE_CLASS (ext_mode) == MODE_INT)
10914 reversep = TYPE_REVERSE_STORAGE_ORDER (type);
10915
10916 gcc_checking_assert (known_ge (bitpos, 0));
10917 op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp,
10918 (modifier == EXPAND_STACK_PARM
10919 ? NULL_RTX : target),
10920 ext_mode, ext_mode, reversep, alt_rtl);
10921
10922 /* If the result has aggregate type and the mode of OP0 is an
10923 integral mode then, if BITSIZE is narrower than this mode
10924 and this is for big-endian data, we must put the field
10925 into the high-order bits. And we must also put it back
10926 into memory order if it has been previously reversed. */
10927 scalar_int_mode op0_mode;
10928 if (AGGREGATE_TYPE_P (type)
10929 && is_int_mode (GET_MODE (op0), &op0_mode))
10930 {
10931 HOST_WIDE_INT size = GET_MODE_BITSIZE (op0_mode);
10932
10933 gcc_checking_assert (known_le (bitsize, size));
10934 if (maybe_lt (bitsize, size)
10935 && reversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
10936 op0 = expand_shift (LSHIFT_EXPR, op0_mode, op0,
10937 size - bitsize, op0, 1);
10938
10939 if (reversep)
10940 op0 = flip_storage_order (op0_mode, op0);
10941 }
10942
10943 /* If the result type is BLKmode, store the data into a temporary
10944 of the appropriate type, but with the mode corresponding to the
10945 mode for the data we have (op0's mode). */
10946 if (mode == BLKmode)
10947 {
10948 rtx new_rtx
10949 = assign_stack_temp_for_type (ext_mode,
10950 GET_MODE_BITSIZE (ext_mode),
10951 type);
10952 emit_move_insn (new_rtx, op0);
10953 op0 = copy_rtx (new_rtx);
10954 PUT_MODE (op0, BLKmode);
10955 }
10956
10957 return op0;
10958 }
10959
10960 /* If the result is BLKmode, use that to access the object
10961 now as well. */
10962 if (mode == BLKmode)
10963 mode1 = BLKmode;
10964
10965 /* Get a reference to just this component. */
10966 bytepos = bits_to_bytes_round_down (bitpos);
10967 if (modifier == EXPAND_CONST_ADDRESS
10968 || modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
10969 op0 = adjust_address_nv (op0, mode1, bytepos);
10970 else
10971 op0 = adjust_address (op0, mode1, bytepos);
10972
10973 if (op0 == orig_op0)
10974 op0 = copy_rtx (op0);
10975
10976 /* Don't set memory attributes if the base expression is
10977 SSA_NAME that got expanded as a MEM. In that case, we should
10978 just honor its original memory attributes. */
10979 if (TREE_CODE (tem) != SSA_NAME || !MEM_P (orig_op0))
10980 set_mem_attributes (op0, exp, 0);
10981
10982 if (REG_P (XEXP (op0, 0)))
10983 mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
10984
10985 /* If op0 is a temporary because the original expression was forced
10986 to memory, clear MEM_EXPR so that the original expression cannot
10987 be marked as addressable through MEM_EXPR of the temporary. */
10988 if (clear_mem_expr)
10989 set_mem_expr (op0, NULL_TREE);
10990
10991 MEM_VOLATILE_P (op0) |= volatilep;
10992
10993 if (reversep
10994 && modifier != EXPAND_MEMORY
10995 && modifier != EXPAND_WRITE)
10996 op0 = flip_storage_order (mode1, op0);
10997
10998 if (mode == mode1 || mode1 == BLKmode || mode1 == tmode
10999 || modifier == EXPAND_CONST_ADDRESS
11000 || modifier == EXPAND_INITIALIZER)
11001 return op0;
11002
11003 if (target == 0)
11004 target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
11005
11006 convert_move (target, op0, unsignedp);
11007 return target;
11008 }
11009
11010 case OBJ_TYPE_REF:
11011 return expand_expr (OBJ_TYPE_REF_EXPR (exp), target, tmode, modifier);
11012
11013 case CALL_EXPR:
11014 /* All valid uses of __builtin_va_arg_pack () are removed during
11015 inlining. */
11016 if (CALL_EXPR_VA_ARG_PACK (exp))
11017 error ("%Kinvalid use of %<__builtin_va_arg_pack ()%>", exp);
11018 {
11019 tree fndecl = get_callee_fndecl (exp), attr;
11020
11021 if (fndecl
11022 /* Don't diagnose the error attribute in thunks; those are
11023 artificially created. */
11024 && !CALL_FROM_THUNK_P (exp)
11025 && (attr = lookup_attribute ("error",
11026 DECL_ATTRIBUTES (fndecl))) != NULL)
11027 {
11028 const char *ident = lang_hooks.decl_printable_name (fndecl, 1);
11029 error ("%Kcall to %qs declared with attribute error: %s", exp,
11030 identifier_to_locale (ident),
11031 TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr))));
11032 }
11033 if (fndecl
11034 /* Don't diagnose the warning attribute in thunks; those are
11035 artificially created. */
11036 && !CALL_FROM_THUNK_P (exp)
11037 && (attr = lookup_attribute ("warning",
11038 DECL_ATTRIBUTES (fndecl))) != NULL)
11039 {
11040 const char *ident = lang_hooks.decl_printable_name (fndecl, 1);
11041 warning_at (tree_nonartificial_location (exp),
11042 OPT_Wattribute_warning,
11043 "%Kcall to %qs declared with attribute warning: %s",
11044 exp, identifier_to_locale (ident),
11045 TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr))));
11046 }
11047
11048 /* Check for a built-in function. */
11049 if (fndecl && fndecl_built_in_p (fndecl))
11050 {
11051 gcc_assert (DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_FRONTEND);
11052 return expand_builtin (exp, target, subtarget, tmode, ignore);
11053 }
11054 }
11055 return expand_call (exp, target, ignore);
11056
11057 case VIEW_CONVERT_EXPR:
11058 op0 = NULL_RTX;
11059
11060 /* If we are converting to BLKmode, try to avoid an intermediate
11061 temporary by fetching an inner memory reference. */
11062 if (mode == BLKmode
11063 && poly_int_tree_p (TYPE_SIZE (type))
11064 && TYPE_MODE (TREE_TYPE (treeop0)) != BLKmode
11065 && handled_component_p (treeop0))
11066 {
11067 machine_mode mode1;
11068 poly_int64 bitsize, bitpos, bytepos;
11069 tree offset;
11070 int unsignedp, reversep, volatilep = 0;
11071 tree tem
11072 = get_inner_reference (treeop0, &bitsize, &bitpos, &offset, &mode1,
11073 &unsignedp, &reversep, &volatilep);
11074 rtx orig_op0;
11075
11076 /* ??? We should work harder and deal with non-zero offsets. */
11077 if (!offset
11078 && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
11079 && !reversep
11080 && known_size_p (bitsize)
11081 && known_eq (wi::to_poly_offset (TYPE_SIZE (type)), bitsize))
11082 {
11083 /* See the normal_inner_ref case for the rationale. */
11084 orig_op0
11085 = expand_expr_real (tem,
11086 (TREE_CODE (TREE_TYPE (tem)) == UNION_TYPE
11087 && (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem)))
11088 != INTEGER_CST)
11089 && modifier != EXPAND_STACK_PARM
11090 ? target : NULL_RTX),
11091 VOIDmode,
11092 modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier,
11093 NULL, true);
11094
11095 if (MEM_P (orig_op0))
11096 {
11097 op0 = orig_op0;
11098
11099 /* Get a reference to just this component. */
11100 if (modifier == EXPAND_CONST_ADDRESS
11101 || modifier == EXPAND_SUM
11102 || modifier == EXPAND_INITIALIZER)
11103 op0 = adjust_address_nv (op0, mode, bytepos);
11104 else
11105 op0 = adjust_address (op0, mode, bytepos);
11106
11107 if (op0 == orig_op0)
11108 op0 = copy_rtx (op0);
11109
11110 set_mem_attributes (op0, treeop0, 0);
11111 if (REG_P (XEXP (op0, 0)))
11112 mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
11113
11114 MEM_VOLATILE_P (op0) |= volatilep;
11115 }
11116 }
11117 }
11118
11119 if (!op0)
11120 op0 = expand_expr_real (treeop0, NULL_RTX, VOIDmode, modifier,
11121 NULL, inner_reference_p);
11122
11123 /* If the input and output modes are both the same, we are done. */
11124 if (mode == GET_MODE (op0))
11125 ;
11126 /* If neither mode is BLKmode, and both modes are the same size
11127 then we can use gen_lowpart. */
11128 else if (mode != BLKmode
11129 && GET_MODE (op0) != BLKmode
11130 && known_eq (GET_MODE_PRECISION (mode),
11131 GET_MODE_PRECISION (GET_MODE (op0)))
11132 && !COMPLEX_MODE_P (GET_MODE (op0)))
11133 {
11134 if (GET_CODE (op0) == SUBREG)
11135 op0 = force_reg (GET_MODE (op0), op0);
11136 temp = gen_lowpart_common (mode, op0);
11137 if (temp)
11138 op0 = temp;
11139 else
11140 {
11141 if (!REG_P (op0) && !MEM_P (op0))
11142 op0 = force_reg (GET_MODE (op0), op0);
11143 op0 = gen_lowpart (mode, op0);
11144 }
11145 }
11146 /* If both types are integral, convert from one mode to the other. */
11147 else if (INTEGRAL_TYPE_P (type) && INTEGRAL_TYPE_P (TREE_TYPE (treeop0)))
11148 op0 = convert_modes (mode, GET_MODE (op0), op0,
11149 TYPE_UNSIGNED (TREE_TYPE (treeop0)));
11150 /* If the output type is a bit-field type, do an extraction. */
11151 else if (reduce_bit_field)
11152 return extract_bit_field (op0, TYPE_PRECISION (type), 0,
11153 TYPE_UNSIGNED (type), NULL_RTX,
11154 mode, mode, false, NULL);
11155 /* As a last resort, spill op0 to memory, and reload it in a
11156 different mode. */
11157 else if (!MEM_P (op0))
11158 {
11159 /* If the operand is not a MEM, force it into memory. Since we
11160 are going to be changing the mode of the MEM, don't call
11161 force_const_mem for constants because we don't allow pool
11162 constants to change mode. */
11163 tree inner_type = TREE_TYPE (treeop0);
11164
11165 gcc_assert (!TREE_ADDRESSABLE (exp));
11166
11167 if (target == 0 || GET_MODE (target) != TYPE_MODE (inner_type))
11168 target
11169 = assign_stack_temp_for_type
11170 (TYPE_MODE (inner_type),
11171 GET_MODE_SIZE (TYPE_MODE (inner_type)), inner_type);
11172
11173 emit_move_insn (target, op0);
11174 op0 = target;
11175 }
11176
11177 /* If OP0 is (now) a MEM, we need to deal with alignment issues. If the
11178 output type is such that the operand is known to be aligned, indicate
11179 that it is. Otherwise, we need only be concerned about alignment for
11180 non-BLKmode results. */
11181 if (MEM_P (op0))
11182 {
11183 enum insn_code icode;
11184
11185 if (modifier != EXPAND_WRITE
11186 && modifier != EXPAND_MEMORY
11187 && !inner_reference_p
11188 && mode != BLKmode
11189 && MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode))
11190 {
11191 /* If the target does have special handling for unaligned
11192 loads of this mode, use them. */
11193 if ((icode = optab_handler (movmisalign_optab, mode))
11194 != CODE_FOR_nothing)
11195 {
11196 rtx reg;
11197
11198 op0 = adjust_address (op0, mode, 0);
11199 /* We've already validated the memory, and we're creating a
11200 new pseudo destination. The predicates really can't
11201 fail. */
11202 reg = gen_reg_rtx (mode);
11203
11204 /* Nor can the insn generator. */
11205 rtx_insn *insn = GEN_FCN (icode) (reg, op0);
11206 emit_insn (insn);
11207 return reg;
11208 }
11209 else if (STRICT_ALIGNMENT)
11210 {
11211 poly_uint64 mode_size = GET_MODE_SIZE (mode);
11212 poly_uint64 temp_size = mode_size;
11213 if (GET_MODE (op0) != BLKmode)
11214 temp_size = upper_bound (temp_size,
11215 GET_MODE_SIZE (GET_MODE (op0)));
11216 rtx new_rtx
11217 = assign_stack_temp_for_type (mode, temp_size, type);
11218 rtx new_with_op0_mode
11219 = adjust_address (new_rtx, GET_MODE (op0), 0);
11220
11221 gcc_assert (!TREE_ADDRESSABLE (exp));
11222
11223 if (GET_MODE (op0) == BLKmode)
11224 {
11225 rtx size_rtx = gen_int_mode (mode_size, Pmode);
11226 emit_block_move (new_with_op0_mode, op0, size_rtx,
11227 (modifier == EXPAND_STACK_PARM
11228 ? BLOCK_OP_CALL_PARM
11229 : BLOCK_OP_NORMAL));
11230 }
11231 else
11232 emit_move_insn (new_with_op0_mode, op0);
11233
11234 op0 = new_rtx;
11235 }
11236 }
11237
11238 op0 = adjust_address (op0, mode, 0);
11239 }
11240
11241 return op0;
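/* A gimple-level sketch of what lands in this case: a same-size
   reinterpretation of a value, e.g. (notation as in gimple dumps,
   example hypothetical)

     i_2 = VIEW_CONVERT_EXPR<int>(f_1);

   Same-precision non-BLKmode sources go through gen_lowpart,
   integral-to-integral conversions through convert_modes, while the
   remaining cases use extract_bit_field or a spill to a stack
   temporary re-read in the new mode, with the alignment fix-ups
   above.  */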
11242
11243 case MODIFY_EXPR:
11244 {
11245 tree lhs = treeop0;
11246 tree rhs = treeop1;
11247 gcc_assert (ignore);
11248
11249 /* Check for |= or &= of a bitfield of size one into another bitfield
11250 of size one. In this case, (unless we need the result of the
11251 assignment) we can do this more efficiently with a
11252 test followed by an assignment, if necessary.
11253
11254 ??? At this point, we can't get a BIT_FIELD_REF here. But if
11255 things change so we do, this code should be enhanced to
11256 support it. */
11257 if (TREE_CODE (lhs) == COMPONENT_REF
11258 && (TREE_CODE (rhs) == BIT_IOR_EXPR
11259 || TREE_CODE (rhs) == BIT_AND_EXPR)
11260 && TREE_OPERAND (rhs, 0) == lhs
11261 && TREE_CODE (TREE_OPERAND (rhs, 1)) == COMPONENT_REF
11262 && integer_onep (DECL_SIZE (TREE_OPERAND (lhs, 1)))
11263 && integer_onep (DECL_SIZE (TREE_OPERAND (TREE_OPERAND (rhs, 1), 1))))
11264 {
11265 rtx_code_label *label = gen_label_rtx ();
11266 int value = TREE_CODE (rhs) == BIT_IOR_EXPR;
11267 profile_probability prob = profile_probability::uninitialized ();
11268 if (value)
11269 jumpifnot (TREE_OPERAND (rhs, 1), label, prob);
11270 else
11271 jumpif (TREE_OPERAND (rhs, 1), label, prob);
11272 expand_assignment (lhs, build_int_cst (TREE_TYPE (rhs), value),
11273 false);
11274 do_pending_stack_adjust ();
11275 emit_label (label);
11276 return const0_rtx;
11277 }
11278
11279 expand_assignment (lhs, rhs, false);
11280 return const0_rtx;
11281 }
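/* The special case above corresponds roughly to this source-level
   rewrite (struct and field names hypothetical):

     struct s { unsigned a : 1, b : 1; } x;

     x.a |= x.b;   // becomes:  if (x.b)  x.a = 1;
     x.a &= x.b;   // becomes:  if (!x.b) x.a = 0;

   so only a test and a conditional one-bit store are emitted instead
   of a read-modify-write of the bit-field.  */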
11282
11283 case ADDR_EXPR:
11284 return expand_expr_addr_expr (exp, target, tmode, modifier);
11285
11286 case REALPART_EXPR:
11287 op0 = expand_normal (treeop0);
11288 return read_complex_part (op0, false);
11289
11290 case IMAGPART_EXPR:
11291 op0 = expand_normal (treeop0);
11292 return read_complex_part (op0, true);
11293
11294 case RETURN_EXPR:
11295 case LABEL_EXPR:
11296 case GOTO_EXPR:
11297 case SWITCH_EXPR:
11298 case ASM_EXPR:
11299 /* Expanded in cfgexpand.c. */
11300 gcc_unreachable ();
11301
11302 case TRY_CATCH_EXPR:
11303 case CATCH_EXPR:
11304 case EH_FILTER_EXPR:
11305 case TRY_FINALLY_EXPR:
11306 case EH_ELSE_EXPR:
11307 /* Lowered by tree-eh.c. */
11308 gcc_unreachable ();
11309
11310 case WITH_CLEANUP_EXPR:
11311 case CLEANUP_POINT_EXPR:
11312 case TARGET_EXPR:
11313 case CASE_LABEL_EXPR:
11314 case VA_ARG_EXPR:
11315 case BIND_EXPR:
11316 case INIT_EXPR:
11317 case CONJ_EXPR:
11318 case COMPOUND_EXPR:
11319 case PREINCREMENT_EXPR:
11320 case PREDECREMENT_EXPR:
11321 case POSTINCREMENT_EXPR:
11322 case POSTDECREMENT_EXPR:
11323 case LOOP_EXPR:
11324 case EXIT_EXPR:
11325 case COMPOUND_LITERAL_EXPR:
11326 /* Lowered by gimplify.c. */
11327 gcc_unreachable ();
11328
11329 case FDESC_EXPR:
11330 /* Function descriptors are not valid except as
11331 initialization constants, and should not be expanded. */
11332 gcc_unreachable ();
11333
11334 case WITH_SIZE_EXPR:
11335 /* WITH_SIZE_EXPR expands to its first argument. The caller should
11336 have pulled out the size to use in whatever context it needed. */
11337 return expand_expr_real (treeop0, original_target, tmode,
11338 modifier, alt_rtl, inner_reference_p);
11339
11340 default:
11341 return expand_expr_real_2 (&ops, target, tmode, modifier);
11342 }
11343 }
11344 \f
11345 /* Subroutine of above: reduce EXP to the precision of TYPE (in the
11346 signedness of TYPE), possibly returning the result in TARGET.
11347 TYPE is known to be a partial integer type. */
11348 static rtx
11349 reduce_to_bit_field_precision (rtx exp, rtx target, tree type)
11350 {
11351 HOST_WIDE_INT prec = TYPE_PRECISION (type);
11352 if (target && GET_MODE (target) != GET_MODE (exp))
11353 target = 0;
11354 /* For constant values, reduce using build_int_cst_type. */
11355 poly_int64 const_exp;
11356 if (poly_int_rtx_p (exp, &const_exp))
11357 {
11358 tree t = build_int_cst_type (type, const_exp);
11359 return expand_expr (t, target, VOIDmode, EXPAND_NORMAL);
11360 }
11361 else if (TYPE_UNSIGNED (type))
11362 {
11363 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (exp));
11364 rtx mask = immed_wide_int_const
11365 (wi::mask (prec, false, GET_MODE_PRECISION (mode)), mode);
11366 return expand_and (mode, exp, mask, target);
11367 }
11368 else
11369 {
11370 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (exp));
11371 int count = GET_MODE_PRECISION (mode) - prec;
11372 exp = expand_shift (LSHIFT_EXPR, mode, exp, count, target, 0);
11373 return expand_shift (RSHIFT_EXPR, mode, exp, count, target, 0);
11374 }
11375 }
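/* Worked instances of the reduction above, assuming SImode and a
   partial integer type of precision 5:

     unsigned type:  the value is masked with 0x1f;
     signed type:    count = 32 - 5 = 27, so 0x0000001f reduces to -1
                     and 0x00000010 reduces to -16 after the shift pair.  */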
11376 \f
11377 /* Subroutine of above: returns 1 if OFFSET corresponds to an offset that
11378 when applied to the address of EXP produces an address known to be
11379 aligned more than BIGGEST_ALIGNMENT. */
11380
11381 static int
11382 is_aligning_offset (const_tree offset, const_tree exp)
11383 {
11384 /* Strip off any conversions. */
11385 while (CONVERT_EXPR_P (offset))
11386 offset = TREE_OPERAND (offset, 0);
11387
11388 /* We must now have a BIT_AND_EXPR with a constant that is one less than
11389 a power of 2 and which is larger than BIGGEST_ALIGNMENT / BITS_PER_UNIT. */
11390 if (TREE_CODE (offset) != BIT_AND_EXPR
11391 || !tree_fits_uhwi_p (TREE_OPERAND (offset, 1))
11392 || compare_tree_int (TREE_OPERAND (offset, 1),
11393 BIGGEST_ALIGNMENT / BITS_PER_UNIT) <= 0
11394 || !pow2p_hwi (tree_to_uhwi (TREE_OPERAND (offset, 1)) + 1))
11395 return 0;
11396
11397 /* Look at the first operand of BIT_AND_EXPR and strip any conversion.
11398 It must be NEGATE_EXPR. Then strip any more conversions. */
11399 offset = TREE_OPERAND (offset, 0);
11400 while (CONVERT_EXPR_P (offset))
11401 offset = TREE_OPERAND (offset, 0);
11402
11403 if (TREE_CODE (offset) != NEGATE_EXPR)
11404 return 0;
11405
11406 offset = TREE_OPERAND (offset, 0);
11407 while (CONVERT_EXPR_P (offset))
11408 offset = TREE_OPERAND (offset, 0);
11409
11410 /* This must now be the address of EXP. */
11411 return TREE_CODE (offset) == ADDR_EXPR && TREE_OPERAND (offset, 0) == exp;
11412 }
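/* Sketch of the offset shape recognized above (C is one less than a
   power of two larger than BIGGEST_ALIGNMENT / BITS_PER_UNIT; the
   address arithmetic is hypothetical source notation):

     (-(uintptr_t) &EXP) & C

   Adding such an offset to the address of EXP rounds it up to a
   (C + 1)-byte boundary, which is why the caller may then record the
   resulting MEM as aligned to BIGGEST_ALIGNMENT.  */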
11413 \f
11414 /* Return the tree node if ARG corresponds to a string constant or zero
11415 if it doesn't. If we return nonzero, set *PTR_OFFSET to the (possibly
11416 non-constant) offset in bytes within the string that ARG is accessing.
11417 If MEM_SIZE is non-null, the storage size of the memory is stored in *MEM_SIZE.
11418 If DECL is non-null, the constant declaration is stored in *DECL if available. */
11419
11420 tree
11421 string_constant (tree arg, tree *ptr_offset, tree *mem_size, tree *decl)
11422 {
11423 tree dummy = NULL_TREE;
11424 if (!mem_size)
11425 mem_size = &dummy;
11426
11427 /* Store the type of the original expression before conversions
11428 via NOP_EXPR or POINTER_PLUS_EXPR to other types have been
11429 removed. */
11430 tree argtype = TREE_TYPE (arg);
11431
11432 tree array;
11433 STRIP_NOPS (arg);
11434
11435 /* Non-constant index into the character array in an ARRAY_REF
11436 expression or null. */
11437 tree varidx = NULL_TREE;
11438
11439 poly_int64 base_off = 0;
11440
11441 if (TREE_CODE (arg) == ADDR_EXPR)
11442 {
11443 arg = TREE_OPERAND (arg, 0);
11444 tree ref = arg;
11445 if (TREE_CODE (arg) == ARRAY_REF)
11446 {
11447 tree idx = TREE_OPERAND (arg, 1);
11448 if (TREE_CODE (idx) != INTEGER_CST)
11449 {
11450 /* From a pointer (but not array) argument extract the variable
11451 index to prevent get_addr_base_and_unit_offset() from failing
11452 due to it. Use it later to compute the non-constant offset
11453 into the string and return it to the caller. */
11454 varidx = idx;
11455 ref = TREE_OPERAND (arg, 0);
11456
11457 if (TREE_CODE (TREE_TYPE (arg)) == ARRAY_TYPE)
11458 return NULL_TREE;
11459
11460 if (!integer_zerop (array_ref_low_bound (arg)))
11461 return NULL_TREE;
11462
11463 if (!integer_onep (array_ref_element_size (arg)))
11464 return NULL_TREE;
11465 }
11466 }
11467 array = get_addr_base_and_unit_offset (ref, &base_off);
11468 if (!array
11469 || (TREE_CODE (array) != VAR_DECL
11470 && TREE_CODE (array) != CONST_DECL
11471 && TREE_CODE (array) != STRING_CST))
11472 return NULL_TREE;
11473 }
11474 else if (TREE_CODE (arg) == PLUS_EXPR || TREE_CODE (arg) == POINTER_PLUS_EXPR)
11475 {
11476 tree arg0 = TREE_OPERAND (arg, 0);
11477 tree arg1 = TREE_OPERAND (arg, 1);
11478
11479 tree offset;
11480 tree str = string_constant (arg0, &offset, mem_size, decl);
11481 if (!str)
11482 {
11483 str = string_constant (arg1, &offset, mem_size, decl);
11484 arg1 = arg0;
11485 }
11486
11487 if (str)
11488 {
11489 /* Avoid pointers to arrays (see bug 86622). */
11490 if (POINTER_TYPE_P (TREE_TYPE (arg))
11491 && TREE_CODE (TREE_TYPE (TREE_TYPE (arg))) == ARRAY_TYPE
11492 && !(decl && !*decl)
11493 && !(decl && tree_fits_uhwi_p (DECL_SIZE_UNIT (*decl))
11494 && tree_fits_uhwi_p (*mem_size)
11495 && tree_int_cst_equal (*mem_size, DECL_SIZE_UNIT (*decl))))
11496 return NULL_TREE;
11497
11498 tree type = TREE_TYPE (offset);
11499 arg1 = fold_convert (type, arg1);
11500 *ptr_offset = fold_build2 (PLUS_EXPR, type, offset, arg1);
11501 return str;
11502 }
11503 return NULL_TREE;
11504 }
11505 else if (TREE_CODE (arg) == SSA_NAME)
11506 {
11507 gimple *stmt = SSA_NAME_DEF_STMT (arg);
11508 if (!is_gimple_assign (stmt))
11509 return NULL_TREE;
11510
11511 tree rhs1 = gimple_assign_rhs1 (stmt);
11512 tree_code code = gimple_assign_rhs_code (stmt);
11513 if (code == ADDR_EXPR)
11514 return string_constant (rhs1, ptr_offset, mem_size, decl);
11515 else if (code != POINTER_PLUS_EXPR)
11516 return NULL_TREE;
11517
11518 tree offset;
11519 if (tree str = string_constant (rhs1, &offset, mem_size, decl))
11520 {
11521 /* Avoid pointers to arrays (see bug 86622). */
11522 if (POINTER_TYPE_P (TREE_TYPE (rhs1))
11523 && TREE_CODE (TREE_TYPE (TREE_TYPE (rhs1))) == ARRAY_TYPE
11524 && !(decl && !*decl)
11525 && !(decl && tree_fits_uhwi_p (DECL_SIZE_UNIT (*decl))
11526 && tree_fits_uhwi_p (*mem_size)
11527 && tree_int_cst_equal (*mem_size, DECL_SIZE_UNIT (*decl))))
11528 return NULL_TREE;
11529
11530 tree rhs2 = gimple_assign_rhs2 (stmt);
11531 tree type = TREE_TYPE (offset);
11532 rhs2 = fold_convert (type, rhs2);
11533 *ptr_offset = fold_build2 (PLUS_EXPR, type, offset, rhs2);
11534 return str;
11535 }
11536 return NULL_TREE;
11537 }
11538 else if (DECL_P (arg))
11539 array = arg;
11540 else
11541 return NULL_TREE;
11542
11543 tree offset = wide_int_to_tree (sizetype, base_off);
11544 if (varidx)
11545 {
11546 if (TREE_CODE (TREE_TYPE (array)) != ARRAY_TYPE)
11547 return NULL_TREE;
11548
11549 gcc_assert (TREE_CODE (arg) == ARRAY_REF);
11550 tree chartype = TREE_TYPE (TREE_TYPE (TREE_OPERAND (arg, 0)));
11551 if (TREE_CODE (chartype) != INTEGER_TYPE)
11552 return NULL_TREE;
11553
11554 offset = fold_convert (sizetype, varidx);
11555 }
11556
11557 if (TREE_CODE (array) == STRING_CST)
11558 {
11559 *ptr_offset = fold_convert (sizetype, offset);
11560 *mem_size = TYPE_SIZE_UNIT (TREE_TYPE (array));
11561 if (decl)
11562 *decl = NULL_TREE;
11563 gcc_checking_assert (tree_to_shwi (TYPE_SIZE_UNIT (TREE_TYPE (array)))
11564 >= TREE_STRING_LENGTH (array));
11565 return array;
11566 }
11567
11568 if (!VAR_P (array) && TREE_CODE (array) != CONST_DECL)
11569 return NULL_TREE;
11570
11571 tree init = ctor_for_folding (array);
11572
11573 /* Handle variables initialized with string literals. */
11574 if (!init || init == error_mark_node)
11575 return NULL_TREE;
11576 if (TREE_CODE (init) == CONSTRUCTOR)
11577 {
11578 /* Convert the 64-bit constant offset to a wider type to avoid
11579 overflow. */
11580 offset_int wioff;
11581 if (!base_off.is_constant (&wioff))
11582 return NULL_TREE;
11583
11584 wioff *= BITS_PER_UNIT;
11585 if (!wi::fits_uhwi_p (wioff))
11586 return NULL_TREE;
11587
11588 base_off = wioff.to_uhwi ();
11589 unsigned HOST_WIDE_INT fieldoff = 0;
11590 init = fold_ctor_reference (TREE_TYPE (arg), init, base_off, 0, array,
11591 &fieldoff);
11592 HOST_WIDE_INT cstoff;
11593 if (!base_off.is_constant (&cstoff))
11594 return NULL_TREE;
11595
11596 cstoff = (cstoff - fieldoff) / BITS_PER_UNIT;
11597 tree off = build_int_cst (sizetype, cstoff);
11598 if (varidx)
11599 offset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset, off);
11600 else
11601 offset = off;
11602 }
11603
11604 if (!init)
11605 return NULL_TREE;
11606
11607 *ptr_offset = offset;
11608
11609 tree inittype = TREE_TYPE (init);
11610
11611 if (TREE_CODE (init) == INTEGER_CST
11612 && (TREE_CODE (TREE_TYPE (array)) == INTEGER_TYPE
11613 || TYPE_MAIN_VARIANT (inittype) == char_type_node))
11614 {
11615 /* For a reference to (address of) a single constant character,
11616 store the native representation of the character in CHARBUF.
11617 If the reference is to an element of an array or a member
11618 of a struct, only consider narrow characters until ctors
11619 for wide character arrays are transformed to STRING_CSTs
11620 like those for narrow arrays. */
11621 unsigned char charbuf[MAX_BITSIZE_MODE_ANY_MODE / BITS_PER_UNIT];
11622 int len = native_encode_expr (init, charbuf, sizeof charbuf, 0);
11623 if (len > 0)
11624 {
11625 /* Construct a string literal with elements of INITTYPE and
11626 the representation above. Then strip
11627 the ADDR_EXPR (ARRAY_REF (...)) around the STRING_CST. */
11628 init = build_string_literal (len, (char *)charbuf, inittype);
11629 init = TREE_OPERAND (TREE_OPERAND (init, 0), 0);
11630 }
11631 }
11632
11633 tree initsize = TYPE_SIZE_UNIT (inittype);
11634
11635 if (TREE_CODE (init) == CONSTRUCTOR && initializer_zerop (init))
11636 {
11637 /* Fold an empty/zero constructor for an implicitly initialized
11638 object or subobject into the empty string. */
11639
11640 /* Determine the character type from that of the original
11641 expression. */
11642 tree chartype = argtype;
11643 if (POINTER_TYPE_P (chartype))
11644 chartype = TREE_TYPE (chartype);
11645 while (TREE_CODE (chartype) == ARRAY_TYPE)
11646 chartype = TREE_TYPE (chartype);
11647 /* Convert a char array to an empty STRING_CST having an array
11648 of the expected type. */
11649 if (!initsize)
11650 initsize = integer_zero_node;
11651
11652 unsigned HOST_WIDE_INT size = tree_to_uhwi (initsize);
11653 init = build_string_literal (size ? 1 : 0, "", chartype, size);
11654 init = TREE_OPERAND (init, 0);
11655 init = TREE_OPERAND (init, 0);
11656
11657 *ptr_offset = integer_zero_node;
11658 }
11659
11660 if (decl)
11661 *decl = array;
11662
11663 if (TREE_CODE (init) != STRING_CST)
11664 return NULL_TREE;
11665
11666 *mem_size = initsize;
11667
11668 gcc_checking_assert (tree_to_shwi (initsize) >= TREE_STRING_LENGTH (init));
11669
11670 return init;
11671 }
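/* Sketches of arguments the function above resolves (declaration
   hypothetical):

     const char a[] = "hello";

     "hello" + 2            -> "hello", *PTR_OFFSET == 2
     &a[3]                  -> "hello", *PTR_OFFSET == 3, *DECL == a
     &a[i]  (i variable)    -> "hello", *PTR_OFFSET == (sizetype) i

   so callers such as the strlen folding code can reason about the
   constant characters behind ARG.  */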
11672 \f
11673 /* Compute the modular multiplicative inverse of A modulo B
11674 using the extended Euclidean algorithm. Assumes A and B are coprime. */
11675 static wide_int
11676 mod_inv (const wide_int &a, const wide_int &b)
11677 {
11678 /* Verify the assumption. */
11679 gcc_checking_assert (wi::eq_p (wi::gcd (a, b), 1));
11680
11681 unsigned int p = a.get_precision () + 1;
11682 gcc_checking_assert (b.get_precision () + 1 == p);
11683 wide_int c = wide_int::from (a, p, UNSIGNED);
11684 wide_int d = wide_int::from (b, p, UNSIGNED);
11685 wide_int x0 = wide_int::from (0, p, UNSIGNED);
11686 wide_int x1 = wide_int::from (1, p, UNSIGNED);
11687
11688 if (wi::eq_p (b, 1))
11689 return wide_int::from (1, p, UNSIGNED);
11690
11691 while (wi::gt_p (c, 1, UNSIGNED))
11692 {
11693 wide_int t = d;
11694 wide_int q = wi::divmod_trunc (c, d, UNSIGNED, &d);
11695 c = t;
11696 wide_int s = x0;
11697 x0 = wi::sub (x1, wi::mul (q, x0));
11698 x1 = s;
11699 }
11700 if (wi::lt_p (x1, 0, SIGNED))
11701 x1 += d;
11702 return x1;
11703 }
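/* For instance, with prec == 32 the modular multiplicative inverse of 3
   modulo 1 << 32 is 0xaaaaaaab, since 3 * 0xaaaaaaab == 2 * (1 << 32) + 1;
   this is the C3 used below when optimizing x % 3U comparisons.  */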
11704
11705 /* Optimize x % C1 == C2 for signed modulo if C1 is a power of two and C2
11706 is non-zero. With C3 = ((1<<(prec-1)) | (C1 - 1)), optimize:
11707 for C2 > 0, to x & C3 == C2
11708 for C2 < 0, to x & C3 == (C2 & C3). */
11709 enum tree_code
11710 maybe_optimize_pow2p_mod_cmp (enum tree_code code, tree *arg0, tree *arg1)
11711 {
11712 gimple *stmt = get_def_for_expr (*arg0, TRUNC_MOD_EXPR);
11713 tree treeop0 = gimple_assign_rhs1 (stmt);
11714 tree treeop1 = gimple_assign_rhs2 (stmt);
11715 tree type = TREE_TYPE (*arg0);
11716 scalar_int_mode mode;
11717 if (!is_a <scalar_int_mode> (TYPE_MODE (type), &mode))
11718 return code;
11719 if (GET_MODE_BITSIZE (mode) != TYPE_PRECISION (type)
11720 || TYPE_PRECISION (type) <= 1
11721 || TYPE_UNSIGNED (type)
11722 /* Signed x % c == 0 should have been optimized into unsigned modulo
11723 earlier. */
11724 || integer_zerop (*arg1)
11725 /* If c is known to be non-negative, modulo will be expanded as unsigned
11726 modulo. */
11727 || get_range_pos_neg (treeop0) == 1)
11728 return code;
11729
11730 /* x % c == d where d < 0 && d <= -c should always be false. */
11731 if (tree_int_cst_sgn (*arg1) == -1
11732 && -wi::to_widest (treeop1) >= wi::to_widest (*arg1))
11733 return code;
11734
11735 int prec = TYPE_PRECISION (type);
11736 wide_int w = wi::to_wide (treeop1) - 1;
11737 w |= wi::shifted_mask (0, prec - 1, true, prec);
11738 tree c3 = wide_int_to_tree (type, w);
11739 tree c4 = *arg1;
11740 if (tree_int_cst_sgn (*arg1) == -1)
11741 c4 = wide_int_to_tree (type, w & wi::to_wide (*arg1));
11742
11743 rtx op0 = expand_normal (treeop0);
11744 treeop0 = make_tree (TREE_TYPE (treeop0), op0);
11745
11746 bool speed_p = optimize_insn_for_speed_p ();
11747
11748 do_pending_stack_adjust ();
11749
11750 location_t loc = gimple_location (stmt);
11751 struct separate_ops ops;
11752 ops.code = TRUNC_MOD_EXPR;
11753 ops.location = loc;
11754 ops.type = TREE_TYPE (treeop0);
11755 ops.op0 = treeop0;
11756 ops.op1 = treeop1;
11757 ops.op2 = NULL_TREE;
11758 start_sequence ();
11759 rtx mor = expand_expr_real_2 (&ops, NULL_RTX, TYPE_MODE (ops.type),
11760 EXPAND_NORMAL);
11761 rtx_insn *moinsns = get_insns ();
11762 end_sequence ();
11763
11764 unsigned mocost = seq_cost (moinsns, speed_p);
11765 mocost += rtx_cost (mor, mode, EQ, 0, speed_p);
11766 mocost += rtx_cost (expand_normal (*arg1), mode, EQ, 1, speed_p);
11767
11768 ops.code = BIT_AND_EXPR;
11769 ops.location = loc;
11770 ops.type = TREE_TYPE (treeop0);
11771 ops.op0 = treeop0;
11772 ops.op1 = c3;
11773 ops.op2 = NULL_TREE;
11774 start_sequence ();
11775 rtx mur = expand_expr_real_2 (&ops, NULL_RTX, TYPE_MODE (ops.type),
11776 EXPAND_NORMAL);
11777 rtx_insn *muinsns = get_insns ();
11778 end_sequence ();
11779
11780 unsigned mucost = seq_cost (muinsns, speed_p);
11781 mucost += rtx_cost (mur, mode, EQ, 0, speed_p);
11782 mucost += rtx_cost (expand_normal (c4), mode, EQ, 1, speed_p);
11783
11784 if (mocost <= mucost)
11785 {
11786 emit_insn (moinsns);
11787 *arg0 = make_tree (TREE_TYPE (*arg0), mor);
11788 return code;
11789 }
11790
11791 emit_insn (muinsns);
11792 *arg0 = make_tree (TREE_TYPE (*arg0), mur);
11793 *arg1 = c4;
11794 return code;
11795 }
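/* Worked instances for a 32-bit signed x, where C3 == 0x80000003:

     x % 4 == 1    becomes    (x & 0x80000003) == 1
     x % 4 == -1   becomes    (x & 0x80000003) == 0x80000003

   (checking the low bits and the sign bit together), provided the masked
   form is not more expensive than the modulo sequence.  */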
11796
11797 /* Attempt to optimize unsigned (X % C1) == C2 (or (X % C1) != C2).
11798 If C1 is odd, optimize it to:
11799 (X - C2) * C3 <= C4 (or >), where
11800 C3 is the modular multiplicative inverse of C1 modulo 1<<prec and
11801 C4 is ((1<<prec) - 1) / C1 or ((1<<prec) - 1) / C1 - 1 (the latter
11802 if C2 > ((1<<prec) - 1) % C1).
11803 If C1 is even, with S = ctz (C1) and C2 equal to 0, use
11804 ((X * C3) r>> S) <= C4, where C3 is the modular multiplicative
11805 inverse of C1>>S modulo 1<<prec and C4 is (((1<<prec) - 1) / (C1>>S)) >> S.
11806
11807 For signed (X % C1) == 0, if C1 is odd, optimize it to (all operations
11808 in it unsigned):
11809 (X * C3) + C4 <= 2 * C4, where
11810 C3 is the modular multiplicative inverse of (unsigned) C1 modulo 1<<prec and
11811 C4 is (((1 << (prec - 1)) - 1) / C1).
11812 If C1 is even, with S = ctz (C1), use
11813 ((X * C3) + C4) r>> S <= (C4 >> (S - 1))
11814 where C3 is the modular multiplicative inverse of (unsigned)(C1>>S) modulo 1<<prec
11815 and C4 is (((1 << (prec - 1)) - 1) / (C1>>S)) & (-1<<S).
11816
11817 See the Hacker's Delight book, section 10-17. */
11818 enum tree_code
11819 maybe_optimize_mod_cmp (enum tree_code code, tree *arg0, tree *arg1)
11820 {
11821 gcc_checking_assert (code == EQ_EXPR || code == NE_EXPR);
11822 gcc_checking_assert (TREE_CODE (*arg1) == INTEGER_CST);
11823
11824 if (optimize < 2)
11825 return code;
11826
11827 gimple *stmt = get_def_for_expr (*arg0, TRUNC_MOD_EXPR);
11828 if (stmt == NULL)
11829 return code;
11830
11831 tree treeop0 = gimple_assign_rhs1 (stmt);
11832 tree treeop1 = gimple_assign_rhs2 (stmt);
11833 if (TREE_CODE (treeop0) != SSA_NAME
11834 || TREE_CODE (treeop1) != INTEGER_CST
11835 /* Don't optimize the undefined behavior case x % 0;
11836 x % 1 should have been optimized into zero; punt if
11837 it makes it here for whatever reason;
11838 x % -c should have been optimized into x % c. */
11839 || compare_tree_int (treeop1, 2) <= 0
11840 /* Likewise x % c == d where d >= c should always be false. */
11841 || tree_int_cst_le (treeop1, *arg1))
11842 return code;
11843
11844 /* Unsigned x % pow2 is handled right already, for signed
11845 modulo handle it in maybe_optimize_pow2p_mod_cmp. */
11846 if (integer_pow2p (treeop1))
11847 return maybe_optimize_pow2p_mod_cmp (code, arg0, arg1);
11848
11849 tree type = TREE_TYPE (*arg0);
11850 scalar_int_mode mode;
11851 if (!is_a <scalar_int_mode> (TYPE_MODE (type), &mode))
11852 return code;
11853 if (GET_MODE_BITSIZE (mode) != TYPE_PRECISION (type)
11854 || TYPE_PRECISION (type) <= 1)
11855 return code;
11856
11857 signop sgn = UNSIGNED;
11858 /* If both operands are known to have the sign bit clear, handle
11859 even the signed modulo case as unsigned. treeop1 is always
11860 positive >= 2, checked above. */
11861 if (!TYPE_UNSIGNED (type) && get_range_pos_neg (treeop0) != 1)
11862 sgn = SIGNED;
11863
11864 if (!TYPE_UNSIGNED (type))
11865 {
11866 if (tree_int_cst_sgn (*arg1) == -1)
11867 return code;
11868 type = unsigned_type_for (type);
11869 if (!type || TYPE_MODE (type) != TYPE_MODE (TREE_TYPE (*arg0)))
11870 return code;
11871 }
11872
11873 int prec = TYPE_PRECISION (type);
11874 wide_int w = wi::to_wide (treeop1);
11875 int shift = wi::ctz (w);
11876 /* Unsigned (X % C1) == C2 is equivalent to (X - C2) % C1 == 0 if
11877 C2 <= -1U % C1, because for any Z >= 0U - C2 in that case (Z % C1) != 0.
11878 If C1 is odd, we can handle all cases by subtracting
11879 C4 below. We could handle even the even C1 and C2 > -1U % C1 cases
11880 e.g. by testing for overflow on the subtraction; punt on that for now
11881 though. */
11882 if ((sgn == SIGNED || shift) && !integer_zerop (*arg1))
11883 {
11884 if (sgn == SIGNED)
11885 return code;
11886 wide_int x = wi::umod_trunc (wi::mask (prec, false, prec), w);
11887 if (wi::gtu_p (wi::to_wide (*arg1), x))
11888 return code;
11889 }
11890
11891 imm_use_iterator imm_iter;
11892 use_operand_p use_p;
11893 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, treeop0)
11894 {
11895 gimple *use_stmt = USE_STMT (use_p);
11896 /* Punt if treeop0 is used in the same bb in a division
11897 or another modulo with the same divisor; we expect the
11898 division and modulo to be combined together. */
11899 if (use_stmt == stmt
11900 || gimple_bb (use_stmt) != gimple_bb (stmt))
11901 continue;
11902 if (!is_gimple_assign (use_stmt)
11903 || (gimple_assign_rhs_code (use_stmt) != TRUNC_DIV_EXPR
11904 && gimple_assign_rhs_code (use_stmt) != TRUNC_MOD_EXPR))
11905 continue;
11906 if (gimple_assign_rhs1 (use_stmt) != treeop0
11907 || !operand_equal_p (gimple_assign_rhs2 (use_stmt), treeop1, 0))
11908 continue;
11909 return code;
11910 }
11911
11912 w = wi::lrshift (w, shift);
11913 wide_int a = wide_int::from (w, prec + 1, UNSIGNED);
11914 wide_int b = wi::shifted_mask (prec, 1, false, prec + 1);
11915 wide_int m = wide_int::from (mod_inv (a, b), prec, UNSIGNED);
11916 tree c3 = wide_int_to_tree (type, m);
11917 tree c5 = NULL_TREE;
11918 wide_int d, e;
11919 if (sgn == UNSIGNED)
11920 {
11921 d = wi::divmod_trunc (wi::mask (prec, false, prec), w, UNSIGNED, &e);
11922 /* Use <= floor ((1<<prec) - 1) / C1 only if C2 <= ((1<<prec) - 1) % C1,
11923 otherwise use < or subtract one from C4. E.g. for
11924 x % 3U == 0 we transform this into x * 0xaaaaaaab <= 0x55555555, but
11925 x % 3U == 1 already needs to be
11926 (x - 1) * 0xaaaaaaabU <= 0x55555554. */
11927 if (!shift && wi::gtu_p (wi::to_wide (*arg1), e))
11928 d -= 1;
11929 if (shift)
11930 d = wi::lrshift (d, shift);
11931 }
11932 else
11933 {
11934 e = wi::udiv_trunc (wi::mask (prec - 1, false, prec), w);
11935 if (!shift)
11936 d = wi::lshift (e, 1);
11937 else
11938 {
11939 e = wi::bit_and (e, wi::mask (shift, true, prec));
11940 d = wi::lrshift (e, shift - 1);
11941 }
11942 c5 = wide_int_to_tree (type, e);
11943 }
11944 tree c4 = wide_int_to_tree (type, d);
11945
11946 rtx op0 = expand_normal (treeop0);
11947 treeop0 = make_tree (TREE_TYPE (treeop0), op0);
11948
11949 bool speed_p = optimize_insn_for_speed_p ();
11950
11951 do_pending_stack_adjust ();
11952
11953 location_t loc = gimple_location (stmt);
11954 struct separate_ops ops;
11955 ops.code = TRUNC_MOD_EXPR;
11956 ops.location = loc;
11957 ops.type = TREE_TYPE (treeop0);
11958 ops.op0 = treeop0;
11959 ops.op1 = treeop1;
11960 ops.op2 = NULL_TREE;
11961 start_sequence ();
11962 rtx mor = expand_expr_real_2 (&ops, NULL_RTX, TYPE_MODE (ops.type),
11963 EXPAND_NORMAL);
11964 rtx_insn *moinsns = get_insns ();
11965 end_sequence ();
11966
11967 unsigned mocost = seq_cost (moinsns, speed_p);
11968 mocost += rtx_cost (mor, mode, EQ, 0, speed_p);
11969 mocost += rtx_cost (expand_normal (*arg1), mode, EQ, 1, speed_p);
11970
11971 tree t = fold_convert_loc (loc, type, treeop0);
11972 if (!integer_zerop (*arg1))
11973 t = fold_build2_loc (loc, MINUS_EXPR, type, t, fold_convert (type, *arg1));
11974 t = fold_build2_loc (loc, MULT_EXPR, type, t, c3);
11975 if (sgn == SIGNED)
11976 t = fold_build2_loc (loc, PLUS_EXPR, type, t, c5);
11977 if (shift)
11978 {
11979 tree s = build_int_cst (NULL_TREE, shift);
11980 t = fold_build2_loc (loc, RROTATE_EXPR, type, t, s);
11981 }
11982
11983 start_sequence ();
11984 rtx mur = expand_normal (t);
11985 rtx_insn *muinsns = get_insns ();
11986 end_sequence ();
11987
11988 unsigned mucost = seq_cost (muinsns, speed_p);
11989 mucost += rtx_cost (mur, mode, LE, 0, speed_p);
11990 mucost += rtx_cost (expand_normal (c4), mode, LE, 1, speed_p);
11991
11992 if (mocost <= mucost)
11993 {
11994 emit_insn (moinsns);
11995 *arg0 = make_tree (TREE_TYPE (*arg0), mor);
11996 return code;
11997 }
11998
11999 emit_insn (muinsns);
12000 *arg0 = make_tree (type, mur);
12001 *arg1 = c4;
12002 return code == EQ_EXPR ? LE_EXPR : GT_EXPR;
12003 }
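/* A worked instance of the even-divisor case for 32-bit unsigned x:
   x % 6U == 0 has S == 1, C3 == 0xaaaaaaab (the inverse of 3 modulo
   1 << 32) and C4 == (0xffffffff / 3) >> 1 == 0x2aaaaaaa, so the
   comparison becomes

     ((x * 0xaaaaaaab) r>> 1) <= 0x2aaaaaaa

   with EQ_EXPR rewritten to LE_EXPR (and NE_EXPR to GT_EXPR), again
   subject to the rtx cost comparison above.  */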
12004 \f
12005 /* Generate code to calculate OPS, an exploded expression,
12006 using a store-flag instruction and return an rtx for the result.
12007 OPS reflects a comparison.
12008
12009 If TARGET is nonzero, store the result there if convenient.
12010
12011 Return zero if there is no suitable set-flag instruction
12012 available on this machine.
12013
12014 Once expand_expr has been called on the arguments of the comparison,
12015 we are committed to doing the store flag, since it is not safe to
12016 re-evaluate the expression. We emit the store-flag insn by calling
12017 emit_store_flag, but only expand the arguments if we have a reason
12018 to believe that emit_store_flag will be successful. If we think that
12019 it will, but it isn't, we have to simulate the store-flag with a
12020 set/jump/set sequence. */
12021
12022 static rtx
12023 do_store_flag (sepops ops, rtx target, machine_mode mode)
12024 {
12025 enum rtx_code code;
12026 tree arg0, arg1, type;
12027 machine_mode operand_mode;
12028 int unsignedp;
12029 rtx op0, op1;
12030 rtx subtarget = target;
12031 location_t loc = ops->location;
12032
12033 arg0 = ops->op0;
12034 arg1 = ops->op1;
12035
12036 /* Don't crash if the comparison was erroneous. */
12037 if (arg0 == error_mark_node || arg1 == error_mark_node)
12038 return const0_rtx;
12039
12040 type = TREE_TYPE (arg0);
12041 operand_mode = TYPE_MODE (type);
12042 unsignedp = TYPE_UNSIGNED (type);
12043
12044 /* We won't bother with BLKmode store-flag operations because it would mean
12045 passing a lot of information to emit_store_flag. */
12046 if (operand_mode == BLKmode)
12047 return 0;
12048
12049 /* We won't bother with store-flag operations involving function pointers
12050 when function pointers must be canonicalized before comparisons. */
12051 if (targetm.have_canonicalize_funcptr_for_compare ()
12052 && ((POINTER_TYPE_P (TREE_TYPE (arg0))
12053 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (arg0))))
12054 || (POINTER_TYPE_P (TREE_TYPE (arg1))
12055 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (arg1))))))
12056 return 0;
12057
12058 STRIP_NOPS (arg0);
12059 STRIP_NOPS (arg1);
12060
12061 /* For vector typed comparisons emit code to generate the desired
12062 all-ones or all-zeros mask. Conveniently use the VEC_COND_EXPR
12063 expander for this. */
12064 if (TREE_CODE (ops->type) == VECTOR_TYPE)
12065 {
12066 tree ifexp = build2 (ops->code, ops->type, arg0, arg1);
12067 if (VECTOR_BOOLEAN_TYPE_P (ops->type)
12068 && expand_vec_cmp_expr_p (TREE_TYPE (arg0), ops->type, ops->code))
12069 return expand_vec_cmp_expr (ops->type, ifexp, target);
12070 else
12071 {
12072 tree if_true = constant_boolean_node (true, ops->type);
12073 tree if_false = constant_boolean_node (false, ops->type);
12074 return expand_vec_cond_expr (ops->type, ifexp, if_true,
12075 if_false, target);
12076 }
12077 }
12078
12079 /* Optimize (x % C1) == C2 or (x % C1) != C2 if it is beneficial
12080 into (x - C2) * C3 <= C4. */
12081 if ((ops->code == EQ_EXPR || ops->code == NE_EXPR)
12082 && TREE_CODE (arg0) == SSA_NAME
12083 && TREE_CODE (arg1) == INTEGER_CST)
12084 {
12085 enum tree_code code = maybe_optimize_mod_cmp (ops->code, &arg0, &arg1);
12086 if (code != ops->code)
12087 {
12088 struct separate_ops nops = *ops;
12089 nops.code = ops->code = code;
12090 nops.op0 = arg0;
12091 nops.op1 = arg1;
12092 nops.type = TREE_TYPE (arg0);
12093 return do_store_flag (&nops, target, mode);
12094 }
12095 }
12096
12097 /* Get the rtx comparison code to use. We know that EXP is a comparison
12098 operation of some type. Some comparisons against 1 and -1 can be
12099 converted to comparisons with zero. Do so here so that the tests
12100 below will be aware that we have a comparison with zero. These
12101 tests will not catch constants in the first operand, but constants
12102 are rarely passed as the first operand. */
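/* For example, x < 1 becomes x <= 0 and x >= 1 becomes x > 0, while for
   signed operands x <= -1 becomes x < 0 and x > -1 becomes x >= 0.  */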
12103
12104 switch (ops->code)
12105 {
12106 case EQ_EXPR:
12107 code = EQ;
12108 break;
12109 case NE_EXPR:
12110 code = NE;
12111 break;
12112 case LT_EXPR:
12113 if (integer_onep (arg1))
12114 arg1 = integer_zero_node, code = unsignedp ? LEU : LE;
12115 else
12116 code = unsignedp ? LTU : LT;
12117 break;
12118 case LE_EXPR:
12119 if (! unsignedp && integer_all_onesp (arg1))
12120 arg1 = integer_zero_node, code = LT;
12121 else
12122 code = unsignedp ? LEU : LE;
12123 break;
12124 case GT_EXPR:
12125 if (! unsignedp && integer_all_onesp (arg1))
12126 arg1 = integer_zero_node, code = GE;
12127 else
12128 code = unsignedp ? GTU : GT;
12129 break;
12130 case GE_EXPR:
12131 if (integer_onep (arg1))
12132 arg1 = integer_zero_node, code = unsignedp ? GTU : GT;
12133 else
12134 code = unsignedp ? GEU : GE;
12135 break;
12136
12137 case UNORDERED_EXPR:
12138 code = UNORDERED;
12139 break;
12140 case ORDERED_EXPR:
12141 code = ORDERED;
12142 break;
12143 case UNLT_EXPR:
12144 code = UNLT;
12145 break;
12146 case UNLE_EXPR:
12147 code = UNLE;
12148 break;
12149 case UNGT_EXPR:
12150 code = UNGT;
12151 break;
12152 case UNGE_EXPR:
12153 code = UNGE;
12154 break;
12155 case UNEQ_EXPR:
12156 code = UNEQ;
12157 break;
12158 case LTGT_EXPR:
12159 code = LTGT;
12160 break;
12161
12162 default:
12163 gcc_unreachable ();
12164 }
12165
12166 /* Put a constant second. */
12167 if (TREE_CODE (arg0) == REAL_CST || TREE_CODE (arg0) == INTEGER_CST
12168 || TREE_CODE (arg0) == FIXED_CST)
12169 {
12170 std::swap (arg0, arg1);
12171 code = swap_condition (code);
12172 }
12173
12174 /* If this is an equality or inequality test of a single bit, we can
12175 do this by shifting the bit being tested to the low-order bit and
12176 masking the result with the constant 1. If the condition was EQ,
12177 we xor it with 1. This does not require an scc insn and is faster
12178 than an scc insn even if we have it.
12179
12180 The code to make this transformation was moved into fold_single_bit_test,
12181 so we just call into the folder and expand its result. */
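/* For instance, (x & 8) != 0 is expanded as (x >> 3) & 1, and (x & 8) == 0
   as ((x >> 3) & 1) ^ 1.  */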
12182
12183 if ((code == NE || code == EQ)
12184 && integer_zerop (arg1)
12185 && (TYPE_PRECISION (ops->type) != 1 || TYPE_UNSIGNED (ops->type)))
12186 {
12187 gimple *srcstmt = get_def_for_expr (arg0, BIT_AND_EXPR);
12188 if (srcstmt
12189 && integer_pow2p (gimple_assign_rhs2 (srcstmt)))
12190 {
12191 enum tree_code tcode = code == NE ? NE_EXPR : EQ_EXPR;
12192 tree type = lang_hooks.types.type_for_mode (mode, unsignedp);
12193 tree temp = fold_build2_loc (loc, BIT_AND_EXPR, TREE_TYPE (arg1),
12194 gimple_assign_rhs1 (srcstmt),
12195 gimple_assign_rhs2 (srcstmt));
12196 temp = fold_single_bit_test (loc, tcode, temp, arg1, type);
12197 if (temp)
12198 return expand_expr (temp, target, VOIDmode, EXPAND_NORMAL);
12199 }
12200 }
12201
12202 if (! get_subtarget (target)
12203 || GET_MODE (subtarget) != operand_mode)
12204 subtarget = 0;
12205
12206 expand_operands (arg0, arg1, subtarget, &op0, &op1, EXPAND_NORMAL);
12207
12208 if (target == 0)
12209 target = gen_reg_rtx (mode);
12210
12211 /* Try a cstore if possible. */
12212 return emit_store_flag_force (target, code, op0, op1,
12213 operand_mode, unsignedp,
12214 (TYPE_PRECISION (ops->type) == 1
12215 && !TYPE_UNSIGNED (ops->type)) ? -1 : 1);
12216 }
12217 \f
12218 /* Attempt to generate a casesi instruction. Returns 1 if successful,
12219 0 otherwise (i.e. if there is no casesi instruction).
12220
12221 DEFAULT_PROBABILITY is the probability of jumping to the default
12222 label. */
12223 int
12224 try_casesi (tree index_type, tree index_expr, tree minval, tree range,
12225 rtx table_label, rtx default_label, rtx fallback_label,
12226 profile_probability default_probability)
12227 {
12228 class expand_operand ops[5];
12229 scalar_int_mode index_mode = SImode;
12230 rtx op1, op2, index;
12231
12232 if (! targetm.have_casesi ())
12233 return 0;
12234
12235 /* The index must be some form of integer. Convert it to SImode. */
12236 scalar_int_mode omode = SCALAR_INT_TYPE_MODE (index_type);
12237 if (GET_MODE_BITSIZE (omode) > GET_MODE_BITSIZE (index_mode))
12238 {
12239 rtx rangertx = expand_normal (range);
12240
12241 /* We must handle the endpoints in the original mode. */
12242 index_expr = build2 (MINUS_EXPR, index_type,
12243 index_expr, minval);
12244 minval = integer_zero_node;
12245 index = expand_normal (index_expr);
12246 if (default_label)
12247 emit_cmp_and_jump_insns (rangertx, index, LTU, NULL_RTX,
12248 omode, 1, default_label,
12249 default_probability);
12250 /* Out-of-range index values were just dispatched to the default label above (or cannot occur when there is no default), so now we can safely truncate. */
12251 index = convert_to_mode (index_mode, index, 0);
12252 }
12253 else
12254 {
12255 if (omode != index_mode)
12256 {
12257 index_type = lang_hooks.types.type_for_mode (index_mode, 0);
12258 index_expr = fold_convert (index_type, index_expr);
12259 }
12260
12261 index = expand_normal (index_expr);
12262 }
12263
12264 do_pending_stack_adjust ();
12265
12266 op1 = expand_normal (minval);
12267 op2 = expand_normal (range);
12268
12269 create_input_operand (&ops[0], index, index_mode);
12270 create_convert_operand_from_type (&ops[1], op1, TREE_TYPE (minval));
12271 create_convert_operand_from_type (&ops[2], op2, TREE_TYPE (range));
12272 create_fixed_operand (&ops[3], table_label);
12273 create_fixed_operand (&ops[4], (default_label
12274 ? default_label
12275 : fallback_label));
12276 expand_jump_insn (targetm.code_for_casesi, 5, ops);
12277 return 1;
12278 }
12279
12280 /* Attempt to generate a tablejump instruction; same concept as try_casesi above. */
12281 /* Subroutine of try_tablejump, below.
12282
12283 INDEX is the value being switched on, with the lowest value
12284 in the table already subtracted.
12285 MODE is its expected mode (needed if INDEX is constant).
12286 RANGE is the length of the jump table.
12287 TABLE_LABEL is a CODE_LABEL rtx for the table itself.
12288
12289 DEFAULT_LABEL is a CODE_LABEL rtx to jump to if the
12290 index value is out of range.
12291 DEFAULT_PROBABILITY is the probability of jumping to
12292 the default label. */
12293
12294 static void
12295 do_tablejump (rtx index, machine_mode mode, rtx range, rtx table_label,
12296 rtx default_label, profile_probability default_probability)
12297 {
12298 rtx temp, vector;
12299
12300 if (INTVAL (range) > cfun->cfg->max_jumptable_ents)
12301 cfun->cfg->max_jumptable_ents = INTVAL (range);
12302
12303 /* Do an unsigned comparison (in the proper mode) between the index
12304 expression and the value which represents the length of the range.
12305 Since we just finished subtracting the lower bound of the range
12306 from the index expression, this comparison allows us to simultaneously
12307 check that the original index expression value is both greater than
12308 or equal to the minimum value of the range and less than or equal to
12309 the maximum value of the range. */
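/* For example, with case values 5 .. 9 the caller passes INDEX = x - 5 and
   RANGE = 4 (the table has RANGE + 1 entries); any x below 5 wraps around
   to a huge unsigned INDEX, so the single test INDEX > RANGE also rejects
   values below the low bound.  */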
12310
12311 if (default_label)
12312 emit_cmp_and_jump_insns (index, range, GTU, NULL_RTX, mode, 1,
12313 default_label, default_probability);
12314
12315 /* If index is in range, it must fit in Pmode.
12316 Convert to Pmode so we can index with it. */
12317 if (mode != Pmode)
12318 {
12319 unsigned int width;
12320
12321 /* We know the value of INDEX is between 0 and RANGE. If we have a
12322 sign-extended subreg, and RANGE does not have the sign bit set, then
12323 we have a value that is valid for both sign and zero extension. In
12324 this case, we get better code if we sign extend. */
12325 if (GET_CODE (index) == SUBREG
12326 && SUBREG_PROMOTED_VAR_P (index)
12327 && SUBREG_PROMOTED_SIGNED_P (index)
12328 && ((width = GET_MODE_PRECISION (as_a <scalar_int_mode> (mode)))
12329 <= HOST_BITS_PER_WIDE_INT)
12330 && ! (UINTVAL (range) & (HOST_WIDE_INT_1U << (width - 1))))
12331 index = convert_to_mode (Pmode, index, 0);
12332 else
12333 index = convert_to_mode (Pmode, index, 1);
12334 }
12335
12336 /* Don't let a MEM slip through, because then INDEX that comes
12337 out of PIC_CASE_VECTOR_ADDRESS won't be a valid address,
12338 and break_out_memory_refs will go to work on it and mess it up. */
12339 #ifdef PIC_CASE_VECTOR_ADDRESS
12340 if (flag_pic && !REG_P (index))
12341 index = copy_to_mode_reg (Pmode, index);
12342 #endif
12343
12344 /* ??? The only correct use of CASE_VECTOR_MODE is the one inside the
12345 GET_MODE_SIZE, because this indicates how large insns are. The other
12346 uses should all be Pmode, because they are addresses. This code
12347 could fail if addresses and insns are not the same size. */
12348 index = simplify_gen_binary (MULT, Pmode, index,
12349 gen_int_mode (GET_MODE_SIZE (CASE_VECTOR_MODE),
12350 Pmode));
12351 index = simplify_gen_binary (PLUS, Pmode, index,
12352 gen_rtx_LABEL_REF (Pmode, table_label));
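/* At this point INDEX is table_label plus the scaled index, i.e. the byte
   address of the selected dispatch-table entry.  */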
12353
12354 #ifdef PIC_CASE_VECTOR_ADDRESS
12355 if (flag_pic)
12356 index = PIC_CASE_VECTOR_ADDRESS (index);
12357 else
12358 #endif
12359 index = memory_address (CASE_VECTOR_MODE, index);
12360 temp = gen_reg_rtx (CASE_VECTOR_MODE);
12361 vector = gen_const_mem (CASE_VECTOR_MODE, index);
12362 convert_move (temp, vector, 0);
12363
12364 emit_jump_insn (targetm.gen_tablejump (temp, table_label));
12365
12366 /* If we are generating PIC code or if the table is PC-relative, the
12367 table and JUMP_INSN must be adjacent, so don't output a BARRIER. */
12368 if (! CASE_VECTOR_PC_RELATIVE && ! flag_pic)
12369 emit_barrier ();
12370 }
12371
12372 int
12373 try_tablejump (tree index_type, tree index_expr, tree minval, tree range,
12374 rtx table_label, rtx default_label,
12375 profile_probability default_probability)
12376 {
12377 rtx index;
12378
12379 if (! targetm.have_tablejump ())
12380 return 0;
12381
12382 index_expr = fold_build2 (MINUS_EXPR, index_type,
12383 fold_convert (index_type, index_expr),
12384 fold_convert (index_type, minval));
12385 index = expand_normal (index_expr);
12386 do_pending_stack_adjust ();
12387
12388 do_tablejump (index, TYPE_MODE (index_type),
12389 convert_modes (TYPE_MODE (index_type),
12390 TYPE_MODE (TREE_TYPE (range)),
12391 expand_normal (range),
12392 TYPE_UNSIGNED (TREE_TYPE (range))),
12393 table_label, default_label, default_probability);
12394 return 1;
12395 }
12396
12397 /* Return a CONST_VECTOR rtx representing vector mask for
12398 a VECTOR_CST of booleans. */
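/* For example, the boolean vector constant { 0, 1, 1, 0 } is built as the
   integer vector { 0, -1, -1, 0 }, with each element in the vector's inner
   mode.  */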
12399 static rtx
12400 const_vector_mask_from_tree (tree exp)
12401 {
12402 machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
12403 machine_mode inner = GET_MODE_INNER (mode);
12404
12405 rtx_vector_builder builder (mode, VECTOR_CST_NPATTERNS (exp),
12406 VECTOR_CST_NELTS_PER_PATTERN (exp));
12407 unsigned int count = builder.encoded_nelts ();
12408 for (unsigned int i = 0; i < count; ++i)
12409 {
12410 tree elt = VECTOR_CST_ELT (exp, i);
12411 gcc_assert (TREE_CODE (elt) == INTEGER_CST);
12412 if (integer_zerop (elt))
12413 builder.quick_push (CONST0_RTX (inner));
12414 else if (integer_onep (elt)
12415 || integer_minus_onep (elt))
12416 builder.quick_push (CONSTM1_RTX (inner));
12417 else
12418 gcc_unreachable ();
12419 }
12420 return builder.build ();
12421 }
12422
12423 /* EXP is a VECTOR_CST in which each element is either all-zeros or all-ones.
12424 Return a constant scalar rtx of mode MODE in which bit X is set if element
12425 X of EXP is nonzero. */
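/* For example, { -1, 0, -1, -1 } yields the scalar constant 13 (binary
   1101), i.e. bits 0, 2 and 3 set.  */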
12426 static rtx
12427 const_scalar_mask_from_tree (scalar_int_mode mode, tree exp)
12428 {
12429 wide_int res = wi::zero (GET_MODE_PRECISION (mode));
12430 tree elt;
12431
12432 /* The result has a fixed number of bits so the input must too. */
12433 unsigned int nunits = VECTOR_CST_NELTS (exp).to_constant ();
12434 for (unsigned int i = 0; i < nunits; ++i)
12435 {
12436 elt = VECTOR_CST_ELT (exp, i);
12437 gcc_assert (TREE_CODE (elt) == INTEGER_CST);
12438 if (integer_all_onesp (elt))
12439 res = wi::set_bit (res, i);
12440 else
12441 gcc_assert (integer_zerop (elt));
12442 }
12443
12444 return immed_wide_int_const (res, mode);
12445 }
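/* A plain-C sketch of the same packing (illustration only, not enabled in
   the build): LANES holds 0 or -1 per element and bit I of the result is
   set iff lane I is nonzero; NUNITS is assumed to be at most 32 here.  */
#if 0
static unsigned int
pack_boolean_lanes (const int *lanes, unsigned int nunits)
{
  unsigned int mask = 0;
  for (unsigned int i = 0; i < nunits; ++i)
    if (lanes[i] != 0)
      mask |= 1u << i;
  return mask;
}
#endif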
12446
12447 /* Return a CONST_VECTOR rtx for a VECTOR_CST tree. */
12448 static rtx
12449 const_vector_from_tree (tree exp)
12450 {
12451 machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
12452
12453 if (initializer_zerop (exp))
12454 return CONST0_RTX (mode);
12455
12456 if (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (exp)))
12457 return const_vector_mask_from_tree (exp);
12458
12459 machine_mode inner = GET_MODE_INNER (mode);
12460
12461 rtx_vector_builder builder (mode, VECTOR_CST_NPATTERNS (exp),
12462 VECTOR_CST_NELTS_PER_PATTERN (exp));
12463 unsigned int count = builder.encoded_nelts ();
12464 for (unsigned int i = 0; i < count; ++i)
12465 {
12466 tree elt = VECTOR_CST_ELT (exp, i);
12467 if (TREE_CODE (elt) == REAL_CST)
12468 builder.quick_push (const_double_from_real_value (TREE_REAL_CST (elt),
12469 inner));
12470 else if (TREE_CODE (elt) == FIXED_CST)
12471 builder.quick_push (CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (elt),
12472 inner));
12473 else
12474 builder.quick_push (immed_wide_int_const (wi::to_poly_wide (elt),
12475 inner));
12476 }
12477 return builder.build ();
12478 }
12479
12480 /* Build a decl for a personality function given a language prefix. */
12481
12482 tree
12483 build_personality_function (const char *lang)
12484 {
12485 const char *unwind_and_version;
12486 tree decl, type;
12487 char *name;
12488
12489 switch (targetm_common.except_unwind_info (&global_options))
12490 {
12491 case UI_NONE:
12492 return NULL;
12493 case UI_SJLJ:
12494 unwind_and_version = "_sj0";
12495 break;
12496 case UI_DWARF2:
12497 case UI_TARGET:
12498 unwind_and_version = "_v0";
12499 break;
12500 case UI_SEH:
12501 unwind_and_version = "_seh0";
12502 break;
12503 default:
12504 gcc_unreachable ();
12505 }
12506
12507 name = ACONCAT (("__", lang, "_personality", unwind_and_version, NULL));
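/* For instance, the C++ front end passes "gxx" as LANG, so with DWARF-2
   unwind info the name built here is the familiar __gxx_personality_v0.  */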
12508
12509 type = build_function_type_list (integer_type_node, integer_type_node,
12510 long_long_unsigned_type_node,
12511 ptr_type_node, ptr_type_node, NULL_TREE);
12512 decl = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
12513 get_identifier (name), type);
12514 DECL_ARTIFICIAL (decl) = 1;
12515 DECL_EXTERNAL (decl) = 1;
12516 TREE_PUBLIC (decl) = 1;
12517
12518 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
12519 are the flags assigned by targetm.encode_section_info. */
12520 SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);
12521
12522 return decl;
12523 }
12524
12525 /* Extracts the personality function of DECL and returns the corresponding
12526 libfunc. */
12527
12528 rtx
12529 get_personality_function (tree decl)
12530 {
12531 tree personality = DECL_FUNCTION_PERSONALITY (decl);
12532 enum eh_personality_kind pk;
12533
12534 pk = function_needs_eh_personality (DECL_STRUCT_FUNCTION (decl));
12535 if (pk == eh_personality_none)
12536 return NULL;
12537
12538 if (!personality
12539 && pk == eh_personality_any)
12540 personality = lang_hooks.eh_personality ();
12541
12542 if (pk == eh_personality_lang)
12543 gcc_assert (personality != NULL_TREE);
12544
12545 return XEXP (DECL_RTL (personality), 0);
12546 }
12547
12548 /* Returns a tree for the size of EXP in bytes. */
12549
12550 static tree
12551 tree_expr_size (const_tree exp)
12552 {
12553 if (DECL_P (exp)
12554 && DECL_SIZE_UNIT (exp) != 0)
12555 return DECL_SIZE_UNIT (exp);
12556 else
12557 return size_in_bytes (TREE_TYPE (exp));
12558 }
12559
12560 /* Return an rtx for the size in bytes of the value of EXP. */
12561
12562 rtx
12563 expr_size (tree exp)
12564 {
12565 tree size;
12566
12567 if (TREE_CODE (exp) == WITH_SIZE_EXPR)
12568 size = TREE_OPERAND (exp, 1);
12569 else
12570 {
12571 size = tree_expr_size (exp);
12572 gcc_assert (size);
12573 gcc_assert (size == SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, exp));
12574 }
12575
12576 return expand_expr (size, NULL_RTX, TYPE_MODE (sizetype), EXPAND_NORMAL);
12577 }
12578
12579 /* Return a wide integer for the size in bytes of the value of EXP, or -1
12580 if the size can vary or is larger than an integer. */
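/* For example, a decl of type char[16] yields 16, while a variable-length
   array, whose size is not a compile-time constant, yields -1.  */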
12581
12582 static HOST_WIDE_INT
12583 int_expr_size (tree exp)
12584 {
12585 tree size;
12586
12587 if (TREE_CODE (exp) == WITH_SIZE_EXPR)
12588 size = TREE_OPERAND (exp, 1);
12589 else
12590 {
12591 size = tree_expr_size (exp);
12592 gcc_assert (size);
12593 }
12594
12595 if (size == 0 || !tree_fits_shwi_p (size))
12596 return -1;
12597
12598 return tree_to_shwi (size);
12599 }