1 /* Convert tree expression to rtl instructions, for GNU compiler.
2 Copyright (C) 1988-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "backend.h"
24 #include "target.h"
25 #include "rtl.h"
26 #include "tree.h"
27 #include "gimple.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "tm_p.h"
31 #include "ssa.h"
32 #include "expmed.h"
33 #include "optabs.h"
34 #include "regs.h"
35 #include "emit-rtl.h"
36 #include "recog.h"
37 #include "cgraph.h"
38 #include "diagnostic.h"
39 #include "alias.h"
40 #include "fold-const.h"
41 #include "stor-layout.h"
42 #include "attribs.h"
43 #include "varasm.h"
44 #include "except.h"
45 #include "insn-attr.h"
46 #include "dojump.h"
47 #include "explow.h"
48 #include "calls.h"
49 #include "stmt.h"
50 /* Include expr.h after insn-config.h so we get HAVE_conditional_move. */
51 #include "expr.h"
52 #include "optabs-tree.h"
53 #include "libfuncs.h"
54 #include "reload.h"
55 #include "langhooks.h"
56 #include "common/common-target.h"
57 #include "tree-dfa.h"
58 #include "tree-ssa-live.h"
59 #include "tree-outof-ssa.h"
60 #include "tree-ssa-address.h"
61 #include "builtins.h"
62 #include "ccmp.h"
63 #include "gimple-fold.h"
64 #include "rtx-vector-builder.h"
65
66
67 /* If this is nonzero, we do not bother generating VOLATILE
68 around volatile memory references, and we are willing to
69 output indirect addresses. If cse is to follow, we reject
70 indirect addresses so a useful potential cse is generated;
71 if it is used only once, instruction combination will produce
72 the same indirect address eventually. */
73 int cse_not_expected;
74
75 static bool block_move_libcall_safe_for_call_parm (void);
76 static bool emit_block_move_via_pattern (rtx, rtx, rtx, unsigned, unsigned,
77 HOST_WIDE_INT, unsigned HOST_WIDE_INT,
78 unsigned HOST_WIDE_INT,
79 unsigned HOST_WIDE_INT, bool);
80 static void emit_block_move_via_loop (rtx, rtx, rtx, unsigned);
81 static void clear_by_pieces (rtx, unsigned HOST_WIDE_INT, unsigned int);
82 static rtx_insn *compress_float_constant (rtx, rtx);
83 static rtx get_subtarget (rtx);
84 static void store_constructor (tree, rtx, int, poly_int64, bool);
85 static rtx store_field (rtx, poly_int64, poly_int64, poly_uint64, poly_uint64,
86 machine_mode, tree, alias_set_type, bool, bool);
87
88 static unsigned HOST_WIDE_INT highest_pow2_factor_for_target (const_tree, const_tree);
89
90 static int is_aligning_offset (const_tree, const_tree);
91 static rtx reduce_to_bit_field_precision (rtx, rtx, tree);
92 static rtx do_store_flag (sepops, rtx, machine_mode);
93 #ifdef PUSH_ROUNDING
94 static void emit_single_push_insn (machine_mode, rtx, tree);
95 #endif
96 static void do_tablejump (rtx, machine_mode, rtx, rtx, rtx,
97 profile_probability);
98 static rtx const_vector_from_tree (tree);
99 static rtx const_scalar_mask_from_tree (scalar_int_mode, tree);
100 static tree tree_expr_size (const_tree);
101 static HOST_WIDE_INT int_expr_size (tree);
102 static void convert_mode_scalar (rtx, rtx, int);
103
104 \f
105 /* This is run to set up which modes can be used
106 directly in memory and to initialize the block move optab. It is run
107 at the beginning of compilation and when the target is reinitialized. */
108
109 void
110 init_expr_target (void)
111 {
112 rtx pat;
113 int num_clobbers;
114 rtx mem, mem1;
115 rtx reg;
116
117 /* Try indexing by frame ptr and try by stack ptr.
118 It is known that on the Convex the stack ptr isn't a valid index.
119 With luck, one or the other is valid on any machine. */
120 mem = gen_rtx_MEM (word_mode, stack_pointer_rtx);
121 mem1 = gen_rtx_MEM (word_mode, frame_pointer_rtx);
122
123 /* A scratch register we can modify in-place below to avoid
124 useless RTL allocations. */
125 reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
126
127 rtx_insn *insn = as_a<rtx_insn *> (rtx_alloc (INSN));
128 pat = gen_rtx_SET (NULL_RTX, NULL_RTX);
129 PATTERN (insn) = pat;
130
131 for (machine_mode mode = VOIDmode; (int) mode < NUM_MACHINE_MODES;
132 mode = (machine_mode) ((int) mode + 1))
133 {
134 int regno;
135
136 direct_load[(int) mode] = direct_store[(int) mode] = 0;
137 PUT_MODE (mem, mode);
138 PUT_MODE (mem1, mode);
139
140 /* See if there is some register that can be used in this mode and
141 directly loaded or stored from memory. */
142
143 if (mode != VOIDmode && mode != BLKmode)
144 for (regno = 0; regno < FIRST_PSEUDO_REGISTER
145 && (direct_load[(int) mode] == 0 || direct_store[(int) mode] == 0);
146 regno++)
147 {
148 if (!targetm.hard_regno_mode_ok (regno, mode))
149 continue;
150
151 set_mode_and_regno (reg, mode, regno);
152
153 SET_SRC (pat) = mem;
154 SET_DEST (pat) = reg;
155 if (recog (pat, insn, &num_clobbers) >= 0)
156 direct_load[(int) mode] = 1;
157
158 SET_SRC (pat) = mem1;
159 SET_DEST (pat) = reg;
160 if (recog (pat, insn, &num_clobbers) >= 0)
161 direct_load[(int) mode] = 1;
162
163 SET_SRC (pat) = reg;
164 SET_DEST (pat) = mem;
165 if (recog (pat, insn, &num_clobbers) >= 0)
166 direct_store[(int) mode] = 1;
167
168 SET_SRC (pat) = reg;
169 SET_DEST (pat) = mem1;
170 if (recog (pat, insn, &num_clobbers) >= 0)
171 direct_store[(int) mode] = 1;
172 }
173 }
174
175 mem = gen_rtx_MEM (VOIDmode, gen_raw_REG (Pmode, LAST_VIRTUAL_REGISTER + 1));
176
177 opt_scalar_float_mode mode_iter;
178 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_FLOAT)
179 {
180 scalar_float_mode mode = mode_iter.require ();
181 scalar_float_mode srcmode;
182 FOR_EACH_MODE_UNTIL (srcmode, mode)
183 {
184 enum insn_code ic;
185
186 ic = can_extend_p (mode, srcmode, 0);
187 if (ic == CODE_FOR_nothing)
188 continue;
189
190 PUT_MODE (mem, srcmode);
191
192 if (insn_operand_matches (ic, 1, mem))
193 float_extend_from_mem[mode][srcmode] = true;
194 }
195 }
196 }
197
198 /* This is run at the start of compiling a function. */
199
200 void
201 init_expr (void)
202 {
203 memset (&crtl->expr, 0, sizeof (crtl->expr));
204 }
205 \f
206 /* Copy data from FROM to TO, where the machine modes are not the same.
207 Both modes may be integer, or both may be floating, or both may be
208 fixed-point.
209 UNSIGNEDP should be nonzero if FROM is an unsigned type.
210 This causes zero-extension instead of sign-extension. */
211
212 void
213 convert_move (rtx to, rtx from, int unsignedp)
214 {
215 machine_mode to_mode = GET_MODE (to);
216 machine_mode from_mode = GET_MODE (from);
217
218 gcc_assert (to_mode != BLKmode);
219 gcc_assert (from_mode != BLKmode);
220
221 /* If the source and destination are already the same, then there's
222 nothing to do. */
223 if (to == from)
224 return;
225
226 /* If FROM is a SUBREG that indicates that we have already done at least
227 the required extension, strip it. We don't handle such SUBREGs as
228 TO here. */
229
230 scalar_int_mode to_int_mode;
231 if (GET_CODE (from) == SUBREG
232 && SUBREG_PROMOTED_VAR_P (from)
233 && is_a <scalar_int_mode> (to_mode, &to_int_mode)
234 && (GET_MODE_PRECISION (subreg_promoted_mode (from))
235 >= GET_MODE_PRECISION (to_int_mode))
236 && SUBREG_CHECK_PROMOTED_SIGN (from, unsignedp))
237 {
238 from = gen_lowpart (to_int_mode, SUBREG_REG (from));
239 from_mode = to_int_mode;
240 }
241
242 gcc_assert (GET_CODE (to) != SUBREG || !SUBREG_PROMOTED_VAR_P (to));
243
244 if (to_mode == from_mode
245 || (from_mode == VOIDmode && CONSTANT_P (from)))
246 {
247 emit_move_insn (to, from);
248 return;
249 }
250
251 if (VECTOR_MODE_P (to_mode) || VECTOR_MODE_P (from_mode))
252 {
253 gcc_assert (known_eq (GET_MODE_BITSIZE (from_mode),
254 GET_MODE_BITSIZE (to_mode)));
255
256 if (VECTOR_MODE_P (to_mode))
257 from = simplify_gen_subreg (to_mode, from, GET_MODE (from), 0);
258 else
259 to = simplify_gen_subreg (from_mode, to, GET_MODE (to), 0);
260
261 emit_move_insn (to, from);
262 return;
263 }
264
265 if (GET_CODE (to) == CONCAT && GET_CODE (from) == CONCAT)
266 {
267 convert_move (XEXP (to, 0), XEXP (from, 0), unsignedp);
268 convert_move (XEXP (to, 1), XEXP (from, 1), unsignedp);
269 return;
270 }
271
272 convert_mode_scalar (to, from, unsignedp);
273 }
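
/* Illustrative sketch only (hypothetical helper, not part of GCC): widen
   an existing SImode pseudo SRC both ways.  Whether this emits a single
   extend insn, a multi-word sequence or a libcall is decided by
   convert_move / convert_mode_scalar.  */
#if 0
static void
example_widen_si_to_di (rtx src)
{
  rtx sext = gen_reg_rtx (DImode);
  rtx zext = gen_reg_rtx (DImode);
  convert_move (sext, src, 0);	/* UNSIGNEDP == 0: sign-extend.  */
  convert_move (zext, src, 1);	/* UNSIGNEDP != 0: zero-extend.  */
}
#endif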
274
275 /* Like convert_move, but deals only with scalar modes. */
276
277 static void
278 convert_mode_scalar (rtx to, rtx from, int unsignedp)
279 {
280 /* Both modes should be scalar types. */
281 scalar_mode from_mode = as_a <scalar_mode> (GET_MODE (from));
282 scalar_mode to_mode = as_a <scalar_mode> (GET_MODE (to));
283 bool to_real = SCALAR_FLOAT_MODE_P (to_mode);
284 bool from_real = SCALAR_FLOAT_MODE_P (from_mode);
285 enum insn_code code;
286 rtx libcall;
287
288 gcc_assert (to_real == from_real);
289
290 /* rtx code for making an equivalent value. */
291 enum rtx_code equiv_code = (unsignedp < 0 ? UNKNOWN
292 : (unsignedp ? ZERO_EXTEND : SIGN_EXTEND));
293
294 if (to_real)
295 {
296 rtx value;
297 rtx_insn *insns;
298 convert_optab tab;
299
300 gcc_assert ((GET_MODE_PRECISION (from_mode)
301 != GET_MODE_PRECISION (to_mode))
302 || (DECIMAL_FLOAT_MODE_P (from_mode)
303 != DECIMAL_FLOAT_MODE_P (to_mode)));
304
305 if (GET_MODE_PRECISION (from_mode) == GET_MODE_PRECISION (to_mode))
306 /* Conversion between decimal float and binary float, same size. */
307 tab = DECIMAL_FLOAT_MODE_P (from_mode) ? trunc_optab : sext_optab;
308 else if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode))
309 tab = sext_optab;
310 else
311 tab = trunc_optab;
312
313 /* Try converting directly if the insn is supported. */
314
315 code = convert_optab_handler (tab, to_mode, from_mode);
316 if (code != CODE_FOR_nothing)
317 {
318 emit_unop_insn (code, to, from,
319 tab == sext_optab ? FLOAT_EXTEND : FLOAT_TRUNCATE);
320 return;
321 }
322
323 /* Otherwise use a libcall. */
324 libcall = convert_optab_libfunc (tab, to_mode, from_mode);
325
326 /* Is this conversion implemented yet? */
327 gcc_assert (libcall);
328
329 start_sequence ();
330 value = emit_library_call_value (libcall, NULL_RTX, LCT_CONST, to_mode,
331 from, from_mode);
332 insns = get_insns ();
333 end_sequence ();
334 emit_libcall_block (insns, to, value,
335 tab == trunc_optab ? gen_rtx_FLOAT_TRUNCATE (to_mode,
336 from)
337 : gen_rtx_FLOAT_EXTEND (to_mode, from));
338 return;
339 }
340
341 /* Handle pointer conversion. */ /* SPEE 900220. */
342 /* If the target has a converter from FROM_MODE to TO_MODE, use it. */
343 {
344 convert_optab ctab;
345
346 if (GET_MODE_PRECISION (from_mode) > GET_MODE_PRECISION (to_mode))
347 ctab = trunc_optab;
348 else if (unsignedp)
349 ctab = zext_optab;
350 else
351 ctab = sext_optab;
352
353 if (convert_optab_handler (ctab, to_mode, from_mode)
354 != CODE_FOR_nothing)
355 {
356 emit_unop_insn (convert_optab_handler (ctab, to_mode, from_mode),
357 to, from, UNKNOWN);
358 return;
359 }
360 }
361
362 /* Targets are expected to provide conversion insns between PxImode and
363 xImode for all MODE_PARTIAL_INT modes they use, but no others. */
364 if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT)
365 {
366 scalar_int_mode full_mode
367 = smallest_int_mode_for_size (GET_MODE_BITSIZE (to_mode));
368
369 gcc_assert (convert_optab_handler (trunc_optab, to_mode, full_mode)
370 != CODE_FOR_nothing);
371
372 if (full_mode != from_mode)
373 from = convert_to_mode (full_mode, from, unsignedp);
374 emit_unop_insn (convert_optab_handler (trunc_optab, to_mode, full_mode),
375 to, from, UNKNOWN);
376 return;
377 }
378 if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT)
379 {
380 rtx new_from;
381 scalar_int_mode full_mode
382 = smallest_int_mode_for_size (GET_MODE_BITSIZE (from_mode));
383 convert_optab ctab = unsignedp ? zext_optab : sext_optab;
384 enum insn_code icode;
385
386 icode = convert_optab_handler (ctab, full_mode, from_mode);
387 gcc_assert (icode != CODE_FOR_nothing);
388
389 if (to_mode == full_mode)
390 {
391 emit_unop_insn (icode, to, from, UNKNOWN);
392 return;
393 }
394
395 new_from = gen_reg_rtx (full_mode);
396 emit_unop_insn (icode, new_from, from, UNKNOWN);
397
398 /* else proceed to integer conversions below. */
399 from_mode = full_mode;
400 from = new_from;
401 }
402
403 /* Make sure both are fixed-point modes or both are not. */
404 gcc_assert (ALL_SCALAR_FIXED_POINT_MODE_P (from_mode) ==
405 ALL_SCALAR_FIXED_POINT_MODE_P (to_mode));
406 if (ALL_SCALAR_FIXED_POINT_MODE_P (from_mode))
407 {
408 /* If we widen from_mode to to_mode and they are in the same class,
409 we won't saturate the result.
410 Otherwise, always saturate the result to play safe. */
411 if (GET_MODE_CLASS (from_mode) == GET_MODE_CLASS (to_mode)
412 && GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
413 expand_fixed_convert (to, from, 0, 0);
414 else
415 expand_fixed_convert (to, from, 0, 1);
416 return;
417 }
418
419 /* Now both modes are integers. */
420
421 /* Handle expanding beyond a word. */
422 if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode)
423 && GET_MODE_PRECISION (to_mode) > BITS_PER_WORD)
424 {
425 rtx_insn *insns;
426 rtx lowpart;
427 rtx fill_value;
428 rtx lowfrom;
429 int i;
430 scalar_mode lowpart_mode;
431 int nwords = CEIL (GET_MODE_SIZE (to_mode), UNITS_PER_WORD);
432
433 /* Try converting directly if the insn is supported. */
434 if ((code = can_extend_p (to_mode, from_mode, unsignedp))
435 != CODE_FOR_nothing)
436 {
437 /* If FROM is a SUBREG, put it into a register. Do this
438 so that we always generate the same set of insns for
439 better cse'ing; if an intermediate assignment occurred,
440 we won't be doing the operation directly on the SUBREG. */
441 if (optimize > 0 && GET_CODE (from) == SUBREG)
442 from = force_reg (from_mode, from);
443 emit_unop_insn (code, to, from, equiv_code);
444 return;
445 }
446 /* Next, try converting via full word. */
447 else if (GET_MODE_PRECISION (from_mode) < BITS_PER_WORD
448 && ((code = can_extend_p (to_mode, word_mode, unsignedp))
449 != CODE_FOR_nothing))
450 {
451 rtx word_to = gen_reg_rtx (word_mode);
452 if (REG_P (to))
453 {
454 if (reg_overlap_mentioned_p (to, from))
455 from = force_reg (from_mode, from);
456 emit_clobber (to);
457 }
458 convert_move (word_to, from, unsignedp);
459 emit_unop_insn (code, to, word_to, equiv_code);
460 return;
461 }
462
463 /* No special multiword conversion insn; do it by hand. */
464 start_sequence ();
465
466 /* Since we will turn this into a no conflict block, we must ensure
467 the source does not overlap the target, so force it into an isolated
468 register whenever it might. Likewise for any MEM input, since the
469 conversion sequence might require several references to it and we
470 must ensure we're getting the same value every time. */
471
472 if (MEM_P (from) || reg_overlap_mentioned_p (to, from))
473 from = force_reg (from_mode, from);
474
475 /* Get a copy of FROM widened to a word, if necessary. */
476 if (GET_MODE_PRECISION (from_mode) < BITS_PER_WORD)
477 lowpart_mode = word_mode;
478 else
479 lowpart_mode = from_mode;
480
481 lowfrom = convert_to_mode (lowpart_mode, from, unsignedp);
482
483 lowpart = gen_lowpart (lowpart_mode, to);
484 emit_move_insn (lowpart, lowfrom);
485
486 /* Compute the value to put in each remaining word. */
487 if (unsignedp)
488 fill_value = const0_rtx;
489 else
490 fill_value = emit_store_flag_force (gen_reg_rtx (word_mode),
491 LT, lowfrom, const0_rtx,
492 lowpart_mode, 0, -1);
493
494 /* Fill the remaining words. */
495 for (i = GET_MODE_SIZE (lowpart_mode) / UNITS_PER_WORD; i < nwords; i++)
496 {
497 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
498 rtx subword = operand_subword (to, index, 1, to_mode);
499
500 gcc_assert (subword);
501
502 if (fill_value != subword)
503 emit_move_insn (subword, fill_value);
504 }
505
506 insns = get_insns ();
507 end_sequence ();
508
509 emit_insn (insns);
510 return;
511 }
512
513 /* Truncating multi-word to a word or less. */
514 if (GET_MODE_PRECISION (from_mode) > BITS_PER_WORD
515 && GET_MODE_PRECISION (to_mode) <= BITS_PER_WORD)
516 {
517 if (!((MEM_P (from)
518 && ! MEM_VOLATILE_P (from)
519 && direct_load[(int) to_mode]
520 && ! mode_dependent_address_p (XEXP (from, 0),
521 MEM_ADDR_SPACE (from)))
522 || REG_P (from)
523 || GET_CODE (from) == SUBREG))
524 from = force_reg (from_mode, from);
525 convert_move (to, gen_lowpart (word_mode, from), 0);
526 return;
527 }
528
529 /* Now follow all the conversions between integers
530 no more than a word long. */
531
532 /* For truncation, usually we can just refer to FROM in a narrower mode. */
533 if (GET_MODE_BITSIZE (to_mode) < GET_MODE_BITSIZE (from_mode)
534 && TRULY_NOOP_TRUNCATION_MODES_P (to_mode, from_mode))
535 {
536 if (!((MEM_P (from)
537 && ! MEM_VOLATILE_P (from)
538 && direct_load[(int) to_mode]
539 && ! mode_dependent_address_p (XEXP (from, 0),
540 MEM_ADDR_SPACE (from)))
541 || REG_P (from)
542 || GET_CODE (from) == SUBREG))
543 from = force_reg (from_mode, from);
544 if (REG_P (from) && REGNO (from) < FIRST_PSEUDO_REGISTER
545 && !targetm.hard_regno_mode_ok (REGNO (from), to_mode))
546 from = copy_to_reg (from);
547 emit_move_insn (to, gen_lowpart (to_mode, from));
548 return;
549 }
550
551 /* Handle extension. */
552 if (GET_MODE_PRECISION (to_mode) > GET_MODE_PRECISION (from_mode))
553 {
554 /* Convert directly if that works. */
555 if ((code = can_extend_p (to_mode, from_mode, unsignedp))
556 != CODE_FOR_nothing)
557 {
558 emit_unop_insn (code, to, from, equiv_code);
559 return;
560 }
561 else
562 {
563 scalar_mode intermediate;
564 rtx tmp;
565 int shift_amount;
566
567 /* Search for a mode to convert via. */
568 opt_scalar_mode intermediate_iter;
569 FOR_EACH_MODE_FROM (intermediate_iter, from_mode)
570 {
571 scalar_mode intermediate = intermediate_iter.require ();
572 if (((can_extend_p (to_mode, intermediate, unsignedp)
573 != CODE_FOR_nothing)
574 || (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (intermediate)
575 && TRULY_NOOP_TRUNCATION_MODES_P (to_mode,
576 intermediate)))
577 && (can_extend_p (intermediate, from_mode, unsignedp)
578 != CODE_FOR_nothing))
579 {
580 convert_move (to, convert_to_mode (intermediate, from,
581 unsignedp), unsignedp);
582 return;
583 }
584 }
585
586 /* No suitable intermediate mode.
587 Generate what we need with shifts. */
588 shift_amount = (GET_MODE_PRECISION (to_mode)
589 - GET_MODE_PRECISION (from_mode));
590 from = gen_lowpart (to_mode, force_reg (from_mode, from));
591 tmp = expand_shift (LSHIFT_EXPR, to_mode, from, shift_amount,
592 to, unsignedp);
593 tmp = expand_shift (RSHIFT_EXPR, to_mode, tmp, shift_amount,
594 to, unsignedp);
595 if (tmp != to)
596 emit_move_insn (to, tmp);
597 return;
598 }
599 }
600
601 /* Support special truncate insns for certain modes. */
602 if (convert_optab_handler (trunc_optab, to_mode,
603 from_mode) != CODE_FOR_nothing)
604 {
605 emit_unop_insn (convert_optab_handler (trunc_optab, to_mode, from_mode),
606 to, from, UNKNOWN);
607 return;
608 }
609
610 /* Handle truncation of volatile memrefs, and so on;
611 the things that couldn't be truncated directly,
612 and for which there was no special instruction.
613
614 ??? Code above formerly short-circuited this, for most integer
615 mode pairs, with a force_reg in from_mode followed by a recursive
616 call to this routine. Appears always to have been wrong. */
617 if (GET_MODE_PRECISION (to_mode) < GET_MODE_PRECISION (from_mode))
618 {
619 rtx temp = force_reg (to_mode, gen_lowpart (to_mode, from));
620 emit_move_insn (to, temp);
621 return;
622 }
623
624 /* Mode combination is not recognized. */
625 gcc_unreachable ();
626 }
627
628 /* Return an rtx for a value that would result
629 from converting X to mode MODE.
630 Both X and MODE may be floating, or both integer.
631 UNSIGNEDP is nonzero if X is an unsigned value.
632 This can be done by referring to a part of X in place
633 or by copying to a new temporary with conversion. */
634
635 rtx
636 convert_to_mode (machine_mode mode, rtx x, int unsignedp)
637 {
638 return convert_modes (mode, VOIDmode, x, unsignedp);
639 }
640
641 /* Return an rtx for a value that would result
642 from converting X from mode OLDMODE to mode MODE.
643 Both modes may be floating, or both integer.
644 UNSIGNEDP is nonzero if X is an unsigned value.
645
646 This can be done by referring to a part of X in place
647 or by copying to a new temporary with conversion.
648
649 You can give VOIDmode for OLDMODE, if you are sure X has a nonvoid mode. */
650
651 rtx
652 convert_modes (machine_mode mode, machine_mode oldmode, rtx x, int unsignedp)
653 {
654 rtx temp;
655 scalar_int_mode int_mode;
656
657 /* If FROM is a SUBREG that indicates that we have already done at least
658 the required extension, strip it. */
659
660 if (GET_CODE (x) == SUBREG
661 && SUBREG_PROMOTED_VAR_P (x)
662 && is_a <scalar_int_mode> (mode, &int_mode)
663 && (GET_MODE_PRECISION (subreg_promoted_mode (x))
664 >= GET_MODE_PRECISION (int_mode))
665 && SUBREG_CHECK_PROMOTED_SIGN (x, unsignedp))
666 x = gen_lowpart (int_mode, SUBREG_REG (x));
667
668 if (GET_MODE (x) != VOIDmode)
669 oldmode = GET_MODE (x);
670
671 if (mode == oldmode)
672 return x;
673
674 if (CONST_SCALAR_INT_P (x)
675 && is_int_mode (mode, &int_mode))
676 {
677 /* If the caller did not tell us the old mode, then there is not
678 much to do with respect to canonicalization. We have to
679 assume that all the bits are significant. */
680 if (GET_MODE_CLASS (oldmode) != MODE_INT)
681 oldmode = MAX_MODE_INT;
682 wide_int w = wide_int::from (rtx_mode_t (x, oldmode),
683 GET_MODE_PRECISION (int_mode),
684 unsignedp ? UNSIGNED : SIGNED);
685 return immed_wide_int_const (w, int_mode);
686 }
687
688 /* We can do this with a gen_lowpart if both desired and current modes
689 are integer, and this is either a constant integer, a register, or a
690 non-volatile MEM. */
691 scalar_int_mode int_oldmode;
692 if (is_int_mode (mode, &int_mode)
693 && is_int_mode (oldmode, &int_oldmode)
694 && GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (int_oldmode)
695 && ((MEM_P (x) && !MEM_VOLATILE_P (x) && direct_load[(int) int_mode])
696 || CONST_POLY_INT_P (x)
697 || (REG_P (x)
698 && (!HARD_REGISTER_P (x)
699 || targetm.hard_regno_mode_ok (REGNO (x), int_mode))
700 && TRULY_NOOP_TRUNCATION_MODES_P (int_mode, GET_MODE (x)))))
701 return gen_lowpart (int_mode, x);
702
703 /* Converting a constant (which has VOIDmode) into a vector mode is
704 always equivalent to a subreg operation. */
705 if (VECTOR_MODE_P (mode) && GET_MODE (x) == VOIDmode)
706 {
707 gcc_assert (known_eq (GET_MODE_BITSIZE (mode),
708 GET_MODE_BITSIZE (oldmode)));
709 return simplify_gen_subreg (mode, x, oldmode, 0);
710 }
711
712 temp = gen_reg_rtx (mode);
713 convert_move (temp, x, unsignedp);
714 return temp;
715 }
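
/* A sketch of the constant path above (hypothetical values and helper
   name): for a CONST_INT the conversion happens at expansion time and
   emits no insns.  */
#if 0
static rtx
example_truncate_constant (void)
{
  rtx x = GEN_INT (0x1234);
  /* With UNSIGNEDP nonzero this should fold to the QImode-canonical
     constant 0x34; a REG or MEM operand would instead go through
     gen_lowpart or a temporary plus convert_move.  */
  return convert_modes (QImode, SImode, x, 1);
}
#endif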
716 \f
717 /* Return the largest alignment we can use for doing a move (or store)
718 of MAX_PIECES. ALIGN is the largest alignment we could use. */
719
720 static unsigned int
721 alignment_for_piecewise_move (unsigned int max_pieces, unsigned int align)
722 {
723 scalar_int_mode tmode
724 = int_mode_for_size (max_pieces * BITS_PER_UNIT, 1).require ();
725
726 if (align >= GET_MODE_ALIGNMENT (tmode))
727 align = GET_MODE_ALIGNMENT (tmode);
728 else
729 {
730 scalar_int_mode xmode = NARROWEST_INT_MODE;
731 opt_scalar_int_mode mode_iter;
732 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
733 {
734 tmode = mode_iter.require ();
735 if (GET_MODE_SIZE (tmode) > max_pieces
736 || targetm.slow_unaligned_access (tmode, align))
737 break;
738 xmode = tmode;
739 }
740
741 align = MAX (align, GET_MODE_ALIGNMENT (xmode));
742 }
743
744 return align;
745 }
746
747 /* Return the widest integer mode that is narrower than SIZE bytes. */
748
749 static scalar_int_mode
750 widest_int_mode_for_size (unsigned int size)
751 {
752 scalar_int_mode result = NARROWEST_INT_MODE;
753
754 gcc_checking_assert (size > 1);
755
756 opt_scalar_int_mode tmode;
757 FOR_EACH_MODE_IN_CLASS (tmode, MODE_INT)
758 if (GET_MODE_SIZE (tmode.require ()) < size)
759 result = tmode.require ();
760
761 return result;
762 }
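
/* For example, on a hypothetical target whose QImode/HImode/SImode/DImode
   are 1/2/4/8 bytes, widest_int_mode_for_size (8) returns SImode and
   widest_int_mode_for_size (3) returns HImode: the result must be
   strictly narrower than SIZE bytes.  */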
763
764 /* Determine whether an operation OP on LEN bytes with alignment ALIGN can
765 and should be performed piecewise. */
766
767 static bool
768 can_do_by_pieces (unsigned HOST_WIDE_INT len, unsigned int align,
769 enum by_pieces_operation op)
770 {
771 return targetm.use_by_pieces_infrastructure_p (len, align, op,
772 optimize_insn_for_speed_p ());
773 }
774
775 /* Determine whether the LEN bytes can be moved by using several move
776 instructions. Return nonzero if a call to move_by_pieces should
777 succeed. */
778
779 bool
780 can_move_by_pieces (unsigned HOST_WIDE_INT len, unsigned int align)
781 {
782 return can_do_by_pieces (len, align, MOVE_BY_PIECES);
783 }
784
785 /* Return number of insns required to perform operation OP by pieces
786 for L bytes. ALIGN (in bits) is maximum alignment we can assume. */
787
788 unsigned HOST_WIDE_INT
789 by_pieces_ninsns (unsigned HOST_WIDE_INT l, unsigned int align,
790 unsigned int max_size, by_pieces_operation op)
791 {
792 unsigned HOST_WIDE_INT n_insns = 0;
793
794 align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
795
796 while (max_size > 1 && l > 0)
797 {
798 scalar_int_mode mode = widest_int_mode_for_size (max_size);
799 enum insn_code icode;
800
801 unsigned int modesize = GET_MODE_SIZE (mode);
802
803 icode = optab_handler (mov_optab, mode);
804 if (icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode))
805 {
806 unsigned HOST_WIDE_INT n_pieces = l / modesize;
807 l %= modesize;
808 switch (op)
809 {
810 default:
811 n_insns += n_pieces;
812 break;
813
814 case COMPARE_BY_PIECES:
815 int batch = targetm.compare_by_pieces_branch_ratio (mode);
816 int batch_ops = 4 * batch - 1;
817 unsigned HOST_WIDE_INT full = n_pieces / batch;
818 n_insns += full * batch_ops;
819 if (n_pieces % batch != 0)
820 n_insns++;
821 break;
822
823 }
824 }
825 max_size = modesize;
826 }
827
828 gcc_assert (!l);
829 return n_insns;
830 }
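
/* A worked example of the count above, assuming a target with
   MOVE_MAX_PIECES == 8, fully aligned operands and OP == MOVE_BY_PIECES:
   for L == 11 the loop uses the 8-byte mode (one piece, 3 bytes left),
   then the 4-byte mode (no piece), the 2-byte mode (one piece, 1 byte
   left) and finally the 1-byte mode (one piece), so it returns 3.  */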
831
832 /* Used when performing piecewise block operations, holds information
833 about one of the memory objects involved. The member functions
834 can be used to generate code for loading from the object and
835 updating the address when iterating. */
836
837 class pieces_addr
838 {
839 /* The object being referenced, a MEM. Can be NULL_RTX to indicate
840 stack pushes. */
841 rtx m_obj;
842 /* The address of the object. Can differ from that seen in the
843 MEM rtx if we copied the address to a register. */
844 rtx m_addr;
845 /* Nonzero if the address of the object already has autoincrement form;
846 the sign indicates whether it is an increment or a decrement. */
847 signed char m_addr_inc;
848 /* Nonzero if we intend to use autoinc without the address already
849 having autoinc form. We will insert add insns around each memory
850 reference, expecting later passes to form autoinc addressing modes.
851 The only supported options are predecrement and postincrement. */
852 signed char m_explicit_inc;
853 /* True if we have either of the two possible cases of using
854 autoincrement. */
855 bool m_auto;
856 /* True if this is an address to be used for load operations rather
857 than stores. */
858 bool m_is_load;
859
860 /* Optionally, a function to obtain constants for any given offset into
861 the objects, and data associated with it. */
862 by_pieces_constfn m_constfn;
863 void *m_cfndata;
864 public:
865 pieces_addr (rtx, bool, by_pieces_constfn, void *);
866 rtx adjust (scalar_int_mode, HOST_WIDE_INT);
867 void increment_address (HOST_WIDE_INT);
868 void maybe_predec (HOST_WIDE_INT);
869 void maybe_postinc (HOST_WIDE_INT);
870 void decide_autoinc (machine_mode, bool, HOST_WIDE_INT);
871 int get_addr_inc ()
872 {
873 return m_addr_inc;
874 }
875 };
876
877 /* Initialize a pieces_addr structure from an object OBJ. IS_LOAD is
878 true if the operation to be performed on this object is a load
879 rather than a store. For stores, OBJ can be NULL, in which case we
880 assume the operation is a stack push. For loads, the optional
881 CONSTFN and its associated CFNDATA can be used in place of the
882 memory load. */
883
884 pieces_addr::pieces_addr (rtx obj, bool is_load, by_pieces_constfn constfn,
885 void *cfndata)
886 : m_obj (obj), m_is_load (is_load), m_constfn (constfn), m_cfndata (cfndata)
887 {
888 m_addr_inc = 0;
889 m_auto = false;
890 if (obj)
891 {
892 rtx addr = XEXP (obj, 0);
893 rtx_code code = GET_CODE (addr);
894 m_addr = addr;
895 bool dec = code == PRE_DEC || code == POST_DEC;
896 bool inc = code == PRE_INC || code == POST_INC;
897 m_auto = inc || dec;
898 if (m_auto)
899 m_addr_inc = dec ? -1 : 1;
900
901 /* While we have always looked for these codes here, the code
902 implementing the memory operation has never handled them.
903 Support could be added later if necessary or beneficial. */
904 gcc_assert (code != PRE_INC && code != POST_DEC);
905 }
906 else
907 {
908 m_addr = NULL_RTX;
909 if (!is_load)
910 {
911 m_auto = true;
912 if (STACK_GROWS_DOWNWARD)
913 m_addr_inc = -1;
914 else
915 m_addr_inc = 1;
916 }
917 else
918 gcc_assert (constfn != NULL);
919 }
920 m_explicit_inc = 0;
921 if (constfn)
922 gcc_assert (is_load);
923 }
924
925 /* Decide whether to use autoinc for an address involved in a memory op.
926 MODE is the mode of the accesses, REVERSE is true if we've decided to
927 perform the operation starting from the end, and LEN is the length of
928 the operation. Don't override an earlier decision to set m_auto. */
929
930 void
931 pieces_addr::decide_autoinc (machine_mode ARG_UNUSED (mode), bool reverse,
932 HOST_WIDE_INT len)
933 {
934 if (m_auto || m_obj == NULL_RTX)
935 return;
936
937 bool use_predec = (m_is_load
938 ? USE_LOAD_PRE_DECREMENT (mode)
939 : USE_STORE_PRE_DECREMENT (mode));
940 bool use_postinc = (m_is_load
941 ? USE_LOAD_POST_INCREMENT (mode)
942 : USE_STORE_POST_INCREMENT (mode));
943 machine_mode addr_mode = get_address_mode (m_obj);
944
945 if (use_predec && reverse)
946 {
947 m_addr = copy_to_mode_reg (addr_mode,
948 plus_constant (addr_mode,
949 m_addr, len));
950 m_auto = true;
951 m_explicit_inc = -1;
952 }
953 else if (use_postinc && !reverse)
954 {
955 m_addr = copy_to_mode_reg (addr_mode, m_addr);
956 m_auto = true;
957 m_explicit_inc = 1;
958 }
959 else if (CONSTANT_P (m_addr))
960 m_addr = copy_to_mode_reg (addr_mode, m_addr);
961 }
962
963 /* Adjust the address to refer to the data at OFFSET in MODE. If we
964 are using autoincrement for this address, we don't add the offset,
965 but we still modify the MEM's properties. */
966
967 rtx
968 pieces_addr::adjust (scalar_int_mode mode, HOST_WIDE_INT offset)
969 {
970 if (m_constfn)
971 return m_constfn (m_cfndata, offset, mode);
972 if (m_obj == NULL_RTX)
973 return NULL_RTX;
974 if (m_auto)
975 return adjust_automodify_address (m_obj, mode, m_addr, offset);
976 else
977 return adjust_address (m_obj, mode, offset);
978 }
979
980 /* Emit an add instruction to increment the address by SIZE. */
981
982 void
983 pieces_addr::increment_address (HOST_WIDE_INT size)
984 {
985 rtx amount = gen_int_mode (size, GET_MODE (m_addr));
986 emit_insn (gen_add2_insn (m_addr, amount));
987 }
988
989 /* If we are supposed to decrement the address after each access, emit code
990 to do so now. Increment by SIZE (which should already have the correct
991 sign). */
992
993 void
994 pieces_addr::maybe_predec (HOST_WIDE_INT size)
995 {
996 if (m_explicit_inc >= 0)
997 return;
998 gcc_assert (HAVE_PRE_DECREMENT);
999 increment_address (size);
1000 }
1001
1002 /* If we are supposed to increment the address after each access, emit code
1003 to do so now. Increment by SIZE. */
1004
1005 void
1006 pieces_addr::maybe_postinc (HOST_WIDE_INT size)
1007 {
1008 if (m_explicit_inc <= 0)
1009 return;
1010 gcc_assert (HAVE_POST_INCREMENT);
1011 increment_address (size);
1012 }
1013
1014 /* This structure is used by do_op_by_pieces to describe the operation
1015 to be performed. */
1016
1017 class op_by_pieces_d
1018 {
1019 protected:
1020 pieces_addr m_to, m_from;
1021 unsigned HOST_WIDE_INT m_len;
1022 HOST_WIDE_INT m_offset;
1023 unsigned int m_align;
1024 unsigned int m_max_size;
1025 bool m_reverse;
1026
1027 /* Virtual functions, overridden by derived classes for the specific
1028 operation. */
1029 virtual void generate (rtx, rtx, machine_mode) = 0;
1030 virtual bool prepare_mode (machine_mode, unsigned int) = 0;
1031 virtual void finish_mode (machine_mode)
1032 {
1033 }
1034
1035 public:
1036 op_by_pieces_d (rtx, bool, rtx, bool, by_pieces_constfn, void *,
1037 unsigned HOST_WIDE_INT, unsigned int);
1038 void run ();
1039 };
1040
1041 /* The constructor for an op_by_pieces_d structure. We require two
1042 objects named TO and FROM, which are identified as loads or stores
1043 by TO_LOAD and FROM_LOAD. If FROM is a load, the optional FROM_CFN
1044 and its associated FROM_CFN_DATA can be used to replace loads with
1045 constant values. LEN describes the length of the operation. */
1046
1047 op_by_pieces_d::op_by_pieces_d (rtx to, bool to_load,
1048 rtx from, bool from_load,
1049 by_pieces_constfn from_cfn,
1050 void *from_cfn_data,
1051 unsigned HOST_WIDE_INT len,
1052 unsigned int align)
1053 : m_to (to, to_load, NULL, NULL),
1054 m_from (from, from_load, from_cfn, from_cfn_data),
1055 m_len (len), m_max_size (MOVE_MAX_PIECES + 1)
1056 {
1057 int toi = m_to.get_addr_inc ();
1058 int fromi = m_from.get_addr_inc ();
1059 if (toi >= 0 && fromi >= 0)
1060 m_reverse = false;
1061 else if (toi <= 0 && fromi <= 0)
1062 m_reverse = true;
1063 else
1064 gcc_unreachable ();
1065
1066 m_offset = m_reverse ? len : 0;
1067 align = MIN (to ? MEM_ALIGN (to) : align,
1068 from ? MEM_ALIGN (from) : align);
1069
1070 /* If copying requires more than two move insns,
1071 copy addresses to registers (to make displacements shorter)
1072 and use post-increment if available. */
1073 if (by_pieces_ninsns (len, align, m_max_size, MOVE_BY_PIECES) > 2)
1074 {
1075 /* Find the widest mode that will be used to access the pieces. */
1076 scalar_int_mode mode = widest_int_mode_for_size (m_max_size);
1077
1078 m_from.decide_autoinc (mode, m_reverse, len);
1079 m_to.decide_autoinc (mode, m_reverse, len);
1080 }
1081
1082 align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
1083 m_align = align;
1084 }
1085
1086 /* This function contains the main loop used for expanding a block
1087 operation. First move what we can in the largest integer mode,
1088 then go to successively smaller modes. For every access, call
1089 the virtual generate method with the two operands and the mode. */
1090
1091 void
1092 op_by_pieces_d::run ()
1093 {
1094 while (m_max_size > 1 && m_len > 0)
1095 {
1096 scalar_int_mode mode = widest_int_mode_for_size (m_max_size);
1097
1098 if (prepare_mode (mode, m_align))
1099 {
1100 unsigned int size = GET_MODE_SIZE (mode);
1101 rtx to1 = NULL_RTX, from1;
1102
1103 while (m_len >= size)
1104 {
1105 if (m_reverse)
1106 m_offset -= size;
1107
1108 to1 = m_to.adjust (mode, m_offset);
1109 from1 = m_from.adjust (mode, m_offset);
1110
1111 m_to.maybe_predec (-(HOST_WIDE_INT)size);
1112 m_from.maybe_predec (-(HOST_WIDE_INT)size);
1113
1114 generate (to1, from1, mode);
1115
1116 m_to.maybe_postinc (size);
1117 m_from.maybe_postinc (size);
1118
1119 if (!m_reverse)
1120 m_offset += size;
1121
1122 m_len -= size;
1123 }
1124
1125 finish_mode (mode);
1126 }
1127
1128 m_max_size = GET_MODE_SIZE (mode);
1129 }
1130
1131 /* The code above should have handled everything. */
1132 gcc_assert (!m_len);
1133 }
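
/* Continuing the hypothetical 11-byte example from by_pieces_ninsns
   above, on a 64-bit target with aligned operands run () emits one
   DImode access at offset 0, one HImode access at offset 8 and one
   QImode access at offset 10; with m_reverse set, the same accesses are
   generated from the end of the block backwards.  */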
1134
1135 /* Derived class from op_by_pieces_d, providing support for block move
1136 operations. */
1137
1138 class move_by_pieces_d : public op_by_pieces_d
1139 {
1140 insn_gen_fn m_gen_fun;
1141 void generate (rtx, rtx, machine_mode);
1142 bool prepare_mode (machine_mode, unsigned int);
1143
1144 public:
1145 move_by_pieces_d (rtx to, rtx from, unsigned HOST_WIDE_INT len,
1146 unsigned int align)
1147 : op_by_pieces_d (to, false, from, true, NULL, NULL, len, align)
1148 {
1149 }
1150 rtx finish_retmode (memop_ret);
1151 };
1152
1153 /* Return true if MODE can be used for a set of copies, given an
1154 alignment ALIGN. Prepare whatever data is necessary for later
1155 calls to generate. */
1156
1157 bool
1158 move_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align)
1159 {
1160 insn_code icode = optab_handler (mov_optab, mode);
1161 m_gen_fun = GEN_FCN (icode);
1162 return icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode);
1163 }
1164
1165 /* A callback used when iterating for a move_by_pieces operation.
1166 OP0 is the destination and OP1 the source; both are in MODE. If
1167 OP0 is NULL, this means we should generate a push instead;
1168 otherwise the move is emitted with the insn gen function that
1169 prepare_mode selected for this mode. */
1170
1171 void
1172 move_by_pieces_d::generate (rtx op0, rtx op1,
1173 machine_mode mode ATTRIBUTE_UNUSED)
1174 {
1175 #ifdef PUSH_ROUNDING
1176 if (op0 == NULL_RTX)
1177 {
1178 emit_single_push_insn (mode, op1, NULL);
1179 return;
1180 }
1181 #endif
1182 emit_insn (m_gen_fun (op0, op1));
1183 }
1184
1185 /* Perform the final adjustment at the end of a string to obtain the
1186 correct return value for the block operation.
1187 Return value is based on RETMODE argument. */
1188
1189 rtx
1190 move_by_pieces_d::finish_retmode (memop_ret retmode)
1191 {
1192 gcc_assert (!m_reverse);
1193 if (retmode == RETURN_END_MINUS_ONE)
1194 {
1195 m_to.maybe_postinc (-1);
1196 --m_offset;
1197 }
1198 return m_to.adjust (QImode, m_offset);
1199 }
1200
1201 /* Generate several move instructions to copy LEN bytes from block FROM to
1202 block TO. (These are MEM rtx's with BLKmode).
1203
1204 If PUSH_ROUNDING is defined and TO is NULL, emit_single_push_insn is
1205 used to push FROM to the stack.
1206
1207 ALIGN is maximum stack alignment we can assume.
1208
1209 Return value is based on RETMODE argument. */
1210
1211 rtx
1212 move_by_pieces (rtx to, rtx from, unsigned HOST_WIDE_INT len,
1213 unsigned int align, memop_ret retmode)
1214 {
1215 #ifndef PUSH_ROUNDING
1216 if (to == NULL)
1217 gcc_unreachable ();
1218 #endif
1219
1220 move_by_pieces_d data (to, from, len, align);
1221
1222 data.run ();
1223
1224 if (retmode != RETURN_BEGIN)
1225 return data.finish_retmode (retmode);
1226 else
1227 return to;
1228 }
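
/* Hypothetical usage sketch: DST and SRC are BLKmode MEMs, LEN a
   compile-time byte count for which can_move_by_pieces returned true.  */
#if 0
static rtx
example_move_by_pieces (rtx dst, rtx src, unsigned HOST_WIDE_INT len,
			unsigned int align)
{
  /* RETURN_BEGIN simply returns DST; RETURN_END would return a MEM
     addressing the first byte past the copy, RETURN_END_MINUS_ONE the
     last byte copied (see finish_retmode above).  */
  return move_by_pieces (dst, src, len, align, RETURN_BEGIN);
}
#endif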
1229
1230 /* Derived class from op_by_pieces_d, providing support for block store
1231 operations. */
1232
1233 class store_by_pieces_d : public op_by_pieces_d
1234 {
1235 insn_gen_fn m_gen_fun;
1236 void generate (rtx, rtx, machine_mode);
1237 bool prepare_mode (machine_mode, unsigned int);
1238
1239 public:
1240 store_by_pieces_d (rtx to, by_pieces_constfn cfn, void *cfn_data,
1241 unsigned HOST_WIDE_INT len, unsigned int align)
1242 : op_by_pieces_d (to, false, NULL_RTX, true, cfn, cfn_data, len, align)
1243 {
1244 }
1245 rtx finish_retmode (memop_ret);
1246 };
1247
1248 /* Return true if MODE can be used for a set of stores, given an
1249 alignment ALIGN. Prepare whatever data is necessary for later
1250 calls to generate. */
1251
1252 bool
1253 store_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align)
1254 {
1255 insn_code icode = optab_handler (mov_optab, mode);
1256 m_gen_fun = GEN_FCN (icode);
1257 return icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode);
1258 }
1259
1260 /* A callback used when iterating for a store_by_pieces operation.
1261 OP0 is the destination and OP1 is the value to be stored in MODE;
1262 OP1 comes from the constfn callback supplied by the caller. The
1263 store is emitted with the insn gen function that prepare_mode
1264 selected for this mode. */
1265
1266 void
1267 store_by_pieces_d::generate (rtx op0, rtx op1, machine_mode)
1268 {
1269 emit_insn (m_gen_fun (op0, op1));
1270 }
1271
1272 /* Perform the final adjustment at the end of a string to obtain the
1273 correct return value for the block operation.
1274 Return value is based on RETMODE argument. */
1275
1276 rtx
1277 store_by_pieces_d::finish_retmode (memop_ret retmode)
1278 {
1279 gcc_assert (!m_reverse);
1280 if (retmode == RETURN_END_MINUS_ONE)
1281 {
1282 m_to.maybe_postinc (-1);
1283 --m_offset;
1284 }
1285 return m_to.adjust (QImode, m_offset);
1286 }
1287
1288 /* Determine whether the LEN bytes generated by CONSTFUN can be
1289 stored to memory using several move instructions. CONSTFUNDATA is
1290 a pointer which will be passed as argument in every CONSTFUN call.
1291 ALIGN is maximum alignment we can assume. MEMSETP is true if this is
1292 a memset operation and false if it's a copy of a constant string.
1293 Return nonzero if a call to store_by_pieces should succeed. */
1294
1295 int
1296 can_store_by_pieces (unsigned HOST_WIDE_INT len,
1297 rtx (*constfun) (void *, HOST_WIDE_INT, scalar_int_mode),
1298 void *constfundata, unsigned int align, bool memsetp)
1299 {
1300 unsigned HOST_WIDE_INT l;
1301 unsigned int max_size;
1302 HOST_WIDE_INT offset = 0;
1303 enum insn_code icode;
1304 int reverse;
1305 /* cst is set but not used if LEGITIMATE_CONSTANT doesn't use it. */
1306 rtx cst ATTRIBUTE_UNUSED;
1307
1308 if (len == 0)
1309 return 1;
1310
1311 if (!targetm.use_by_pieces_infrastructure_p (len, align,
1312 memsetp
1313 ? SET_BY_PIECES
1314 : STORE_BY_PIECES,
1315 optimize_insn_for_speed_p ()))
1316 return 0;
1317
1318 align = alignment_for_piecewise_move (STORE_MAX_PIECES, align);
1319
1320 /* We would first store what we can in the largest integer mode, then go to
1321 successively smaller modes. */
1322
1323 for (reverse = 0;
1324 reverse <= (HAVE_PRE_DECREMENT || HAVE_POST_DECREMENT);
1325 reverse++)
1326 {
1327 l = len;
1328 max_size = STORE_MAX_PIECES + 1;
1329 while (max_size > 1 && l > 0)
1330 {
1331 scalar_int_mode mode = widest_int_mode_for_size (max_size);
1332
1333 icode = optab_handler (mov_optab, mode);
1334 if (icode != CODE_FOR_nothing
1335 && align >= GET_MODE_ALIGNMENT (mode))
1336 {
1337 unsigned int size = GET_MODE_SIZE (mode);
1338
1339 while (l >= size)
1340 {
1341 if (reverse)
1342 offset -= size;
1343
1344 cst = (*constfun) (constfundata, offset, mode);
1345 if (!targetm.legitimate_constant_p (mode, cst))
1346 return 0;
1347
1348 if (!reverse)
1349 offset += size;
1350
1351 l -= size;
1352 }
1353 }
1354
1355 max_size = GET_MODE_SIZE (mode);
1356 }
1357
1358 /* The code above should have handled everything. */
1359 gcc_assert (!l);
1360 }
1361
1362 return 1;
1363 }
1364
1365 /* Generate several move instructions to store LEN bytes generated by
1366 CONSTFUN to block TO. (A MEM rtx with BLKmode). CONSTFUNDATA is a
1367 pointer which will be passed as argument in every CONSTFUN call.
1368 ALIGN is maximum alignment we can assume. MEMSETP is true if this is
1369 a memset operation and false if it's a copy of a constant string.
1370 Return value is based on RETMODE argument. */
1371
1372 rtx
1373 store_by_pieces (rtx to, unsigned HOST_WIDE_INT len,
1374 rtx (*constfun) (void *, HOST_WIDE_INT, scalar_int_mode),
1375 void *constfundata, unsigned int align, bool memsetp,
1376 memop_ret retmode)
1377 {
1378 if (len == 0)
1379 {
1380 gcc_assert (retmode != RETURN_END_MINUS_ONE);
1381 return to;
1382 }
1383
1384 gcc_assert (targetm.use_by_pieces_infrastructure_p
1385 (len, align,
1386 memsetp ? SET_BY_PIECES : STORE_BY_PIECES,
1387 optimize_insn_for_speed_p ()));
1388
1389 store_by_pieces_d data (to, constfun, constfundata, len, align);
1390 data.run ();
1391
1392 if (retmode != RETURN_BEGIN)
1393 return data.finish_retmode (retmode);
1394 else
1395 return to;
1396 }
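
/* A hypothetical CONSTFUN callback, in the same shape as clear_by_pieces_1
   below, plus a caller: for every OFFSET and MODE the expander chooses,
   the callback must return a constant rtx holding the bytes to store.
   All names here are illustrative only.  */
#if 0
static rtx
fill_with_ones (void *, HOST_WIDE_INT, scalar_int_mode mode)
{
  return gen_int_mode (-1, mode);	/* An all-ones filler constant.  */
}

static rtx
example_fill (rtx dst, unsigned HOST_WIDE_INT len, unsigned int align)
{
  /* can_store_by_pieces must be asked first, with the same callback, so
     that every generated constant passes legitimate_constant_p.  */
  if (can_store_by_pieces (len, fill_with_ones, NULL, align, true))
    return store_by_pieces (dst, len, fill_with_ones, NULL, align, true,
			    RETURN_BEGIN);
  return NULL_RTX;
}
#endif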
1397
1398 /* Callback routine for clear_by_pieces.
1399 Return const0_rtx unconditionally. */
1400
1401 static rtx
1402 clear_by_pieces_1 (void *, HOST_WIDE_INT, scalar_int_mode)
1403 {
1404 return const0_rtx;
1405 }
1406
1407 /* Generate several move instructions to clear LEN bytes of block TO. (A MEM
1408 rtx with BLKmode). ALIGN is maximum alignment we can assume. */
1409
1410 static void
1411 clear_by_pieces (rtx to, unsigned HOST_WIDE_INT len, unsigned int align)
1412 {
1413 if (len == 0)
1414 return;
1415
1416 store_by_pieces_d data (to, clear_by_pieces_1, NULL, len, align);
1417 data.run ();
1418 }
1419
1420 /* Context used by compare_by_pieces_d::generate. It stores the fail label
1421 to jump to in case of miscomparison, and for branch ratios greater than 1,
1422 it stores an accumulator and the current and maximum counts before
1423 emitting another branch. */
1424
1425 class compare_by_pieces_d : public op_by_pieces_d
1426 {
1427 rtx_code_label *m_fail_label;
1428 rtx m_accumulator;
1429 int m_count, m_batch;
1430
1431 void generate (rtx, rtx, machine_mode);
1432 bool prepare_mode (machine_mode, unsigned int);
1433 void finish_mode (machine_mode);
1434 public:
1435 compare_by_pieces_d (rtx op0, rtx op1, by_pieces_constfn op1_cfn,
1436 void *op1_cfn_data, HOST_WIDE_INT len, int align,
1437 rtx_code_label *fail_label)
1438 : op_by_pieces_d (op0, true, op1, true, op1_cfn, op1_cfn_data, len, align)
1439 {
1440 m_fail_label = fail_label;
1441 }
1442 };
1443
1444 /* A callback used when iterating for a compare_by_pieces_operation.
1445 OP0 and OP1 are the values that have been loaded and should be
1446 compared in MODE. The remaining state (fail label, accumulator,
1447 batch counts) is held in the compare_by_pieces_d object itself. */
1448
1449 void
1450 compare_by_pieces_d::generate (rtx op0, rtx op1, machine_mode mode)
1451 {
1452 if (m_batch > 1)
1453 {
1454 rtx temp = expand_binop (mode, sub_optab, op0, op1, NULL_RTX,
1455 true, OPTAB_LIB_WIDEN);
1456 if (m_count != 0)
1457 temp = expand_binop (mode, ior_optab, m_accumulator, temp, temp,
1458 true, OPTAB_LIB_WIDEN);
1459 m_accumulator = temp;
1460
1461 if (++m_count < m_batch)
1462 return;
1463
1464 m_count = 0;
1465 op0 = m_accumulator;
1466 op1 = const0_rtx;
1467 m_accumulator = NULL_RTX;
1468 }
1469 do_compare_rtx_and_jump (op0, op1, NE, true, mode, NULL_RTX, NULL,
1470 m_fail_label, profile_probability::uninitialized ());
1471 }
1472
1473 /* Return true if MODE can be used for a set of moves and comparisons,
1474 given an alignment ALIGN. Prepare whatever data is necessary for
1475 later calls to generate. */
1476
1477 bool
1478 compare_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align)
1479 {
1480 insn_code icode = optab_handler (mov_optab, mode);
1481 if (icode == CODE_FOR_nothing
1482 || align < GET_MODE_ALIGNMENT (mode)
1483 || !can_compare_p (EQ, mode, ccp_jump))
1484 return false;
1485 m_batch = targetm.compare_by_pieces_branch_ratio (mode);
1486 if (m_batch < 0)
1487 return false;
1488 m_accumulator = NULL_RTX;
1489 m_count = 0;
1490 return true;
1491 }
1492
1493 /* Called after expanding a series of comparisons in MODE. If we have
1494 accumulated results for which we haven't emitted a branch yet, do
1495 so now. */
1496
1497 void
1498 compare_by_pieces_d::finish_mode (machine_mode mode)
1499 {
1500 if (m_accumulator != NULL_RTX)
1501 do_compare_rtx_and_jump (m_accumulator, const0_rtx, NE, true, mode,
1502 NULL_RTX, NULL, m_fail_label,
1503 profile_probability::uninitialized ());
1504 }
1505
1506 /* Generate several move instructions to compare LEN bytes from blocks
1507 ARG0 and ARG1. (These are MEM rtx's with BLKmode).
1508
1509 TARGET, if nonnull, is a suggestion for where to place the result;
1510 the result is an integer register holding zero if the blocks compare
1511 equal and one if they differ.
1512
1513 ALIGN is the maximum alignment we can assume for both blocks.
1514
1515 Optionally, the caller can pass a constfn and associated data in A1_CFN
1516 and A1_CFN_DATA, describing that the second operand being compared is a known constant and how to obtain its data. */
1517
1518 static rtx
1519 compare_by_pieces (rtx arg0, rtx arg1, unsigned HOST_WIDE_INT len,
1520 rtx target, unsigned int align,
1521 by_pieces_constfn a1_cfn, void *a1_cfn_data)
1522 {
1523 rtx_code_label *fail_label = gen_label_rtx ();
1524 rtx_code_label *end_label = gen_label_rtx ();
1525
1526 if (target == NULL_RTX
1527 || !REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
1528 target = gen_reg_rtx (TYPE_MODE (integer_type_node));
1529
1530 compare_by_pieces_d data (arg0, arg1, a1_cfn, a1_cfn_data, len, align,
1531 fail_label);
1532
1533 data.run ();
1534
1535 emit_move_insn (target, const0_rtx);
1536 emit_jump (end_label);
1537 emit_barrier ();
1538 emit_label (fail_label);
1539 emit_move_insn (target, const1_rtx);
1540 emit_label (end_label);
1541
1542 return target;
1543 }
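
/* Schematically, a 16-byte comparison done in two 8-byte pieces with a
   branch ratio of 1 expands to:

	if (first 8 bytes of ARG0 != first 8 bytes of ARG1) goto fail;
	if (next 8 bytes of ARG0 != next 8 bytes of ARG1) goto fail;
	target = 0; goto end;
      fail:
	target = 1;
      end:

   With a branch ratio greater than 1, compare_by_pieces_d::generate
   first combines several pieces into m_accumulator using SUB and IOR,
   and only the accumulated value is compared against zero.  */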
1544 \f
1545 /* Emit code to move a block Y to a block X. This may be done with
1546 string-move instructions, with multiple scalar move instructions,
1547 or with a library call.
1548
1549 Both X and Y must be MEM rtx's (perhaps inside VOLATILE) with mode BLKmode.
1550 SIZE is an rtx that says how long they are.
1551 ALIGN is the maximum alignment we can assume they have.
1552 METHOD describes what kind of copy this is, and what mechanisms may be used.
1553 MIN_SIZE is the minimal size of the block to move.
1554 MAX_SIZE is the maximal size of the block to move; if it cannot be
1555 represented in unsigned HOST_WIDE_INT, it is a mask of all ones.
1556
1557 Return the address of the new block, if memcpy is called and returns it,
1558 0 otherwise. */
1559
1560 rtx
1561 emit_block_move_hints (rtx x, rtx y, rtx size, enum block_op_methods method,
1562 unsigned int expected_align, HOST_WIDE_INT expected_size,
1563 unsigned HOST_WIDE_INT min_size,
1564 unsigned HOST_WIDE_INT max_size,
1565 unsigned HOST_WIDE_INT probable_max_size,
1566 bool bail_out_libcall, bool *is_move_done,
1567 bool might_overlap)
1568 {
1569 int may_use_call;
1570 rtx retval = 0;
1571 unsigned int align;
1572
1573 if (is_move_done)
1574 *is_move_done = true;
1575
1576 gcc_assert (size);
1577 if (CONST_INT_P (size) && INTVAL (size) == 0)
1578 return 0;
1579
1580 switch (method)
1581 {
1582 case BLOCK_OP_NORMAL:
1583 case BLOCK_OP_TAILCALL:
1584 may_use_call = 1;
1585 break;
1586
1587 case BLOCK_OP_CALL_PARM:
1588 may_use_call = block_move_libcall_safe_for_call_parm ();
1589
1590 /* Make inhibit_defer_pop nonzero around the library call
1591 to force it to pop the arguments right away. */
1592 NO_DEFER_POP;
1593 break;
1594
1595 case BLOCK_OP_NO_LIBCALL:
1596 may_use_call = 0;
1597 break;
1598
1599 case BLOCK_OP_NO_LIBCALL_RET:
1600 may_use_call = -1;
1601 break;
1602
1603 default:
1604 gcc_unreachable ();
1605 }
1606
1607 gcc_assert (MEM_P (x) && MEM_P (y));
1608 align = MIN (MEM_ALIGN (x), MEM_ALIGN (y));
1609 gcc_assert (align >= BITS_PER_UNIT);
1610
1611 /* Make sure we've got BLKmode addresses; store_one_arg can decide that
1612 block copy is more efficient for other large modes, e.g. DCmode. */
1613 x = adjust_address (x, BLKmode, 0);
1614 y = adjust_address (y, BLKmode, 0);
1615
1616 /* Set MEM_SIZE as appropriate for this block copy. The main place this
1617 can be incorrect is coming from __builtin_memcpy. */
1618 poly_int64 const_size;
1619 if (poly_int_rtx_p (size, &const_size))
1620 {
1621 x = shallow_copy_rtx (x);
1622 y = shallow_copy_rtx (y);
1623 set_mem_size (x, const_size);
1624 set_mem_size (y, const_size);
1625 }
1626
1627 bool pieces_ok = false;
1628 if (CONST_INT_P (size))
1629 pieces_ok = can_move_by_pieces (INTVAL (size), align);
1630 bool pattern_ok = false;
1631
1632 if (!pieces_ok || might_overlap)
1633 {
1634 pattern_ok
1635 = emit_block_move_via_pattern (x, y, size, align,
1636 expected_align, expected_size,
1637 min_size, max_size, probable_max_size,
1638 might_overlap);
1639 if (!pattern_ok && might_overlap)
1640 {
1641 /* Do not try any of the other methods below as they are not safe
1642 for overlapping moves. */
1643 *is_move_done = false;
1644 return retval;
1645 }
1646 }
1647
1648 if (pattern_ok)
1649 ;
1650 else if (pieces_ok)
1651 move_by_pieces (x, y, INTVAL (size), align, RETURN_BEGIN);
1652 else if (may_use_call && !might_overlap
1653 && ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (x))
1654 && ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (y)))
1655 {
1656 if (bail_out_libcall)
1657 {
1658 if (is_move_done)
1659 *is_move_done = false;
1660 return retval;
1661 }
1662
1663 if (may_use_call < 0)
1664 return pc_rtx;
1665
1666 retval = emit_block_copy_via_libcall (x, y, size,
1667 method == BLOCK_OP_TAILCALL);
1668 }
1669 else if (might_overlap)
1670 *is_move_done = false;
1671 else
1672 emit_block_move_via_loop (x, y, size, align);
1673
1674 if (method == BLOCK_OP_CALL_PARM)
1675 OK_DEFER_POP;
1676
1677 return retval;
1678 }
1679
1680 rtx
1681 emit_block_move (rtx x, rtx y, rtx size, enum block_op_methods method)
1682 {
1683 unsigned HOST_WIDE_INT max, min = 0;
1684 if (GET_CODE (size) == CONST_INT)
1685 min = max = UINTVAL (size);
1686 else
1687 max = GET_MODE_MASK (GET_MODE (size));
1688 return emit_block_move_hints (x, y, size, method, 0, -1,
1689 min, max, max);
1690 }
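
/* A hypothetical caller-side sketch: X and Y are BLKmode MEMs, N a byte
   count known at expansion time.  BLOCK_OP_NORMAL allows a memcpy
   libcall as a fallback; BLOCK_OP_NO_LIBCALL restricts the expansion to
   the pattern / by-pieces / loop strategies above, and
   BLOCK_OP_CALL_PARM additionally wraps the copy in NO_DEFER_POP /
   OK_DEFER_POP so it is safe while call arguments are being pushed.  */
#if 0
static rtx
example_block_copy (rtx x, rtx y, HOST_WIDE_INT n)
{
  return emit_block_move (x, y, GEN_INT (n), BLOCK_OP_NORMAL);
}
#endif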
1691
1692 /* A subroutine of emit_block_move. Returns true if calling the
1693 block move libcall will not clobber any parameters which may have
1694 already been placed on the stack. */
1695
1696 static bool
1697 block_move_libcall_safe_for_call_parm (void)
1698 {
1699 #if defined (REG_PARM_STACK_SPACE)
1700 tree fn;
1701 #endif
1702
1703 /* If arguments are pushed on the stack, then they're safe. */
1704 if (PUSH_ARGS)
1705 return true;
1706
1707 /* If registers go on the stack anyway, any argument is sure to clobber
1708 an outgoing argument. */
1709 #if defined (REG_PARM_STACK_SPACE)
1710 fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
1711 /* Avoid set but not used warning if *REG_PARM_STACK_SPACE doesn't
1712 depend on its argument. */
1713 (void) fn;
1714 if (OUTGOING_REG_PARM_STACK_SPACE ((!fn ? NULL_TREE : TREE_TYPE (fn)))
1715 && REG_PARM_STACK_SPACE (fn) != 0)
1716 return false;
1717 #endif
1718
1719 /* If any argument goes in memory, then it might clobber an outgoing
1720 argument. */
1721 {
1722 CUMULATIVE_ARGS args_so_far_v;
1723 cumulative_args_t args_so_far;
1724 tree fn, arg;
1725
1726 fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
1727 INIT_CUMULATIVE_ARGS (args_so_far_v, TREE_TYPE (fn), NULL_RTX, 0, 3);
1728 args_so_far = pack_cumulative_args (&args_so_far_v);
1729
1730 arg = TYPE_ARG_TYPES (TREE_TYPE (fn));
1731 for ( ; arg != void_list_node ; arg = TREE_CHAIN (arg))
1732 {
1733 machine_mode mode = TYPE_MODE (TREE_VALUE (arg));
1734 function_arg_info arg_info (mode, /*named=*/true);
1735 rtx tmp = targetm.calls.function_arg (args_so_far, arg_info);
1736 if (!tmp || !REG_P (tmp))
1737 return false;
1738 if (targetm.calls.arg_partial_bytes (args_so_far, arg_info))
1739 return false;
1740 targetm.calls.function_arg_advance (args_so_far, arg_info);
1741 }
1742 }
1743 return true;
1744 }
1745
1746 /* A subroutine of emit_block_move. Expand a cpymem or movmem pattern;
1747 return true if successful.
1748
1749 X is the destination of the copy or move.
1750 Y is the source of the copy or move.
1751 SIZE is the size of the block to be moved.
1752
1753 MIGHT_OVERLAP indicates this originated with expansion of a
1754 builtin_memmove() and the source and destination blocks may
1755 overlap.
1756 */
1757
1758 static bool
1759 emit_block_move_via_pattern (rtx x, rtx y, rtx size, unsigned int align,
1760 unsigned int expected_align,
1761 HOST_WIDE_INT expected_size,
1762 unsigned HOST_WIDE_INT min_size,
1763 unsigned HOST_WIDE_INT max_size,
1764 unsigned HOST_WIDE_INT probable_max_size,
1765 bool might_overlap)
1766 {
1767 if (expected_align < align)
1768 expected_align = align;
1769 if (expected_size != -1)
1770 {
1771 if ((unsigned HOST_WIDE_INT)expected_size > probable_max_size)
1772 expected_size = probable_max_size;
1773 if ((unsigned HOST_WIDE_INT)expected_size < min_size)
1774 expected_size = min_size;
1775 }
1776
1777 /* Since this is a move insn, we don't care about volatility. */
1778 temporary_volatile_ok v (true);
1779
1780 /* Try the most limited insn first, because there's no point
1781 including more than one in the machine description unless
1782 the more limited one has some advantage. */
1783
1784 opt_scalar_int_mode mode_iter;
1785 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
1786 {
1787 scalar_int_mode mode = mode_iter.require ();
1788 enum insn_code code;
1789 if (might_overlap)
1790 code = direct_optab_handler (movmem_optab, mode);
1791 else
1792 code = direct_optab_handler (cpymem_optab, mode);
1793
1794 if (code != CODE_FOR_nothing
1795 /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
1796 here because if SIZE is less than the mode mask, as it is
1797 returned by the macro, it will definitely be less than the
1798 actual mode mask. Since SIZE is within the Pmode address
1799 space, we limit MODE to Pmode. */
1800 && ((CONST_INT_P (size)
1801 && ((unsigned HOST_WIDE_INT) INTVAL (size)
1802 <= (GET_MODE_MASK (mode) >> 1)))
1803 || max_size <= (GET_MODE_MASK (mode) >> 1)
1804 || GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode)))
1805 {
1806 class expand_operand ops[9];
1807 unsigned int nops;
1808
1809 /* ??? When called via emit_block_move_for_call, it'd be
1810 nice if there were some way to inform the backend, so
1811 that it doesn't fail the expansion because it thinks
1812 emitting the libcall would be more efficient. */
1813 nops = insn_data[(int) code].n_generator_args;
1814 gcc_assert (nops == 4 || nops == 6 || nops == 8 || nops == 9);
1815
1816 create_fixed_operand (&ops[0], x);
1817 create_fixed_operand (&ops[1], y);
1818 /* The check above guarantees that this size conversion is valid. */
1819 create_convert_operand_to (&ops[2], size, mode, true);
1820 create_integer_operand (&ops[3], align / BITS_PER_UNIT);
1821 if (nops >= 6)
1822 {
1823 create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
1824 create_integer_operand (&ops[5], expected_size);
1825 }
1826 if (nops >= 8)
1827 {
1828 create_integer_operand (&ops[6], min_size);
1829 /* If we cannot represent the maximal size,
1830 make parameter NULL. */
1831 if ((HOST_WIDE_INT) max_size != -1)
1832 create_integer_operand (&ops[7], max_size);
1833 else
1834 create_fixed_operand (&ops[7], NULL);
1835 }
1836 if (nops == 9)
1837 {
1838 /* If we cannot represent the maximal size,
1839 make parameter NULL. */
1840 if ((HOST_WIDE_INT) probable_max_size != -1)
1841 create_integer_operand (&ops[8], probable_max_size);
1842 else
1843 create_fixed_operand (&ops[8], NULL);
1844 }
1845 if (maybe_expand_insn (code, nops, ops))
1846 return true;
1847 }
1848 }
1849
1850 return false;
1851 }
1852
1853 /* A subroutine of emit_block_move. Copy the data via an explicit
1854 loop. This is used only when libcalls are forbidden. */
1855 /* ??? It'd be nice to copy in hunks larger than QImode. */
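/* The generated loop is, roughly:

	iter = 0;
	goto cmp;
     top:
	*(x + iter) = *(y + iter);
	iter++;
     cmp:
	if (iter < size) goto top;  */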
1856
1857 static void
1858 emit_block_move_via_loop (rtx x, rtx y, rtx size,
1859 unsigned int align ATTRIBUTE_UNUSED)
1860 {
1861 rtx_code_label *cmp_label, *top_label;
1862 rtx iter, x_addr, y_addr, tmp;
1863 machine_mode x_addr_mode = get_address_mode (x);
1864 machine_mode y_addr_mode = get_address_mode (y);
1865 machine_mode iter_mode;
1866
1867 iter_mode = GET_MODE (size);
1868 if (iter_mode == VOIDmode)
1869 iter_mode = word_mode;
1870
1871 top_label = gen_label_rtx ();
1872 cmp_label = gen_label_rtx ();
1873 iter = gen_reg_rtx (iter_mode);
1874
1875 emit_move_insn (iter, const0_rtx);
1876
1877 x_addr = force_operand (XEXP (x, 0), NULL_RTX);
1878 y_addr = force_operand (XEXP (y, 0), NULL_RTX);
1879 do_pending_stack_adjust ();
1880
1881 emit_jump (cmp_label);
1882 emit_label (top_label);
1883
1884 tmp = convert_modes (x_addr_mode, iter_mode, iter, true);
1885 x_addr = simplify_gen_binary (PLUS, x_addr_mode, x_addr, tmp);
1886
1887 if (x_addr_mode != y_addr_mode)
1888 tmp = convert_modes (y_addr_mode, iter_mode, iter, true);
1889 y_addr = simplify_gen_binary (PLUS, y_addr_mode, y_addr, tmp);
1890
1891 x = change_address (x, QImode, x_addr);
1892 y = change_address (y, QImode, y_addr);
1893
1894 emit_move_insn (x, y);
1895
1896 tmp = expand_simple_binop (iter_mode, PLUS, iter, const1_rtx, iter,
1897 true, OPTAB_LIB_WIDEN);
1898 if (tmp != iter)
1899 emit_move_insn (iter, tmp);
1900
1901 emit_label (cmp_label);
1902
1903 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
1904 true, top_label,
1905 profile_probability::guessed_always ()
1906 .apply_scale (9, 10));
1907 }
1908 \f
1909 /* Expand a call to memcpy or memmove or memcmp, and return the result.
1910 TAILCALL is true if this is a tail call. */
1911
1912 rtx
1913 emit_block_op_via_libcall (enum built_in_function fncode, rtx dst, rtx src,
1914 rtx size, bool tailcall)
1915 {
1916 rtx dst_addr, src_addr;
1917 tree call_expr, dst_tree, src_tree, size_tree;
1918 machine_mode size_mode;
1919
1920 /* Since dst and src are passed to a libcall, mark the corresponding
1921 tree EXPR as addressable. */
1922 tree dst_expr = MEM_EXPR (dst);
1923 tree src_expr = MEM_EXPR (src);
1924 if (dst_expr)
1925 mark_addressable (dst_expr);
1926 if (src_expr)
1927 mark_addressable (src_expr);
1928
1929 dst_addr = copy_addr_to_reg (XEXP (dst, 0));
1930 dst_addr = convert_memory_address (ptr_mode, dst_addr);
1931 dst_tree = make_tree (ptr_type_node, dst_addr);
1932
1933 src_addr = copy_addr_to_reg (XEXP (src, 0));
1934 src_addr = convert_memory_address (ptr_mode, src_addr);
1935 src_tree = make_tree (ptr_type_node, src_addr);
1936
1937 size_mode = TYPE_MODE (sizetype);
1938 size = convert_to_mode (size_mode, size, 1);
1939 size = copy_to_mode_reg (size_mode, size);
1940 size_tree = make_tree (sizetype, size);
1941
1942 /* It is incorrect to use the libcall calling conventions for calls to
1943 memcpy/memmove/memcmp because they can be provided by the user. */
1944 tree fn = builtin_decl_implicit (fncode);
1945 call_expr = build_call_expr (fn, 3, dst_tree, src_tree, size_tree);
1946 CALL_EXPR_TAILCALL (call_expr) = tailcall;
1947
1948 return expand_call (call_expr, NULL_RTX, false);
1949 }
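
/* For example, a caller wanting plain memcpy semantics would use
   something like

     emit_block_op_via_libcall (BUILT_IN_MEMCPY, dst, src, size, false);

   where DST and SRC are MEMs for the two blocks and SIZE is the
   length in bytes.  */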
1950
1951 /* Try to expand cmpstrn or cmpmem operation ICODE with the given operands.
1952 ARG3_TYPE is the type of ARG3_RTX. Return the result rtx on success,
1953 otherwise return null. */
1954
1955 rtx
1956 expand_cmpstrn_or_cmpmem (insn_code icode, rtx target, rtx arg1_rtx,
1957 rtx arg2_rtx, tree arg3_type, rtx arg3_rtx,
1958 HOST_WIDE_INT align)
1959 {
1960 machine_mode insn_mode = insn_data[icode].operand[0].mode;
1961
1962 if (target && (!REG_P (target) || HARD_REGISTER_P (target)))
1963 target = NULL_RTX;
1964
1965 class expand_operand ops[5];
1966 create_output_operand (&ops[0], target, insn_mode);
1967 create_fixed_operand (&ops[1], arg1_rtx);
1968 create_fixed_operand (&ops[2], arg2_rtx);
1969 create_convert_operand_from (&ops[3], arg3_rtx, TYPE_MODE (arg3_type),
1970 TYPE_UNSIGNED (arg3_type));
1971 create_integer_operand (&ops[4], align);
1972 if (maybe_expand_insn (icode, 5, ops))
1973 return ops[0].value;
1974 return NULL_RTX;
1975 }
1976
1977 /* Expand a block compare between X and Y with length LEN using the
1978 cmpmem optab, placing the result in TARGET. LEN_TYPE is the type
1979 of the expression that was used to calculate the length. ALIGN
1980 gives the known minimum common alignment. */
1981
1982 static rtx
1983 emit_block_cmp_via_cmpmem (rtx x, rtx y, rtx len, tree len_type, rtx target,
1984 unsigned align)
1985 {
1986 /* Note: The cmpstrnsi pattern, if it exists, is not suitable for
1987 implementing memcmp because it will stop if it encounters two
1988 zero bytes. */
1989 insn_code icode = direct_optab_handler (cmpmem_optab, SImode);
1990
1991 if (icode == CODE_FOR_nothing)
1992 return NULL_RTX;
1993
1994 return expand_cmpstrn_or_cmpmem (icode, target, x, y, len_type, len, align);
1995 }
1996
1997 /* Emit code to compare a block Y to a block X. This may be done with
1998 string-compare instructions, with multiple scalar instructions,
1999 or with a library call.
2000
2001 Both X and Y must be MEM rtx's. LEN is an rtx that says how long
2002 they are. LEN_TYPE is the type of the expression that was used to
2003 calculate it.
2004
2005 If EQUALITY_ONLY is true, it means we don't have to return the tri-state
2006 value of a normal memcmp call, instead we can just compare for equality.
2007 If FORCE_LIBCALL is true, we should emit a call to memcmp rather than
2008 returning NULL_RTX.
2009
2010 Optionally, the caller can pass a constfn and associated data in Y_CFN
2011 and Y_CFN_DATA, describing that the second operand being compared is a
2012 known constant and how to obtain its data.
2013 Return the result of the comparison, or NULL_RTX if we failed to
2014 perform the operation. */
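/* For instance, a small equality-only comparison such as the expansion
   of memcmp (a, b, 8) == 0 will normally be handled by compare_by_pieces,
   while other cases fall back to the cmpmem pattern, if any.  */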
2015
2016 rtx
2017 emit_block_cmp_hints (rtx x, rtx y, rtx len, tree len_type, rtx target,
2018 bool equality_only, by_pieces_constfn y_cfn,
2019 void *y_cfndata)
2020 {
2021 rtx result = 0;
2022
2023 if (CONST_INT_P (len) && INTVAL (len) == 0)
2024 return const0_rtx;
2025
2026 gcc_assert (MEM_P (x) && MEM_P (y));
2027 unsigned int align = MIN (MEM_ALIGN (x), MEM_ALIGN (y));
2028 gcc_assert (align >= BITS_PER_UNIT);
2029
2030 x = adjust_address (x, BLKmode, 0);
2031 y = adjust_address (y, BLKmode, 0);
2032
2033 if (equality_only
2034 && CONST_INT_P (len)
2035 && can_do_by_pieces (INTVAL (len), align, COMPARE_BY_PIECES))
2036 result = compare_by_pieces (x, y, INTVAL (len), target, align,
2037 y_cfn, y_cfndata);
2038 else
2039 result = emit_block_cmp_via_cmpmem (x, y, len, len_type, target, align);
2040
2041 return result;
2042 }
2043 \f
2044 /* Copy all or part of a value X into registers starting at REGNO.
2045 The number of registers to be filled is NREGS. */
2046
2047 void
2048 move_block_to_reg (int regno, rtx x, int nregs, machine_mode mode)
2049 {
2050 if (nregs == 0)
2051 return;
2052
2053 if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x))
2054 x = validize_mem (force_const_mem (mode, x));
2055
2056 /* See if the machine can do this with a load multiple insn. */
2057 if (targetm.have_load_multiple ())
2058 {
2059 rtx_insn *last = get_last_insn ();
2060 rtx first = gen_rtx_REG (word_mode, regno);
2061 if (rtx_insn *pat = targetm.gen_load_multiple (first, x,
2062 GEN_INT (nregs)))
2063 {
2064 emit_insn (pat);
2065 return;
2066 }
2067 else
2068 delete_insns_since (last);
2069 }
2070
2071 for (int i = 0; i < nregs; i++)
2072 emit_move_insn (gen_rtx_REG (word_mode, regno + i),
2073 operand_subword_force (x, i, mode));
2074 }
2075
2076 /* Copy all or part of a BLKmode value X out of registers starting at REGNO.
2077 The number of registers to be filled is NREGS. */
2078
2079 void
2080 move_block_from_reg (int regno, rtx x, int nregs)
2081 {
2082 if (nregs == 0)
2083 return;
2084
2085 /* See if the machine can do this with a store multiple insn. */
2086 if (targetm.have_store_multiple ())
2087 {
2088 rtx_insn *last = get_last_insn ();
2089 rtx first = gen_rtx_REG (word_mode, regno);
2090 if (rtx_insn *pat = targetm.gen_store_multiple (x, first,
2091 GEN_INT (nregs)))
2092 {
2093 emit_insn (pat);
2094 return;
2095 }
2096 else
2097 delete_insns_since (last);
2098 }
2099
2100 for (int i = 0; i < nregs; i++)
2101 {
2102 rtx tem = operand_subword (x, i, 1, BLKmode);
2103
2104 gcc_assert (tem);
2105
2106 emit_move_insn (tem, gen_rtx_REG (word_mode, regno + i));
2107 }
2108 }
2109
2110 /* Generate a PARALLEL rtx for a new non-consecutive group of registers from
2111 ORIG, where ORIG is a non-consecutive group of registers represented by
2112 a PARALLEL. The clone is identical to the original except in that the
2113 original set of registers is replaced by a new set of pseudo registers.
2114 The new set has the same modes as the original set. */
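/* For illustration, such a group might look like

     (parallel [(expr_list (reg:DI 3) (const_int 0))
		(expr_list (reg:DI 4) (const_int 8))])

   where each element pairs a register with the byte offset at which it
   lives within the value; the register in the first element may be null
   when part of the value is passed on the stack.  */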
2115
2116 rtx
2117 gen_group_rtx (rtx orig)
2118 {
2119 int i, length;
2120 rtx *tmps;
2121
2122 gcc_assert (GET_CODE (orig) == PARALLEL);
2123
2124 length = XVECLEN (orig, 0);
2125 tmps = XALLOCAVEC (rtx, length);
2126
2127 /* Skip a NULL entry in the first slot. */
2128 i = XEXP (XVECEXP (orig, 0, 0), 0) ? 0 : 1;
2129
2130 if (i)
2131 tmps[0] = 0;
2132
2133 for (; i < length; i++)
2134 {
2135 machine_mode mode = GET_MODE (XEXP (XVECEXP (orig, 0, i), 0));
2136 rtx offset = XEXP (XVECEXP (orig, 0, i), 1);
2137
2138 tmps[i] = gen_rtx_EXPR_LIST (VOIDmode, gen_reg_rtx (mode), offset);
2139 }
2140
2141 return gen_rtx_PARALLEL (GET_MODE (orig), gen_rtvec_v (length, tmps));
2142 }
2143
2144 /* A subroutine of emit_group_load. Arguments as for emit_group_load,
2145 except that values are placed in TMPS[i], and must later be moved
2146 into corresponding XEXP (XVECEXP (DST, 0, i), 0) element. */
2147
2148 static void
2149 emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type,
2150 poly_int64 ssize)
2151 {
2152 rtx src;
2153 int start, i;
2154 machine_mode m = GET_MODE (orig_src);
2155
2156 gcc_assert (GET_CODE (dst) == PARALLEL);
2157
2158 if (m != VOIDmode
2159 && !SCALAR_INT_MODE_P (m)
2160 && !MEM_P (orig_src)
2161 && GET_CODE (orig_src) != CONCAT)
2162 {
2163 scalar_int_mode imode;
2164 if (int_mode_for_mode (GET_MODE (orig_src)).exists (&imode))
2165 {
2166 src = gen_reg_rtx (imode);
2167 emit_move_insn (gen_lowpart (GET_MODE (orig_src), src), orig_src);
2168 }
2169 else
2170 {
2171 src = assign_stack_temp (GET_MODE (orig_src), ssize);
2172 emit_move_insn (src, orig_src);
2173 }
2174 emit_group_load_1 (tmps, dst, src, type, ssize);
2175 return;
2176 }
2177
2178 /* Check for a NULL entry, used to indicate that the parameter goes
2179 both on the stack and in registers. */
2180 if (XEXP (XVECEXP (dst, 0, 0), 0))
2181 start = 0;
2182 else
2183 start = 1;
2184
2185 /* Process the pieces. */
2186 for (i = start; i < XVECLEN (dst, 0); i++)
2187 {
2188 machine_mode mode = GET_MODE (XEXP (XVECEXP (dst, 0, i), 0));
2189 poly_int64 bytepos = rtx_to_poly_int64 (XEXP (XVECEXP (dst, 0, i), 1));
2190 poly_int64 bytelen = GET_MODE_SIZE (mode);
2191 poly_int64 shift = 0;
2192
2193 /* Handle trailing fragments that run over the size of the struct.
2194 It's the target's responsibility to make sure that the fragment
2195 cannot be strictly smaller in some cases and strictly larger
2196 in others. */
2197 gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
2198 if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
2199 {
2200 /* Arrange to shift the fragment to where it belongs.
2201 extract_bit_field loads to the lsb of the reg. */
2202 if (
2203 #ifdef BLOCK_REG_PADDING
2204 BLOCK_REG_PADDING (GET_MODE (orig_src), type, i == start)
2205 == (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD)
2206 #else
2207 BYTES_BIG_ENDIAN
2208 #endif
2209 )
2210 shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
2211 bytelen = ssize - bytepos;
2212 gcc_assert (maybe_gt (bytelen, 0));
2213 }
2214
2215 /* If we won't be loading directly from memory, protect the real source
2216 from strange tricks we might play; but make sure that the source can
2217 be loaded directly into the destination. */
2218 src = orig_src;
2219 if (!MEM_P (orig_src)
2220 && (!CONSTANT_P (orig_src)
2221 || (GET_MODE (orig_src) != mode
2222 && GET_MODE (orig_src) != VOIDmode)))
2223 {
2224 if (GET_MODE (orig_src) == VOIDmode)
2225 src = gen_reg_rtx (mode);
2226 else
2227 src = gen_reg_rtx (GET_MODE (orig_src));
2228
2229 emit_move_insn (src, orig_src);
2230 }
2231
2232 /* Optimize the access just a bit. */
2233 if (MEM_P (src)
2234 && (! targetm.slow_unaligned_access (mode, MEM_ALIGN (src))
2235 || MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode))
2236 && multiple_p (bytepos * BITS_PER_UNIT, GET_MODE_ALIGNMENT (mode))
2237 && known_eq (bytelen, GET_MODE_SIZE (mode)))
2238 {
2239 tmps[i] = gen_reg_rtx (mode);
2240 emit_move_insn (tmps[i], adjust_address (src, mode, bytepos));
2241 }
2242 else if (COMPLEX_MODE_P (mode)
2243 && GET_MODE (src) == mode
2244 && known_eq (bytelen, GET_MODE_SIZE (mode)))
2245 /* Let emit_move_complex do the bulk of the work. */
2246 tmps[i] = src;
2247 else if (GET_CODE (src) == CONCAT)
2248 {
2249 poly_int64 slen = GET_MODE_SIZE (GET_MODE (src));
2250 poly_int64 slen0 = GET_MODE_SIZE (GET_MODE (XEXP (src, 0)));
2251 unsigned int elt;
2252 poly_int64 subpos;
2253
2254 if (can_div_trunc_p (bytepos, slen0, &elt, &subpos)
2255 && known_le (subpos + bytelen, slen0))
2256 {
2257 /* The following assumes that the concatenated objects all
2258 have the same size. In this case, a simple calculation
2259 can be used to determine the object and the bit field
2260 to be extracted. */
2261 tmps[i] = XEXP (src, elt);
2262 if (maybe_ne (subpos, 0)
2263 || maybe_ne (subpos + bytelen, slen0)
2264 || (!CONSTANT_P (tmps[i])
2265 && (!REG_P (tmps[i]) || GET_MODE (tmps[i]) != mode)))
2266 tmps[i] = extract_bit_field (tmps[i], bytelen * BITS_PER_UNIT,
2267 subpos * BITS_PER_UNIT,
2268 1, NULL_RTX, mode, mode, false,
2269 NULL);
2270 }
2271 else
2272 {
2273 rtx mem;
2274
2275 gcc_assert (known_eq (bytepos, 0));
2276 mem = assign_stack_temp (GET_MODE (src), slen);
2277 emit_move_insn (mem, src);
2278 tmps[i] = extract_bit_field (mem, bytelen * BITS_PER_UNIT,
2279 0, 1, NULL_RTX, mode, mode, false,
2280 NULL);
2281 }
2282 }
2283 /* FIXME: A SIMD parallel will eventually lead to a subreg of a
2284 SIMD register, which is currently broken.  Until we get GCC
2285 to emit proper RTL for these cases, let's dump to memory. */
2286 else if (VECTOR_MODE_P (GET_MODE (dst))
2287 && REG_P (src))
2288 {
2289 poly_uint64 slen = GET_MODE_SIZE (GET_MODE (src));
2290 rtx mem;
2291
2292 mem = assign_stack_temp (GET_MODE (src), slen);
2293 emit_move_insn (mem, src);
2294 tmps[i] = adjust_address (mem, mode, bytepos);
2295 }
2296 else if (CONSTANT_P (src) && GET_MODE (dst) != BLKmode
2297 && XVECLEN (dst, 0) > 1)
2298 tmps[i] = simplify_gen_subreg (mode, src, GET_MODE (dst), bytepos);
2299 else if (CONSTANT_P (src))
2300 {
2301 if (known_eq (bytelen, ssize))
2302 tmps[i] = src;
2303 else
2304 {
2305 rtx first, second;
2306
2307 /* TODO: const_wide_int can have sizes other than this... */
2308 gcc_assert (known_eq (2 * bytelen, ssize));
2309 split_double (src, &first, &second);
2310 if (i)
2311 tmps[i] = second;
2312 else
2313 tmps[i] = first;
2314 }
2315 }
2316 else if (REG_P (src) && GET_MODE (src) == mode)
2317 tmps[i] = src;
2318 else
2319 tmps[i] = extract_bit_field (src, bytelen * BITS_PER_UNIT,
2320 bytepos * BITS_PER_UNIT, 1, NULL_RTX,
2321 mode, mode, false, NULL);
2322
2323 if (maybe_ne (shift, 0))
2324 tmps[i] = expand_shift (LSHIFT_EXPR, mode, tmps[i],
2325 shift, tmps[i], 0);
2326 }
2327 }
2328
2329 /* Emit code to move a block SRC of type TYPE to a block DST,
2330 where DST is non-consecutive registers represented by a PARALLEL.
2331 SSIZE represents the total size of block ORIG_SRC in bytes, or -1
2332 if not known. */
2333
2334 void
2335 emit_group_load (rtx dst, rtx src, tree type, poly_int64 ssize)
2336 {
2337 rtx *tmps;
2338 int i;
2339
2340 tmps = XALLOCAVEC (rtx, XVECLEN (dst, 0));
2341 emit_group_load_1 (tmps, dst, src, type, ssize);
2342
2343 /* Copy the extracted pieces into the proper (probable) hard regs. */
2344 for (i = 0; i < XVECLEN (dst, 0); i++)
2345 {
2346 rtx d = XEXP (XVECEXP (dst, 0, i), 0);
2347 if (d == NULL)
2348 continue;
2349 emit_move_insn (d, tmps[i]);
2350 }
2351 }
2352
2353 /* Similar, but load SRC into new pseudos in a format that looks like
2354 PARALLEL. This can later be fed to emit_group_move to get things
2355 in the right place. */
2356
2357 rtx
2358 emit_group_load_into_temps (rtx parallel, rtx src, tree type, poly_int64 ssize)
2359 {
2360 rtvec vec;
2361 int i;
2362
2363 vec = rtvec_alloc (XVECLEN (parallel, 0));
2364 emit_group_load_1 (&RTVEC_ELT (vec, 0), parallel, src, type, ssize);
2365
2366 /* Convert the vector to look just like the original PARALLEL, except
2367 with the computed values. */
2368 for (i = 0; i < XVECLEN (parallel, 0); i++)
2369 {
2370 rtx e = XVECEXP (parallel, 0, i);
2371 rtx d = XEXP (e, 0);
2372
2373 if (d)
2374 {
2375 d = force_reg (GET_MODE (d), RTVEC_ELT (vec, i));
2376 e = alloc_EXPR_LIST (REG_NOTE_KIND (e), d, XEXP (e, 1));
2377 }
2378 RTVEC_ELT (vec, i) = e;
2379 }
2380
2381 return gen_rtx_PARALLEL (GET_MODE (parallel), vec);
2382 }
2383
2384 /* Emit code to move a block SRC to block DST, where SRC and DST are
2385 non-consecutive groups of registers, each represented by a PARALLEL. */
2386
2387 void
2388 emit_group_move (rtx dst, rtx src)
2389 {
2390 int i;
2391
2392 gcc_assert (GET_CODE (src) == PARALLEL
2393 && GET_CODE (dst) == PARALLEL
2394 && XVECLEN (src, 0) == XVECLEN (dst, 0));
2395
2396 /* Skip first entry if NULL. */
2397 for (i = XEXP (XVECEXP (src, 0, 0), 0) ? 0 : 1; i < XVECLEN (src, 0); i++)
2398 emit_move_insn (XEXP (XVECEXP (dst, 0, i), 0),
2399 XEXP (XVECEXP (src, 0, i), 0));
2400 }
2401
2402 /* Move a group of registers represented by a PARALLEL into pseudos. */
2403
2404 rtx
2405 emit_group_move_into_temps (rtx src)
2406 {
2407 rtvec vec = rtvec_alloc (XVECLEN (src, 0));
2408 int i;
2409
2410 for (i = 0; i < XVECLEN (src, 0); i++)
2411 {
2412 rtx e = XVECEXP (src, 0, i);
2413 rtx d = XEXP (e, 0);
2414
2415 if (d)
2416 e = alloc_EXPR_LIST (REG_NOTE_KIND (e), copy_to_reg (d), XEXP (e, 1));
2417 RTVEC_ELT (vec, i) = e;
2418 }
2419
2420 return gen_rtx_PARALLEL (GET_MODE (src), vec);
2421 }
2422
2423 /* Emit code to move a block SRC to a block ORIG_DST of type TYPE,
2424 where SRC is non-consecutive registers represented by a PARALLEL.
2425 SSIZE represents the total size of block ORIG_DST, or -1 if not
2426 known. */
2427
2428 void
2429 emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED,
2430 poly_int64 ssize)
2431 {
2432 rtx *tmps, dst;
2433 int start, finish, i;
2434 machine_mode m = GET_MODE (orig_dst);
2435
2436 gcc_assert (GET_CODE (src) == PARALLEL);
2437
2438 if (!SCALAR_INT_MODE_P (m)
2439 && !MEM_P (orig_dst) && GET_CODE (orig_dst) != CONCAT)
2440 {
2441 scalar_int_mode imode;
2442 if (int_mode_for_mode (GET_MODE (orig_dst)).exists (&imode))
2443 {
2444 dst = gen_reg_rtx (imode);
2445 emit_group_store (dst, src, type, ssize);
2446 dst = gen_lowpart (GET_MODE (orig_dst), dst);
2447 }
2448 else
2449 {
2450 dst = assign_stack_temp (GET_MODE (orig_dst), ssize);
2451 emit_group_store (dst, src, type, ssize);
2452 }
2453 emit_move_insn (orig_dst, dst);
2454 return;
2455 }
2456
2457 /* Check for a NULL entry, used to indicate that the parameter goes
2458 both on the stack and in registers. */
2459 if (XEXP (XVECEXP (src, 0, 0), 0))
2460 start = 0;
2461 else
2462 start = 1;
2463 finish = XVECLEN (src, 0);
2464
2465 tmps = XALLOCAVEC (rtx, finish);
2466
2467 /* Copy the (probable) hard regs into pseudos. */
2468 for (i = start; i < finish; i++)
2469 {
2470 rtx reg = XEXP (XVECEXP (src, 0, i), 0);
2471 if (!REG_P (reg) || REGNO (reg) < FIRST_PSEUDO_REGISTER)
2472 {
2473 tmps[i] = gen_reg_rtx (GET_MODE (reg));
2474 emit_move_insn (tmps[i], reg);
2475 }
2476 else
2477 tmps[i] = reg;
2478 }
2479
2480 /* If we won't be storing directly into memory, protect the real destination
2481 from strange tricks we might play. */
2482 dst = orig_dst;
2483 if (GET_CODE (dst) == PARALLEL)
2484 {
2485 rtx temp;
2486
2487 /* We can get a PARALLEL dst if there is a conditional expression in
2488 a return statement. In that case, the dst and src are the same,
2489 so no action is necessary. */
2490 if (rtx_equal_p (dst, src))
2491 return;
2492
2493 /* It is unclear if we can ever reach here, but we may as well handle
2494 it. Allocate a temporary, and split this into a store/load to/from
2495 the temporary. */
2496 temp = assign_stack_temp (GET_MODE (dst), ssize);
2497 emit_group_store (temp, src, type, ssize);
2498 emit_group_load (dst, temp, type, ssize);
2499 return;
2500 }
2501 else if (!MEM_P (dst) && GET_CODE (dst) != CONCAT)
2502 {
2503 machine_mode outer = GET_MODE (dst);
2504 machine_mode inner;
2505 poly_int64 bytepos;
2506 bool done = false;
2507 rtx temp;
2508
2509 if (!REG_P (dst) || REGNO (dst) < FIRST_PSEUDO_REGISTER)
2510 dst = gen_reg_rtx (outer);
2511
2512 /* Make life a bit easier for combine. */
2513 /* If the first element of the vector is the low part
2514 of the destination mode, use a paradoxical subreg to
2515 initialize the destination. */
2516 if (start < finish)
2517 {
2518 inner = GET_MODE (tmps[start]);
2519 bytepos = subreg_lowpart_offset (inner, outer);
2520 if (known_eq (rtx_to_poly_int64 (XEXP (XVECEXP (src, 0, start), 1)),
2521 bytepos))
2522 {
2523 temp = simplify_gen_subreg (outer, tmps[start],
2524 inner, 0);
2525 if (temp)
2526 {
2527 emit_move_insn (dst, temp);
2528 done = true;
2529 start++;
2530 }
2531 }
2532 }
2533
2534 /* If the first element wasn't the low part, try the last. */
2535 if (!done
2536 && start < finish - 1)
2537 {
2538 inner = GET_MODE (tmps[finish - 1]);
2539 bytepos = subreg_lowpart_offset (inner, outer);
2540 if (known_eq (rtx_to_poly_int64 (XEXP (XVECEXP (src, 0,
2541 finish - 1), 1)),
2542 bytepos))
2543 {
2544 temp = simplify_gen_subreg (outer, tmps[finish - 1],
2545 inner, 0);
2546 if (temp)
2547 {
2548 emit_move_insn (dst, temp);
2549 done = true;
2550 finish--;
2551 }
2552 }
2553 }
2554
2555 /* Otherwise, simply initialize the result to zero. */
2556 if (!done)
2557 emit_move_insn (dst, CONST0_RTX (outer));
2558 }
2559
2560 /* Process the pieces. */
2561 for (i = start; i < finish; i++)
2562 {
2563 poly_int64 bytepos = rtx_to_poly_int64 (XEXP (XVECEXP (src, 0, i), 1));
2564 machine_mode mode = GET_MODE (tmps[i]);
2565 poly_int64 bytelen = GET_MODE_SIZE (mode);
2566 poly_uint64 adj_bytelen;
2567 rtx dest = dst;
2568
2569 /* Handle trailing fragments that run over the size of the struct.
2570 It's the target's responsibility to make sure that the fragment
2571 cannot be strictly smaller in some cases and strictly larger
2572 in others. */
2573 gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
2574 if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
2575 adj_bytelen = ssize - bytepos;
2576 else
2577 adj_bytelen = bytelen;
2578
2579 if (GET_CODE (dst) == CONCAT)
2580 {
2581 if (known_le (bytepos + adj_bytelen,
2582 GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
2583 dest = XEXP (dst, 0);
2584 else if (known_ge (bytepos, GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
2585 {
2586 bytepos -= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)));
2587 dest = XEXP (dst, 1);
2588 }
2589 else
2590 {
2591 machine_mode dest_mode = GET_MODE (dest);
2592 machine_mode tmp_mode = GET_MODE (tmps[i]);
2593
2594 gcc_assert (known_eq (bytepos, 0) && XVECLEN (src, 0));
2595
2596 if (GET_MODE_ALIGNMENT (dest_mode)
2597 >= GET_MODE_ALIGNMENT (tmp_mode))
2598 {
2599 dest = assign_stack_temp (dest_mode,
2600 GET_MODE_SIZE (dest_mode));
2601 emit_move_insn (adjust_address (dest,
2602 tmp_mode,
2603 bytepos),
2604 tmps[i]);
2605 dst = dest;
2606 }
2607 else
2608 {
2609 dest = assign_stack_temp (tmp_mode,
2610 GET_MODE_SIZE (tmp_mode));
2611 emit_move_insn (dest, tmps[i]);
2612 dst = adjust_address (dest, dest_mode, bytepos);
2613 }
2614 break;
2615 }
2616 }
2617
2618 /* Handle trailing fragments that run over the size of the struct. */
2619 if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
2620 {
2621 /* store_bit_field always takes its value from the lsb.
2622 Move the fragment to the lsb if it's not already there. */
2623 if (
2624 #ifdef BLOCK_REG_PADDING
2625 BLOCK_REG_PADDING (GET_MODE (orig_dst), type, i == start)
2626 == (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD)
2627 #else
2628 BYTES_BIG_ENDIAN
2629 #endif
2630 )
2631 {
2632 poly_int64 shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
2633 tmps[i] = expand_shift (RSHIFT_EXPR, mode, tmps[i],
2634 shift, tmps[i], 0);
2635 }
2636
2637 /* Make sure not to write past the end of the struct. */
2638 store_bit_field (dest,
2639 adj_bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT,
2640 bytepos * BITS_PER_UNIT, ssize * BITS_PER_UNIT - 1,
2641 VOIDmode, tmps[i], false);
2642 }
2643
2644 /* Optimize the access just a bit. */
2645 else if (MEM_P (dest)
2646 && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (dest))
2647 || MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode))
2648 && multiple_p (bytepos * BITS_PER_UNIT,
2649 GET_MODE_ALIGNMENT (mode))
2650 && known_eq (bytelen, GET_MODE_SIZE (mode)))
2651 emit_move_insn (adjust_address (dest, mode, bytepos), tmps[i]);
2652
2653 else
2654 store_bit_field (dest, bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT,
2655 0, 0, mode, tmps[i], false);
2656 }
2657
2658 /* Copy from the pseudo into the (probable) hard reg. */
2659 if (orig_dst != dst)
2660 emit_move_insn (orig_dst, dst);
2661 }
2662
2663 /* Return a form of X that does not use a PARALLEL. TYPE is the type
2664 of the value stored in X. */
2665
2666 rtx
2667 maybe_emit_group_store (rtx x, tree type)
2668 {
2669 machine_mode mode = TYPE_MODE (type);
2670 gcc_checking_assert (GET_MODE (x) == VOIDmode || GET_MODE (x) == mode);
2671 if (GET_CODE (x) == PARALLEL)
2672 {
2673 rtx result = gen_reg_rtx (mode);
2674 emit_group_store (result, x, type, int_size_in_bytes (type));
2675 return result;
2676 }
2677 return x;
2678 }
2679
2680 /* Copy a BLKmode object of TYPE out of a register SRCREG into TARGET.
2681
2682 This is used on targets that return BLKmode values in registers. */
2683
2684 static void
2685 copy_blkmode_from_reg (rtx target, rtx srcreg, tree type)
2686 {
2687 unsigned HOST_WIDE_INT bytes = int_size_in_bytes (type);
2688 rtx src = NULL, dst = NULL;
2689 unsigned HOST_WIDE_INT bitsize = MIN (TYPE_ALIGN (type), BITS_PER_WORD);
2690 unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0;
2691 /* No current ABI uses variable-sized modes to pass a BLKmode type. */
2692 fixed_size_mode mode = as_a <fixed_size_mode> (GET_MODE (srcreg));
2693 fixed_size_mode tmode = as_a <fixed_size_mode> (GET_MODE (target));
2694 fixed_size_mode copy_mode;
2695
2696 /* BLKmode registers created in the back-end shouldn't have survived. */
2697 gcc_assert (mode != BLKmode);
2698
2699 /* If the structure doesn't take up a whole number of words, see whether
2700 SRCREG is padded on the left or on the right. If it's on the left,
2701 set PADDING_CORRECTION to the number of bits to skip.
2702
2703 In most ABIs, the structure will be returned at the least significant
2704 end of the register, which translates to right padding on little-endian
2705 targets and left padding on big-endian targets. The opposite
2706 holds if the structure is returned at the most significant
2707 end of the register. */
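/* For example, assuming 32-bit words, a 6-byte structure padded on the
   left gives padding_correction = 32 - (6 % 4) * 8 = 16 bits to skip.  */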
2708 if (bytes % UNITS_PER_WORD != 0
2709 && (targetm.calls.return_in_msb (type)
2710 ? !BYTES_BIG_ENDIAN
2711 : BYTES_BIG_ENDIAN))
2712 padding_correction
2713 = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD) * BITS_PER_UNIT));
2714
2715 /* We can use a single move if we have an exact mode for the size. */
2716 else if (MEM_P (target)
2717 && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (target))
2718 || MEM_ALIGN (target) >= GET_MODE_ALIGNMENT (mode))
2719 && bytes == GET_MODE_SIZE (mode))
2720 {
2721 emit_move_insn (adjust_address (target, mode, 0), srcreg);
2722 return;
2723 }
2724
2725 /* And if we additionally have the same mode for a register. */
2726 else if (REG_P (target)
2727 && GET_MODE (target) == mode
2728 && bytes == GET_MODE_SIZE (mode))
2729 {
2730 emit_move_insn (target, srcreg);
2731 return;
2732 }
2733
2734 /* This code assumes srcreg is at least a full word. If it isn't, copy it
2735 into a new pseudo which is a full word. */
2736 if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
2737 {
2738 srcreg = convert_to_mode (word_mode, srcreg, TYPE_UNSIGNED (type));
2739 mode = word_mode;
2740 }
2741
2742 /* Copy the structure BITSIZE bits at a time. If the target lives in
2743 memory, take care of not reading/writing past its end by selecting
2744 a copy mode suited to BITSIZE. This should always be possible given
2745 how it is computed.
2746
2747 If the target lives in a register, make sure not to select a copy mode
2748 larger than the mode of the register.
2749
2750 We could probably emit more efficient code for machines which do not use
2751 strict alignment, but it doesn't seem worth the effort at the current
2752 time. */
2753
2754 copy_mode = word_mode;
2755 if (MEM_P (target))
2756 {
2757 opt_scalar_int_mode mem_mode = int_mode_for_size (bitsize, 1);
2758 if (mem_mode.exists ())
2759 copy_mode = mem_mode.require ();
2760 }
2761 else if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
2762 copy_mode = tmode;
2763
2764 for (bitpos = 0, xbitpos = padding_correction;
2765 bitpos < bytes * BITS_PER_UNIT;
2766 bitpos += bitsize, xbitpos += bitsize)
2767 {
2768 /* We need a new source operand each time xbitpos is on a
2769 word boundary and when xbitpos == padding_correction
2770 (the first time through). */
2771 if (xbitpos % BITS_PER_WORD == 0 || xbitpos == padding_correction)
2772 src = operand_subword_force (srcreg, xbitpos / BITS_PER_WORD, mode);
2773
2774 /* We need a new destination operand each time bitpos is on
2775 a word boundary. */
2776 if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
2777 dst = target;
2778 else if (bitpos % BITS_PER_WORD == 0)
2779 dst = operand_subword (target, bitpos / BITS_PER_WORD, 1, tmode);
2780
2781 /* Use xbitpos for the source extraction (right justified) and
2782 bitpos for the destination store (left justified). */
2783 store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, 0, 0, copy_mode,
2784 extract_bit_field (src, bitsize,
2785 xbitpos % BITS_PER_WORD, 1,
2786 NULL_RTX, copy_mode, copy_mode,
2787 false, NULL),
2788 false);
2789 }
2790 }
2791
2792 /* Copy BLKmode value SRC into a register of mode MODE_IN. Return the
2793 register if it contains any data, otherwise return null.
2794
2795 This is used on targets that return BLKmode values in registers. */
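/* With 4-byte words, for example, a 6-byte structure is assembled into
   n_regs = (6 + 4 - 1) / 4 = 2 word_mode pseudos before being combined
   into a register of the requested mode.  */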
2796
2797 rtx
2798 copy_blkmode_to_reg (machine_mode mode_in, tree src)
2799 {
2800 int i, n_regs;
2801 unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0, bytes;
2802 unsigned int bitsize;
2803 rtx *dst_words, dst, x, src_word = NULL_RTX, dst_word = NULL_RTX;
2804 /* No current ABI uses variable-sized modes to pass a BLKmode type. */
2805 fixed_size_mode mode = as_a <fixed_size_mode> (mode_in);
2806 fixed_size_mode dst_mode;
2807 scalar_int_mode min_mode;
2808
2809 gcc_assert (TYPE_MODE (TREE_TYPE (src)) == BLKmode);
2810
2811 x = expand_normal (src);
2812
2813 bytes = arg_int_size_in_bytes (TREE_TYPE (src));
2814 if (bytes == 0)
2815 return NULL_RTX;
2816
2817 /* If the structure doesn't take up a whole number of words, see
2818 whether the register value should be padded on the left or on
2819 the right. Set PADDING_CORRECTION to the number of padding
2820 bits needed on the left side.
2821
2822 In most ABIs, the structure will be returned at the least significant
2823 end of the register, which translates to right padding on little-endian
2824 targets and left padding on big-endian targets. The opposite
2825 holds if the structure is returned at the most significant
2826 end of the register. */
2827 if (bytes % UNITS_PER_WORD != 0
2828 && (targetm.calls.return_in_msb (TREE_TYPE (src))
2829 ? !BYTES_BIG_ENDIAN
2830 : BYTES_BIG_ENDIAN))
2831 padding_correction = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD)
2832 * BITS_PER_UNIT));
2833
2834 n_regs = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2835 dst_words = XALLOCAVEC (rtx, n_regs);
2836 bitsize = MIN (TYPE_ALIGN (TREE_TYPE (src)), BITS_PER_WORD);
2837 min_mode = smallest_int_mode_for_size (bitsize);
2838
2839 /* Copy the structure BITSIZE bits at a time. */
2840 for (bitpos = 0, xbitpos = padding_correction;
2841 bitpos < bytes * BITS_PER_UNIT;
2842 bitpos += bitsize, xbitpos += bitsize)
2843 {
2844 /* We need a new destination pseudo each time xbitpos is
2845 on a word boundary and when xbitpos == padding_correction
2846 (the first time through). */
2847 if (xbitpos % BITS_PER_WORD == 0
2848 || xbitpos == padding_correction)
2849 {
2850 /* Generate an appropriate register. */
2851 dst_word = gen_reg_rtx (word_mode);
2852 dst_words[xbitpos / BITS_PER_WORD] = dst_word;
2853
2854 /* Clear the destination before we move anything into it. */
2855 emit_move_insn (dst_word, CONST0_RTX (word_mode));
2856 }
2857
2858 /* Find the largest integer mode that can be used to copy all or as
2859 many bits as possible of the structure if the target supports larger
2860 copies.  There are too many corner cases here w.r.t. alignment of
2861 the reads/writes, so if there is any padding just use single-byte
2862 operations. */
2863 opt_scalar_int_mode mode_iter;
2864 if (padding_correction == 0 && !STRICT_ALIGNMENT)
2865 {
2866 FOR_EACH_MODE_FROM (mode_iter, min_mode)
2867 {
2868 unsigned int msize = GET_MODE_BITSIZE (mode_iter.require ());
2869 if (msize <= ((bytes * BITS_PER_UNIT) - bitpos)
2870 && msize <= BITS_PER_WORD)
2871 bitsize = msize;
2872 else
2873 break;
2874 }
2875 }
2876
2877 /* We need a new source operand each time bitpos is on a word
2878 boundary. */
2879 if (bitpos % BITS_PER_WORD == 0)
2880 src_word = operand_subword_force (x, bitpos / BITS_PER_WORD, BLKmode);
2881
2882 /* Use bitpos for the source extraction (left justified) and
2883 xbitpos for the destination store (right justified). */
2884 store_bit_field (dst_word, bitsize, xbitpos % BITS_PER_WORD,
2885 0, 0, word_mode,
2886 extract_bit_field (src_word, bitsize,
2887 bitpos % BITS_PER_WORD, 1,
2888 NULL_RTX, word_mode, word_mode,
2889 false, NULL),
2890 false);
2891 }
2892
2893 if (mode == BLKmode)
2894 {
2895 /* Find the smallest integer mode large enough to hold the
2896 entire structure. */
2897 opt_scalar_int_mode mode_iter;
2898 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
2899 if (GET_MODE_SIZE (mode_iter.require ()) >= bytes)
2900 break;
2901
2902 /* A suitable mode should have been found. */
2903 mode = mode_iter.require ();
2904 }
2905
2906 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (word_mode))
2907 dst_mode = word_mode;
2908 else
2909 dst_mode = mode;
2910 dst = gen_reg_rtx (dst_mode);
2911
2912 for (i = 0; i < n_regs; i++)
2913 emit_move_insn (operand_subword (dst, i, 0, dst_mode), dst_words[i]);
2914
2915 if (mode != dst_mode)
2916 dst = gen_lowpart (mode, dst);
2917
2918 return dst;
2919 }
2920
2921 /* Add a USE expression for REG to the (possibly empty) list pointed
2922 to by CALL_FUSAGE. REG must denote a hard register. */
2923
2924 void
2925 use_reg_mode (rtx *call_fusage, rtx reg, machine_mode mode)
2926 {
2927 gcc_assert (REG_P (reg));
2928
2929 if (!HARD_REGISTER_P (reg))
2930 return;
2931
2932 *call_fusage
2933 = gen_rtx_EXPR_LIST (mode, gen_rtx_USE (VOIDmode, reg), *call_fusage);
2934 }
2935
2936 /* Add a CLOBBER expression for REG to the (possibly empty) list pointed
2937 to by CALL_FUSAGE. REG must denote a hard register. */
2938
2939 void
2940 clobber_reg_mode (rtx *call_fusage, rtx reg, machine_mode mode)
2941 {
2942 gcc_assert (REG_P (reg) && REGNO (reg) < FIRST_PSEUDO_REGISTER);
2943
2944 *call_fusage
2945 = gen_rtx_EXPR_LIST (mode, gen_rtx_CLOBBER (VOIDmode, reg), *call_fusage);
2946 }
2947
2948 /* Add USE expressions to *CALL_FUSAGE for each of NREGS consecutive regs,
2949 starting at REGNO. All of these registers must be hard registers. */
2950
2951 void
2952 use_regs (rtx *call_fusage, int regno, int nregs)
2953 {
2954 int i;
2955
2956 gcc_assert (regno + nregs <= FIRST_PSEUDO_REGISTER);
2957
2958 for (i = 0; i < nregs; i++)
2959 use_reg (call_fusage, regno_reg_rtx[regno + i]);
2960 }
2961
2962 /* Add USE expressions to *CALL_FUSAGE for each REG contained in the
2963 PARALLEL REGS. This is for calls that pass values in multiple
2964 non-contiguous locations. The Irix 6 ABI has examples of this. */
2965
2966 void
2967 use_group_regs (rtx *call_fusage, rtx regs)
2968 {
2969 int i;
2970
2971 for (i = 0; i < XVECLEN (regs, 0); i++)
2972 {
2973 rtx reg = XEXP (XVECEXP (regs, 0, i), 0);
2974
2975 /* A NULL entry means the parameter goes both on the stack and in
2976 registers. This can also be a MEM for targets that pass values
2977 partially on the stack and partially in registers. */
2978 if (reg != 0 && REG_P (reg))
2979 use_reg (call_fusage, reg);
2980 }
2981 }
2982
2983 /* Return the defining gimple statement for SSA_NAME NAME if it is an
2984 assignment and the code of the expression on the RHS is CODE. Return
2985 NULL otherwise. */
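/* For instance, get_def_for_expr (name, MULT_EXPR) yields NAME's
   defining statement only when NAME was computed by a multiplication,
   letting callers pattern-match on the defining operation.  */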
2986
2987 static gimple *
2988 get_def_for_expr (tree name, enum tree_code code)
2989 {
2990 gimple *def_stmt;
2991
2992 if (TREE_CODE (name) != SSA_NAME)
2993 return NULL;
2994
2995 def_stmt = get_gimple_for_ssa_name (name);
2996 if (!def_stmt
2997 || gimple_assign_rhs_code (def_stmt) != code)
2998 return NULL;
2999
3000 return def_stmt;
3001 }
3002
3003 /* Return the defining gimple statement for SSA_NAME NAME if it is an
3004 assignment and the class of the expression on the RHS is CLASS. Return
3005 NULL otherwise. */
3006
3007 static gimple *
3008 get_def_for_expr_class (tree name, enum tree_code_class tclass)
3009 {
3010 gimple *def_stmt;
3011
3012 if (TREE_CODE (name) != SSA_NAME)
3013 return NULL;
3014
3015 def_stmt = get_gimple_for_ssa_name (name);
3016 if (!def_stmt
3017 || TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt)) != tclass)
3018 return NULL;
3019
3020 return def_stmt;
3021 }
3022 \f
3023 /* Write zeros through the storage of OBJECT. If OBJECT has BLKmode, SIZE is
3024 its length in bytes. */
3025
3026 rtx
3027 clear_storage_hints (rtx object, rtx size, enum block_op_methods method,
3028 unsigned int expected_align, HOST_WIDE_INT expected_size,
3029 unsigned HOST_WIDE_INT min_size,
3030 unsigned HOST_WIDE_INT max_size,
3031 unsigned HOST_WIDE_INT probable_max_size)
3032 {
3033 machine_mode mode = GET_MODE (object);
3034 unsigned int align;
3035
3036 gcc_assert (method == BLOCK_OP_NORMAL || method == BLOCK_OP_TAILCALL);
3037
3038 /* If OBJECT is not BLKmode and SIZE is the same size as its mode,
3039 just move a zero. Otherwise, do this a piece at a time. */
3040 poly_int64 size_val;
3041 if (mode != BLKmode
3042 && poly_int_rtx_p (size, &size_val)
3043 && known_eq (size_val, GET_MODE_SIZE (mode)))
3044 {
3045 rtx zero = CONST0_RTX (mode);
3046 if (zero != NULL)
3047 {
3048 emit_move_insn (object, zero);
3049 return NULL;
3050 }
3051
3052 if (COMPLEX_MODE_P (mode))
3053 {
3054 zero = CONST0_RTX (GET_MODE_INNER (mode));
3055 if (zero != NULL)
3056 {
3057 write_complex_part (object, zero, 0);
3058 write_complex_part (object, zero, 1);
3059 return NULL;
3060 }
3061 }
3062 }
3063
3064 if (size == const0_rtx)
3065 return NULL;
3066
3067 align = MEM_ALIGN (object);
3068
3069 if (CONST_INT_P (size)
3070 && targetm.use_by_pieces_infrastructure_p (INTVAL (size), align,
3071 CLEAR_BY_PIECES,
3072 optimize_insn_for_speed_p ()))
3073 clear_by_pieces (object, INTVAL (size), align);
3074 else if (set_storage_via_setmem (object, size, const0_rtx, align,
3075 expected_align, expected_size,
3076 min_size, max_size, probable_max_size))
3077 ;
3078 else if (ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (object)))
3079 return set_storage_via_libcall (object, size, const0_rtx,
3080 method == BLOCK_OP_TAILCALL);
3081 else
3082 gcc_unreachable ();
3083
3084 return NULL;
3085 }
3086
3087 rtx
3088 clear_storage (rtx object, rtx size, enum block_op_methods method)
3089 {
3090 unsigned HOST_WIDE_INT max, min = 0;
3091 if (GET_CODE (size) == CONST_INT)
3092 min = max = UINTVAL (size);
3093 else
3094 max = GET_MODE_MASK (GET_MODE (size));
3095 return clear_storage_hints (object, size, method, 0, -1, min, max, max);
3096 }
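
/* For example,

     clear_storage (mem, GEN_INT (16), BLOCK_OP_NORMAL);

   zeros a 16-byte block, choosing between clear_by_pieces, a setmem
   pattern and a memset libcall as clear_storage_hints sees fit.  */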
3097
3098
3099 /* A subroutine of clear_storage. Expand a call to memset.
3100 Return the return value of memset, 0 otherwise. */
3101
3102 rtx
3103 set_storage_via_libcall (rtx object, rtx size, rtx val, bool tailcall)
3104 {
3105 tree call_expr, fn, object_tree, size_tree, val_tree;
3106 machine_mode size_mode;
3107
3108 object = copy_addr_to_reg (XEXP (object, 0));
3109 object_tree = make_tree (ptr_type_node, object);
3110
3111 if (!CONST_INT_P (val))
3112 val = convert_to_mode (TYPE_MODE (integer_type_node), val, 1);
3113 val_tree = make_tree (integer_type_node, val);
3114
3115 size_mode = TYPE_MODE (sizetype);
3116 size = convert_to_mode (size_mode, size, 1);
3117 size = copy_to_mode_reg (size_mode, size);
3118 size_tree = make_tree (sizetype, size);
3119
3120 /* It is incorrect to use the libcall calling conventions for calls to
3121 memset because it can be provided by the user. */
3122 fn = builtin_decl_implicit (BUILT_IN_MEMSET);
3123 call_expr = build_call_expr (fn, 3, object_tree, val_tree, size_tree);
3124 CALL_EXPR_TAILCALL (call_expr) = tailcall;
3125
3126 return expand_call (call_expr, NULL_RTX, false);
3127 }
3128 \f
3129 /* Expand a setmem pattern; return true if successful. */
3130
3131 bool
3132 set_storage_via_setmem (rtx object, rtx size, rtx val, unsigned int align,
3133 unsigned int expected_align, HOST_WIDE_INT expected_size,
3134 unsigned HOST_WIDE_INT min_size,
3135 unsigned HOST_WIDE_INT max_size,
3136 unsigned HOST_WIDE_INT probable_max_size)
3137 {
3138 /* Try the most limited insn first, because there's no point
3139 including more than one in the machine description unless
3140 the more limited one has some advantage. */
3141
3142 if (expected_align < align)
3143 expected_align = align;
3144 if (expected_size != -1)
3145 {
3146 if ((unsigned HOST_WIDE_INT)expected_size > max_size)
3147 expected_size = max_size;
3148 if ((unsigned HOST_WIDE_INT)expected_size < min_size)
3149 expected_size = min_size;
3150 }
3151
3152 opt_scalar_int_mode mode_iter;
3153 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
3154 {
3155 scalar_int_mode mode = mode_iter.require ();
3156 enum insn_code code = direct_optab_handler (setmem_optab, mode);
3157
3158 if (code != CODE_FOR_nothing
3159 /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
3160 here because if SIZE is less than the mode mask, as it is
3161 returned by the macro, it will definitely be less than the
3162 actual mode mask. Since SIZE is within the Pmode address
3163 space, we limit MODE to Pmode. */
3164 && ((CONST_INT_P (size)
3165 && ((unsigned HOST_WIDE_INT) INTVAL (size)
3166 <= (GET_MODE_MASK (mode) >> 1)))
3167 || max_size <= (GET_MODE_MASK (mode) >> 1)
3168 || GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode)))
3169 {
3170 class expand_operand ops[9];
3171 unsigned int nops;
3172
3173 nops = insn_data[(int) code].n_generator_args;
3174 gcc_assert (nops == 4 || nops == 6 || nops == 8 || nops == 9);
3175
3176 create_fixed_operand (&ops[0], object);
3177 /* The check above guarantees that this size conversion is valid. */
3178 create_convert_operand_to (&ops[1], size, mode, true);
3179 create_convert_operand_from (&ops[2], val, byte_mode, true);
3180 create_integer_operand (&ops[3], align / BITS_PER_UNIT);
3181 if (nops >= 6)
3182 {
3183 create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
3184 create_integer_operand (&ops[5], expected_size);
3185 }
3186 if (nops >= 8)
3187 {
3188 create_integer_operand (&ops[6], min_size);
3189 /* If we cannot represent the maximal size,
3190 make parameter NULL. */
3191 if ((HOST_WIDE_INT) max_size != -1)
3192 create_integer_operand (&ops[7], max_size);
3193 else
3194 create_fixed_operand (&ops[7], NULL);
3195 }
3196 if (nops == 9)
3197 {
3198 /* If we cannot represent the maximal size,
3199 make parameter NULL. */
3200 if ((HOST_WIDE_INT) probable_max_size != -1)
3201 create_integer_operand (&ops[8], probable_max_size);
3202 else
3203 create_fixed_operand (&ops[8], NULL);
3204 }
3205 if (maybe_expand_insn (code, nops, ops))
3206 return true;
3207 }
3208 }
3209
3210 return false;
3211 }
3212
3213 \f
3214 /* Write to one of the components of the complex value CPLX. Write VAL to
3215 the real part if IMAG_P is false, and the imaginary part if it's true. */
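/* When CPLX is a CONCAT, its parts are separate rtxes: XEXP (cplx, 0)
   is the real part and XEXP (cplx, 1) the imaginary part, so writing a
   component is just a move.  */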
3216
3217 void
3218 write_complex_part (rtx cplx, rtx val, bool imag_p)
3219 {
3220 machine_mode cmode;
3221 scalar_mode imode;
3222 unsigned ibitsize;
3223
3224 if (GET_CODE (cplx) == CONCAT)
3225 {
3226 emit_move_insn (XEXP (cplx, imag_p), val);
3227 return;
3228 }
3229
3230 cmode = GET_MODE (cplx);
3231 imode = GET_MODE_INNER (cmode);
3232 ibitsize = GET_MODE_BITSIZE (imode);
3233
3234 /* For MEMs simplify_gen_subreg may generate an invalid new address
3235 because, e.g., the original address is considered mode-dependent
3236 by the target, which restricts simplify_subreg from invoking
3237 adjust_address_nv. Instead of preparing fallback support for an
3238 invalid address, we call adjust_address_nv directly. */
3239 if (MEM_P (cplx))
3240 {
3241 emit_move_insn (adjust_address_nv (cplx, imode,
3242 imag_p ? GET_MODE_SIZE (imode) : 0),
3243 val);
3244 return;
3245 }
3246
3247 /* If the sub-object is at least word sized, then we know that subregging
3248 will work. This special case is important, since store_bit_field
3249 wants to operate on integer modes, and there's rarely an OImode to
3250 correspond to TCmode. */
3251 if (ibitsize >= BITS_PER_WORD
3252 /* For hard regs we have exact predicates. Assume we can split
3253 the original object if it spans an even number of hard regs.
3254 This special case is important for SCmode on 64-bit platforms
3255 where the natural size of floating-point regs is 32-bit. */
3256 || (REG_P (cplx)
3257 && REGNO (cplx) < FIRST_PSEUDO_REGISTER
3258 && REG_NREGS (cplx) % 2 == 0))
3259 {
3260 rtx part = simplify_gen_subreg (imode, cplx, cmode,
3261 imag_p ? GET_MODE_SIZE (imode) : 0);
3262 if (part)
3263 {
3264 emit_move_insn (part, val);
3265 return;
3266 }
3267 else
3268 /* simplify_gen_subreg may fail for sub-word MEMs. */
3269 gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
3270 }
3271
3272 store_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0, 0, 0, imode, val,
3273 false);
3274 }
3275
3276 /* Extract one of the components of the complex value CPLX. Extract the
3277 real part if IMAG_P is false, and the imaginary part if it's true. */
3278
3279 rtx
3280 read_complex_part (rtx cplx, bool imag_p)
3281 {
3282 machine_mode cmode;
3283 scalar_mode imode;
3284 unsigned ibitsize;
3285
3286 if (GET_CODE (cplx) == CONCAT)
3287 return XEXP (cplx, imag_p);
3288
3289 cmode = GET_MODE (cplx);
3290 imode = GET_MODE_INNER (cmode);
3291 ibitsize = GET_MODE_BITSIZE (imode);
3292
3293 /* Special case reads from complex constants that got spilled to memory. */
3294 if (MEM_P (cplx) && GET_CODE (XEXP (cplx, 0)) == SYMBOL_REF)
3295 {
3296 tree decl = SYMBOL_REF_DECL (XEXP (cplx, 0));
3297 if (decl && TREE_CODE (decl) == COMPLEX_CST)
3298 {
3299 tree part = imag_p ? TREE_IMAGPART (decl) : TREE_REALPART (decl);
3300 if (CONSTANT_CLASS_P (part))
3301 return expand_expr (part, NULL_RTX, imode, EXPAND_NORMAL);
3302 }
3303 }
3304
3305 /* For MEMs simplify_gen_subreg may generate an invalid new address
3306 because, e.g., the original address is considered mode-dependent
3307 by the target, which restricts simplify_subreg from invoking
3308 adjust_address_nv. Instead of preparing fallback support for an
3309 invalid address, we call adjust_address_nv directly. */
3310 if (MEM_P (cplx))
3311 return adjust_address_nv (cplx, imode,
3312 imag_p ? GET_MODE_SIZE (imode) : 0);
3313
3314 /* If the sub-object is at least word sized, then we know that subregging
3315 will work. This special case is important, since extract_bit_field
3316 wants to operate on integer modes, and there's rarely an OImode to
3317 correspond to TCmode. */
3318 if (ibitsize >= BITS_PER_WORD
3319 /* For hard regs we have exact predicates. Assume we can split
3320 the original object if it spans an even number of hard regs.
3321 This special case is important for SCmode on 64-bit platforms
3322 where the natural size of floating-point regs is 32-bit. */
3323 || (REG_P (cplx)
3324 && REGNO (cplx) < FIRST_PSEUDO_REGISTER
3325 && REG_NREGS (cplx) % 2 == 0))
3326 {
3327 rtx ret = simplify_gen_subreg (imode, cplx, cmode,
3328 imag_p ? GET_MODE_SIZE (imode) : 0);
3329 if (ret)
3330 return ret;
3331 else
3332 /* simplify_gen_subreg may fail for sub-word MEMs. */
3333 gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
3334 }
3335
3336 return extract_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0,
3337 true, NULL_RTX, imode, imode, false, NULL);
3338 }
3339 \f
3340 /* A subroutine of emit_move_insn_1. Yet another lowpart generator.
3341 NEW_MODE and OLD_MODE are the same size. Return NULL if X cannot be
3342 represented in NEW_MODE. If FORCE is true, this will never happen, as
3343 we'll force-create a SUBREG if needed. */
3344
3345 static rtx
3346 emit_move_change_mode (machine_mode new_mode,
3347 machine_mode old_mode, rtx x, bool force)
3348 {
3349 rtx ret;
3350
3351 if (push_operand (x, GET_MODE (x)))
3352 {
3353 ret = gen_rtx_MEM (new_mode, XEXP (x, 0));
3354 MEM_COPY_ATTRIBUTES (ret, x);
3355 }
3356 else if (MEM_P (x))
3357 {
3358 /* We don't have to worry about changing the address since the
3359 size in bytes is supposed to be the same. */
3360 if (reload_in_progress)
3361 {
3362 /* Copy the MEM to change the mode and move any
3363 substitutions from the old MEM to the new one. */
3364 ret = adjust_address_nv (x, new_mode, 0);
3365 copy_replacements (x, ret);
3366 }
3367 else
3368 ret = adjust_address (x, new_mode, 0);
3369 }
3370 else
3371 {
3372 /* Note that we do want simplify_subreg's behavior of validating
3373 that the new mode is ok for a hard register. If we were to use
3374 simplify_gen_subreg, we would create the subreg, but would
3375 probably run into the target not being able to implement it. */
3376 /* Except, of course, when FORCE is true, in which case this is exactly
3377 what we want, and is needed for CCmodes on some targets. */
3378 if (force)
3379 ret = simplify_gen_subreg (new_mode, x, old_mode, 0);
3380 else
3381 ret = simplify_subreg (new_mode, x, old_mode, 0);
3382 }
3383
3384 return ret;
3385 }
3386
3387 /* A subroutine of emit_move_insn_1. Generate a move from Y into X using
3388 an integer mode of the same size as MODE. Returns the instruction
3389 emitted, or NULL if such a move could not be generated. */
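/* E.g. a DFmode move can often be done as a DImode move when the target
   has a DImode mov pattern, since int_mode_for_mode (DFmode) returns an
   integer mode of the same size.  */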
3390
3391 static rtx_insn *
3392 emit_move_via_integer (machine_mode mode, rtx x, rtx y, bool force)
3393 {
3394 scalar_int_mode imode;
3395 enum insn_code code;
3396
3397 /* There must exist a mode of the exact size we require. */
3398 if (!int_mode_for_mode (mode).exists (&imode))
3399 return NULL;
3400
3401 /* The target must support moves in this mode. */
3402 code = optab_handler (mov_optab, imode);
3403 if (code == CODE_FOR_nothing)
3404 return NULL;
3405
3406 x = emit_move_change_mode (imode, mode, x, force);
3407 if (x == NULL_RTX)
3408 return NULL;
3409 y = emit_move_change_mode (imode, mode, y, force);
3410 if (y == NULL_RTX)
3411 return NULL;
3412 return emit_insn (GEN_FCN (code) (x, y));
3413 }
3414
3415 /* A subroutine of emit_move_insn_1. X is a push_operand in MODE.
3416 Return an equivalent MEM that does not use an auto-increment. */
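/* For instance, (mem:SI (pre_dec (reg sp))) becomes an explicit stack
   pointer adjustment of -4 (assuming 4-byte SImode and no extra
   PUSH_ROUNDING padding) followed by a plain (mem:SI (reg sp)).  */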
3417
3418 rtx
3419 emit_move_resolve_push (machine_mode mode, rtx x)
3420 {
3421 enum rtx_code code = GET_CODE (XEXP (x, 0));
3422 rtx temp;
3423
3424 poly_int64 adjust = GET_MODE_SIZE (mode);
3425 #ifdef PUSH_ROUNDING
3426 adjust = PUSH_ROUNDING (adjust);
3427 #endif
3428 if (code == PRE_DEC || code == POST_DEC)
3429 adjust = -adjust;
3430 else if (code == PRE_MODIFY || code == POST_MODIFY)
3431 {
3432 rtx expr = XEXP (XEXP (x, 0), 1);
3433
3434 gcc_assert (GET_CODE (expr) == PLUS || GET_CODE (expr) == MINUS);
3435 poly_int64 val = rtx_to_poly_int64 (XEXP (expr, 1));
3436 if (GET_CODE (expr) == MINUS)
3437 val = -val;
3438 gcc_assert (known_eq (adjust, val) || known_eq (adjust, -val));
3439 adjust = val;
3440 }
3441
3442 /* Do not use anti_adjust_stack, since we don't want to update
3443 stack_pointer_delta. */
3444 temp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
3445 gen_int_mode (adjust, Pmode), stack_pointer_rtx,
3446 0, OPTAB_LIB_WIDEN);
3447 if (temp != stack_pointer_rtx)
3448 emit_move_insn (stack_pointer_rtx, temp);
3449
3450 switch (code)
3451 {
3452 case PRE_INC:
3453 case PRE_DEC:
3454 case PRE_MODIFY:
3455 temp = stack_pointer_rtx;
3456 break;
3457 case POST_INC:
3458 case POST_DEC:
3459 case POST_MODIFY:
3460 temp = plus_constant (Pmode, stack_pointer_rtx, -adjust);
3461 break;
3462 default:
3463 gcc_unreachable ();
3464 }
3465
3466 return replace_equiv_address (x, temp);
3467 }
3468
3469 /* A subroutine of emit_move_complex. Generate a move from Y into X.
3470 X is known to satisfy push_operand, and MODE is known to be complex.
3471 Returns the last instruction emitted. */
3472
3473 rtx_insn *
3474 emit_move_complex_push (machine_mode mode, rtx x, rtx y)
3475 {
3476 scalar_mode submode = GET_MODE_INNER (mode);
3477 bool imag_first;
3478
3479 #ifdef PUSH_ROUNDING
3480 poly_int64 submodesize = GET_MODE_SIZE (submode);
3481
3482 /* In case we output to the stack, but the size is not one that the
3483 machine can push exactly, we need to use move instructions. */
3484 if (maybe_ne (PUSH_ROUNDING (submodesize), submodesize))
3485 {
3486 x = emit_move_resolve_push (mode, x);
3487 return emit_move_insn (x, y);
3488 }
3489 #endif
3490
3491 /* Note that the real part always precedes the imag part in memory
3492 regardless of the machine's endianness. */
3493 switch (GET_CODE (XEXP (x, 0)))
3494 {
3495 case PRE_DEC:
3496 case POST_DEC:
3497 imag_first = true;
3498 break;
3499 case PRE_INC:
3500 case POST_INC:
3501 imag_first = false;
3502 break;
3503 default:
3504 gcc_unreachable ();
3505 }
3506
3507 emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
3508 read_complex_part (y, imag_first));
3509 return emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
3510 read_complex_part (y, !imag_first));
3511 }
3512
3513 /* A subroutine of emit_move_complex. Perform the move from Y to X
3514 via two moves of the parts. Returns the last instruction emitted. */
3515
3516 rtx_insn *
3517 emit_move_complex_parts (rtx x, rtx y)
3518 {
3519 /* Show the output dies here. This is necessary for SUBREGs
3520 of pseudos since we cannot track their lifetimes correctly;
3521 hard regs shouldn't appear here except as return values. */
3522 if (!reload_completed && !reload_in_progress
3523 && REG_P (x) && !reg_overlap_mentioned_p (x, y))
3524 emit_clobber (x);
3525
3526 write_complex_part (x, read_complex_part (y, false), false);
3527 write_complex_part (x, read_complex_part (y, true), true);
3528
3529 return get_last_insn ();
3530 }
3531
3532 /* A subroutine of emit_move_insn_1. Generate a move from Y into X.
3533 MODE is known to be complex. Returns the last instruction emitted. */
3534
3535 static rtx_insn *
3536 emit_move_complex (machine_mode mode, rtx x, rtx y)
3537 {
3538 bool try_int;
3539
3540 /* Need to take special care for pushes, to maintain proper ordering
3541 of the data, and possibly extra padding. */
3542 if (push_operand (x, mode))
3543 return emit_move_complex_push (mode, x, y);
3544
3545 /* See if we can coerce the target into moving both values at once, except
3546 for floating point where we favor moving as parts if this is easy. */
3547 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3548 && optab_handler (mov_optab, GET_MODE_INNER (mode)) != CODE_FOR_nothing
3549 && !(REG_P (x)
3550 && HARD_REGISTER_P (x)
3551 && REG_NREGS (x) == 1)
3552 && !(REG_P (y)
3553 && HARD_REGISTER_P (y)
3554 && REG_NREGS (y) == 1))
3555 try_int = false;
3556 /* Not possible if the values are inherently not adjacent. */
3557 else if (GET_CODE (x) == CONCAT || GET_CODE (y) == CONCAT)
3558 try_int = false;
3559 /* Possible if both are registers (or subregs of registers). */
3560 else if (register_operand (x, mode) && register_operand (y, mode))
3561 try_int = true;
3562 /* If one of the operands is a memory, and alignment constraints
3563 are friendly enough, we may be able to do combined memory operations.
3564 We do not attempt this if Y is a constant because that combination is
3565 usually handled better by the move-by-parts code below. */
3566 else if ((MEM_P (x) ? !CONSTANT_P (y) : MEM_P (y))
3567 && (!STRICT_ALIGNMENT
3568 || get_mode_alignment (mode) == BIGGEST_ALIGNMENT))
3569 try_int = true;
3570 else
3571 try_int = false;
3572
3573 if (try_int)
3574 {
3575 rtx_insn *ret;
3576
3577 /* For memory to memory moves, optimal behavior can be had with the
3578 existing block move logic. */
3579 if (MEM_P (x) && MEM_P (y))
3580 {
3581 emit_block_move (x, y, gen_int_mode (GET_MODE_SIZE (mode), Pmode),
3582 BLOCK_OP_NO_LIBCALL);
3583 return get_last_insn ();
3584 }
3585
3586 ret = emit_move_via_integer (mode, x, y, true);
3587 if (ret)
3588 return ret;
3589 }
3590
3591 return emit_move_complex_parts (x, y);
3592 }
3593
3594 /* A subroutine of emit_move_insn_1. Generate a move from Y into X.
3595 MODE is known to be MODE_CC. Returns the last instruction emitted. */
3596
3597 static rtx_insn *
3598 emit_move_ccmode (machine_mode mode, rtx x, rtx y)
3599 {
3600 rtx_insn *ret;
3601
3602 /* Assume all MODE_CC modes are equivalent; if we have movcc, use it. */
3603 if (mode != CCmode)
3604 {
3605 enum insn_code code = optab_handler (mov_optab, CCmode);
3606 if (code != CODE_FOR_nothing)
3607 {
3608 x = emit_move_change_mode (CCmode, mode, x, true);
3609 y = emit_move_change_mode (CCmode, mode, y, true);
3610 return emit_insn (GEN_FCN (code) (x, y));
3611 }
3612 }
3613
3614 /* Otherwise, find the MODE_INT mode of the same width. */
3615 ret = emit_move_via_integer (mode, x, y, false);
3616 gcc_assert (ret != NULL);
3617 return ret;
3618 }
3619
3620 /* Return true if word I of OP lies entirely in the
3621 undefined bits of a paradoxical subreg. */
3622
3623 static bool
3624 undefined_operand_subword_p (const_rtx op, int i)
3625 {
3626 if (GET_CODE (op) != SUBREG)
3627 return false;
3628 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3629 poly_int64 offset = i * UNITS_PER_WORD + subreg_memory_offset (op);
3630 return (known_ge (offset, GET_MODE_SIZE (innermostmode))
3631 || known_le (offset, -UNITS_PER_WORD));
3632 }
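/* For example, when a DImode register is read through a paradoxical TImode
   subreg on a 64-bit target, only one of the two words corresponds to the
   DImode value; the other word is undefined, and the word-by-word move
   below can simply skip it.  */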
3633
3634 /* A subroutine of emit_move_insn_1. Generate a move from Y into X.
3635 MODE is any multi-word or full-word mode that lacks a move_insn
3636 pattern. Note that you will get better code if you define such
3637 patterns, even if they must turn into multiple assembler instructions. */
3638
3639 static rtx_insn *
3640 emit_move_multi_word (machine_mode mode, rtx x, rtx y)
3641 {
3642 rtx_insn *last_insn = 0;
3643 rtx_insn *seq;
3644 rtx inner;
3645 bool need_clobber;
3646 int i, mode_size;
3647
3648 /* This function can only handle cases where the number of words is
3649 known at compile time. */
3650 mode_size = GET_MODE_SIZE (mode).to_constant ();
3651 gcc_assert (mode_size >= UNITS_PER_WORD);
3652
3653 /* If X is a push on the stack, do the push now and replace
3654 X with a reference to the stack pointer. */
3655 if (push_operand (x, mode))
3656 x = emit_move_resolve_push (mode, x);
3657
3658 /* If we are in reload, see if either operand is a MEM whose address
3659 is scheduled for replacement. */
3660 if (reload_in_progress && MEM_P (x)
3661 && (inner = find_replacement (&XEXP (x, 0))) != XEXP (x, 0))
3662 x = replace_equiv_address_nv (x, inner);
3663 if (reload_in_progress && MEM_P (y)
3664 && (inner = find_replacement (&XEXP (y, 0))) != XEXP (y, 0))
3665 y = replace_equiv_address_nv (y, inner);
3666
3667 start_sequence ();
3668
3669 need_clobber = false;
3670 for (i = 0; i < CEIL (mode_size, UNITS_PER_WORD); i++)
3671 {
3672 rtx xpart = operand_subword (x, i, 1, mode);
3673 rtx ypart;
3674
3675 /* Do not generate code for a move if it would come entirely
3676 from the undefined bits of a paradoxical subreg. */
3677 if (undefined_operand_subword_p (y, i))
3678 continue;
3679
3680 ypart = operand_subword (y, i, 1, mode);
3681
3682 /* If we can't get a part of Y, put Y into memory if it is a
3683 constant. Otherwise, force it into a register. Then we must
3684 be able to get a part of Y. */
3685 if (ypart == 0 && CONSTANT_P (y))
3686 {
3687 y = use_anchored_address (force_const_mem (mode, y));
3688 ypart = operand_subword (y, i, 1, mode);
3689 }
3690 else if (ypart == 0)
3691 ypart = operand_subword_force (y, i, mode);
3692
3693 gcc_assert (xpart && ypart);
3694
3695 need_clobber |= (GET_CODE (xpart) == SUBREG);
3696
3697 last_insn = emit_move_insn (xpart, ypart);
3698 }
3699
3700 seq = get_insns ();
3701 end_sequence ();
3702
3703 /* Show the output dies here. This is necessary for SUBREGs
3704 of pseudos since we cannot track their lifetimes correctly;
3705 hard regs shouldn't appear here except as return values.
3706 We never want to emit such a clobber after reload. */
3707 if (x != y
3708 && ! (reload_in_progress || reload_completed)
3709 && need_clobber != 0)
3710 emit_clobber (x);
3711
3712 emit_insn (seq);
3713
3714 return last_insn;
3715 }
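/* For example, a TImode move on a 64-bit target that lacks a TImode move
   pattern would typically be emitted here as two DImode word moves,
   preceded by a clobber of the destination when the destination words are
   SUBREGs of a pseudo.  */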
3716
3717 /* Low level part of emit_move_insn.
3718 Called just like emit_move_insn, but assumes X and Y
3719 are basically valid. */
3720
3721 rtx_insn *
3722 emit_move_insn_1 (rtx x, rtx y)
3723 {
3724 machine_mode mode = GET_MODE (x);
3725 enum insn_code code;
3726
3727 gcc_assert ((unsigned int) mode < (unsigned int) MAX_MACHINE_MODE);
3728
3729 code = optab_handler (mov_optab, mode);
3730 if (code != CODE_FOR_nothing)
3731 return emit_insn (GEN_FCN (code) (x, y));
3732
3733 /* Expand complex moves by moving real part and imag part. */
3734 if (COMPLEX_MODE_P (mode))
3735 return emit_move_complex (mode, x, y);
3736
3737 if (GET_MODE_CLASS (mode) == MODE_DECIMAL_FLOAT
3738 || ALL_FIXED_POINT_MODE_P (mode))
3739 {
3740 rtx_insn *result = emit_move_via_integer (mode, x, y, true);
3741
3742 /* If we can't find an integer mode, use multi words. */
3743 if (result)
3744 return result;
3745 else
3746 return emit_move_multi_word (mode, x, y);
3747 }
3748
3749 if (GET_MODE_CLASS (mode) == MODE_CC)
3750 return emit_move_ccmode (mode, x, y);
3751
3752 /* Try using a move pattern for the corresponding integer mode. This is
3753 only safe when simplify_subreg can convert MODE constants into integer
3754 constants. At present, it can only do this reliably if the value
3755 fits within a HOST_WIDE_INT. */
3756 if (!CONSTANT_P (y)
3757 || known_le (GET_MODE_BITSIZE (mode), HOST_BITS_PER_WIDE_INT))
3758 {
3759 rtx_insn *ret = emit_move_via_integer (mode, x, y, lra_in_progress);
3760
3761 if (ret)
3762 {
3763 if (! lra_in_progress || recog (PATTERN (ret), ret, 0) >= 0)
3764 return ret;
3765 }
3766 }
3767
3768 return emit_move_multi_word (mode, x, y);
3769 }
3770
3771 /* Generate code to copy Y into X.
3772 Both Y and X must have the same mode, except that
3773 Y can be a constant with VOIDmode.
3774 This mode cannot be BLKmode; use emit_block_move for that.
3775
3776 Return the last instruction emitted. */
3777
3778 rtx_insn *
3779 emit_move_insn (rtx x, rtx y)
3780 {
3781 machine_mode mode = GET_MODE (x);
3782 rtx y_cst = NULL_RTX;
3783 rtx_insn *last_insn;
3784 rtx set;
3785
3786 gcc_assert (mode != BLKmode
3787 && (GET_MODE (y) == mode || GET_MODE (y) == VOIDmode));
3788
3789 if (CONSTANT_P (y))
3790 {
3791 if (optimize
3792 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
3793 && (last_insn = compress_float_constant (x, y)))
3794 return last_insn;
3795
3796 y_cst = y;
3797
3798 if (!targetm.legitimate_constant_p (mode, y))
3799 {
3800 y = force_const_mem (mode, y);
3801
3802 /* If the target's cannot_force_const_mem prevented the spill,
3803 assume that the target's move expanders will also take care
3804 of the non-legitimate constant. */
3805 if (!y)
3806 y = y_cst;
3807 else
3808 y = use_anchored_address (y);
3809 }
3810 }
3811
3812 /* If X or Y are memory references, verify that their addresses are valid
3813 for the machine. */
3814 if (MEM_P (x)
3815 && (! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
3816 MEM_ADDR_SPACE (x))
3817 && ! push_operand (x, GET_MODE (x))))
3818 x = validize_mem (x);
3819
3820 if (MEM_P (y)
3821 && ! memory_address_addr_space_p (GET_MODE (y), XEXP (y, 0),
3822 MEM_ADDR_SPACE (y)))
3823 y = validize_mem (y);
3824
3825 gcc_assert (mode != BLKmode);
3826
3827 last_insn = emit_move_insn_1 (x, y);
3828
3829 if (y_cst && REG_P (x)
3830 && (set = single_set (last_insn)) != NULL_RTX
3831 && SET_DEST (set) == x
3832 && ! rtx_equal_p (y_cst, SET_SRC (set)))
3833 set_unique_reg_note (last_insn, REG_EQUAL, copy_rtx (y_cst));
3834
3835 return last_insn;
3836 }
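/* A typical use from expansion code might look like

     rtx tmp = gen_reg_rtx (SImode);
     emit_move_insn (tmp, GEN_INT (42));

   with emit_move_insn spilling the constant to the constant pool first if
   the target does not accept it as a legitimate move source.  */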
3837
3838 /* Generate the body of an instruction to copy Y into X.
3839 It may be a list of insns, if one insn isn't enough. */
3840
3841 rtx_insn *
3842 gen_move_insn (rtx x, rtx y)
3843 {
3844 rtx_insn *seq;
3845
3846 start_sequence ();
3847 emit_move_insn_1 (x, y);
3848 seq = get_insns ();
3849 end_sequence ();
3850 return seq;
3851 }
3852
3853 /* If Y is representable exactly in a narrower mode, and the target can
3854 perform the extension directly from constant or memory, then emit the
3855 move as an extension. */
3856
3857 static rtx_insn *
3858 compress_float_constant (rtx x, rtx y)
3859 {
3860 machine_mode dstmode = GET_MODE (x);
3861 machine_mode orig_srcmode = GET_MODE (y);
3862 machine_mode srcmode;
3863 const REAL_VALUE_TYPE *r;
3864 int oldcost, newcost;
3865 bool speed = optimize_insn_for_speed_p ();
3866
3867 r = CONST_DOUBLE_REAL_VALUE (y);
3868
3869 if (targetm.legitimate_constant_p (dstmode, y))
3870 oldcost = set_src_cost (y, orig_srcmode, speed);
3871 else
3872 oldcost = set_src_cost (force_const_mem (dstmode, y), dstmode, speed);
3873
3874 FOR_EACH_MODE_UNTIL (srcmode, orig_srcmode)
3875 {
3876 enum insn_code ic;
3877 rtx trunc_y;
3878 rtx_insn *last_insn;
3879
3880 /* Skip if the target can't extend this way. */
3881 ic = can_extend_p (dstmode, srcmode, 0);
3882 if (ic == CODE_FOR_nothing)
3883 continue;
3884
3885 /* Skip if the narrowed value isn't exact. */
3886 if (! exact_real_truncate (srcmode, r))
3887 continue;
3888
3889 trunc_y = const_double_from_real_value (*r, srcmode);
3890
3891 if (targetm.legitimate_constant_p (srcmode, trunc_y))
3892 {
3893 /* Skip if the target needs extra instructions to perform
3894 the extension. */
3895 if (!insn_operand_matches (ic, 1, trunc_y))
3896 continue;
3897 /* This is valid, but may not be cheaper than the original. */
3898 newcost = set_src_cost (gen_rtx_FLOAT_EXTEND (dstmode, trunc_y),
3899 dstmode, speed);
3900 if (oldcost < newcost)
3901 continue;
3902 }
3903 else if (float_extend_from_mem[dstmode][srcmode])
3904 {
3905 trunc_y = force_const_mem (srcmode, trunc_y);
3906 /* This is valid, but may not be cheaper than the original. */
3907 newcost = set_src_cost (gen_rtx_FLOAT_EXTEND (dstmode, trunc_y),
3908 dstmode, speed);
3909 if (oldcost < newcost)
3910 continue;
3911 trunc_y = validize_mem (trunc_y);
3912 }
3913 else
3914 continue;
3915
3916 /* For CSE's benefit, force the compressed constant pool entry
3917 into a new pseudo. This constant may be used in different modes,
3918 and if not, combine will put things back together for us. */
3919 trunc_y = force_reg (srcmode, trunc_y);
3920
3921 /* If x is a hard register, perform the extension into a pseudo,
3922 so that e.g. stack realignment code is aware of it. */
3923 rtx target = x;
3924 if (REG_P (x) && HARD_REGISTER_P (x))
3925 target = gen_reg_rtx (dstmode);
3926
3927 emit_unop_insn (ic, target, trunc_y, UNKNOWN);
3928 last_insn = get_last_insn ();
3929
3930 if (REG_P (target))
3931 set_unique_reg_note (last_insn, REG_EQUAL, y);
3932
3933 if (target != x)
3934 return emit_move_insn (x, target);
3935 return last_insn;
3936 }
3937
3938 return NULL;
3939 }
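/* For example, a move of the DFmode constant 1.0 can often be emitted as
   (float_extend:DF (mem/u:SF <constant pool>)) because 1.0 is exactly
   representable in SFmode; the loop above only chooses such a form when
   the target can extend directly from a constant or memory and the rtx
   cost is no worse than that of the original constant.  */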
3940 \f
3941 /* Pushing data onto the stack. */
3942
3943 /* Push a block of length SIZE (perhaps variable)
3944 and return an rtx to address the beginning of the block.
3945 The value may be virtual_outgoing_args_rtx.
3946
3947 EXTRA is the number of bytes of padding to push in addition to SIZE.
3948 BELOW nonzero means this padding comes at low addresses;
3949 otherwise, the padding comes at high addresses. */
3950
3951 rtx
3952 push_block (rtx size, poly_int64 extra, int below)
3953 {
3954 rtx temp;
3955
3956 size = convert_modes (Pmode, ptr_mode, size, 1);
3957 if (CONSTANT_P (size))
3958 anti_adjust_stack (plus_constant (Pmode, size, extra));
3959 else if (REG_P (size) && known_eq (extra, 0))
3960 anti_adjust_stack (size);
3961 else
3962 {
3963 temp = copy_to_mode_reg (Pmode, size);
3964 if (maybe_ne (extra, 0))
3965 temp = expand_binop (Pmode, add_optab, temp,
3966 gen_int_mode (extra, Pmode),
3967 temp, 0, OPTAB_LIB_WIDEN);
3968 anti_adjust_stack (temp);
3969 }
3970
3971 if (STACK_GROWS_DOWNWARD)
3972 {
3973 temp = virtual_outgoing_args_rtx;
3974 if (maybe_ne (extra, 0) && below)
3975 temp = plus_constant (Pmode, temp, extra);
3976 }
3977 else
3978 {
3979 poly_int64 csize;
3980 if (poly_int_rtx_p (size, &csize))
3981 temp = plus_constant (Pmode, virtual_outgoing_args_rtx,
3982 -csize - (below ? 0 : extra));
3983 else if (maybe_ne (extra, 0) && !below)
3984 temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
3985 negate_rtx (Pmode, plus_constant (Pmode, size,
3986 extra)));
3987 else
3988 temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
3989 negate_rtx (Pmode, size));
3990 }
3991
3992 return memory_address (NARROWEST_INT_MODE, temp);
3993 }
3994
3995 /* A utility routine that returns the base of an auto-inc memory, or NULL. */
3996
3997 static rtx
3998 mem_autoinc_base (rtx mem)
3999 {
4000 if (MEM_P (mem))
4001 {
4002 rtx addr = XEXP (mem, 0);
4003 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
4004 return XEXP (addr, 0);
4005 }
4006 return NULL;
4007 }
4008
4009 /* A utility routine used here, in reload, and in try_split. The insns
4010 after PREV up to and including LAST are known to adjust the stack,
4011 with a final value of END_ARGS_SIZE. Iterate backward from LAST
4012 placing notes as appropriate. PREV may be NULL, indicating the
4013 entire insn sequence prior to LAST should be scanned.
4014
4015 The set of allowed stack pointer modifications is small:
4016 (1) One or more auto-inc style memory references (aka pushes),
4017 (2) One or more addition/subtraction with the SP as destination,
4018 (3) A single move insn with the SP as destination,
4019 (4) A call_pop insn,
4020 (5) Noreturn call insns if !ACCUMULATE_OUTGOING_ARGS.
4021
4022 Insns in the sequence that do not modify the SP are ignored,
4023 except for noreturn calls.
4024
4025 The return value is the amount of adjustment that can be trivially
4026 verified, via immediate operand or auto-inc. If the adjustment
4027 cannot be trivially extracted, the return value is HOST_WIDE_INT_MIN. */
4028
4029 poly_int64
4030 find_args_size_adjust (rtx_insn *insn)
4031 {
4032 rtx dest, set, pat;
4033 int i;
4034
4035 pat = PATTERN (insn);
4036 set = NULL;
4037
4038 /* Look for a call_pop pattern. */
4039 if (CALL_P (insn))
4040 {
4041 /* We have to allow non-call_pop patterns for the case
4042 of emit_single_push_insn of a TLS address. */
4043 if (GET_CODE (pat) != PARALLEL)
4044 return 0;
4045
4046 /* All call_pop have a stack pointer adjust in the parallel.
4047 The call itself is always first, and the stack adjust is
4048 usually last, so search from the end. */
4049 for (i = XVECLEN (pat, 0) - 1; i > 0; --i)
4050 {
4051 set = XVECEXP (pat, 0, i);
4052 if (GET_CODE (set) != SET)
4053 continue;
4054 dest = SET_DEST (set);
4055 if (dest == stack_pointer_rtx)
4056 break;
4057 }
4058 /* We'd better have found the stack pointer adjust. */
4059 if (i == 0)
4060 return 0;
4061 /* Fall through to process the extracted SET and DEST
4062 as if it was a standalone insn. */
4063 }
4064 else if (GET_CODE (pat) == SET)
4065 set = pat;
4066 else if ((set = single_set (insn)) != NULL)
4067 ;
4068 else if (GET_CODE (pat) == PARALLEL)
4069 {
4070 /* ??? Some older ports use a parallel with a stack adjust
4071 and a store for a PUSH_ROUNDING pattern, rather than a
4072 PRE/POST_MODIFY rtx. Don't force them to update yet... */
4073 /* ??? See h8300 and m68k, pushqi1. */
4074 for (i = XVECLEN (pat, 0) - 1; i >= 0; --i)
4075 {
4076 set = XVECEXP (pat, 0, i);
4077 if (GET_CODE (set) != SET)
4078 continue;
4079 dest = SET_DEST (set);
4080 if (dest == stack_pointer_rtx)
4081 break;
4082
4083 /* We do not expect an auto-inc of the sp in the parallel. */
4084 gcc_checking_assert (mem_autoinc_base (dest) != stack_pointer_rtx);
4085 gcc_checking_assert (mem_autoinc_base (SET_SRC (set))
4086 != stack_pointer_rtx);
4087 }
4088 if (i < 0)
4089 return 0;
4090 }
4091 else
4092 return 0;
4093
4094 dest = SET_DEST (set);
4095
4096 /* Look for direct modifications of the stack pointer. */
4097 if (REG_P (dest) && REGNO (dest) == STACK_POINTER_REGNUM)
4098 {
4099 /* Look for a trivial adjustment, otherwise assume nothing. */
4100 /* Note that the SPU restore_stack_block pattern refers to
4101 the stack pointer in V4SImode. Consider that non-trivial. */
4102 poly_int64 offset;
4103 if (SCALAR_INT_MODE_P (GET_MODE (dest))
4104 && strip_offset (SET_SRC (set), &offset) == stack_pointer_rtx)
4105 return offset;
4106 /* ??? Reload can generate no-op moves, which will be cleaned
4107 up later. Recognize it and continue searching. */
4108 else if (rtx_equal_p (dest, SET_SRC (set)))
4109 return 0;
4110 else
4111 return HOST_WIDE_INT_MIN;
4112 }
4113 else
4114 {
4115 rtx mem, addr;
4116
4117 /* Otherwise only think about autoinc patterns. */
4118 if (mem_autoinc_base (dest) == stack_pointer_rtx)
4119 {
4120 mem = dest;
4121 gcc_checking_assert (mem_autoinc_base (SET_SRC (set))
4122 != stack_pointer_rtx);
4123 }
4124 else if (mem_autoinc_base (SET_SRC (set)) == stack_pointer_rtx)
4125 mem = SET_SRC (set);
4126 else
4127 return 0;
4128
4129 addr = XEXP (mem, 0);
4130 switch (GET_CODE (addr))
4131 {
4132 case PRE_INC:
4133 case POST_INC:
4134 return GET_MODE_SIZE (GET_MODE (mem));
4135 case PRE_DEC:
4136 case POST_DEC:
4137 return -GET_MODE_SIZE (GET_MODE (mem));
4138 case PRE_MODIFY:
4139 case POST_MODIFY:
4140 addr = XEXP (addr, 1);
4141 gcc_assert (GET_CODE (addr) == PLUS);
4142 gcc_assert (XEXP (addr, 0) == stack_pointer_rtx);
4143 return rtx_to_poly_int64 (XEXP (addr, 1));
4144 default:
4145 gcc_unreachable ();
4146 }
4147 }
4148 }
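/* For example, (set (mem:SI (pre_dec (reg sp))) (reg:SI r)) yields -4 on a
   target with 4-byte SImode, (set (reg sp) (plus (reg sp) (const_int -16)))
   yields -16, and an SP modification whose adjustment cannot be read
   directly off the pattern yields HOST_WIDE_INT_MIN.  */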
4149
4150 poly_int64
4151 fixup_args_size_notes (rtx_insn *prev, rtx_insn *last,
4152 poly_int64 end_args_size)
4153 {
4154 poly_int64 args_size = end_args_size;
4155 bool saw_unknown = false;
4156 rtx_insn *insn;
4157
4158 for (insn = last; insn != prev; insn = PREV_INSN (insn))
4159 {
4160 if (!NONDEBUG_INSN_P (insn))
4161 continue;
4162
4163 /* We might have existing REG_ARGS_SIZE notes, e.g. when pushing
4164 a call argument containing a TLS address that itself requires
4165 a call to __tls_get_addr. The handling of stack_pointer_delta
4166 in emit_single_push_insn is supposed to ensure that any such
4167 notes are already correct. */
4168 rtx note = find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX);
4169 gcc_assert (!note || known_eq (args_size, get_args_size (note)));
4170
4171 poly_int64 this_delta = find_args_size_adjust (insn);
4172 if (known_eq (this_delta, 0))
4173 {
4174 if (!CALL_P (insn)
4175 || ACCUMULATE_OUTGOING_ARGS
4176 || find_reg_note (insn, REG_NORETURN, NULL_RTX) == NULL_RTX)
4177 continue;
4178 }
4179
4180 gcc_assert (!saw_unknown);
4181 if (known_eq (this_delta, HOST_WIDE_INT_MIN))
4182 saw_unknown = true;
4183
4184 if (!note)
4185 add_args_size_note (insn, args_size);
4186 if (STACK_GROWS_DOWNWARD)
4187 this_delta = -poly_uint64 (this_delta);
4188
4189 if (saw_unknown)
4190 args_size = HOST_WIDE_INT_MIN;
4191 else
4192 args_size -= this_delta;
4193 }
4194
4195 return args_size;
4196 }
4197
4198 #ifdef PUSH_ROUNDING
4199 /* Emit single push insn. */
4200
4201 static void
4202 emit_single_push_insn_1 (machine_mode mode, rtx x, tree type)
4203 {
4204 rtx dest_addr;
4205 poly_int64 rounded_size = PUSH_ROUNDING (GET_MODE_SIZE (mode));
4206 rtx dest;
4207 enum insn_code icode;
4208
4209 /* If there is a push pattern, use it. Otherwise fall back to the old way
4210 of handing a MEM representing the push operation to the move expander. */
4211 icode = optab_handler (push_optab, mode);
4212 if (icode != CODE_FOR_nothing)
4213 {
4214 class expand_operand ops[1];
4215
4216 create_input_operand (&ops[0], x, mode);
4217 if (maybe_expand_insn (icode, 1, ops))
4218 return;
4219 }
4220 if (known_eq (GET_MODE_SIZE (mode), rounded_size))
4221 dest_addr = gen_rtx_fmt_e (STACK_PUSH_CODE, Pmode, stack_pointer_rtx);
4222 /* If we are to pad downward, adjust the stack pointer first and
4223 then store X into the stack location using an offset. This is
4224 because emit_move_insn does not know how to pad; it does not have
4225 access to type. */
4226 else if (targetm.calls.function_arg_padding (mode, type) == PAD_DOWNWARD)
4227 {
4228 emit_move_insn (stack_pointer_rtx,
4229 expand_binop (Pmode,
4230 STACK_GROWS_DOWNWARD ? sub_optab
4231 : add_optab,
4232 stack_pointer_rtx,
4233 gen_int_mode (rounded_size, Pmode),
4234 NULL_RTX, 0, OPTAB_LIB_WIDEN));
4235
4236 poly_int64 offset = rounded_size - GET_MODE_SIZE (mode);
4237 if (STACK_GROWS_DOWNWARD && STACK_PUSH_CODE == POST_DEC)
4238 /* We have already decremented the stack pointer, so get the
4239 previous value. */
4240 offset += rounded_size;
4241
4242 if (!STACK_GROWS_DOWNWARD && STACK_PUSH_CODE == POST_INC)
4243 /* We have already incremented the stack pointer, so get the
4244 previous value. */
4245 offset -= rounded_size;
4246
4247 dest_addr = plus_constant (Pmode, stack_pointer_rtx, offset);
4248 }
4249 else
4250 {
4251 if (STACK_GROWS_DOWNWARD)
4252 /* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC. */
4253 dest_addr = plus_constant (Pmode, stack_pointer_rtx, -rounded_size);
4254 else
4255 /* ??? This seems wrong if STACK_PUSH_CODE == POST_INC. */
4256 dest_addr = plus_constant (Pmode, stack_pointer_rtx, rounded_size);
4257
4258 dest_addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, dest_addr);
4259 }
4260
4261 dest = gen_rtx_MEM (mode, dest_addr);
4262
4263 if (type != 0)
4264 {
4265 set_mem_attributes (dest, type, 1);
4266
4267 if (cfun->tail_call_marked)
4268 /* Function incoming arguments may overlap with sibling call
4269 outgoing arguments and we cannot allow reordering of reads
4270 from function arguments with stores to outgoing arguments
4271 of sibling calls. */
4272 set_mem_alias_set (dest, 0);
4273 }
4274 emit_move_insn (dest, x);
4275 }
4276
4277 /* Emit and annotate a single push insn. */
4278
4279 static void
4280 emit_single_push_insn (machine_mode mode, rtx x, tree type)
4281 {
4282 poly_int64 delta, old_delta = stack_pointer_delta;
4283 rtx_insn *prev = get_last_insn ();
4284 rtx_insn *last;
4285
4286 emit_single_push_insn_1 (mode, x, type);
4287
4288 /* Adjust stack_pointer_delta to describe the situation after the push
4289 we just performed. Note that we must do this after the push rather
4290 than before the push in case calculating X needs pushes and pops of
4291 its own (e.g. if calling __tls_get_addr). The REG_ARGS_SIZE notes
4292 for such pushes and pops must not include the effect of the future
4293 push of X. */
4294 stack_pointer_delta += PUSH_ROUNDING (GET_MODE_SIZE (mode));
4295
4296 last = get_last_insn ();
4297
4298 /* Notice the common case where we emitted exactly one insn. */
4299 if (PREV_INSN (last) == prev)
4300 {
4301 add_args_size_note (last, stack_pointer_delta);
4302 return;
4303 }
4304
4305 delta = fixup_args_size_notes (prev, last, stack_pointer_delta);
4306 gcc_assert (known_eq (delta, HOST_WIDE_INT_MIN)
4307 || known_eq (delta, old_delta));
4308 }
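/* For example, pushing a QImode argument on a target whose PUSH_ROUNDING
   rounds sizes up to 4 bytes cannot use a bare (pre_dec (reg sp)) store of
   one byte; emit_single_push_insn_1 instead either adjusts sp by the
   rounded size and stores the byte at the offset dictated by the
   argument's padding direction, or uses a PRE_MODIFY address that bumps sp
   by the rounded amount.  */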
4309 #endif
4310
4311 /* If reading SIZE bytes from X will end up reading from
4312 Y return the number of bytes that overlap. Return -1
4313 if there is no overlap or -2 if we can't determine
4314 (for example when X and Y have different base registers). */
4315
4316 static int
4317 memory_load_overlap (rtx x, rtx y, HOST_WIDE_INT size)
4318 {
4319 rtx tmp = plus_constant (Pmode, x, size);
4320 rtx sub = simplify_gen_binary (MINUS, Pmode, tmp, y);
4321
4322 if (!CONST_INT_P (sub))
4323 return -2;
4324
4325 HOST_WIDE_INT val = INTVAL (sub);
4326
4327 return IN_RANGE (val, 1, size) ? val : -1;
4328 }
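/* For example, if X is (plus (reg sp) (const_int 8)), Y is
   (plus (reg sp) (const_int 16)) and SIZE is 16, the load from X covers
   bytes [sp+8, sp+24) and extends 8 bytes past the start of Y, so 8 is
   returned; if X and Y use unrelated base registers, the subtraction does
   not fold to a constant and -2 is returned.  */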
4329
4330 /* Generate code to push X onto the stack, assuming it has mode MODE and
4331 type TYPE.
4332 MODE is redundant except when X is a CONST_INT (since they don't
4333 carry mode info).
4334 SIZE is an rtx for the size of data to be copied (in bytes),
4335 needed only if X is BLKmode.
4336 Return true if successful. May return false if asked to push a
4337 partial argument during a sibcall optimization (as specified by
4338 SIBCALL_P) and the incoming and outgoing pointers cannot be shown
4339 to not overlap.
4340
4341 ALIGN (in bits) is maximum alignment we can assume.
4342
4343 If PARTIAL and REG are both nonzero, then copy that many of the first
4344 bytes of X into registers starting with REG, and push the rest of X.
4345 The amount of space pushed is decreased by PARTIAL bytes.
4346 REG must be a hard register in this case.
4347 If REG is zero but PARTIAL is not, take all other actions for an
4348 argument partially in registers, but do not actually load any
4349 registers.
4350
4351 EXTRA is the amount in bytes of extra space to leave next to this arg.
4352 This is ignored if an argument block has already been allocated.
4353
4354 On a machine that lacks real push insns, ARGS_ADDR is the address of
4355 the bottom of the argument block for this call. We use indexing off there
4356 to store the arg. On machines with push insns, ARGS_ADDR is 0 when an
4357 argument block has not been preallocated.
4358
4359 ARGS_SO_FAR is the size of args previously pushed for this call.
4360
4361 REG_PARM_STACK_SPACE is nonzero if functions require stack space
4362 for arguments passed in registers. If nonzero, it will be the number
4363 of bytes required. */
4364
4365 bool
4366 emit_push_insn (rtx x, machine_mode mode, tree type, rtx size,
4367 unsigned int align, int partial, rtx reg, poly_int64 extra,
4368 rtx args_addr, rtx args_so_far, int reg_parm_stack_space,
4369 rtx alignment_pad, bool sibcall_p)
4370 {
4371 rtx xinner;
4372 pad_direction stack_direction
4373 = STACK_GROWS_DOWNWARD ? PAD_DOWNWARD : PAD_UPWARD;
4374
4375 /* Decide where to pad the argument: PAD_DOWNWARD for below,
4376 PAD_UPWARD for above, or PAD_NONE for don't pad it.
4377 Default is below for small data on big-endian machines; else above. */
4378 pad_direction where_pad = targetm.calls.function_arg_padding (mode, type);
4379
4380 /* Invert direction if stack is post-decrement.
4381 FIXME: why? */
4382 if (STACK_PUSH_CODE == POST_DEC)
4383 if (where_pad != PAD_NONE)
4384 where_pad = (where_pad == PAD_DOWNWARD ? PAD_UPWARD : PAD_DOWNWARD);
4385
4386 xinner = x;
4387
4388 int nregs = partial / UNITS_PER_WORD;
4389 rtx *tmp_regs = NULL;
4390 int overlapping = 0;
4391
4392 if (mode == BLKmode
4393 || (STRICT_ALIGNMENT && align < GET_MODE_ALIGNMENT (mode)))
4394 {
4395 /* Copy a block into the stack, entirely or partially. */
4396
4397 rtx temp;
4398 int used;
4399 int offset;
4400 int skip;
4401
4402 offset = partial % (PARM_BOUNDARY / BITS_PER_UNIT);
4403 used = partial - offset;
4404
4405 if (mode != BLKmode)
4406 {
4407 /* A value is to be stored in an insufficiently aligned
4408 stack slot; copy via a suitably aligned slot if
4409 necessary. */
4410 size = gen_int_mode (GET_MODE_SIZE (mode), Pmode);
4411 if (!MEM_P (xinner))
4412 {
4413 temp = assign_temp (type, 1, 1);
4414 emit_move_insn (temp, xinner);
4415 xinner = temp;
4416 }
4417 }
4418
4419 gcc_assert (size);
4420
4421 /* USED is now the # of bytes we need not copy to the stack
4422 because registers will take care of them. */
4423
4424 if (partial != 0)
4425 xinner = adjust_address (xinner, BLKmode, used);
4426
4427 /* If the partial register-part of the arg counts in its stack size,
4428 skip the part of stack space corresponding to the registers.
4429 Otherwise, start copying to the beginning of the stack space,
4430 by setting SKIP to 0. */
4431 skip = (reg_parm_stack_space == 0) ? 0 : used;
4432
4433 #ifdef PUSH_ROUNDING
4434 /* Do it with several push insns if that doesn't take lots of insns
4435 and if there is no difficulty with push insns that skip bytes
4436 on the stack for alignment purposes. */
4437 if (args_addr == 0
4438 && PUSH_ARGS
4439 && CONST_INT_P (size)
4440 && skip == 0
4441 && MEM_ALIGN (xinner) >= align
4442 && can_move_by_pieces ((unsigned) INTVAL (size) - used, align)
4443 /* Here we avoid the case of a structure whose weak alignment
4444 forces many pushes of small amounts of data, where the rounding
4445 done by such small pushes causes trouble. */
4446 && ((!targetm.slow_unaligned_access (word_mode, align))
4447 || align >= BIGGEST_ALIGNMENT
4448 || known_eq (PUSH_ROUNDING (align / BITS_PER_UNIT),
4449 align / BITS_PER_UNIT))
4450 && known_eq (PUSH_ROUNDING (INTVAL (size)), INTVAL (size)))
4451 {
4452 /* Push padding now if padding above and stack grows down,
4453 or if padding below and stack grows up.
4454 But if space already allocated, this has already been done. */
4455 if (maybe_ne (extra, 0)
4456 && args_addr == 0
4457 && where_pad != PAD_NONE
4458 && where_pad != stack_direction)
4459 anti_adjust_stack (gen_int_mode (extra, Pmode));
4460
4461 move_by_pieces (NULL, xinner, INTVAL (size) - used, align,
4462 RETURN_BEGIN);
4463 }
4464 else
4465 #endif /* PUSH_ROUNDING */
4466 {
4467 rtx target;
4468
4469 /* Otherwise make space on the stack and copy the data
4470 to the address of that space. */
4471
4472 /* Deduct words put into registers from the size we must copy. */
4473 if (partial != 0)
4474 {
4475 if (CONST_INT_P (size))
4476 size = GEN_INT (INTVAL (size) - used);
4477 else
4478 size = expand_binop (GET_MODE (size), sub_optab, size,
4479 gen_int_mode (used, GET_MODE (size)),
4480 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4481 }
4482
4483 /* Get the address of the stack space.
4484 In this case, we do not deal with EXTRA separately.
4485 A single stack adjust will do. */
4486 poly_int64 offset;
4487 if (! args_addr)
4488 {
4489 temp = push_block (size, extra, where_pad == PAD_DOWNWARD);
4490 extra = 0;
4491 }
4492 else if (poly_int_rtx_p (args_so_far, &offset))
4493 temp = memory_address (BLKmode,
4494 plus_constant (Pmode, args_addr,
4495 skip + offset));
4496 else
4497 temp = memory_address (BLKmode,
4498 plus_constant (Pmode,
4499 gen_rtx_PLUS (Pmode,
4500 args_addr,
4501 args_so_far),
4502 skip));
4503
4504 if (!ACCUMULATE_OUTGOING_ARGS)
4505 {
4506 /* If the source is referenced relative to the stack pointer,
4507 copy it to another register to stabilize it. We do not need
4508 to do this if we know that we won't be changing sp. */
4509
4510 if (reg_mentioned_p (virtual_stack_dynamic_rtx, temp)
4511 || reg_mentioned_p (virtual_outgoing_args_rtx, temp))
4512 temp = copy_to_reg (temp);
4513 }
4514
4515 target = gen_rtx_MEM (BLKmode, temp);
4516
4517 /* We do *not* set_mem_attributes here, because incoming arguments
4518 may overlap with sibling call outgoing arguments and we cannot
4519 allow reordering of reads from function arguments with stores
4520 to outgoing arguments of sibling calls. We do, however, want
4521 to record the alignment of the stack slot. */
4522 /* ALIGN may well be better aligned than TYPE, e.g. due to
4523 PARM_BOUNDARY. Assume the caller isn't lying. */
4524 set_mem_align (target, align);
4525
4526 /* If part should go in registers and pushing to that part would
4527 overwrite some of the values that need to go into regs, load the
4528 overlapping values into temporary pseudos to be moved into the hard
4529 regs at the end after the stack pushing has completed.
4530 We cannot load them directly into the hard regs here because
4531 they can be clobbered by the block move expansions.
4532 See PR 65358. */
4533
4534 if (partial > 0 && reg != 0 && mode == BLKmode
4535 && GET_CODE (reg) != PARALLEL)
4536 {
4537 overlapping = memory_load_overlap (XEXP (x, 0), temp, partial);
4538 if (overlapping > 0)
4539 {
4540 gcc_assert (overlapping % UNITS_PER_WORD == 0);
4541 overlapping /= UNITS_PER_WORD;
4542
4543 tmp_regs = XALLOCAVEC (rtx, overlapping);
4544
4545 for (int i = 0; i < overlapping; i++)
4546 tmp_regs[i] = gen_reg_rtx (word_mode);
4547
4548 for (int i = 0; i < overlapping; i++)
4549 emit_move_insn (tmp_regs[i],
4550 operand_subword_force (target, i, mode));
4551 }
4552 else if (overlapping == -1)
4553 overlapping = 0;
4554 /* Could not determine whether there is overlap.
4555 Fail the sibcall. */
4556 else
4557 {
4558 overlapping = 0;
4559 if (sibcall_p)
4560 return false;
4561 }
4562 }
4563 emit_block_move (target, xinner, size, BLOCK_OP_CALL_PARM);
4564 }
4565 }
4566 else if (partial > 0)
4567 {
4568 /* Scalar partly in registers. This case is only supported
4569 for fixed-width modes. */
4570 int size = GET_MODE_SIZE (mode).to_constant ();
4571 size /= UNITS_PER_WORD;
4572 int i;
4573 int not_stack;
4574 /* # bytes of start of argument
4575 that we must make space for but need not store. */
4576 int offset = partial % (PARM_BOUNDARY / BITS_PER_UNIT);
4577 int args_offset = INTVAL (args_so_far);
4578 int skip;
4579
4580 /* Push padding now if padding above and stack grows down,
4581 or if padding below and stack grows up.
4582 But if space already allocated, this has already been done. */
4583 if (maybe_ne (extra, 0)
4584 && args_addr == 0
4585 && where_pad != PAD_NONE
4586 && where_pad != stack_direction)
4587 anti_adjust_stack (gen_int_mode (extra, Pmode));
4588
4589 /* If we make space by pushing it, we might as well push
4590 the real data. Otherwise, we can leave OFFSET nonzero
4591 and leave the space uninitialized. */
4592 if (args_addr == 0)
4593 offset = 0;
4594
4595 /* Now NOT_STACK gets the number of words that we don't need to
4596 allocate on the stack. Convert OFFSET to words too. */
4597 not_stack = (partial - offset) / UNITS_PER_WORD;
4598 offset /= UNITS_PER_WORD;
4599
4600 /* If the partial register-part of the arg counts in its stack size,
4601 skip the part of stack space corresponding to the registers.
4602 Otherwise, start copying to the beginning of the stack space,
4603 by setting SKIP to 0. */
4604 skip = (reg_parm_stack_space == 0) ? 0 : not_stack;
4605
4606 if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x))
4607 x = validize_mem (force_const_mem (mode, x));
4608
4609 /* If X is a hard register in a non-integer mode, copy it into a pseudo;
4610 SUBREGs of such registers are not allowed. */
4611 if ((REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
4612 && GET_MODE_CLASS (GET_MODE (x)) != MODE_INT))
4613 x = copy_to_reg (x);
4614
4615 /* Loop over all the words allocated on the stack for this arg. */
4616 /* We can do it by words, because any scalar bigger than a word
4617 has a size that is a multiple of a word. */
4618 for (i = size - 1; i >= not_stack; i--)
4619 if (i >= not_stack + offset)
4620 if (!emit_push_insn (operand_subword_force (x, i, mode),
4621 word_mode, NULL_TREE, NULL_RTX, align, 0, NULL_RTX,
4622 0, args_addr,
4623 GEN_INT (args_offset + ((i - not_stack + skip)
4624 * UNITS_PER_WORD)),
4625 reg_parm_stack_space, alignment_pad, sibcall_p))
4626 return false;
4627 }
4628 else
4629 {
4630 rtx addr;
4631 rtx dest;
4632
4633 /* Push padding now if padding above and stack grows down,
4634 or if padding below and stack grows up.
4635 But if space already allocated, this has already been done. */
4636 if (maybe_ne (extra, 0)
4637 && args_addr == 0
4638 && where_pad != PAD_NONE
4639 && where_pad != stack_direction)
4640 anti_adjust_stack (gen_int_mode (extra, Pmode));
4641
4642 #ifdef PUSH_ROUNDING
4643 if (args_addr == 0 && PUSH_ARGS)
4644 emit_single_push_insn (mode, x, type);
4645 else
4646 #endif
4647 {
4648 addr = simplify_gen_binary (PLUS, Pmode, args_addr, args_so_far);
4649 dest = gen_rtx_MEM (mode, memory_address (mode, addr));
4650
4651 /* We do *not* set_mem_attributes here, because incoming arguments
4652 may overlap with sibling call outgoing arguments and we cannot
4653 allow reordering of reads from function arguments with stores
4654 to outgoing arguments of sibling calls. We do, however, want
4655 to record the alignment of the stack slot. */
4656 /* ALIGN may well be better aligned than TYPE, e.g. due to
4657 PARM_BOUNDARY. Assume the caller isn't lying. */
4658 set_mem_align (dest, align);
4659
4660 emit_move_insn (dest, x);
4661 }
4662 }
4663
4664 /* Move the partial arguments into the registers and any overlapping
4665 values that we moved into the pseudos in tmp_regs. */
4666 if (partial > 0 && reg != 0)
4667 {
4668 /* Handle calls that pass values in multiple non-contiguous locations.
4669 The Irix 6 ABI has examples of this. */
4670 if (GET_CODE (reg) == PARALLEL)
4671 emit_group_load (reg, x, type, -1);
4672 else
4673 {
4674 gcc_assert (partial % UNITS_PER_WORD == 0);
4675 move_block_to_reg (REGNO (reg), x, nregs - overlapping, mode);
4676
4677 for (int i = 0; i < overlapping; i++)
4678 emit_move_insn (gen_rtx_REG (word_mode, REGNO (reg)
4679 + nregs - overlapping + i),
4680 tmp_regs[i]);
4681
4682 }
4683 }
4684
4685 if (maybe_ne (extra, 0) && args_addr == 0 && where_pad == stack_direction)
4686 anti_adjust_stack (gen_int_mode (extra, Pmode));
4687
4688 if (alignment_pad && args_addr == 0)
4689 anti_adjust_stack (alignment_pad);
4690
4691 return true;
4692 }
4693 \f
4694 /* Return X if X can be used as a subtarget in a sequence of arithmetic
4695 operations. */
4696
4697 static rtx
4698 get_subtarget (rtx x)
4699 {
4700 return (optimize
4701 || x == 0
4702 /* Only registers can be subtargets. */
4703 || !REG_P (x)
4704 /* Don't use hard regs to avoid extending their life. */
4705 || REGNO (x) < FIRST_PSEUDO_REGISTER
4706 ? 0 : x);
4707 }
4708
4709 /* A subroutine of expand_assignment. Optimize FIELD op= VAL, where
4710 FIELD is a bitfield. Returns true if the optimization was successful,
4711 and there's nothing else to do. */
4712
4713 static bool
4714 optimize_bitfield_assignment_op (poly_uint64 pbitsize,
4715 poly_uint64 pbitpos,
4716 poly_uint64 pbitregion_start,
4717 poly_uint64 pbitregion_end,
4718 machine_mode mode1, rtx str_rtx,
4719 tree to, tree src, bool reverse)
4720 {
4721 /* str_mode is not guaranteed to be a scalar mode. */
4722 machine_mode str_mode = GET_MODE (str_rtx);
4723 unsigned int str_bitsize;
4724 tree op0, op1;
4725 rtx value, result;
4726 optab binop;
4727 gimple *srcstmt;
4728 enum tree_code code;
4729
4730 unsigned HOST_WIDE_INT bitsize, bitpos, bitregion_start, bitregion_end;
4731 if (mode1 != VOIDmode
4732 || !pbitsize.is_constant (&bitsize)
4733 || !pbitpos.is_constant (&bitpos)
4734 || !pbitregion_start.is_constant (&bitregion_start)
4735 || !pbitregion_end.is_constant (&bitregion_end)
4736 || bitsize >= BITS_PER_WORD
4737 || !GET_MODE_BITSIZE (str_mode).is_constant (&str_bitsize)
4738 || str_bitsize > BITS_PER_WORD
4739 || TREE_SIDE_EFFECTS (to)
4740 || TREE_THIS_VOLATILE (to))
4741 return false;
4742
4743 STRIP_NOPS (src);
4744 if (TREE_CODE (src) != SSA_NAME)
4745 return false;
4746 if (TREE_CODE (TREE_TYPE (src)) != INTEGER_TYPE)
4747 return false;
4748
4749 srcstmt = get_gimple_for_ssa_name (src);
4750 if (!srcstmt
4751 || TREE_CODE_CLASS (gimple_assign_rhs_code (srcstmt)) != tcc_binary)
4752 return false;
4753
4754 code = gimple_assign_rhs_code (srcstmt);
4755
4756 op0 = gimple_assign_rhs1 (srcstmt);
4757
4758 /* If OP0 is an SSA_NAME, then we want to walk the use-def chain
4759 to find its initialization. Hopefully the initialization will
4760 be from a bitfield load. */
4761 if (TREE_CODE (op0) == SSA_NAME)
4762 {
4763 gimple *op0stmt = get_gimple_for_ssa_name (op0);
4764
4765 /* We want to eventually have OP0 be the same as TO, which
4766 should be a bitfield. */
4767 if (!op0stmt
4768 || !is_gimple_assign (op0stmt)
4769 || gimple_assign_rhs_code (op0stmt) != TREE_CODE (to))
4770 return false;
4771 op0 = gimple_assign_rhs1 (op0stmt);
4772 }
4773
4774 op1 = gimple_assign_rhs2 (srcstmt);
4775
4776 if (!operand_equal_p (to, op0, 0))
4777 return false;
4778
4779 if (MEM_P (str_rtx))
4780 {
4781 unsigned HOST_WIDE_INT offset1;
4782
4783 if (str_bitsize == 0 || str_bitsize > BITS_PER_WORD)
4784 str_bitsize = BITS_PER_WORD;
4785
4786 scalar_int_mode best_mode;
4787 if (!get_best_mode (bitsize, bitpos, bitregion_start, bitregion_end,
4788 MEM_ALIGN (str_rtx), str_bitsize, false, &best_mode))
4789 return false;
4790 str_mode = best_mode;
4791 str_bitsize = GET_MODE_BITSIZE (best_mode);
4792
4793 offset1 = bitpos;
4794 bitpos %= str_bitsize;
4795 offset1 = (offset1 - bitpos) / BITS_PER_UNIT;
4796 str_rtx = adjust_address (str_rtx, str_mode, offset1);
4797 }
4798 else if (!REG_P (str_rtx) && GET_CODE (str_rtx) != SUBREG)
4799 return false;
4800
4801 /* If the bit field covers the whole REG/MEM, store_field
4802 will likely generate better code. */
4803 if (bitsize >= str_bitsize)
4804 return false;
4805
4806 /* We can't handle fields split across multiple entities. */
4807 if (bitpos + bitsize > str_bitsize)
4808 return false;
4809
4810 if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
4811 bitpos = str_bitsize - bitpos - bitsize;
4812
4813 switch (code)
4814 {
4815 case PLUS_EXPR:
4816 case MINUS_EXPR:
4817 /* For now, just optimize the case of the topmost bitfield
4818 where we don't need to do any masking and also
4819 1-bit bitfields where xor can be used.
4820 We might win by one instruction for the other bitfields
4821 too if insv/extv instructions aren't used, so that
4822 can be added later. */
4823 if ((reverse || bitpos + bitsize != str_bitsize)
4824 && (bitsize != 1 || TREE_CODE (op1) != INTEGER_CST))
4825 break;
4826
4827 value = expand_expr (op1, NULL_RTX, str_mode, EXPAND_NORMAL);
4828 value = convert_modes (str_mode,
4829 TYPE_MODE (TREE_TYPE (op1)), value,
4830 TYPE_UNSIGNED (TREE_TYPE (op1)));
4831
4832 /* We may be accessing data outside the field, which means
4833 we can alias adjacent data. */
4834 if (MEM_P (str_rtx))
4835 {
4836 str_rtx = shallow_copy_rtx (str_rtx);
4837 set_mem_alias_set (str_rtx, 0);
4838 set_mem_expr (str_rtx, 0);
4839 }
4840
4841 if (bitsize == 1 && (reverse || bitpos + bitsize != str_bitsize))
4842 {
4843 value = expand_and (str_mode, value, const1_rtx, NULL);
4844 binop = xor_optab;
4845 }
4846 else
4847 binop = code == PLUS_EXPR ? add_optab : sub_optab;
4848
4849 value = expand_shift (LSHIFT_EXPR, str_mode, value, bitpos, NULL_RTX, 1);
4850 if (reverse)
4851 value = flip_storage_order (str_mode, value);
4852 result = expand_binop (str_mode, binop, str_rtx,
4853 value, str_rtx, 1, OPTAB_WIDEN);
4854 if (result != str_rtx)
4855 emit_move_insn (str_rtx, result);
4856 return true;
4857
4858 case BIT_IOR_EXPR:
4859 case BIT_XOR_EXPR:
4860 if (TREE_CODE (op1) != INTEGER_CST)
4861 break;
4862 value = expand_expr (op1, NULL_RTX, str_mode, EXPAND_NORMAL);
4863 value = convert_modes (str_mode,
4864 TYPE_MODE (TREE_TYPE (op1)), value,
4865 TYPE_UNSIGNED (TREE_TYPE (op1)));
4866
4867 /* We may be accessing data outside the field, which means
4868 we can alias adjacent data. */
4869 if (MEM_P (str_rtx))
4870 {
4871 str_rtx = shallow_copy_rtx (str_rtx);
4872 set_mem_alias_set (str_rtx, 0);
4873 set_mem_expr (str_rtx, 0);
4874 }
4875
4876 binop = code == BIT_IOR_EXPR ? ior_optab : xor_optab;
4877 if (bitpos + bitsize != str_bitsize)
4878 {
4879 rtx mask = gen_int_mode ((HOST_WIDE_INT_1U << bitsize) - 1,
4880 str_mode);
4881 value = expand_and (str_mode, value, mask, NULL_RTX);
4882 }
4883 value = expand_shift (LSHIFT_EXPR, str_mode, value, bitpos, NULL_RTX, 1);
4884 if (reverse)
4885 value = flip_storage_order (str_mode, value);
4886 result = expand_binop (str_mode, binop, str_rtx,
4887 value, str_rtx, 1, OPTAB_WIDEN);
4888 if (result != str_rtx)
4889 emit_move_insn (str_rtx, result);
4890 return true;
4891
4892 default:
4893 break;
4894 }
4895
4896 return false;
4897 }
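/* For example, for a single-bit field, an assignment such as

     struct S { unsigned f : 1; } *p;
     p->f ^= 1;

   can be carried out as one xor on the word containing the field, with no
   extract/insert sequence; that is what the BIT_XOR_EXPR case above
   arranges when the right-hand operand is a constant.  */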
4898
4899 /* In the C++ memory model, consecutive bit fields in a structure are
4900 considered one memory location.
4901
4902 Given a COMPONENT_REF EXP at position (BITPOS, OFFSET), this function
4903 returns the bit range of consecutive bits in which this COMPONENT_REF
4904 belongs. The values are returned in *BITSTART and *BITEND. *BITPOS
4905 and *OFFSET may be adjusted in the process.
4906
4907 If the access does not need to be restricted, 0 is returned in both
4908 *BITSTART and *BITEND. */
4909
4910 void
4911 get_bit_range (poly_uint64_pod *bitstart, poly_uint64_pod *bitend, tree exp,
4912 poly_int64_pod *bitpos, tree *offset)
4913 {
4914 poly_int64 bitoffset;
4915 tree field, repr;
4916
4917 gcc_assert (TREE_CODE (exp) == COMPONENT_REF);
4918
4919 field = TREE_OPERAND (exp, 1);
4920 repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
4921 /* If we do not have a DECL_BIT_FIELD_REPRESENTATIVE there is no
4922 need to limit the range we can access. */
4923 if (!repr)
4924 {
4925 *bitstart = *bitend = 0;
4926 return;
4927 }
4928
4929 /* If we have a DECL_BIT_FIELD_REPRESENTATIVE but the enclosing record is
4930 part of a larger bit field, then the representative does not serve any
4931 useful purpose. This can occur in Ada. */
4932 if (handled_component_p (TREE_OPERAND (exp, 0)))
4933 {
4934 machine_mode rmode;
4935 poly_int64 rbitsize, rbitpos;
4936 tree roffset;
4937 int unsignedp, reversep, volatilep = 0;
4938 get_inner_reference (TREE_OPERAND (exp, 0), &rbitsize, &rbitpos,
4939 &roffset, &rmode, &unsignedp, &reversep,
4940 &volatilep);
4941 if (!multiple_p (rbitpos, BITS_PER_UNIT))
4942 {
4943 *bitstart = *bitend = 0;
4944 return;
4945 }
4946 }
4947
4948 /* Compute the adjustment to bitpos from the offset of the field
4949 relative to the representative. DECL_FIELD_OFFSET of field and
4950 repr are the same by construction if they are not constants,
4951 see finish_bitfield_layout. */
4952 poly_uint64 field_offset, repr_offset;
4953 if (poly_int_tree_p (DECL_FIELD_OFFSET (field), &field_offset)
4954 && poly_int_tree_p (DECL_FIELD_OFFSET (repr), &repr_offset))
4955 bitoffset = (field_offset - repr_offset) * BITS_PER_UNIT;
4956 else
4957 bitoffset = 0;
4958 bitoffset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
4959 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
4960
4961 /* If the adjustment is larger than bitpos, we would have a negative bit
4962 position for the lower bound and this may wreak havoc later. Adjust
4963 offset and bitpos to make the lower bound non-negative in that case. */
4964 if (maybe_gt (bitoffset, *bitpos))
4965 {
4966 poly_int64 adjust_bits = upper_bound (bitoffset, *bitpos) - *bitpos;
4967 poly_int64 adjust_bytes = exact_div (adjust_bits, BITS_PER_UNIT);
4968
4969 *bitpos += adjust_bits;
4970 if (*offset == NULL_TREE)
4971 *offset = size_int (-adjust_bytes);
4972 else
4973 *offset = size_binop (MINUS_EXPR, *offset, size_int (adjust_bytes));
4974 *bitstart = 0;
4975 }
4976 else
4977 *bitstart = *bitpos - bitoffset;
4978
4979 *bitend = *bitstart + tree_to_poly_uint64 (DECL_SIZE (repr)) - 1;
4980 }
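/* For example, in

     struct S { int a : 7; int b : 9; char c; };

   the bit fields A and B typically share one DECL_BIT_FIELD_REPRESENTATIVE
   while C is a separate memory location, so a store to A is given a bit
   range covering only the representative that holds A and B and must not
   touch C.  */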
4981
4982 /* Returns true if BASE is a DECL that does not reside in memory and
4983 has non-BLKmode. DECL_RTL must not be a MEM; if
4984 DECL_RTL was not set yet, return false. */
4985
4986 static inline bool
4987 non_mem_decl_p (tree base)
4988 {
4989 if (!DECL_P (base)
4990 || TREE_ADDRESSABLE (base)
4991 || DECL_MODE (base) == BLKmode)
4992 return false;
4993
4994 if (!DECL_RTL_SET_P (base))
4995 return false;
4996
4997 return (!MEM_P (DECL_RTL (base)));
4998 }
4999
5000 /* Returns true if REF refers to an object that does not
5001 reside in memory and has non-BLKmode. */
5002
5003 static inline bool
5004 mem_ref_refers_to_non_mem_p (tree ref)
5005 {
5006 tree base;
5007
5008 if (TREE_CODE (ref) == MEM_REF
5009 || TREE_CODE (ref) == TARGET_MEM_REF)
5010 {
5011 tree addr = TREE_OPERAND (ref, 0);
5012
5013 if (TREE_CODE (addr) != ADDR_EXPR)
5014 return false;
5015
5016 base = TREE_OPERAND (addr, 0);
5017 }
5018 else
5019 base = ref;
5020
5021 return non_mem_decl_p (base);
5022 }
5023
5024 /* Expand an assignment that stores the value of FROM into TO. If NONTEMPORAL
5025 is true, try generating a nontemporal store. */
5026
5027 void
5028 expand_assignment (tree to, tree from, bool nontemporal)
5029 {
5030 rtx to_rtx = 0;
5031 rtx result;
5032 machine_mode mode;
5033 unsigned int align;
5034 enum insn_code icode;
5035
5036 /* Don't crash if the lhs of the assignment was erroneous. */
5037 if (TREE_CODE (to) == ERROR_MARK)
5038 {
5039 expand_normal (from);
5040 return;
5041 }
5042
5043 /* Optimize away no-op moves without side-effects. */
5044 if (operand_equal_p (to, from, 0))
5045 return;
5046
5047 /* Handle misaligned stores. */
5048 mode = TYPE_MODE (TREE_TYPE (to));
5049 if ((TREE_CODE (to) == MEM_REF
5050 || TREE_CODE (to) == TARGET_MEM_REF
5051 || DECL_P (to))
5052 && mode != BLKmode
5053 && !mem_ref_refers_to_non_mem_p (to)
5054 && ((align = get_object_alignment (to))
5055 < GET_MODE_ALIGNMENT (mode))
5056 && (((icode = optab_handler (movmisalign_optab, mode))
5057 != CODE_FOR_nothing)
5058 || targetm.slow_unaligned_access (mode, align)))
5059 {
5060 rtx reg, mem;
5061
5062 reg = expand_expr (from, NULL_RTX, VOIDmode, EXPAND_NORMAL);
5063 reg = force_not_mem (reg);
5064 mem = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
5065 if (TREE_CODE (to) == MEM_REF && REF_REVERSE_STORAGE_ORDER (to))
5066 reg = flip_storage_order (mode, reg);
5067
5068 if (icode != CODE_FOR_nothing)
5069 {
5070 class expand_operand ops[2];
5071
5072 create_fixed_operand (&ops[0], mem);
5073 create_input_operand (&ops[1], reg, mode);
5074 /* The movmisalign<mode> pattern cannot fail, else the assignment
5075 would silently be omitted. */
5076 expand_insn (icode, 2, ops);
5077 }
5078 else
5079 store_bit_field (mem, GET_MODE_BITSIZE (mode), 0, 0, 0, mode, reg,
5080 false);
5081 return;
5082 }
5083
5084 /* Assignment of a structure component needs special treatment
5085 if the structure component's rtx is not simply a MEM.
5086 Assignment of an array element at a constant index, and assignment of
5087 an array element in an unaligned packed structure field, has the same
5088 problem. Same for (partially) storing into a non-memory object. */
5089 if (handled_component_p (to)
5090 || (TREE_CODE (to) == MEM_REF
5091 && (REF_REVERSE_STORAGE_ORDER (to)
5092 || mem_ref_refers_to_non_mem_p (to)))
5093 || TREE_CODE (TREE_TYPE (to)) == ARRAY_TYPE)
5094 {
5095 machine_mode mode1;
5096 poly_int64 bitsize, bitpos;
5097 poly_uint64 bitregion_start = 0;
5098 poly_uint64 bitregion_end = 0;
5099 tree offset;
5100 int unsignedp, reversep, volatilep = 0;
5101 tree tem;
5102
5103 push_temp_slots ();
5104 tem = get_inner_reference (to, &bitsize, &bitpos, &offset, &mode1,
5105 &unsignedp, &reversep, &volatilep);
5106
5107 /* Make sure bitpos is not negative, it can wreak havoc later. */
5108 if (maybe_lt (bitpos, 0))
5109 {
5110 gcc_assert (offset == NULL_TREE);
5111 offset = size_int (bits_to_bytes_round_down (bitpos));
5112 bitpos = num_trailing_bits (bitpos);
5113 }
5114
5115 if (TREE_CODE (to) == COMPONENT_REF
5116 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (to, 1)))
5117 get_bit_range (&bitregion_start, &bitregion_end, to, &bitpos, &offset);
5118 /* The C++ memory model naturally applies to byte-aligned fields.
5119 However, if we do not have a DECL_BIT_FIELD_TYPE but BITPOS or
5120 BITSIZE are not byte-aligned, there is no need to limit the range
5121 we can access. This can occur with packed structures in Ada. */
5122 else if (maybe_gt (bitsize, 0)
5123 && multiple_p (bitsize, BITS_PER_UNIT)
5124 && multiple_p (bitpos, BITS_PER_UNIT))
5125 {
5126 bitregion_start = bitpos;
5127 bitregion_end = bitpos + bitsize - 1;
5128 }
5129
5130 to_rtx = expand_expr (tem, NULL_RTX, VOIDmode, EXPAND_WRITE);
5131
5132 /* If the field has a mode, we want to access it in the
5133 field's mode, not the computed mode.
5134 If a MEM has VOIDmode (external with incomplete type),
5135 use BLKmode for it instead. */
5136 if (MEM_P (to_rtx))
5137 {
5138 if (mode1 != VOIDmode)
5139 to_rtx = adjust_address (to_rtx, mode1, 0);
5140 else if (GET_MODE (to_rtx) == VOIDmode)
5141 to_rtx = adjust_address (to_rtx, BLKmode, 0);
5142 }
5143
5144 if (offset != 0)
5145 {
5146 machine_mode address_mode;
5147 rtx offset_rtx;
5148
5149 if (!MEM_P (to_rtx))
5150 {
5151 /* We can get constant negative offsets into arrays with broken
5152 user code. Translate this to a trap instead of ICEing. */
5153 gcc_assert (TREE_CODE (offset) == INTEGER_CST);
5154 expand_builtin_trap ();
5155 to_rtx = gen_rtx_MEM (BLKmode, const0_rtx);
5156 }
5157
5158 offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, EXPAND_SUM);
5159 address_mode = get_address_mode (to_rtx);
5160 if (GET_MODE (offset_rtx) != address_mode)
5161 {
5162 /* We cannot be sure that the RTL in offset_rtx is valid outside
5163 of a memory address context, so force it into a register
5164 before attempting to convert it to the desired mode. */
5165 offset_rtx = force_operand (offset_rtx, NULL_RTX);
5166 offset_rtx = convert_to_mode (address_mode, offset_rtx, 0);
5167 }
5168
5169 /* If we have an expression in OFFSET_RTX and a non-zero
5170 byte offset in BITPOS, adding the byte offset before the
5171 OFFSET_RTX results in better intermediate code, which makes
5172 later rtl optimization passes perform better.
5173
5174 We prefer intermediate code like this:
5175
5176 r124:DI=r123:DI+0x18
5177 [r124:DI]=r121:DI
5178
5179 ... instead of ...
5180
5181 r124:DI=r123:DI+0x10
5182 [r124:DI+0x8]=r121:DI
5183
5184 This is only done for aligned data values, as these can
5185 be expected to result in single move instructions. */
5186 poly_int64 bytepos;
5187 if (mode1 != VOIDmode
5188 && maybe_ne (bitpos, 0)
5189 && maybe_gt (bitsize, 0)
5190 && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
5191 && multiple_p (bitpos, bitsize)
5192 && multiple_p (bitsize, GET_MODE_ALIGNMENT (mode1))
5193 && MEM_ALIGN (to_rtx) >= GET_MODE_ALIGNMENT (mode1))
5194 {
5195 to_rtx = adjust_address (to_rtx, mode1, bytepos);
5196 bitregion_start = 0;
5197 if (known_ge (bitregion_end, poly_uint64 (bitpos)))
5198 bitregion_end -= bitpos;
5199 bitpos = 0;
5200 }
5201
5202 to_rtx = offset_address (to_rtx, offset_rtx,
5203 highest_pow2_factor_for_target (to,
5204 offset));
5205 }
5206
5207 /* No action is needed if the target is not a memory and the field
5208 lies completely outside that target. This can occur if the source
5209 code contains an out-of-bounds access to a small array. */
5210 if (!MEM_P (to_rtx)
5211 && GET_MODE (to_rtx) != BLKmode
5212 && known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (to_rtx))))
5213 {
5214 expand_normal (from);
5215 result = NULL;
5216 }
5217 /* Handle expand_expr of a complex value returning a CONCAT. */
5218 else if (GET_CODE (to_rtx) == CONCAT)
5219 {
5220 machine_mode to_mode = GET_MODE (to_rtx);
5221 gcc_checking_assert (COMPLEX_MODE_P (to_mode));
5222 poly_int64 mode_bitsize = GET_MODE_BITSIZE (to_mode);
5223 unsigned short inner_bitsize = GET_MODE_UNIT_BITSIZE (to_mode);
5224 if (TYPE_MODE (TREE_TYPE (from)) == to_mode
5225 && known_eq (bitpos, 0)
5226 && known_eq (bitsize, mode_bitsize))
5227 result = store_expr (from, to_rtx, false, nontemporal, reversep);
5228 else if (TYPE_MODE (TREE_TYPE (from)) == GET_MODE_INNER (to_mode)
5229 && known_eq (bitsize, inner_bitsize)
5230 && (known_eq (bitpos, 0)
5231 || known_eq (bitpos, inner_bitsize)))
5232 result = store_expr (from, XEXP (to_rtx, maybe_ne (bitpos, 0)),
5233 false, nontemporal, reversep);
5234 else if (known_le (bitpos + bitsize, inner_bitsize))
5235 result = store_field (XEXP (to_rtx, 0), bitsize, bitpos,
5236 bitregion_start, bitregion_end,
5237 mode1, from, get_alias_set (to),
5238 nontemporal, reversep);
5239 else if (known_ge (bitpos, inner_bitsize))
5240 result = store_field (XEXP (to_rtx, 1), bitsize,
5241 bitpos - inner_bitsize,
5242 bitregion_start, bitregion_end,
5243 mode1, from, get_alias_set (to),
5244 nontemporal, reversep);
5245 else if (known_eq (bitpos, 0) && known_eq (bitsize, mode_bitsize))
5246 {
5247 result = expand_normal (from);
5248 if (GET_CODE (result) == CONCAT)
5249 {
5250 to_mode = GET_MODE_INNER (to_mode);
5251 machine_mode from_mode = GET_MODE_INNER (GET_MODE (result));
5252 rtx from_real
5253 = simplify_gen_subreg (to_mode, XEXP (result, 0),
5254 from_mode, 0);
5255 rtx from_imag
5256 = simplify_gen_subreg (to_mode, XEXP (result, 1),
5257 from_mode, 0);
5258 if (!from_real || !from_imag)
5259 goto concat_store_slow;
5260 emit_move_insn (XEXP (to_rtx, 0), from_real);
5261 emit_move_insn (XEXP (to_rtx, 1), from_imag);
5262 }
5263 else
5264 {
5265 rtx from_rtx;
5266 if (MEM_P (result))
5267 from_rtx = change_address (result, to_mode, NULL_RTX);
5268 else
5269 from_rtx
5270 = simplify_gen_subreg (to_mode, result,
5271 TYPE_MODE (TREE_TYPE (from)), 0);
5272 if (from_rtx)
5273 {
5274 emit_move_insn (XEXP (to_rtx, 0),
5275 read_complex_part (from_rtx, false));
5276 emit_move_insn (XEXP (to_rtx, 1),
5277 read_complex_part (from_rtx, true));
5278 }
5279 else
5280 {
5281 machine_mode to_mode
5282 = GET_MODE_INNER (GET_MODE (to_rtx));
5283 rtx from_real
5284 = simplify_gen_subreg (to_mode, result,
5285 TYPE_MODE (TREE_TYPE (from)),
5286 0);
5287 rtx from_imag
5288 = simplify_gen_subreg (to_mode, result,
5289 TYPE_MODE (TREE_TYPE (from)),
5290 GET_MODE_SIZE (to_mode));
5291 if (!from_real || !from_imag)
5292 goto concat_store_slow;
5293 emit_move_insn (XEXP (to_rtx, 0), from_real);
5294 emit_move_insn (XEXP (to_rtx, 1), from_imag);
5295 }
5296 }
5297 }
5298 else
5299 {
5300 concat_store_slow:;
5301 rtx temp = assign_stack_temp (to_mode,
5302 GET_MODE_SIZE (GET_MODE (to_rtx)));
5303 write_complex_part (temp, XEXP (to_rtx, 0), false);
5304 write_complex_part (temp, XEXP (to_rtx, 1), true);
5305 result = store_field (temp, bitsize, bitpos,
5306 bitregion_start, bitregion_end,
5307 mode1, from, get_alias_set (to),
5308 nontemporal, reversep);
5309 emit_move_insn (XEXP (to_rtx, 0), read_complex_part (temp, false));
5310 emit_move_insn (XEXP (to_rtx, 1), read_complex_part (temp, true));
5311 }
5312 }
5313 /* For calls to functions returning variable length structures, if TO_RTX
5314 is not a MEM, go through a MEM because we must not create temporaries
5315 of the VLA type. */
5316 else if (!MEM_P (to_rtx)
5317 && TREE_CODE (from) == CALL_EXPR
5318 && COMPLETE_TYPE_P (TREE_TYPE (from))
5319 && TREE_CODE (TYPE_SIZE (TREE_TYPE (from))) != INTEGER_CST)
5320 {
5321 rtx temp = assign_stack_temp (GET_MODE (to_rtx),
5322 GET_MODE_SIZE (GET_MODE (to_rtx)));
5323 result = store_field (temp, bitsize, bitpos, bitregion_start,
5324 bitregion_end, mode1, from, get_alias_set (to),
5325 nontemporal, reversep);
5326 emit_move_insn (to_rtx, temp);
5327 }
5328 else
5329 {
5330 if (MEM_P (to_rtx))
5331 {
5332 /* If the field is at offset zero, we could have been given the
5333 DECL_RTX of the parent struct. Don't munge it. */
5334 to_rtx = shallow_copy_rtx (to_rtx);
5335 set_mem_attributes_minus_bitpos (to_rtx, to, 0, bitpos);
5336 if (volatilep)
5337 MEM_VOLATILE_P (to_rtx) = 1;
5338 }
5339
5340 gcc_checking_assert (known_ge (bitpos, 0));
5341 if (optimize_bitfield_assignment_op (bitsize, bitpos,
5342 bitregion_start, bitregion_end,
5343 mode1, to_rtx, to, from,
5344 reversep))
5345 result = NULL;
5346 else
5347 result = store_field (to_rtx, bitsize, bitpos,
5348 bitregion_start, bitregion_end,
5349 mode1, from, get_alias_set (to),
5350 nontemporal, reversep);
5351 }
5352
5353 if (result)
5354 preserve_temp_slots (result);
5355 pop_temp_slots ();
5356 return;
5357 }
5358
5359 /* If the rhs is a function call and its value is not an aggregate,
5360 call the function before we start to compute the lhs.
5361 This is needed for correct code for cases such as
5362 val = setjmp (buf) on machines where reference to val
5363 requires loading up part of an address in a separate insn.
5364
5365 Don't do this if TO is a VAR_DECL or PARM_DECL whose DECL_RTL is REG
5366 since it might be a promoted variable where the zero- or sign- extension
5367 needs to be done. Handling this in the normal way is safe because no
5368 computation is done before the call. The same is true for SSA names. */
5369 if (TREE_CODE (from) == CALL_EXPR && ! aggregate_value_p (from, from)
5370 && COMPLETE_TYPE_P (TREE_TYPE (from))
5371 && TREE_CODE (TYPE_SIZE (TREE_TYPE (from))) == INTEGER_CST
5372 && ! (((VAR_P (to)
5373 || TREE_CODE (to) == PARM_DECL
5374 || TREE_CODE (to) == RESULT_DECL)
5375 && REG_P (DECL_RTL (to)))
5376 || TREE_CODE (to) == SSA_NAME))
5377 {
5378 rtx value;
5379
5380 push_temp_slots ();
5381 value = expand_normal (from);
5382
5383 if (to_rtx == 0)
5384 to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
5385
5386 /* Handle calls that return values in multiple non-contiguous locations.
5387 The Irix 6 ABI has examples of this. */
5388 if (GET_CODE (to_rtx) == PARALLEL)
5389 {
5390 if (GET_CODE (value) == PARALLEL)
5391 emit_group_move (to_rtx, value);
5392 else
5393 emit_group_load (to_rtx, value, TREE_TYPE (from),
5394 int_size_in_bytes (TREE_TYPE (from)));
5395 }
5396 else if (GET_CODE (value) == PARALLEL)
5397 emit_group_store (to_rtx, value, TREE_TYPE (from),
5398 int_size_in_bytes (TREE_TYPE (from)));
5399 else if (GET_MODE (to_rtx) == BLKmode)
5400 {
5401 /* Handle calls that return BLKmode values in registers. */
5402 if (REG_P (value))
5403 copy_blkmode_from_reg (to_rtx, value, TREE_TYPE (from));
5404 else
5405 emit_block_move (to_rtx, value, expr_size (from), BLOCK_OP_NORMAL);
5406 }
5407 else
5408 {
5409 if (POINTER_TYPE_P (TREE_TYPE (to)))
5410 value = convert_memory_address_addr_space
5411 (as_a <scalar_int_mode> (GET_MODE (to_rtx)), value,
5412 TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (to))));
5413
5414 emit_move_insn (to_rtx, value);
5415 }
5416
5417 preserve_temp_slots (to_rtx);
5418 pop_temp_slots ();
5419 return;
5420 }
5421
5422 /* Ordinary treatment. Expand TO to get a REG or MEM rtx. */
5423 to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
5424
5425 /* Don't move directly into a return register. */
5426 if (TREE_CODE (to) == RESULT_DECL
5427 && (REG_P (to_rtx) || GET_CODE (to_rtx) == PARALLEL))
5428 {
5429 rtx temp;
5430
5431 push_temp_slots ();
5432
5433 /* If the source is itself a return value, it still is in a pseudo at
5434 this point so we can move it back to the return register directly. */
5435 if (REG_P (to_rtx)
5436 && TYPE_MODE (TREE_TYPE (from)) == BLKmode
5437 && TREE_CODE (from) != CALL_EXPR)
5438 temp = copy_blkmode_to_reg (GET_MODE (to_rtx), from);
5439 else
5440 temp = expand_expr (from, NULL_RTX, GET_MODE (to_rtx), EXPAND_NORMAL);
5441
5442 /* Handle calls that return values in multiple non-contiguous locations.
5443 The Irix 6 ABI has examples of this. */
5444 if (GET_CODE (to_rtx) == PARALLEL)
5445 {
5446 if (GET_CODE (temp) == PARALLEL)
5447 emit_group_move (to_rtx, temp);
5448 else
5449 emit_group_load (to_rtx, temp, TREE_TYPE (from),
5450 int_size_in_bytes (TREE_TYPE (from)));
5451 }
5452 else if (temp)
5453 emit_move_insn (to_rtx, temp);
5454
5455 preserve_temp_slots (to_rtx);
5456 pop_temp_slots ();
5457 return;
5458 }
5459
5460 /* In case we are returning the contents of an object which overlaps
5461 the place the value is being stored, use a safe function when copying
5462 a value through a pointer into a structure value return block. */
5463 if (TREE_CODE (to) == RESULT_DECL
5464 && TREE_CODE (from) == INDIRECT_REF
5465 && ADDR_SPACE_GENERIC_P
5466 (TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (from, 0)))))
5467 && refs_may_alias_p (to, from)
5468 && cfun->returns_struct
5469 && !cfun->returns_pcc_struct)
5470 {
5471 rtx from_rtx, size;
5472
5473 push_temp_slots ();
5474 size = expr_size (from);
5475 from_rtx = expand_normal (from);
5476
5477 emit_block_move_via_libcall (XEXP (to_rtx, 0), XEXP (from_rtx, 0), size);
5478
5479 preserve_temp_slots (to_rtx);
5480 pop_temp_slots ();
5481 return;
5482 }
5483
5484 /* Compute FROM and store the value in the rtx we got. */
5485
5486 push_temp_slots ();
5487 result = store_expr (from, to_rtx, 0, nontemporal, false);
5488 preserve_temp_slots (result);
5489 pop_temp_slots ();
5490 return;
5491 }
5492
5493 /* Emit a nontemporal store insn that moves FROM to TO. Return true if this
5494 succeeded, false otherwise. */
5495
5496 bool
5497 emit_storent_insn (rtx to, rtx from)
5498 {
5499 class expand_operand ops[2];
5500 machine_mode mode = GET_MODE (to);
5501 enum insn_code code = optab_handler (storent_optab, mode);
5502
5503 if (code == CODE_FOR_nothing)
5504 return false;
5505
5506 create_fixed_operand (&ops[0], to);
5507 create_input_operand (&ops[1], from, mode);
5508 return maybe_expand_insn (code, 2, ops);
5509 }
5510
5511 /* Helper function for store_expr storing of a STRING_CST. */
5512
5513 static rtx
5514 string_cst_read_str (void *data, HOST_WIDE_INT offset, scalar_int_mode mode)
5515 {
5516 tree str = (tree) data;
5517
5518 gcc_assert (offset >= 0);
5519 if (offset >= TREE_STRING_LENGTH (str))
5520 return const0_rtx;
5521
5522 if ((unsigned HOST_WIDE_INT) offset + GET_MODE_SIZE (mode)
5523 > (unsigned HOST_WIDE_INT) TREE_STRING_LENGTH (str))
5524 {
5525 char *p = XALLOCAVEC (char, GET_MODE_SIZE (mode));
5526 size_t l = TREE_STRING_LENGTH (str) - offset;
5527 memcpy (p, TREE_STRING_POINTER (str) + offset, l);
5528 memset (p + l, '\0', GET_MODE_SIZE (mode) - l);
5529 return c_readstr (p, mode, false);
5530 }
5531
5532 return c_readstr (TREE_STRING_POINTER (str) + offset, mode, false);
5533 }
5534
5535 /* Generate code for computing expression EXP,
5536 and storing the value into TARGET.
5537
5538 If the mode is BLKmode then we may return TARGET itself.
5539 It turns out that in BLKmode it doesn't cause a problem,
5540 because C has no operators that could combine two different
5541 assignments into the same BLKmode object with different values
5542 with no sequence point. Will other languages need this to
5543 be more thorough?
5544
5545 If CALL_PARAM_P is nonzero, this is a store into a call param on the
5546 stack, and block moves may need to be treated specially.
5547
5548 If NONTEMPORAL is true, try using a nontemporal store instruction.
5549
5550 If REVERSE is true, the store is to be done in reverse order. */
5551
5552 rtx
5553 store_expr (tree exp, rtx target, int call_param_p,
5554 bool nontemporal, bool reverse)
5555 {
5556 rtx temp;
5557 rtx alt_rtl = NULL_RTX;
5558 location_t loc = curr_insn_location ();
5559
5560 if (VOID_TYPE_P (TREE_TYPE (exp)))
5561 {
5562 /* C++ can generate ?: expressions with a throw expression in one
5563 branch and an rvalue in the other. Here, we resolve attempts to
5564 store the throw expression's nonexistent result. */
5565 gcc_assert (!call_param_p);
5566 expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL);
5567 return NULL_RTX;
5568 }
5569 if (TREE_CODE (exp) == COMPOUND_EXPR)
5570 {
5571 /* Perform first part of compound expression, then assign from second
5572 part. */
5573 expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode,
5574 call_param_p ? EXPAND_STACK_PARM : EXPAND_NORMAL);
5575 return store_expr (TREE_OPERAND (exp, 1), target,
5576 call_param_p, nontemporal, reverse);
5577 }
5578 else if (TREE_CODE (exp) == COND_EXPR && GET_MODE (target) == BLKmode)
5579 {
5580 /* For a conditional expression, get a safe form of the target. Then
5581 test the condition, doing the appropriate assignment on either
5582 side. This avoids the creation of unnecessary temporaries.
5583 For non-BLKmode, it is more efficient not to do this. */
5584
5585 rtx_code_label *lab1 = gen_label_rtx (), *lab2 = gen_label_rtx ();
5586
5587 do_pending_stack_adjust ();
5588 NO_DEFER_POP;
5589 jumpifnot (TREE_OPERAND (exp, 0), lab1,
5590 profile_probability::uninitialized ());
5591 store_expr (TREE_OPERAND (exp, 1), target, call_param_p,
5592 nontemporal, reverse);
5593 emit_jump_insn (targetm.gen_jump (lab2));
5594 emit_barrier ();
5595 emit_label (lab1);
5596 store_expr (TREE_OPERAND (exp, 2), target, call_param_p,
5597 nontemporal, reverse);
5598 emit_label (lab2);
5599 OK_DEFER_POP;
5600
5601 return NULL_RTX;
5602 }
5603 else if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
5604 /* If this is a scalar in a register that is stored in a wider mode
5605 than the declared mode, compute the result into its declared mode
5606 and then convert to the wider mode. Our value is the computed
5607 expression. */
5608 {
5609 rtx inner_target = 0;
5610 scalar_int_mode outer_mode = subreg_unpromoted_mode (target);
5611 scalar_int_mode inner_mode = subreg_promoted_mode (target);
5612
5613 /* We can do the conversion inside EXP, which will often result
5614 in some optimizations. Do the conversion in two steps: first
5615 change the signedness, if needed, then the extend. But don't
5616 do this if the type of EXP is a subtype of something else
5617 since then the conversion might involve more than just
5618 converting modes. */
5619 if (INTEGRAL_TYPE_P (TREE_TYPE (exp))
5620 && TREE_TYPE (TREE_TYPE (exp)) == 0
5621 && GET_MODE_PRECISION (outer_mode)
5622 == TYPE_PRECISION (TREE_TYPE (exp)))
5623 {
5624 if (!SUBREG_CHECK_PROMOTED_SIGN (target,
5625 TYPE_UNSIGNED (TREE_TYPE (exp))))
5626 {
5627 /* Some types, e.g. Fortran's logical*4, won't have a signed
5628 version, so use the mode instead. */
5629 tree ntype
5630 = (signed_or_unsigned_type_for
5631 (SUBREG_PROMOTED_SIGN (target), TREE_TYPE (exp)));
5632 if (ntype == NULL)
5633 ntype = lang_hooks.types.type_for_mode
5634 (TYPE_MODE (TREE_TYPE (exp)),
5635 SUBREG_PROMOTED_SIGN (target));
5636
5637 exp = fold_convert_loc (loc, ntype, exp);
5638 }
5639
5640 exp = fold_convert_loc (loc, lang_hooks.types.type_for_mode
5641 (inner_mode, SUBREG_PROMOTED_SIGN (target)),
5642 exp);
5643
5644 inner_target = SUBREG_REG (target);
5645 }
5646
5647 temp = expand_expr (exp, inner_target, VOIDmode,
5648 call_param_p ? EXPAND_STACK_PARM : EXPAND_NORMAL);
5649
5650
5651 /* If TEMP is a VOIDmode constant, use convert_modes to make
5652 sure that we properly convert it. */
5653 if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode)
5654 {
5655 temp = convert_modes (outer_mode, TYPE_MODE (TREE_TYPE (exp)),
5656 temp, SUBREG_PROMOTED_SIGN (target));
5657 temp = convert_modes (inner_mode, outer_mode, temp,
5658 SUBREG_PROMOTED_SIGN (target));
5659 }
5660
5661 convert_move (SUBREG_REG (target), temp,
5662 SUBREG_PROMOTED_SIGN (target));
5663
5664 return NULL_RTX;
5665 }
5666 else if ((TREE_CODE (exp) == STRING_CST
5667 || (TREE_CODE (exp) == MEM_REF
5668 && TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
5669 && TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
5670 == STRING_CST
5671 && integer_zerop (TREE_OPERAND (exp, 1))))
5672 && !nontemporal && !call_param_p
5673 && MEM_P (target))
5674 {
5675 /* Optimize initialization of an array with a STRING_CST. */
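      /* As a purely illustrative example, this path covers initializers
	 such as

	   char buf[64] = "abc";

	 where the STRING_CST is shorter than the array: either the whole
	 array (string bytes plus zero padding) is written by pieces, or
	 the string part is copied and the remaining tail of the array is
	 cleared with clear_storage below.  */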
5676 HOST_WIDE_INT exp_len, str_copy_len;
5677 rtx dest_mem;
5678 tree str = TREE_CODE (exp) == STRING_CST
5679 ? exp : TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
5680
5681 exp_len = int_expr_size (exp);
5682 if (exp_len <= 0)
5683 goto normal_expr;
5684
5685 if (TREE_STRING_LENGTH (str) <= 0)
5686 goto normal_expr;
5687
5688 if (can_store_by_pieces (exp_len, string_cst_read_str, (void *) str,
5689 MEM_ALIGN (target), false))
5690 {
5691 store_by_pieces (target, exp_len, string_cst_read_str, (void *) str,
5692 MEM_ALIGN (target), false, RETURN_BEGIN);
5693 return NULL_RTX;
5694 }
5695
5696 str_copy_len = TREE_STRING_LENGTH (str);
5697 if ((STORE_MAX_PIECES & (STORE_MAX_PIECES - 1)) == 0)
5698 {
5699 str_copy_len += STORE_MAX_PIECES - 1;
5700 str_copy_len &= ~(STORE_MAX_PIECES - 1);
5701 }
5702 if (str_copy_len >= exp_len)
5703 goto normal_expr;
5704
5705 if (!can_store_by_pieces (str_copy_len, string_cst_read_str,
5706 (void *) str, MEM_ALIGN (target), false))
5707 goto normal_expr;
5708
5709 dest_mem = store_by_pieces (target, str_copy_len, string_cst_read_str,
5710 (void *) str, MEM_ALIGN (target), false,
5711 RETURN_END);
5712 clear_storage (adjust_address_1 (dest_mem, BLKmode, 0, 1, 1, 0,
5713 exp_len - str_copy_len),
5714 GEN_INT (exp_len - str_copy_len), BLOCK_OP_NORMAL);
5715 return NULL_RTX;
5716 }
5717 else
5718 {
5719 rtx tmp_target;
5720
5721 normal_expr:
5722 /* If we want to use a nontemporal or a reverse order store, force the
5723 value into a register first. */
5724 tmp_target = nontemporal || reverse ? NULL_RTX : target;
5725 temp = expand_expr_real (exp, tmp_target, GET_MODE (target),
5726 (call_param_p
5727 ? EXPAND_STACK_PARM : EXPAND_NORMAL),
5728 &alt_rtl, false);
5729 }
5730
5731 /* If TEMP is a VOIDmode constant and the mode of the type of EXP is not
5732 the same as that of TARGET, adjust the constant. This is needed, for
5733 example, in case it is a CONST_DOUBLE or CONST_WIDE_INT and we want
5734 only a word-sized value. */
5735 if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode
5736 && TREE_CODE (exp) != ERROR_MARK
5737 && GET_MODE (target) != TYPE_MODE (TREE_TYPE (exp)))
5738 {
5739 if (GET_MODE_CLASS (GET_MODE (target))
5740 != GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (exp)))
5741 && known_eq (GET_MODE_BITSIZE (GET_MODE (target)),
5742 GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (exp)))))
5743 {
5744 rtx t = simplify_gen_subreg (GET_MODE (target), temp,
5745 TYPE_MODE (TREE_TYPE (exp)), 0);
5746 if (t)
5747 temp = t;
5748 }
5749 if (GET_MODE (temp) == VOIDmode)
5750 temp = convert_modes (GET_MODE (target), TYPE_MODE (TREE_TYPE (exp)),
5751 temp, TYPE_UNSIGNED (TREE_TYPE (exp)));
5752 }
5753
5754 /* If value was not generated in the target, store it there.
5755 Convert the value to TARGET's type first if necessary and emit the
5756 pending increments that have been queued when expanding EXP.
5757 Note that we cannot emit the whole queue blindly because this will
5758 effectively disable the POST_INC optimization later.
5759
5760 If TEMP and TARGET compare equal according to rtx_equal_p, but
5761 one or both of them are volatile memory refs, we have to distinguish
5762 two cases:
5763 - expand_expr has used TARGET. In this case, we must not generate
5764 another copy. This can be detected by TARGET being equal according
5765 to == .
5766 - expand_expr has not used TARGET - that means that the source just
5767 happens to have the same RTX form. Since temp will have been created
5768 by expand_expr, it will compare unequal according to == .
5769 We must generate a copy in this case, to reach the correct number
5770 of volatile memory references. */
5771
5772 if ((! rtx_equal_p (temp, target)
5773 || (temp != target && (side_effects_p (temp)
5774 || side_effects_p (target))))
5775 && TREE_CODE (exp) != ERROR_MARK
5776 /* If store_expr stores a DECL whose DECL_RTL(exp) == TARGET,
5777 but TARGET is not valid memory reference, TEMP will differ
5778 from TARGET although it is really the same location. */
5779 && !(alt_rtl
5780 && rtx_equal_p (alt_rtl, target)
5781 && !side_effects_p (alt_rtl)
5782 && !side_effects_p (target))
5783 /* If there's nothing to copy, don't bother. Don't call
5784 expr_size unless necessary, because some front ends' (e.g. C++)
5785 expr_size hook must not be given objects that are not
5786 supposed to be bit-copied or bit-initialized. */
5787 && expr_size (exp) != const0_rtx)
5788 {
5789 if (GET_MODE (temp) != GET_MODE (target) && GET_MODE (temp) != VOIDmode)
5790 {
5791 if (GET_MODE (target) == BLKmode)
5792 {
5793 /* Handle calls that return BLKmode values in registers. */
5794 if (REG_P (temp) && TREE_CODE (exp) == CALL_EXPR)
5795 copy_blkmode_from_reg (target, temp, TREE_TYPE (exp));
5796 else
5797 store_bit_field (target,
5798 INTVAL (expr_size (exp)) * BITS_PER_UNIT,
5799 0, 0, 0, GET_MODE (temp), temp, reverse);
5800 }
5801 else
5802 convert_move (target, temp, TYPE_UNSIGNED (TREE_TYPE (exp)));
5803 }
5804
5805 else if (GET_MODE (temp) == BLKmode && TREE_CODE (exp) == STRING_CST)
5806 {
5807 /* Handle copying a string constant into an array. The string
5808 constant may be shorter than the array. So copy just the string's
5809 actual length, and clear the rest. First get the size of the data
5810 type of the string, which is actually the size of the target. */
5811 rtx size = expr_size (exp);
5812
5813 if (CONST_INT_P (size)
5814 && INTVAL (size) < TREE_STRING_LENGTH (exp))
5815 emit_block_move (target, temp, size,
5816 (call_param_p
5817 ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
5818 else
5819 {
5820 machine_mode pointer_mode
5821 = targetm.addr_space.pointer_mode (MEM_ADDR_SPACE (target));
5822 machine_mode address_mode = get_address_mode (target);
5823
5824 /* Compute the size of the data to copy from the string. */
5825 tree copy_size
5826 = size_binop_loc (loc, MIN_EXPR,
5827 make_tree (sizetype, size),
5828 size_int (TREE_STRING_LENGTH (exp)));
5829 rtx copy_size_rtx
5830 = expand_expr (copy_size, NULL_RTX, VOIDmode,
5831 (call_param_p
5832 ? EXPAND_STACK_PARM : EXPAND_NORMAL));
5833 rtx_code_label *label = 0;
5834
5835 /* Copy that much. */
5836 copy_size_rtx = convert_to_mode (pointer_mode, copy_size_rtx,
5837 TYPE_UNSIGNED (sizetype));
5838 emit_block_move (target, temp, copy_size_rtx,
5839 (call_param_p
5840 ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
5841
5842 /* Figure out how much is left in TARGET that we have to clear.
5843 Do all calculations in pointer_mode. */
5844 poly_int64 const_copy_size;
5845 if (poly_int_rtx_p (copy_size_rtx, &const_copy_size))
5846 {
5847 size = plus_constant (address_mode, size, -const_copy_size);
5848 target = adjust_address (target, BLKmode, const_copy_size);
5849 }
5850 else
5851 {
5852 size = expand_binop (TYPE_MODE (sizetype), sub_optab, size,
5853 copy_size_rtx, NULL_RTX, 0,
5854 OPTAB_LIB_WIDEN);
5855
5856 if (GET_MODE (copy_size_rtx) != address_mode)
5857 copy_size_rtx = convert_to_mode (address_mode,
5858 copy_size_rtx,
5859 TYPE_UNSIGNED (sizetype));
5860
5861 target = offset_address (target, copy_size_rtx,
5862 highest_pow2_factor (copy_size));
5863 label = gen_label_rtx ();
5864 emit_cmp_and_jump_insns (size, const0_rtx, LT, NULL_RTX,
5865 GET_MODE (size), 0, label);
5866 }
5867
5868 if (size != const0_rtx)
5869 clear_storage (target, size, BLOCK_OP_NORMAL);
5870
5871 if (label)
5872 emit_label (label);
5873 }
5874 }
5875 /* Handle calls that return values in multiple non-contiguous locations.
5876 The Irix 6 ABI has examples of this. */
5877 else if (GET_CODE (target) == PARALLEL)
5878 {
5879 if (GET_CODE (temp) == PARALLEL)
5880 emit_group_move (target, temp);
5881 else
5882 emit_group_load (target, temp, TREE_TYPE (exp),
5883 int_size_in_bytes (TREE_TYPE (exp)));
5884 }
5885 else if (GET_CODE (temp) == PARALLEL)
5886 emit_group_store (target, temp, TREE_TYPE (exp),
5887 int_size_in_bytes (TREE_TYPE (exp)));
5888 else if (GET_MODE (temp) == BLKmode)
5889 emit_block_move (target, temp, expr_size (exp),
5890 (call_param_p
5891 ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
5892 /* If we emit a nontemporal store, there is nothing else to do. */
5893 else if (nontemporal && emit_storent_insn (target, temp))
5894 ;
5895 else
5896 {
5897 if (reverse)
5898 temp = flip_storage_order (GET_MODE (target), temp);
5899 temp = force_operand (temp, target);
5900 if (temp != target)
5901 emit_move_insn (target, temp);
5902 }
5903 }
5904
5905 return NULL_RTX;
5906 }
5907 \f
5908 /* Return true if field F of structure TYPE is a flexible array. */
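/* For instance, in the hypothetical user-level declaration

     struct packet { int len; char payload[]; };

   PAYLOAD is the last field, has an array type whose domain has a zero
   lower bound and no upper bound, and the enclosing struct still has a
   known constant size, so the test below accepts it.  A fixed-size
   trailing array such as char payload[4] is rejected because its domain
   has a maximum value.  */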
5909
5910 static bool
5911 flexible_array_member_p (const_tree f, const_tree type)
5912 {
5913 const_tree tf;
5914
5915 tf = TREE_TYPE (f);
5916 return (DECL_CHAIN (f) == NULL
5917 && TREE_CODE (tf) == ARRAY_TYPE
5918 && TYPE_DOMAIN (tf)
5919 && TYPE_MIN_VALUE (TYPE_DOMAIN (tf))
5920 && integer_zerop (TYPE_MIN_VALUE (TYPE_DOMAIN (tf)))
5921 && !TYPE_MAX_VALUE (TYPE_DOMAIN (tf))
5922 && int_size_in_bytes (type) >= 0);
5923 }
5924
5925 /* If FOR_CTOR_P, return the number of top-level elements that a constructor
5926 must have in order for it to completely initialize a value of type TYPE.
5927 Return -1 if the number isn't known.
5928
5929 If !FOR_CTOR_P, return an estimate of the number of scalars in TYPE. */
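/* As a hypothetical example, for

     struct s { int a; short b[3]; };

   the !FOR_CTOR_P estimate is 4 (A plus the three elements of B),
   whereas a constructor needs only 2 top-level elements (one for A and
   one for B) to completely initialize the struct, so the FOR_CTOR_P
   count is 2.  */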
5930
5931 static HOST_WIDE_INT
5932 count_type_elements (const_tree type, bool for_ctor_p)
5933 {
5934 switch (TREE_CODE (type))
5935 {
5936 case ARRAY_TYPE:
5937 {
5938 tree nelts;
5939
5940 nelts = array_type_nelts (type);
5941 if (nelts && tree_fits_uhwi_p (nelts))
5942 {
5943 unsigned HOST_WIDE_INT n;
5944
5945 n = tree_to_uhwi (nelts) + 1;
5946 if (n == 0 || for_ctor_p)
5947 return n;
5948 else
5949 return n * count_type_elements (TREE_TYPE (type), false);
5950 }
5951 return for_ctor_p ? -1 : 1;
5952 }
5953
5954 case RECORD_TYPE:
5955 {
5956 unsigned HOST_WIDE_INT n;
5957 tree f;
5958
5959 n = 0;
5960 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
5961 if (TREE_CODE (f) == FIELD_DECL)
5962 {
5963 if (!for_ctor_p)
5964 n += count_type_elements (TREE_TYPE (f), false);
5965 else if (!flexible_array_member_p (f, type))
5966 /* Don't count flexible arrays, which are not supposed
5967 to be initialized. */
5968 n += 1;
5969 }
5970
5971 return n;
5972 }
5973
5974 case UNION_TYPE:
5975 case QUAL_UNION_TYPE:
5976 {
5977 tree f;
5978 HOST_WIDE_INT n, m;
5979
5980 gcc_assert (!for_ctor_p);
5981 /* Estimate the number of scalars in each field and pick the
5982 maximum. Other estimates would do instead; the idea is simply
5983 to make sure that the estimate is not sensitive to the ordering
5984 of the fields. */
5985 n = 1;
5986 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
5987 if (TREE_CODE (f) == FIELD_DECL)
5988 {
5989 m = count_type_elements (TREE_TYPE (f), false);
5990 /* If the field doesn't span the whole union, add an extra
5991 scalar for the rest. */
5992 if (simple_cst_equal (TYPE_SIZE (TREE_TYPE (f)),
5993 TYPE_SIZE (type)) != 1)
5994 m++;
5995 if (n < m)
5996 n = m;
5997 }
5998 return n;
5999 }
6000
6001 case COMPLEX_TYPE:
6002 return 2;
6003
6004 case VECTOR_TYPE:
6005 {
6006 unsigned HOST_WIDE_INT nelts;
6007 if (TYPE_VECTOR_SUBPARTS (type).is_constant (&nelts))
6008 return nelts;
6009 else
6010 return -1;
6011 }
6012
6013 case INTEGER_TYPE:
6014 case REAL_TYPE:
6015 case FIXED_POINT_TYPE:
6016 case ENUMERAL_TYPE:
6017 case BOOLEAN_TYPE:
6018 case POINTER_TYPE:
6019 case OFFSET_TYPE:
6020 case REFERENCE_TYPE:
6021 case NULLPTR_TYPE:
6022 return 1;
6023
6024 case ERROR_MARK:
6025 return 0;
6026
6027 case VOID_TYPE:
6028 case METHOD_TYPE:
6029 case FUNCTION_TYPE:
6030 case LANG_TYPE:
6031 default:
6032 gcc_unreachable ();
6033 }
6034 }
6035
6036 /* Helper for categorize_ctor_elements. Identical interface. */
6037
6038 static bool
6039 categorize_ctor_elements_1 (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
6040 HOST_WIDE_INT *p_unique_nz_elts,
6041 HOST_WIDE_INT *p_init_elts, bool *p_complete)
6042 {
6043 unsigned HOST_WIDE_INT idx;
6044 HOST_WIDE_INT nz_elts, unique_nz_elts, init_elts, num_fields;
6045 tree value, purpose, elt_type;
6046
6047 /* Whether CTOR is a valid constant initializer, in accordance with what
6048 initializer_constant_valid_p does. If inferred from the constructor
6049 elements, true until proven otherwise. */
6050 bool const_from_elts_p = constructor_static_from_elts_p (ctor);
6051 bool const_p = const_from_elts_p ? true : TREE_STATIC (ctor);
6052
6053 nz_elts = 0;
6054 unique_nz_elts = 0;
6055 init_elts = 0;
6056 num_fields = 0;
6057 elt_type = NULL_TREE;
6058
6059 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), idx, purpose, value)
6060 {
6061 HOST_WIDE_INT mult = 1;
6062
6063 if (purpose && TREE_CODE (purpose) == RANGE_EXPR)
6064 {
6065 tree lo_index = TREE_OPERAND (purpose, 0);
6066 tree hi_index = TREE_OPERAND (purpose, 1);
6067
6068 if (tree_fits_uhwi_p (lo_index) && tree_fits_uhwi_p (hi_index))
6069 mult = (tree_to_uhwi (hi_index)
6070 - tree_to_uhwi (lo_index) + 1);
6071 }
6072 num_fields += mult;
6073 elt_type = TREE_TYPE (value);
6074
6075 switch (TREE_CODE (value))
6076 {
6077 case CONSTRUCTOR:
6078 {
6079 HOST_WIDE_INT nz = 0, unz = 0, ic = 0;
6080
6081 bool const_elt_p = categorize_ctor_elements_1 (value, &nz, &unz,
6082 &ic, p_complete);
6083
6084 nz_elts += mult * nz;
6085 unique_nz_elts += unz;
6086 init_elts += mult * ic;
6087
6088 if (const_from_elts_p && const_p)
6089 const_p = const_elt_p;
6090 }
6091 break;
6092
6093 case INTEGER_CST:
6094 case REAL_CST:
6095 case FIXED_CST:
6096 if (!initializer_zerop (value))
6097 {
6098 nz_elts += mult;
6099 unique_nz_elts++;
6100 }
6101 init_elts += mult;
6102 break;
6103
6104 case STRING_CST:
6105 nz_elts += mult * TREE_STRING_LENGTH (value);
6106 unique_nz_elts += TREE_STRING_LENGTH (value);
6107 init_elts += mult * TREE_STRING_LENGTH (value);
6108 break;
6109
6110 case COMPLEX_CST:
6111 if (!initializer_zerop (TREE_REALPART (value)))
6112 {
6113 nz_elts += mult;
6114 unique_nz_elts++;
6115 }
6116 if (!initializer_zerop (TREE_IMAGPART (value)))
6117 {
6118 nz_elts += mult;
6119 unique_nz_elts++;
6120 }
6121 init_elts += 2 * mult;
6122 break;
6123
6124 case VECTOR_CST:
6125 {
6126 /* We can only construct constant-length vectors using
6127 CONSTRUCTOR. */
6128 unsigned int nunits = VECTOR_CST_NELTS (value).to_constant ();
6129 for (unsigned int i = 0; i < nunits; ++i)
6130 {
6131 tree v = VECTOR_CST_ELT (value, i);
6132 if (!initializer_zerop (v))
6133 {
6134 nz_elts += mult;
6135 unique_nz_elts++;
6136 }
6137 init_elts += mult;
6138 }
6139 }
6140 break;
6141
6142 default:
6143 {
6144 HOST_WIDE_INT tc = count_type_elements (elt_type, false);
6145 nz_elts += mult * tc;
6146 unique_nz_elts += tc;
6147 init_elts += mult * tc;
6148
6149 if (const_from_elts_p && const_p)
6150 const_p
6151 = initializer_constant_valid_p (value,
6152 elt_type,
6153 TYPE_REVERSE_STORAGE_ORDER
6154 (TREE_TYPE (ctor)))
6155 != NULL_TREE;
6156 }
6157 break;
6158 }
6159 }
6160
6161 if (*p_complete && !complete_ctor_at_level_p (TREE_TYPE (ctor),
6162 num_fields, elt_type))
6163 *p_complete = false;
6164
6165 *p_nz_elts += nz_elts;
6166 *p_unique_nz_elts += unique_nz_elts;
6167 *p_init_elts += init_elts;
6168
6169 return const_p;
6170 }
6171
6172 /* Examine CTOR to discover:
6173 * how many scalar fields are set to nonzero values,
6174 and place it in *P_NZ_ELTS;
6175 * the same, but counting RANGE_EXPRs as a multiplier of 1 instead of
6176 high - low + 1 (this can be useful for callers to determine ctors
6177 that could be cheaply initialized with (perhaps nested) loops
6178 rather than copied from huge read-only data),
6179 and place it in *P_UNIQUE_NZ_ELTS;
6180 * how many scalar fields in total are in CTOR,
6181 and place it in *P_ELT_COUNT.
6182 * whether the constructor is complete -- in the sense that every
6183 meaningful byte is explicitly given a value --
6184 and place it in *P_COMPLETE.
6185
6186 Return whether or not CTOR is a valid static constant initializer, the same
6187 as "initializer_constant_valid_p (CTOR, TREE_TYPE (CTOR)) != 0". */
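/* For instance, assuming the front end records all four elements of the
   hypothetical initializer

     int a[4] = { 0, 5, 7, 0 };

   this returns *P_NZ_ELTS == *P_UNIQUE_NZ_ELTS == 2, *P_INIT_ELTS == 4
   and *P_COMPLETE == true, since every element of the array is given a
   value.  */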
6188
6189 bool
6190 categorize_ctor_elements (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
6191 HOST_WIDE_INT *p_unique_nz_elts,
6192 HOST_WIDE_INT *p_init_elts, bool *p_complete)
6193 {
6194 *p_nz_elts = 0;
6195 *p_unique_nz_elts = 0;
6196 *p_init_elts = 0;
6197 *p_complete = true;
6198
6199 return categorize_ctor_elements_1 (ctor, p_nz_elts, p_unique_nz_elts,
6200 p_init_elts, p_complete);
6201 }
6202
6203 /* TYPE is initialized by a constructor with NUM_ELTS elements, the last
6204 of which had type LAST_TYPE. Each element was itself a complete
6205 initializer, in the sense that every meaningful byte was explicitly
6206 given a value. Return true if the same is true for the constructor
6207 as a whole. */
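/* For a union this reduces to checking that the single initialized member
   covers the whole union.  E.g. in the hypothetical

     union u { char c; int i; } x = { .i = 1 };

   the constructor is complete, whereas initializing only C would leave
   the remaining bytes of the union unspecified.  */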
6208
6209 bool
6210 complete_ctor_at_level_p (const_tree type, HOST_WIDE_INT num_elts,
6211 const_tree last_type)
6212 {
6213 if (TREE_CODE (type) == UNION_TYPE
6214 || TREE_CODE (type) == QUAL_UNION_TYPE)
6215 {
6216 if (num_elts == 0)
6217 return false;
6218
6219 gcc_assert (num_elts == 1 && last_type);
6220
6221 /* ??? We could look at each element of the union, and find the
6222 largest element, which would avoid comparing the size of the
6223 initialized element against any tail padding in the union.
6224 Doesn't seem worth the effort... */
6225 return simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (last_type)) == 1;
6226 }
6227
6228 return count_type_elements (type, true) == num_elts;
6229 }
6230
6231 /* Return 1 if EXP contains mostly (3/4) zeros. */
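/* E.g. the hypothetical

     int a[8] = { [2] = 5 };

   counts as mostly zeros: only one of its elements is nonzero, and an
   incomplete constructor (one that leaves elements unmentioned) is
   always treated as mostly zero, since the missing elements are
   implicitly zero.  */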
6232
6233 static int
6234 mostly_zeros_p (const_tree exp)
6235 {
6236 if (TREE_CODE (exp) == CONSTRUCTOR)
6237 {
6238 HOST_WIDE_INT nz_elts, unz_elts, init_elts;
6239 bool complete_p;
6240
6241 categorize_ctor_elements (exp, &nz_elts, &unz_elts, &init_elts,
6242 &complete_p);
6243 return !complete_p || nz_elts < init_elts / 4;
6244 }
6245
6246 return initializer_zerop (exp);
6247 }
6248
6249 /* Return 1 if EXP contains all zeros. */
6250
6251 static int
6252 all_zeros_p (const_tree exp)
6253 {
6254 if (TREE_CODE (exp) == CONSTRUCTOR)
6255 {
6256 HOST_WIDE_INT nz_elts, unz_elts, init_elts;
6257 bool complete_p;
6258
6259 categorize_ctor_elements (exp, &nz_elts, &unz_elts, &init_elts,
6260 &complete_p);
6261 return nz_elts == 0;
6262 }
6263
6264 return initializer_zerop (exp);
6265 }
6266 \f
6267 /* Helper function for store_constructor.
6268 TARGET, BITSIZE, BITPOS, MODE, EXP are as for store_field.
6269 CLEARED is as for store_constructor.
6270 ALIAS_SET is the alias set to use for any stores.
6271 If REVERSE is true, the store is to be done in reverse order.
6272
6273 This provides a recursive shortcut back to store_constructor when it isn't
6274 necessary to go through store_field. This is so that we can pass through
6275 the cleared field to let store_constructor know that we may not have to
6276 clear a substructure if the outer structure has already been cleared. */
6277
6278 static void
6279 store_constructor_field (rtx target, poly_uint64 bitsize, poly_int64 bitpos,
6280 poly_uint64 bitregion_start,
6281 poly_uint64 bitregion_end,
6282 machine_mode mode,
6283 tree exp, int cleared,
6284 alias_set_type alias_set, bool reverse)
6285 {
6286 poly_int64 bytepos;
6287 poly_uint64 bytesize;
6288 if (TREE_CODE (exp) == CONSTRUCTOR
6289 /* We can only call store_constructor recursively if the size and
6290 bit position are on a byte boundary. */
6291 && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
6292 && maybe_ne (bitsize, 0U)
6293 && multiple_p (bitsize, BITS_PER_UNIT, &bytesize)
6294 /* If we have a nonzero bitpos for a register target, then we just
6295 let store_field do the bitfield handling. This is unlikely to
6296 generate unnecessary clear instructions anyway. */
6297 && (known_eq (bitpos, 0) || MEM_P (target)))
6298 {
6299 if (MEM_P (target))
6300 {
6301 machine_mode target_mode = GET_MODE (target);
6302 if (target_mode != BLKmode
6303 && !multiple_p (bitpos, GET_MODE_ALIGNMENT (target_mode)))
6304 target_mode = BLKmode;
6305 target = adjust_address (target, target_mode, bytepos);
6306 }
6307
6308
6309 /* Update the alias set, if required. */
6310 if (MEM_P (target) && ! MEM_KEEP_ALIAS_SET_P (target)
6311 && MEM_ALIAS_SET (target) != 0)
6312 {
6313 target = copy_rtx (target);
6314 set_mem_alias_set (target, alias_set);
6315 }
6316
6317 store_constructor (exp, target, cleared, bytesize, reverse);
6318 }
6319 else
6320 store_field (target, bitsize, bitpos, bitregion_start, bitregion_end, mode,
6321 exp, alias_set, false, reverse);
6322 }
6323
6324
6325 /* Returns the number of FIELD_DECLs in TYPE. */
6326
6327 static int
6328 fields_length (const_tree type)
6329 {
6330 tree t = TYPE_FIELDS (type);
6331 int count = 0;
6332
6333 for (; t; t = DECL_CHAIN (t))
6334 if (TREE_CODE (t) == FIELD_DECL)
6335 ++count;
6336
6337 return count;
6338 }
6339
6340
6341 /* Store the value of constructor EXP into the rtx TARGET.
6342 TARGET is either a REG or a MEM; we know it cannot conflict, since
6343 safe_from_p has been called.
6344 CLEARED is true if TARGET is known to have been zeroed.
6345 SIZE is the number of bytes of TARGET we are allowed to modify: this
6346 may not be the same as the size of EXP if we are assigning to a field
6347 which has been packed to exclude padding bits.
6348 If REVERSE is true, the store is to be done in reverse order. */
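/* As an illustration, for a hypothetical

     struct s { int a[16]; int b; } x = { .b = 1 };

   the constructor has fewer elements than the type has fields, so the
   code below first clears the whole object with clear_storage and then
   stores only the explicitly nonzero element, instead of emitting one
   store per field.  */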
6349
6350 static void
6351 store_constructor (tree exp, rtx target, int cleared, poly_int64 size,
6352 bool reverse)
6353 {
6354 tree type = TREE_TYPE (exp);
6355 HOST_WIDE_INT exp_size = int_size_in_bytes (type);
6356 poly_int64 bitregion_end = known_gt (size, 0) ? size * BITS_PER_UNIT - 1 : 0;
6357
6358 switch (TREE_CODE (type))
6359 {
6360 case RECORD_TYPE:
6361 case UNION_TYPE:
6362 case QUAL_UNION_TYPE:
6363 {
6364 unsigned HOST_WIDE_INT idx;
6365 tree field, value;
6366
6367 /* The storage order is specified for every aggregate type. */
6368 reverse = TYPE_REVERSE_STORAGE_ORDER (type);
6369
6370 /* If size is zero or the target is already cleared, do nothing. */
6371 if (known_eq (size, 0) || cleared)
6372 cleared = 1;
6373 /* We either clear the aggregate or indicate the value is dead. */
6374 else if ((TREE_CODE (type) == UNION_TYPE
6375 || TREE_CODE (type) == QUAL_UNION_TYPE)
6376 && ! CONSTRUCTOR_ELTS (exp))
6377 /* If the constructor is empty, clear the union. */
6378 {
6379 clear_storage (target, expr_size (exp), BLOCK_OP_NORMAL);
6380 cleared = 1;
6381 }
6382
6383 /* If we are building a static constructor into a register,
6384 set the initial value as zero so we can fold the value into
6385 a constant. But if more than one register is involved,
6386 this probably loses. */
6387 else if (REG_P (target) && TREE_STATIC (exp)
6388 && known_le (GET_MODE_SIZE (GET_MODE (target)),
6389 REGMODE_NATURAL_SIZE (GET_MODE (target))))
6390 {
6391 emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
6392 cleared = 1;
6393 }
6394
6395 /* If the constructor has fewer fields than the structure or
6396 if we are initializing the structure to mostly zeros, clear
6397 the whole structure first. Don't do this if TARGET is a
6398 register whose mode size isn't equal to SIZE since
6399 clear_storage can't handle this case. */
6400 else if (known_size_p (size)
6401 && (((int) CONSTRUCTOR_NELTS (exp) != fields_length (type))
6402 || mostly_zeros_p (exp))
6403 && (!REG_P (target)
6404 || known_eq (GET_MODE_SIZE (GET_MODE (target)), size)))
6405 {
6406 clear_storage (target, gen_int_mode (size, Pmode),
6407 BLOCK_OP_NORMAL);
6408 cleared = 1;
6409 }
6410
6411 if (REG_P (target) && !cleared)
6412 emit_clobber (target);
6413
6414 /* Store each element of the constructor into the
6415 corresponding field of TARGET. */
6416 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), idx, field, value)
6417 {
6418 machine_mode mode;
6419 HOST_WIDE_INT bitsize;
6420 HOST_WIDE_INT bitpos = 0;
6421 tree offset;
6422 rtx to_rtx = target;
6423
6424 /* Just ignore missing fields. We cleared the whole
6425 structure, above, if any fields are missing. */
6426 if (field == 0)
6427 continue;
6428
6429 if (cleared && initializer_zerop (value))
6430 continue;
6431
6432 if (tree_fits_uhwi_p (DECL_SIZE (field)))
6433 bitsize = tree_to_uhwi (DECL_SIZE (field));
6434 else
6435 gcc_unreachable ();
6436
6437 mode = DECL_MODE (field);
6438 if (DECL_BIT_FIELD (field))
6439 mode = VOIDmode;
6440
6441 offset = DECL_FIELD_OFFSET (field);
6442 if (tree_fits_shwi_p (offset)
6443 && tree_fits_shwi_p (bit_position (field)))
6444 {
6445 bitpos = int_bit_position (field);
6446 offset = NULL_TREE;
6447 }
6448 else
6449 gcc_unreachable ();
6450
6451 /* If this initializes a field that is smaller than a
6452 word, at the start of a word, try to widen it to a full
6453 word. This special case allows us to output C++ member
6454 function initializations in a form that the optimizers
6455 can understand. */
6456 if (WORD_REGISTER_OPERATIONS
6457 && REG_P (target)
6458 && bitsize < BITS_PER_WORD
6459 && bitpos % BITS_PER_WORD == 0
6460 && GET_MODE_CLASS (mode) == MODE_INT
6461 && TREE_CODE (value) == INTEGER_CST
6462 && exp_size >= 0
6463 && bitpos + BITS_PER_WORD <= exp_size * BITS_PER_UNIT)
6464 {
6465 tree type = TREE_TYPE (value);
6466
6467 if (TYPE_PRECISION (type) < BITS_PER_WORD)
6468 {
6469 type = lang_hooks.types.type_for_mode
6470 (word_mode, TYPE_UNSIGNED (type));
6471 value = fold_convert (type, value);
6472 /* Make sure the bits beyond the original bitsize are zero
6473 so that we can correctly avoid extra zeroing stores in
6474 later constructor elements. */
6475 tree bitsize_mask
6476 = wide_int_to_tree (type, wi::mask (bitsize, false,
6477 BITS_PER_WORD));
6478 value = fold_build2 (BIT_AND_EXPR, type, value, bitsize_mask);
6479 }
6480
6481 if (BYTES_BIG_ENDIAN)
6482 value
6483 = fold_build2 (LSHIFT_EXPR, type, value,
6484 build_int_cst (type,
6485 BITS_PER_WORD - bitsize));
6486 bitsize = BITS_PER_WORD;
6487 mode = word_mode;
6488 }
6489
6490 if (MEM_P (to_rtx) && !MEM_KEEP_ALIAS_SET_P (to_rtx)
6491 && DECL_NONADDRESSABLE_P (field))
6492 {
6493 to_rtx = copy_rtx (to_rtx);
6494 MEM_KEEP_ALIAS_SET_P (to_rtx) = 1;
6495 }
6496
6497 store_constructor_field (to_rtx, bitsize, bitpos,
6498 0, bitregion_end, mode,
6499 value, cleared,
6500 get_alias_set (TREE_TYPE (field)),
6501 reverse);
6502 }
6503 break;
6504 }
6505 case ARRAY_TYPE:
6506 {
6507 tree value, index;
6508 unsigned HOST_WIDE_INT i;
6509 int need_to_clear;
6510 tree domain;
6511 tree elttype = TREE_TYPE (type);
6512 int const_bounds_p;
6513 HOST_WIDE_INT minelt = 0;
6514 HOST_WIDE_INT maxelt = 0;
6515
6516 /* The storage order is specified for every aggregate type. */
6517 reverse = TYPE_REVERSE_STORAGE_ORDER (type);
6518
6519 domain = TYPE_DOMAIN (type);
6520 const_bounds_p = (TYPE_MIN_VALUE (domain)
6521 && TYPE_MAX_VALUE (domain)
6522 && tree_fits_shwi_p (TYPE_MIN_VALUE (domain))
6523 && tree_fits_shwi_p (TYPE_MAX_VALUE (domain)));
6524
6525 /* If we have constant bounds for the range of the type, get them. */
6526 if (const_bounds_p)
6527 {
6528 minelt = tree_to_shwi (TYPE_MIN_VALUE (domain));
6529 maxelt = tree_to_shwi (TYPE_MAX_VALUE (domain));
6530 }
6531
6532 /* If the constructor has fewer elements than the array, clear
6533 the whole array first. Similarly if this is a static
6534 constructor of a non-BLKmode object. */
6535 if (cleared)
6536 need_to_clear = 0;
6537 else if (REG_P (target) && TREE_STATIC (exp))
6538 need_to_clear = 1;
6539 else
6540 {
6541 unsigned HOST_WIDE_INT idx;
6542 tree index, value;
6543 HOST_WIDE_INT count = 0, zero_count = 0;
6544 need_to_clear = ! const_bounds_p;
6545
6546 /* This loop is a more accurate version of the loop in
6547 mostly_zeros_p (it handles RANGE_EXPR in an index). It
6548 is also needed to check for missing elements. */
6549 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), idx, index, value)
6550 {
6551 HOST_WIDE_INT this_node_count;
6552
6553 if (need_to_clear)
6554 break;
6555
6556 if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR)
6557 {
6558 tree lo_index = TREE_OPERAND (index, 0);
6559 tree hi_index = TREE_OPERAND (index, 1);
6560
6561 if (! tree_fits_uhwi_p (lo_index)
6562 || ! tree_fits_uhwi_p (hi_index))
6563 {
6564 need_to_clear = 1;
6565 break;
6566 }
6567
6568 this_node_count = (tree_to_uhwi (hi_index)
6569 - tree_to_uhwi (lo_index) + 1);
6570 }
6571 else
6572 this_node_count = 1;
6573
6574 count += this_node_count;
6575 if (mostly_zeros_p (value))
6576 zero_count += this_node_count;
6577 }
6578
6579 /* Clear the entire array first if there are any missing
6580 elements, or if the incidence of zero elements is >=
6581 75%. */
6582 if (! need_to_clear
6583 && (count < maxelt - minelt + 1
6584 || 4 * zero_count >= 3 * count))
6585 need_to_clear = 1;
6586 }
6587
6588 if (need_to_clear && maybe_gt (size, 0))
6589 {
6590 if (REG_P (target))
6591 emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
6592 else
6593 clear_storage (target, gen_int_mode (size, Pmode),
6594 BLOCK_OP_NORMAL);
6595 cleared = 1;
6596 }
6597
6598 if (!cleared && REG_P (target))
6599 /* Inform later passes that the old value is dead. */
6600 emit_clobber (target);
6601
6602 /* Store each element of the constructor into the
6603 corresponding element of TARGET, determined by counting the
6604 elements. */
6605 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), i, index, value)
6606 {
6607 machine_mode mode;
6608 poly_int64 bitsize;
6609 HOST_WIDE_INT bitpos;
6610 rtx xtarget = target;
6611
6612 if (cleared && initializer_zerop (value))
6613 continue;
6614
6615 mode = TYPE_MODE (elttype);
6616 if (mode != BLKmode)
6617 bitsize = GET_MODE_BITSIZE (mode);
6618 else if (!poly_int_tree_p (TYPE_SIZE (elttype), &bitsize))
6619 bitsize = -1;
6620
6621 if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR)
6622 {
6623 tree lo_index = TREE_OPERAND (index, 0);
6624 tree hi_index = TREE_OPERAND (index, 1);
6625 rtx index_r, pos_rtx;
6626 HOST_WIDE_INT lo, hi, count;
6627 tree position;
6628
6629 /* If the range is constant and "small", unroll the loop. */
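		  /* Such a range typically comes from a GNU C designated
		     range initializer, e.g. the hypothetical

		       int a[32] = { [0 ... 3] = 7 };

		     Small constant ranges like this one are expanded as
		     individual stores; larger ranges fall through to the
		     runtime loop emitted in the else arm below.  */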
6630 if (const_bounds_p
6631 && tree_fits_shwi_p (lo_index)
6632 && tree_fits_shwi_p (hi_index)
6633 && (lo = tree_to_shwi (lo_index),
6634 hi = tree_to_shwi (hi_index),
6635 count = hi - lo + 1,
6636 (!MEM_P (target)
6637 || count <= 2
6638 || (tree_fits_uhwi_p (TYPE_SIZE (elttype))
6639 && (tree_to_uhwi (TYPE_SIZE (elttype)) * count
6640 <= 40 * 8)))))
6641 {
6642 lo -= minelt; hi -= minelt;
6643 for (; lo <= hi; lo++)
6644 {
6645 bitpos = lo * tree_to_shwi (TYPE_SIZE (elttype));
6646
6647 if (MEM_P (target)
6648 && !MEM_KEEP_ALIAS_SET_P (target)
6649 && TREE_CODE (type) == ARRAY_TYPE
6650 && TYPE_NONALIASED_COMPONENT (type))
6651 {
6652 target = copy_rtx (target);
6653 MEM_KEEP_ALIAS_SET_P (target) = 1;
6654 }
6655
6656 store_constructor_field
6657 (target, bitsize, bitpos, 0, bitregion_end,
6658 mode, value, cleared,
6659 get_alias_set (elttype), reverse);
6660 }
6661 }
6662 else
6663 {
6664 rtx_code_label *loop_start = gen_label_rtx ();
6665 rtx_code_label *loop_end = gen_label_rtx ();
6666 tree exit_cond;
6667
6668 expand_normal (hi_index);
6669
6670 index = build_decl (EXPR_LOCATION (exp),
6671 VAR_DECL, NULL_TREE, domain);
6672 index_r = gen_reg_rtx (promote_decl_mode (index, NULL));
6673 SET_DECL_RTL (index, index_r);
6674 store_expr (lo_index, index_r, 0, false, reverse);
6675
6676 /* Build the head of the loop. */
6677 do_pending_stack_adjust ();
6678 emit_label (loop_start);
6679
6680 /* Assign value to element index. */
6681 position =
6682 fold_convert (ssizetype,
6683 fold_build2 (MINUS_EXPR,
6684 TREE_TYPE (index),
6685 index,
6686 TYPE_MIN_VALUE (domain)));
6687
6688 position =
6689 size_binop (MULT_EXPR, position,
6690 fold_convert (ssizetype,
6691 TYPE_SIZE_UNIT (elttype)));
6692
6693 pos_rtx = expand_normal (position);
6694 xtarget = offset_address (target, pos_rtx,
6695 highest_pow2_factor (position));
6696 xtarget = adjust_address (xtarget, mode, 0);
6697 if (TREE_CODE (value) == CONSTRUCTOR)
6698 store_constructor (value, xtarget, cleared,
6699 exact_div (bitsize, BITS_PER_UNIT),
6700 reverse);
6701 else
6702 store_expr (value, xtarget, 0, false, reverse);
6703
6704 /* Generate a conditional jump to exit the loop. */
6705 exit_cond = build2 (LT_EXPR, integer_type_node,
6706 index, hi_index);
6707 jumpif (exit_cond, loop_end,
6708 profile_probability::uninitialized ());
6709
6710 /* Update the loop counter, and jump to the head of
6711 the loop. */
6712 expand_assignment (index,
6713 build2 (PLUS_EXPR, TREE_TYPE (index),
6714 index, integer_one_node),
6715 false);
6716
6717 emit_jump (loop_start);
6718
6719 /* Build the end of the loop. */
6720 emit_label (loop_end);
6721 }
6722 }
6723 else if ((index != 0 && ! tree_fits_shwi_p (index))
6724 || ! tree_fits_uhwi_p (TYPE_SIZE (elttype)))
6725 {
6726 tree position;
6727
6728 if (index == 0)
6729 index = ssize_int (1);
6730
6731 if (minelt)
6732 index = fold_convert (ssizetype,
6733 fold_build2 (MINUS_EXPR,
6734 TREE_TYPE (index),
6735 index,
6736 TYPE_MIN_VALUE (domain)));
6737
6738 position =
6739 size_binop (MULT_EXPR, index,
6740 fold_convert (ssizetype,
6741 TYPE_SIZE_UNIT (elttype)));
6742 xtarget = offset_address (target,
6743 expand_normal (position),
6744 highest_pow2_factor (position));
6745 xtarget = adjust_address (xtarget, mode, 0);
6746 store_expr (value, xtarget, 0, false, reverse);
6747 }
6748 else
6749 {
6750 if (index != 0)
6751 bitpos = ((tree_to_shwi (index) - minelt)
6752 * tree_to_uhwi (TYPE_SIZE (elttype)));
6753 else
6754 bitpos = (i * tree_to_uhwi (TYPE_SIZE (elttype)));
6755
6756 if (MEM_P (target) && !MEM_KEEP_ALIAS_SET_P (target)
6757 && TREE_CODE (type) == ARRAY_TYPE
6758 && TYPE_NONALIASED_COMPONENT (type))
6759 {
6760 target = copy_rtx (target);
6761 MEM_KEEP_ALIAS_SET_P (target) = 1;
6762 }
6763 store_constructor_field (target, bitsize, bitpos, 0,
6764 bitregion_end, mode, value,
6765 cleared, get_alias_set (elttype),
6766 reverse);
6767 }
6768 }
6769 break;
6770 }
6771
6772 case VECTOR_TYPE:
6773 {
6774 unsigned HOST_WIDE_INT idx;
6775 constructor_elt *ce;
6776 int i;
6777 int need_to_clear;
6778 insn_code icode = CODE_FOR_nothing;
6779 tree elt;
6780 tree elttype = TREE_TYPE (type);
6781 int elt_size = tree_to_uhwi (TYPE_SIZE (elttype));
6782 machine_mode eltmode = TYPE_MODE (elttype);
6783 HOST_WIDE_INT bitsize;
6784 HOST_WIDE_INT bitpos;
6785 rtvec vector = NULL;
6786 poly_uint64 n_elts;
6787 unsigned HOST_WIDE_INT const_n_elts;
6788 alias_set_type alias;
6789 bool vec_vec_init_p = false;
6790 machine_mode mode = GET_MODE (target);
6791
6792 gcc_assert (eltmode != BLKmode);
6793
6794 /* Try using vec_duplicate_optab for uniform vectors. */
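      /* A uniform vector constructor is one whose elements are all the
	 same value, e.g. the hypothetical GNU C

	   typedef int v4si __attribute__ ((vector_size (16)));
	   v4si v = { x, x, x, x };

	 If the target has a vec_duplicate pattern for the vector mode,
	 a single duplicate of X is cheaper than assembling the vector
	 element by element.  */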
6795 if (!TREE_SIDE_EFFECTS (exp)
6796 && VECTOR_MODE_P (mode)
6797 && eltmode == GET_MODE_INNER (mode)
6798 && ((icode = optab_handler (vec_duplicate_optab, mode))
6799 != CODE_FOR_nothing)
6800 && (elt = uniform_vector_p (exp)))
6801 {
6802 class expand_operand ops[2];
6803 create_output_operand (&ops[0], target, mode);
6804 create_input_operand (&ops[1], expand_normal (elt), eltmode);
6805 expand_insn (icode, 2, ops);
6806 if (!rtx_equal_p (target, ops[0].value))
6807 emit_move_insn (target, ops[0].value);
6808 break;
6809 }
6810
6811 n_elts = TYPE_VECTOR_SUBPARTS (type);
6812 if (REG_P (target)
6813 && VECTOR_MODE_P (mode)
6814 && n_elts.is_constant (&const_n_elts))
6815 {
6816 machine_mode emode = eltmode;
6817
6818 if (CONSTRUCTOR_NELTS (exp)
6819 && (TREE_CODE (TREE_TYPE (CONSTRUCTOR_ELT (exp, 0)->value))
6820 == VECTOR_TYPE))
6821 {
6822 tree etype = TREE_TYPE (CONSTRUCTOR_ELT (exp, 0)->value);
6823 gcc_assert (known_eq (CONSTRUCTOR_NELTS (exp)
6824 * TYPE_VECTOR_SUBPARTS (etype),
6825 n_elts));
6826 emode = TYPE_MODE (etype);
6827 }
6828 icode = convert_optab_handler (vec_init_optab, mode, emode);
6829 if (icode != CODE_FOR_nothing)
6830 {
6831 unsigned int i, n = const_n_elts;
6832
6833 if (emode != eltmode)
6834 {
6835 n = CONSTRUCTOR_NELTS (exp);
6836 vec_vec_init_p = true;
6837 }
6838 vector = rtvec_alloc (n);
6839 for (i = 0; i < n; i++)
6840 RTVEC_ELT (vector, i) = CONST0_RTX (emode);
6841 }
6842 }
6843
6844 /* If the constructor has fewer elements than the vector,
6845 clear the whole vector first. Similarly if this is a static
6846 constructor of a non-BLKmode object. */
6847 if (cleared)
6848 need_to_clear = 0;
6849 else if (REG_P (target) && TREE_STATIC (exp))
6850 need_to_clear = 1;
6851 else
6852 {
6853 unsigned HOST_WIDE_INT count = 0, zero_count = 0;
6854 tree value;
6855
6856 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), idx, value)
6857 {
6858 tree sz = TYPE_SIZE (TREE_TYPE (value));
6859 int n_elts_here
6860 = tree_to_uhwi (int_const_binop (TRUNC_DIV_EXPR, sz,
6861 TYPE_SIZE (elttype)));
6862
6863 count += n_elts_here;
6864 if (mostly_zeros_p (value))
6865 zero_count += n_elts_here;
6866 }
6867
6868 /* Clear the entire vector first if there are any missing elements,
6869 or if the incidence of zero elements is >= 75%. */
6870 need_to_clear = (maybe_lt (count, n_elts)
6871 || 4 * zero_count >= 3 * count);
6872 }
6873
6874 if (need_to_clear && maybe_gt (size, 0) && !vector)
6875 {
6876 if (REG_P (target))
6877 emit_move_insn (target, CONST0_RTX (mode));
6878 else
6879 clear_storage (target, gen_int_mode (size, Pmode),
6880 BLOCK_OP_NORMAL);
6881 cleared = 1;
6882 }
6883
6884 /* Inform later passes that the old value is dead. */
6885 if (!cleared && !vector && REG_P (target))
6886 emit_move_insn (target, CONST0_RTX (mode));
6887
6888 if (MEM_P (target))
6889 alias = MEM_ALIAS_SET (target);
6890 else
6891 alias = get_alias_set (elttype);
6892
6893 /* Store each element of the constructor into the corresponding
6894 element of TARGET, determined by counting the elements. */
6895 for (idx = 0, i = 0;
6896 vec_safe_iterate (CONSTRUCTOR_ELTS (exp), idx, &ce);
6897 idx++, i += bitsize / elt_size)
6898 {
6899 HOST_WIDE_INT eltpos;
6900 tree value = ce->value;
6901
6902 bitsize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (value)));
6903 if (cleared && initializer_zerop (value))
6904 continue;
6905
6906 if (ce->index)
6907 eltpos = tree_to_uhwi (ce->index);
6908 else
6909 eltpos = i;
6910
6911 if (vector)
6912 {
6913 if (vec_vec_init_p)
6914 {
6915 gcc_assert (ce->index == NULL_TREE);
6916 gcc_assert (TREE_CODE (TREE_TYPE (value)) == VECTOR_TYPE);
6917 eltpos = idx;
6918 }
6919 else
6920 gcc_assert (TREE_CODE (TREE_TYPE (value)) != VECTOR_TYPE);
6921 RTVEC_ELT (vector, eltpos) = expand_normal (value);
6922 }
6923 else
6924 {
6925 machine_mode value_mode
6926 = (TREE_CODE (TREE_TYPE (value)) == VECTOR_TYPE
6927 ? TYPE_MODE (TREE_TYPE (value)) : eltmode);
6928 bitpos = eltpos * elt_size;
6929 store_constructor_field (target, bitsize, bitpos, 0,
6930 bitregion_end, value_mode,
6931 value, cleared, alias, reverse);
6932 }
6933 }
6934
6935 if (vector)
6936 emit_insn (GEN_FCN (icode) (target,
6937 gen_rtx_PARALLEL (mode, vector)));
6938 break;
6939 }
6940
6941 default:
6942 gcc_unreachable ();
6943 }
6944 }
6945
6946 /* Store the value of EXP (an expression tree)
6947 into a subfield of TARGET which has mode MODE and occupies
6948 BITSIZE bits, starting BITPOS bits from the start of TARGET.
6949 If MODE is VOIDmode, it means that we are storing into a bit-field.
6950
6951 BITREGION_START is the bitpos of the first bitfield in this region.
6952 BITREGION_END is the bitpos of the ending bitfield in this region.
6953 These two fields are 0 if the C++ memory model does not apply,
6954 or if we are not interested in keeping track of bitfield regions.
6955
6956 Always return const0_rtx unless we have something particular to
6957 return.
6958
6959 ALIAS_SET is the alias set for the destination. This value will
6960 (in general) be different from that for TARGET, since TARGET is a
6961 reference to the containing structure.
6962
6963 If NONTEMPORAL is true, try generating a nontemporal store.
6964
6965 If REVERSE is true, the store is to be done in reverse order. */
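/* For example, a bit-field assignment such as the hypothetical

     struct s { unsigned a : 3, b : 5; } x;
     x.b = v;

   reaches this function with MODE == VOIDmode, BITSIZE == 5 and, on a
   typical little-endian target, BITPOS == 3; it is handled with the
   bit-field machinery below rather than with an ordinary memory move.  */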
6966
6967 static rtx
6968 store_field (rtx target, poly_int64 bitsize, poly_int64 bitpos,
6969 poly_uint64 bitregion_start, poly_uint64 bitregion_end,
6970 machine_mode mode, tree exp,
6971 alias_set_type alias_set, bool nontemporal, bool reverse)
6972 {
6973 if (TREE_CODE (exp) == ERROR_MARK)
6974 return const0_rtx;
6975
6976 /* If we have nothing to store, do nothing unless the expression has
6977 side-effects. Don't do that for zero sized addressable lhs of
6978 calls. */
6979 if (known_eq (bitsize, 0)
6980 && (!TREE_ADDRESSABLE (TREE_TYPE (exp))
6981 || TREE_CODE (exp) != CALL_EXPR))
6982 return expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL);
6983
6984 if (GET_CODE (target) == CONCAT)
6985 {
6986 /* We're storing into a struct containing a single __complex. */
6987
6988 gcc_assert (known_eq (bitpos, 0));
6989 return store_expr (exp, target, 0, nontemporal, reverse);
6990 }
6991
6992 /* If the structure is in a register or if the component
6993 is a bit field, we cannot use addressing to access it.
6994 Use bit-field techniques or SUBREG to store in it. */
6995
6996 poly_int64 decl_bitsize;
6997 if (mode == VOIDmode
6998 || (mode != BLKmode && ! direct_store[(int) mode]
6999 && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
7000 && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
7001 || REG_P (target)
7002 || GET_CODE (target) == SUBREG
7003 /* If the field isn't aligned enough to store as an ordinary memref,
7004 store it as a bit field. */
7005 || (mode != BLKmode
7006 && ((((MEM_ALIGN (target) < GET_MODE_ALIGNMENT (mode))
7007 || !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode)))
7008 && targetm.slow_unaligned_access (mode, MEM_ALIGN (target)))
7009 || !multiple_p (bitpos, BITS_PER_UNIT)))
7010 || (known_size_p (bitsize)
7011 && mode != BLKmode
7012 && maybe_gt (GET_MODE_BITSIZE (mode), bitsize))
7013 /* If the RHS and field are a constant size and the size of the
7014 RHS isn't the same size as the bitfield, we must use bitfield
7015 operations. */
7016 || (known_size_p (bitsize)
7017 && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (exp)))
7018 && maybe_ne (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (exp))),
7019 bitsize)
7020 /* Except for initialization of full bytes from a CONSTRUCTOR, which
7021 we will handle specially below. */
7022 && !(TREE_CODE (exp) == CONSTRUCTOR
7023 && multiple_p (bitsize, BITS_PER_UNIT))
7024 /* And except for bitwise copying of TREE_ADDRESSABLE types,
7025 where the FIELD_DECL has the right bitsize, but TREE_TYPE (exp)
7026 includes some extra padding. store_expr / expand_expr will in
7027 that case call get_inner_reference that will have the bitsize
7028 we check here and thus the block move will not clobber the
7029 padding that shouldn't be clobbered. In the future we could
7030 replace the TREE_ADDRESSABLE check with a check that
7031 get_base_address needs to live in memory. */
7032 && (!TREE_ADDRESSABLE (TREE_TYPE (exp))
7033 || TREE_CODE (exp) != COMPONENT_REF
7034 || !multiple_p (bitsize, BITS_PER_UNIT)
7035 || !multiple_p (bitpos, BITS_PER_UNIT)
7036 || !poly_int_tree_p (DECL_SIZE (TREE_OPERAND (exp, 1)),
7037 &decl_bitsize)
7038 || maybe_ne (decl_bitsize, bitsize)))
7039 /* If we are expanding a MEM_REF of a non-BLKmode non-addressable
7040 decl we must use bitfield operations. */
7041 || (known_size_p (bitsize)
7042 && TREE_CODE (exp) == MEM_REF
7043 && TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
7044 && DECL_P (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
7045 && !TREE_ADDRESSABLE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
7046 && DECL_MODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)) != BLKmode))
7047 {
7048 rtx temp;
7049 gimple *nop_def;
7050
7051 /* If EXP is a NOP_EXPR of precision less than its mode, then that
7052 implies a mask operation. If the precision is the same size as
7053 the field we're storing into, that mask is redundant. This is
7054 particularly common with bit field assignments generated by the
7055 C front end. */
7056 nop_def = get_def_for_expr (exp, NOP_EXPR);
7057 if (nop_def)
7058 {
7059 tree type = TREE_TYPE (exp);
7060 if (INTEGRAL_TYPE_P (type)
7061 && maybe_ne (TYPE_PRECISION (type),
7062 GET_MODE_BITSIZE (TYPE_MODE (type)))
7063 && known_eq (bitsize, TYPE_PRECISION (type)))
7064 {
7065 tree op = gimple_assign_rhs1 (nop_def);
7066 type = TREE_TYPE (op);
7067 if (INTEGRAL_TYPE_P (type)
7068 && known_ge (TYPE_PRECISION (type), bitsize))
7069 exp = op;
7070 }
7071 }
7072
7073 temp = expand_normal (exp);
7074
7075 /* We don't support variable-sized BLKmode bitfields, since our
7076 handling of BLKmode is bound up with the ability to break
7077 things into words. */
7078 gcc_assert (mode != BLKmode || bitsize.is_constant ());
7079
7080 /* Handle calls that return values in multiple non-contiguous locations.
7081 The Irix 6 ABI has examples of this. */
7082 if (GET_CODE (temp) == PARALLEL)
7083 {
7084 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
7085 machine_mode temp_mode = GET_MODE (temp);
7086 if (temp_mode == BLKmode || temp_mode == VOIDmode)
7087 temp_mode = smallest_int_mode_for_size (size * BITS_PER_UNIT);
7088 rtx temp_target = gen_reg_rtx (temp_mode);
7089 emit_group_store (temp_target, temp, TREE_TYPE (exp), size);
7090 temp = temp_target;
7091 }
7092
7093 /* Handle calls that return BLKmode values in registers. */
7094 else if (mode == BLKmode && REG_P (temp) && TREE_CODE (exp) == CALL_EXPR)
7095 {
7096 rtx temp_target = gen_reg_rtx (GET_MODE (temp));
7097 copy_blkmode_from_reg (temp_target, temp, TREE_TYPE (exp));
7098 temp = temp_target;
7099 }
7100
7101 /* If the value has aggregate type and an integral mode then, if BITSIZE
7102 is narrower than this mode and this is for big-endian data, we first
7103 need to put the value into the low-order bits for store_bit_field,
7104 except when MODE is BLKmode and BITSIZE is larger than the word size
7105 (see the handling of fields larger than a word in store_bit_field).
7106 Moreover, the field may be not aligned on a byte boundary; in this
7107 case, if it has reverse storage order, it needs to be accessed as a
7108 scalar field with reverse storage order and we must first put the
7109 value into target order. */
7110 scalar_int_mode temp_mode;
7111 if (AGGREGATE_TYPE_P (TREE_TYPE (exp))
7112 && is_int_mode (GET_MODE (temp), &temp_mode))
7113 {
7114 HOST_WIDE_INT size = GET_MODE_BITSIZE (temp_mode);
7115
7116 reverse = TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (exp));
7117
7118 if (reverse)
7119 temp = flip_storage_order (temp_mode, temp);
7120
7121 gcc_checking_assert (known_le (bitsize, size));
7122 if (maybe_lt (bitsize, size)
7123 && reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN
7124 /* Use of to_constant for BLKmode was checked above. */
7125 && !(mode == BLKmode && bitsize.to_constant () > BITS_PER_WORD))
7126 temp = expand_shift (RSHIFT_EXPR, temp_mode, temp,
7127 size - bitsize, NULL_RTX, 1);
7128 }
7129
7130 /* Unless MODE is VOIDmode or BLKmode, convert TEMP to MODE. */
7131 if (mode != VOIDmode && mode != BLKmode
7132 && mode != TYPE_MODE (TREE_TYPE (exp)))
7133 temp = convert_modes (mode, TYPE_MODE (TREE_TYPE (exp)), temp, 1);
7134
7135 /* If the mode of TEMP and TARGET is BLKmode, both must be in memory
7136 and BITPOS must be aligned on a byte boundary. If so, we simply do
7137 a block copy. Likewise for a BLKmode-like TARGET. */
7138 if (GET_MODE (temp) == BLKmode
7139 && (GET_MODE (target) == BLKmode
7140 || (MEM_P (target)
7141 && GET_MODE_CLASS (GET_MODE (target)) == MODE_INT
7142 && multiple_p (bitpos, BITS_PER_UNIT)
7143 && multiple_p (bitsize, BITS_PER_UNIT))))
7144 {
7145 gcc_assert (MEM_P (target) && MEM_P (temp));
7146 poly_int64 bytepos = exact_div (bitpos, BITS_PER_UNIT);
7147 poly_int64 bytesize = bits_to_bytes_round_up (bitsize);
7148
7149 target = adjust_address (target, VOIDmode, bytepos);
7150 emit_block_move (target, temp,
7151 gen_int_mode (bytesize, Pmode),
7152 BLOCK_OP_NORMAL);
7153
7154 return const0_rtx;
7155 }
7156
7157 /* If the mode of TEMP is still BLKmode and BITSIZE is not larger than the
7158 word size, we need to load the value (see again store_bit_field). */
7159 if (GET_MODE (temp) == BLKmode && known_le (bitsize, BITS_PER_WORD))
7160 {
7161 scalar_int_mode temp_mode = smallest_int_mode_for_size (bitsize);
7162 temp = extract_bit_field (temp, bitsize, 0, 1, NULL_RTX, temp_mode,
7163 temp_mode, false, NULL);
7164 }
7165
7166 /* Store the value in the bitfield. */
7167 gcc_checking_assert (known_ge (bitpos, 0));
7168 store_bit_field (target, bitsize, bitpos,
7169 bitregion_start, bitregion_end,
7170 mode, temp, reverse);
7171
7172 return const0_rtx;
7173 }
7174 else
7175 {
7176 /* Now build a reference to just the desired component. */
7177 rtx to_rtx = adjust_address (target, mode,
7178 exact_div (bitpos, BITS_PER_UNIT));
7179
7180 if (to_rtx == target)
7181 to_rtx = copy_rtx (to_rtx);
7182
7183 if (!MEM_KEEP_ALIAS_SET_P (to_rtx) && MEM_ALIAS_SET (to_rtx) != 0)
7184 set_mem_alias_set (to_rtx, alias_set);
7185
7186 /* Above we avoided using bitfield operations for storing a CONSTRUCTOR
7187 into a target smaller than its type; handle that case now. */
7188 if (TREE_CODE (exp) == CONSTRUCTOR && known_size_p (bitsize))
7189 {
7190 poly_int64 bytesize = exact_div (bitsize, BITS_PER_UNIT);
7191 store_constructor (exp, to_rtx, 0, bytesize, reverse);
7192 return to_rtx;
7193 }
7194
7195 return store_expr (exp, to_rtx, 0, nontemporal, reverse);
7196 }
7197 }
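/* Illustrative sketch, not part of the build: a hypothetical caller storing
   a 9-bit field that starts at bit 3 of an in-memory struct would take the
   bit-field path above, roughly as

     store_field (to_rtx, 9, 3, 0, 0, VOIDmode, rhs,
                  get_alias_set (to), false, false);

   where TO_RTX and RHS stand for the already-expanded destination and the
   source tree.  MODE is VOIDmode because the destination is a bit-field,
   and the zero bitregion bounds mean no C++ memory-model region is being
   tracked.  */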
7198 \f
7199 /* Given an expression EXP that may be a COMPONENT_REF, a BIT_FIELD_REF,
7200 an ARRAY_REF, or an ARRAY_RANGE_REF, look for nested operations of these
7201 codes and find the ultimate containing object, which we return.
7202
7203 We set *PBITSIZE to the size in bits that we want, *PBITPOS to the
7204 bit position, *PUNSIGNEDP to the signedness and *PREVERSEP to the
7205 storage order of the field.
7206 If the position of the field is variable, we store a tree
7207 giving the variable offset (in units) in *POFFSET.
7208 This offset is in addition to the bit position.
7209 If the position is not variable, we store 0 in *POFFSET.
7210
7211 If any of the extraction expressions is volatile,
7212 we store 1 in *PVOLATILEP. Otherwise we don't change that.
7213
7214 If the field is a non-BLKmode bit-field, *PMODE is set to VOIDmode.
7215 Otherwise, it is a mode that can be used to access the field.
7216
7217 If the field describes a variable-sized object, *PMODE is set to
7218 BLKmode and *PBITSIZE is set to -1. An access cannot be made in
7219 this case, but the address of the object can be found. */
7220
7221 tree
7222 get_inner_reference (tree exp, poly_int64_pod *pbitsize,
7223 poly_int64_pod *pbitpos, tree *poffset,
7224 machine_mode *pmode, int *punsignedp,
7225 int *preversep, int *pvolatilep)
7226 {
7227 tree size_tree = 0;
7228 machine_mode mode = VOIDmode;
7229 bool blkmode_bitfield = false;
7230 tree offset = size_zero_node;
7231 poly_offset_int bit_offset = 0;
7232
7233 /* First get the mode, signedness, storage order and size. We do this from
7234 just the outermost expression. */
7235 *pbitsize = -1;
7236 if (TREE_CODE (exp) == COMPONENT_REF)
7237 {
7238 tree field = TREE_OPERAND (exp, 1);
7239 size_tree = DECL_SIZE (field);
7240 if (flag_strict_volatile_bitfields > 0
7241 && TREE_THIS_VOLATILE (exp)
7242 && DECL_BIT_FIELD_TYPE (field)
7243 && DECL_MODE (field) != BLKmode)
7244 /* Volatile bitfields should be accessed in the mode of the
7245 field's type, not the mode computed based on the bit
7246 size. */
7247 mode = TYPE_MODE (DECL_BIT_FIELD_TYPE (field));
7248 else if (!DECL_BIT_FIELD (field))
7249 {
7250 mode = DECL_MODE (field);
7251 /* For vector fields re-check the target flags, as DECL_MODE
7252 could have been set with different target flags than
7253 the current function has. */
7254 if (mode == BLKmode
7255 && VECTOR_TYPE_P (TREE_TYPE (field))
7256 && VECTOR_MODE_P (TYPE_MODE_RAW (TREE_TYPE (field))))
7257 mode = TYPE_MODE (TREE_TYPE (field));
7258 }
7259 else if (DECL_MODE (field) == BLKmode)
7260 blkmode_bitfield = true;
7261
7262 *punsignedp = DECL_UNSIGNED (field);
7263 }
7264 else if (TREE_CODE (exp) == BIT_FIELD_REF)
7265 {
7266 size_tree = TREE_OPERAND (exp, 1);
7267 *punsignedp = (! INTEGRAL_TYPE_P (TREE_TYPE (exp))
7268 || TYPE_UNSIGNED (TREE_TYPE (exp)));
7269
7270 /* For vector element types with the correct size of access or for
7271 vector typed accesses, use the mode of the access type.  */
7272 if ((TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 0))) == VECTOR_TYPE
7273 && TREE_TYPE (exp) == TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0)))
7274 && tree_int_cst_equal (size_tree, TYPE_SIZE (TREE_TYPE (exp))))
7275 || VECTOR_TYPE_P (TREE_TYPE (exp)))
7276 mode = TYPE_MODE (TREE_TYPE (exp));
7277 }
7278 else
7279 {
7280 mode = TYPE_MODE (TREE_TYPE (exp));
7281 *punsignedp = TYPE_UNSIGNED (TREE_TYPE (exp));
7282
7283 if (mode == BLKmode)
7284 size_tree = TYPE_SIZE (TREE_TYPE (exp));
7285 else
7286 *pbitsize = GET_MODE_BITSIZE (mode);
7287 }
7288
7289 if (size_tree != 0)
7290 {
7291 if (! tree_fits_uhwi_p (size_tree))
7292 mode = BLKmode, *pbitsize = -1;
7293 else
7294 *pbitsize = tree_to_uhwi (size_tree);
7295 }
7296
7297 *preversep = reverse_storage_order_for_component_p (exp);
7298
7299 /* Compute cumulative bit-offset for nested component-refs and array-refs,
7300 and find the ultimate containing object. */
7301 while (1)
7302 {
7303 switch (TREE_CODE (exp))
7304 {
7305 case BIT_FIELD_REF:
7306 bit_offset += wi::to_poly_offset (TREE_OPERAND (exp, 2));
7307 break;
7308
7309 case COMPONENT_REF:
7310 {
7311 tree field = TREE_OPERAND (exp, 1);
7312 tree this_offset = component_ref_field_offset (exp);
7313
7314 /* If this field hasn't been filled in yet, don't go past it.
7315 This should only happen when folding expressions made during
7316 type construction. */
7317 if (this_offset == 0)
7318 break;
7319
7320 offset = size_binop (PLUS_EXPR, offset, this_offset);
7321 bit_offset += wi::to_poly_offset (DECL_FIELD_BIT_OFFSET (field));
7322
7323 /* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */
7324 }
7325 break;
7326
7327 case ARRAY_REF:
7328 case ARRAY_RANGE_REF:
7329 {
7330 tree index = TREE_OPERAND (exp, 1);
7331 tree low_bound = array_ref_low_bound (exp);
7332 tree unit_size = array_ref_element_size (exp);
7333
7334 /* We assume all arrays have sizes that are a multiple of a byte.
7335 First subtract the lower bound, if any, in the type of the
7336 index, then convert to sizetype and multiply by the size of
7337 the array element. */
7338 if (! integer_zerop (low_bound))
7339 index = fold_build2 (MINUS_EXPR, TREE_TYPE (index),
7340 index, low_bound);
7341
7342 offset = size_binop (PLUS_EXPR, offset,
7343 size_binop (MULT_EXPR,
7344 fold_convert (sizetype, index),
7345 unit_size));
7346 }
7347 break;
7348
7349 case REALPART_EXPR:
7350 break;
7351
7352 case IMAGPART_EXPR:
7353 bit_offset += *pbitsize;
7354 break;
7355
7356 case VIEW_CONVERT_EXPR:
7357 break;
7358
7359 case MEM_REF:
7360 /* Hand back the decl for MEM[&decl, off]. */
7361 if (TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR)
7362 {
7363 tree off = TREE_OPERAND (exp, 1);
7364 if (!integer_zerop (off))
7365 {
7366 poly_offset_int boff = mem_ref_offset (exp);
7367 boff <<= LOG2_BITS_PER_UNIT;
7368 bit_offset += boff;
7369 }
7370 exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
7371 }
7372 goto done;
7373
7374 default:
7375 goto done;
7376 }
7377
7378 /* If any reference in the chain is volatile, the effect is volatile. */
7379 if (TREE_THIS_VOLATILE (exp))
7380 *pvolatilep = 1;
7381
7382 exp = TREE_OPERAND (exp, 0);
7383 }
7384 done:
7385
7386 /* If OFFSET is constant, see if we can return the whole thing as a
7387 constant bit position. Make sure to handle overflow during
7388 this conversion. */
7389 if (poly_int_tree_p (offset))
7390 {
7391 poly_offset_int tem = wi::sext (wi::to_poly_offset (offset),
7392 TYPE_PRECISION (sizetype));
7393 tem <<= LOG2_BITS_PER_UNIT;
7394 tem += bit_offset;
7395 if (tem.to_shwi (pbitpos))
7396 *poffset = offset = NULL_TREE;
7397 }
7398
7399 /* Otherwise, split it up. */
7400 if (offset)
7401 {
7402 /* Avoid returning a negative bitpos as this may wreak havoc later. */
7403 if (!bit_offset.to_shwi (pbitpos) || maybe_lt (*pbitpos, 0))
7404 {
7405 *pbitpos = num_trailing_bits (bit_offset.force_shwi ());
7406 poly_offset_int bytes = bits_to_bytes_round_down (bit_offset);
7407 offset = size_binop (PLUS_EXPR, offset,
7408 build_int_cst (sizetype, bytes.force_shwi ()));
7409 }
7410
7411 *poffset = offset;
7412 }
7413
7414 /* We can use BLKmode for a byte-aligned BLKmode bitfield. */
7415 if (mode == VOIDmode
7416 && blkmode_bitfield
7417 && multiple_p (*pbitpos, BITS_PER_UNIT)
7418 && multiple_p (*pbitsize, BITS_PER_UNIT))
7419 *pmode = BLKmode;
7420 else
7421 *pmode = mode;
7422
7423 return exp;
7424 }
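/* Illustrative sketch, not compiled: decomposing a COMPONENT_REF such as
   S.F, where F is a bit-field of width 5 placed at bit 3 of S, would look
   like

     poly_int64 bitsize, bitpos;
     tree offset;
     machine_mode mode1;
     int unsignedp, reversep, volatilep = 0;
     tree base = get_inner_reference (exp, &bitsize, &bitpos, &offset,
                                      &mode1, &unsignedp, &reversep,
                                      &volatilep);

   and yield BASE == S, BITSIZE == 5, BITPOS == 3, OFFSET == NULL_TREE and
   MODE1 == VOIDmode, since a non-BLKmode bit-field reports VOIDmode as
   documented above.  */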
7425
7426 /* Alignment in bits the TARGET of an assignment may be assumed to have. */
7427
7428 static unsigned HOST_WIDE_INT
7429 target_align (const_tree target)
7430 {
7431 /* We might have a chain of nested references with intermediate misaligning
7432 bit-field components, so we need to recurse to find out.  */
7433
7434 unsigned HOST_WIDE_INT this_align, outer_align;
7435
7436 switch (TREE_CODE (target))
7437 {
7438 case BIT_FIELD_REF:
7439 return 1;
7440
7441 case COMPONENT_REF:
7442 this_align = DECL_ALIGN (TREE_OPERAND (target, 1));
7443 outer_align = target_align (TREE_OPERAND (target, 0));
7444 return MIN (this_align, outer_align);
7445
7446 case ARRAY_REF:
7447 case ARRAY_RANGE_REF:
7448 this_align = TYPE_ALIGN (TREE_TYPE (target));
7449 outer_align = target_align (TREE_OPERAND (target, 0));
7450 return MIN (this_align, outer_align);
7451
7452 CASE_CONVERT:
7453 case NON_LVALUE_EXPR:
7454 case VIEW_CONVERT_EXPR:
7455 this_align = TYPE_ALIGN (TREE_TYPE (target));
7456 outer_align = target_align (TREE_OPERAND (target, 0));
7457 return MAX (this_align, outer_align);
7458
7459 default:
7460 return TYPE_ALIGN (TREE_TYPE (target));
7461 }
7462 }
7463
7464 \f
7465 /* Given an rtx VALUE that may contain additions and multiplications, return
7466 an equivalent value that just refers to a register, memory, or constant.
7467 This is done by generating instructions to perform the arithmetic and
7468 returning a pseudo-register containing the value.
7469
7470 The returned value may be a REG, SUBREG, MEM or constant. */
7471
7472 rtx
7473 force_operand (rtx value, rtx target)
7474 {
7475 rtx op1, op2;
7476 /* Use subtarget as the target for operand 0 of a binary operation. */
7477 rtx subtarget = get_subtarget (target);
7478 enum rtx_code code = GET_CODE (value);
7479
7480 /* Check for a subreg applied to an expression produced by the loop optimizer.  */
7481 if (code == SUBREG
7482 && !REG_P (SUBREG_REG (value))
7483 && !MEM_P (SUBREG_REG (value)))
7484 {
7485 value
7486 = simplify_gen_subreg (GET_MODE (value),
7487 force_reg (GET_MODE (SUBREG_REG (value)),
7488 force_operand (SUBREG_REG (value),
7489 NULL_RTX)),
7490 GET_MODE (SUBREG_REG (value)),
7491 SUBREG_BYTE (value));
7492 code = GET_CODE (value);
7493 }
7494
7495 /* Check for a PIC address load. */
7496 if ((code == PLUS || code == MINUS)
7497 && XEXP (value, 0) == pic_offset_table_rtx
7498 && (GET_CODE (XEXP (value, 1)) == SYMBOL_REF
7499 || GET_CODE (XEXP (value, 1)) == LABEL_REF
7500 || GET_CODE (XEXP (value, 1)) == CONST))
7501 {
7502 if (!subtarget)
7503 subtarget = gen_reg_rtx (GET_MODE (value));
7504 emit_move_insn (subtarget, value);
7505 return subtarget;
7506 }
7507
7508 if (ARITHMETIC_P (value))
7509 {
7510 op2 = XEXP (value, 1);
7511 if (!CONSTANT_P (op2) && !(REG_P (op2) && op2 != subtarget))
7512 subtarget = 0;
7513 if (code == MINUS && CONST_INT_P (op2))
7514 {
7515 code = PLUS;
7516 op2 = negate_rtx (GET_MODE (value), op2);
7517 }
7518
7519 /* Check for an addition with OP2 a constant integer and our first
7520 operand a PLUS of a virtual register and something else. In that
7521 case, we want to emit the sum of the virtual register and the
7522 constant first and then add the other value. This allows virtual
7523 register instantiation to simply modify the constant rather than
7524 creating another one around this addition. */
7525 if (code == PLUS && CONST_INT_P (op2)
7526 && GET_CODE (XEXP (value, 0)) == PLUS
7527 && REG_P (XEXP (XEXP (value, 0), 0))
7528 && REGNO (XEXP (XEXP (value, 0), 0)) >= FIRST_VIRTUAL_REGISTER
7529 && REGNO (XEXP (XEXP (value, 0), 0)) <= LAST_VIRTUAL_REGISTER)
7530 {
7531 rtx temp = expand_simple_binop (GET_MODE (value), code,
7532 XEXP (XEXP (value, 0), 0), op2,
7533 subtarget, 0, OPTAB_LIB_WIDEN);
7534 return expand_simple_binop (GET_MODE (value), code, temp,
7535 force_operand (XEXP (XEXP (value,
7536 0), 1), 0),
7537 target, 0, OPTAB_LIB_WIDEN);
7538 }
7539
7540 op1 = force_operand (XEXP (value, 0), subtarget);
7541 op2 = force_operand (op2, NULL_RTX);
7542 switch (code)
7543 {
7544 case MULT:
7545 return expand_mult (GET_MODE (value), op1, op2, target, 1);
7546 case DIV:
7547 if (!INTEGRAL_MODE_P (GET_MODE (value)))
7548 return expand_simple_binop (GET_MODE (value), code, op1, op2,
7549 target, 1, OPTAB_LIB_WIDEN);
7550 else
7551 return expand_divmod (0,
7552 FLOAT_MODE_P (GET_MODE (value))
7553 ? RDIV_EXPR : TRUNC_DIV_EXPR,
7554 GET_MODE (value), op1, op2, target, 0);
7555 case MOD:
7556 return expand_divmod (1, TRUNC_MOD_EXPR, GET_MODE (value), op1, op2,
7557 target, 0);
7558 case UDIV:
7559 return expand_divmod (0, TRUNC_DIV_EXPR, GET_MODE (value), op1, op2,
7560 target, 1);
7561 case UMOD:
7562 return expand_divmod (1, TRUNC_MOD_EXPR, GET_MODE (value), op1, op2,
7563 target, 1);
7564 case ASHIFTRT:
7565 return expand_simple_binop (GET_MODE (value), code, op1, op2,
7566 target, 0, OPTAB_LIB_WIDEN);
7567 default:
7568 return expand_simple_binop (GET_MODE (value), code, op1, op2,
7569 target, 1, OPTAB_LIB_WIDEN);
7570 }
7571 }
7572 if (UNARY_P (value))
7573 {
7574 if (!target)
7575 target = gen_reg_rtx (GET_MODE (value));
7576 op1 = force_operand (XEXP (value, 0), NULL_RTX);
7577 switch (code)
7578 {
7579 case ZERO_EXTEND:
7580 case SIGN_EXTEND:
7581 case TRUNCATE:
7582 case FLOAT_EXTEND:
7583 case FLOAT_TRUNCATE:
7584 convert_move (target, op1, code == ZERO_EXTEND);
7585 return target;
7586
7587 case FIX:
7588 case UNSIGNED_FIX:
7589 expand_fix (target, op1, code == UNSIGNED_FIX);
7590 return target;
7591
7592 case FLOAT:
7593 case UNSIGNED_FLOAT:
7594 expand_float (target, op1, code == UNSIGNED_FLOAT);
7595 return target;
7596
7597 default:
7598 return expand_simple_unop (GET_MODE (value), code, op1, target, 0);
7599 }
7600 }
7601
7602 #ifdef INSN_SCHEDULING
7603 /* On machines that have insn scheduling, we want all memory references to be
7604 explicit, so we need to deal with such paradoxical SUBREGs. */
7605 if (paradoxical_subreg_p (value) && MEM_P (SUBREG_REG (value)))
7606 value
7607 = simplify_gen_subreg (GET_MODE (value),
7608 force_reg (GET_MODE (SUBREG_REG (value)),
7609 force_operand (SUBREG_REG (value),
7610 NULL_RTX)),
7611 GET_MODE (SUBREG_REG (value)),
7612 SUBREG_BYTE (value));
7613 #endif
7614
7615 return value;
7616 }
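/* Illustrative sketch, not compiled, with hypothetical BASE_REG and
   INDEX_REG pseudos: a caller holding an address-arithmetic rtx can
   flatten it into something directly usable as an operand, e.g.

     rtx sum = gen_rtx_PLUS (Pmode, base_reg,
                             gen_rtx_MULT (Pmode, index_reg, GEN_INT (4)));
     rtx addr = force_operand (sum, NULL_RTX);

   ADDR is then a pseudo holding BASE_REG + INDEX_REG * 4, computed through
   expand_mult and expand_simple_binop in the ARITHMETIC_P case above.  */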
7617 \f
7618 /* Subroutine of expand_expr: return nonzero iff there is no way that
7619 EXP can reference X, which is being modified. TOP_P is nonzero if this
7620 call is going to be used to determine whether we need a temporary
7621 for EXP, as opposed to a recursive call to this function.
7622
7623 It is always safe for this routine to return zero since it merely
7624 searches for optimization opportunities. */
7625
7626 int
7627 safe_from_p (const_rtx x, tree exp, int top_p)
7628 {
7629 rtx exp_rtl = 0;
7630 int i, nops;
7631
7632 if (x == 0
7633 /* If EXP has varying size, we MUST use a target since we currently
7634 have no way of allocating temporaries of variable size
7635 (except for arrays that have TYPE_ARRAY_MAX_SIZE set).
7636 So we assume here that something at a higher level has prevented a
7637 clash. This is somewhat bogus, but the best we can do. Only
7638 do this when X is BLKmode and when we are at the top level. */
7639 || (top_p && TREE_TYPE (exp) != 0 && COMPLETE_TYPE_P (TREE_TYPE (exp))
7640 && TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) != INTEGER_CST
7641 && (TREE_CODE (TREE_TYPE (exp)) != ARRAY_TYPE
7642 || TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp)) == NULL_TREE
7643 || TREE_CODE (TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp)))
7644 != INTEGER_CST)
7645 && GET_MODE (x) == BLKmode)
7646 /* If X is in the outgoing argument area, it is always safe. */
7647 || (MEM_P (x)
7648 && (XEXP (x, 0) == virtual_outgoing_args_rtx
7649 || (GET_CODE (XEXP (x, 0)) == PLUS
7650 && XEXP (XEXP (x, 0), 0) == virtual_outgoing_args_rtx))))
7651 return 1;
7652
7653 /* If this is a subreg of a hard register, declare it unsafe, otherwise,
7654 find the underlying pseudo. */
7655 if (GET_CODE (x) == SUBREG)
7656 {
7657 x = SUBREG_REG (x);
7658 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
7659 return 0;
7660 }
7661
7662 /* Now look at our tree code and possibly recurse. */
7663 switch (TREE_CODE_CLASS (TREE_CODE (exp)))
7664 {
7665 case tcc_declaration:
7666 exp_rtl = DECL_RTL_IF_SET (exp);
7667 break;
7668
7669 case tcc_constant:
7670 return 1;
7671
7672 case tcc_exceptional:
7673 if (TREE_CODE (exp) == TREE_LIST)
7674 {
7675 while (1)
7676 {
7677 if (TREE_VALUE (exp) && !safe_from_p (x, TREE_VALUE (exp), 0))
7678 return 0;
7679 exp = TREE_CHAIN (exp);
7680 if (!exp)
7681 return 1;
7682 if (TREE_CODE (exp) != TREE_LIST)
7683 return safe_from_p (x, exp, 0);
7684 }
7685 }
7686 else if (TREE_CODE (exp) == CONSTRUCTOR)
7687 {
7688 constructor_elt *ce;
7689 unsigned HOST_WIDE_INT idx;
7690
7691 FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (exp), idx, ce)
7692 if ((ce->index != NULL_TREE && !safe_from_p (x, ce->index, 0))
7693 || !safe_from_p (x, ce->value, 0))
7694 return 0;
7695 return 1;
7696 }
7697 else if (TREE_CODE (exp) == ERROR_MARK)
7698 return 1; /* An already-visited SAVE_EXPR? */
7699 else
7700 return 0;
7701
7702 case tcc_statement:
7703 /* The only case we look at here is the DECL_INITIAL inside a
7704 DECL_EXPR. */
7705 return (TREE_CODE (exp) != DECL_EXPR
7706 || TREE_CODE (DECL_EXPR_DECL (exp)) != VAR_DECL
7707 || !DECL_INITIAL (DECL_EXPR_DECL (exp))
7708 || safe_from_p (x, DECL_INITIAL (DECL_EXPR_DECL (exp)), 0));
7709
7710 case tcc_binary:
7711 case tcc_comparison:
7712 if (!safe_from_p (x, TREE_OPERAND (exp, 1), 0))
7713 return 0;
7714 /* Fall through. */
7715
7716 case tcc_unary:
7717 return safe_from_p (x, TREE_OPERAND (exp, 0), 0);
7718
7719 case tcc_expression:
7720 case tcc_reference:
7721 case tcc_vl_exp:
7722 /* Now do code-specific tests. EXP_RTL is set to any rtx we find in
7723 the expression. If it is set, we conflict iff we are that rtx or
7724 both are in memory. Otherwise, we check all operands of the
7725 expression recursively. */
7726
7727 switch (TREE_CODE (exp))
7728 {
7729 case ADDR_EXPR:
7730 /* If the operand is static or we are static, we can't conflict.
7731 Likewise if we don't conflict with the operand at all. */
7732 if (staticp (TREE_OPERAND (exp, 0))
7733 || TREE_STATIC (exp)
7734 || safe_from_p (x, TREE_OPERAND (exp, 0), 0))
7735 return 1;
7736
7737 /* Otherwise, the only way this can conflict is if we are taking
7738 the address of a DECL whose address is part of X, which is
7739 very rare. */
7740 exp = TREE_OPERAND (exp, 0);
7741 if (DECL_P (exp))
7742 {
7743 if (!DECL_RTL_SET_P (exp)
7744 || !MEM_P (DECL_RTL (exp)))
7745 return 0;
7746 else
7747 exp_rtl = XEXP (DECL_RTL (exp), 0);
7748 }
7749 break;
7750
7751 case MEM_REF:
7752 if (MEM_P (x)
7753 && alias_sets_conflict_p (MEM_ALIAS_SET (x),
7754 get_alias_set (exp)))
7755 return 0;
7756 break;
7757
7758 case CALL_EXPR:
7759 /* Assume that the call will clobber all hard registers and
7760 all of memory. */
7761 if ((REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
7762 || MEM_P (x))
7763 return 0;
7764 break;
7765
7766 case WITH_CLEANUP_EXPR:
7767 case CLEANUP_POINT_EXPR:
7768 /* Lowered by gimplify.c. */
7769 gcc_unreachable ();
7770
7771 case SAVE_EXPR:
7772 return safe_from_p (x, TREE_OPERAND (exp, 0), 0);
7773
7774 default:
7775 break;
7776 }
7777
7778 /* If we have an rtx, we do not need to scan our operands. */
7779 if (exp_rtl)
7780 break;
7781
7782 nops = TREE_OPERAND_LENGTH (exp);
7783 for (i = 0; i < nops; i++)
7784 if (TREE_OPERAND (exp, i) != 0
7785 && ! safe_from_p (x, TREE_OPERAND (exp, i), 0))
7786 return 0;
7787
7788 break;
7789
7790 case tcc_type:
7791 /* Should never get a type here. */
7792 gcc_unreachable ();
7793 }
7794
7795 /* If we have an rtl, find any enclosed object. Then see if we conflict
7796 with it. */
7797 if (exp_rtl)
7798 {
7799 if (GET_CODE (exp_rtl) == SUBREG)
7800 {
7801 exp_rtl = SUBREG_REG (exp_rtl);
7802 if (REG_P (exp_rtl)
7803 && REGNO (exp_rtl) < FIRST_PSEUDO_REGISTER)
7804 return 0;
7805 }
7806
7807 /* If the rtl is X, then it is not safe. Otherwise, it is unless both
7808 are memory and they conflict. */
7809 return ! (rtx_equal_p (x, exp_rtl)
7810 || (MEM_P (x) && MEM_P (exp_rtl)
7811 && true_dependence (exp_rtl, VOIDmode, x)));
7812 }
7813
7814 /* If we reach here, it is safe. */
7815 return 1;
7816 }
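/* Illustrative use, mirroring expand_operands below: before reusing TARGET
   while a second operand is still unexpanded, a caller checks

     if (! safe_from_p (target, exp1, 1))
       target = 0;

   A zero result is always conservative; it merely forces a temporary.  */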
7817
7818 \f
7819 /* Return the highest power of two that EXP is known to be a multiple of.
7820 This is used in updating alignment of MEMs in array references. */
7821
7822 unsigned HOST_WIDE_INT
7823 highest_pow2_factor (const_tree exp)
7824 {
7825 unsigned HOST_WIDE_INT ret;
7826 int trailing_zeros = tree_ctz (exp);
7827 if (trailing_zeros >= HOST_BITS_PER_WIDE_INT)
7828 return BIGGEST_ALIGNMENT;
7829 ret = HOST_WIDE_INT_1U << trailing_zeros;
7830 if (ret > BIGGEST_ALIGNMENT)
7831 return BIGGEST_ALIGNMENT;
7832 return ret;
7833 }
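/* For instance, if EXP is the integer constant 48 (binary 110000),
   tree_ctz returns 4 and the result is 16; for an expression known to be
   a multiple of 8, such as 8 * N with N arbitrary, the result is 8.  In
   every case the result is capped at BIGGEST_ALIGNMENT.  */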
7834
7835 /* Similar, except that the alignment requirements of TARGET are
7836 taken into account. Assume it is at least as aligned as its
7837 type, unless it is a COMPONENT_REF in which case the layout of
7838 the structure gives the alignment. */
7839
7840 static unsigned HOST_WIDE_INT
7841 highest_pow2_factor_for_target (const_tree target, const_tree exp)
7842 {
7843 unsigned HOST_WIDE_INT talign = target_align (target) / BITS_PER_UNIT;
7844 unsigned HOST_WIDE_INT factor = highest_pow2_factor (exp);
7845
7846 return MAX (factor, talign);
7847 }
7848 \f
7849 /* Convert the tree comparison code TCODE to the rtl one where the
7850 signedness is UNSIGNEDP. */
7851
7852 static enum rtx_code
7853 convert_tree_comp_to_rtx (enum tree_code tcode, int unsignedp)
7854 {
7855 enum rtx_code code;
7856 switch (tcode)
7857 {
7858 case EQ_EXPR:
7859 code = EQ;
7860 break;
7861 case NE_EXPR:
7862 code = NE;
7863 break;
7864 case LT_EXPR:
7865 code = unsignedp ? LTU : LT;
7866 break;
7867 case LE_EXPR:
7868 code = unsignedp ? LEU : LE;
7869 break;
7870 case GT_EXPR:
7871 code = unsignedp ? GTU : GT;
7872 break;
7873 case GE_EXPR:
7874 code = unsignedp ? GEU : GE;
7875 break;
7876 case UNORDERED_EXPR:
7877 code = UNORDERED;
7878 break;
7879 case ORDERED_EXPR:
7880 code = ORDERED;
7881 break;
7882 case UNLT_EXPR:
7883 code = UNLT;
7884 break;
7885 case UNLE_EXPR:
7886 code = UNLE;
7887 break;
7888 case UNGT_EXPR:
7889 code = UNGT;
7890 break;
7891 case UNGE_EXPR:
7892 code = UNGE;
7893 break;
7894 case UNEQ_EXPR:
7895 code = UNEQ;
7896 break;
7897 case LTGT_EXPR:
7898 code = LTGT;
7899 break;
7900
7901 default:
7902 gcc_unreachable ();
7903 }
7904 return code;
7905 }
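/* For example, convert_tree_comp_to_rtx (LT_EXPR, 1) yields LTU while
   convert_tree_comp_to_rtx (LT_EXPR, 0) yields LT; the unordered codes
   (UNLT_EXPR and friends) ignore UNSIGNEDP since they only arise for
   floating-point comparisons.  */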
7906
7907 /* Subroutine of expand_expr. Expand the two operands of a binary
7908 expression EXP0 and EXP1 placing the results in OP0 and OP1.
7909 The value may be stored in TARGET if TARGET is nonzero. The
7910 MODIFIER argument is as documented by expand_expr. */
7911
7912 void
7913 expand_operands (tree exp0, tree exp1, rtx target, rtx *op0, rtx *op1,
7914 enum expand_modifier modifier)
7915 {
7916 if (! safe_from_p (target, exp1, 1))
7917 target = 0;
7918 if (operand_equal_p (exp0, exp1, 0))
7919 {
7920 *op0 = expand_expr (exp0, target, VOIDmode, modifier);
7921 *op1 = copy_rtx (*op0);
7922 }
7923 else
7924 {
7925 *op0 = expand_expr (exp0, target, VOIDmode, modifier);
7926 *op1 = expand_expr (exp1, NULL_RTX, VOIDmode, modifier);
7927 }
7928 }
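/* Illustrative sketch, not compiled: expanding both operands of A + B for
   a binary optab, in the style of the binop handling further below, would
   look like

     rtx xop0, xop1;
     expand_operands (treeop0, treeop1, subtarget, &xop0, &xop1,
                      EXPAND_NORMAL);
     temp = expand_binop (mode, add_optab, xop0, xop1, target,
                          unsignedp, OPTAB_LIB_WIDEN);

   where TREEOP0/TREEOP1, SUBTARGET, MODE and UNSIGNEDP are assumed to come
   from the caller's context.  */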
7929
7930 \f
7931 /* Return a MEM that contains constant EXP. DEFER is as for
7932 output_constant_def and MODIFIER is as for expand_expr. */
7933
7934 static rtx
7935 expand_expr_constant (tree exp, int defer, enum expand_modifier modifier)
7936 {
7937 rtx mem;
7938
7939 mem = output_constant_def (exp, defer);
7940 if (modifier != EXPAND_INITIALIZER)
7941 mem = use_anchored_address (mem);
7942 return mem;
7943 }
7944
7945 /* A subroutine of expand_expr_addr_expr. Evaluate the address of EXP.
7946 The TARGET, TMODE and MODIFIER arguments are as for expand_expr. */
7947
7948 static rtx
7949 expand_expr_addr_expr_1 (tree exp, rtx target, scalar_int_mode tmode,
7950 enum expand_modifier modifier, addr_space_t as)
7951 {
7952 rtx result, subtarget;
7953 tree inner, offset;
7954 poly_int64 bitsize, bitpos;
7955 int unsignedp, reversep, volatilep = 0;
7956 machine_mode mode1;
7957
7958 /* If we are taking the address of a constant and are at the top level,
7959 we have to use output_constant_def since we can't call force_const_mem
7960 at top level. */
7961 /* ??? This should be considered a front-end bug. We should not be
7962 generating ADDR_EXPR of something that isn't an LVALUE. The only
7963 exception here is STRING_CST. */
7964 if (CONSTANT_CLASS_P (exp))
7965 {
7966 result = XEXP (expand_expr_constant (exp, 0, modifier), 0);
7967 if (modifier < EXPAND_SUM)
7968 result = force_operand (result, target);
7969 return result;
7970 }
7971
7972 /* Everything must be something allowed by is_gimple_addressable. */
7973 switch (TREE_CODE (exp))
7974 {
7975 case INDIRECT_REF:
7976 /* This case will happen via recursion for &a->b. */
7977 return expand_expr (TREE_OPERAND (exp, 0), target, tmode, modifier);
7978
7979 case MEM_REF:
7980 {
7981 tree tem = TREE_OPERAND (exp, 0);
7982 if (!integer_zerop (TREE_OPERAND (exp, 1)))
7983 tem = fold_build_pointer_plus (tem, TREE_OPERAND (exp, 1));
7984 return expand_expr (tem, target, tmode, modifier);
7985 }
7986
7987 case TARGET_MEM_REF:
7988 return addr_for_mem_ref (exp, as, true);
7989
7990 case CONST_DECL:
7991 /* Expand the initializer like constants above. */
7992 result = XEXP (expand_expr_constant (DECL_INITIAL (exp),
7993 0, modifier), 0);
7994 if (modifier < EXPAND_SUM)
7995 result = force_operand (result, target);
7996 return result;
7997
7998 case REALPART_EXPR:
7999 /* The real part of the complex number is always first, therefore
8000 the address is the same as the address of the parent object. */
8001 offset = 0;
8002 bitpos = 0;
8003 inner = TREE_OPERAND (exp, 0);
8004 break;
8005
8006 case IMAGPART_EXPR:
8007 /* The imaginary part of the complex number is always second.
8008 The expression is therefore always offset by the size of the
8009 scalar type. */
8010 offset = 0;
8011 bitpos = GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (exp)));
8012 inner = TREE_OPERAND (exp, 0);
8013 break;
8014
8015 case COMPOUND_LITERAL_EXPR:
8016 /* Allow COMPOUND_LITERAL_EXPR in initializers or coming from
8017 initializers, if e.g. rtl_for_decl_init is called on DECL_INITIAL
8018 with COMPOUND_LITERAL_EXPRs in it, or ARRAY_REF on a const static
8019 array with address of COMPOUND_LITERAL_EXPR in DECL_INITIAL;
8020 the initializers aren't gimplified. */
8021 if (COMPOUND_LITERAL_EXPR_DECL (exp)
8022 && TREE_STATIC (COMPOUND_LITERAL_EXPR_DECL (exp)))
8023 return expand_expr_addr_expr_1 (COMPOUND_LITERAL_EXPR_DECL (exp),
8024 target, tmode, modifier, as);
8025 /* FALLTHRU */
8026 default:
8027 /* If the object is a DECL, then expand it for its rtl. Don't bypass
8028 expand_expr, as that can have various side effects; LABEL_DECLs for
8029 example, may not have their DECL_RTL set yet. Expand the rtl of
8030 CONSTRUCTORs too, which should yield a memory reference for the
8031 constructor's contents. Assume language specific tree nodes can
8032 be expanded in some interesting way. */
8033 gcc_assert (TREE_CODE (exp) < LAST_AND_UNUSED_TREE_CODE);
8034 if (DECL_P (exp)
8035 || TREE_CODE (exp) == CONSTRUCTOR
8036 || TREE_CODE (exp) == COMPOUND_LITERAL_EXPR)
8037 {
8038 result = expand_expr (exp, target, tmode,
8039 modifier == EXPAND_INITIALIZER
8040 ? EXPAND_INITIALIZER : EXPAND_CONST_ADDRESS);
8041
8042 /* If the DECL isn't in memory, then the DECL wasn't properly
8043 marked TREE_ADDRESSABLE, which will be either a front-end
8044 or a tree optimizer bug. */
8045
8046 gcc_assert (MEM_P (result));
8047 result = XEXP (result, 0);
8048
8049 /* ??? Is this needed anymore? */
8050 if (DECL_P (exp))
8051 TREE_USED (exp) = 1;
8052
8053 if (modifier != EXPAND_INITIALIZER
8054 && modifier != EXPAND_CONST_ADDRESS
8055 && modifier != EXPAND_SUM)
8056 result = force_operand (result, target);
8057 return result;
8058 }
8059
8060 /* Use get_inner_reference even though we are expanding to RTL.
8061 The rationale is that we know how to handle "aligning nodes"
8062 here: we can just bypass them because they won't change the
8063 final object whose address will be returned (they actually
8064 exist only for that purpose).  */
8065 inner = get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode1,
8066 &unsignedp, &reversep, &volatilep);
8067 break;
8068 }
8069
8070 /* We must have made progress. */
8071 gcc_assert (inner != exp);
8072
8073 subtarget = offset || maybe_ne (bitpos, 0) ? NULL_RTX : target;
8074 /* For VIEW_CONVERT_EXPR, where the outer alignment is bigger than
8075 inner alignment, force the inner to be sufficiently aligned. */
8076 if (CONSTANT_CLASS_P (inner)
8077 && TYPE_ALIGN (TREE_TYPE (inner)) < TYPE_ALIGN (TREE_TYPE (exp)))
8078 {
8079 inner = copy_node (inner);
8080 TREE_TYPE (inner) = copy_node (TREE_TYPE (inner));
8081 SET_TYPE_ALIGN (TREE_TYPE (inner), TYPE_ALIGN (TREE_TYPE (exp)));
8082 TYPE_USER_ALIGN (TREE_TYPE (inner)) = 1;
8083 }
8084 result = expand_expr_addr_expr_1 (inner, subtarget, tmode, modifier, as);
8085
8086 if (offset)
8087 {
8088 rtx tmp;
8089
8090 if (modifier != EXPAND_NORMAL)
8091 result = force_operand (result, NULL);
8092 tmp = expand_expr (offset, NULL_RTX, tmode,
8093 modifier == EXPAND_INITIALIZER
8094 ? EXPAND_INITIALIZER : EXPAND_NORMAL);
8095
8096 /* expand_expr is allowed to return an object in a mode other
8097 than TMODE. If it did, we need to convert. */
8098 if (GET_MODE (tmp) != VOIDmode && tmode != GET_MODE (tmp))
8099 tmp = convert_modes (tmode, GET_MODE (tmp),
8100 tmp, TYPE_UNSIGNED (TREE_TYPE (offset)));
8101 result = convert_memory_address_addr_space (tmode, result, as);
8102 tmp = convert_memory_address_addr_space (tmode, tmp, as);
8103
8104 if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
8105 result = simplify_gen_binary (PLUS, tmode, result, tmp);
8106 else
8107 {
8108 subtarget = maybe_ne (bitpos, 0) ? NULL_RTX : target;
8109 result = expand_simple_binop (tmode, PLUS, result, tmp, subtarget,
8110 1, OPTAB_LIB_WIDEN);
8111 }
8112 }
8113
8114 if (maybe_ne (bitpos, 0))
8115 {
8116 /* Someone beforehand should have rejected taking the address
8117 of an object that isn't byte-aligned. */
8118 poly_int64 bytepos = exact_div (bitpos, BITS_PER_UNIT);
8119 result = convert_memory_address_addr_space (tmode, result, as);
8120 result = plus_constant (tmode, result, bytepos);
8121 if (modifier < EXPAND_SUM)
8122 result = force_operand (result, target);
8123 }
8124
8125 return result;
8126 }
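/* Illustrative sketch, not compiled: for &S.F where F sits 8 bytes into S,
   the code above first recurses to get the address of S and then, since
   OFFSET is null and BITPOS is 64, applies

     result = plus_constant (tmode, result, 8);

   which is the exact_div (bitpos, BITS_PER_UNIT) adjustment performed at
   the end of the function.  */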
8127
8128 /* A subroutine of expand_expr. Evaluate EXP, which is an ADDR_EXPR.
8129 The TARGET, TMODE and MODIFIER arguments are as for expand_expr. */
8130
8131 static rtx
8132 expand_expr_addr_expr (tree exp, rtx target, machine_mode tmode,
8133 enum expand_modifier modifier)
8134 {
8135 addr_space_t as = ADDR_SPACE_GENERIC;
8136 scalar_int_mode address_mode = Pmode;
8137 scalar_int_mode pointer_mode = ptr_mode;
8138 machine_mode rmode;
8139 rtx result;
8140
8141 /* Target mode of VOIDmode says "whatever's natural". */
8142 if (tmode == VOIDmode)
8143 tmode = TYPE_MODE (TREE_TYPE (exp));
8144
8145 if (POINTER_TYPE_P (TREE_TYPE (exp)))
8146 {
8147 as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp)));
8148 address_mode = targetm.addr_space.address_mode (as);
8149 pointer_mode = targetm.addr_space.pointer_mode (as);
8150 }
8151
8152 /* We can get called with some Weird Things if the user does silliness
8153 like "(short) &a". In that case, convert_memory_address won't do
8154 the right thing, so ignore the given target mode. */
8155 scalar_int_mode new_tmode = (tmode == pointer_mode
8156 ? pointer_mode
8157 : address_mode);
8158
8159 result = expand_expr_addr_expr_1 (TREE_OPERAND (exp, 0), target,
8160 new_tmode, modifier, as);
8161
8162 /* Despite expand_expr's claim that TMODE may be ignored when not
8163 strictly convenient, things break if we don't honor it.  Note
8164 that, combined with the above, we only do this for pointer modes.  */
8165 rmode = GET_MODE (result);
8166 if (rmode == VOIDmode)
8167 rmode = new_tmode;
8168 if (rmode != new_tmode)
8169 result = convert_memory_address_addr_space (new_tmode, result, as);
8170
8171 return result;
8172 }
8173
8174 /* Generate code for computing CONSTRUCTOR EXP.
8175 An rtx for the computed value is returned. If AVOID_TEMP_MEM
8176 is TRUE, instead of creating a temporary variable in memory
8177 NULL is returned and the caller needs to handle it differently. */
8178
8179 static rtx
8180 expand_constructor (tree exp, rtx target, enum expand_modifier modifier,
8181 bool avoid_temp_mem)
8182 {
8183 tree type = TREE_TYPE (exp);
8184 machine_mode mode = TYPE_MODE (type);
8185
8186 /* Try to avoid creating a temporary at all. This is possible
8187 if all of the initializer is zero.
8188 FIXME: try to handle all [0..255] initializers we can handle
8189 with memset. */
8190 if (TREE_STATIC (exp)
8191 && !TREE_ADDRESSABLE (exp)
8192 && target != 0 && mode == BLKmode
8193 && all_zeros_p (exp))
8194 {
8195 clear_storage (target, expr_size (exp), BLOCK_OP_NORMAL);
8196 return target;
8197 }
8198
8199 /* All elts simple constants => refer to a constant in memory. But
8200 if this is a non-BLKmode mode, let it store a field at a time
8201 since that should make a CONST_INT, CONST_WIDE_INT or
8202 CONST_DOUBLE when we fold. Likewise, if we have a target we can
8203 use, it is best to store directly into the target unless the type
8204 is large enough that memcpy will be used. If we are making an
8205 initializer and all operands are constant, put it in memory as
8206 well.
8207
8208 FIXME: Avoid trying to fill vector constructors piece-meal.
8209 Output them with output_constant_def below unless we're sure
8210 they're zeros. This should go away when vector initializers
8211 are treated like VECTOR_CST instead of arrays. */
8212 if ((TREE_STATIC (exp)
8213 && ((mode == BLKmode
8214 && ! (target != 0 && safe_from_p (target, exp, 1)))
8215 || TREE_ADDRESSABLE (exp)
8216 || (tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
8217 && (! can_move_by_pieces
8218 (tree_to_uhwi (TYPE_SIZE_UNIT (type)),
8219 TYPE_ALIGN (type)))
8220 && ! mostly_zeros_p (exp))))
8221 || ((modifier == EXPAND_INITIALIZER || modifier == EXPAND_CONST_ADDRESS)
8222 && TREE_CONSTANT (exp)))
8223 {
8224 rtx constructor;
8225
8226 if (avoid_temp_mem)
8227 return NULL_RTX;
8228
8229 constructor = expand_expr_constant (exp, 1, modifier);
8230
8231 if (modifier != EXPAND_CONST_ADDRESS
8232 && modifier != EXPAND_INITIALIZER
8233 && modifier != EXPAND_SUM)
8234 constructor = validize_mem (constructor);
8235
8236 return constructor;
8237 }
8238
8239 /* Handle calls that pass values in multiple non-contiguous
8240 locations. The Irix 6 ABI has examples of this. */
8241 if (target == 0 || ! safe_from_p (target, exp, 1)
8242 || GET_CODE (target) == PARALLEL || modifier == EXPAND_STACK_PARM)
8243 {
8244 if (avoid_temp_mem)
8245 return NULL_RTX;
8246
8247 target = assign_temp (type, TREE_ADDRESSABLE (exp), 1);
8248 }
8249
8250 store_constructor (exp, target, 0, int_expr_size (exp), false);
8251 return target;
8252 }
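/* Illustrative sketch, not compiled: for a static, non-addressable BLKmode
   constructor that is all zeros, e.g. "struct S s = { 0 };" with a usable
   TARGET, the fast path above reduces to

     clear_storage (target, expr_size (exp), BLOCK_OP_NORMAL);

   and neither a temporary nor a constant-pool entry is created.  */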
8253
8254
8255 /* expand_expr: generate code for computing expression EXP.
8256 An rtx for the computed value is returned. The value is never null.
8257 In the case of a void EXP, const0_rtx is returned.
8258
8259 The value may be stored in TARGET if TARGET is nonzero.
8260 TARGET is just a suggestion; callers must assume that
8261 the rtx returned may not be the same as TARGET.
8262
8263 If TARGET is CONST0_RTX, it means that the value will be ignored.
8264
8265 If TMODE is not VOIDmode, it suggests generating the
8266 result in mode TMODE. But this is done only when convenient.
8267 Otherwise, TMODE is ignored and the value is generated in its natural mode.
8268 TMODE is just a suggestion; callers must assume that
8269 the rtx returned may not have mode TMODE.
8270
8271 Note that TARGET may have neither TMODE nor MODE. In that case, it
8272 probably will not be used.
8273
8274 If MODIFIER is EXPAND_SUM then when EXP is an addition
8275 we can return an rtx of the form (MULT (REG ...) (CONST_INT ...))
8276 or a nest of (PLUS ...) and (MINUS ...) where the terms are
8277 products as above, or REG or MEM, or constant.
8278 Ordinarily in such cases we would output mul or add instructions
8279 and then return a pseudo reg containing the sum.
8280
8281 EXPAND_INITIALIZER is much like EXPAND_SUM except that
8282 it also marks a label as absolutely required (it can't be dead).
8283 It also makes a ZERO_EXTEND or SIGN_EXTEND instead of emitting extend insns.
8284 This is used for outputting expressions used in initializers.
8285
8286 EXPAND_CONST_ADDRESS says that it is okay to return a MEM
8287 with a constant address even if that address is not normally legitimate.
8288 EXPAND_INITIALIZER and EXPAND_SUM also have this effect.
8289
8290 EXPAND_STACK_PARM is used when expanding to a TARGET on the stack for
8291 a call parameter. Such targets require special care as we haven't yet
8292 marked TARGET so that it's safe from being trashed by libcalls. We
8293 don't want to use TARGET for anything but the final result;
8294 intermediate values must go elsewhere.  Additionally, calls to
8295 emit_block_move will be flagged with BLOCK_OP_CALL_PARM.
8296
8297 If EXP is a VAR_DECL whose DECL_RTL was a MEM with an invalid
8298 address, and ALT_RTL is non-NULL, then *ALT_RTL is set to the
8299 DECL_RTL of the VAR_DECL. *ALT_RTL is also set if EXP is a
8300 COMPOUND_EXPR whose second argument is such a VAR_DECL, and so on
8301 recursively.
8302 If the result can be stored at TARGET, and ALT_RTL is non-NULL,
8303 then *ALT_RTL is set to TARGET (before legitimization).
8304
8305 If INNER_REFERENCE_P is true, we are expanding an inner reference.
8306 In this case, we don't adjust a returned MEM rtx that wouldn't be
8307 sufficiently aligned for its mode; instead, it's up to the caller
8308 to deal with it afterwards. This is used to make sure that unaligned
8309 base objects for which out-of-bounds accesses are supported, for
8310 example record types with trailing arrays, aren't realigned behind
8311 the back of the caller.
8312 The normal operating mode is to pass FALSE for this parameter. */
8313
8314 rtx
8315 expand_expr_real (tree exp, rtx target, machine_mode tmode,
8316 enum expand_modifier modifier, rtx *alt_rtl,
8317 bool inner_reference_p)
8318 {
8319 rtx ret;
8320
8321 /* Handle ERROR_MARK before anybody tries to access its type. */
8322 if (TREE_CODE (exp) == ERROR_MARK
8323 || (TREE_CODE (TREE_TYPE (exp)) == ERROR_MARK))
8324 {
8325 ret = CONST0_RTX (tmode);
8326 return ret ? ret : const0_rtx;
8327 }
8328
8329 ret = expand_expr_real_1 (exp, target, tmode, modifier, alt_rtl,
8330 inner_reference_p);
8331 return ret;
8332 }
8333
8334 /* Try to expand the conditional expression which is represented by
8335 TREEOP0 ? TREEOP1 : TREEOP2 using conditional moves.  If it succeeds,
8336 return the rtl reg which represents the result. Otherwise return
8337 NULL_RTX. */
8338
8339 static rtx
8340 expand_cond_expr_using_cmove (tree treeop0 ATTRIBUTE_UNUSED,
8341 tree treeop1 ATTRIBUTE_UNUSED,
8342 tree treeop2 ATTRIBUTE_UNUSED)
8343 {
8344 rtx insn;
8345 rtx op00, op01, op1, op2;
8346 enum rtx_code comparison_code;
8347 machine_mode comparison_mode;
8348 gimple *srcstmt;
8349 rtx temp;
8350 tree type = TREE_TYPE (treeop1);
8351 int unsignedp = TYPE_UNSIGNED (type);
8352 machine_mode mode = TYPE_MODE (type);
8353 machine_mode orig_mode = mode;
8354 static bool expanding_cond_expr_using_cmove = false;
8355
8356 /* Conditional move expansion can end up TERing two operands which,
8357 when they recursively contain conditional expressions, can result in
8358 exponential behavior if the cmove expansion ultimately fails.
8359 It's hardly profitable to TER a cmove into a cmove, so avoid doing
8360 that by failing early if we end up recursing.  */
8361 if (expanding_cond_expr_using_cmove)
8362 return NULL_RTX;
8363
8364 /* If we cannot do a conditional move on the mode, try doing it
8365 with the promoted mode. */
8366 if (!can_conditionally_move_p (mode))
8367 {
8368 mode = promote_mode (type, mode, &unsignedp);
8369 if (!can_conditionally_move_p (mode))
8370 return NULL_RTX;
8371 temp = assign_temp (type, 0, 0); /* Use promoted mode for temp. */
8372 }
8373 else
8374 temp = assign_temp (type, 0, 1);
8375
8376 expanding_cond_expr_using_cmove = true;
8377 start_sequence ();
8378 expand_operands (treeop1, treeop2,
8379 temp, &op1, &op2, EXPAND_NORMAL);
8380
8381 if (TREE_CODE (treeop0) == SSA_NAME
8382 && (srcstmt = get_def_for_expr_class (treeop0, tcc_comparison)))
8383 {
8384 tree type = TREE_TYPE (gimple_assign_rhs1 (srcstmt));
8385 enum tree_code cmpcode = gimple_assign_rhs_code (srcstmt);
8386 op00 = expand_normal (gimple_assign_rhs1 (srcstmt));
8387 op01 = expand_normal (gimple_assign_rhs2 (srcstmt));
8388 comparison_mode = TYPE_MODE (type);
8389 unsignedp = TYPE_UNSIGNED (type);
8390 comparison_code = convert_tree_comp_to_rtx (cmpcode, unsignedp);
8391 }
8392 else if (COMPARISON_CLASS_P (treeop0))
8393 {
8394 tree type = TREE_TYPE (TREE_OPERAND (treeop0, 0));
8395 enum tree_code cmpcode = TREE_CODE (treeop0);
8396 op00 = expand_normal (TREE_OPERAND (treeop0, 0));
8397 op01 = expand_normal (TREE_OPERAND (treeop0, 1));
8398 unsignedp = TYPE_UNSIGNED (type);
8399 comparison_mode = TYPE_MODE (type);
8400 comparison_code = convert_tree_comp_to_rtx (cmpcode, unsignedp);
8401 }
8402 else
8403 {
8404 op00 = expand_normal (treeop0);
8405 op01 = const0_rtx;
8406 comparison_code = NE;
8407 comparison_mode = GET_MODE (op00);
8408 if (comparison_mode == VOIDmode)
8409 comparison_mode = TYPE_MODE (TREE_TYPE (treeop0));
8410 }
8411 expanding_cond_expr_using_cmove = false;
8412
8413 if (GET_MODE (op1) != mode)
8414 op1 = gen_lowpart (mode, op1);
8415
8416 if (GET_MODE (op2) != mode)
8417 op2 = gen_lowpart (mode, op2);
8418
8419 /* Try to emit the conditional move. */
8420 insn = emit_conditional_move (temp, comparison_code,
8421 op00, op01, comparison_mode,
8422 op1, op2, mode,
8423 unsignedp);
8424
8425 /* If we could do the conditional move, emit the sequence,
8426 and return. */
8427 if (insn)
8428 {
8429 rtx_insn *seq = get_insns ();
8430 end_sequence ();
8431 emit_insn (seq);
8432 return convert_modes (orig_mode, mode, temp, 0);
8433 }
8434
8435 /* Otherwise discard the sequence and fall back to code with
8436 branches. */
8437 end_sequence ();
8438 return NULL_RTX;
8439 }
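/* Illustrative sketch, not compiled: for "x = a < b ? c : d" on a target
   with a conditional-move pattern for MODE, the function above roughly
   emits

     insn = emit_conditional_move (temp, LT, op00, op01, comparison_mode,
                                   op1, op2, mode, 0);

   with OP00/OP01 the expanded comparison operands and OP1/OP2 the expanded
   arms, then returns TEMP converted back to the original mode; a NULL_RTX
   return tells the caller to fall back to branches.  */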
8440
8441 /* A helper function for expand_expr_real_2 to be used with a
8442 misaligned mem_ref TEMP. Assume an unsigned type if UNSIGNEDP
8443 is nonzero, with alignment ALIGN in bits.
8444 Store the value at TARGET if possible (if TARGET is nonzero).
8445 Regardless of TARGET, we return the rtx for where the value is placed.
8446 If the result can be stored at TARGET, and ALT_RTL is non-NULL,
8447 then *ALT_RTL is set to TARGET (before legitimization).  */
8448
8449 static rtx
8450 expand_misaligned_mem_ref (rtx temp, machine_mode mode, int unsignedp,
8451 unsigned int align, rtx target, rtx *alt_rtl)
8452 {
8453 enum insn_code icode;
8454
8455 if ((icode = optab_handler (movmisalign_optab, mode))
8456 != CODE_FOR_nothing)
8457 {
8458 class expand_operand ops[2];
8459
8460 /* We've already validated the memory, and we're creating a
8461 new pseudo destination. The predicates really can't fail,
8462 nor can the generator. */
8463 create_output_operand (&ops[0], NULL_RTX, mode);
8464 create_fixed_operand (&ops[1], temp);
8465 expand_insn (icode, 2, ops);
8466 temp = ops[0].value;
8467 }
8468 else if (targetm.slow_unaligned_access (mode, align))
8469 temp = extract_bit_field (temp, GET_MODE_BITSIZE (mode),
8470 0, unsignedp, target,
8471 mode, mode, false, alt_rtl);
8472 return temp;
8473 }
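/* Illustrative sketch, not compiled: on a target providing movmisalignsi,
   reading a 4-byte value known to be only 2-byte aligned goes through the
   optab branch above, roughly

     icode = optab_handler (movmisalign_optab, SImode);
     create_output_operand (&ops[0], NULL_RTX, SImode);
     create_fixed_operand (&ops[1], temp);
     expand_insn (icode, 2, ops);

   while a target without the pattern but with slow unaligned access falls
   back to extract_bit_field instead.  */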
8474
8475 rtx
8476 expand_expr_real_2 (sepops ops, rtx target, machine_mode tmode,
8477 enum expand_modifier modifier)
8478 {
8479 rtx op0, op1, op2, temp;
8480 rtx_code_label *lab;
8481 tree type;
8482 int unsignedp;
8483 machine_mode mode;
8484 scalar_int_mode int_mode;
8485 enum tree_code code = ops->code;
8486 optab this_optab;
8487 rtx subtarget, original_target;
8488 int ignore;
8489 bool reduce_bit_field;
8490 location_t loc = ops->location;
8491 tree treeop0, treeop1, treeop2;
8492 #define REDUCE_BIT_FIELD(expr) (reduce_bit_field \
8493 ? reduce_to_bit_field_precision ((expr), \
8494 target, \
8495 type) \
8496 : (expr))
8497
8498 type = ops->type;
8499 mode = TYPE_MODE (type);
8500 unsignedp = TYPE_UNSIGNED (type);
8501
8502 treeop0 = ops->op0;
8503 treeop1 = ops->op1;
8504 treeop2 = ops->op2;
8505
8506 /* We should be called only on simple (binary or unary) expressions,
8507 exactly those that are valid in gimple expressions that aren't
8508 GIMPLE_SINGLE_RHS (or invalid). */
8509 gcc_assert (get_gimple_rhs_class (code) == GIMPLE_UNARY_RHS
8510 || get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS
8511 || get_gimple_rhs_class (code) == GIMPLE_TERNARY_RHS);
8512
8513 ignore = (target == const0_rtx
8514 || ((CONVERT_EXPR_CODE_P (code)
8515 || code == COND_EXPR || code == VIEW_CONVERT_EXPR)
8516 && TREE_CODE (type) == VOID_TYPE));
8517
8518 /* We should be called only if we need the result. */
8519 gcc_assert (!ignore);
8520
8521 /* An operation in what may be a bit-field type needs the
8522 result to be reduced to the precision of the bit-field type,
8523 which is narrower than that of the type's mode. */
8524 reduce_bit_field = (INTEGRAL_TYPE_P (type)
8525 && !type_has_mode_precision_p (type));
8526
8527 if (reduce_bit_field && modifier == EXPAND_STACK_PARM)
8528 target = 0;
8529
8530 /* Use subtarget as the target for operand 0 of a binary operation. */
8531 subtarget = get_subtarget (target);
8532 original_target = target;
8533
8534 switch (code)
8535 {
8536 case NON_LVALUE_EXPR:
8537 case PAREN_EXPR:
8538 CASE_CONVERT:
8539 if (treeop0 == error_mark_node)
8540 return const0_rtx;
8541
8542 if (TREE_CODE (type) == UNION_TYPE)
8543 {
8544 tree valtype = TREE_TYPE (treeop0);
8545
8546 /* If both input and output are BLKmode, this conversion isn't doing
8547 anything except possibly changing memory attributes.  */
8548 if (mode == BLKmode && TYPE_MODE (valtype) == BLKmode)
8549 {
8550 rtx result = expand_expr (treeop0, target, tmode,
8551 modifier);
8552
8553 result = copy_rtx (result);
8554 set_mem_attributes (result, type, 0);
8555 return result;
8556 }
8557
8558 if (target == 0)
8559 {
8560 if (TYPE_MODE (type) != BLKmode)
8561 target = gen_reg_rtx (TYPE_MODE (type));
8562 else
8563 target = assign_temp (type, 1, 1);
8564 }
8565
8566 if (MEM_P (target))
8567 /* Store data into beginning of memory target. */
8568 store_expr (treeop0,
8569 adjust_address (target, TYPE_MODE (valtype), 0),
8570 modifier == EXPAND_STACK_PARM,
8571 false, TYPE_REVERSE_STORAGE_ORDER (type));
8572
8573 else
8574 {
8575 gcc_assert (REG_P (target)
8576 && !TYPE_REVERSE_STORAGE_ORDER (type));
8577
8578 /* Store this field into a union of the proper type. */
8579 poly_uint64 op0_size
8580 = tree_to_poly_uint64 (TYPE_SIZE (TREE_TYPE (treeop0)));
8581 poly_uint64 union_size = GET_MODE_BITSIZE (mode);
8582 store_field (target,
8583 /* The conversion must be constructed so that
8584 we know at compile time how many bits
8585 to preserve. */
8586 ordered_min (op0_size, union_size),
8587 0, 0, 0, TYPE_MODE (valtype), treeop0, 0,
8588 false, false);
8589 }
8590
8591 /* Return the entire union. */
8592 return target;
8593 }
8594
8595 if (mode == TYPE_MODE (TREE_TYPE (treeop0)))
8596 {
8597 op0 = expand_expr (treeop0, target, VOIDmode,
8598 modifier);
8599
8600 /* If the signedness of the conversion differs and OP0 is
8601 a promoted SUBREG, clear that indication since we now
8602 have to do the proper extension. */
8603 if (TYPE_UNSIGNED (TREE_TYPE (treeop0)) != unsignedp
8604 && GET_CODE (op0) == SUBREG)
8605 SUBREG_PROMOTED_VAR_P (op0) = 0;
8606
8607 return REDUCE_BIT_FIELD (op0);
8608 }
8609
8610 op0 = expand_expr (treeop0, NULL_RTX, mode,
8611 modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier);
8612 if (GET_MODE (op0) == mode)
8613 ;
8614
8615 /* If OP0 is a constant, just convert it into the proper mode. */
8616 else if (CONSTANT_P (op0))
8617 {
8618 tree inner_type = TREE_TYPE (treeop0);
8619 machine_mode inner_mode = GET_MODE (op0);
8620
8621 if (inner_mode == VOIDmode)
8622 inner_mode = TYPE_MODE (inner_type);
8623
8624 if (modifier == EXPAND_INITIALIZER)
8625 op0 = lowpart_subreg (mode, op0, inner_mode);
8626 else
8627 op0 = convert_modes (mode, inner_mode, op0,
8628 TYPE_UNSIGNED (inner_type));
8629 }
8630
8631 else if (modifier == EXPAND_INITIALIZER)
8632 op0 = gen_rtx_fmt_e (TYPE_UNSIGNED (TREE_TYPE (treeop0))
8633 ? ZERO_EXTEND : SIGN_EXTEND, mode, op0);
8634
8635 else if (target == 0)
8636 op0 = convert_to_mode (mode, op0,
8637 TYPE_UNSIGNED (TREE_TYPE
8638 (treeop0)));
8639 else
8640 {
8641 convert_move (target, op0,
8642 TYPE_UNSIGNED (TREE_TYPE (treeop0)));
8643 op0 = target;
8644 }
8645
8646 return REDUCE_BIT_FIELD (op0);
8647
8648 case ADDR_SPACE_CONVERT_EXPR:
8649 {
8650 tree treeop0_type = TREE_TYPE (treeop0);
8651
8652 gcc_assert (POINTER_TYPE_P (type));
8653 gcc_assert (POINTER_TYPE_P (treeop0_type));
8654
8655 addr_space_t as_to = TYPE_ADDR_SPACE (TREE_TYPE (type));
8656 addr_space_t as_from = TYPE_ADDR_SPACE (TREE_TYPE (treeop0_type));
8657
8658 /* Conversions between pointers to the same address space should
8659 have been implemented via CONVERT_EXPR / NOP_EXPR. */
8660 gcc_assert (as_to != as_from);
8661
8662 op0 = expand_expr (treeop0, NULL_RTX, VOIDmode, modifier);
8663
8664 /* Ask target code to handle conversion between pointers
8665 to overlapping address spaces. */
8666 if (targetm.addr_space.subset_p (as_to, as_from)
8667 || targetm.addr_space.subset_p (as_from, as_to))
8668 {
8669 op0 = targetm.addr_space.convert (op0, treeop0_type, type);
8670 }
8671 else
8672 {
8673 /* For disjoint address spaces, converting anything but a null
8674 pointer invokes undefined behavior. We truncate or extend the
8675 value as if we'd converted via integers, which handles 0 as
8676 required, and all others as the programmer likely expects. */
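	/* For instance, converting a null pointer between two disjoint
	   address spaces must still yield a null pointer: going through
	   convert_modes, the all-zero bit pattern stays zero whether it
	   is truncated or extended, while any other value is simply
	   reinterpreted bit-wise.  */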
8677 #ifndef POINTERS_EXTEND_UNSIGNED
8678 const int POINTERS_EXTEND_UNSIGNED = 1;
8679 #endif
8680 op0 = convert_modes (mode, TYPE_MODE (treeop0_type),
8681 op0, POINTERS_EXTEND_UNSIGNED);
8682 }
8683 gcc_assert (op0);
8684 return op0;
8685 }
8686
8687 case POINTER_PLUS_EXPR:
8688 /* Even though the sizetype mode and the pointer's mode can be different,
8689 expand is able to handle this correctly and get the correct result out
8690 of the PLUS_EXPR code. */
8691 /* Make sure to sign-extend the sizetype offset in a POINTER_PLUS_EXPR
8692 if sizetype precision is smaller than pointer precision. */
8693 if (TYPE_PRECISION (sizetype) < TYPE_PRECISION (type))
8694 treeop1 = fold_convert_loc (loc, type,
8695 fold_convert_loc (loc, ssizetype,
8696 treeop1));
8697 /* If sizetype precision is larger than pointer precision, truncate the
8698 offset to have matching modes. */
8699 else if (TYPE_PRECISION (sizetype) > TYPE_PRECISION (type))
8700 treeop1 = fold_convert_loc (loc, type, treeop1);
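      /* For instance, on a hypothetical target with a 16-bit sizetype
	 and 32-bit pointers, an offset of (sizetype) -4 is the bit
	 pattern 0xfffc; converting it through ssizetype first makes the
	 32-bit addition see -4 (0xfffffffc) instead of the zero-extended
	 0x0000fffc that a direct conversion to the pointer type would
	 produce.  */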
8701 /* FALLTHRU */
8702
8703 case PLUS_EXPR:
8704 /* If we are adding a constant, a VAR_DECL that is sp, fp, or ap, and
8705 something else, make sure we add the register to the constant and
8706 then to the other thing. This case can occur during strength
8707 reduction and doing it this way will produce better code if the
8708 frame pointer or argument pointer is eliminated.
8709
8710 fold-const.c will ensure that the constant is always in the inner
8711 PLUS_EXPR, so the only case we need to do anything about is if
8712 sp, ap, or fp is our second argument, in which case we must swap
8713 the innermost first argument and our second argument. */
8714
8715 if (TREE_CODE (treeop0) == PLUS_EXPR
8716 && TREE_CODE (TREE_OPERAND (treeop0, 1)) == INTEGER_CST
8717 && VAR_P (treeop1)
8718 && (DECL_RTL (treeop1) == frame_pointer_rtx
8719 || DECL_RTL (treeop1) == stack_pointer_rtx
8720 || DECL_RTL (treeop1) == arg_pointer_rtx))
8721 {
8722 gcc_unreachable ();
8723 }
8724
8725 /* If the result is to be ptr_mode and we are adding an integer to
8726 something, we might be forming a constant. So try to use
8727 plus_constant. If it produces a sum and we can't accept it,
8728 use force_operand. This allows P = &ARR[const] to generate
8729 efficient code on machines where a SYMBOL_REF is not a valid
8730 address.
8731
8732 If this is an EXPAND_SUM call, always return the sum. */
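      /* For instance, for a hypothetical P = &ARR[3] with 4-byte
	 elements, the symbolic operand expands under EXPAND_SUM to a
	 bare (symbol_ref "ARR"), and plus_constant folds the byte
	 offset in, giving something like
	   (const (plus (symbol_ref "ARR") (const_int 12)));
	 force_operand is only needed when the caller cannot accept such
	 a sum as an operand.  */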
8733 if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER
8734 || (mode == ptr_mode && (unsignedp || ! flag_trapv)))
8735 {
8736 if (modifier == EXPAND_STACK_PARM)
8737 target = 0;
8738 if (TREE_CODE (treeop0) == INTEGER_CST
8739 && HWI_COMPUTABLE_MODE_P (mode)
8740 && TREE_CONSTANT (treeop1))
8741 {
8742 rtx constant_part;
8743 HOST_WIDE_INT wc;
8744 machine_mode wmode = TYPE_MODE (TREE_TYPE (treeop1));
8745
8746 op1 = expand_expr (treeop1, subtarget, VOIDmode,
8747 EXPAND_SUM);
8748 /* Use wi::shwi to ensure that the constant is
8749 truncated according to the mode of OP1, then sign extended
8750 to a HOST_WIDE_INT. Using the constant directly can result
8751 in non-canonical RTL in a 64x32 cross compile. */
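	      /* For example, in a 64-bit-host cross compile for a
		 32-bit target, a constant whose low word reads back as
		 0x80000000 must not become (const_int 0x80000000);
		 wi::shwi truncates it to the 32-bit mode and the
		 resulting CONST_INT is the canonical sign-extended
		 -2147483648.  */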
8752 wc = TREE_INT_CST_LOW (treeop0);
8753 constant_part
8754 = immed_wide_int_const (wi::shwi (wc, wmode), wmode);
8755 op1 = plus_constant (mode, op1, INTVAL (constant_part));
8756 if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
8757 op1 = force_operand (op1, target);
8758 return REDUCE_BIT_FIELD (op1);
8759 }
8760
8761 else if (TREE_CODE (treeop1) == INTEGER_CST
8762 && HWI_COMPUTABLE_MODE_P (mode)
8763 && TREE_CONSTANT (treeop0))
8764 {
8765 rtx constant_part;
8766 HOST_WIDE_INT wc;
8767 machine_mode wmode = TYPE_MODE (TREE_TYPE (treeop0));
8768
8769 op0 = expand_expr (treeop0, subtarget, VOIDmode,
8770 (modifier == EXPAND_INITIALIZER
8771 ? EXPAND_INITIALIZER : EXPAND_SUM));
8772 if (! CONSTANT_P (op0))
8773 {
8774 op1 = expand_expr (treeop1, NULL_RTX,
8775 VOIDmode, modifier);
8776 /* Return a PLUS if modifier says it's OK. */
8777 if (modifier == EXPAND_SUM
8778 || modifier == EXPAND_INITIALIZER)
8779 return simplify_gen_binary (PLUS, mode, op0, op1);
8780 goto binop2;
8781 }
8782 /* Use wi::shwi to ensure that the constant is
8783 truncated according to the mode of OP1, then sign extended
8784 to a HOST_WIDE_INT. Using the constant directly can result
8785 in non-canonical RTL in a 64x32 cross compile. */
8786 wc = TREE_INT_CST_LOW (treeop1);
8787 constant_part
8788 = immed_wide_int_const (wi::shwi (wc, wmode), wmode);
8789 op0 = plus_constant (mode, op0, INTVAL (constant_part));
8790 if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
8791 op0 = force_operand (op0, target);
8792 return REDUCE_BIT_FIELD (op0);
8793 }
8794 }
8795
8796 /* Use TER to expand pointer addition of a negated value
8797 as pointer subtraction. */
8798 if ((POINTER_TYPE_P (TREE_TYPE (treeop0))
8799 || (TREE_CODE (TREE_TYPE (treeop0)) == VECTOR_TYPE
8800 && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (treeop0)))))
8801 && TREE_CODE (treeop1) == SSA_NAME
8802 && TYPE_MODE (TREE_TYPE (treeop0))
8803 == TYPE_MODE (TREE_TYPE (treeop1)))
8804 {
8805 gimple *def = get_def_for_expr (treeop1, NEGATE_EXPR);
8806 if (def)
8807 {
8808 treeop1 = gimple_assign_rhs1 (def);
8809 code = MINUS_EXPR;
8810 goto do_minus;
8811 }
8812 }
8813
8814 /* No sense saving up arithmetic to be done
8815 if it's all in the wrong mode to form part of an address.
8816 And force_operand won't know whether to sign-extend or
8817 zero-extend. */
8818 if (modifier != EXPAND_INITIALIZER
8819 && (modifier != EXPAND_SUM || mode != ptr_mode))
8820 {
8821 expand_operands (treeop0, treeop1,
8822 subtarget, &op0, &op1, modifier);
8823 if (op0 == const0_rtx)
8824 return op1;
8825 if (op1 == const0_rtx)
8826 return op0;
8827 goto binop2;
8828 }
8829
8830 expand_operands (treeop0, treeop1,
8831 subtarget, &op0, &op1, modifier);
8832 return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS, mode, op0, op1));
8833
8834 case MINUS_EXPR:
8835 case POINTER_DIFF_EXPR:
8836 do_minus:
8837 /* For initializers, we are allowed to return a MINUS of two
8838 symbolic constants. Here we handle all cases when both operands
8839 are constant. */
8840 /* Handle difference of two symbolic constants,
8841 for the sake of an initializer. */
8842 if ((modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
8843 && really_constant_p (treeop0)
8844 && really_constant_p (treeop1))
8845 {
8846 expand_operands (treeop0, treeop1,
8847 NULL_RTX, &op0, &op1, modifier);
8848 return simplify_gen_binary (MINUS, mode, op0, op1);
8849 }
8850
8851 /* No sense saving up arithmetic to be done
8852 if it's all in the wrong mode to form part of an address.
8853 And force_operand won't know whether to sign-extend or
8854 zero-extend. */
8855 if (modifier != EXPAND_INITIALIZER
8856 && (modifier != EXPAND_SUM || mode != ptr_mode))
8857 goto binop;
8858
8859 expand_operands (treeop0, treeop1,
8860 subtarget, &op0, &op1, modifier);
8861
8862 /* Convert A - const to A + (-const). */
8863 if (CONST_INT_P (op1))
8864 {
8865 op1 = negate_rtx (mode, op1);
8866 return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS, mode, op0, op1));
8867 }
8868
8869 goto binop2;
8870
8871 case WIDEN_MULT_PLUS_EXPR:
8872 case WIDEN_MULT_MINUS_EXPR:
8873 expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
8874 op2 = expand_normal (treeop2);
8875 target = expand_widen_pattern_expr (ops, op0, op1, op2,
8876 target, unsignedp);
8877 return target;
8878
8879 case WIDEN_MULT_EXPR:
8880 /* If first operand is constant, swap them.
8881 Thus the following special case checks need only
8882 check the second operand. */
8883 if (TREE_CODE (treeop0) == INTEGER_CST)
8884 std::swap (treeop0, treeop1);
8885
8886 /* First, check if we have a multiplication of one signed and one
8887 unsigned operand. */
8888 if (TREE_CODE (treeop1) != INTEGER_CST
8889 && (TYPE_UNSIGNED (TREE_TYPE (treeop0))
8890 != TYPE_UNSIGNED (TREE_TYPE (treeop1))))
8891 {
8892 machine_mode innermode = TYPE_MODE (TREE_TYPE (treeop0));
8893 this_optab = usmul_widen_optab;
8894 if (find_widening_optab_handler (this_optab, mode, innermode)
8895 != CODE_FOR_nothing)
8896 {
8897 if (TYPE_UNSIGNED (TREE_TYPE (treeop0)))
8898 expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1,
8899 EXPAND_NORMAL);
8900 else
8901 expand_operands (treeop0, treeop1, NULL_RTX, &op1, &op0,
8902 EXPAND_NORMAL);
8903 /* op0 and op1 might still be constant, despite the above
8904 != INTEGER_CST check. Handle it. */
8905 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
8906 {
8907 op0 = convert_modes (mode, innermode, op0, true);
8908 op1 = convert_modes (mode, innermode, op1, false);
8909 return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1,
8910 target, unsignedp));
8911 }
8912 goto binop3;
8913 }
8914 }
8915 /* Check for a multiplication with matching signedness. */
8916 else if ((TREE_CODE (treeop1) == INTEGER_CST
8917 && int_fits_type_p (treeop1, TREE_TYPE (treeop0)))
8918 || (TYPE_UNSIGNED (TREE_TYPE (treeop1))
8919 == TYPE_UNSIGNED (TREE_TYPE (treeop0))))
8920 {
8921 tree op0type = TREE_TYPE (treeop0);
8922 machine_mode innermode = TYPE_MODE (op0type);
8923 bool zextend_p = TYPE_UNSIGNED (op0type);
8924 optab other_optab = zextend_p ? smul_widen_optab : umul_widen_optab;
8925 this_optab = zextend_p ? umul_widen_optab : smul_widen_optab;
8926
8927 if (TREE_CODE (treeop0) != INTEGER_CST)
8928 {
8929 if (find_widening_optab_handler (this_optab, mode, innermode)
8930 != CODE_FOR_nothing)
8931 {
8932 expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1,
8933 EXPAND_NORMAL);
8934 /* op0 and op1 might still be constant, despite the above
8935 != INTEGER_CST check. Handle it. */
8936 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
8937 {
8938 widen_mult_const:
8939 op0 = convert_modes (mode, innermode, op0, zextend_p);
8940 op1
8941 = convert_modes (mode, innermode, op1,
8942 TYPE_UNSIGNED (TREE_TYPE (treeop1)));
8943 return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1,
8944 target,
8945 unsignedp));
8946 }
8947 temp = expand_widening_mult (mode, op0, op1, target,
8948 unsignedp, this_optab);
8949 return REDUCE_BIT_FIELD (temp);
8950 }
8951 if (find_widening_optab_handler (other_optab, mode, innermode)
8952 != CODE_FOR_nothing
8953 && innermode == word_mode)
8954 {
8955 rtx htem, hipart;
8956 op0 = expand_normal (treeop0);
8957 op1 = expand_normal (treeop1);
8958 /* op0 and op1 might be constants, despite the above
8959 != INTEGER_CST check. Handle it. */
8960 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
8961 goto widen_mult_const;
8962 if (TREE_CODE (treeop1) == INTEGER_CST)
8963 op1 = convert_modes (mode, word_mode, op1,
8964 TYPE_UNSIGNED (TREE_TYPE (treeop1)));
8965 temp = expand_binop (mode, other_optab, op0, op1, target,
8966 unsignedp, OPTAB_LIB_WIDEN);
8967 hipart = gen_highpart (word_mode, temp);
8968 htem = expand_mult_highpart_adjust (word_mode, hipart,
8969 op0, op1, hipart,
8970 zextend_p);
8971 if (htem != hipart)
8972 emit_move_insn (hipart, htem);
8973 return REDUCE_BIT_FIELD (temp);
8974 }
8975 }
8976 }
8977 treeop0 = fold_build1 (CONVERT_EXPR, type, treeop0);
8978 treeop1 = fold_build1 (CONVERT_EXPR, type, treeop1);
8979 expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
8980 return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1, target, unsignedp));
8981
8982 case MULT_EXPR:
8983 /* If this is a fixed-point operation, then we cannot use the code
8984 below because "expand_mult" doesn't support sat/no-sat fixed-point
8985 multiplications. */
8986 if (ALL_FIXED_POINT_MODE_P (mode))
8987 goto binop;
8988
8989 /* If first operand is constant, swap them.
8990 Thus the following special case checks need only
8991 check the second operand. */
8992 if (TREE_CODE (treeop0) == INTEGER_CST)
8993 std::swap (treeop0, treeop1);
8994
8995 /* Attempt to return something suitable for generating an
8996 indexed address, for machines that support that. */
8997
8998 if (modifier == EXPAND_SUM && mode == ptr_mode
8999 && tree_fits_shwi_p (treeop1))
9000 {
9001 tree exp1 = treeop1;
9002
9003 op0 = expand_expr (treeop0, subtarget, VOIDmode,
9004 EXPAND_SUM);
9005
9006 if (!REG_P (op0))
9007 op0 = force_operand (op0, NULL_RTX);
9008 if (!REG_P (op0))
9009 op0 = copy_to_mode_reg (mode, op0);
9010
9011 return REDUCE_BIT_FIELD (gen_rtx_MULT (mode, op0,
9012 gen_int_mode (tree_to_shwi (exp1),
9013 TYPE_MODE (TREE_TYPE (exp1)))));
9014 }
9015
9016 if (modifier == EXPAND_STACK_PARM)
9017 target = 0;
9018
9019 expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
9020 return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1, target, unsignedp));
9021
9022 case TRUNC_MOD_EXPR:
9023 case FLOOR_MOD_EXPR:
9024 case CEIL_MOD_EXPR:
9025 case ROUND_MOD_EXPR:
9026
9027 case TRUNC_DIV_EXPR:
9028 case FLOOR_DIV_EXPR:
9029 case CEIL_DIV_EXPR:
9030 case ROUND_DIV_EXPR:
9031 case EXACT_DIV_EXPR:
9032 {
9033 /* If this is a fixed-point operation, then we cannot use the code
9034 below because "expand_divmod" doesn't support sat/no-sat fixed-point
9035 divisions. */
9036 if (ALL_FIXED_POINT_MODE_P (mode))
9037 goto binop;
9038
9039 if (modifier == EXPAND_STACK_PARM)
9040 target = 0;
9041 /* Possible optimization: compute the dividend with EXPAND_SUM
9042 then if the divisor is constant can optimize the case
9043 where some terms of the dividend have coeffs divisible by it. */
9044 expand_operands (treeop0, treeop1,
9045 subtarget, &op0, &op1, EXPAND_NORMAL);
9046 bool mod_p = code == TRUNC_MOD_EXPR || code == FLOOR_MOD_EXPR
9047 || code == CEIL_MOD_EXPR || code == ROUND_MOD_EXPR;
9048 if (SCALAR_INT_MODE_P (mode)
9049 && optimize >= 2
9050 && get_range_pos_neg (treeop0) == 1
9051 && get_range_pos_neg (treeop1) == 1)
9052 {
9053 /* If both arguments are known to be positive when interpreted
9054 as signed, we can expand it as both signed and unsigned
9055 division or modulo. Choose the cheaper sequence in that case. */
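	    /* For instance, if treeop0 is known to be non-negative and
	       treeop1 is the constant 8, the unsigned expansion is a
	       single logical right shift, while the signed expansion
	       needs extra insns to round the quotient toward zero for
	       negative dividends; the cost comparison below will
	       normally pick the unsigned sequence.  */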
9056 bool speed_p = optimize_insn_for_speed_p ();
9057 do_pending_stack_adjust ();
9058 start_sequence ();
9059 rtx uns_ret = expand_divmod (mod_p, code, mode, op0, op1, target, 1);
9060 rtx_insn *uns_insns = get_insns ();
9061 end_sequence ();
9062 start_sequence ();
9063 rtx sgn_ret = expand_divmod (mod_p, code, mode, op0, op1, target, 0);
9064 rtx_insn *sgn_insns = get_insns ();
9065 end_sequence ();
9066 unsigned uns_cost = seq_cost (uns_insns, speed_p);
9067 unsigned sgn_cost = seq_cost (sgn_insns, speed_p);
9068
9069 /* If the costs are the same then use the other factor as a
9070 tie breaker. */
9071 if (uns_cost == sgn_cost)
9072 {
9073 uns_cost = seq_cost (uns_insns, !speed_p);
9074 sgn_cost = seq_cost (sgn_insns, !speed_p);
9075 }
9076
9077 if (uns_cost < sgn_cost || (uns_cost == sgn_cost && unsignedp))
9078 {
9079 emit_insn (uns_insns);
9080 return uns_ret;
9081 }
9082 emit_insn (sgn_insns);
9083 return sgn_ret;
9084 }
9085 return expand_divmod (mod_p, code, mode, op0, op1, target, unsignedp);
9086 }
9087 case RDIV_EXPR:
9088 goto binop;
9089
9090 case MULT_HIGHPART_EXPR:
9091 expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
9092 temp = expand_mult_highpart (mode, op0, op1, target, unsignedp);
9093 gcc_assert (temp);
9094 return temp;
9095
9096 case FIXED_CONVERT_EXPR:
9097 op0 = expand_normal (treeop0);
9098 if (target == 0 || modifier == EXPAND_STACK_PARM)
9099 target = gen_reg_rtx (mode);
9100
9101 if ((TREE_CODE (TREE_TYPE (treeop0)) == INTEGER_TYPE
9102 && TYPE_UNSIGNED (TREE_TYPE (treeop0)))
9103 || (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type)))
9104 expand_fixed_convert (target, op0, 1, TYPE_SATURATING (type));
9105 else
9106 expand_fixed_convert (target, op0, 0, TYPE_SATURATING (type));
9107 return target;
9108
9109 case FIX_TRUNC_EXPR:
9110 op0 = expand_normal (treeop0);
9111 if (target == 0 || modifier == EXPAND_STACK_PARM)
9112 target = gen_reg_rtx (mode);
9113 expand_fix (target, op0, unsignedp);
9114 return target;
9115
9116 case FLOAT_EXPR:
9117 op0 = expand_normal (treeop0);
9118 if (target == 0 || modifier == EXPAND_STACK_PARM)
9119 target = gen_reg_rtx (mode);
9120 /* expand_float can't figure out what to do if FROM has VOIDmode.
9121 So give it the correct mode. With -O, cse will optimize this. */
9122 if (GET_MODE (op0) == VOIDmode)
9123 op0 = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (treeop0)),
9124 op0);
9125 expand_float (target, op0,
9126 TYPE_UNSIGNED (TREE_TYPE (treeop0)));
9127 return target;
9128
9129 case NEGATE_EXPR:
9130 op0 = expand_expr (treeop0, subtarget,
9131 VOIDmode, EXPAND_NORMAL);
9132 if (modifier == EXPAND_STACK_PARM)
9133 target = 0;
9134 temp = expand_unop (mode,
9135 optab_for_tree_code (NEGATE_EXPR, type,
9136 optab_default),
9137 op0, target, 0);
9138 gcc_assert (temp);
9139 return REDUCE_BIT_FIELD (temp);
9140
9141 case ABS_EXPR:
9142 case ABSU_EXPR:
9143 op0 = expand_expr (treeop0, subtarget,
9144 VOIDmode, EXPAND_NORMAL);
9145 if (modifier == EXPAND_STACK_PARM)
9146 target = 0;
9147
9148 /* ABS_EXPR is not valid for complex arguments. */
9149 gcc_assert (GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
9150 && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT);
9151
9152 /* Unsigned abs is simply the operand. Testing here means we don't
9153 risk generating incorrect code below. */
9154 if (TYPE_UNSIGNED (TREE_TYPE (treeop0)))
9155 return op0;
9156
9157 return expand_abs (mode, op0, target, unsignedp,
9158 safe_from_p (target, treeop0, 1));
9159
9160 case MAX_EXPR:
9161 case MIN_EXPR:
9162 target = original_target;
9163 if (target == 0
9164 || modifier == EXPAND_STACK_PARM
9165 || (MEM_P (target) && MEM_VOLATILE_P (target))
9166 || GET_MODE (target) != mode
9167 || (REG_P (target)
9168 && REGNO (target) < FIRST_PSEUDO_REGISTER))
9169 target = gen_reg_rtx (mode);
9170 expand_operands (treeop0, treeop1,
9171 target, &op0, &op1, EXPAND_NORMAL);
9172
9173 /* First try to do it with a special MIN or MAX instruction.
9174 If that does not win, use a conditional jump to select the proper
9175 value. */
9176 this_optab = optab_for_tree_code (code, type, optab_default);
9177 temp = expand_binop (mode, this_optab, op0, op1, target, unsignedp,
9178 OPTAB_WIDEN);
9179 if (temp != 0)
9180 return temp;
9181
9182 /* For vector MIN <x, y>, expand it as a VEC_COND_EXPR <x <= y, x, y>
9183 and similarly for MAX <x, y>. */
9184 if (VECTOR_TYPE_P (type))
9185 {
9186 tree t0 = make_tree (type, op0);
9187 tree t1 = make_tree (type, op1);
9188 tree comparison = build2 (code == MIN_EXPR ? LE_EXPR : GE_EXPR,
9189 type, t0, t1);
9190 return expand_vec_cond_expr (type, comparison, t0, t1,
9191 original_target);
9192 }
9193
9194 /* At this point, a MEM target is no longer useful; we will get better
9195 code without it. */
9196
9197 if (! REG_P (target))
9198 target = gen_reg_rtx (mode);
9199
9200 /* If op1 was placed in target, swap op0 and op1. */
9201 if (target != op0 && target == op1)
9202 std::swap (op0, op1);
9203
9204 /* We generate better code and avoid problems with op1 mentioning
9205 target by forcing op1 into a pseudo if it isn't a constant. */
9206 if (! CONSTANT_P (op1))
9207 op1 = force_reg (mode, op1);
9208
9209 {
9210 enum rtx_code comparison_code;
9211 rtx cmpop1 = op1;
9212
9213 if (code == MAX_EXPR)
9214 comparison_code = unsignedp ? GEU : GE;
9215 else
9216 comparison_code = unsignedp ? LEU : LE;
9217
9218 /* Canonicalize to comparisons against 0. */
9219 if (op1 == const1_rtx)
9220 {
9221 /* Converting (a >= 1 ? a : 1) into (a > 0 ? a : 1)
9222 or (a != 0 ? a : 1) for unsigned.
9223 For MIN we are safe converting (a <= 1 ? a : 1)
9224 into (a <= 0 ? a : 1) */
9225 cmpop1 = const0_rtx;
9226 if (code == MAX_EXPR)
9227 comparison_code = unsignedp ? NE : GT;
9228 }
9229 if (op1 == constm1_rtx && !unsignedp)
9230 {
9231 /* Converting (a >= -1 ? a : -1) into (a >= 0 ? a : -1)
9232 and (a <= -1 ? a : -1) into (a < 0 ? a : -1) */
9233 cmpop1 = const0_rtx;
9234 if (code == MIN_EXPR)
9235 comparison_code = LT;
9236 }
9237
9238 /* Use a conditional move if possible. */
9239 if (can_conditionally_move_p (mode))
9240 {
9241 rtx insn;
9242
9243 start_sequence ();
9244
9245 /* Try to emit the conditional move. */
9246 insn = emit_conditional_move (target, comparison_code,
9247 op0, cmpop1, mode,
9248 op0, op1, mode,
9249 unsignedp);
9250
9251 /* If we could do the conditional move, emit the sequence,
9252 and return. */
9253 if (insn)
9254 {
9255 rtx_insn *seq = get_insns ();
9256 end_sequence ();
9257 emit_insn (seq);
9258 return target;
9259 }
9260
9261 /* Otherwise discard the sequence and fall back to code with
9262 branches. */
9263 end_sequence ();
9264 }
9265
9266 if (target != op0)
9267 emit_move_insn (target, op0);
9268
9269 lab = gen_label_rtx ();
9270 do_compare_rtx_and_jump (target, cmpop1, comparison_code,
9271 unsignedp, mode, NULL_RTX, NULL, lab,
9272 profile_probability::uninitialized ());
9273 }
9274 emit_move_insn (target, op1);
9275 emit_label (lab);
9276 return target;
9277
9278 case BIT_NOT_EXPR:
9279 op0 = expand_expr (treeop0, subtarget,
9280 VOIDmode, EXPAND_NORMAL);
9281 if (modifier == EXPAND_STACK_PARM)
9282 target = 0;
9283 /* In case we have to reduce the result to bitfield precision
9284 for unsigned bitfield expand this as XOR with a proper constant
9285 instead. */
9286 if (reduce_bit_field && TYPE_UNSIGNED (type))
9287 {
9288 int_mode = SCALAR_INT_TYPE_MODE (type);
9289 wide_int mask = wi::mask (TYPE_PRECISION (type),
9290 false, GET_MODE_PRECISION (int_mode));
9291
9292 temp = expand_binop (int_mode, xor_optab, op0,
9293 immed_wide_int_const (mask, int_mode),
9294 target, 1, OPTAB_LIB_WIDEN);
9295 }
9296 else
9297 temp = expand_unop (mode, one_cmpl_optab, op0, target, 1);
9298 gcc_assert (temp);
9299 return temp;
9300
9301 /* ??? Can optimize bitwise operations with one arg constant.
9302 Can optimize (a bitwise1 n) bitwise2 (a bitwise3 b)
9303 and (a bitwise1 b) bitwise2 b (etc)
9304 but that is probably not worth while. */
9305
9306 case BIT_AND_EXPR:
9307 case BIT_IOR_EXPR:
9308 case BIT_XOR_EXPR:
9309 goto binop;
9310
9311 case LROTATE_EXPR:
9312 case RROTATE_EXPR:
9313 gcc_assert (VECTOR_MODE_P (TYPE_MODE (type))
9314 || type_has_mode_precision_p (type));
9315 /* fall through */
9316
9317 case LSHIFT_EXPR:
9318 case RSHIFT_EXPR:
9319 {
9320 /* If this is a fixed-point operation, then we cannot use the code
9321 below because "expand_shift" doesn't support sat/no-sat fixed-point
9322 shifts. */
9323 if (ALL_FIXED_POINT_MODE_P (mode))
9324 goto binop;
9325
9326 if (! safe_from_p (subtarget, treeop1, 1))
9327 subtarget = 0;
9328 if (modifier == EXPAND_STACK_PARM)
9329 target = 0;
9330 op0 = expand_expr (treeop0, subtarget,
9331 VOIDmode, EXPAND_NORMAL);
9332
9333 /* Left shift optimization when shifting across word_size boundary.
9334
9335 If mode == GET_MODE_WIDER_MODE (word_mode), then normally
9336 there is no native instruction to support a left shift in this
9337 wide mode. Given the scenario below:
9338
9339 Type A = (Type) B << C
9340
9341 |< T >|
9342 | dest_high | dest_low |
9343
9344 | word_size |
9345
9346 If the shift amount C causes B to be shifted across the word
9347 size boundary, i.e. part of B is shifted into the high half of
9348 the destination register while part of B remains in the low
9349 half, then GCC will use the following left shift expansion
9350 logic:
9351
9352 1. Initialize dest_low to B.
9353 2. Initialize every bit of dest_high to the sign bit of B.
9354 3. Logically shift dest_low left by C bits to finalize dest_low.
9355 The value of dest_low before this shift is kept in a temp D.
9356 4. Logically shift dest_high left by C bits.
9357 5. Logically shift D right by (word_size - C) bits.
9358 6. Or the result of 4 and 5 to finalize dest_high.
9359
9360 However, by checking the gimple statements, if operand B comes
9361 from a sign extension, then we can simplify the above expansion
9362 logic into:
9363
9364 1. dest_high = src_low >> (word_size - C).
9365 2. dest_low = src_low << C.
9366
9367 A single arithmetic right shift accomplishes what steps 2, 4, 5
9368 and 6 did above, thus reducing the number of steps needed
9369 from 6 to 2.
9370
9371 The case is similar for zero extension, except that we
9372 initialize dest_high to zero rather than copies of the sign
9373 bit from B. Furthermore, we need to use a logical right shift
9374 in this case.
9375
9376 The choice of sign-extension versus zero-extension is
9377 determined entirely by whether or not B is signed and is
9378 independent of the current setting of unsignedp. */
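	   /* As a concrete example, suppose word_mode is 64 bits, mode
	      is a 128-bit integer mode, B was sign-extended from a
	      32-bit value and C is 40.  Since 40 + 32 >= 64, bits of B
	      straddle the word boundary, so instead of the six-step
	      sequence we emit only

		dest_high = src_low >> 24;   (arithmetic shift, 24 = 64 - 40)
		dest_low  = src_low << 40;  */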
9379
9380 temp = NULL_RTX;
9381 if (code == LSHIFT_EXPR
9382 && target
9383 && REG_P (target)
9384 && GET_MODE_2XWIDER_MODE (word_mode).exists (&int_mode)
9385 && mode == int_mode
9386 && TREE_CONSTANT (treeop1)
9387 && TREE_CODE (treeop0) == SSA_NAME)
9388 {
9389 gimple *def = SSA_NAME_DEF_STMT (treeop0);
9390 if (is_gimple_assign (def)
9391 && gimple_assign_rhs_code (def) == NOP_EXPR)
9392 {
9393 scalar_int_mode rmode = SCALAR_INT_TYPE_MODE
9394 (TREE_TYPE (gimple_assign_rhs1 (def)));
9395
9396 if (GET_MODE_SIZE (rmode) < GET_MODE_SIZE (int_mode)
9397 && TREE_INT_CST_LOW (treeop1) < GET_MODE_BITSIZE (word_mode)
9398 && ((TREE_INT_CST_LOW (treeop1) + GET_MODE_BITSIZE (rmode))
9399 >= GET_MODE_BITSIZE (word_mode)))
9400 {
9401 rtx_insn *seq, *seq_old;
9402 poly_uint64 high_off = subreg_highpart_offset (word_mode,
9403 int_mode);
9404 bool extend_unsigned
9405 = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def)));
9406 rtx low = lowpart_subreg (word_mode, op0, int_mode);
9407 rtx dest_low = lowpart_subreg (word_mode, target, int_mode);
9408 rtx dest_high = simplify_gen_subreg (word_mode, target,
9409 int_mode, high_off);
9410 HOST_WIDE_INT ramount = (BITS_PER_WORD
9411 - TREE_INT_CST_LOW (treeop1));
9412 tree rshift = build_int_cst (TREE_TYPE (treeop1), ramount);
9413
9414 start_sequence ();
9415 /* dest_high = src_low >> (word_size - C). */
9416 temp = expand_variable_shift (RSHIFT_EXPR, word_mode, low,
9417 rshift, dest_high,
9418 extend_unsigned);
9419 if (temp != dest_high)
9420 emit_move_insn (dest_high, temp);
9421
9422 /* dest_low = src_low << C. */
9423 temp = expand_variable_shift (LSHIFT_EXPR, word_mode, low,
9424 treeop1, dest_low, unsignedp);
9425 if (temp != dest_low)
9426 emit_move_insn (dest_low, temp);
9427
9428 seq = get_insns ();
9429 end_sequence ();
9430 temp = target;
9431
9432 if (have_insn_for (ASHIFT, int_mode))
9433 {
9434 bool speed_p = optimize_insn_for_speed_p ();
9435 start_sequence ();
9436 rtx ret_old = expand_variable_shift (code, int_mode,
9437 op0, treeop1,
9438 target,
9439 unsignedp);
9440
9441 seq_old = get_insns ();
9442 end_sequence ();
9443 if (seq_cost (seq, speed_p)
9444 >= seq_cost (seq_old, speed_p))
9445 {
9446 seq = seq_old;
9447 temp = ret_old;
9448 }
9449 }
9450 emit_insn (seq);
9451 }
9452 }
9453 }
9454
9455 if (temp == NULL_RTX)
9456 temp = expand_variable_shift (code, mode, op0, treeop1, target,
9457 unsignedp);
9458 if (code == LSHIFT_EXPR)
9459 temp = REDUCE_BIT_FIELD (temp);
9460 return temp;
9461 }
9462
9463 /* Could determine the answer when only additive constants differ. Also,
9464 the addition of one can be handled by changing the condition. */
9465 case LT_EXPR:
9466 case LE_EXPR:
9467 case GT_EXPR:
9468 case GE_EXPR:
9469 case EQ_EXPR:
9470 case NE_EXPR:
9471 case UNORDERED_EXPR:
9472 case ORDERED_EXPR:
9473 case UNLT_EXPR:
9474 case UNLE_EXPR:
9475 case UNGT_EXPR:
9476 case UNGE_EXPR:
9477 case UNEQ_EXPR:
9478 case LTGT_EXPR:
9479 {
9480 temp = do_store_flag (ops,
9481 modifier != EXPAND_STACK_PARM ? target : NULL_RTX,
9482 tmode != VOIDmode ? tmode : mode);
9483 if (temp)
9484 return temp;
9485
9486 /* Use a compare and a jump for BLKmode comparisons, or for function
9487 type comparisons if have_canonicalize_funcptr_for_compare. */
9488
9489 if ((target == 0
9490 || modifier == EXPAND_STACK_PARM
9491 || ! safe_from_p (target, treeop0, 1)
9492 || ! safe_from_p (target, treeop1, 1)
9493 /* Make sure we don't have a hard reg (such as function's return
9494 value) live across basic blocks, if not optimizing. */
9495 || (!optimize && REG_P (target)
9496 && REGNO (target) < FIRST_PSEUDO_REGISTER)))
9497 target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
9498
9499 emit_move_insn (target, const0_rtx);
9500
9501 rtx_code_label *lab1 = gen_label_rtx ();
9502 jumpifnot_1 (code, treeop0, treeop1, lab1,
9503 profile_probability::uninitialized ());
9504
9505 if (TYPE_PRECISION (type) == 1 && !TYPE_UNSIGNED (type))
9506 emit_move_insn (target, constm1_rtx);
9507 else
9508 emit_move_insn (target, const1_rtx);
9509
9510 emit_label (lab1);
9511 return target;
9512 }
9513 case COMPLEX_EXPR:
9514 /* Get the rtx code of the operands. */
9515 op0 = expand_normal (treeop0);
9516 op1 = expand_normal (treeop1);
9517
9518 if (!target)
9519 target = gen_reg_rtx (TYPE_MODE (type));
9520 else
9521 /* If target overlaps with op1, then either we need to force
9522 op1 into a pseudo (if target also overlaps with op0),
9523 or write the complex parts in reverse order. */
9524 switch (GET_CODE (target))
9525 {
9526 case CONCAT:
9527 if (reg_overlap_mentioned_p (XEXP (target, 0), op1))
9528 {
9529 if (reg_overlap_mentioned_p (XEXP (target, 1), op0))
9530 {
9531 complex_expr_force_op1:
9532 temp = gen_reg_rtx (GET_MODE_INNER (GET_MODE (target)));
9533 emit_move_insn (temp, op1);
9534 op1 = temp;
9535 break;
9536 }
9537 complex_expr_swap_order:
9538 /* Move the imaginary (op1) and real (op0) parts to their
9539 location. */
9540 write_complex_part (target, op1, true);
9541 write_complex_part (target, op0, false);
9542
9543 return target;
9544 }
9545 break;
9546 case MEM:
9547 temp = adjust_address_nv (target,
9548 GET_MODE_INNER (GET_MODE (target)), 0);
9549 if (reg_overlap_mentioned_p (temp, op1))
9550 {
9551 scalar_mode imode = GET_MODE_INNER (GET_MODE (target));
9552 temp = adjust_address_nv (target, imode,
9553 GET_MODE_SIZE (imode));
9554 if (reg_overlap_mentioned_p (temp, op0))
9555 goto complex_expr_force_op1;
9556 goto complex_expr_swap_order;
9557 }
9558 break;
9559 default:
9560 if (reg_overlap_mentioned_p (target, op1))
9561 {
9562 if (reg_overlap_mentioned_p (target, op0))
9563 goto complex_expr_force_op1;
9564 goto complex_expr_swap_order;
9565 }
9566 break;
9567 }
9568
9569 /* Move the real (op0) and imaginary (op1) parts to their location. */
9570 write_complex_part (target, op0, false);
9571 write_complex_part (target, op1, true);
9572
9573 return target;
9574
9575 case WIDEN_SUM_EXPR:
9576 {
9577 tree oprnd0 = treeop0;
9578 tree oprnd1 = treeop1;
9579
9580 expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9581 target = expand_widen_pattern_expr (ops, op0, NULL_RTX, op1,
9582 target, unsignedp);
9583 return target;
9584 }
9585
9586 case VEC_UNPACK_HI_EXPR:
9587 case VEC_UNPACK_LO_EXPR:
9588 case VEC_UNPACK_FIX_TRUNC_HI_EXPR:
9589 case VEC_UNPACK_FIX_TRUNC_LO_EXPR:
9590 {
9591 op0 = expand_normal (treeop0);
9592 temp = expand_widen_pattern_expr (ops, op0, NULL_RTX, NULL_RTX,
9593 target, unsignedp);
9594 gcc_assert (temp);
9595 return temp;
9596 }
9597
9598 case VEC_UNPACK_FLOAT_HI_EXPR:
9599 case VEC_UNPACK_FLOAT_LO_EXPR:
9600 {
9601 op0 = expand_normal (treeop0);
9602 /* The signedness is determined from input operand. */
9603 temp = expand_widen_pattern_expr
9604 (ops, op0, NULL_RTX, NULL_RTX,
9605 target, TYPE_UNSIGNED (TREE_TYPE (treeop0)));
9606
9607 gcc_assert (temp);
9608 return temp;
9609 }
9610
9611 case VEC_WIDEN_MULT_HI_EXPR:
9612 case VEC_WIDEN_MULT_LO_EXPR:
9613 case VEC_WIDEN_MULT_EVEN_EXPR:
9614 case VEC_WIDEN_MULT_ODD_EXPR:
9615 case VEC_WIDEN_LSHIFT_HI_EXPR:
9616 case VEC_WIDEN_LSHIFT_LO_EXPR:
9617 expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9618 target = expand_widen_pattern_expr (ops, op0, op1, NULL_RTX,
9619 target, unsignedp);
9620 gcc_assert (target);
9621 return target;
9622
9623 case VEC_PACK_SAT_EXPR:
9624 case VEC_PACK_FIX_TRUNC_EXPR:
9625 mode = TYPE_MODE (TREE_TYPE (treeop0));
9626 goto binop;
9627
9628 case VEC_PACK_TRUNC_EXPR:
9629 if (VECTOR_BOOLEAN_TYPE_P (type)
9630 && VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (treeop0))
9631 && mode == TYPE_MODE (TREE_TYPE (treeop0))
9632 && SCALAR_INT_MODE_P (mode))
9633 {
9634 class expand_operand eops[4];
9635 machine_mode imode = TYPE_MODE (TREE_TYPE (treeop0));
9636 expand_operands (treeop0, treeop1,
9637 subtarget, &op0, &op1, EXPAND_NORMAL);
9638 this_optab = vec_pack_sbool_trunc_optab;
9639 enum insn_code icode = optab_handler (this_optab, imode);
9640 create_output_operand (&eops[0], target, mode);
9641 create_convert_operand_from (&eops[1], op0, imode, false);
9642 create_convert_operand_from (&eops[2], op1, imode, false);
9643 temp = GEN_INT (TYPE_VECTOR_SUBPARTS (type).to_constant ());
9644 create_input_operand (&eops[3], temp, imode);
9645 expand_insn (icode, 4, eops);
9646 return eops[0].value;
9647 }
9648 mode = TYPE_MODE (TREE_TYPE (treeop0));
9649 goto binop;
9650
9651 case VEC_PACK_FLOAT_EXPR:
9652 mode = TYPE_MODE (TREE_TYPE (treeop0));
9653 expand_operands (treeop0, treeop1,
9654 subtarget, &op0, &op1, EXPAND_NORMAL);
9655 this_optab = optab_for_tree_code (code, TREE_TYPE (treeop0),
9656 optab_default);
9657 target = expand_binop (mode, this_optab, op0, op1, target,
9658 TYPE_UNSIGNED (TREE_TYPE (treeop0)),
9659 OPTAB_LIB_WIDEN);
9660 gcc_assert (target);
9661 return target;
9662
9663 case VEC_PERM_EXPR:
9664 {
9665 expand_operands (treeop0, treeop1, target, &op0, &op1, EXPAND_NORMAL);
9666 vec_perm_builder sel;
9667 if (TREE_CODE (treeop2) == VECTOR_CST
9668 && tree_to_vec_perm_builder (&sel, treeop2))
9669 {
9670 machine_mode sel_mode = TYPE_MODE (TREE_TYPE (treeop2));
9671 temp = expand_vec_perm_const (mode, op0, op1, sel,
9672 sel_mode, target);
9673 }
9674 else
9675 {
9676 op2 = expand_normal (treeop2);
9677 temp = expand_vec_perm_var (mode, op0, op1, op2, target);
9678 }
9679 gcc_assert (temp);
9680 return temp;
9681 }
9682
9683 case DOT_PROD_EXPR:
9684 {
9685 tree oprnd0 = treeop0;
9686 tree oprnd1 = treeop1;
9687 tree oprnd2 = treeop2;
9688 rtx op2;
9689
9690 expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9691 op2 = expand_normal (oprnd2);
9692 target = expand_widen_pattern_expr (ops, op0, op1, op2,
9693 target, unsignedp);
9694 return target;
9695 }
9696
9697 case SAD_EXPR:
9698 {
9699 tree oprnd0 = treeop0;
9700 tree oprnd1 = treeop1;
9701 tree oprnd2 = treeop2;
9702 rtx op2;
9703
9704 expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9705 op2 = expand_normal (oprnd2);
9706 target = expand_widen_pattern_expr (ops, op0, op1, op2,
9707 target, unsignedp);
9708 return target;
9709 }
9710
9711 case REALIGN_LOAD_EXPR:
9712 {
9713 tree oprnd0 = treeop0;
9714 tree oprnd1 = treeop1;
9715 tree oprnd2 = treeop2;
9716 rtx op2;
9717
9718 this_optab = optab_for_tree_code (code, type, optab_default);
9719 expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9720 op2 = expand_normal (oprnd2);
9721 temp = expand_ternary_op (mode, this_optab, op0, op1, op2,
9722 target, unsignedp);
9723 gcc_assert (temp);
9724 return temp;
9725 }
9726
9727 case COND_EXPR:
9728 {
9729 /* A COND_EXPR with its type being VOID_TYPE represents a
9730 conditional jump and is handled in
9731 expand_gimple_cond_expr. */
9732 gcc_assert (!VOID_TYPE_P (type));
9733
9734 /* Note that COND_EXPRs whose type is a structure or union
9735 are required to be constructed to contain assignments of
9736 a temporary variable, so that we can evaluate them here
9737 for side effect only. If type is void, we must do likewise. */
9738
9739 gcc_assert (!TREE_ADDRESSABLE (type)
9740 && !ignore
9741 && TREE_TYPE (treeop1) != void_type_node
9742 && TREE_TYPE (treeop2) != void_type_node);
9743
9744 temp = expand_cond_expr_using_cmove (treeop0, treeop1, treeop2);
9745 if (temp)
9746 return temp;
9747
9748 /* If we are not to produce a result, we have no target. Otherwise,
9749 if a target was specified use it; it will not be used as an
9750 intermediate target unless it is safe. If no target, use a
9751 temporary. */
9752
9753 if (modifier != EXPAND_STACK_PARM
9754 && original_target
9755 && safe_from_p (original_target, treeop0, 1)
9756 && GET_MODE (original_target) == mode
9757 && !MEM_P (original_target))
9758 temp = original_target;
9759 else
9760 temp = assign_temp (type, 0, 1);
9761
9762 do_pending_stack_adjust ();
9763 NO_DEFER_POP;
9764 rtx_code_label *lab0 = gen_label_rtx ();
9765 rtx_code_label *lab1 = gen_label_rtx ();
9766 jumpifnot (treeop0, lab0,
9767 profile_probability::uninitialized ());
9768 store_expr (treeop1, temp,
9769 modifier == EXPAND_STACK_PARM,
9770 false, false);
9771
9772 emit_jump_insn (targetm.gen_jump (lab1));
9773 emit_barrier ();
9774 emit_label (lab0);
9775 store_expr (treeop2, temp,
9776 modifier == EXPAND_STACK_PARM,
9777 false, false);
9778
9779 emit_label (lab1);
9780 OK_DEFER_POP;
9781 return temp;
9782 }
9783
9784 case VEC_COND_EXPR:
9785 target = expand_vec_cond_expr (type, treeop0, treeop1, treeop2, target);
9786 return target;
9787
9788 case VEC_DUPLICATE_EXPR:
9789 op0 = expand_expr (treeop0, NULL_RTX, VOIDmode, modifier);
9790 target = expand_vector_broadcast (mode, op0);
9791 gcc_assert (target);
9792 return target;
9793
9794 case VEC_SERIES_EXPR:
9795 expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, modifier);
9796 return expand_vec_series_expr (mode, op0, op1, target);
9797
9798 case BIT_INSERT_EXPR:
9799 {
9800 unsigned bitpos = tree_to_uhwi (treeop2);
9801 unsigned bitsize;
9802 if (INTEGRAL_TYPE_P (TREE_TYPE (treeop1)))
9803 bitsize = TYPE_PRECISION (TREE_TYPE (treeop1));
9804 else
9805 bitsize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (treeop1)));
9806 rtx op0 = expand_normal (treeop0);
9807 rtx op1 = expand_normal (treeop1);
9808 rtx dst = gen_reg_rtx (mode);
9809 emit_move_insn (dst, op0);
9810 store_bit_field (dst, bitsize, bitpos, 0, 0,
9811 TYPE_MODE (TREE_TYPE (treeop1)), op1, false);
9812 return dst;
9813 }
9814
9815 default:
9816 gcc_unreachable ();
9817 }
9818
9819 /* Here to do an ordinary binary operator. */
9820 binop:
9821 expand_operands (treeop0, treeop1,
9822 subtarget, &op0, &op1, EXPAND_NORMAL);
9823 binop2:
9824 this_optab = optab_for_tree_code (code, type, optab_default);
9825 binop3:
9826 if (modifier == EXPAND_STACK_PARM)
9827 target = 0;
9828 temp = expand_binop (mode, this_optab, op0, op1, target,
9829 unsignedp, OPTAB_LIB_WIDEN);
9830 gcc_assert (temp);
9831 /* Bitwise operations do not need bitfield reduction as we expect their
9832 operands to be properly truncated. */
9833 if (code == BIT_XOR_EXPR
9834 || code == BIT_AND_EXPR
9835 || code == BIT_IOR_EXPR)
9836 return temp;
9837 return REDUCE_BIT_FIELD (temp);
9838 }
9839 #undef REDUCE_BIT_FIELD
9840
9841
9842 /* Return TRUE if expression STMT is suitable for replacement.
9843 Never consider memory loads as replaceable, because those don't ever lead
9844 into constant expressions. */
9845
9846 static bool
9847 stmt_is_replaceable_p (gimple *stmt)
9848 {
9849 if (ssa_is_replaceable_p (stmt))
9850 {
9851 /* Don't move around loads. */
9852 if (!gimple_assign_single_p (stmt)
9853 || is_gimple_val (gimple_assign_rhs1 (stmt)))
9854 return true;
9855 }
9856 return false;
9857 }
9858
9859 rtx
9860 expand_expr_real_1 (tree exp, rtx target, machine_mode tmode,
9861 enum expand_modifier modifier, rtx *alt_rtl,
9862 bool inner_reference_p)
9863 {
9864 rtx op0, op1, temp, decl_rtl;
9865 tree type;
9866 int unsignedp;
9867 machine_mode mode, dmode;
9868 enum tree_code code = TREE_CODE (exp);
9869 rtx subtarget, original_target;
9870 int ignore;
9871 tree context;
9872 bool reduce_bit_field;
9873 location_t loc = EXPR_LOCATION (exp);
9874 struct separate_ops ops;
9875 tree treeop0, treeop1, treeop2;
9876 tree ssa_name = NULL_TREE;
9877 gimple *g;
9878
9879 type = TREE_TYPE (exp);
9880 mode = TYPE_MODE (type);
9881 unsignedp = TYPE_UNSIGNED (type);
9882
9883 treeop0 = treeop1 = treeop2 = NULL_TREE;
9884 if (!VL_EXP_CLASS_P (exp))
9885 switch (TREE_CODE_LENGTH (code))
9886 {
9887 default:
9888 case 3: treeop2 = TREE_OPERAND (exp, 2); /* FALLTHRU */
9889 case 2: treeop1 = TREE_OPERAND (exp, 1); /* FALLTHRU */
9890 case 1: treeop0 = TREE_OPERAND (exp, 0); /* FALLTHRU */
9891 case 0: break;
9892 }
9893 ops.code = code;
9894 ops.type = type;
9895 ops.op0 = treeop0;
9896 ops.op1 = treeop1;
9897 ops.op2 = treeop2;
9898 ops.location = loc;
9899
9900 ignore = (target == const0_rtx
9901 || ((CONVERT_EXPR_CODE_P (code)
9902 || code == COND_EXPR || code == VIEW_CONVERT_EXPR)
9903 && TREE_CODE (type) == VOID_TYPE));
9904
9905 /* An operation in what may be a bit-field type needs the
9906 result to be reduced to the precision of the bit-field type,
9907 which is narrower than that of the type's mode. */
9908 reduce_bit_field = (!ignore
9909 && INTEGRAL_TYPE_P (type)
9910 && !type_has_mode_precision_p (type));
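  /* For example, arithmetic on a 3-bit bit-field is carried out in the
     field's integer mode (typically QImode), so every intermediate
     result has to be truncated back to 3 bits of precision.  */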
9911
9912 /* If we are going to ignore this result, we need only do something
9913 if there is a side-effect somewhere in the expression. If there
9914 is, short-circuit the most common cases here. Note that we must
9915 not call expand_expr with anything but const0_rtx in case this
9916 is an initial expansion of a size that contains a PLACEHOLDER_EXPR. */
9917
9918 if (ignore)
9919 {
9920 if (! TREE_SIDE_EFFECTS (exp))
9921 return const0_rtx;
9922
9923 /* Ensure we reference a volatile object even if value is ignored, but
9924 don't do this if all we are doing is taking its address. */
9925 if (TREE_THIS_VOLATILE (exp)
9926 && TREE_CODE (exp) != FUNCTION_DECL
9927 && mode != VOIDmode && mode != BLKmode
9928 && modifier != EXPAND_CONST_ADDRESS)
9929 {
9930 temp = expand_expr (exp, NULL_RTX, VOIDmode, modifier);
9931 if (MEM_P (temp))
9932 copy_to_reg (temp);
9933 return const0_rtx;
9934 }
9935
9936 if (TREE_CODE_CLASS (code) == tcc_unary
9937 || code == BIT_FIELD_REF
9938 || code == COMPONENT_REF
9939 || code == INDIRECT_REF)
9940 return expand_expr (treeop0, const0_rtx, VOIDmode,
9941 modifier);
9942
9943 else if (TREE_CODE_CLASS (code) == tcc_binary
9944 || TREE_CODE_CLASS (code) == tcc_comparison
9945 || code == ARRAY_REF || code == ARRAY_RANGE_REF)
9946 {
9947 expand_expr (treeop0, const0_rtx, VOIDmode, modifier);
9948 expand_expr (treeop1, const0_rtx, VOIDmode, modifier);
9949 return const0_rtx;
9950 }
9951
9952 target = 0;
9953 }
9954
9955 if (reduce_bit_field && modifier == EXPAND_STACK_PARM)
9956 target = 0;
9957
9958 /* Use subtarget as the target for operand 0 of a binary operation. */
9959 subtarget = get_subtarget (target);
9960 original_target = target;
9961
9962 switch (code)
9963 {
9964 case LABEL_DECL:
9965 {
9966 tree function = decl_function_context (exp);
9967
9968 temp = label_rtx (exp);
9969 temp = gen_rtx_LABEL_REF (Pmode, temp);
9970
9971 if (function != current_function_decl
9972 && function != 0)
9973 LABEL_REF_NONLOCAL_P (temp) = 1;
9974
9975 temp = gen_rtx_MEM (FUNCTION_MODE, temp);
9976 return temp;
9977 }
9978
9979 case SSA_NAME:
9980 /* ??? ivopts calls the expander without any preparation from
9981 out-of-ssa. So fake instructions as if this were an access to the
9982 base variable. This unnecessarily allocates a pseudo; see how we can
9983 reuse it if partition base vars have it set already. */
9984 if (!currently_expanding_to_rtl)
9985 {
9986 tree var = SSA_NAME_VAR (exp);
9987 if (var && DECL_RTL_SET_P (var))
9988 return DECL_RTL (var);
9989 return gen_raw_REG (TYPE_MODE (TREE_TYPE (exp)),
9990 LAST_VIRTUAL_REGISTER + 1);
9991 }
9992
9993 g = get_gimple_for_ssa_name (exp);
9994 /* For EXPAND_INITIALIZER try harder to get something simpler. */
9995 if (g == NULL
9996 && modifier == EXPAND_INITIALIZER
9997 && !SSA_NAME_IS_DEFAULT_DEF (exp)
9998 && (optimize || !SSA_NAME_VAR (exp)
9999 || DECL_IGNORED_P (SSA_NAME_VAR (exp)))
10000 && stmt_is_replaceable_p (SSA_NAME_DEF_STMT (exp)))
10001 g = SSA_NAME_DEF_STMT (exp);
10002 if (g)
10003 {
10004 rtx r;
10005 location_t saved_loc = curr_insn_location ();
10006 location_t loc = gimple_location (g);
10007 if (loc != UNKNOWN_LOCATION)
10008 set_curr_insn_location (loc);
10009 ops.code = gimple_assign_rhs_code (g);
10010 switch (get_gimple_rhs_class (ops.code))
10011 {
10012 case GIMPLE_TERNARY_RHS:
10013 ops.op2 = gimple_assign_rhs3 (g);
10014 /* Fallthru */
10015 case GIMPLE_BINARY_RHS:
10016 ops.op1 = gimple_assign_rhs2 (g);
10017
10018 /* Try to expand a conditional compare. */
10019 if (targetm.gen_ccmp_first)
10020 {
10021 gcc_checking_assert (targetm.gen_ccmp_next != NULL);
10022 r = expand_ccmp_expr (g, mode);
10023 if (r)
10024 break;
10025 }
10026 /* Fallthru */
10027 case GIMPLE_UNARY_RHS:
10028 ops.op0 = gimple_assign_rhs1 (g);
10029 ops.type = TREE_TYPE (gimple_assign_lhs (g));
10030 ops.location = loc;
10031 r = expand_expr_real_2 (&ops, target, tmode, modifier);
10032 break;
10033 case GIMPLE_SINGLE_RHS:
10034 {
10035 r = expand_expr_real (gimple_assign_rhs1 (g), target,
10036 tmode, modifier, alt_rtl,
10037 inner_reference_p);
10038 break;
10039 }
10040 default:
10041 gcc_unreachable ();
10042 }
10043 set_curr_insn_location (saved_loc);
10044 if (REG_P (r) && !REG_EXPR (r))
10045 set_reg_attrs_for_decl_rtl (SSA_NAME_VAR (exp), r);
10046 return r;
10047 }
10048
10049 ssa_name = exp;
10050 decl_rtl = get_rtx_for_ssa_name (ssa_name);
10051 exp = SSA_NAME_VAR (ssa_name);
10052 goto expand_decl_rtl;
10053
10054 case PARM_DECL:
10055 case VAR_DECL:
10056 /* If a static var's type was incomplete when the decl was written,
10057 but the type is complete now, lay out the decl now. */
10058 if (DECL_SIZE (exp) == 0
10059 && COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (TREE_TYPE (exp))
10060 && (TREE_STATIC (exp) || DECL_EXTERNAL (exp)))
10061 layout_decl (exp, 0);
10062
10063 /* fall through */
10064
10065 case FUNCTION_DECL:
10066 case RESULT_DECL:
10067 decl_rtl = DECL_RTL (exp);
10068 expand_decl_rtl:
10069 gcc_assert (decl_rtl);
10070
10071 /* DECL_MODE might change when TYPE_MODE depends on attribute target
10072 settings for VECTOR_TYPE_P that might switch for the function. */
10073 if (currently_expanding_to_rtl
10074 && code == VAR_DECL && MEM_P (decl_rtl)
10075 && VECTOR_TYPE_P (type) && exp && DECL_MODE (exp) != mode)
10076 decl_rtl = change_address (decl_rtl, TYPE_MODE (type), 0);
10077 else
10078 decl_rtl = copy_rtx (decl_rtl);
10079
10080 /* Record writes to register variables. */
10081 if (modifier == EXPAND_WRITE
10082 && REG_P (decl_rtl)
10083 && HARD_REGISTER_P (decl_rtl))
10084 add_to_hard_reg_set (&crtl->asm_clobbers,
10085 GET_MODE (decl_rtl), REGNO (decl_rtl));
10086
10087 /* Ensure the variable is marked as used even if it doesn't go
10088 through a parser. If it hasn't been used yet, write out an
10089 external definition. */
10090 if (exp)
10091 TREE_USED (exp) = 1;
10092
10093 /* Show we haven't gotten RTL for this yet. */
10094 temp = 0;
10095
10096 /* Variables inherited from containing functions should have
10097 been lowered by this point. */
10098 if (exp)
10099 context = decl_function_context (exp);
10100 gcc_assert (!exp
10101 || SCOPE_FILE_SCOPE_P (context)
10102 || context == current_function_decl
10103 || TREE_STATIC (exp)
10104 || DECL_EXTERNAL (exp)
10105 /* ??? C++ creates functions that are not TREE_STATIC. */
10106 || TREE_CODE (exp) == FUNCTION_DECL);
10107
10108 /* This is the case of an array whose size is to be determined
10109 from its initializer, while the initializer is still being parsed.
10110 ??? We aren't parsing while expanding anymore. */
10111
10112 if (MEM_P (decl_rtl) && REG_P (XEXP (decl_rtl, 0)))
10113 temp = validize_mem (decl_rtl);
10114
10115 /* If DECL_RTL is memory, we are in the normal case and the
10116 address is not valid, get the address into a register. */
10117
10118 else if (MEM_P (decl_rtl) && modifier != EXPAND_INITIALIZER)
10119 {
10120 if (alt_rtl)
10121 *alt_rtl = decl_rtl;
10122 decl_rtl = use_anchored_address (decl_rtl);
10123 if (modifier != EXPAND_CONST_ADDRESS
10124 && modifier != EXPAND_SUM
10125 && !memory_address_addr_space_p (exp ? DECL_MODE (exp)
10126 : GET_MODE (decl_rtl),
10127 XEXP (decl_rtl, 0),
10128 MEM_ADDR_SPACE (decl_rtl)))
10129 temp = replace_equiv_address (decl_rtl,
10130 copy_rtx (XEXP (decl_rtl, 0)));
10131 }
10132
10133 /* If we got something, return it. But first, set the alignment
10134 if the address is a register. */
10135 if (temp != 0)
10136 {
10137 if (exp && MEM_P (temp) && REG_P (XEXP (temp, 0)))
10138 mark_reg_pointer (XEXP (temp, 0), DECL_ALIGN (exp));
10139 }
10140 else if (MEM_P (decl_rtl))
10141 temp = decl_rtl;
10142
10143 if (temp != 0)
10144 {
10145 if (MEM_P (temp)
10146 && modifier != EXPAND_WRITE
10147 && modifier != EXPAND_MEMORY
10148 && modifier != EXPAND_INITIALIZER
10149 && modifier != EXPAND_CONST_ADDRESS
10150 && modifier != EXPAND_SUM
10151 && !inner_reference_p
10152 && mode != BLKmode
10153 && MEM_ALIGN (temp) < GET_MODE_ALIGNMENT (mode))
10154 temp = expand_misaligned_mem_ref (temp, mode, unsignedp,
10155 MEM_ALIGN (temp), NULL_RTX, NULL);
10156
10157 return temp;
10158 }
10159
10160 if (exp)
10161 dmode = DECL_MODE (exp);
10162 else
10163 dmode = TYPE_MODE (TREE_TYPE (ssa_name));
10164
10165 /* If the mode of DECL_RTL does not match that of the decl,
10166 there are two cases: we are dealing with a BLKmode value
10167 that is returned in a register, or we are dealing with
10168 a promoted value. In the latter case, return a SUBREG
10169 of the wanted mode, but mark it so that we know that it
10170 was already extended. */
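	 /* For instance, on a hypothetical target that promotes HImode
	    locals to SImode registers, DECL_RTL is an SImode pseudo
	    even though the decl has HImode; the result below is then a
	    lowpart (subreg:HI (reg:SI N)) with SUBREG_PROMOTED_VAR_P
	    set so later code can omit redundant extensions.  */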
10171 if (REG_P (decl_rtl)
10172 && dmode != BLKmode
10173 && GET_MODE (decl_rtl) != dmode)
10174 {
10175 machine_mode pmode;
10176
10177 /* Get the signedness to be used for this variable. Ensure we get
10178 the same mode we got when the variable was declared. */
10179 if (code != SSA_NAME)
10180 pmode = promote_decl_mode (exp, &unsignedp);
10181 else if ((g = SSA_NAME_DEF_STMT (ssa_name))
10182 && gimple_code (g) == GIMPLE_CALL
10183 && !gimple_call_internal_p (g))
10184 pmode = promote_function_mode (type, mode, &unsignedp,
10185 gimple_call_fntype (g),
10186 2);
10187 else
10188 pmode = promote_ssa_mode (ssa_name, &unsignedp);
10189 gcc_assert (GET_MODE (decl_rtl) == pmode);
10190
10191 temp = gen_lowpart_SUBREG (mode, decl_rtl);
10192 SUBREG_PROMOTED_VAR_P (temp) = 1;
10193 SUBREG_PROMOTED_SET (temp, unsignedp);
10194 return temp;
10195 }
10196
10197 return decl_rtl;
10198
10199 case INTEGER_CST:
10200 {
10201 /* Given that TYPE_PRECISION (type) is not always equal to
10202 GET_MODE_PRECISION (TYPE_MODE (type)), we need to extend from
10203 the former to the latter according to the signedness of the
10204 type. */
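	/* For example, a signed bit-field type of precision 3 typically
	   has QImode; the value with all three bits set must expand to
	   (const_int -1) rather than (const_int 7) once widened to the
	   8-bit mode.  */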
10205 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
10206 temp = immed_wide_int_const
10207 (wi::to_wide (exp, GET_MODE_PRECISION (mode)), mode);
10208 return temp;
10209 }
10210
10211 case VECTOR_CST:
10212 {
10213 tree tmp = NULL_TREE;
10214 if (VECTOR_MODE_P (mode))
10215 return const_vector_from_tree (exp);
10216 scalar_int_mode int_mode;
10217 if (is_int_mode (mode, &int_mode))
10218 {
10219 if (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (exp)))
10220 return const_scalar_mask_from_tree (int_mode, exp);
10221 else
10222 {
10223 tree type_for_mode
10224 = lang_hooks.types.type_for_mode (int_mode, 1);
10225 if (type_for_mode)
10226 tmp = fold_unary_loc (loc, VIEW_CONVERT_EXPR,
10227 type_for_mode, exp);
10228 }
10229 }
10230 if (!tmp)
10231 {
10232 vec<constructor_elt, va_gc> *v;
10233 /* Constructors need to be fixed-length. FIXME. */
10234 unsigned int nunits = VECTOR_CST_NELTS (exp).to_constant ();
10235 vec_alloc (v, nunits);
10236 for (unsigned int i = 0; i < nunits; ++i)
10237 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, VECTOR_CST_ELT (exp, i));
10238 tmp = build_constructor (type, v);
10239 }
10240 return expand_expr (tmp, ignore ? const0_rtx : target,
10241 tmode, modifier);
10242 }
10243
10244 case CONST_DECL:
10245 if (modifier == EXPAND_WRITE)
10246 {
10247 /* Writing into CONST_DECL is always invalid, but handle it
10248 gracefully. */
10249 addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (exp));
10250 scalar_int_mode address_mode = targetm.addr_space.address_mode (as);
10251 op0 = expand_expr_addr_expr_1 (exp, NULL_RTX, address_mode,
10252 EXPAND_NORMAL, as);
10253 op0 = memory_address_addr_space (mode, op0, as);
10254 temp = gen_rtx_MEM (mode, op0);
10255 set_mem_addr_space (temp, as);
10256 return temp;
10257 }
10258 return expand_expr (DECL_INITIAL (exp), target, VOIDmode, modifier);
10259
10260 case REAL_CST:
10261 /* If optimized, generate immediate CONST_DOUBLE
10262 which will be turned into memory by reload if necessary.
10263
10264 We used to force a register so that loop.c could see it. But
10265 this does not allow gen_* patterns to perform optimizations with
10266 the constants. It also produces two insns in cases like "x = 1.0;".
10267 On most machines, floating-point constants are not permitted in
10268 many insns, so we'd end up copying it to a register in any case.
10269
10270 Now, we do the copying in expand_binop, if appropriate. */
10271 return const_double_from_real_value (TREE_REAL_CST (exp),
10272 TYPE_MODE (TREE_TYPE (exp)));
10273
10274 case FIXED_CST:
10275 return CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (exp),
10276 TYPE_MODE (TREE_TYPE (exp)));
10277
10278 case COMPLEX_CST:
10279 /* Handle evaluating a complex constant in a CONCAT target. */
10280 if (original_target && GET_CODE (original_target) == CONCAT)
10281 {
10282 machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (exp)));
10283 rtx rtarg, itarg;
10284
10285 rtarg = XEXP (original_target, 0);
10286 itarg = XEXP (original_target, 1);
10287
10288 /* Move the real and imaginary parts separately. */
10289 op0 = expand_expr (TREE_REALPART (exp), rtarg, mode, EXPAND_NORMAL);
10290 op1 = expand_expr (TREE_IMAGPART (exp), itarg, mode, EXPAND_NORMAL);
10291
10292 if (op0 != rtarg)
10293 emit_move_insn (rtarg, op0);
10294 if (op1 != itarg)
10295 emit_move_insn (itarg, op1);
10296
10297 return original_target;
10298 }
10299
10300 /* fall through */
10301
10302 case STRING_CST:
10303 temp = expand_expr_constant (exp, 1, modifier);
10304
10305 /* temp contains a constant address.
10306 On RISC machines where a constant address isn't valid,
10307 make some insns to get that address into a register. */
10308 if (modifier != EXPAND_CONST_ADDRESS
10309 && modifier != EXPAND_INITIALIZER
10310 && modifier != EXPAND_SUM
10311 && ! memory_address_addr_space_p (mode, XEXP (temp, 0),
10312 MEM_ADDR_SPACE (temp)))
10313 return replace_equiv_address (temp,
10314 copy_rtx (XEXP (temp, 0)));
10315 return temp;
10316
10317 case POLY_INT_CST:
10318 return immed_wide_int_const (poly_int_cst_value (exp), mode);
10319
10320 case SAVE_EXPR:
10321 {
10322 tree val = treeop0;
10323 rtx ret = expand_expr_real_1 (val, target, tmode, modifier, alt_rtl,
10324 inner_reference_p);
10325
10326 if (!SAVE_EXPR_RESOLVED_P (exp))
10327 {
10328 /* We can indeed still hit this case, typically via builtin
10329 expanders calling save_expr immediately before expanding
10330 something. Assume this means that we only have to deal
10331 with non-BLKmode values. */
10332 gcc_assert (GET_MODE (ret) != BLKmode);
10333
10334 val = build_decl (curr_insn_location (),
10335 VAR_DECL, NULL, TREE_TYPE (exp));
10336 DECL_ARTIFICIAL (val) = 1;
10337 DECL_IGNORED_P (val) = 1;
10338 treeop0 = val;
10339 TREE_OPERAND (exp, 0) = treeop0;
10340 SAVE_EXPR_RESOLVED_P (exp) = 1;
10341
10342 if (!CONSTANT_P (ret))
10343 ret = copy_to_reg (ret);
10344 SET_DECL_RTL (val, ret);
10345 }
10346
10347 return ret;
10348 }
10349
10350
10351 case CONSTRUCTOR:
10352 /* If we don't need the result, just ensure we evaluate any
10353 subexpressions. */
10354 if (ignore)
10355 {
10356 unsigned HOST_WIDE_INT idx;
10357 tree value;
10358
10359 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), idx, value)
10360 expand_expr (value, const0_rtx, VOIDmode, EXPAND_NORMAL);
10361
10362 return const0_rtx;
10363 }
10364
10365 return expand_constructor (exp, target, modifier, false);
10366
10367 case TARGET_MEM_REF:
10368 {
10369 addr_space_t as
10370 = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
10371 unsigned int align;
10372
10373 op0 = addr_for_mem_ref (exp, as, true);
10374 op0 = memory_address_addr_space (mode, op0, as);
10375 temp = gen_rtx_MEM (mode, op0);
10376 set_mem_attributes (temp, exp, 0);
10377 set_mem_addr_space (temp, as);
10378 align = get_object_alignment (exp);
10379 if (modifier != EXPAND_WRITE
10380 && modifier != EXPAND_MEMORY
10381 && mode != BLKmode
10382 && align < GET_MODE_ALIGNMENT (mode))
10383 temp = expand_misaligned_mem_ref (temp, mode, unsignedp,
10384 align, NULL_RTX, NULL);
10385 return temp;
10386 }
10387
10388 case MEM_REF:
10389 {
10390 const bool reverse = REF_REVERSE_STORAGE_ORDER (exp);
10391 addr_space_t as
10392 = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
10393 machine_mode address_mode;
10394 tree base = TREE_OPERAND (exp, 0);
10395 gimple *def_stmt;
10396 unsigned align;
10397 /* Handle expansion of non-aliased memory with non-BLKmode. That
10398 might end up in a register. */
10399 if (mem_ref_refers_to_non_mem_p (exp))
10400 {
10401 poly_int64 offset = mem_ref_offset (exp).force_shwi ();
10402 base = TREE_OPERAND (base, 0);
10403 poly_uint64 type_size;
10404 if (known_eq (offset, 0)
10405 && !reverse
10406 && poly_int_tree_p (TYPE_SIZE (type), &type_size)
10407 && known_eq (GET_MODE_BITSIZE (DECL_MODE (base)), type_size))
10408 return expand_expr (build1 (VIEW_CONVERT_EXPR, type, base),
10409 target, tmode, modifier);
10410 if (TYPE_MODE (type) == BLKmode)
10411 {
10412 temp = assign_stack_temp (DECL_MODE (base),
10413 GET_MODE_SIZE (DECL_MODE (base)));
10414 store_expr (base, temp, 0, false, false);
10415 temp = adjust_address (temp, BLKmode, offset);
10416 set_mem_size (temp, int_size_in_bytes (type));
10417 return temp;
10418 }
10419 exp = build3 (BIT_FIELD_REF, type, base, TYPE_SIZE (type),
10420 bitsize_int (offset * BITS_PER_UNIT));
10421 REF_REVERSE_STORAGE_ORDER (exp) = reverse;
10422 return expand_expr (exp, target, tmode, modifier);
10423 }
10424 address_mode = targetm.addr_space.address_mode (as);
10425 if ((def_stmt = get_def_for_expr (base, BIT_AND_EXPR)))
10426 {
10427 tree mask = gimple_assign_rhs2 (def_stmt);
10428 base = build2 (BIT_AND_EXPR, TREE_TYPE (base),
10429 gimple_assign_rhs1 (def_stmt), mask);
10430 TREE_OPERAND (exp, 0) = base;
10431 }
10432 align = get_object_alignment (exp);
10433 op0 = expand_expr (base, NULL_RTX, VOIDmode, EXPAND_SUM);
10434 op0 = memory_address_addr_space (mode, op0, as);
10435 if (!integer_zerop (TREE_OPERAND (exp, 1)))
10436 {
10437 rtx off = immed_wide_int_const (mem_ref_offset (exp), address_mode);
10438 op0 = simplify_gen_binary (PLUS, address_mode, op0, off);
10439 op0 = memory_address_addr_space (mode, op0, as);
10440 }
10441 temp = gen_rtx_MEM (mode, op0);
10442 set_mem_attributes (temp, exp, 0);
10443 set_mem_addr_space (temp, as);
10444 if (TREE_THIS_VOLATILE (exp))
10445 MEM_VOLATILE_P (temp) = 1;
10446 if (modifier != EXPAND_WRITE
10447 && modifier != EXPAND_MEMORY
10448 && !inner_reference_p
10449 && mode != BLKmode
10450 && align < GET_MODE_ALIGNMENT (mode))
10451 temp = expand_misaligned_mem_ref (temp, mode, unsignedp, align,
10452 modifier == EXPAND_STACK_PARM
10453 ? NULL_RTX : target, alt_rtl);
10454 if (reverse
10455 && modifier != EXPAND_MEMORY
10456 && modifier != EXPAND_WRITE)
10457 temp = flip_storage_order (mode, temp);
10458 return temp;
10459 }
10460
10461 case ARRAY_REF:
10462
10463 {
10464 tree array = treeop0;
10465 tree index = treeop1;
10466 tree init;
10467
10468 /* Fold an expression like: "foo"[2].
10469 This is not done in fold so it won't happen inside &.
10470 Don't fold if this is for wide characters since it's too
10471 difficult to do correctly and this is a very rare case. */
10472
10473 if (modifier != EXPAND_CONST_ADDRESS
10474 && modifier != EXPAND_INITIALIZER
10475 && modifier != EXPAND_MEMORY)
10476 {
10477 tree t = fold_read_from_constant_string (exp);
10478
10479 if (t)
10480 return expand_expr (t, target, tmode, modifier);
10481 }
10482
10483 /* If this is a constant index into a constant array,
10484 just get the value from the array. Handle both the cases when
10485 we have an explicit constructor and when our operand is a variable
10486 that was declared const. */
10487
10488 if (modifier != EXPAND_CONST_ADDRESS
10489 && modifier != EXPAND_INITIALIZER
10490 && modifier != EXPAND_MEMORY
10491 && TREE_CODE (array) == CONSTRUCTOR
10492 && ! TREE_SIDE_EFFECTS (array)
10493 && TREE_CODE (index) == INTEGER_CST)
10494 {
10495 unsigned HOST_WIDE_INT ix;
10496 tree field, value;
10497
10498 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (array), ix,
10499 field, value)
10500 if (tree_int_cst_equal (field, index))
10501 {
10502 if (!TREE_SIDE_EFFECTS (value))
10503 return expand_expr (fold (value), target, tmode, modifier);
10504 break;
10505 }
10506 }
10507
10508 else if (optimize >= 1
10509 && modifier != EXPAND_CONST_ADDRESS
10510 && modifier != EXPAND_INITIALIZER
10511 && modifier != EXPAND_MEMORY
10512 && TREE_READONLY (array) && ! TREE_SIDE_EFFECTS (array)
10513 && TREE_CODE (index) == INTEGER_CST
10514 && (VAR_P (array) || TREE_CODE (array) == CONST_DECL)
10515 && (init = ctor_for_folding (array)) != error_mark_node)
10516 {
10517 if (init == NULL_TREE)
10518 {
10519 tree value = build_zero_cst (type);
10520 if (TREE_CODE (value) == CONSTRUCTOR)
10521 {
10522 /* If VALUE is a CONSTRUCTOR, this optimization is only
10523 useful if this doesn't store the CONSTRUCTOR into
10524 memory. If it does, it is more efficient to just
10525 load the data from the array directly. */
10526 rtx ret = expand_constructor (value, target,
10527 modifier, true);
10528 if (ret == NULL_RTX)
10529 value = NULL_TREE;
10530 }
10531
10532 if (value)
10533 return expand_expr (value, target, tmode, modifier);
10534 }
10535 else if (TREE_CODE (init) == CONSTRUCTOR)
10536 {
10537 unsigned HOST_WIDE_INT ix;
10538 tree field, value;
10539
10540 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (init), ix,
10541 field, value)
10542 if (tree_int_cst_equal (field, index))
10543 {
10544 if (TREE_SIDE_EFFECTS (value))
10545 break;
10546
10547 if (TREE_CODE (value) == CONSTRUCTOR)
10548 {
10549 /* If VALUE is a CONSTRUCTOR, this
10550 optimization is only useful if
10551 this doesn't store the CONSTRUCTOR
10552 into memory. If it does, it is more
10553 efficient to just load the data from
10554 the array directly. */
10555 rtx ret = expand_constructor (value, target,
10556 modifier, true);
10557 if (ret == NULL_RTX)
10558 break;
10559 }
10560
10561 return
10562 expand_expr (fold (value), target, tmode, modifier);
10563 }
10564 }
10565 else if (TREE_CODE (init) == STRING_CST)
10566 {
10567 tree low_bound = array_ref_low_bound (exp);
10568 tree index1 = fold_convert_loc (loc, sizetype, treeop1);
10569
10570 /* Optimize the special case of a zero lower bound.
10571
10572 We convert the lower bound to sizetype to avoid problems
10573 with constant folding. E.g. suppose the lower bound is
10574 1 and its mode is QI. Without the conversion
10575 (ARRAY + (INDEX - (unsigned char)1))
10576 becomes
10577 (ARRAY + (-(unsigned char)1) + INDEX)
10578 which becomes
10579 (ARRAY + 255 + INDEX). Oops! */
10580 if (!integer_zerop (low_bound))
10581 index1 = size_diffop_loc (loc, index1,
10582 fold_convert_loc (loc, sizetype,
10583 low_bound));
10584
10585 if (tree_fits_uhwi_p (index1)
10586 && compare_tree_int (index1, TREE_STRING_LENGTH (init)) < 0)
10587 {
10588 tree type = TREE_TYPE (TREE_TYPE (init));
10589 scalar_int_mode mode;
10590
10591 if (is_int_mode (TYPE_MODE (type), &mode)
10592 && GET_MODE_SIZE (mode) == 1)
10593 return gen_int_mode (TREE_STRING_POINTER (init)
10594 [TREE_INT_CST_LOW (index1)],
10595 mode);
10596 }
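/* A sketch of the effect, with a hypothetical `static const char s[] = "abc";'
   and optimization enabled: a read of s[1] with a constant index reaches
   the STRING_CST branch above and is expanded directly to the QImode
   constant 'b', with no load from the array emitted.  */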
10597 }
10598 }
10599 }
10600 goto normal_inner_ref;
10601
10602 case COMPONENT_REF:
10603 /* If the operand is a CONSTRUCTOR, we can just extract the
10604 appropriate field if it is present. */
10605 if (TREE_CODE (treeop0) == CONSTRUCTOR)
10606 {
10607 unsigned HOST_WIDE_INT idx;
10608 tree field, value;
10609 scalar_int_mode field_mode;
10610
10611 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (treeop0),
10612 idx, field, value)
10613 if (field == treeop1
10614 /* We can normally use the value of the field in the
10615 CONSTRUCTOR. However, if this is a bitfield in
10616 an integral mode that we can fit in a HOST_WIDE_INT,
10617 we must mask only the number of bits in the bitfield,
10618 since this is done implicitly by the constructor. If
10619 the bitfield does not meet either of those conditions,
10620 we can't do this optimization. */
10621 && (! DECL_BIT_FIELD (field)
10622 || (is_int_mode (DECL_MODE (field), &field_mode)
10623 && (GET_MODE_PRECISION (field_mode)
10624 <= HOST_BITS_PER_WIDE_INT))))
10625 {
10626 if (DECL_BIT_FIELD (field)
10627 && modifier == EXPAND_STACK_PARM)
10628 target = 0;
10629 op0 = expand_expr (value, target, tmode, modifier);
10630 if (DECL_BIT_FIELD (field))
10631 {
10632 HOST_WIDE_INT bitsize = TREE_INT_CST_LOW (DECL_SIZE (field));
10633 scalar_int_mode imode
10634 = SCALAR_INT_TYPE_MODE (TREE_TYPE (field));
10635
10636 if (TYPE_UNSIGNED (TREE_TYPE (field)))
10637 {
10638 op1 = gen_int_mode ((HOST_WIDE_INT_1 << bitsize) - 1,
10639 imode);
10640 op0 = expand_and (imode, op0, op1, target);
10641 }
10642 else
10643 {
10644 int count = GET_MODE_PRECISION (imode) - bitsize;
10645
10646 op0 = expand_shift (LSHIFT_EXPR, imode, op0, count,
10647 target, 0);
10648 op0 = expand_shift (RSHIFT_EXPR, imode, op0, count,
10649 target, 0);
10650 }
10651 }
10652
10653 return op0;
10654 }
10655 }
10656 goto normal_inner_ref;
10657
10658 case BIT_FIELD_REF:
10659 case ARRAY_RANGE_REF:
10660 normal_inner_ref:
10661 {
10662 machine_mode mode1, mode2;
10663 poly_int64 bitsize, bitpos, bytepos;
10664 tree offset;
10665 int reversep, volatilep = 0, must_force_mem;
10666 tree tem
10667 = get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode1,
10668 &unsignedp, &reversep, &volatilep);
10669 rtx orig_op0, memloc;
10670 bool clear_mem_expr = false;
10671
10672 /* If we got back the original object, something is wrong. Perhaps
10673 we are evaluating an expression too early. In any event, don't
10674 infinitely recurse. */
10675 gcc_assert (tem != exp);
10676
10677 /* If TEM's type is a union of variable size, pass TARGET to the inner
10678 computation, since it will need a temporary and TARGET is known
10679 to suffice. This occurs in unchecked conversion in Ada. */
10680 orig_op0 = op0
10681 = expand_expr_real (tem,
10682 (TREE_CODE (TREE_TYPE (tem)) == UNION_TYPE
10683 && COMPLETE_TYPE_P (TREE_TYPE (tem))
10684 && (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem)))
10685 != INTEGER_CST)
10686 && modifier != EXPAND_STACK_PARM
10687 ? target : NULL_RTX),
10688 VOIDmode,
10689 modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier,
10690 NULL, true);
10691
10692 /* If the field has a mode, we want to access it in the
10693 field's mode, not the computed mode.
10694 If a MEM has VOIDmode (external with incomplete type),
10695 use BLKmode for it instead. */
10696 if (MEM_P (op0))
10697 {
10698 if (mode1 != VOIDmode)
10699 op0 = adjust_address (op0, mode1, 0);
10700 else if (GET_MODE (op0) == VOIDmode)
10701 op0 = adjust_address (op0, BLKmode, 0);
10702 }
10703
10704 mode2
10705 = CONSTANT_P (op0) ? TYPE_MODE (TREE_TYPE (tem)) : GET_MODE (op0);
10706
10707 /* Make sure bitpos is not negative, it can wreak havoc later. */
10708 if (maybe_lt (bitpos, 0))
10709 {
10710 gcc_checking_assert (offset == NULL_TREE);
10711 offset = size_int (bits_to_bytes_round_down (bitpos));
10712 bitpos = num_trailing_bits (bitpos);
10713 }
10714
10715 /* If we have either an offset, a BLKmode result, or a reference
10716 outside the underlying object, we must force it to memory.
10717 Such a case can occur in Ada if we have unchecked conversion
10718 of an expression from a scalar type to an aggregate type or
10719 for an ARRAY_RANGE_REF whose type is BLKmode, or if we were
10720 passed a partially uninitialized object or a view-conversion
10721 to a larger size. */
10722 must_force_mem = (offset
10723 || mode1 == BLKmode
10724 || (mode == BLKmode
10725 && !int_mode_for_size (bitsize, 1).exists ())
10726 || maybe_gt (bitpos + bitsize,
10727 GET_MODE_BITSIZE (mode2)));
10728
10729 /* Handle CONCAT first. */
10730 if (GET_CODE (op0) == CONCAT && !must_force_mem)
10731 {
10732 if (known_eq (bitpos, 0)
10733 && known_eq (bitsize, GET_MODE_BITSIZE (GET_MODE (op0)))
10734 && COMPLEX_MODE_P (mode1)
10735 && COMPLEX_MODE_P (GET_MODE (op0))
10736 && (GET_MODE_PRECISION (GET_MODE_INNER (mode1))
10737 == GET_MODE_PRECISION (GET_MODE_INNER (GET_MODE (op0)))))
10738 {
10739 if (reversep)
10740 op0 = flip_storage_order (GET_MODE (op0), op0);
10741 if (mode1 != GET_MODE (op0))
10742 {
10743 rtx parts[2];
10744 for (int i = 0; i < 2; i++)
10745 {
10746 rtx op = read_complex_part (op0, i != 0);
10747 if (GET_CODE (op) == SUBREG)
10748 op = force_reg (GET_MODE (op), op);
10749 rtx temp = gen_lowpart_common (GET_MODE_INNER (mode1),
10750 op);
10751 if (temp)
10752 op = temp;
10753 else
10754 {
10755 if (!REG_P (op) && !MEM_P (op))
10756 op = force_reg (GET_MODE (op), op);
10757 op = gen_lowpart (GET_MODE_INNER (mode1), op);
10758 }
10759 parts[i] = op;
10760 }
10761 op0 = gen_rtx_CONCAT (mode1, parts[0], parts[1]);
10762 }
10763 return op0;
10764 }
10765 if (known_eq (bitpos, 0)
10766 && known_eq (bitsize,
10767 GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0))))
10768 && maybe_ne (bitsize, 0))
10769 {
10770 op0 = XEXP (op0, 0);
10771 mode2 = GET_MODE (op0);
10772 }
10773 else if (known_eq (bitpos,
10774 GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0))))
10775 && known_eq (bitsize,
10776 GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 1))))
10777 && maybe_ne (bitpos, 0)
10778 && maybe_ne (bitsize, 0))
10779 {
10780 op0 = XEXP (op0, 1);
10781 bitpos = 0;
10782 mode2 = GET_MODE (op0);
10783 }
10784 else
10785 /* Otherwise force into memory. */
10786 must_force_mem = 1;
10787 }
10788
10789 /* If this is a constant, put it in a register if it is a legitimate
10790 constant and we don't need a memory reference. */
10791 if (CONSTANT_P (op0)
10792 && mode2 != BLKmode
10793 && targetm.legitimate_constant_p (mode2, op0)
10794 && !must_force_mem)
10795 op0 = force_reg (mode2, op0);
10796
10797 /* Otherwise, if this is a constant, try to force it to the constant
10798 pool. Note that back-ends, e.g. MIPS, may refuse to do so if it
10799 is a legitimate constant. */
10800 else if (CONSTANT_P (op0) && (memloc = force_const_mem (mode2, op0)))
10801 op0 = validize_mem (memloc);
10802
10803 /* Otherwise, if this is a constant, or if the object is not in memory
10804 but needs to be, put it there. */
10805 else if (CONSTANT_P (op0) || (!MEM_P (op0) && must_force_mem))
10806 {
10807 memloc = assign_temp (TREE_TYPE (tem), 1, 1);
10808 emit_move_insn (memloc, op0);
10809 op0 = memloc;
10810 clear_mem_expr = true;
10811 }
10812
10813 if (offset)
10814 {
10815 machine_mode address_mode;
10816 rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode,
10817 EXPAND_SUM);
10818
10819 gcc_assert (MEM_P (op0));
10820
10821 address_mode = get_address_mode (op0);
10822 if (GET_MODE (offset_rtx) != address_mode)
10823 {
10824 /* We cannot be sure that the RTL in offset_rtx is valid outside
10825 of a memory address context, so force it into a register
10826 before attempting to convert it to the desired mode. */
10827 offset_rtx = force_operand (offset_rtx, NULL_RTX);
10828 offset_rtx = convert_to_mode (address_mode, offset_rtx, 0);
10829 }
10830
10831 /* See the comment in expand_assignment for the rationale. */
10832 if (mode1 != VOIDmode
10833 && maybe_ne (bitpos, 0)
10834 && maybe_gt (bitsize, 0)
10835 && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
10836 && multiple_p (bitpos, bitsize)
10837 && multiple_p (bitsize, GET_MODE_ALIGNMENT (mode1))
10838 && MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode1))
10839 {
10840 op0 = adjust_address (op0, mode1, bytepos);
10841 bitpos = 0;
10842 }
10843
10844 op0 = offset_address (op0, offset_rtx,
10845 highest_pow2_factor (offset));
10846 }
10847
10848 /* If OFFSET is making OP0 more aligned than BIGGEST_ALIGNMENT,
10849 record its alignment as BIGGEST_ALIGNMENT. */
10850 if (MEM_P (op0)
10851 && known_eq (bitpos, 0)
10852 && offset != 0
10853 && is_aligning_offset (offset, tem))
10854 set_mem_align (op0, BIGGEST_ALIGNMENT);
10855
10856 /* Don't forget about volatility even if this is a bitfield. */
10857 if (MEM_P (op0) && volatilep && ! MEM_VOLATILE_P (op0))
10858 {
10859 if (op0 == orig_op0)
10860 op0 = copy_rtx (op0);
10861
10862 MEM_VOLATILE_P (op0) = 1;
10863 }
10864
10865 if (MEM_P (op0) && TREE_CODE (tem) == FUNCTION_DECL)
10866 {
10867 if (op0 == orig_op0)
10868 op0 = copy_rtx (op0);
10869
10870 set_mem_align (op0, BITS_PER_UNIT);
10871 }
10872
10873 /* In cases where an aligned union has an unaligned object
10874 as a field, we might be extracting a BLKmode value from
10875 an integer-mode (e.g., SImode) object. Handle this case
10876 by doing the extract into an object as wide as the field
10877 (which we know to be the width of a basic mode), then
10878 storing into memory, and changing the mode to BLKmode. */
10879 if (mode1 == VOIDmode
10880 || REG_P (op0) || GET_CODE (op0) == SUBREG
10881 || (mode1 != BLKmode && ! direct_load[(int) mode1]
10882 && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
10883 && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT
10884 && modifier != EXPAND_CONST_ADDRESS
10885 && modifier != EXPAND_INITIALIZER
10886 && modifier != EXPAND_MEMORY)
10887 /* If the bitfield is volatile and the bitsize
10888 is narrower than the access size of the bitfield,
10889 we need to extract bitfields from the access. */
10890 || (volatilep && TREE_CODE (exp) == COMPONENT_REF
10891 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (exp, 1))
10892 && mode1 != BLKmode
10893 && maybe_lt (bitsize, GET_MODE_SIZE (mode1) * BITS_PER_UNIT))
10894 /* If the field isn't aligned enough to fetch as a memref,
10895 fetch it as a bit field. */
10896 || (mode1 != BLKmode
10897 && (((MEM_P (op0)
10898 ? MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode1)
10899 || !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode1))
10900 : TYPE_ALIGN (TREE_TYPE (tem)) < GET_MODE_ALIGNMENT (mode)
10901 || !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode)))
10902 && modifier != EXPAND_MEMORY
10903 && ((modifier == EXPAND_CONST_ADDRESS
10904 || modifier == EXPAND_INITIALIZER)
10905 ? STRICT_ALIGNMENT
10906 : targetm.slow_unaligned_access (mode1,
10907 MEM_ALIGN (op0))))
10908 || !multiple_p (bitpos, BITS_PER_UNIT)))
10909 /* If the type and the field are a constant size and the
10910 size of the type isn't the same size as the bitfield,
10911 we must use bitfield operations. */
10912 || (known_size_p (bitsize)
10913 && TYPE_SIZE (TREE_TYPE (exp))
10914 && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (exp)))
10915 && maybe_ne (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (exp))),
10916 bitsize)))
10917 {
10918 machine_mode ext_mode = mode;
10919
10920 if (ext_mode == BLKmode
10921 && ! (target != 0 && MEM_P (op0)
10922 && MEM_P (target)
10923 && multiple_p (bitpos, BITS_PER_UNIT)))
10924 ext_mode = int_mode_for_size (bitsize, 1).else_blk ();
10925
10926 if (ext_mode == BLKmode)
10927 {
10928 if (target == 0)
10929 target = assign_temp (type, 1, 1);
10930
10931 /* ??? Unlike the similar test a few lines below, this one is
10932 very likely obsolete. */
10933 if (known_eq (bitsize, 0))
10934 return target;
10935
10936 /* In this case, BITPOS must start at a byte boundary and
10937 TARGET, if specified, must be a MEM. */
10938 gcc_assert (MEM_P (op0)
10939 && (!target || MEM_P (target)));
10940
10941 bytepos = exact_div (bitpos, BITS_PER_UNIT);
10942 poly_int64 bytesize = bits_to_bytes_round_up (bitsize);
10943 emit_block_move (target,
10944 adjust_address (op0, VOIDmode, bytepos),
10945 gen_int_mode (bytesize, Pmode),
10946 (modifier == EXPAND_STACK_PARM
10947 ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
10948
10949 return target;
10950 }
10951
10952 /* If we have nothing to extract, the result will be 0 for targets
10953 with SHIFT_COUNT_TRUNCATED == 0 and garbage otherwise. Always
10954 return 0 for the sake of consistency, as reading a zero-sized
10955 bitfield is valid in Ada and the value is fully specified. */
10956 if (known_eq (bitsize, 0))
10957 return const0_rtx;
10958
10959 op0 = validize_mem (op0);
10960
10961 if (MEM_P (op0) && REG_P (XEXP (op0, 0)))
10962 mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
10963
10964 /* If the result has aggregate type and the extraction is done in
10965 an integral mode, then the field may not be aligned on a byte
10966 boundary; in this case, if it has reverse storage order, it
10967 needs to be extracted as a scalar field with reverse storage
10968 order and put back into memory order afterwards. */
10969 if (AGGREGATE_TYPE_P (type)
10970 && GET_MODE_CLASS (ext_mode) == MODE_INT)
10971 reversep = TYPE_REVERSE_STORAGE_ORDER (type);
10972
10973 gcc_checking_assert (known_ge (bitpos, 0));
10974 op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp,
10975 (modifier == EXPAND_STACK_PARM
10976 ? NULL_RTX : target),
10977 ext_mode, ext_mode, reversep, alt_rtl);
10978
10979 /* If the result has aggregate type and the mode of OP0 is an
10980 integral mode then, if BITSIZE is narrower than this mode
10981 and this is for big-endian data, we must put the field
10982 into the high-order bits. And we must also put it back
10983 into memory order if it has been previously reversed. */
10984 scalar_int_mode op0_mode;
10985 if (AGGREGATE_TYPE_P (type)
10986 && is_int_mode (GET_MODE (op0), &op0_mode))
10987 {
10988 HOST_WIDE_INT size = GET_MODE_BITSIZE (op0_mode);
10989
10990 gcc_checking_assert (known_le (bitsize, size));
10991 if (maybe_lt (bitsize, size)
10992 && reversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
10993 op0 = expand_shift (LSHIFT_EXPR, op0_mode, op0,
10994 size - bitsize, op0, 1);
10995
10996 if (reversep)
10997 op0 = flip_storage_order (op0_mode, op0);
10998 }
10999
11000 /* If the result type is BLKmode, store the data into a temporary
11001 of the appropriate type, but with the mode corresponding to the
11002 mode for the data we have (op0's mode). */
11003 if (mode == BLKmode)
11004 {
11005 rtx new_rtx
11006 = assign_stack_temp_for_type (ext_mode,
11007 GET_MODE_BITSIZE (ext_mode),
11008 type);
11009 emit_move_insn (new_rtx, op0);
11010 op0 = copy_rtx (new_rtx);
11011 PUT_MODE (op0, BLKmode);
11012 }
11013
11014 return op0;
11015 }
11016
11017 /* If the result is BLKmode, use that to access the object
11018 now as well. */
11019 if (mode == BLKmode)
11020 mode1 = BLKmode;
11021
11022 /* Get a reference to just this component. */
11023 bytepos = bits_to_bytes_round_down (bitpos);
11024 if (modifier == EXPAND_CONST_ADDRESS
11025 || modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
11026 op0 = adjust_address_nv (op0, mode1, bytepos);
11027 else
11028 op0 = adjust_address (op0, mode1, bytepos);
11029
11030 if (op0 == orig_op0)
11031 op0 = copy_rtx (op0);
11032
11033 /* Don't set memory attributes if the base expression is
11034 SSA_NAME that got expanded as a MEM or a CONSTANT. In that case,
11035 we should just honor its original memory attributes. */
11036 if (!(TREE_CODE (tem) == SSA_NAME
11037 && (MEM_P (orig_op0) || CONSTANT_P (orig_op0))))
11038 set_mem_attributes (op0, exp, 0);
11039
11040 if (REG_P (XEXP (op0, 0)))
11041 mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
11042
11043 /* If op0 is a temporary because the original expression was forced
11044 to memory, clear MEM_EXPR so that the original expression cannot
11045 be marked as addressable through MEM_EXPR of the temporary. */
11046 if (clear_mem_expr)
11047 set_mem_expr (op0, NULL_TREE);
11048
11049 MEM_VOLATILE_P (op0) |= volatilep;
11050
11051 if (reversep
11052 && modifier != EXPAND_MEMORY
11053 && modifier != EXPAND_WRITE)
11054 op0 = flip_storage_order (mode1, op0);
11055
11056 if (mode == mode1 || mode1 == BLKmode || mode1 == tmode
11057 || modifier == EXPAND_CONST_ADDRESS
11058 || modifier == EXPAND_INITIALIZER)
11059 return op0;
11060
11061 if (target == 0)
11062 target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
11063
11064 convert_move (target, op0, unsignedp);
11065 return target;
11066 }
11067
11068 case OBJ_TYPE_REF:
11069 return expand_expr (OBJ_TYPE_REF_EXPR (exp), target, tmode, modifier);
11070
11071 case CALL_EXPR:
11072 /* All valid uses of __builtin_va_arg_pack () are removed during
11073 inlining. */
11074 if (CALL_EXPR_VA_ARG_PACK (exp))
11075 error ("%Kinvalid use of %<__builtin_va_arg_pack ()%>", exp);
11076 {
11077 tree fndecl = get_callee_fndecl (exp), attr;
11078
11079 if (fndecl
11080 /* Don't diagnose the error attribute in thunks, those are
11081 artificially created. */
11082 && !CALL_FROM_THUNK_P (exp)
11083 && (attr = lookup_attribute ("error",
11084 DECL_ATTRIBUTES (fndecl))) != NULL)
11085 {
11086 const char *ident = lang_hooks.decl_printable_name (fndecl, 1);
11087 error ("%Kcall to %qs declared with attribute error: %s", exp,
11088 identifier_to_locale (ident),
11089 TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr))));
11090 }
11091 if (fndecl
11092 /* Don't diagnose the warning attribute in thunks, those are
11093 artificially created. */
11094 && !CALL_FROM_THUNK_P (exp)
11095 && (attr = lookup_attribute ("warning",
11096 DECL_ATTRIBUTES (fndecl))) != NULL)
11097 {
11098 const char *ident = lang_hooks.decl_printable_name (fndecl, 1);
11099 warning_at (tree_nonartificial_location (exp),
11100 OPT_Wattribute_warning,
11101 "%Kcall to %qs declared with attribute warning: %s",
11102 exp, identifier_to_locale (ident),
11103 TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr))));
11104 }
11105
11106 /* Check for a built-in function. */
11107 if (fndecl && fndecl_built_in_p (fndecl))
11108 {
11109 gcc_assert (DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_FRONTEND);
11110 return expand_builtin (exp, target, subtarget, tmode, ignore);
11111 }
11112 }
11113 return expand_call (exp, target, ignore);
11114
11115 case VIEW_CONVERT_EXPR:
11116 op0 = NULL_RTX;
11117
11118 /* If we are converting to BLKmode, try to avoid an intermediate
11119 temporary by fetching an inner memory reference. */
11120 if (mode == BLKmode
11121 && poly_int_tree_p (TYPE_SIZE (type))
11122 && TYPE_MODE (TREE_TYPE (treeop0)) != BLKmode
11123 && handled_component_p (treeop0))
11124 {
11125 machine_mode mode1;
11126 poly_int64 bitsize, bitpos, bytepos;
11127 tree offset;
11128 int reversep, volatilep = 0;
11129 tree tem
11130 = get_inner_reference (treeop0, &bitsize, &bitpos, &offset, &mode1,
11131 &unsignedp, &reversep, &volatilep);
11132
11133 /* ??? We should work harder and deal with non-zero offsets. */
11134 if (!offset
11135 && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
11136 && !reversep
11137 && known_size_p (bitsize)
11138 && known_eq (wi::to_poly_offset (TYPE_SIZE (type)), bitsize))
11139 {
11140 /* See the normal_inner_ref case for the rationale. */
11141 rtx orig_op0
11142 = expand_expr_real (tem,
11143 (TREE_CODE (TREE_TYPE (tem)) == UNION_TYPE
11144 && (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem)))
11145 != INTEGER_CST)
11146 && modifier != EXPAND_STACK_PARM
11147 ? target : NULL_RTX),
11148 VOIDmode,
11149 modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier,
11150 NULL, true);
11151
11152 if (MEM_P (orig_op0))
11153 {
11154 op0 = orig_op0;
11155
11156 /* Get a reference to just this component. */
11157 if (modifier == EXPAND_CONST_ADDRESS
11158 || modifier == EXPAND_SUM
11159 || modifier == EXPAND_INITIALIZER)
11160 op0 = adjust_address_nv (op0, mode, bytepos);
11161 else
11162 op0 = adjust_address (op0, mode, bytepos);
11163
11164 if (op0 == orig_op0)
11165 op0 = copy_rtx (op0);
11166
11167 set_mem_attributes (op0, treeop0, 0);
11168 if (REG_P (XEXP (op0, 0)))
11169 mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
11170
11171 MEM_VOLATILE_P (op0) |= volatilep;
11172 }
11173 }
11174 }
11175
11176 if (!op0)
11177 op0 = expand_expr_real (treeop0, NULL_RTX, VOIDmode, modifier,
11178 NULL, inner_reference_p);
11179
11180 /* If the input and output modes are both the same, we are done. */
11181 if (mode == GET_MODE (op0))
11182 ;
11183 /* If neither mode is BLKmode, and both modes are the same size
11184 then we can use gen_lowpart. */
11185 else if (mode != BLKmode
11186 && GET_MODE (op0) != BLKmode
11187 && known_eq (GET_MODE_PRECISION (mode),
11188 GET_MODE_PRECISION (GET_MODE (op0)))
11189 && !COMPLEX_MODE_P (GET_MODE (op0)))
11190 {
11191 if (GET_CODE (op0) == SUBREG)
11192 op0 = force_reg (GET_MODE (op0), op0);
11193 temp = gen_lowpart_common (mode, op0);
11194 if (temp)
11195 op0 = temp;
11196 else
11197 {
11198 if (!REG_P (op0) && !MEM_P (op0))
11199 op0 = force_reg (GET_MODE (op0), op0);
11200 op0 = gen_lowpart (mode, op0);
11201 }
11202 }
11203 /* If both types are integral, convert from one mode to the other. */
11204 else if (INTEGRAL_TYPE_P (type) && INTEGRAL_TYPE_P (TREE_TYPE (treeop0)))
11205 op0 = convert_modes (mode, GET_MODE (op0), op0,
11206 TYPE_UNSIGNED (TREE_TYPE (treeop0)));
11207 /* If the output type is a bit-field type, do an extraction. */
11208 else if (reduce_bit_field)
11209 return extract_bit_field (op0, TYPE_PRECISION (type), 0,
11210 TYPE_UNSIGNED (type), NULL_RTX,
11211 mode, mode, false, NULL);
11212 /* As a last resort, spill op0 to memory, and reload it in a
11213 different mode. */
11214 else if (!MEM_P (op0))
11215 {
11216 /* If the operand is not a MEM, force it into memory. Since we
11217 are going to be changing the mode of the MEM, don't call
11218 force_const_mem for constants because we don't allow pool
11219 constants to change mode. */
11220 tree inner_type = TREE_TYPE (treeop0);
11221
11222 gcc_assert (!TREE_ADDRESSABLE (exp));
11223
11224 if (target == 0 || GET_MODE (target) != TYPE_MODE (inner_type))
11225 target
11226 = assign_stack_temp_for_type
11227 (TYPE_MODE (inner_type),
11228 GET_MODE_SIZE (TYPE_MODE (inner_type)), inner_type);
11229
11230 emit_move_insn (target, op0);
11231 op0 = target;
11232 }
11233
11234 /* If OP0 is (now) a MEM, we need to deal with alignment issues. If the
11235 output type is such that the operand is known to be aligned, indicate
11236 that it is. Otherwise, we need only be concerned about alignment for
11237 non-BLKmode results. */
11238 if (MEM_P (op0))
11239 {
11240 enum insn_code icode;
11241
11242 if (modifier != EXPAND_WRITE
11243 && modifier != EXPAND_MEMORY
11244 && !inner_reference_p
11245 && mode != BLKmode
11246 && MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode))
11247 {
11248 /* If the target has special handling for unaligned
11249 loads of this mode, use it. */
11250 if ((icode = optab_handler (movmisalign_optab, mode))
11251 != CODE_FOR_nothing)
11252 {
11253 rtx reg;
11254
11255 op0 = adjust_address (op0, mode, 0);
11256 /* We've already validated the memory, and we're creating a
11257 new pseudo destination. The predicates really can't
11258 fail. */
11259 reg = gen_reg_rtx (mode);
11260
11261 /* Nor can the insn generator. */
11262 rtx_insn *insn = GEN_FCN (icode) (reg, op0);
11263 emit_insn (insn);
11264 return reg;
11265 }
11266 else if (STRICT_ALIGNMENT)
11267 {
11268 poly_uint64 mode_size = GET_MODE_SIZE (mode);
11269 poly_uint64 temp_size = mode_size;
11270 if (GET_MODE (op0) != BLKmode)
11271 temp_size = upper_bound (temp_size,
11272 GET_MODE_SIZE (GET_MODE (op0)));
11273 rtx new_rtx
11274 = assign_stack_temp_for_type (mode, temp_size, type);
11275 rtx new_with_op0_mode
11276 = adjust_address (new_rtx, GET_MODE (op0), 0);
11277
11278 gcc_assert (!TREE_ADDRESSABLE (exp));
11279
11280 if (GET_MODE (op0) == BLKmode)
11281 {
11282 rtx size_rtx = gen_int_mode (mode_size, Pmode);
11283 emit_block_move (new_with_op0_mode, op0, size_rtx,
11284 (modifier == EXPAND_STACK_PARM
11285 ? BLOCK_OP_CALL_PARM
11286 : BLOCK_OP_NORMAL));
11287 }
11288 else
11289 emit_move_insn (new_with_op0_mode, op0);
11290
11291 op0 = new_rtx;
11292 }
11293 }
11294
11295 op0 = adjust_address (op0, mode, 0);
11296 }
11297
11298 return op0;
11299
11300 case MODIFY_EXPR:
11301 {
11302 tree lhs = treeop0;
11303 tree rhs = treeop1;
11304 gcc_assert (ignore);
11305
11306 /* Check for |= or &= of a bitfield of size one into another bitfield
11307 of size 1. In this case, (unless we need the result of the
11308 assignment) we can do this more efficiently with a
11309 test followed by an assignment, if necessary.
11310
11311 ??? At this point, we can't get a BIT_FIELD_REF here. But if
11312 things change so we do, this code should be enhanced to
11313 support it. */
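/* A sketch of the effect, with hypothetical 1-bit bitfields
   `struct S { unsigned a : 1, b : 1; } s;': `s.a |= s.b;' is expanded
   below roughly as `if (s.b) s.a = 1;', and `s.a &= s.b;' roughly as
   `if (!s.b) s.a = 0;', avoiding a read-modify-write of the LHS bitfield.  */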
11314 if (TREE_CODE (lhs) == COMPONENT_REF
11315 && (TREE_CODE (rhs) == BIT_IOR_EXPR
11316 || TREE_CODE (rhs) == BIT_AND_EXPR)
11317 && TREE_OPERAND (rhs, 0) == lhs
11318 && TREE_CODE (TREE_OPERAND (rhs, 1)) == COMPONENT_REF
11319 && integer_onep (DECL_SIZE (TREE_OPERAND (lhs, 1)))
11320 && integer_onep (DECL_SIZE (TREE_OPERAND (TREE_OPERAND (rhs, 1), 1))))
11321 {
11322 rtx_code_label *label = gen_label_rtx ();
11323 int value = TREE_CODE (rhs) == BIT_IOR_EXPR;
11324 profile_probability prob = profile_probability::uninitialized ();
11325 if (value)
11326 jumpifnot (TREE_OPERAND (rhs, 1), label, prob);
11327 else
11328 jumpif (TREE_OPERAND (rhs, 1), label, prob);
11329 expand_assignment (lhs, build_int_cst (TREE_TYPE (rhs), value),
11330 false);
11331 do_pending_stack_adjust ();
11332 emit_label (label);
11333 return const0_rtx;
11334 }
11335
11336 expand_assignment (lhs, rhs, false);
11337 return const0_rtx;
11338 }
11339
11340 case ADDR_EXPR:
11341 return expand_expr_addr_expr (exp, target, tmode, modifier);
11342
11343 case REALPART_EXPR:
11344 op0 = expand_normal (treeop0);
11345 return read_complex_part (op0, false);
11346
11347 case IMAGPART_EXPR:
11348 op0 = expand_normal (treeop0);
11349 return read_complex_part (op0, true);
11350
11351 case RETURN_EXPR:
11352 case LABEL_EXPR:
11353 case GOTO_EXPR:
11354 case SWITCH_EXPR:
11355 case ASM_EXPR:
11356 /* Expanded in cfgexpand.c. */
11357 gcc_unreachable ();
11358
11359 case TRY_CATCH_EXPR:
11360 case CATCH_EXPR:
11361 case EH_FILTER_EXPR:
11362 case TRY_FINALLY_EXPR:
11363 case EH_ELSE_EXPR:
11364 /* Lowered by tree-eh.c. */
11365 gcc_unreachable ();
11366
11367 case WITH_CLEANUP_EXPR:
11368 case CLEANUP_POINT_EXPR:
11369 case TARGET_EXPR:
11370 case CASE_LABEL_EXPR:
11371 case VA_ARG_EXPR:
11372 case BIND_EXPR:
11373 case INIT_EXPR:
11374 case CONJ_EXPR:
11375 case COMPOUND_EXPR:
11376 case PREINCREMENT_EXPR:
11377 case PREDECREMENT_EXPR:
11378 case POSTINCREMENT_EXPR:
11379 case POSTDECREMENT_EXPR:
11380 case LOOP_EXPR:
11381 case EXIT_EXPR:
11382 case COMPOUND_LITERAL_EXPR:
11383 /* Lowered by gimplify.c. */
11384 gcc_unreachable ();
11385
11386 case FDESC_EXPR:
11387 /* Function descriptors are not valid except as
11388 initialization constants, and should not be expanded. */
11389 gcc_unreachable ();
11390
11391 case WITH_SIZE_EXPR:
11392 /* WITH_SIZE_EXPR expands to its first argument. The caller should
11393 have pulled out the size to use in whatever context it needed. */
11394 return expand_expr_real (treeop0, original_target, tmode,
11395 modifier, alt_rtl, inner_reference_p);
11396
11397 default:
11398 return expand_expr_real_2 (&ops, target, tmode, modifier);
11399 }
11400 }
11401 \f
11402 /* Subroutine of above: reduce EXP to the precision of TYPE (in the
11403 signedness of TYPE), possibly returning the result in TARGET.
11404 TYPE is known to be a partial integer type. */
11405 static rtx
11406 reduce_to_bit_field_precision (rtx exp, rtx target, tree type)
11407 {
11408 HOST_WIDE_INT prec = TYPE_PRECISION (type);
11409 if (target && GET_MODE (target) != GET_MODE (exp))
11410 target = 0;
11411 /* For constant values, reduce using build_int_cst_type. */
11412 poly_int64 const_exp;
11413 if (poly_int_rtx_p (exp, &const_exp))
11414 {
11415 tree t = build_int_cst_type (type, const_exp);
11416 return expand_expr (t, target, VOIDmode, EXPAND_NORMAL);
11417 }
11418 else if (TYPE_UNSIGNED (type))
11419 {
11420 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (exp));
11421 rtx mask = immed_wide_int_const
11422 (wi::mask (prec, false, GET_MODE_PRECISION (mode)), mode);
11423 return expand_and (mode, exp, mask, target);
11424 }
11425 else
11426 {
11427 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (exp));
11428 int count = GET_MODE_PRECISION (mode) - prec;
11429 exp = expand_shift (LSHIFT_EXPR, mode, exp, count, target, 0);
11430 return expand_shift (RSHIFT_EXPR, mode, exp, count, target, 0);
11431 }
11432 }
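/* A worked example of the reduction above, assuming a hypothetical 5-bit
   bit-field type held in SImode (prec == 5): the unsigned case masks the
   value with 0x1f; the signed case uses count == 32 - 5 == 27, so a
   register holding 0x1f becomes 0xf8000000 after the left shift and
   0xffffffff (-1) after the arithmetic right shift, i.e. the value is
   sign-extended from bit 4.  */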
11433 \f
11434 /* Subroutine of above: returns 1 if OFFSET corresponds to an offset that
11435 when applied to the address of EXP produces an address known to be
11436 aligned more than BIGGEST_ALIGNMENT. */
11437
11438 static int
11439 is_aligning_offset (const_tree offset, const_tree exp)
11440 {
11441 /* Strip off any conversions. */
11442 while (CONVERT_EXPR_P (offset))
11443 offset = TREE_OPERAND (offset, 0);
11444
11445 /* We must now have a BIT_AND_EXPR with a constant that is one less than
11446 a power of 2 and which is larger than BIGGEST_ALIGNMENT. */
11447 if (TREE_CODE (offset) != BIT_AND_EXPR
11448 || !tree_fits_uhwi_p (TREE_OPERAND (offset, 1))
11449 || compare_tree_int (TREE_OPERAND (offset, 1),
11450 BIGGEST_ALIGNMENT / BITS_PER_UNIT) <= 0
11451 || !pow2p_hwi (tree_to_uhwi (TREE_OPERAND (offset, 1)) + 1))
11452 return 0;
11453
11454 /* Look at the first operand of BIT_AND_EXPR and strip any conversion.
11455 It must be NEGATE_EXPR. Then strip any more conversions. */
11456 offset = TREE_OPERAND (offset, 0);
11457 while (CONVERT_EXPR_P (offset))
11458 offset = TREE_OPERAND (offset, 0);
11459
11460 if (TREE_CODE (offset) != NEGATE_EXPR)
11461 return 0;
11462
11463 offset = TREE_OPERAND (offset, 0);
11464 while (CONVERT_EXPR_P (offset))
11465 offset = TREE_OPERAND (offset, 0);
11466
11467 /* This must now be the address of EXP. */
11468 return TREE_CODE (offset) == ADDR_EXPR && TREE_OPERAND (offset, 0) == exp;
11469 }
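/* A sketch of the pattern recognized above: an offset of the form
   (-(sizetype) &buf) & (ALIGN - 1), where ALIGN is a power of two whose
   byte value exceeds BIGGEST_ALIGNMENT / BITS_PER_UNIT.  Adding such an
   offset to &buf rounds the address up to an ALIGN-byte boundary, which
   is why the caller may record BIGGEST_ALIGNMENT for the result.  */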
11470 \f
11471 /* Return the tree node if an ARG corresponds to a string constant or zero
11472 if it doesn't. If we return nonzero, set *PTR_OFFSET to the (possibly
11473 non-constant) offset in bytes within the string that ARG is accessing.
11474 If MEM_SIZE is non-zero the storage size of the memory is returned.
11475 If DECL is non-zero the constant declaration is returned if available. */
11476
11477 tree
11478 string_constant (tree arg, tree *ptr_offset, tree *mem_size, tree *decl)
11479 {
11480 tree dummy = NULL_TREE;
11481 if (!mem_size)
11482 mem_size = &dummy;
11483
11484 /* Store the type of the original expression before conversions
11485 via NOP_EXPR or POINTER_PLUS_EXPR to other types have been
11486 removed. */
11487 tree argtype = TREE_TYPE (arg);
11488
11489 tree array;
11490 STRIP_NOPS (arg);
11491
11492 /* Non-constant index into the character array in an ARRAY_REF
11493 expression or null. */
11494 tree varidx = NULL_TREE;
11495
11496 poly_int64 base_off = 0;
11497
11498 if (TREE_CODE (arg) == ADDR_EXPR)
11499 {
11500 arg = TREE_OPERAND (arg, 0);
11501 tree ref = arg;
11502 if (TREE_CODE (arg) == ARRAY_REF)
11503 {
11504 tree idx = TREE_OPERAND (arg, 1);
11505 if (TREE_CODE (idx) != INTEGER_CST)
11506 {
11507 /* From a pointer (but not array) argument extract the variable
11508 index to prevent get_addr_base_and_unit_offset() from failing
11509 due to it. Use it later to compute the non-constant offset
11510 into the string and return it to the caller. */
11511 varidx = idx;
11512 ref = TREE_OPERAND (arg, 0);
11513
11514 if (TREE_CODE (TREE_TYPE (arg)) == ARRAY_TYPE)
11515 return NULL_TREE;
11516
11517 if (!integer_zerop (array_ref_low_bound (arg)))
11518 return NULL_TREE;
11519
11520 if (!integer_onep (array_ref_element_size (arg)))
11521 return NULL_TREE;
11522 }
11523 }
11524 array = get_addr_base_and_unit_offset (ref, &base_off);
11525 if (!array
11526 || (TREE_CODE (array) != VAR_DECL
11527 && TREE_CODE (array) != CONST_DECL
11528 && TREE_CODE (array) != STRING_CST))
11529 return NULL_TREE;
11530 }
11531 else if (TREE_CODE (arg) == PLUS_EXPR || TREE_CODE (arg) == POINTER_PLUS_EXPR)
11532 {
11533 tree arg0 = TREE_OPERAND (arg, 0);
11534 tree arg1 = TREE_OPERAND (arg, 1);
11535
11536 tree offset;
11537 tree str = string_constant (arg0, &offset, mem_size, decl);
11538 if (!str)
11539 {
11540 str = string_constant (arg1, &offset, mem_size, decl);
11541 arg1 = arg0;
11542 }
11543
11544 if (str)
11545 {
11546 /* Avoid pointers to arrays (see bug 86622). */
11547 if (POINTER_TYPE_P (TREE_TYPE (arg))
11548 && TREE_CODE (TREE_TYPE (TREE_TYPE (arg))) == ARRAY_TYPE
11549 && !(decl && !*decl)
11550 && !(decl && tree_fits_uhwi_p (DECL_SIZE_UNIT (*decl))
11551 && tree_fits_uhwi_p (*mem_size)
11552 && tree_int_cst_equal (*mem_size, DECL_SIZE_UNIT (*decl))))
11553 return NULL_TREE;
11554
11555 tree type = TREE_TYPE (offset);
11556 arg1 = fold_convert (type, arg1);
11557 *ptr_offset = fold_build2 (PLUS_EXPR, type, offset, arg1);
11558 return str;
11559 }
11560 return NULL_TREE;
11561 }
11562 else if (TREE_CODE (arg) == SSA_NAME)
11563 {
11564 gimple *stmt = SSA_NAME_DEF_STMT (arg);
11565 if (!is_gimple_assign (stmt))
11566 return NULL_TREE;
11567
11568 tree rhs1 = gimple_assign_rhs1 (stmt);
11569 tree_code code = gimple_assign_rhs_code (stmt);
11570 if (code == ADDR_EXPR)
11571 return string_constant (rhs1, ptr_offset, mem_size, decl);
11572 else if (code != POINTER_PLUS_EXPR)
11573 return NULL_TREE;
11574
11575 tree offset;
11576 if (tree str = string_constant (rhs1, &offset, mem_size, decl))
11577 {
11578 /* Avoid pointers to arrays (see bug 86622). */
11579 if (POINTER_TYPE_P (TREE_TYPE (rhs1))
11580 && TREE_CODE (TREE_TYPE (TREE_TYPE (rhs1))) == ARRAY_TYPE
11581 && !(decl && !*decl)
11582 && !(decl && tree_fits_uhwi_p (DECL_SIZE_UNIT (*decl))
11583 && tree_fits_uhwi_p (*mem_size)
11584 && tree_int_cst_equal (*mem_size, DECL_SIZE_UNIT (*decl))))
11585 return NULL_TREE;
11586
11587 tree rhs2 = gimple_assign_rhs2 (stmt);
11588 tree type = TREE_TYPE (offset);
11589 rhs2 = fold_convert (type, rhs2);
11590 *ptr_offset = fold_build2 (PLUS_EXPR, type, offset, rhs2);
11591 return str;
11592 }
11593 return NULL_TREE;
11594 }
11595 else if (DECL_P (arg))
11596 array = arg;
11597 else
11598 return NULL_TREE;
11599
11600 tree offset = wide_int_to_tree (sizetype, base_off);
11601 if (varidx)
11602 {
11603 if (TREE_CODE (TREE_TYPE (array)) != ARRAY_TYPE)
11604 return NULL_TREE;
11605
11606 gcc_assert (TREE_CODE (arg) == ARRAY_REF);
11607 tree chartype = TREE_TYPE (TREE_TYPE (TREE_OPERAND (arg, 0)));
11608 if (TREE_CODE (chartype) != INTEGER_TYPE)
11609 return NULL_TREE;
11610
11611 offset = fold_convert (sizetype, varidx);
11612 }
11613
11614 if (TREE_CODE (array) == STRING_CST)
11615 {
11616 *ptr_offset = fold_convert (sizetype, offset);
11617 *mem_size = TYPE_SIZE_UNIT (TREE_TYPE (array));
11618 if (decl)
11619 *decl = NULL_TREE;
11620 gcc_checking_assert (tree_to_shwi (TYPE_SIZE_UNIT (TREE_TYPE (array)))
11621 >= TREE_STRING_LENGTH (array));
11622 return array;
11623 }
11624
11625 if (!VAR_P (array) && TREE_CODE (array) != CONST_DECL)
11626 return NULL_TREE;
11627
11628 tree init = ctor_for_folding (array);
11629
11630 /* Handle variables initialized with string literals. */
11631 if (!init || init == error_mark_node)
11632 return NULL_TREE;
11633 if (TREE_CODE (init) == CONSTRUCTOR)
11634 {
11635 /* Convert the 64-bit constant offset to a wider type to avoid
11636 overflow. */
11637 offset_int wioff;
11638 if (!base_off.is_constant (&wioff))
11639 return NULL_TREE;
11640
11641 wioff *= BITS_PER_UNIT;
11642 if (!wi::fits_uhwi_p (wioff))
11643 return NULL_TREE;
11644
11645 base_off = wioff.to_uhwi ();
11646 unsigned HOST_WIDE_INT fieldoff = 0;
11647 init = fold_ctor_reference (TREE_TYPE (arg), init, base_off, 0, array,
11648 &fieldoff);
11649 HOST_WIDE_INT cstoff;
11650 if (!base_off.is_constant (&cstoff))
11651 return NULL_TREE;
11652
11653 cstoff = (cstoff - fieldoff) / BITS_PER_UNIT;
11654 tree off = build_int_cst (sizetype, cstoff);
11655 if (varidx)
11656 offset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset), offset, off);
11657 else
11658 offset = off;
11659 }
11660
11661 if (!init)
11662 return NULL_TREE;
11663
11664 *ptr_offset = offset;
11665
11666 tree inittype = TREE_TYPE (init);
11667
11668 if (TREE_CODE (init) == INTEGER_CST
11669 && (TREE_CODE (TREE_TYPE (array)) == INTEGER_TYPE
11670 || TYPE_MAIN_VARIANT (inittype) == char_type_node))
11671 {
11672 /* For a reference to (address of) a single constant character,
11673 store the native representation of the character in CHARBUF.
11674 If the reference is to an element of an array or a member
11675 of a struct, only consider narrow characters until ctors
11676 for wide character arrays are transformed to STRING_CSTs
11677 like those for narrow arrays. */
11678 unsigned char charbuf[MAX_BITSIZE_MODE_ANY_MODE / BITS_PER_UNIT];
11679 int len = native_encode_expr (init, charbuf, sizeof charbuf, 0);
11680 if (len > 0)
11681 {
11682 /* Construct a string literal with elements of INITTYPE and
11683 the representation above. Then strip
11684 the ADDR_EXPR (ARRAY_REF (...)) around the STRING_CST. */
11685 init = build_string_literal (len, (char *)charbuf, inittype);
11686 init = TREE_OPERAND (TREE_OPERAND (init, 0), 0);
11687 }
11688 }
11689
11690 tree initsize = TYPE_SIZE_UNIT (inittype);
11691
11692 if (TREE_CODE (init) == CONSTRUCTOR && initializer_zerop (init))
11693 {
11694 /* Fold an empty/zero constructor for an implicitly initialized
11695 object or subobject into the empty string. */
11696
11697 /* Determine the character type from that of the original
11698 expression. */
11699 tree chartype = argtype;
11700 if (POINTER_TYPE_P (chartype))
11701 chartype = TREE_TYPE (chartype);
11702 while (TREE_CODE (chartype) == ARRAY_TYPE)
11703 chartype = TREE_TYPE (chartype);
11704 /* Convert a char array to an empty STRING_CST having an array
11705 of the expected type. */
11706 if (!initsize)
11707 initsize = integer_zero_node;
11708
11709 unsigned HOST_WIDE_INT size = tree_to_uhwi (initsize);
11710 init = build_string_literal (size ? 1 : 0, "", chartype, size);
11711 init = TREE_OPERAND (init, 0);
11712 init = TREE_OPERAND (init, 0);
11713
11714 *ptr_offset = integer_zero_node;
11715 }
11716
11717 if (decl)
11718 *decl = array;
11719
11720 if (TREE_CODE (init) != STRING_CST)
11721 return NULL_TREE;
11722
11723 *mem_size = initsize;
11724
11725 gcc_checking_assert (tree_to_shwi (initsize) >= TREE_STRING_LENGTH (init));
11726
11727 return init;
11728 }
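/* A sketch of the contract above, for a hypothetical argument
   &"hello"[2]: the function returns the STRING_CST "hello", sets
   *PTR_OFFSET to 2 and *MEM_SIZE to 6 (the size in bytes of the char[6]
   backing the literal, including the terminating NUL), and clears *DECL
   if it was requested, since no variable declaration backs the literal.  */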
11729 \f
11730 /* Compute the modular multiplicative inverse of A modulo B
11731 using the extended Euclidean algorithm. Assumes A and B are coprime. */
11732 static wide_int
11733 mod_inv (const wide_int &a, const wide_int &b)
11734 {
11735 /* Verify the assumption. */
11736 gcc_checking_assert (wi::eq_p (wi::gcd (a, b), 1));
11737
11738 unsigned int p = a.get_precision () + 1;
11739 gcc_checking_assert (b.get_precision () + 1 == p);
11740 wide_int c = wide_int::from (a, p, UNSIGNED);
11741 wide_int d = wide_int::from (b, p, UNSIGNED);
11742 wide_int x0 = wide_int::from (0, p, UNSIGNED);
11743 wide_int x1 = wide_int::from (1, p, UNSIGNED);
11744
11745 if (wi::eq_p (b, 1))
11746 return wide_int::from (1, p, UNSIGNED);
11747
11748 while (wi::gt_p (c, 1, UNSIGNED))
11749 {
11750 wide_int t = d;
11751 wide_int q = wi::divmod_trunc (c, d, UNSIGNED, &d);
11752 c = t;
11753 wide_int s = x0;
11754 x0 = wi::sub (x1, wi::mul (q, x0));
11755 x1 = s;
11756 }
11757 if (wi::lt_p (x1, 0, SIGNED))
11758 x1 += d;
11759 return x1;
11760 }
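/* A worked example, assuming the single caller below, which passes
   B == 1 << prec: for A == 3 and prec == 32 the result is 0xaaaaaaab,
   since 3 * 0xaaaaaaab == 0x200000001 == 1 (mod 1 << 32); this is the
   multiplier used to rewrite x % 3 comparisons further down.  */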
11761
11762 /* Optimize x % C1 == C2 for signed modulo if C1 is a power of two and C2
11763 is non-zero. With C3 = ((1 << (prec - 1)) | (C1 - 1)), transform it
11764 for C2 > 0 to (x & C3) == C2
11765 for C2 < 0 to (x & C3) == (C2 & C3). */
11766 enum tree_code
11767 maybe_optimize_pow2p_mod_cmp (enum tree_code code, tree *arg0, tree *arg1)
11768 {
11769 gimple *stmt = get_def_for_expr (*arg0, TRUNC_MOD_EXPR);
11770 tree treeop0 = gimple_assign_rhs1 (stmt);
11771 tree treeop1 = gimple_assign_rhs2 (stmt);
11772 tree type = TREE_TYPE (*arg0);
11773 scalar_int_mode mode;
11774 if (!is_a <scalar_int_mode> (TYPE_MODE (type), &mode))
11775 return code;
11776 if (GET_MODE_BITSIZE (mode) != TYPE_PRECISION (type)
11777 || TYPE_PRECISION (type) <= 1
11778 || TYPE_UNSIGNED (type)
11779 /* Signed x % c == 0 should have been optimized into unsigned modulo
11780 earlier. */
11781 || integer_zerop (*arg1)
11782 /* If c is known to be non-negative, modulo will be expanded as unsigned
11783 modulo. */
11784 || get_range_pos_neg (treeop0) == 1)
11785 return code;
11786
11787 /* x % c == d where d < 0 && d <= -c should always be false. */
11788 if (tree_int_cst_sgn (*arg1) == -1
11789 && -wi::to_widest (treeop1) >= wi::to_widest (*arg1))
11790 return code;
11791
11792 int prec = TYPE_PRECISION (type);
11793 wide_int w = wi::to_wide (treeop1) - 1;
11794 w |= wi::shifted_mask (0, prec - 1, true, prec);
11795 tree c3 = wide_int_to_tree (type, w);
11796 tree c4 = *arg1;
11797 if (tree_int_cst_sgn (*arg1) == -1)
11798 c4 = wide_int_to_tree (type, w & wi::to_wide (*arg1));
11799
11800 rtx op0 = expand_normal (treeop0);
11801 treeop0 = make_tree (TREE_TYPE (treeop0), op0);
11802
11803 bool speed_p = optimize_insn_for_speed_p ();
11804
11805 do_pending_stack_adjust ();
11806
11807 location_t loc = gimple_location (stmt);
11808 struct separate_ops ops;
11809 ops.code = TRUNC_MOD_EXPR;
11810 ops.location = loc;
11811 ops.type = TREE_TYPE (treeop0);
11812 ops.op0 = treeop0;
11813 ops.op1 = treeop1;
11814 ops.op2 = NULL_TREE;
11815 start_sequence ();
11816 rtx mor = expand_expr_real_2 (&ops, NULL_RTX, TYPE_MODE (ops.type),
11817 EXPAND_NORMAL);
11818 rtx_insn *moinsns = get_insns ();
11819 end_sequence ();
11820
11821 unsigned mocost = seq_cost (moinsns, speed_p);
11822 mocost += rtx_cost (mor, mode, EQ, 0, speed_p);
11823 mocost += rtx_cost (expand_normal (*arg1), mode, EQ, 1, speed_p);
11824
11825 ops.code = BIT_AND_EXPR;
11826 ops.location = loc;
11827 ops.type = TREE_TYPE (treeop0);
11828 ops.op0 = treeop0;
11829 ops.op1 = c3;
11830 ops.op2 = NULL_TREE;
11831 start_sequence ();
11832 rtx mur = expand_expr_real_2 (&ops, NULL_RTX, TYPE_MODE (ops.type),
11833 EXPAND_NORMAL);
11834 rtx_insn *muinsns = get_insns ();
11835 end_sequence ();
11836
11837 unsigned mucost = seq_cost (muinsns, speed_p);
11838 mucost += rtx_cost (mur, mode, EQ, 0, speed_p);
11839 mucost += rtx_cost (expand_normal (c4), mode, EQ, 1, speed_p);
11840
11841 if (mocost <= mucost)
11842 {
11843 emit_insn (moinsns);
11844 *arg0 = make_tree (TREE_TYPE (*arg0), mor);
11845 return code;
11846 }
11847
11848 emit_insn (muinsns);
11849 *arg0 = make_tree (TREE_TYPE (*arg0), mur);
11850 *arg1 = c4;
11851 return code;
11852 }
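/* A worked example of the transformation above, assuming a signed 32-bit
   x and that the masked sequence is the cheaper one: x % 8 == 3 uses
   C3 == (1 << 31) | 7 == 0x80000007 and becomes (x & 0x80000007) == 3;
   the low bits check the remainder and the clear sign bit rules out
   negative x, whose remainder would lie in [-7, 0].  */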
11853
11854 /* Attempt to optimize unsigned (X % C1) == C2 (or (X % C1) != C2).
11855 If C1 is odd, transform it to:
11856 (X - C2) * C3 <= C4 (or >), where
11857 C3 is the modular multiplicative inverse of C1 modulo 1<<prec and
11858 C4 is ((1<<prec) - 1) / C1 or ((1<<prec) - 1) / C1 - 1 (the latter
11859 if C2 > ((1<<prec) - 1) % C1).
11860 If C1 is even, with S = ctz (C1) and C2 equal to 0, use
11861 ((X * C3) r>> S) <= C4, where C3 is the modular multiplicative
11862 inverse of C1>>S modulo 1<<prec and C4 is (((1<<prec) - 1) / (C1>>S)) >> S.
11863 
11864 For signed (X % C1) == 0, if C1 is odd, transform it to (all operations
11865 in it unsigned):
11866 (X * C3) + C4 <= 2 * C4, where
11867 C3 is the modular multiplicative inverse of (unsigned) C1 modulo 1<<prec
11868 and C4 is (((1 << (prec - 1)) - 1) / C1).
11869 If C1 is even, with S = ctz (C1), use
11870 ((X * C3) + C4) r>> S <= (C4 >> (S - 1))
11871 where C3 is the modular multiplicative inverse of (unsigned)(C1>>S)
11872 modulo 1<<prec and C4 is (((1 << (prec - 1)) - 1) / (C1>>S)) & (-1 << S).
11873 
11874 See the Hacker's Delight book, section 10-17. */
11875 enum tree_code
11876 maybe_optimize_mod_cmp (enum tree_code code, tree *arg0, tree *arg1)
11877 {
11878 gcc_checking_assert (code == EQ_EXPR || code == NE_EXPR);
11879 gcc_checking_assert (TREE_CODE (*arg1) == INTEGER_CST);
11880
11881 if (optimize < 2)
11882 return code;
11883
11884 gimple *stmt = get_def_for_expr (*arg0, TRUNC_MOD_EXPR);
11885 if (stmt == NULL)
11886 return code;
11887
11888 tree treeop0 = gimple_assign_rhs1 (stmt);
11889 tree treeop1 = gimple_assign_rhs2 (stmt);
11890 if (TREE_CODE (treeop0) != SSA_NAME
11891 || TREE_CODE (treeop1) != INTEGER_CST
11892 /* Don't optimize the undefined behavior case x % 0;
11893 x % 1 should have been optimized into zero, punt if
11894 it makes it here for whatever reason;
11895 x % -c should have been optimized into x % c. */
11896 || compare_tree_int (treeop1, 2) <= 0
11897 /* Likewise x % c == d where d >= c should always be false. */
11898 || tree_int_cst_le (treeop1, *arg1))
11899 return code;
11900
11901 /* Unsigned x % pow2 is already handled correctly; for signed
11902 modulo handle it in maybe_optimize_pow2p_mod_cmp. */
11903 if (integer_pow2p (treeop1))
11904 return maybe_optimize_pow2p_mod_cmp (code, arg0, arg1);
11905
11906 tree type = TREE_TYPE (*arg0);
11907 scalar_int_mode mode;
11908 if (!is_a <scalar_int_mode> (TYPE_MODE (type), &mode))
11909 return code;
11910 if (GET_MODE_BITSIZE (mode) != TYPE_PRECISION (type)
11911 || TYPE_PRECISION (type) <= 1)
11912 return code;
11913
11914 signop sgn = UNSIGNED;
11915 /* If both operands are known to have the sign bit clear, handle
11916 even the signed modulo case as unsigned. treeop1 is always
11917 positive >= 2, checked above. */
11918 if (!TYPE_UNSIGNED (type) && get_range_pos_neg (treeop0) != 1)
11919 sgn = SIGNED;
11920
11921 if (!TYPE_UNSIGNED (type))
11922 {
11923 if (tree_int_cst_sgn (*arg1) == -1)
11924 return code;
11925 type = unsigned_type_for (type);
11926 if (!type || TYPE_MODE (type) != TYPE_MODE (TREE_TYPE (*arg0)))
11927 return code;
11928 }
11929
11930 int prec = TYPE_PRECISION (type);
11931 wide_int w = wi::to_wide (treeop1);
11932 int shift = wi::ctz (w);
11933 /* Unsigned (X % C1) == C2 is equivalent to (X - C2) % C1 == 0 if
11934 C2 <= -1U % C1, because for any Z >= 0U - C2 in that case (Z % C1) != 0.
11935 If C1 is odd, we can handle all cases by subtracting
11936 C4 below. We could handle even the even C1 and C2 > -1U % C1 cases
11937 e.g. by testing for overflow on the subtraction, punt on that for now
11938 though. */
11939 if ((sgn == SIGNED || shift) && !integer_zerop (*arg1))
11940 {
11941 if (sgn == SIGNED)
11942 return code;
11943 wide_int x = wi::umod_trunc (wi::mask (prec, false, prec), w);
11944 if (wi::gtu_p (wi::to_wide (*arg1), x))
11945 return code;
11946 }
11947
11948 imm_use_iterator imm_iter;
11949 use_operand_p use_p;
11950 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, treeop0)
11951 {
11952 gimple *use_stmt = USE_STMT (use_p);
11953 /* Punt if treeop0 is used in the same bb in a division
11954 or another modulo with the same divisor. We should expect
11955 the division and modulo combined together. */
11956 if (use_stmt == stmt
11957 || gimple_bb (use_stmt) != gimple_bb (stmt))
11958 continue;
11959 if (!is_gimple_assign (use_stmt)
11960 || (gimple_assign_rhs_code (use_stmt) != TRUNC_DIV_EXPR
11961 && gimple_assign_rhs_code (use_stmt) != TRUNC_MOD_EXPR))
11962 continue;
11963 if (gimple_assign_rhs1 (use_stmt) != treeop0
11964 || !operand_equal_p (gimple_assign_rhs2 (use_stmt), treeop1, 0))
11965 continue;
11966 return code;
11967 }
11968
11969 w = wi::lrshift (w, shift);
11970 wide_int a = wide_int::from (w, prec + 1, UNSIGNED);
11971 wide_int b = wi::shifted_mask (prec, 1, false, prec + 1);
11972 wide_int m = wide_int::from (mod_inv (a, b), prec, UNSIGNED);
11973 tree c3 = wide_int_to_tree (type, m);
11974 tree c5 = NULL_TREE;
11975 wide_int d, e;
11976 if (sgn == UNSIGNED)
11977 {
11978 d = wi::divmod_trunc (wi::mask (prec, false, prec), w, UNSIGNED, &e);
11979 /* Use <= floor ((1<<prec) - 1) / C1 only if C2 <= ((1<<prec) - 1) % C1,
11980 otherwise use < or subtract one from C4. E.g. for
11981 x % 3U == 0 we transform this into x * 0xaaaaaaab <= 0x55555555, but
11982 x % 3U == 1 already needs to be
11983 (x - 1) * 0xaaaaaaabU <= 0x55555554. */
11984 if (!shift && wi::gtu_p (wi::to_wide (*arg1), e))
11985 d -= 1;
11986 if (shift)
11987 d = wi::lrshift (d, shift);
11988 }
11989 else
11990 {
11991 e = wi::udiv_trunc (wi::mask (prec - 1, false, prec), w);
11992 if (!shift)
11993 d = wi::lshift (e, 1);
11994 else
11995 {
11996 e = wi::bit_and (e, wi::mask (shift, true, prec));
11997 d = wi::lrshift (e, shift - 1);
11998 }
11999 c5 = wide_int_to_tree (type, e);
12000 }
12001 tree c4 = wide_int_to_tree (type, d);
12002
12003 rtx op0 = expand_normal (treeop0);
12004 treeop0 = make_tree (TREE_TYPE (treeop0), op0);
12005
12006 bool speed_p = optimize_insn_for_speed_p ();
12007
12008 do_pending_stack_adjust ();
12009
12010 location_t loc = gimple_location (stmt);
12011 struct separate_ops ops;
12012 ops.code = TRUNC_MOD_EXPR;
12013 ops.location = loc;
12014 ops.type = TREE_TYPE (treeop0);
12015 ops.op0 = treeop0;
12016 ops.op1 = treeop1;
12017 ops.op2 = NULL_TREE;
12018 start_sequence ();
12019 rtx mor = expand_expr_real_2 (&ops, NULL_RTX, TYPE_MODE (ops.type),
12020 EXPAND_NORMAL);
12021 rtx_insn *moinsns = get_insns ();
12022 end_sequence ();
12023
12024 unsigned mocost = seq_cost (moinsns, speed_p);
12025 mocost += rtx_cost (mor, mode, EQ, 0, speed_p);
12026 mocost += rtx_cost (expand_normal (*arg1), mode, EQ, 1, speed_p);
12027
12028 tree t = fold_convert_loc (loc, type, treeop0);
12029 if (!integer_zerop (*arg1))
12030 t = fold_build2_loc (loc, MINUS_EXPR, type, t, fold_convert (type, *arg1));
12031 t = fold_build2_loc (loc, MULT_EXPR, type, t, c3);
12032 if (sgn == SIGNED)
12033 t = fold_build2_loc (loc, PLUS_EXPR, type, t, c5);
12034 if (shift)
12035 {
12036 tree s = build_int_cst (NULL_TREE, shift);
12037 t = fold_build2_loc (loc, RROTATE_EXPR, type, t, s);
12038 }
12039
12040 start_sequence ();
12041 rtx mur = expand_normal (t);
12042 rtx_insn *muinsns = get_insns ();
12043 end_sequence ();
12044
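/* Cost of the multiply-and-compare replacement built above. */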
12045 unsigned mucost = seq_cost (muinsns, speed_p);
12046 mucost += rtx_cost (mur, mode, LE, 0, speed_p);
12047 mucost += rtx_cost (expand_normal (c4), mode, LE, 1, speed_p);
12048
12049 if (mocost <= mucost)
12050 {
12051 emit_insn (moinsns);
12052 *arg0 = make_tree (TREE_TYPE (*arg0), mor);
12053 return code;
12054 }
12055
12056 emit_insn (muinsns);
12057 *arg0 = make_tree (type, mur);
12058 *arg1 = c4;
12059 return code == EQ_EXPR ? LE_EXPR : GT_EXPR;
12060 }
12061 \f
12062 /* Generate code to calculate OPS, an exploded expression,
12063 using a store-flag instruction and return an rtx for the result.
12064 OPS reflects a comparison.
12065
12066 If TARGET is nonzero, store the result there if convenient.
12067
12068 Return zero if there is no suitable set-flag instruction
12069 available on this machine.
12070
12071 Once expand_expr has been called on the arguments of the comparison,
12072 we are committed to doing the store flag, since it is not safe to
12073 re-evaluate the expression. We emit the store-flag insn by calling
12074 emit_store_flag, but only expand the arguments if we have a reason
12075 to believe that emit_store_flag will be successful. If we think that
12076 it will, but it isn't, we have to simulate the store-flag with a
12077 set/jump/set sequence. */
12078
12079 static rtx
12080 do_store_flag (sepops ops, rtx target, machine_mode mode)
12081 {
12082 enum rtx_code code;
12083 tree arg0, arg1, type;
12084 machine_mode operand_mode;
12085 int unsignedp;
12086 rtx op0, op1;
12087 rtx subtarget = target;
12088 location_t loc = ops->location;
12089
12090 arg0 = ops->op0;
12091 arg1 = ops->op1;
12092
12093 /* Don't crash if the comparison was erroneous. */
12094 if (arg0 == error_mark_node || arg1 == error_mark_node)
12095 return const0_rtx;
12096
12097 type = TREE_TYPE (arg0);
12098 operand_mode = TYPE_MODE (type);
12099 unsignedp = TYPE_UNSIGNED (type);
12100
12101 /* We won't bother with BLKmode store-flag operations because it would mean
12102 passing a lot of information to emit_store_flag. */
12103 if (operand_mode == BLKmode)
12104 return 0;
12105
12106 /* We won't bother with store-flag operations involving function pointers
12107 when function pointers must be canonicalized before comparisons. */
12108 if (targetm.have_canonicalize_funcptr_for_compare ()
12109 && ((POINTER_TYPE_P (TREE_TYPE (arg0))
12110 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (arg0))))
12111 || (POINTER_TYPE_P (TREE_TYPE (arg1))
12112 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (arg1))))))
12113 return 0;
12114
12115 STRIP_NOPS (arg0);
12116 STRIP_NOPS (arg1);
12117
12118 /* For vector typed comparisons emit code to generate the desired
12119 all-ones or all-zeros mask. Conveniently use the VEC_COND_EXPR
12120 expander for this. */
12121 if (TREE_CODE (ops->type) == VECTOR_TYPE)
12122 {
12123 tree ifexp = build2 (ops->code, ops->type, arg0, arg1);
12124 if (VECTOR_BOOLEAN_TYPE_P (ops->type)
12125 && expand_vec_cmp_expr_p (TREE_TYPE (arg0), ops->type, ops->code))
12126 return expand_vec_cmp_expr (ops->type, ifexp, target);
12127 else
12128 {
12129 tree if_true = constant_boolean_node (true, ops->type);
12130 tree if_false = constant_boolean_node (false, ops->type);
12131 return expand_vec_cond_expr (ops->type, ifexp, if_true,
12132 if_false, target);
12133 }
12134 }
12135
12136 /* Optimize (x % C1) == C2 or (x % C1) != C2, when beneficial,
12137 into (x - C2) * C3 < C4. */
12138 if ((ops->code == EQ_EXPR || ops->code == NE_EXPR)
12139 && TREE_CODE (arg0) == SSA_NAME
12140 && TREE_CODE (arg1) == INTEGER_CST)
12141 {
12142 enum tree_code code = maybe_optimize_mod_cmp (ops->code, &arg0, &arg1);
12143 if (code != ops->code)
12144 {
12145 struct separate_ops nops = *ops;
12146 nops.code = ops->code = code;
12147 nops.op0 = arg0;
12148 nops.op1 = arg1;
12149 nops.type = TREE_TYPE (arg0);
12150 return do_store_flag (&nops, target, mode);
12151 }
12152 }
12153
12154 /* Get the rtx comparison code to use. We know that OPS is a comparison
12155 operation of some type. Some comparisons against 1 and -1 can be
12156 converted to comparisons with zero. Do so here so that the tests
12157 below will be aware that we have a comparison with zero. These
12158 tests will not catch constants in the first operand, but constants
12159 are rarely passed as the first operand. */
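/* E.g. "x < 1" becomes "x <= 0" and "x >= 1" becomes "x > 0", and for
signed operands "x <= -1" becomes "x < 0" and "x > -1" becomes "x >= 0". */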
12160
12161 switch (ops->code)
12162 {
12163 case EQ_EXPR:
12164 code = EQ;
12165 break;
12166 case NE_EXPR:
12167 code = NE;
12168 break;
12169 case LT_EXPR:
12170 if (integer_onep (arg1))
12171 arg1 = integer_zero_node, code = unsignedp ? LEU : LE;
12172 else
12173 code = unsignedp ? LTU : LT;
12174 break;
12175 case LE_EXPR:
12176 if (! unsignedp && integer_all_onesp (arg1))
12177 arg1 = integer_zero_node, code = LT;
12178 else
12179 code = unsignedp ? LEU : LE;
12180 break;
12181 case GT_EXPR:
12182 if (! unsignedp && integer_all_onesp (arg1))
12183 arg1 = integer_zero_node, code = GE;
12184 else
12185 code = unsignedp ? GTU : GT;
12186 break;
12187 case GE_EXPR:
12188 if (integer_onep (arg1))
12189 arg1 = integer_zero_node, code = unsignedp ? GTU : GT;
12190 else
12191 code = unsignedp ? GEU : GE;
12192 break;
12193
12194 case UNORDERED_EXPR:
12195 code = UNORDERED;
12196 break;
12197 case ORDERED_EXPR:
12198 code = ORDERED;
12199 break;
12200 case UNLT_EXPR:
12201 code = UNLT;
12202 break;
12203 case UNLE_EXPR:
12204 code = UNLE;
12205 break;
12206 case UNGT_EXPR:
12207 code = UNGT;
12208 break;
12209 case UNGE_EXPR:
12210 code = UNGE;
12211 break;
12212 case UNEQ_EXPR:
12213 code = UNEQ;
12214 break;
12215 case LTGT_EXPR:
12216 code = LTGT;
12217 break;
12218
12219 default:
12220 gcc_unreachable ();
12221 }
12222
12223 /* Put a constant second. */
12224 if (TREE_CODE (arg0) == REAL_CST || TREE_CODE (arg0) == INTEGER_CST
12225 || TREE_CODE (arg0) == FIXED_CST)
12226 {
12227 std::swap (arg0, arg1);
12228 code = swap_condition (code);
12229 }
12230
12231 /* If this is an equality or inequality test of a single bit, we can
12232 do this by shifting the bit being tested to the low-order bit and
12233 masking the result with the constant 1. If the condition was EQ,
12234 we xor it with 1. This does not require an scc insn and is faster
12235 than an scc insn even if we have it.
12236
12237 The code to make this transformation was moved into fold_single_bit_test,
12238 so we just call into the folder and expand its result. */
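/* E.g. "(x & 8) != 0" expands as "(x >> 3) & 1", and the "== 0" form as
"((x >> 3) & 1) ^ 1". */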
12239
12240 if ((code == NE || code == EQ)
12241 && integer_zerop (arg1)
12242 && (TYPE_PRECISION (ops->type) != 1 || TYPE_UNSIGNED (ops->type)))
12243 {
12244 gimple *srcstmt = get_def_for_expr (arg0, BIT_AND_EXPR);
12245 if (srcstmt
12246 && integer_pow2p (gimple_assign_rhs2 (srcstmt)))
12247 {
12248 enum tree_code tcode = code == NE ? NE_EXPR : EQ_EXPR;
12249 tree type = lang_hooks.types.type_for_mode (mode, unsignedp);
12250 tree temp = fold_build2_loc (loc, BIT_AND_EXPR, TREE_TYPE (arg1),
12251 gimple_assign_rhs1 (srcstmt),
12252 gimple_assign_rhs2 (srcstmt));
12253 temp = fold_single_bit_test (loc, tcode, temp, arg1, type);
12254 if (temp)
12255 return expand_expr (temp, target, VOIDmode, EXPAND_NORMAL);
12256 }
12257 }
12258
12259 if (! get_subtarget (target)
12260 || GET_MODE (subtarget) != operand_mode)
12261 subtarget = 0;
12262
12263 expand_operands (arg0, arg1, subtarget, &op0, &op1, EXPAND_NORMAL);
12264
12265 if (target == 0)
12266 target = gen_reg_rtx (mode);
12267
12268 /* Try a cstore if possible. */
12269 return emit_store_flag_force (target, code, op0, op1,
12270 operand_mode, unsignedp,
12271 (TYPE_PRECISION (ops->type) == 1
12272 && !TYPE_UNSIGNED (ops->type)) ? -1 : 1);
12273 }
12274 \f
12275 /* Attempt to generate a casesi instruction. Returns 1 if successful,
12276 0 otherwise (i.e. if there is no casesi instruction).
12277
12278 DEFAULT_PROBABILITY is the probability of jumping to the default
12279 label. */
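/* INDEX_EXPR (of type INDEX_TYPE) is the value being switched on, MINVAL is
the lowest case value, RANGE is the highest case value minus MINVAL,
TABLE_LABEL is the dispatch table, and DEFAULT_LABEL (or FALLBACK_LABEL if
DEFAULT_LABEL is null) receives out-of-range values. */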
12280 int
12281 try_casesi (tree index_type, tree index_expr, tree minval, tree range,
12282 rtx table_label, rtx default_label, rtx fallback_label,
12283 profile_probability default_probability)
12284 {
12285 class expand_operand ops[5];
12286 scalar_int_mode index_mode = SImode;
12287 rtx op1, op2, index;
12288
12289 if (! targetm.have_casesi ())
12290 return 0;
12291
12292 /* The index must be some form of integer. Convert it to SImode. */
12293 scalar_int_mode omode = SCALAR_INT_TYPE_MODE (index_type);
12294 if (GET_MODE_BITSIZE (omode) > GET_MODE_BITSIZE (index_mode))
12295 {
12296 rtx rangertx = expand_normal (range);
12297
12298 /* We must handle the endpoints in the original mode. */
12299 index_expr = build2 (MINUS_EXPR, index_type,
12300 index_expr, minval);
12301 minval = integer_zero_node;
12302 index = expand_normal (index_expr);
12303 if (default_label)
12304 emit_cmp_and_jump_insns (rangertx, index, LTU, NULL_RTX,
12305 omode, 1, default_label,
12306 default_probability);
12307 /* Now we can safely truncate. */
12308 index = convert_to_mode (index_mode, index, 0);
12309 }
12310 else
12311 {
12312 if (omode != index_mode)
12313 {
12314 index_type = lang_hooks.types.type_for_mode (index_mode, 0);
12315 index_expr = fold_convert (index_type, index_expr);
12316 }
12317
12318 index = expand_normal (index_expr);
12319 }
12320
12321 do_pending_stack_adjust ();
12322
12323 op1 = expand_normal (minval);
12324 op2 = expand_normal (range);
12325
12326 create_input_operand (&ops[0], index, index_mode);
12327 create_convert_operand_from_type (&ops[1], op1, TREE_TYPE (minval));
12328 create_convert_operand_from_type (&ops[2], op2, TREE_TYPE (range));
12329 create_fixed_operand (&ops[3], table_label);
12330 create_fixed_operand (&ops[4], (default_label
12331 ? default_label
12332 : fallback_label));
12333 expand_jump_insn (targetm.code_for_casesi, 5, ops);
12334 return 1;
12335 }
12336
12337 /* Attempt to generate a tablejump instruction; same concept as try_casesi above. */
12338 /* Subroutine of the next function (try_tablejump).
12339
12340 INDEX is the value being switched on, with the lowest value
12341 in the table already subtracted.
12342 MODE is its expected mode (needed if INDEX is constant).
12343 RANGE is the length of the jump table.
12344 TABLE_LABEL is a CODE_LABEL rtx for the table itself.
12345
12346 DEFAULT_LABEL is a CODE_LABEL rtx to jump to if the
12347 index value is out of range.
12348 DEFAULT_PROBABILITY is the probability of jumping to
12349 the default label. */
12350
12351 static void
12352 do_tablejump (rtx index, machine_mode mode, rtx range, rtx table_label,
12353 rtx default_label, profile_probability default_probability)
12354 {
12355 rtx temp, vector;
12356
12357 if (INTVAL (range) > cfun->cfg->max_jumptable_ents)
12358 cfun->cfg->max_jumptable_ents = INTVAL (range);
12359
12360 /* Do an unsigned comparison (in the proper mode) between the index
12361 expression and the value which represents the length of the range.
12362 Since we just finished subtracting the lower bound of the range
12363 from the index expression, this comparison allows us to simultaneously
12364 check that the original index expression value is both greater than
12365 or equal to the minimum value of the range and less than or equal to
12366 the maximum value of the range. */
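/* E.g. for case values 10 .. 20, INDEX already has 10 subtracted and RANGE
is 10, so a single unsigned "INDEX > 10" test catches both original values
below 10 (which wrap around) and values above 20. */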
12367
12368 if (default_label)
12369 emit_cmp_and_jump_insns (index, range, GTU, NULL_RTX, mode, 1,
12370 default_label, default_probability);
12371
12372 /* If index is in range, it must fit in Pmode.
12373 Convert to Pmode so we can index with it. */
12374 if (mode != Pmode)
12375 {
12376 unsigned int width;
12377
12378 /* We know the value of INDEX is between 0 and RANGE. If we have a
12379 sign-extended subreg, and RANGE does not have the sign bit set, then
12380 we have a value that is valid for both sign and zero extension. In
12381 this case, we get better code if we sign extend. */
12382 if (GET_CODE (index) == SUBREG
12383 && SUBREG_PROMOTED_VAR_P (index)
12384 && SUBREG_PROMOTED_SIGNED_P (index)
12385 && ((width = GET_MODE_PRECISION (as_a <scalar_int_mode> (mode)))
12386 <= HOST_BITS_PER_WIDE_INT)
12387 && ! (UINTVAL (range) & (HOST_WIDE_INT_1U << (width - 1))))
12388 index = convert_to_mode (Pmode, index, 0);
12389 else
12390 index = convert_to_mode (Pmode, index, 1);
12391 }
12392
12393 /* Don't let a MEM slip through, because then INDEX that comes
12394 out of PIC_CASE_VECTOR_ADDRESS won't be a valid address,
12395 and break_out_memory_refs will go to work on it and mess it up. */
12396 #ifdef PIC_CASE_VECTOR_ADDRESS
12397 if (flag_pic && !REG_P (index))
12398 index = copy_to_mode_reg (Pmode, index);
12399 #endif
12400
12401 /* ??? The only correct use of CASE_VECTOR_MODE is the one inside the
12402 GET_MODE_SIZE, because this indicates how large insns are. The other
12403 uses should all be Pmode, because they are addresses. This code
12404 could fail if addresses and insns are not the same size. */
12405 index = simplify_gen_binary (MULT, Pmode, index,
12406 gen_int_mode (GET_MODE_SIZE (CASE_VECTOR_MODE),
12407 Pmode));
12408 index = simplify_gen_binary (PLUS, Pmode, index,
12409 gen_rtx_LABEL_REF (Pmode, table_label));
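/* INDEX now holds TABLE_LABEL + original index * GET_MODE_SIZE
(CASE_VECTOR_MODE), the address of the selected dispatch-table entry
(before any PIC adjustment below). */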
12410
12411 #ifdef PIC_CASE_VECTOR_ADDRESS
12412 if (flag_pic)
12413 index = PIC_CASE_VECTOR_ADDRESS (index);
12414 else
12415 #endif
12416 index = memory_address (CASE_VECTOR_MODE, index);
12417 temp = gen_reg_rtx (CASE_VECTOR_MODE);
12418 vector = gen_const_mem (CASE_VECTOR_MODE, index);
12419 convert_move (temp, vector, 0);
12420
12421 emit_jump_insn (targetm.gen_tablejump (temp, table_label));
12422
12423 /* If we are generating PIC code or if the table is PC-relative, the
12424 table and JUMP_INSN must be adjacent, so don't output a BARRIER. */
12425 if (! CASE_VECTOR_PC_RELATIVE && ! flag_pic)
12426 emit_barrier ();
12427 }
12428
12429 int
12430 try_tablejump (tree index_type, tree index_expr, tree minval, tree range,
12431 rtx table_label, rtx default_label,
12432 profile_probability default_probability)
12433 {
12434 rtx index;
12435
12436 if (! targetm.have_tablejump ())
12437 return 0;
12438
12439 index_expr = fold_build2 (MINUS_EXPR, index_type,
12440 fold_convert (index_type, index_expr),
12441 fold_convert (index_type, minval));
12442 index = expand_normal (index_expr);
12443 do_pending_stack_adjust ();
12444
12445 do_tablejump (index, TYPE_MODE (index_type),
12446 convert_modes (TYPE_MODE (index_type),
12447 TYPE_MODE (TREE_TYPE (range)),
12448 expand_normal (range),
12449 TYPE_UNSIGNED (TREE_TYPE (range))),
12450 table_label, default_label, default_probability);
12451 return 1;
12452 }
12453
12454 /* Return a CONST_VECTOR rtx representing a vector mask for
12455 a VECTOR_CST of booleans. */
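/* Nonzero elements are encoded as all-ones in the inner mode, so e.g. the
boolean vector constant { 0, 1 } becomes the CONST_VECTOR [0, -1]. */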
12456 static rtx
12457 const_vector_mask_from_tree (tree exp)
12458 {
12459 machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
12460 machine_mode inner = GET_MODE_INNER (mode);
12461
12462 rtx_vector_builder builder (mode, VECTOR_CST_NPATTERNS (exp),
12463 VECTOR_CST_NELTS_PER_PATTERN (exp));
12464 unsigned int count = builder.encoded_nelts ();
12465 for (unsigned int i = 0; i < count; ++i)
12466 {
12467 tree elt = VECTOR_CST_ELT (exp, i);
12468 gcc_assert (TREE_CODE (elt) == INTEGER_CST);
12469 if (integer_zerop (elt))
12470 builder.quick_push (CONST0_RTX (inner));
12471 else if (integer_onep (elt)
12472 || integer_minus_onep (elt))
12473 builder.quick_push (CONSTM1_RTX (inner));
12474 else
12475 gcc_unreachable ();
12476 }
12477 return builder.build ();
12478 }
12479
12480 /* EXP is a VECTOR_CST in which each element is either all-zeros or all-ones.
12481 Return a constant scalar rtx of mode MODE in which bit X is set if element
12482 X of EXP is nonzero. */
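/* E.g. the boolean vector constant { -1, 0, -1, -1 } yields the scalar
constant 0b1101. */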
12483 static rtx
12484 const_scalar_mask_from_tree (scalar_int_mode mode, tree exp)
12485 {
12486 wide_int res = wi::zero (GET_MODE_PRECISION (mode));
12487 tree elt;
12488
12489 /* The result has a fixed number of bits so the input must too. */
12490 unsigned int nunits = VECTOR_CST_NELTS (exp).to_constant ();
12491 for (unsigned int i = 0; i < nunits; ++i)
12492 {
12493 elt = VECTOR_CST_ELT (exp, i);
12494 gcc_assert (TREE_CODE (elt) == INTEGER_CST);
12495 if (integer_all_onesp (elt))
12496 res = wi::set_bit (res, i);
12497 else
12498 gcc_assert (integer_zerop (elt));
12499 }
12500
12501 return immed_wide_int_const (res, mode);
12502 }
12503
12504 /* Return a CONST_VECTOR rtx for a VECTOR_CST tree. */
12505 static rtx
12506 const_vector_from_tree (tree exp)
12507 {
12508 machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
12509
12510 if (initializer_zerop (exp))
12511 return CONST0_RTX (mode);
12512
12513 if (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (exp)))
12514 return const_vector_mask_from_tree (exp);
12515
12516 machine_mode inner = GET_MODE_INNER (mode);
12517
12518 rtx_vector_builder builder (mode, VECTOR_CST_NPATTERNS (exp),
12519 VECTOR_CST_NELTS_PER_PATTERN (exp));
12520 unsigned int count = builder.encoded_nelts ();
12521 for (unsigned int i = 0; i < count; ++i)
12522 {
12523 tree elt = VECTOR_CST_ELT (exp, i);
12524 if (TREE_CODE (elt) == REAL_CST)
12525 builder.quick_push (const_double_from_real_value (TREE_REAL_CST (elt),
12526 inner));
12527 else if (TREE_CODE (elt) == FIXED_CST)
12528 builder.quick_push (CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (elt),
12529 inner));
12530 else
12531 builder.quick_push (immed_wide_int_const (wi::to_poly_wide (elt),
12532 inner));
12533 }
12534 return builder.build ();
12535 }
12536
12537 /* Build a decl for a personality function given a language prefix. */
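/* E.g. for LANG "gxx" with DWARF2 unwind info this builds a decl for
__gxx_personality_v0. */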
12538
12539 tree
12540 build_personality_function (const char *lang)
12541 {
12542 const char *unwind_and_version;
12543 tree decl, type;
12544 char *name;
12545
12546 switch (targetm_common.except_unwind_info (&global_options))
12547 {
12548 case UI_NONE:
12549 return NULL;
12550 case UI_SJLJ:
12551 unwind_and_version = "_sj0";
12552 break;
12553 case UI_DWARF2:
12554 case UI_TARGET:
12555 unwind_and_version = "_v0";
12556 break;
12557 case UI_SEH:
12558 unwind_and_version = "_seh0";
12559 break;
12560 default:
12561 gcc_unreachable ();
12562 }
12563
12564 name = ACONCAT (("__", lang, "_personality", unwind_and_version, NULL));
12565
12566 type = build_function_type_list (integer_type_node, integer_type_node,
12567 long_long_unsigned_type_node,
12568 ptr_type_node, ptr_type_node, NULL_TREE);
12569 decl = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
12570 get_identifier (name), type);
12571 DECL_ARTIFICIAL (decl) = 1;
12572 DECL_EXTERNAL (decl) = 1;
12573 TREE_PUBLIC (decl) = 1;
12574
12575 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
12576 are the flags assigned by targetm.encode_section_info. */
12577 SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);
12578
12579 return decl;
12580 }
12581
12582 /* Extracts the personality function of DECL and returns the corresponding
12583 libfunc. */
12584
12585 rtx
12586 get_personality_function (tree decl)
12587 {
12588 tree personality = DECL_FUNCTION_PERSONALITY (decl);
12589 enum eh_personality_kind pk;
12590
12591 pk = function_needs_eh_personality (DECL_STRUCT_FUNCTION (decl));
12592 if (pk == eh_personality_none)
12593 return NULL;
12594
12595 if (!personality
12596 && pk == eh_personality_any)
12597 personality = lang_hooks.eh_personality ();
12598
12599 if (pk == eh_personality_lang)
12600 gcc_assert (personality != NULL_TREE);
12601
12602 return XEXP (DECL_RTL (personality), 0);
12603 }
12604
12605 /* Returns a tree for the size of EXP in bytes. */
12606
12607 static tree
12608 tree_expr_size (const_tree exp)
12609 {
12610 if (DECL_P (exp)
12611 && DECL_SIZE_UNIT (exp) != 0)
12612 return DECL_SIZE_UNIT (exp);
12613 else
12614 return size_in_bytes (TREE_TYPE (exp));
12615 }
12616
12617 /* Return an rtx for the size in bytes of the value of EXP. */
12618
12619 rtx
12620 expr_size (tree exp)
12621 {
12622 tree size;
12623
12624 if (TREE_CODE (exp) == WITH_SIZE_EXPR)
12625 size = TREE_OPERAND (exp, 1);
12626 else
12627 {
12628 size = tree_expr_size (exp);
12629 gcc_assert (size);
12630 gcc_assert (size == SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, exp));
12631 }
12632
12633 return expand_expr (size, NULL_RTX, TYPE_MODE (sizetype), EXPAND_NORMAL);
12634 }
12635
12636 /* Return a wide integer for the size in bytes of the value of EXP, or -1
12637 if the size can vary or is larger than an integer. */
12638
12639 static HOST_WIDE_INT
12640 int_expr_size (tree exp)
12641 {
12642 tree size;
12643
12644 if (TREE_CODE (exp) == WITH_SIZE_EXPR)
12645 size = TREE_OPERAND (exp, 1);
12646 else
12647 {
12648 size = tree_expr_size (exp);
12649 gcc_assert (size);
12650 }
12651
12652 if (size == 0 || !tree_fits_shwi_p (size))
12653 return -1;
12654
12655 return tree_to_shwi (size);
12656 }