/* Convert tree expression to rtl instructions, for GNU compiler.
   Copyright (C) 1988-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "memmodel.h"
#include "tm_p.h"
#include "ssa.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "attribs.h"
#include "varasm.h"
#include "except.h"
#include "insn-attr.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "stmt.h"
/* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
#include "expr.h"
#include "optabs-tree.h"
#include "libfuncs.h"
#include "reload.h"
#include "langhooks.h"
#include "common/common-target.h"
#include "tree-ssa-live.h"
#include "tree-outof-ssa.h"
#include "tree-ssa-address.h"
#include "builtins.h"
#include "tree-chkp.h"
#include "rtl-chkp.h"
#include "ccmp.h"
#include "rtx-vector-builder.h"


/* If this is nonzero, we do not bother generating VOLATILE
   around volatile memory references, and we are willing to
   output indirect addresses.  If cse is to follow, we reject
   indirect addresses so a useful potential cse is generated;
   if it is used only once, instruction combination will produce
   the same indirect address eventually.  */
int cse_not_expected;

static bool block_move_libcall_safe_for_call_parm (void);
static bool emit_block_move_via_movmem (rtx, rtx, rtx, unsigned, unsigned, HOST_WIDE_INT,
					unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
					unsigned HOST_WIDE_INT);
static void emit_block_move_via_loop (rtx, rtx, rtx, unsigned);
static void clear_by_pieces (rtx, unsigned HOST_WIDE_INT, unsigned int);
static rtx_insn *compress_float_constant (rtx, rtx);
static rtx get_subtarget (rtx);
static void store_constructor (tree, rtx, int, poly_int64, bool);
static rtx store_field (rtx, poly_int64, poly_int64, poly_uint64, poly_uint64,
			machine_mode, tree, alias_set_type, bool, bool);

static unsigned HOST_WIDE_INT highest_pow2_factor_for_target (const_tree, const_tree);

static int is_aligning_offset (const_tree, const_tree);
static rtx reduce_to_bit_field_precision (rtx, rtx, tree);
static rtx do_store_flag (sepops, rtx, machine_mode);
#ifdef PUSH_ROUNDING
static void emit_single_push_insn (machine_mode, rtx, tree);
#endif
static void do_tablejump (rtx, machine_mode, rtx, rtx, rtx,
			  profile_probability);
static rtx const_vector_from_tree (tree);
static rtx const_scalar_mask_from_tree (scalar_int_mode, tree);
static tree tree_expr_size (const_tree);
static HOST_WIDE_INT int_expr_size (tree);
static void convert_mode_scalar (rtx, rtx, int);

\f
/* This is run to set up which modes can be used
   directly in memory and to initialize the block move optab.  It is run
   at the beginning of compilation and when the target is reinitialized.  */

void
init_expr_target (void)
{
  rtx pat;
  int num_clobbers;
  rtx mem, mem1;
  rtx reg;

  /* Try indexing by frame ptr and try by stack ptr.
     It is known that on the Convex the stack ptr isn't a valid index.
     With luck, one or the other is valid on any machine.  */
  mem = gen_rtx_MEM (word_mode, stack_pointer_rtx);
  mem1 = gen_rtx_MEM (word_mode, frame_pointer_rtx);

  /* A scratch register we can modify in-place below to avoid
     useless RTL allocations.  */
  reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  rtx_insn *insn = as_a<rtx_insn *> (rtx_alloc (INSN));
  pat = gen_rtx_SET (NULL_RTX, NULL_RTX);
  PATTERN (insn) = pat;

  for (machine_mode mode = VOIDmode; (int) mode < NUM_MACHINE_MODES;
       mode = (machine_mode) ((int) mode + 1))
    {
      int regno;

      direct_load[(int) mode] = direct_store[(int) mode] = 0;
      PUT_MODE (mem, mode);
      PUT_MODE (mem1, mode);

      /* See if there is some register that can be used in this mode and
	 directly loaded or stored from memory.  */

      if (mode != VOIDmode && mode != BLKmode)
	for (regno = 0; regno < FIRST_PSEUDO_REGISTER
	     && (direct_load[(int) mode] == 0 || direct_store[(int) mode] == 0);
	     regno++)
	  {
	    if (!targetm.hard_regno_mode_ok (regno, mode))
	      continue;

	    set_mode_and_regno (reg, mode, regno);

	    SET_SRC (pat) = mem;
	    SET_DEST (pat) = reg;
	    if (recog (pat, insn, &num_clobbers) >= 0)
	      direct_load[(int) mode] = 1;

	    SET_SRC (pat) = mem1;
	    SET_DEST (pat) = reg;
	    if (recog (pat, insn, &num_clobbers) >= 0)
	      direct_load[(int) mode] = 1;

	    SET_SRC (pat) = reg;
	    SET_DEST (pat) = mem;
	    if (recog (pat, insn, &num_clobbers) >= 0)
	      direct_store[(int) mode] = 1;

	    SET_SRC (pat) = reg;
	    SET_DEST (pat) = mem1;
	    if (recog (pat, insn, &num_clobbers) >= 0)
	      direct_store[(int) mode] = 1;
	  }
    }

  mem = gen_rtx_MEM (VOIDmode, gen_raw_REG (Pmode, LAST_VIRTUAL_REGISTER + 1));

  opt_scalar_float_mode mode_iter;
  FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_FLOAT)
    {
      scalar_float_mode mode = mode_iter.require ();
      scalar_float_mode srcmode;
      FOR_EACH_MODE_UNTIL (srcmode, mode)
	{
	  enum insn_code ic;

	  ic = can_extend_p (mode, srcmode, 0);
	  if (ic == CODE_FOR_nothing)
	    continue;

	  PUT_MODE (mem, srcmode);

	  if (insn_operand_matches (ic, 1, mem))
	    float_extend_from_mem[mode][srcmode] = true;
	}
    }
}

/* This is run at the start of compiling a function.  */

void
init_expr (void)
{
  memset (&crtl->expr, 0, sizeof (crtl->expr));
}
\f
/* Copy data from FROM to TO, where the machine modes are not the same.
   Both modes may be integer, or both may be floating, or both may be
   fixed-point.
   UNSIGNEDP should be nonzero if FROM is an unsigned type.
   This causes zero-extension instead of sign-extension.  */

void
convert_move (rtx to, rtx from, int unsignedp)
{
  machine_mode to_mode = GET_MODE (to);
  machine_mode from_mode = GET_MODE (from);

  gcc_assert (to_mode != BLKmode);
  gcc_assert (from_mode != BLKmode);

  /* If the source and destination are already the same, then there's
     nothing to do.  */
  if (to == from)
    return;

  /* If FROM is a SUBREG that indicates that we have already done at least
     the required extension, strip it.  We don't handle such SUBREGs as
     TO here.  */

  scalar_int_mode to_int_mode;
  if (GET_CODE (from) == SUBREG
      && SUBREG_PROMOTED_VAR_P (from)
      && is_a <scalar_int_mode> (to_mode, &to_int_mode)
      && (GET_MODE_PRECISION (subreg_promoted_mode (from))
	  >= GET_MODE_PRECISION (to_int_mode))
      && SUBREG_CHECK_PROMOTED_SIGN (from, unsignedp))
    from = gen_lowpart (to_int_mode, from), from_mode = to_int_mode;

  gcc_assert (GET_CODE (to) != SUBREG || !SUBREG_PROMOTED_VAR_P (to));

  if (to_mode == from_mode
      || (from_mode == VOIDmode && CONSTANT_P (from)))
    {
      emit_move_insn (to, from);
      return;
    }

  if (VECTOR_MODE_P (to_mode) || VECTOR_MODE_P (from_mode))
    {
      gcc_assert (known_eq (GET_MODE_BITSIZE (from_mode),
			    GET_MODE_BITSIZE (to_mode)));

      if (VECTOR_MODE_P (to_mode))
	from = simplify_gen_subreg (to_mode, from, GET_MODE (from), 0);
      else
	to = simplify_gen_subreg (from_mode, to, GET_MODE (to), 0);

      emit_move_insn (to, from);
      return;
    }

  if (GET_CODE (to) == CONCAT && GET_CODE (from) == CONCAT)
    {
      convert_move (XEXP (to, 0), XEXP (from, 0), unsignedp);
      convert_move (XEXP (to, 1), XEXP (from, 1), unsignedp);
      return;
    }

  convert_mode_scalar (to, from, unsignedp);
}
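
/* A minimal usage sketch (the SImode/DImode pseudos here are
   hypothetical, not code from this file): to widen a 32-bit value into
   a 64-bit destination with zero-extension, a caller would write

     rtx src = gen_reg_rtx (SImode);
     rtx dst = gen_reg_rtx (DImode);
     convert_move (dst, src, 1);

   where the nonzero UNSIGNEDP selects ZERO_EXTEND over SIGN_EXTEND.  */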

/* Like convert_move, but deals only with scalar modes.  */

static void
convert_mode_scalar (rtx to, rtx from, int unsignedp)
{
  /* Both modes should be scalar types.  */
  scalar_mode from_mode = as_a <scalar_mode> (GET_MODE (from));
  scalar_mode to_mode = as_a <scalar_mode> (GET_MODE (to));
  bool to_real = SCALAR_FLOAT_MODE_P (to_mode);
  bool from_real = SCALAR_FLOAT_MODE_P (from_mode);
  enum insn_code code;
  rtx libcall;

  gcc_assert (to_real == from_real);

  /* rtx code for making an equivalent value.  */
  enum rtx_code equiv_code = (unsignedp < 0 ? UNKNOWN
			      : (unsignedp ? ZERO_EXTEND : SIGN_EXTEND));

  if (to_real)
    {
      rtx value;
      rtx_insn *insns;
      convert_optab tab;

      gcc_assert ((GET_MODE_PRECISION (from_mode)
		   != GET_MODE_PRECISION (to_mode))
		  || (DECIMAL_FLOAT_MODE_P (from_mode)
		      != DECIMAL_FLOAT_MODE_P (to_mode)));

      if (GET_MODE_PRECISION (from_mode) == GET_MODE_PRECISION (to_mode))
	/* Conversion between decimal float and binary float, same size.  */
	tab = DECIMAL_FLOAT_MODE_P (from_mode) ? trunc_optab : sext_optab;
      else if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode))
	tab = sext_optab;
      else
	tab = trunc_optab;

      /* Try converting directly if the insn is supported.  */

      code = convert_optab_handler (tab, to_mode, from_mode);
      if (code != CODE_FOR_nothing)
	{
	  emit_unop_insn (code, to, from,
			  tab == sext_optab ? FLOAT_EXTEND : FLOAT_TRUNCATE);
	  return;
	}

      /* Otherwise use a libcall.  */
      libcall = convert_optab_libfunc (tab, to_mode, from_mode);

      /* Is this conversion implemented yet?  */
      gcc_assert (libcall);

      start_sequence ();
      value = emit_library_call_value (libcall, NULL_RTX, LCT_CONST, to_mode,
				       from, from_mode);
      insns = get_insns ();
      end_sequence ();
      emit_libcall_block (insns, to, value,
			  tab == trunc_optab ? gen_rtx_FLOAT_TRUNCATE (to_mode,
								       from)
			  : gen_rtx_FLOAT_EXTEND (to_mode, from));
      return;
    }

  /* Handle pointer conversion.  */			/* SPEE 900220.  */
  /* If the target has a converter from FROM_MODE to TO_MODE, use it.  */
  {
    convert_optab ctab;

    if (GET_MODE_PRECISION (from_mode) > GET_MODE_PRECISION (to_mode))
      ctab = trunc_optab;
    else if (unsignedp)
      ctab = zext_optab;
    else
      ctab = sext_optab;

    if (convert_optab_handler (ctab, to_mode, from_mode)
	!= CODE_FOR_nothing)
      {
	emit_unop_insn (convert_optab_handler (ctab, to_mode, from_mode),
			to, from, UNKNOWN);
	return;
      }
  }

  /* Targets are expected to provide conversion insns between PxImode and
     xImode for all MODE_PARTIAL_INT modes they use, but no others.  */
  if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT)
    {
      scalar_int_mode full_mode
	= smallest_int_mode_for_size (GET_MODE_BITSIZE (to_mode));

      gcc_assert (convert_optab_handler (trunc_optab, to_mode, full_mode)
		  != CODE_FOR_nothing);

      if (full_mode != from_mode)
	from = convert_to_mode (full_mode, from, unsignedp);
      emit_unop_insn (convert_optab_handler (trunc_optab, to_mode, full_mode),
		      to, from, UNKNOWN);
      return;
    }
  if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT)
    {
      rtx new_from;
      scalar_int_mode full_mode
	= smallest_int_mode_for_size (GET_MODE_BITSIZE (from_mode));
      convert_optab ctab = unsignedp ? zext_optab : sext_optab;
      enum insn_code icode;

      icode = convert_optab_handler (ctab, full_mode, from_mode);
      gcc_assert (icode != CODE_FOR_nothing);

      if (to_mode == full_mode)
	{
	  emit_unop_insn (icode, to, from, UNKNOWN);
	  return;
	}

      new_from = gen_reg_rtx (full_mode);
      emit_unop_insn (icode, new_from, from, UNKNOWN);

      /* else proceed to integer conversions below.  */
      from_mode = full_mode;
      from = new_from;
    }

  /* Make sure both are fixed-point modes or both are not.  */
  gcc_assert (ALL_SCALAR_FIXED_POINT_MODE_P (from_mode) ==
	      ALL_SCALAR_FIXED_POINT_MODE_P (to_mode));
  if (ALL_SCALAR_FIXED_POINT_MODE_P (from_mode))
    {
      /* If we widen from_mode to to_mode and they are in the same class,
	 we won't saturate the result.
	 Otherwise, always saturate the result to play safe.  */
      if (GET_MODE_CLASS (from_mode) == GET_MODE_CLASS (to_mode)
	  && GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
	expand_fixed_convert (to, from, 0, 0);
      else
	expand_fixed_convert (to, from, 0, 1);
      return;
    }

  /* Now both modes are integers.  */

  /* Handle expanding beyond a word.  */
  if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode)
      && GET_MODE_PRECISION (to_mode) > BITS_PER_WORD)
    {
      rtx_insn *insns;
      rtx lowpart;
      rtx fill_value;
      rtx lowfrom;
      int i;
      scalar_mode lowpart_mode;
      int nwords = CEIL (GET_MODE_SIZE (to_mode), UNITS_PER_WORD);

      /* Try converting directly if the insn is supported.  */
      if ((code = can_extend_p (to_mode, from_mode, unsignedp))
	  != CODE_FOR_nothing)
	{
	  /* If FROM is a SUBREG, put it into a register.  Do this
	     so that we always generate the same set of insns for
	     better cse'ing; if an intermediate assignment occurred,
	     we won't be doing the operation directly on the SUBREG.  */
	  if (optimize > 0 && GET_CODE (from) == SUBREG)
	    from = force_reg (from_mode, from);
	  emit_unop_insn (code, to, from, equiv_code);
	  return;
	}
      /* Next, try converting via full word.  */
      else if (GET_MODE_PRECISION (from_mode) < BITS_PER_WORD
	       && ((code = can_extend_p (to_mode, word_mode, unsignedp))
		   != CODE_FOR_nothing))
	{
	  rtx word_to = gen_reg_rtx (word_mode);
	  if (REG_P (to))
	    {
	      if (reg_overlap_mentioned_p (to, from))
		from = force_reg (from_mode, from);
	      emit_clobber (to);
	    }
	  convert_move (word_to, from, unsignedp);
	  emit_unop_insn (code, to, word_to, equiv_code);
	  return;
	}

      /* No special multiword conversion insn; do it by hand.  */
      start_sequence ();

      /* Since we will turn this into a no conflict block, we must ensure
	 the source does not overlap the target, so force it into an
	 isolated register when that might be the case.  Likewise for any
	 MEM input, since the conversion sequence might require several
	 references to it and we must ensure we're getting the same value
	 every time.  */

      if (MEM_P (from) || reg_overlap_mentioned_p (to, from))
	from = force_reg (from_mode, from);

      /* Get a copy of FROM widened to a word, if necessary.  */
      if (GET_MODE_PRECISION (from_mode) < BITS_PER_WORD)
	lowpart_mode = word_mode;
      else
	lowpart_mode = from_mode;

      lowfrom = convert_to_mode (lowpart_mode, from, unsignedp);

      lowpart = gen_lowpart (lowpart_mode, to);
      emit_move_insn (lowpart, lowfrom);

      /* Compute the value to put in each remaining word.  */
      if (unsignedp)
	fill_value = const0_rtx;
      else
	fill_value = emit_store_flag_force (gen_reg_rtx (word_mode),
					    LT, lowfrom, const0_rtx,
					    lowpart_mode, 0, -1);

      /* Fill the remaining words.  */
      for (i = GET_MODE_SIZE (lowpart_mode) / UNITS_PER_WORD; i < nwords; i++)
	{
	  int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
	  rtx subword = operand_subword (to, index, 1, to_mode);

	  gcc_assert (subword);

	  if (fill_value != subword)
	    emit_move_insn (subword, fill_value);
	}

      insns = get_insns ();
      end_sequence ();

      emit_insn (insns);
      return;
    }

  /* Truncating multi-word to a word or less.  */
  if (GET_MODE_PRECISION (from_mode) > BITS_PER_WORD
      && GET_MODE_PRECISION (to_mode) <= BITS_PER_WORD)
    {
      if (!((MEM_P (from)
	     && ! MEM_VOLATILE_P (from)
	     && direct_load[(int) to_mode]
	     && ! mode_dependent_address_p (XEXP (from, 0),
					    MEM_ADDR_SPACE (from)))
	    || REG_P (from)
	    || GET_CODE (from) == SUBREG))
	from = force_reg (from_mode, from);
      convert_move (to, gen_lowpart (word_mode, from), 0);
      return;
    }

  /* Now follow all the conversions between integers
     no more than a word long.  */

  /* For truncation, usually we can just refer to FROM in a narrower mode.  */
  if (GET_MODE_BITSIZE (to_mode) < GET_MODE_BITSIZE (from_mode)
      && TRULY_NOOP_TRUNCATION_MODES_P (to_mode, from_mode))
    {
      if (!((MEM_P (from)
	     && ! MEM_VOLATILE_P (from)
	     && direct_load[(int) to_mode]
	     && ! mode_dependent_address_p (XEXP (from, 0),
					    MEM_ADDR_SPACE (from)))
	    || REG_P (from)
	    || GET_CODE (from) == SUBREG))
	from = force_reg (from_mode, from);
      if (REG_P (from) && REGNO (from) < FIRST_PSEUDO_REGISTER
	  && !targetm.hard_regno_mode_ok (REGNO (from), to_mode))
	from = copy_to_reg (from);
      emit_move_insn (to, gen_lowpart (to_mode, from));
      return;
    }

  /* Handle extension.  */
  if (GET_MODE_PRECISION (to_mode) > GET_MODE_PRECISION (from_mode))
    {
      /* Convert directly if that works.  */
      if ((code = can_extend_p (to_mode, from_mode, unsignedp))
	  != CODE_FOR_nothing)
	{
	  emit_unop_insn (code, to, from, equiv_code);
	  return;
	}
      else
	{
	  scalar_mode intermediate;
	  rtx tmp;
	  int shift_amount;

	  /* Search for a mode to convert via.  */
	  opt_scalar_mode intermediate_iter;
	  FOR_EACH_MODE_FROM (intermediate_iter, from_mode)
	    {
	      scalar_mode intermediate = intermediate_iter.require ();
	      if (((can_extend_p (to_mode, intermediate, unsignedp)
		    != CODE_FOR_nothing)
		   || (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (intermediate)
		       && TRULY_NOOP_TRUNCATION_MODES_P (to_mode,
							 intermediate)))
		  && (can_extend_p (intermediate, from_mode, unsignedp)
		      != CODE_FOR_nothing))
		{
		  convert_move (to, convert_to_mode (intermediate, from,
						     unsignedp), unsignedp);
		  return;
		}
	    }

	  /* No suitable intermediate mode.
	     Generate what we need with shifts.  */
	  shift_amount = (GET_MODE_PRECISION (to_mode)
			  - GET_MODE_PRECISION (from_mode));
	  from = gen_lowpart (to_mode, force_reg (from_mode, from));
	  tmp = expand_shift (LSHIFT_EXPR, to_mode, from, shift_amount,
			      to, unsignedp);
	  tmp = expand_shift (RSHIFT_EXPR, to_mode, tmp, shift_amount,
			      to, unsignedp);
	  if (tmp != to)
	    emit_move_insn (to, tmp);
	  return;
	}
    }

  /* Support special truncate insns for certain modes.  */
  if (convert_optab_handler (trunc_optab, to_mode,
			     from_mode) != CODE_FOR_nothing)
    {
      emit_unop_insn (convert_optab_handler (trunc_optab, to_mode, from_mode),
		      to, from, UNKNOWN);
      return;
    }

  /* Handle truncation of volatile memrefs, and so on;
     the things that couldn't be truncated directly,
     and for which there was no special instruction.

     ??? Code above formerly short-circuited this, for most integer
     mode pairs, with a force_reg in from_mode followed by a recursive
     call to this routine.  Appears always to have been wrong.  */
  if (GET_MODE_PRECISION (to_mode) < GET_MODE_PRECISION (from_mode))
    {
      rtx temp = force_reg (to_mode, gen_lowpart (to_mode, from));
      emit_move_insn (to, temp);
      return;
    }

  /* Mode combination is not recognized.  */
  gcc_unreachable ();
}

/* Return an rtx for a value that would result
   from converting X to mode MODE.
   Both X and MODE may be floating, or both integer.
   UNSIGNEDP is nonzero if X is an unsigned value.
   This can be done by referring to a part of X in place
   or by copying to a new temporary with conversion.  */

rtx
convert_to_mode (machine_mode mode, rtx x, int unsignedp)
{
  return convert_modes (mode, VOIDmode, x, unsignedp);
}

/* Return an rtx for a value that would result
   from converting X from mode OLDMODE to mode MODE.
   Both modes may be floating, or both integer.
   UNSIGNEDP is nonzero if X is an unsigned value.

   This can be done by referring to a part of X in place
   or by copying to a new temporary with conversion.

   You can give VOIDmode for OLDMODE, if you are sure X has a nonvoid mode.  */

rtx
convert_modes (machine_mode mode, machine_mode oldmode, rtx x, int unsignedp)
{
  rtx temp;
  scalar_int_mode int_mode;

  /* If FROM is a SUBREG that indicates that we have already done at least
     the required extension, strip it.  */

  if (GET_CODE (x) == SUBREG
      && SUBREG_PROMOTED_VAR_P (x)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && (GET_MODE_PRECISION (subreg_promoted_mode (x))
	  >= GET_MODE_PRECISION (int_mode))
      && SUBREG_CHECK_PROMOTED_SIGN (x, unsignedp))
    x = gen_lowpart (int_mode, SUBREG_REG (x));

  if (GET_MODE (x) != VOIDmode)
    oldmode = GET_MODE (x);

  if (mode == oldmode)
    return x;

  if (CONST_SCALAR_INT_P (x)
      && is_int_mode (mode, &int_mode))
    {
      /* If the caller did not tell us the old mode, then there is not
	 much to do with respect to canonicalization.  We have to
	 assume that all the bits are significant.  */
      if (GET_MODE_CLASS (oldmode) != MODE_INT)
	oldmode = MAX_MODE_INT;
      wide_int w = wide_int::from (rtx_mode_t (x, oldmode),
				   GET_MODE_PRECISION (int_mode),
				   unsignedp ? UNSIGNED : SIGNED);
      return immed_wide_int_const (w, int_mode);
    }

  /* We can do this with a gen_lowpart if both desired and current modes
     are integer, and this is either a constant integer, a register, or a
     non-volatile MEM.  */
  scalar_int_mode int_oldmode;
  if (is_int_mode (mode, &int_mode)
      && is_int_mode (oldmode, &int_oldmode)
      && GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (int_oldmode)
      && ((MEM_P (x) && !MEM_VOLATILE_P (x) && direct_load[(int) int_mode])
	  || CONST_POLY_INT_P (x)
	  || (REG_P (x)
	      && (!HARD_REGISTER_P (x)
		  || targetm.hard_regno_mode_ok (REGNO (x), int_mode))
	      && TRULY_NOOP_TRUNCATION_MODES_P (int_mode, GET_MODE (x)))))
    return gen_lowpart (int_mode, x);

  /* Converting an integer constant into a vector mode is always
     equivalent to a subreg operation.  */
  if (VECTOR_MODE_P (mode) && GET_MODE (x) == VOIDmode)
    {
      gcc_assert (known_eq (GET_MODE_BITSIZE (mode),
			    GET_MODE_BITSIZE (oldmode)));
      return simplify_gen_subreg (mode, x, oldmode, 0);
    }

  temp = gen_reg_rtx (mode);
  convert_move (temp, x, unsignedp);
  return temp;
}
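
/* A worked example of the constant path above (a sketch, not code from
   this file): converting (const_int 511) to QImode takes the
   CONST_SCALAR_INT_P branch, truncates the value to QImode's 8-bit
   precision, and returns (const_int -1), because CONST_INTs are always
   stored sign-extended regardless of UNSIGNEDP.  */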
\f
/* Return the largest alignment we can use for doing a move (or store)
   of MAX_PIECES.  ALIGN is the largest alignment we could use.  */

static unsigned int
alignment_for_piecewise_move (unsigned int max_pieces, unsigned int align)
{
  scalar_int_mode tmode
    = int_mode_for_size (max_pieces * BITS_PER_UNIT, 1).require ();

  if (align >= GET_MODE_ALIGNMENT (tmode))
    align = GET_MODE_ALIGNMENT (tmode);
  else
    {
      scalar_int_mode xmode = NARROWEST_INT_MODE;
      opt_scalar_int_mode mode_iter;
      FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
	{
	  tmode = mode_iter.require ();
	  if (GET_MODE_SIZE (tmode) > max_pieces
	      || targetm.slow_unaligned_access (tmode, align))
	    break;
	  xmode = tmode;
	}

      align = MAX (align, GET_MODE_ALIGNMENT (xmode));
    }

  return align;
}
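
/* For instance (a sketch with hypothetical target parameters): given
   MAX_PIECES == 8 and ALIGN == 16 bits, a strict-alignment target whose
   slow_unaligned_access hook rejects unaligned SImode stops the scan at
   HImode and the function returns 16, while a target with fast
   unaligned accesses scans up to DImode and returns its (typically
   64-bit) alignment.  */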

/* Return the widest integer mode that is narrower than SIZE bytes.  */

static scalar_int_mode
widest_int_mode_for_size (unsigned int size)
{
  scalar_int_mode result = NARROWEST_INT_MODE;

  gcc_checking_assert (size > 1);

  opt_scalar_int_mode tmode;
  FOR_EACH_MODE_IN_CLASS (tmode, MODE_INT)
    if (GET_MODE_SIZE (tmode.require ()) < size)
      result = tmode.require ();

  return result;
}
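
/* Note the strict "narrower than": callers that want pieces of at most
   N bytes pass N + 1, so e.g. a call with SIZE == 5 returns SImode
   (4 bytes) on a target with the usual 1/2/4/8-byte integer modes.  */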

/* Determine whether an operation OP on LEN bytes with alignment ALIGN can
   and should be performed piecewise.  */

static bool
can_do_by_pieces (unsigned HOST_WIDE_INT len, unsigned int align,
		  enum by_pieces_operation op)
{
  return targetm.use_by_pieces_infrastructure_p (len, align, op,
						 optimize_insn_for_speed_p ());
}

/* Determine whether the LEN bytes can be moved by using several move
   instructions.  Return nonzero if a call to move_by_pieces should
   succeed.  */

bool
can_move_by_pieces (unsigned HOST_WIDE_INT len, unsigned int align)
{
  return can_do_by_pieces (len, align, MOVE_BY_PIECES);
}

/* Return number of insns required to perform operation OP by pieces
   for L bytes.  ALIGN (in bits) is maximum alignment we can assume.  */

unsigned HOST_WIDE_INT
by_pieces_ninsns (unsigned HOST_WIDE_INT l, unsigned int align,
		  unsigned int max_size, by_pieces_operation op)
{
  unsigned HOST_WIDE_INT n_insns = 0;

  align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);

  while (max_size > 1 && l > 0)
    {
      scalar_int_mode mode = widest_int_mode_for_size (max_size);
      enum insn_code icode;

      unsigned int modesize = GET_MODE_SIZE (mode);

      icode = optab_handler (mov_optab, mode);
      if (icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode))
	{
	  unsigned HOST_WIDE_INT n_pieces = l / modesize;
	  l %= modesize;
	  switch (op)
	    {
	    default:
	      n_insns += n_pieces;
	      break;

	    case COMPARE_BY_PIECES:
	      int batch = targetm.compare_by_pieces_branch_ratio (mode);
	      int batch_ops = 4 * batch - 1;
	      unsigned HOST_WIDE_INT full = n_pieces / batch;
	      n_insns += full * batch_ops;
	      if (n_pieces % batch != 0)
		n_insns++;
	      break;
	    }
	}
      max_size = modesize;
    }

  gcc_assert (!l);
  return n_insns;
}
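
/* A worked count (a sketch assuming sufficient alignment on a 32-bit
   target): for a MOVE_BY_PIECES of L == 7 bytes with MAX_SIZE == 5, the
   loop first picks SImode (one 4-byte piece, 3 bytes left), then HImode
   (one piece, 1 byte left), then QImode (one piece), so n_insns == 3.  */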

/* Used when performing piecewise block operations, holds information
   about one of the memory objects involved.  The member functions
   can be used to generate code for loading from the object and
   updating the address when iterating.  */

class pieces_addr
{
  /* The object being referenced, a MEM.  Can be NULL_RTX to indicate
     stack pushes.  */
  rtx m_obj;
  /* The address of the object.  Can differ from that seen in the
     MEM rtx if we copied the address to a register.  */
  rtx m_addr;
  /* Nonzero if the address on the object has an autoincrement already,
     signifies whether that was an increment or decrement.  */
  signed char m_addr_inc;
  /* Nonzero if we intend to use autoinc without the address already
     having autoinc form.  We will insert add insns around each memory
     reference, expecting later passes to form autoinc addressing modes.
     The only supported options are predecrement and postincrement.  */
  signed char m_explicit_inc;
  /* True if we have either of the two possible cases of using
     autoincrement.  */
  bool m_auto;
  /* True if this is an address to be used for load operations rather
     than stores.  */
  bool m_is_load;

  /* Optionally, a function to obtain constants for any given offset into
     the objects, and data associated with it.  */
  by_pieces_constfn m_constfn;
  void *m_cfndata;
public:
  pieces_addr (rtx, bool, by_pieces_constfn, void *);
  rtx adjust (scalar_int_mode, HOST_WIDE_INT);
  void increment_address (HOST_WIDE_INT);
  void maybe_predec (HOST_WIDE_INT);
  void maybe_postinc (HOST_WIDE_INT);
  void decide_autoinc (machine_mode, bool, HOST_WIDE_INT);
  int get_addr_inc ()
  {
    return m_addr_inc;
  }
};

/* Initialize a pieces_addr structure from an object OBJ.  IS_LOAD is
   true if the operation to be performed on this object is a load
   rather than a store.  For stores, OBJ can be NULL, in which case we
   assume the operation is a stack push.  For loads, the optional
   CONSTFN and its associated CFNDATA can be used in place of the
   memory load.  */

pieces_addr::pieces_addr (rtx obj, bool is_load, by_pieces_constfn constfn,
			  void *cfndata)
  : m_obj (obj), m_is_load (is_load), m_constfn (constfn), m_cfndata (cfndata)
{
  m_addr_inc = 0;
  m_auto = false;
  if (obj)
    {
      rtx addr = XEXP (obj, 0);
      rtx_code code = GET_CODE (addr);
      m_addr = addr;
      bool dec = code == PRE_DEC || code == POST_DEC;
      bool inc = code == PRE_INC || code == POST_INC;
      m_auto = inc || dec;
      if (m_auto)
	m_addr_inc = dec ? -1 : 1;

      /* While we have always looked for these codes here, the code
	 implementing the memory operation has never handled them.
	 Support could be added later if necessary or beneficial.  */
      gcc_assert (code != PRE_INC && code != POST_DEC);
    }
  else
    {
      m_addr = NULL_RTX;
      if (!is_load)
	{
	  m_auto = true;
	  if (STACK_GROWS_DOWNWARD)
	    m_addr_inc = -1;
	  else
	    m_addr_inc = 1;
	}
      else
	gcc_assert (constfn != NULL);
    }
  m_explicit_inc = 0;
  if (constfn)
    gcc_assert (is_load);
}

/* Decide whether to use autoinc for an address involved in a memory op.
   MODE is the mode of the accesses, REVERSE is true if we've decided to
   perform the operation starting from the end, and LEN is the length of
   the operation.  Don't override an earlier decision to set m_auto.  */

void
pieces_addr::decide_autoinc (machine_mode ARG_UNUSED (mode), bool reverse,
			     HOST_WIDE_INT len)
{
  if (m_auto || m_obj == NULL_RTX)
    return;

  bool use_predec = (m_is_load
		     ? USE_LOAD_PRE_DECREMENT (mode)
		     : USE_STORE_PRE_DECREMENT (mode));
  bool use_postinc = (m_is_load
		      ? USE_LOAD_POST_INCREMENT (mode)
		      : USE_STORE_POST_INCREMENT (mode));
  machine_mode addr_mode = get_address_mode (m_obj);

  if (use_predec && reverse)
    {
      m_addr = copy_to_mode_reg (addr_mode,
				 plus_constant (addr_mode,
						m_addr, len));
      m_auto = true;
      m_explicit_inc = -1;
    }
  else if (use_postinc && !reverse)
    {
      m_addr = copy_to_mode_reg (addr_mode, m_addr);
      m_auto = true;
      m_explicit_inc = 1;
    }
  else if (CONSTANT_P (m_addr))
    m_addr = copy_to_mode_reg (addr_mode, m_addr);
}

/* Adjust the address to refer to the data at OFFSET in MODE.  If we
   are using autoincrement for this address, we don't add the offset,
   but we still modify the MEM's properties.  */

rtx
pieces_addr::adjust (scalar_int_mode mode, HOST_WIDE_INT offset)
{
  if (m_constfn)
    return m_constfn (m_cfndata, offset, mode);
  if (m_obj == NULL_RTX)
    return NULL_RTX;
  if (m_auto)
    return adjust_automodify_address (m_obj, mode, m_addr, offset);
  else
    return adjust_address (m_obj, mode, offset);
}

/* Emit an add instruction to increment the address by SIZE.  */

void
pieces_addr::increment_address (HOST_WIDE_INT size)
{
  rtx amount = gen_int_mode (size, GET_MODE (m_addr));
  emit_insn (gen_add2_insn (m_addr, amount));
}

/* If we are supposed to decrement the address before each access, emit
   code to do so now.  Increment by SIZE (which should have the correct
   sign already).  */

void
pieces_addr::maybe_predec (HOST_WIDE_INT size)
{
  if (m_explicit_inc >= 0)
    return;
  gcc_assert (HAVE_PRE_DECREMENT);
  increment_address (size);
}

/* If we are supposed to increment the address after each access, emit
   code to do so now.  Increment by SIZE.  */

void
pieces_addr::maybe_postinc (HOST_WIDE_INT size)
{
  if (m_explicit_inc <= 0)
    return;
  gcc_assert (HAVE_POST_INCREMENT);
  increment_address (size);
}

/* This structure is used by the by-pieces infrastructure to describe
   the operation to be performed.  */

class op_by_pieces_d
{
 protected:
  pieces_addr m_to, m_from;
  unsigned HOST_WIDE_INT m_len;
  HOST_WIDE_INT m_offset;
  unsigned int m_align;
  unsigned int m_max_size;
  bool m_reverse;

  /* Virtual functions, overridden by derived classes for the specific
     operation.  */
  virtual void generate (rtx, rtx, machine_mode) = 0;
  virtual bool prepare_mode (machine_mode, unsigned int) = 0;
  virtual void finish_mode (machine_mode)
  {
  }

 public:
  op_by_pieces_d (rtx, bool, rtx, bool, by_pieces_constfn, void *,
		  unsigned HOST_WIDE_INT, unsigned int);
  void run ();
};

/* The constructor for an op_by_pieces_d structure.  We require two
   objects named TO and FROM, which are identified as loads or stores
   by TO_LOAD and FROM_LOAD.  If FROM is a load, the optional FROM_CFN
   and its associated FROM_CFN_DATA can be used to replace loads with
   constant values.  LEN describes the length of the operation.  */

op_by_pieces_d::op_by_pieces_d (rtx to, bool to_load,
				rtx from, bool from_load,
				by_pieces_constfn from_cfn,
				void *from_cfn_data,
				unsigned HOST_WIDE_INT len,
				unsigned int align)
  : m_to (to, to_load, NULL, NULL),
    m_from (from, from_load, from_cfn, from_cfn_data),
    m_len (len), m_max_size (MOVE_MAX_PIECES + 1)
{
  int toi = m_to.get_addr_inc ();
  int fromi = m_from.get_addr_inc ();
  if (toi >= 0 && fromi >= 0)
    m_reverse = false;
  else if (toi <= 0 && fromi <= 0)
    m_reverse = true;
  else
    gcc_unreachable ();

  m_offset = m_reverse ? len : 0;
  align = MIN (to ? MEM_ALIGN (to) : align,
	       from ? MEM_ALIGN (from) : align);

  /* If copying requires more than two move insns,
     copy addresses to registers (to make displacements shorter)
     and use post-increment if available.  */
  if (by_pieces_ninsns (len, align, m_max_size, MOVE_BY_PIECES) > 2)
    {
      /* Find the mode of the largest access.  */
      scalar_int_mode mode = widest_int_mode_for_size (m_max_size);

      m_from.decide_autoinc (mode, m_reverse, len);
      m_to.decide_autoinc (mode, m_reverse, len);
    }

  align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align);
  m_align = align;
}

/* This function contains the main loop used for expanding a block
   operation.  First move what we can in the largest integer mode,
   then go to successively smaller modes.  For every access, call
   the virtual function generate with the two operands and the mode.  */

void
op_by_pieces_d::run ()
{
  while (m_max_size > 1 && m_len > 0)
    {
      scalar_int_mode mode = widest_int_mode_for_size (m_max_size);

      if (prepare_mode (mode, m_align))
	{
	  unsigned int size = GET_MODE_SIZE (mode);
	  rtx to1 = NULL_RTX, from1;

	  while (m_len >= size)
	    {
	      if (m_reverse)
		m_offset -= size;

	      to1 = m_to.adjust (mode, m_offset);
	      from1 = m_from.adjust (mode, m_offset);

	      m_to.maybe_predec (-(HOST_WIDE_INT)size);
	      m_from.maybe_predec (-(HOST_WIDE_INT)size);

	      generate (to1, from1, mode);

	      m_to.maybe_postinc (size);
	      m_from.maybe_postinc (size);

	      if (!m_reverse)
		m_offset += size;

	      m_len -= size;
	    }

	  finish_mode (mode);
	}

      m_max_size = GET_MODE_SIZE (mode);
    }

  /* The code above should have handled everything.  */
  gcc_assert (!m_len);
}
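
/* To illustrate the loop above (a sketch assuming MOVE_MAX_PIECES == 4,
   a forward copy and m_len == 7): the first iteration runs SImode and
   emits one 4-byte access at offset 0, the next runs HImode for offset
   4, and the last runs QImode for the final byte at offset 6, at which
   point m_len reaches zero.  */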

/* Derived class from op_by_pieces_d, providing support for block move
   operations.  */

class move_by_pieces_d : public op_by_pieces_d
{
  insn_gen_fn m_gen_fun;
  void generate (rtx, rtx, machine_mode);
  bool prepare_mode (machine_mode, unsigned int);

 public:
  move_by_pieces_d (rtx to, rtx from, unsigned HOST_WIDE_INT len,
		    unsigned int align)
    : op_by_pieces_d (to, false, from, true, NULL, NULL, len, align)
  {
  }
  rtx finish_endp (int);
};

/* Return true if MODE can be used for a set of copies, given an
   alignment ALIGN.  Prepare whatever data is necessary for later
   calls to generate.  */

bool
move_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align)
{
  insn_code icode = optab_handler (mov_optab, mode);
  m_gen_fun = GEN_FCN (icode);
  return icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode);
}

/* A callback used when iterating for a move_by_pieces_operation.
   OP0 and OP1 are the destination and source for the piece to be moved
   in MODE.  If OP0 is NULL, this means we should generate a push of
   OP1; otherwise the move is emitted via the insn gen function selected
   by prepare_mode.  */

void
move_by_pieces_d::generate (rtx op0, rtx op1,
			    machine_mode mode ATTRIBUTE_UNUSED)
{
#ifdef PUSH_ROUNDING
  if (op0 == NULL_RTX)
    {
      emit_single_push_insn (mode, op1, NULL);
      return;
    }
#endif
  emit_insn (m_gen_fun (op0, op1));
}

/* Perform the final adjustment at the end of a string to obtain the
   correct return value for the block operation.  If ENDP is 1 return
   memory at the end ala mempcpy, and if ENDP is 2 return memory at the
   end minus one byte ala stpcpy.  */

rtx
move_by_pieces_d::finish_endp (int endp)
{
  gcc_assert (!m_reverse);
  if (endp == 2)
    {
      m_to.maybe_postinc (-1);
      --m_offset;
    }
  return m_to.adjust (QImode, m_offset);
}

/* Generate several move instructions to copy LEN bytes from block FROM to
   block TO.  (These are MEM rtx's with BLKmode).

   If PUSH_ROUNDING is defined and TO is NULL, emit_single_push_insn is
   used to push FROM to the stack.

   ALIGN is maximum stack alignment we can assume.

   If ENDP is 0 return TO, if ENDP is 1 return memory at the end ala
   mempcpy, and if ENDP is 2 return memory at the end minus one byte ala
   stpcpy.  */

rtx
move_by_pieces (rtx to, rtx from, unsigned HOST_WIDE_INT len,
		unsigned int align, int endp)
{
#ifndef PUSH_ROUNDING
  if (to == NULL)
    gcc_unreachable ();
#endif

  move_by_pieces_d data (to, from, len, align);

  data.run ();

  if (endp)
    return data.finish_endp (endp);
  else
    return to;
}
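
/* A minimal usage sketch (hypothetical MEMs, not code from this file):
   given BLKmode MEMs DST and SRC and a known 8-byte length, a caller
   that has checked can_move_by_pieces (8, align) can emit the copy with

     move_by_pieces (dst, src, 8, align, 0);

   which returns DST; passing 1 or 2 for ENDP instead yields the
   mempcpy- or stpcpy-style end address.  */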

/* Derived class from op_by_pieces_d, providing support for block store
   operations.  */

class store_by_pieces_d : public op_by_pieces_d
{
  insn_gen_fn m_gen_fun;
  void generate (rtx, rtx, machine_mode);
  bool prepare_mode (machine_mode, unsigned int);

 public:
  store_by_pieces_d (rtx to, by_pieces_constfn cfn, void *cfn_data,
		     unsigned HOST_WIDE_INT len, unsigned int align)
    : op_by_pieces_d (to, false, NULL_RTX, true, cfn, cfn_data, len, align)
  {
  }
  rtx finish_endp (int);
};

/* Return true if MODE can be used for a set of stores, given an
   alignment ALIGN.  Prepare whatever data is necessary for later
   calls to generate.  */

bool
store_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align)
{
  insn_code icode = optab_handler (mov_optab, mode);
  m_gen_fun = GEN_FCN (icode);
  return icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode);
}

/* A callback used when iterating for a store_by_pieces_operation.
   OP0 is the destination for the piece to be stored in MODE, and OP1
   is the (constant) value to store; the store is emitted via the insn
   gen function selected by prepare_mode.  */

void
store_by_pieces_d::generate (rtx op0, rtx op1, machine_mode)
{
  emit_insn (m_gen_fun (op0, op1));
}

/* Perform the final adjustment at the end of a string to obtain the
   correct return value for the block operation.  If ENDP is 1 return
   memory at the end ala mempcpy, and if ENDP is 2 return memory at the
   end minus one byte ala stpcpy.  */

rtx
store_by_pieces_d::finish_endp (int endp)
{
  gcc_assert (!m_reverse);
  if (endp == 2)
    {
      m_to.maybe_postinc (-1);
      --m_offset;
    }
  return m_to.adjust (QImode, m_offset);
}

/* Determine whether the LEN bytes generated by CONSTFUN can be
   stored to memory using several move instructions.  CONSTFUNDATA is
   a pointer which will be passed as argument in every CONSTFUN call.
   ALIGN is maximum alignment we can assume.  MEMSETP is true if this is
   a memset operation and false if it's a copy of a constant string.
   Return nonzero if a call to store_by_pieces should succeed.  */

int
can_store_by_pieces (unsigned HOST_WIDE_INT len,
		     rtx (*constfun) (void *, HOST_WIDE_INT, scalar_int_mode),
		     void *constfundata, unsigned int align, bool memsetp)
{
  unsigned HOST_WIDE_INT l;
  unsigned int max_size;
  HOST_WIDE_INT offset = 0;
  enum insn_code icode;
  int reverse;
  /* cst is set but not used if LEGITIMATE_CONSTANT doesn't use it.  */
  rtx cst ATTRIBUTE_UNUSED;

  if (len == 0)
    return 1;

  if (!targetm.use_by_pieces_infrastructure_p (len, align,
					       memsetp
					       ? SET_BY_PIECES
					       : STORE_BY_PIECES,
					       optimize_insn_for_speed_p ()))
    return 0;

  align = alignment_for_piecewise_move (STORE_MAX_PIECES, align);

  /* We would first store what we can in the largest integer mode, then go to
     successively smaller modes.  */

  for (reverse = 0;
       reverse <= (HAVE_PRE_DECREMENT || HAVE_POST_DECREMENT);
       reverse++)
    {
      l = len;
      max_size = STORE_MAX_PIECES + 1;
      while (max_size > 1 && l > 0)
	{
	  scalar_int_mode mode = widest_int_mode_for_size (max_size);

	  icode = optab_handler (mov_optab, mode);
	  if (icode != CODE_FOR_nothing
	      && align >= GET_MODE_ALIGNMENT (mode))
	    {
	      unsigned int size = GET_MODE_SIZE (mode);

	      while (l >= size)
		{
		  if (reverse)
		    offset -= size;

		  cst = (*constfun) (constfundata, offset, mode);
		  if (!targetm.legitimate_constant_p (mode, cst))
		    return 0;

		  if (!reverse)
		    offset += size;

		  l -= size;
		}
	    }

	  max_size = GET_MODE_SIZE (mode);
	}

      /* The code above should have handled everything.  */
      gcc_assert (!l);
    }

  return 1;
}

/* Generate several move instructions to store LEN bytes generated by
   CONSTFUN to block TO.  (A MEM rtx with BLKmode).  CONSTFUNDATA is a
   pointer which will be passed as argument in every CONSTFUN call.
   ALIGN is maximum alignment we can assume.  MEMSETP is true if this is
   a memset operation and false if it's a copy of a constant string.
   If ENDP is 0 return TO, if ENDP is 1 return memory at the end ala
   mempcpy, and if ENDP is 2 return memory at the end minus one byte ala
   stpcpy.  */

rtx
store_by_pieces (rtx to, unsigned HOST_WIDE_INT len,
		 rtx (*constfun) (void *, HOST_WIDE_INT, scalar_int_mode),
		 void *constfundata, unsigned int align, bool memsetp, int endp)
{
  if (len == 0)
    {
      gcc_assert (endp != 2);
      return to;
    }

  gcc_assert (targetm.use_by_pieces_infrastructure_p
	      (len, align,
	       memsetp ? SET_BY_PIECES : STORE_BY_PIECES,
	       optimize_insn_for_speed_p ()));

  store_by_pieces_d data (to, constfun, constfundata, len, align);
  data.run ();

  if (endp)
    return data.finish_endp (endp);
  else
    return to;
}
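
/* A sketch of a CONSTFUN suitable for a memset-style store (the helper
   name is hypothetical; clear_by_pieces_1 below is the in-tree callback
   for the all-zeros case):

     static rtx
     fill_byte_1 (void *data, HOST_WIDE_INT, scalar_int_mode mode)
     {
       unsigned char c = *(unsigned char *) data;
       unsigned HOST_WIDE_INT v = 0;
       for (unsigned int i = 0; i < GET_MODE_SIZE (mode); i++)
	 v = (v << 8) | c;
       return gen_int_mode (v, mode);
     }

   after which store_by_pieces (to, len, fill_byte_1, &c, align, true, 0)
   emits the stores, provided can_store_by_pieces agreed beforehand.  */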

/* Callback routine for clear_by_pieces.
   Return const0_rtx unconditionally.  */

static rtx
clear_by_pieces_1 (void *, HOST_WIDE_INT, scalar_int_mode)
{
  return const0_rtx;
}

/* Generate several move instructions to clear LEN bytes of block TO.  (A MEM
   rtx with BLKmode).  ALIGN is maximum alignment we can assume.  */

static void
clear_by_pieces (rtx to, unsigned HOST_WIDE_INT len, unsigned int align)
{
  if (len == 0)
    return;

  store_by_pieces_d data (to, clear_by_pieces_1, NULL, len, align);
  data.run ();
}

/* Context used by compare_by_pieces_d::generate.  It stores the fail
   label to jump to in case of miscomparison, and for branch ratios
   greater than 1, it stores an accumulator and the current and maximum
   counts before emitting another branch.  */

class compare_by_pieces_d : public op_by_pieces_d
{
  rtx_code_label *m_fail_label;
  rtx m_accumulator;
  int m_count, m_batch;

  void generate (rtx, rtx, machine_mode);
  bool prepare_mode (machine_mode, unsigned int);
  void finish_mode (machine_mode);
 public:
  compare_by_pieces_d (rtx op0, rtx op1, by_pieces_constfn op1_cfn,
		       void *op1_cfn_data, HOST_WIDE_INT len, int align,
		       rtx_code_label *fail_label)
    : op_by_pieces_d (op0, true, op1, true, op1_cfn, op1_cfn_data, len, align)
  {
    m_fail_label = fail_label;
  }
};

/* A callback used when iterating for a compare_by_pieces_operation.
   OP0 and OP1 are the values that have been loaded and should be
   compared in MODE.  The enclosing compare_by_pieces_d object provides
   the fail label and the accumulator and batch state.  */

void
compare_by_pieces_d::generate (rtx op0, rtx op1, machine_mode mode)
{
  if (m_batch > 1)
    {
      rtx temp = expand_binop (mode, sub_optab, op0, op1, NULL_RTX,
			       true, OPTAB_LIB_WIDEN);
      if (m_count != 0)
	temp = expand_binop (mode, ior_optab, m_accumulator, temp, temp,
			     true, OPTAB_LIB_WIDEN);
      m_accumulator = temp;

      if (++m_count < m_batch)
	return;

      m_count = 0;
      op0 = m_accumulator;
      op1 = const0_rtx;
      m_accumulator = NULL_RTX;
    }
  do_compare_rtx_and_jump (op0, op1, NE, true, mode, NULL_RTX, NULL,
			   m_fail_label, profile_probability::uninitialized ());
}

/* Return true if MODE can be used for a set of moves and comparisons,
   given an alignment ALIGN.  Prepare whatever data is necessary for
   later calls to generate.  */

bool
compare_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align)
{
  insn_code icode = optab_handler (mov_optab, mode);
  if (icode == CODE_FOR_nothing
      || align < GET_MODE_ALIGNMENT (mode)
      || !can_compare_p (EQ, mode, ccp_jump))
    return false;
  m_batch = targetm.compare_by_pieces_branch_ratio (mode);
  if (m_batch < 0)
    return false;
  m_accumulator = NULL_RTX;
  m_count = 0;
  return true;
}

/* Called after expanding a series of comparisons in MODE.  If we have
   accumulated results for which we haven't emitted a branch yet, do
   so now.  */

void
compare_by_pieces_d::finish_mode (machine_mode mode)
{
  if (m_accumulator != NULL_RTX)
    do_compare_rtx_and_jump (m_accumulator, const0_rtx, NE, true, mode,
			     NULL_RTX, NULL, m_fail_label,
			     profile_probability::uninitialized ());
}

/* Generate several move instructions to compare LEN bytes from blocks
   ARG0 and ARG1.  (These are MEM rtx's with BLKmode).

   ALIGN is the maximum alignment we can assume.

   The result, 0 if the blocks compare equal and 1 otherwise, is left in
   TARGET if that is a suitable pseudo register, else in a fresh pseudo.

   Optionally, the caller can pass a constfn and associated data in A1_CFN
   and A1_CFN_DATA, describing that the second operand being compared is a
   known constant and how to obtain its data.  */

static rtx
compare_by_pieces (rtx arg0, rtx arg1, unsigned HOST_WIDE_INT len,
		   rtx target, unsigned int align,
		   by_pieces_constfn a1_cfn, void *a1_cfn_data)
{
  rtx_code_label *fail_label = gen_label_rtx ();
  rtx_code_label *end_label = gen_label_rtx ();

  if (target == NULL_RTX
      || !REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER)
    target = gen_reg_rtx (TYPE_MODE (integer_type_node));

  compare_by_pieces_d data (arg0, arg1, a1_cfn, a1_cfn_data, len, align,
			    fail_label);

  data.run ();

  emit_move_insn (target, const0_rtx);
  emit_jump (end_label);
  emit_barrier ();
  emit_label (fail_label);
  emit_move_insn (target, const1_rtx);
  emit_label (end_label);

  return target;
}
\f
/* Emit code to move a block Y to a block X.  This may be done with
   string-move instructions, with multiple scalar move instructions,
   or with a library call.

   Both X and Y must be MEM rtx's (perhaps inside VOLATILE) with mode BLKmode.
   SIZE is an rtx that says how long they are.
   ALIGN is the maximum alignment we can assume they have.
   METHOD describes what kind of copy this is, and what mechanisms may be used.
   MIN_SIZE is the minimal size of the block to move.
   MAX_SIZE is the maximal size of the block to move; if it cannot be
   represented in unsigned HOST_WIDE_INT, it is the mask of all ones.

   Return the address of the new block, if memcpy is called and returns it,
   0 otherwise.  */

rtx
emit_block_move_hints (rtx x, rtx y, rtx size, enum block_op_methods method,
		       unsigned int expected_align, HOST_WIDE_INT expected_size,
		       unsigned HOST_WIDE_INT min_size,
		       unsigned HOST_WIDE_INT max_size,
		       unsigned HOST_WIDE_INT probable_max_size)
{
  bool may_use_call;
  rtx retval = 0;
  unsigned int align;

  gcc_assert (size);
  if (CONST_INT_P (size) && INTVAL (size) == 0)
    return 0;

  switch (method)
    {
    case BLOCK_OP_NORMAL:
    case BLOCK_OP_TAILCALL:
      may_use_call = true;
      break;

    case BLOCK_OP_CALL_PARM:
      may_use_call = block_move_libcall_safe_for_call_parm ();

      /* Make inhibit_defer_pop nonzero around the library call
	 to force it to pop the arguments right away.  */
      NO_DEFER_POP;
      break;

    case BLOCK_OP_NO_LIBCALL:
      may_use_call = false;
      break;

    default:
      gcc_unreachable ();
    }

  gcc_assert (MEM_P (x) && MEM_P (y));
  align = MIN (MEM_ALIGN (x), MEM_ALIGN (y));
  gcc_assert (align >= BITS_PER_UNIT);

  /* Make sure we've got BLKmode addresses; store_one_arg can decide that
     block copy is more efficient for other large modes, e.g. DCmode.  */
  x = adjust_address (x, BLKmode, 0);
  y = adjust_address (y, BLKmode, 0);

  /* Set MEM_SIZE as appropriate for this block copy.  The main place this
     can be incorrect is coming from __builtin_memcpy.  */
  if (CONST_INT_P (size))
    {
      x = shallow_copy_rtx (x);
      y = shallow_copy_rtx (y);
      set_mem_size (x, INTVAL (size));
      set_mem_size (y, INTVAL (size));
    }

  if (CONST_INT_P (size) && can_move_by_pieces (INTVAL (size), align))
    move_by_pieces (x, y, INTVAL (size), align, 0);
  else if (emit_block_move_via_movmem (x, y, size, align,
				       expected_align, expected_size,
				       min_size, max_size, probable_max_size))
    ;
  else if (may_use_call
	   && ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (x))
	   && ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (y)))
    {
      /* Since x and y are passed to a libcall, mark the corresponding
	 tree EXPR as addressable.  */
      tree y_expr = MEM_EXPR (y);
      tree x_expr = MEM_EXPR (x);
      if (y_expr)
	mark_addressable (y_expr);
      if (x_expr)
	mark_addressable (x_expr);
      retval = emit_block_copy_via_libcall (x, y, size,
					    method == BLOCK_OP_TAILCALL);
    }

  else
    emit_block_move_via_loop (x, y, size, align);

  if (method == BLOCK_OP_CALL_PARM)
    OK_DEFER_POP;

  return retval;
}

rtx
emit_block_move (rtx x, rtx y, rtx size, enum block_op_methods method)
{
  unsigned HOST_WIDE_INT max, min = 0;
  if (GET_CODE (size) == CONST_INT)
    min = max = UINTVAL (size);
  else
    max = GET_MODE_MASK (GET_MODE (size));
  return emit_block_move_hints (x, y, size, method, 0, -1,
				min, max, max);
}
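
/* For illustration (hypothetical MEMs, not code from this file): a
   32-byte block copy with no extra size hints can be emitted as

     emit_block_move (dst_mem, src_mem, GEN_INT (32), BLOCK_OP_NORMAL);

   which tries move_by_pieces first, then a movmem pattern, and finally
   falls back to a library call or an explicit loop, as described for
   emit_block_move_hints above.  */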

/* A subroutine of emit_block_move.  Returns true if calling the
   block move libcall will not clobber any parameters which may have
   already been placed on the stack.  */

static bool
block_move_libcall_safe_for_call_parm (void)
{
#if defined (REG_PARM_STACK_SPACE)
  tree fn;
#endif

  /* If arguments are pushed on the stack, then they're safe.  */
  if (PUSH_ARGS)
    return true;

  /* If registers go on the stack anyway, any argument is sure to clobber
     an outgoing argument.  */
#if defined (REG_PARM_STACK_SPACE)
  fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
  /* Avoid set but not used warning if *REG_PARM_STACK_SPACE doesn't
     depend on its argument.  */
  (void) fn;
  if (OUTGOING_REG_PARM_STACK_SPACE ((!fn ? NULL_TREE : TREE_TYPE (fn)))
      && REG_PARM_STACK_SPACE (fn) != 0)
    return false;
#endif

  /* If any argument goes in memory, then it might clobber an outgoing
     argument.  */
  {
    CUMULATIVE_ARGS args_so_far_v;
    cumulative_args_t args_so_far;
    tree fn, arg;

    fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
    INIT_CUMULATIVE_ARGS (args_so_far_v, TREE_TYPE (fn), NULL_RTX, 0, 3);
    args_so_far = pack_cumulative_args (&args_so_far_v);

    arg = TYPE_ARG_TYPES (TREE_TYPE (fn));
    for ( ; arg != void_list_node ; arg = TREE_CHAIN (arg))
      {
	machine_mode mode = TYPE_MODE (TREE_VALUE (arg));
	rtx tmp = targetm.calls.function_arg (args_so_far, mode,
					      NULL_TREE, true);
	if (!tmp || !REG_P (tmp))
	  return false;
	if (targetm.calls.arg_partial_bytes (args_so_far, mode, NULL, 1))
	  return false;
	targetm.calls.function_arg_advance (args_so_far, mode,
					    NULL_TREE, true);
      }
  }
  return true;
}

/* A subroutine of emit_block_move.  Expand a movmem pattern;
   return true if successful.  */

static bool
emit_block_move_via_movmem (rtx x, rtx y, rtx size, unsigned int align,
			    unsigned int expected_align, HOST_WIDE_INT expected_size,
			    unsigned HOST_WIDE_INT min_size,
			    unsigned HOST_WIDE_INT max_size,
			    unsigned HOST_WIDE_INT probable_max_size)
{
  int save_volatile_ok = volatile_ok;

  if (expected_align < align)
    expected_align = align;
  if (expected_size != -1)
    {
      if ((unsigned HOST_WIDE_INT)expected_size > probable_max_size)
	expected_size = probable_max_size;
      if ((unsigned HOST_WIDE_INT)expected_size < min_size)
	expected_size = min_size;
    }

  /* Since this is a move insn, we don't care about volatility.  */
  volatile_ok = 1;

  /* Try the most limited insn first, because there's no point
     including more than one in the machine description unless
     the more limited one has some advantage.  */

  opt_scalar_int_mode mode_iter;
  FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
    {
      scalar_int_mode mode = mode_iter.require ();
      enum insn_code code = direct_optab_handler (movmem_optab, mode);

      if (code != CODE_FOR_nothing
	  /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
	     here because if SIZE is less than the mode mask, as it is
	     returned by the macro, it will definitely be less than the
	     actual mode mask.  Since SIZE is within the Pmode address
	     space, we limit MODE to Pmode.  */
	  && ((CONST_INT_P (size)
	       && ((unsigned HOST_WIDE_INT) INTVAL (size)
		   <= (GET_MODE_MASK (mode) >> 1)))
	      || max_size <= (GET_MODE_MASK (mode) >> 1)
	      || GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode)))
	{
	  struct expand_operand ops[9];
	  unsigned int nops;

	  /* ??? When called via emit_block_move_for_call, it'd be
	     nice if there were some way to inform the backend, so
	     that it doesn't fail the expansion because it thinks
	     emitting the libcall would be more efficient.  */
	  nops = insn_data[(int) code].n_generator_args;
	  gcc_assert (nops == 4 || nops == 6 || nops == 8 || nops == 9);

	  create_fixed_operand (&ops[0], x);
	  create_fixed_operand (&ops[1], y);
	  /* The check above guarantees that this size conversion is valid.  */
	  create_convert_operand_to (&ops[2], size, mode, true);
	  create_integer_operand (&ops[3], align / BITS_PER_UNIT);
	  if (nops >= 6)
	    {
	      create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
	      create_integer_operand (&ops[5], expected_size);
	    }
1783 if (nops >= 8)
1784 {
1785 create_integer_operand (&ops[6], min_size);
1786 /* If we cannot represent the maximal size,
1787 pass NULL for the parameter. */
1788 if ((HOST_WIDE_INT) max_size != -1)
1789 create_integer_operand (&ops[7], max_size);
1790 else
1791 create_fixed_operand (&ops[7], NULL);
1792 }
1793 if (nops == 9)
1794 {
1795 /* If we cannot represent the maximal size,
1796 pass NULL for the parameter. */
1797 if ((HOST_WIDE_INT) probable_max_size != -1)
1798 create_integer_operand (&ops[8], probable_max_size);
1799 else
1800 create_fixed_operand (&ops[8], NULL);
1801 }
1802 if (maybe_expand_insn (code, nops, ops))
1803 {
1804 volatile_ok = save_volatile_ok;
1805 return true;
1806 }
1807 }
1808 }
1809
1810 volatile_ok = save_volatile_ok;
1811 return false;
1812 }
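
/* For illustration: n_generator_args distinguishes the pattern shapes
accepted above: 4 operands (dst, src, size, align), 6 (adding
expected_align and expected_size), 8 (adding min_size and max_size),
or 9 (adding probable_max_size). A port that declares only the
four-operand form never sees the size hints; they are simply dropped
here rather than passed as extra operands. */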
1813
1814 /* A subroutine of emit_block_move. Copy the data via an explicit
1815 loop. This is used only when libcalls are forbidden. */
1816 /* ??? It'd be nice to copy in hunks larger than QImode. */
1817
1818 static void
1819 emit_block_move_via_loop (rtx x, rtx y, rtx size,
1820 unsigned int align ATTRIBUTE_UNUSED)
1821 {
1822 rtx_code_label *cmp_label, *top_label;
1823 rtx iter, x_addr, y_addr, tmp;
1824 machine_mode x_addr_mode = get_address_mode (x);
1825 machine_mode y_addr_mode = get_address_mode (y);
1826 machine_mode iter_mode;
1827
1828 iter_mode = GET_MODE (size);
1829 if (iter_mode == VOIDmode)
1830 iter_mode = word_mode;
1831
1832 top_label = gen_label_rtx ();
1833 cmp_label = gen_label_rtx ();
1834 iter = gen_reg_rtx (iter_mode);
1835
1836 emit_move_insn (iter, const0_rtx);
1837
1838 x_addr = force_operand (XEXP (x, 0), NULL_RTX);
1839 y_addr = force_operand (XEXP (y, 0), NULL_RTX);
1840 do_pending_stack_adjust ();
1841
1842 emit_jump (cmp_label);
1843 emit_label (top_label);
1844
1845 tmp = convert_modes (x_addr_mode, iter_mode, iter, true);
1846 x_addr = simplify_gen_binary (PLUS, x_addr_mode, x_addr, tmp);
1847
1848 if (x_addr_mode != y_addr_mode)
1849 tmp = convert_modes (y_addr_mode, iter_mode, iter, true);
1850 y_addr = simplify_gen_binary (PLUS, y_addr_mode, y_addr, tmp);
1851
1852 x = change_address (x, QImode, x_addr);
1853 y = change_address (y, QImode, y_addr);
1854
1855 emit_move_insn (x, y);
1856
1857 tmp = expand_simple_binop (iter_mode, PLUS, iter, const1_rtx, iter,
1858 true, OPTAB_LIB_WIDEN);
1859 if (tmp != iter)
1860 emit_move_insn (iter, tmp);
1861
1862 emit_label (cmp_label);
1863
1864 emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode,
1865 true, top_label,
1866 profile_probability::guessed_always ()
1867 .apply_scale (9, 10));
1868 }
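
/* For illustration, the RTL emitted above amounts to (pseudo-C):

iter = 0;
goto cmp;
top:
*(x_addr + iter) = *(y_addr + iter); // one byte (QImode)
iter += 1;
cmp:
if (iter < size) goto top; // predicted taken with probability 9/10

hence the ??? note above about copying in hunks larger than QImode. */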
1869 \f
1870 /* Expand a call to memcpy or memmove or memcmp, and return the result.
1871 TAILCALL is true if this is a tail call. */
1872
1873 rtx
1874 emit_block_op_via_libcall (enum built_in_function fncode, rtx dst, rtx src,
1875 rtx size, bool tailcall)
1876 {
1877 rtx dst_addr, src_addr;
1878 tree call_expr, dst_tree, src_tree, size_tree;
1879 machine_mode size_mode;
1880
1881 dst_addr = copy_addr_to_reg (XEXP (dst, 0));
1882 dst_addr = convert_memory_address (ptr_mode, dst_addr);
1883 dst_tree = make_tree (ptr_type_node, dst_addr);
1884
1885 src_addr = copy_addr_to_reg (XEXP (src, 0));
1886 src_addr = convert_memory_address (ptr_mode, src_addr);
1887 src_tree = make_tree (ptr_type_node, src_addr);
1888
1889 size_mode = TYPE_MODE (sizetype);
1890 size = convert_to_mode (size_mode, size, 1);
1891 size = copy_to_mode_reg (size_mode, size);
1892 size_tree = make_tree (sizetype, size);
1893
1894 /* It is incorrect to use the libcall calling conventions for calls to
1895 memcpy/memmove/memcmp because they can be provided by the user. */
1896 tree fn = builtin_decl_implicit (fncode);
1897 call_expr = build_call_expr (fn, 3, dst_tree, src_tree, size_tree);
1898 CALL_EXPR_TAILCALL (call_expr) = tailcall;
1899
1900 return expand_call (call_expr, NULL_RTX, false);
1901 }
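
/* A hedged usage sketch (caller context and variable names are
hypothetical): given BLKmode MEMs DST and SRC and a length rtx LEN,

rtx ret = emit_block_op_via_libcall (BUILT_IN_MEMCPY, dst, src,
len, false);

emits an ordinary (non-tail) call. The tree-level detour above exists
so the call uses the normal C ABI rather than the internal libcall
conventions, since the user may provide these functions. */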
1902
1903 /* Try to expand cmpstrn or cmpmem operation ICODE with the given operands.
1904 ARG3_TYPE is the type of ARG3_RTX. Return the result rtx on success,
1905 otherwise return null. */
1906
1907 rtx
1908 expand_cmpstrn_or_cmpmem (insn_code icode, rtx target, rtx arg1_rtx,
1909 rtx arg2_rtx, tree arg3_type, rtx arg3_rtx,
1910 HOST_WIDE_INT align)
1911 {
1912 machine_mode insn_mode = insn_data[icode].operand[0].mode;
1913
1914 if (target && (!REG_P (target) || HARD_REGISTER_P (target)))
1915 target = NULL_RTX;
1916
1917 struct expand_operand ops[5];
1918 create_output_operand (&ops[0], target, insn_mode);
1919 create_fixed_operand (&ops[1], arg1_rtx);
1920 create_fixed_operand (&ops[2], arg2_rtx);
1921 create_convert_operand_from (&ops[3], arg3_rtx, TYPE_MODE (arg3_type),
1922 TYPE_UNSIGNED (arg3_type));
1923 create_integer_operand (&ops[4], align);
1924 if (maybe_expand_insn (icode, 5, ops))
1925 return ops[0].value;
1926 return NULL_RTX;
1927 }
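
/* For illustration: the five operands built above mirror the cmpmem /
cmpstrn pattern interface -- the result, the two memory blocks, the
length (converted from ARG3_TYPE's mode with its signedness), and the
common alignment. maybe_expand_insn backs out if any operand fails
the pattern's predicates, so a NULL return here just means "try
another strategy". */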
1928
1929 /* Expand a block compare between X and Y with length LEN using the
1930 cmpmem optab, placing the result in TARGET. LEN_TYPE is the type
1931 of the expression that was used to calculate the length. ALIGN
1932 gives the known minimum common alignment. */
1933
1934 static rtx
1935 emit_block_cmp_via_cmpmem (rtx x, rtx y, rtx len, tree len_type, rtx target,
1936 unsigned align)
1937 {
1938 /* Note: The cmpstrnsi pattern, if it exists, is not suitable for
1939 implementing memcmp because it will stop if it encounters two
1940 zero bytes. */
1941 insn_code icode = direct_optab_handler (cmpmem_optab, SImode);
1942
1943 if (icode == CODE_FOR_nothing)
1944 return NULL_RTX;
1945
1946 return expand_cmpstrn_or_cmpmem (icode, target, x, y, len_type, len, align);
1947 }
1948
1949 /* Emit code to compare a block Y to a block X. This may be done with
1950 string-compare instructions, with multiple scalar instructions,
1951 or with a library call.
1952
1953 Both X and Y must be MEM rtx's. LEN is an rtx that says how long
1954 they are. LEN_TYPE is the type of the expression that was used to
1955 calculate it.
1956
1957 If EQUALITY_ONLY is true, it means we don't have to return the tri-state
1958 value of a normal memcmp call; instead we can just compare for equality.
1961
1962 Optionally, the caller can pass a constfn and associated data in Y_CFN
1963 and Y_CFN_DATA, describing that the second operand being compared is a
1964 known constant and how to obtain its data.
1965 Return the result of the comparison, or NULL_RTX if we failed to
1966 perform the operation. */
1967
1968 rtx
1969 emit_block_cmp_hints (rtx x, rtx y, rtx len, tree len_type, rtx target,
1970 bool equality_only, by_pieces_constfn y_cfn,
1971 void *y_cfndata)
1972 {
1973 rtx result = 0;
1974
1975 if (CONST_INT_P (len) && INTVAL (len) == 0)
1976 return const0_rtx;
1977
1978 gcc_assert (MEM_P (x) && MEM_P (y));
1979 unsigned int align = MIN (MEM_ALIGN (x), MEM_ALIGN (y));
1980 gcc_assert (align >= BITS_PER_UNIT);
1981
1982 x = adjust_address (x, BLKmode, 0);
1983 y = adjust_address (y, BLKmode, 0);
1984
1985 if (equality_only
1986 && CONST_INT_P (len)
1987 && can_do_by_pieces (INTVAL (len), align, COMPARE_BY_PIECES))
1988 result = compare_by_pieces (x, y, INTVAL (len), target, align,
1989 y_cfn, y_cfndata);
1990 else
1991 result = emit_block_cmp_via_cmpmem (x, y, len, len_type, target, align);
1992
1993 return result;
1994 }
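
/* For illustration: the choice above is two-level. An equality-only
compare of a compile-time-constant length that the target's cost model
accepts (can_do_by_pieces) is open-coded by compare_by_pieces;
everything else -- variable lengths, ordered tri-state results, or
sizes past the by-pieces threshold -- goes to the cmpmem expander,
leaving a NULL_RTX result for the caller to handle, typically with a
memcmp libcall. */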
1995 \f
1996 /* Copy all or part of a value X into registers starting at REGNO.
1997 The number of registers to be filled is NREGS. */
1998
1999 void
2000 move_block_to_reg (int regno, rtx x, int nregs, machine_mode mode)
2001 {
2002 if (nregs == 0)
2003 return;
2004
2005 if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x))
2006 x = validize_mem (force_const_mem (mode, x));
2007
2008 /* See if the machine can do this with a load multiple insn. */
2009 if (targetm.have_load_multiple ())
2010 {
2011 rtx_insn *last = get_last_insn ();
2012 rtx first = gen_rtx_REG (word_mode, regno);
2013 if (rtx_insn *pat = targetm.gen_load_multiple (first, x,
2014 GEN_INT (nregs)))
2015 {
2016 emit_insn (pat);
2017 return;
2018 }
2019 else
2020 delete_insns_since (last);
2021 }
2022
2023 for (int i = 0; i < nregs; i++)
2024 emit_move_insn (gen_rtx_REG (word_mode, regno + i),
2025 operand_subword_force (x, i, mode));
2026 }
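
/* A hedged example (register numbers hypothetical): on a target with
word-sized hard regs, move_block_to_reg (4, x, 2, mode) fills regs 4
and 5 from consecutive words of X, preferring a single load-multiple
instruction when the port provides one and silently falling back to
two word moves when the generator declines. */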
2027
2028 /* Copy all or part of a BLKmode value X out of registers starting at REGNO.
2029 The number of registers to be filled is NREGS. */
2030
2031 void
2032 move_block_from_reg (int regno, rtx x, int nregs)
2033 {
2034 if (nregs == 0)
2035 return;
2036
2037 /* See if the machine can do this with a store multiple insn. */
2038 if (targetm.have_store_multiple ())
2039 {
2040 rtx_insn *last = get_last_insn ();
2041 rtx first = gen_rtx_REG (word_mode, regno);
2042 if (rtx_insn *pat = targetm.gen_store_multiple (x, first,
2043 GEN_INT (nregs)))
2044 {
2045 emit_insn (pat);
2046 return;
2047 }
2048 else
2049 delete_insns_since (last);
2050 }
2051
2052 for (int i = 0; i < nregs; i++)
2053 {
2054 rtx tem = operand_subword (x, i, 1, BLKmode);
2055
2056 gcc_assert (tem);
2057
2058 emit_move_insn (tem, gen_rtx_REG (word_mode, regno + i));
2059 }
2060 }
2061
2062 /* Generate a PARALLEL rtx for a new non-consecutive group of registers from
2063 ORIG, where ORIG is a non-consecutive group of registers represented by
2064 a PARALLEL. The clone is identical to the original except in that the
2065 original set of registers is replaced by a new set of pseudo registers.
2066 The new set has the same modes as the original set. */
2067
2068 rtx
2069 gen_group_rtx (rtx orig)
2070 {
2071 int i, length;
2072 rtx *tmps;
2073
2074 gcc_assert (GET_CODE (orig) == PARALLEL);
2075
2076 length = XVECLEN (orig, 0);
2077 tmps = XALLOCAVEC (rtx, length);
2078
2079 /* Skip a NULL entry in the first slot. */
2080 i = XEXP (XVECEXP (orig, 0, 0), 0) ? 0 : 1;
2081
2082 if (i)
2083 tmps[0] = 0;
2084
2085 for (; i < length; i++)
2086 {
2087 machine_mode mode = GET_MODE (XEXP (XVECEXP (orig, 0, i), 0));
2088 rtx offset = XEXP (XVECEXP (orig, 0, i), 1);
2089
2090 tmps[i] = gen_rtx_EXPR_LIST (VOIDmode, gen_reg_rtx (mode), offset);
2091 }
2092
2093 return gen_rtx_PARALLEL (GET_MODE (orig), gen_rtvec_v (length, tmps));
2094 }
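
/* For illustration: the clone shares everything with ORIG except the
registers themselves. A two-entry PARALLEL such as

(parallel [(expr_list (reg:DI 3) (const_int 0))
(expr_list (reg:DI 4) (const_int 8))])

comes back with the same modes and byte offsets but with fresh pseudos
in place of regs 3 and 4 (numbers illustrative). */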
2095
2096 /* A subroutine of emit_group_load. Arguments as for emit_group_load,
2097 except that values are placed in TMPS[i], and must later be moved
2098 into corresponding XEXP (XVECEXP (DST, 0, i), 0) element. */
2099
2100 static void
2101 emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type,
2102 poly_int64 ssize)
2103 {
2104 rtx src;
2105 int start, i;
2106 machine_mode m = GET_MODE (orig_src);
2107
2108 gcc_assert (GET_CODE (dst) == PARALLEL);
2109
2110 if (m != VOIDmode
2111 && !SCALAR_INT_MODE_P (m)
2112 && !MEM_P (orig_src)
2113 && GET_CODE (orig_src) != CONCAT)
2114 {
2115 scalar_int_mode imode;
2116 if (int_mode_for_mode (GET_MODE (orig_src)).exists (&imode))
2117 {
2118 src = gen_reg_rtx (imode);
2119 emit_move_insn (gen_lowpart (GET_MODE (orig_src), src), orig_src);
2120 }
2121 else
2122 {
2123 src = assign_stack_temp (GET_MODE (orig_src), ssize);
2124 emit_move_insn (src, orig_src);
2125 }
2126 emit_group_load_1 (tmps, dst, src, type, ssize);
2127 return;
2128 }
2129
2130 /* Check for a NULL entry, used to indicate that the parameter goes
2131 both on the stack and in registers. */
2132 if (XEXP (XVECEXP (dst, 0, 0), 0))
2133 start = 0;
2134 else
2135 start = 1;
2136
2137 /* Process the pieces. */
2138 for (i = start; i < XVECLEN (dst, 0); i++)
2139 {
2140 machine_mode mode = GET_MODE (XEXP (XVECEXP (dst, 0, i), 0));
2141 poly_int64 bytepos = INTVAL (XEXP (XVECEXP (dst, 0, i), 1));
2142 poly_int64 bytelen = GET_MODE_SIZE (mode);
2143 poly_int64 shift = 0;
2144
2145 /* Handle trailing fragments that run over the size of the struct.
2146 It's the target's responsibility to make sure that the fragment
2147 cannot be strictly smaller in some cases and strictly larger
2148 in others. */
2149 gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
2150 if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
2151 {
2152 /* Arrange to shift the fragment to where it belongs.
2153 extract_bit_field loads to the lsb of the reg. */
2154 if (
2155 #ifdef BLOCK_REG_PADDING
2156 BLOCK_REG_PADDING (GET_MODE (orig_src), type, i == start)
2157 == (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD)
2158 #else
2159 BYTES_BIG_ENDIAN
2160 #endif
2161 )
2162 shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
2163 bytelen = ssize - bytepos;
2164 gcc_assert (maybe_gt (bytelen, 0));
2165 }
2166
2167 /* If we won't be loading directly from memory, protect the real source
2168 from strange tricks we might play; but make sure that the source can
2169 be loaded directly into the destination. */
2170 src = orig_src;
2171 if (!MEM_P (orig_src)
2172 && (!CONSTANT_P (orig_src)
2173 || (GET_MODE (orig_src) != mode
2174 && GET_MODE (orig_src) != VOIDmode)))
2175 {
2176 if (GET_MODE (orig_src) == VOIDmode)
2177 src = gen_reg_rtx (mode);
2178 else
2179 src = gen_reg_rtx (GET_MODE (orig_src));
2180
2181 emit_move_insn (src, orig_src);
2182 }
2183
2184 /* Optimize the access just a bit. */
2185 if (MEM_P (src)
2186 && (! targetm.slow_unaligned_access (mode, MEM_ALIGN (src))
2187 || MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode))
2188 && multiple_p (bytepos * BITS_PER_UNIT, GET_MODE_ALIGNMENT (mode))
2189 && known_eq (bytelen, GET_MODE_SIZE (mode)))
2190 {
2191 tmps[i] = gen_reg_rtx (mode);
2192 emit_move_insn (tmps[i], adjust_address (src, mode, bytepos));
2193 }
2194 else if (COMPLEX_MODE_P (mode)
2195 && GET_MODE (src) == mode
2196 && known_eq (bytelen, GET_MODE_SIZE (mode)))
2197 /* Let emit_move_complex do the bulk of the work. */
2198 tmps[i] = src;
2199 else if (GET_CODE (src) == CONCAT)
2200 {
2201 poly_int64 slen = GET_MODE_SIZE (GET_MODE (src));
2202 poly_int64 slen0 = GET_MODE_SIZE (GET_MODE (XEXP (src, 0)));
2203 unsigned int elt;
2204 poly_int64 subpos;
2205
2206 if (can_div_trunc_p (bytepos, slen0, &elt, &subpos)
2207 && known_le (subpos + bytelen, slen0))
2208 {
2209 /* The following assumes that the concatenated objects all
2210 have the same size. In this case, a simple calculation
2211 can be used to determine the object and the bit field
2212 to be extracted. */
2213 tmps[i] = XEXP (src, elt);
2214 if (maybe_ne (subpos, 0)
2215 || maybe_ne (subpos + bytelen, slen0)
2216 || (!CONSTANT_P (tmps[i])
2217 && (!REG_P (tmps[i]) || GET_MODE (tmps[i]) != mode)))
2218 tmps[i] = extract_bit_field (tmps[i], bytelen * BITS_PER_UNIT,
2219 subpos * BITS_PER_UNIT,
2220 1, NULL_RTX, mode, mode, false,
2221 NULL);
2222 }
2223 else
2224 {
2225 rtx mem;
2226
2227 gcc_assert (known_eq (bytepos, 0));
2228 mem = assign_stack_temp (GET_MODE (src), slen);
2229 emit_move_insn (mem, src);
2230 tmps[i] = extract_bit_field (mem, bytelen * BITS_PER_UNIT,
2231 0, 1, NULL_RTX, mode, mode, false,
2232 NULL);
2233 }
2234 }
2235 /* FIXME: A SIMD parallel will eventually lead to a subreg of a
2236 SIMD register, which is currently broken. Until we get GCC
2237 to emit proper RTL for these cases, let's dump to memory. */
2238 else if (VECTOR_MODE_P (GET_MODE (dst))
2239 && REG_P (src))
2240 {
2241 poly_uint64 slen = GET_MODE_SIZE (GET_MODE (src));
2242 rtx mem;
2243
2244 mem = assign_stack_temp (GET_MODE (src), slen);
2245 emit_move_insn (mem, src);
2246 tmps[i] = adjust_address (mem, mode, bytepos);
2247 }
2248 else if (CONSTANT_P (src) && GET_MODE (dst) != BLKmode
2249 && XVECLEN (dst, 0) > 1)
2250 tmps[i] = simplify_gen_subreg (mode, src, GET_MODE (dst), bytepos);
2251 else if (CONSTANT_P (src))
2252 {
2253 if (known_eq (bytelen, ssize))
2254 tmps[i] = src;
2255 else
2256 {
2257 rtx first, second;
2258
2259 /* TODO: const_wide_int can have sizes other than this... */
2260 gcc_assert (known_eq (2 * bytelen, ssize));
2261 split_double (src, &first, &second);
2262 if (i)
2263 tmps[i] = second;
2264 else
2265 tmps[i] = first;
2266 }
2267 }
2268 else if (REG_P (src) && GET_MODE (src) == mode)
2269 tmps[i] = src;
2270 else
2271 tmps[i] = extract_bit_field (src, bytelen * BITS_PER_UNIT,
2272 bytepos * BITS_PER_UNIT, 1, NULL_RTX,
2273 mode, mode, false, NULL);
2274
2275 if (maybe_ne (shift, 0))
2276 tmps[i] = expand_shift (LSHIFT_EXPR, mode, tmps[i],
2277 shift, tmps[i], 0);
2278 }
2279 }
2280
2281 /* Emit code to move a block SRC of type TYPE to a block DST,
2282 where DST is non-consecutive registers represented by a PARALLEL.
2283 SSIZE represents the total size of block SRC in bytes, or -1
2284 if not known. */
2285
2286 void
2287 emit_group_load (rtx dst, rtx src, tree type, poly_int64 ssize)
2288 {
2289 rtx *tmps;
2290 int i;
2291
2292 tmps = XALLOCAVEC (rtx, XVECLEN (dst, 0));
2293 emit_group_load_1 (tmps, dst, src, type, ssize);
2294
2295 /* Copy the extracted pieces into the proper (probable) hard regs. */
2296 for (i = 0; i < XVECLEN (dst, 0); i++)
2297 {
2298 rtx d = XEXP (XVECEXP (dst, 0, i), 0);
2299 if (d == NULL)
2300 continue;
2301 emit_move_insn (d, tmps[i]);
2302 }
2303 }
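
/* For illustration: the two-phase structure above matters --
emit_group_load_1 extracts every piece into TMPS[] before this loop
writes any of the final (probably hard) registers, which helps ensure
that writing an early destination register cannot clobber source
bytes still needed by a later piece. */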
2304
2305 /* Similar, but load SRC into new pseudos in a format that looks like
2306 PARALLEL. This can later be fed to emit_group_move to get things
2307 in the right place. */
2308
2309 rtx
2310 emit_group_load_into_temps (rtx parallel, rtx src, tree type, poly_int64 ssize)
2311 {
2312 rtvec vec;
2313 int i;
2314
2315 vec = rtvec_alloc (XVECLEN (parallel, 0));
2316 emit_group_load_1 (&RTVEC_ELT (vec, 0), parallel, src, type, ssize);
2317
2318 /* Convert the vector to look just like the original PARALLEL, except
2319 with the computed values. */
2320 for (i = 0; i < XVECLEN (parallel, 0); i++)
2321 {
2322 rtx e = XVECEXP (parallel, 0, i);
2323 rtx d = XEXP (e, 0);
2324
2325 if (d)
2326 {
2327 d = force_reg (GET_MODE (d), RTVEC_ELT (vec, i));
2328 e = alloc_EXPR_LIST (REG_NOTE_KIND (e), d, XEXP (e, 1));
2329 }
2330 RTVEC_ELT (vec, i) = e;
2331 }
2332
2333 return gen_rtx_PARALLEL (GET_MODE (parallel), vec);
2334 }
2335
2336 /* Emit code to move a block SRC to block DST, where SRC and DST are
2337 non-consecutive groups of registers, each represented by a PARALLEL. */
2338
2339 void
2340 emit_group_move (rtx dst, rtx src)
2341 {
2342 int i;
2343
2344 gcc_assert (GET_CODE (src) == PARALLEL
2345 && GET_CODE (dst) == PARALLEL
2346 && XVECLEN (src, 0) == XVECLEN (dst, 0));
2347
2348 /* Skip first entry if NULL. */
2349 for (i = XEXP (XVECEXP (src, 0, 0), 0) ? 0 : 1; i < XVECLEN (src, 0); i++)
2350 emit_move_insn (XEXP (XVECEXP (dst, 0, i), 0),
2351 XEXP (XVECEXP (src, 0, i), 0));
2352 }
2353
2354 /* Move a group of registers represented by a PARALLEL into pseudos. */
2355
2356 rtx
2357 emit_group_move_into_temps (rtx src)
2358 {
2359 rtvec vec = rtvec_alloc (XVECLEN (src, 0));
2360 int i;
2361
2362 for (i = 0; i < XVECLEN (src, 0); i++)
2363 {
2364 rtx e = XVECEXP (src, 0, i);
2365 rtx d = XEXP (e, 0);
2366
2367 if (d)
2368 e = alloc_EXPR_LIST (REG_NOTE_KIND (e), copy_to_reg (d), XEXP (e, 1));
2369 RTVEC_ELT (vec, i) = e;
2370 }
2371
2372 return gen_rtx_PARALLEL (GET_MODE (src), vec);
2373 }
2374
2375 /* Emit code to move a block SRC to a block ORIG_DST of type TYPE,
2376 where SRC is non-consecutive registers represented by a PARALLEL.
2377 SSIZE represents the total size of block ORIG_DST, or -1 if not
2378 known. */
2379
2380 void
2381 emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED,
2382 poly_int64 ssize)
2383 {
2384 rtx *tmps, dst;
2385 int start, finish, i;
2386 machine_mode m = GET_MODE (orig_dst);
2387
2388 gcc_assert (GET_CODE (src) == PARALLEL);
2389
2390 if (!SCALAR_INT_MODE_P (m)
2391 && !MEM_P (orig_dst) && GET_CODE (orig_dst) != CONCAT)
2392 {
2393 scalar_int_mode imode;
2394 if (int_mode_for_mode (GET_MODE (orig_dst)).exists (&imode))
2395 {
2396 dst = gen_reg_rtx (imode);
2397 emit_group_store (dst, src, type, ssize);
2398 dst = gen_lowpart (GET_MODE (orig_dst), dst);
2399 }
2400 else
2401 {
2402 dst = assign_stack_temp (GET_MODE (orig_dst), ssize);
2403 emit_group_store (dst, src, type, ssize);
2404 }
2405 emit_move_insn (orig_dst, dst);
2406 return;
2407 }
2408
2409 /* Check for a NULL entry, used to indicate that the parameter goes
2410 both on the stack and in registers. */
2411 if (XEXP (XVECEXP (src, 0, 0), 0))
2412 start = 0;
2413 else
2414 start = 1;
2415 finish = XVECLEN (src, 0);
2416
2417 tmps = XALLOCAVEC (rtx, finish);
2418
2419 /* Copy the (probable) hard regs into pseudos. */
2420 for (i = start; i < finish; i++)
2421 {
2422 rtx reg = XEXP (XVECEXP (src, 0, i), 0);
2423 if (!REG_P (reg) || REGNO (reg) < FIRST_PSEUDO_REGISTER)
2424 {
2425 tmps[i] = gen_reg_rtx (GET_MODE (reg));
2426 emit_move_insn (tmps[i], reg);
2427 }
2428 else
2429 tmps[i] = reg;
2430 }
2431
2432 /* If we won't be storing directly into memory, protect the real destination
2433 from strange tricks we might play. */
2434 dst = orig_dst;
2435 if (GET_CODE (dst) == PARALLEL)
2436 {
2437 rtx temp;
2438
2439 /* We can get a PARALLEL dst if there is a conditional expression in
2440 a return statement. In that case, the dst and src are the same,
2441 so no action is necessary. */
2442 if (rtx_equal_p (dst, src))
2443 return;
2444
2445 /* It is unclear if we can ever reach here, but we may as well handle
2446 it. Allocate a temporary, and split this into a store/load to/from
2447 the temporary. */
2448 temp = assign_stack_temp (GET_MODE (dst), ssize);
2449 emit_group_store (temp, src, type, ssize);
2450 emit_group_load (dst, temp, type, ssize);
2451 return;
2452 }
2453 else if (!MEM_P (dst) && GET_CODE (dst) != CONCAT)
2454 {
2455 machine_mode outer = GET_MODE (dst);
2456 machine_mode inner;
2457 poly_int64 bytepos;
2458 bool done = false;
2459 rtx temp;
2460
2461 if (!REG_P (dst) || REGNO (dst) < FIRST_PSEUDO_REGISTER)
2462 dst = gen_reg_rtx (outer);
2463
2464 /* Make life a bit easier for combine. */
2465 /* If the first element of the vector is the low part
2466 of the destination mode, use a paradoxical subreg to
2467 initialize the destination. */
2468 if (start < finish)
2469 {
2470 inner = GET_MODE (tmps[start]);
2471 bytepos = subreg_lowpart_offset (inner, outer);
2472 if (known_eq (INTVAL (XEXP (XVECEXP (src, 0, start), 1)), bytepos))
2473 {
2474 temp = simplify_gen_subreg (outer, tmps[start],
2475 inner, 0);
2476 if (temp)
2477 {
2478 emit_move_insn (dst, temp);
2479 done = true;
2480 start++;
2481 }
2482 }
2483 }
2484
2485 /* If the first element wasn't the low part, try the last. */
2486 if (!done
2487 && start < finish - 1)
2488 {
2489 inner = GET_MODE (tmps[finish - 1]);
2490 bytepos = subreg_lowpart_offset (inner, outer);
2491 if (known_eq (INTVAL (XEXP (XVECEXP (src, 0, finish - 1), 1)),
2492 bytepos))
2493 {
2494 temp = simplify_gen_subreg (outer, tmps[finish - 1],
2495 inner, 0);
2496 if (temp)
2497 {
2498 emit_move_insn (dst, temp);
2499 done = true;
2500 finish--;
2501 }
2502 }
2503 }
2504
2505 /* Otherwise, simply initialize the result to zero. */
2506 if (!done)
2507 emit_move_insn (dst, CONST0_RTX (outer));
2508 }
2509
2510 /* Process the pieces. */
2511 for (i = start; i < finish; i++)
2512 {
2513 poly_int64 bytepos = INTVAL (XEXP (XVECEXP (src, 0, i), 1));
2514 machine_mode mode = GET_MODE (tmps[i]);
2515 poly_int64 bytelen = GET_MODE_SIZE (mode);
2516 poly_uint64 adj_bytelen;
2517 rtx dest = dst;
2518
2519 /* Handle trailing fragments that run over the size of the struct.
2520 It's the target's responsibility to make sure that the fragment
2521 cannot be strictly smaller in some cases and strictly larger
2522 in others. */
2523 gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
2524 if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
2525 adj_bytelen = ssize - bytepos;
2526 else
2527 adj_bytelen = bytelen;
2528
2529 if (GET_CODE (dst) == CONCAT)
2530 {
2531 if (known_le (bytepos + adj_bytelen,
2532 GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
2533 dest = XEXP (dst, 0);
2534 else if (known_ge (bytepos, GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
2535 {
2536 bytepos -= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)));
2537 dest = XEXP (dst, 1);
2538 }
2539 else
2540 {
2541 machine_mode dest_mode = GET_MODE (dest);
2542 machine_mode tmp_mode = GET_MODE (tmps[i]);
2543
2544 gcc_assert (known_eq (bytepos, 0) && XVECLEN (src, 0));
2545
2546 if (GET_MODE_ALIGNMENT (dest_mode)
2547 >= GET_MODE_ALIGNMENT (tmp_mode))
2548 {
2549 dest = assign_stack_temp (dest_mode,
2550 GET_MODE_SIZE (dest_mode));
2551 emit_move_insn (adjust_address (dest,
2552 tmp_mode,
2553 bytepos),
2554 tmps[i]);
2555 dst = dest;
2556 }
2557 else
2558 {
2559 dest = assign_stack_temp (tmp_mode,
2560 GET_MODE_SIZE (tmp_mode));
2561 emit_move_insn (dest, tmps[i]);
2562 dst = adjust_address (dest, dest_mode, bytepos);
2563 }
2564 break;
2565 }
2566 }
2567
2568 /* Handle trailing fragments that run over the size of the struct. */
2569 if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
2570 {
2571 /* store_bit_field always takes its value from the lsb.
2572 Move the fragment to the lsb if it's not already there. */
2573 if (
2574 #ifdef BLOCK_REG_PADDING
2575 BLOCK_REG_PADDING (GET_MODE (orig_dst), type, i == start)
2576 == (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD)
2577 #else
2578 BYTES_BIG_ENDIAN
2579 #endif
2580 )
2581 {
2582 poly_int64 shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
2583 tmps[i] = expand_shift (RSHIFT_EXPR, mode, tmps[i],
2584 shift, tmps[i], 0);
2585 }
2586
2587 /* Make sure not to write past the end of the struct. */
2588 store_bit_field (dest,
2589 adj_bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT,
2590 bytepos * BITS_PER_UNIT, ssize * BITS_PER_UNIT - 1,
2591 VOIDmode, tmps[i], false);
2592 }
2593
2594 /* Optimize the access just a bit. */
2595 else if (MEM_P (dest)
2596 && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (dest))
2597 || MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode))
2598 && multiple_p (bytepos * BITS_PER_UNIT,
2599 GET_MODE_ALIGNMENT (mode))
2600 && known_eq (bytelen, GET_MODE_SIZE (mode)))
2601 emit_move_insn (adjust_address (dest, mode, bytepos), tmps[i]);
2602
2603 else
2604 store_bit_field (dest, bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT,
2605 0, 0, mode, tmps[i], false);
2606 }
2607
2608 /* Copy from the pseudo into the (probable) hard reg. */
2609 if (orig_dst != dst)
2610 emit_move_insn (orig_dst, dst);
2611 }
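
/* A worked example of the trailing-fragment handling above (numbers
illustrative): with SSIZE = 6 and a 4-byte piece at byte offset 4,
adj_bytelen becomes 2. On a big-endian target (or one whose
BLOCK_REG_PADDING selects that side) the piece is first shifted right
by (4 - (6 - 4)) * BITS_PER_UNIT = 16 bits so that its two live bytes
sit at the lsb, and store_bit_field then writes only those 16 bits,
never past the end of the struct. */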
2612
2613 /* Return a form of X that does not use a PARALLEL. TYPE is the type
2614 of the value stored in X. */
2615
2616 rtx
2617 maybe_emit_group_store (rtx x, tree type)
2618 {
2619 machine_mode mode = TYPE_MODE (type);
2620 gcc_checking_assert (GET_MODE (x) == VOIDmode || GET_MODE (x) == mode);
2621 if (GET_CODE (x) == PARALLEL)
2622 {
2623 rtx result = gen_reg_rtx (mode);
2624 emit_group_store (result, x, type, int_size_in_bytes (type));
2625 return result;
2626 }
2627 return x;
2628 }
2629
2630 /* Copy a BLKmode object of TYPE out of a register SRCREG into TARGET.
2631
2632 This is used on targets that return BLKmode values in registers. */
2633
2634 static void
2635 copy_blkmode_from_reg (rtx target, rtx srcreg, tree type)
2636 {
2637 unsigned HOST_WIDE_INT bytes = int_size_in_bytes (type);
2638 rtx src = NULL, dst = NULL;
2639 unsigned HOST_WIDE_INT bitsize = MIN (TYPE_ALIGN (type), BITS_PER_WORD);
2640 unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0;
2641 /* No current ABI uses variable-sized modes to pass a BLKmode type. */
2642 fixed_size_mode mode = as_a <fixed_size_mode> (GET_MODE (srcreg));
2643 fixed_size_mode tmode = as_a <fixed_size_mode> (GET_MODE (target));
2644 fixed_size_mode copy_mode;
2645
2646 /* BLKmode registers created in the back-end shouldn't have survived. */
2647 gcc_assert (mode != BLKmode);
2648
2649 /* If the structure doesn't take up a whole number of words, see whether
2650 SRCREG is padded on the left or on the right. If it's on the left,
2651 set PADDING_CORRECTION to the number of bits to skip.
2652
2653 In most ABIs, the structure will be returned at the least significant end of
2654 the register, which translates to right padding on little-endian
2655 targets and left padding on big-endian targets. The opposite
2656 holds if the structure is returned at the most significant
2657 end of the register. */
2658 if (bytes % UNITS_PER_WORD != 0
2659 && (targetm.calls.return_in_msb (type)
2660 ? !BYTES_BIG_ENDIAN
2661 : BYTES_BIG_ENDIAN))
2662 padding_correction
2663 = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD) * BITS_PER_UNIT));
2664
2665 /* We can use a single move if we have an exact mode for the size. */
2666 else if (MEM_P (target)
2667 && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (target))
2668 || MEM_ALIGN (target) >= GET_MODE_ALIGNMENT (mode))
2669 && bytes == GET_MODE_SIZE (mode))
2670 {
2671 emit_move_insn (adjust_address (target, mode, 0), srcreg);
2672 return;
2673 }
2674
2675 /* And if we additionally have the same mode for a register. */
2676 else if (REG_P (target)
2677 && GET_MODE (target) == mode
2678 && bytes == GET_MODE_SIZE (mode))
2679 {
2680 emit_move_insn (target, srcreg);
2681 return;
2682 }
2683
2684 /* This code assumes srcreg is at least a full word. If it isn't, copy it
2685 into a new pseudo which is a full word. */
2686 if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
2687 {
2688 srcreg = convert_to_mode (word_mode, srcreg, TYPE_UNSIGNED (type));
2689 mode = word_mode;
2690 }
2691
2692 /* Copy the structure BITSIZE bits at a time. If the target lives in
2693 memory, take care of not reading/writing past its end by selecting
2694 a copy mode suited to BITSIZE. This should always be possible given
2695 how it is computed.
2696
2697 If the target lives in a register, make sure not to select a copy mode
2698 larger than the mode of the register.
2699
2700 We could probably emit more efficient code for machines which do not use
2701 strict alignment, but it doesn't seem worth the effort at the current
2702 time. */
2703
2704 copy_mode = word_mode;
2705 if (MEM_P (target))
2706 {
2707 opt_scalar_int_mode mem_mode = int_mode_for_size (bitsize, 1);
2708 if (mem_mode.exists ())
2709 copy_mode = mem_mode.require ();
2710 }
2711 else if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
2712 copy_mode = tmode;
2713
2714 for (bitpos = 0, xbitpos = padding_correction;
2715 bitpos < bytes * BITS_PER_UNIT;
2716 bitpos += bitsize, xbitpos += bitsize)
2717 {
2718 /* We need a new source operand each time xbitpos is on a
2719 word boundary and when xbitpos == padding_correction
2720 (the first time through). */
2721 if (xbitpos % BITS_PER_WORD == 0 || xbitpos == padding_correction)
2722 src = operand_subword_force (srcreg, xbitpos / BITS_PER_WORD, mode);
2723
2724 /* We need a new destination operand each time bitpos is on
2725 a word boundary. */
2726 if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
2727 dst = target;
2728 else if (bitpos % BITS_PER_WORD == 0)
2729 dst = operand_subword (target, bitpos / BITS_PER_WORD, 1, tmode);
2730
2731 /* Use xbitpos for the source extraction (right justified) and
2732 bitpos for the destination store (left justified). */
2733 store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, 0, 0, copy_mode,
2734 extract_bit_field (src, bitsize,
2735 xbitpos % BITS_PER_WORD, 1,
2736 NULL_RTX, copy_mode, copy_mode,
2737 false, NULL),
2738 false);
2739 }
2740 }
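
/* A hedged example of the padding correction above: for a 5-byte
struct arriving in a 64-bit SRCREG on a big-endian target that does
not return in the MSB, PADDING_CORRECTION = 64 - 5 * 8 = 24, so
extraction starts 24 bits into the register (skipping the left
padding) while the stores into TARGET still start at bit 0. (Assumes
BITS_PER_WORD == 64; values illustrative.) */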
2741
2742 /* Copy BLKmode value SRC into a register of mode MODE_IN. Return the
2743 register if it contains any data, otherwise return null.
2744
2745 This is used on targets that return BLKmode values in registers. */
2746
2747 rtx
2748 copy_blkmode_to_reg (machine_mode mode_in, tree src)
2749 {
2750 int i, n_regs;
2751 unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0, bytes;
2752 unsigned int bitsize;
2753 rtx *dst_words, dst, x, src_word = NULL_RTX, dst_word = NULL_RTX;
2754 /* No current ABI uses variable-sized modes to pass a BLKmode type. */
2755 fixed_size_mode mode = as_a <fixed_size_mode> (mode_in);
2756 fixed_size_mode dst_mode;
2757
2758 gcc_assert (TYPE_MODE (TREE_TYPE (src)) == BLKmode);
2759
2760 x = expand_normal (src);
2761
2762 bytes = arg_int_size_in_bytes (TREE_TYPE (src));
2763 if (bytes == 0)
2764 return NULL_RTX;
2765
2766 /* If the structure doesn't take up a whole number of words, see
2767 whether the register value should be padded on the left or on
2768 the right. Set PADDING_CORRECTION to the number of padding
2769 bits needed on the left side.
2770
2771 In most ABIs, the structure will be returned at the least significant end of
2772 the register, which translates to right padding on little-endian
2773 targets and left padding on big-endian targets. The opposite
2774 holds if the structure is returned at the most significant
2775 end of the register. */
2776 if (bytes % UNITS_PER_WORD != 0
2777 && (targetm.calls.return_in_msb (TREE_TYPE (src))
2778 ? !BYTES_BIG_ENDIAN
2779 : BYTES_BIG_ENDIAN))
2780 padding_correction = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD)
2781 * BITS_PER_UNIT));
2782
2783 n_regs = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2784 dst_words = XALLOCAVEC (rtx, n_regs);
2785 bitsize = BITS_PER_WORD;
2786 if (targetm.slow_unaligned_access (word_mode, TYPE_ALIGN (TREE_TYPE (src))))
2787 bitsize = MIN (TYPE_ALIGN (TREE_TYPE (src)), BITS_PER_WORD);
2788
2789 /* Copy the structure BITSIZE bits at a time. */
2790 for (bitpos = 0, xbitpos = padding_correction;
2791 bitpos < bytes * BITS_PER_UNIT;
2792 bitpos += bitsize, xbitpos += bitsize)
2793 {
2794 /* We need a new destination pseudo each time xbitpos is
2795 on a word boundary and when xbitpos == padding_correction
2796 (the first time through). */
2797 if (xbitpos % BITS_PER_WORD == 0
2798 || xbitpos == padding_correction)
2799 {
2800 /* Generate an appropriate register. */
2801 dst_word = gen_reg_rtx (word_mode);
2802 dst_words[xbitpos / BITS_PER_WORD] = dst_word;
2803
2804 /* Clear the destination before we move anything into it. */
2805 emit_move_insn (dst_word, CONST0_RTX (word_mode));
2806 }
2807
2808 /* We need a new source operand each time bitpos is on a word
2809 boundary. */
2810 if (bitpos % BITS_PER_WORD == 0)
2811 src_word = operand_subword_force (x, bitpos / BITS_PER_WORD, BLKmode);
2812
2813 /* Use bitpos for the source extraction (left justified) and
2814 xbitpos for the destination store (right justified). */
2815 store_bit_field (dst_word, bitsize, xbitpos % BITS_PER_WORD,
2816 0, 0, word_mode,
2817 extract_bit_field (src_word, bitsize,
2818 bitpos % BITS_PER_WORD, 1,
2819 NULL_RTX, word_mode, word_mode,
2820 false, NULL),
2821 false);
2822 }
2823
2824 if (mode == BLKmode)
2825 {
2826 /* Find the smallest integer mode large enough to hold the
2827 entire structure. */
2828 opt_scalar_int_mode mode_iter;
2829 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
2830 if (GET_MODE_SIZE (mode_iter.require ()) >= bytes)
2831 break;
2832
2833 /* A suitable mode should have been found. */
2834 mode = mode_iter.require ();
2835 }
2836
2837 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (word_mode))
2838 dst_mode = word_mode;
2839 else
2840 dst_mode = mode;
2841 dst = gen_reg_rtx (dst_mode);
2842
2843 for (i = 0; i < n_regs; i++)
2844 emit_move_insn (operand_subword (dst, i, 0, dst_mode), dst_words[i]);
2845
2846 if (mode != dst_mode)
2847 dst = gen_lowpart (mode, dst);
2848
2849 return dst;
2850 }
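
/* For illustration: this function is the mirror image of
copy_blkmode_from_reg above. Here BITPOS (left justified) drives the
extraction from the source value and XBITPOS (right justified, offset
by PADDING_CORRECTION) drives the stores into the destination words --
exactly the roles the two counters swap when copying in the other
direction. */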
2851
2852 /* Add a USE expression for REG to the (possibly empty) list pointed
2853 to by CALL_FUSAGE. REG must denote a hard register. */
2854
2855 void
2856 use_reg_mode (rtx *call_fusage, rtx reg, machine_mode mode)
2857 {
2858 gcc_assert (REG_P (reg));
2859
2860 if (!HARD_REGISTER_P (reg))
2861 return;
2862
2863 *call_fusage
2864 = gen_rtx_EXPR_LIST (mode, gen_rtx_USE (VOIDmode, reg), *call_fusage);
2865 }
2866
2867 /* Add a CLOBBER expression for REG to the (possibly empty) list pointed
2868 to by CALL_FUSAGE. REG must denote a hard register. */
2869
2870 void
2871 clobber_reg_mode (rtx *call_fusage, rtx reg, machine_mode mode)
2872 {
2873 gcc_assert (REG_P (reg) && REGNO (reg) < FIRST_PSEUDO_REGISTER);
2874
2875 *call_fusage
2876 = gen_rtx_EXPR_LIST (mode, gen_rtx_CLOBBER (VOIDmode, reg), *call_fusage);
2877 }
2878
2879 /* Add USE expressions to *CALL_FUSAGE for each of NREGS consecutive regs,
2880 starting at REGNO. All of these registers must be hard registers. */
2881
2882 void
2883 use_regs (rtx *call_fusage, int regno, int nregs)
2884 {
2885 int i;
2886
2887 gcc_assert (regno + nregs <= FIRST_PSEUDO_REGISTER);
2888
2889 for (i = 0; i < nregs; i++)
2890 use_reg (call_fusage, regno_reg_rtx[regno + i]);
2891 }
2892
2893 /* Add USE expressions to *CALL_FUSAGE for each REG contained in the
2894 PARALLEL REGS. This is for calls that pass values in multiple
2895 non-contiguous locations. The Irix 6 ABI has examples of this. */
2896
2897 void
2898 use_group_regs (rtx *call_fusage, rtx regs)
2899 {
2900 int i;
2901
2902 for (i = 0; i < XVECLEN (regs, 0); i++)
2903 {
2904 rtx reg = XEXP (XVECEXP (regs, 0, i), 0);
2905
2906 /* A NULL entry means the parameter goes both on the stack and in
2907 registers. This can also be a MEM for targets that pass values
2908 partially on the stack and partially in registers. */
2909 if (reg != 0 && REG_P (reg))
2910 use_reg (call_fusage, reg);
2911 }
2912 }
2913
2914 /* Return the defining gimple statement for SSA_NAME NAME if it is an
2915 assignment and the code of the expression on the RHS is CODE. Return
2916 NULL otherwise. */
2917
2918 static gimple *
2919 get_def_for_expr (tree name, enum tree_code code)
2920 {
2921 gimple *def_stmt;
2922
2923 if (TREE_CODE (name) != SSA_NAME)
2924 return NULL;
2925
2926 def_stmt = get_gimple_for_ssa_name (name);
2927 if (!def_stmt
2928 || gimple_assign_rhs_code (def_stmt) != code)
2929 return NULL;
2930
2931 return def_stmt;
2932 }
2933
2934 /* Return the defining gimple statement for SSA_NAME NAME if it is an
2935 assignment and the class of the expression on the RHS is CLASS. Return
2936 NULL otherwise. */
2937
2938 static gimple *
2939 get_def_for_expr_class (tree name, enum tree_code_class tclass)
2940 {
2941 gimple *def_stmt;
2942
2943 if (TREE_CODE (name) != SSA_NAME)
2944 return NULL;
2945
2946 def_stmt = get_gimple_for_ssa_name (name);
2947 if (!def_stmt
2948 || TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt)) != tclass)
2949 return NULL;
2950
2951 return def_stmt;
2952 }
2953 \f
2954 /* Write zeros through the storage of OBJECT. If OBJECT has BLKmode, SIZE is
2955 its length in bytes. */
2956
2957 rtx
2958 clear_storage_hints (rtx object, rtx size, enum block_op_methods method,
2959 unsigned int expected_align, HOST_WIDE_INT expected_size,
2960 unsigned HOST_WIDE_INT min_size,
2961 unsigned HOST_WIDE_INT max_size,
2962 unsigned HOST_WIDE_INT probable_max_size)
2963 {
2964 machine_mode mode = GET_MODE (object);
2965 unsigned int align;
2966
2967 gcc_assert (method == BLOCK_OP_NORMAL || method == BLOCK_OP_TAILCALL);
2968
2969 /* If OBJECT is not BLKmode and SIZE is the same size as its mode,
2970 just move a zero. Otherwise, do this a piece at a time. */
2971 if (mode != BLKmode
2972 && CONST_INT_P (size)
2973 && known_eq (INTVAL (size), GET_MODE_SIZE (mode)))
2974 {
2975 rtx zero = CONST0_RTX (mode);
2976 if (zero != NULL)
2977 {
2978 emit_move_insn (object, zero);
2979 return NULL;
2980 }
2981
2982 if (COMPLEX_MODE_P (mode))
2983 {
2984 zero = CONST0_RTX (GET_MODE_INNER (mode));
2985 if (zero != NULL)
2986 {
2987 write_complex_part (object, zero, 0);
2988 write_complex_part (object, zero, 1);
2989 return NULL;
2990 }
2991 }
2992 }
2993
2994 if (size == const0_rtx)
2995 return NULL;
2996
2997 align = MEM_ALIGN (object);
2998
2999 if (CONST_INT_P (size)
3000 && targetm.use_by_pieces_infrastructure_p (INTVAL (size), align,
3001 CLEAR_BY_PIECES,
3002 optimize_insn_for_speed_p ()))
3003 clear_by_pieces (object, INTVAL (size), align);
3004 else if (set_storage_via_setmem (object, size, const0_rtx, align,
3005 expected_align, expected_size,
3006 min_size, max_size, probable_max_size))
3007 ;
3008 else if (ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (object)))
3009 return set_storage_via_libcall (object, size, const0_rtx,
3010 method == BLOCK_OP_TAILCALL);
3011 else
3012 gcc_unreachable ();
3013
3014 return NULL;
3015 }
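
/* For illustration: the strategy order above is fixed -- open-code
with clear_by_pieces when the size is constant and the target's cost
model approves, else try the setmem expander, else fall back to a
memset libcall. The libcall is attempted only for the generic address
space; for any other address space one of the first two strategies
must succeed, hence the gcc_unreachable. */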
3016
3017 rtx
3018 clear_storage (rtx object, rtx size, enum block_op_methods method)
3019 {
3020 unsigned HOST_WIDE_INT max, min = 0;
3021 if (GET_CODE (size) == CONST_INT)
3022 min = max = UINTVAL (size);
3023 else
3024 max = GET_MODE_MASK (GET_MODE (size));
3025 return clear_storage_hints (object, size, method, 0, -1, min, max, max);
3026 }
3027
3028
3029 /* A subroutine of clear_storage. Expand a call to memset.
3030 Return the return value of memset, 0 otherwise. */
3031
3032 rtx
3033 set_storage_via_libcall (rtx object, rtx size, rtx val, bool tailcall)
3034 {
3035 tree call_expr, fn, object_tree, size_tree, val_tree;
3036 machine_mode size_mode;
3037
3038 object = copy_addr_to_reg (XEXP (object, 0));
3039 object_tree = make_tree (ptr_type_node, object);
3040
3041 if (!CONST_INT_P (val))
3042 val = convert_to_mode (TYPE_MODE (integer_type_node), val, 1);
3043 val_tree = make_tree (integer_type_node, val);
3044
3045 size_mode = TYPE_MODE (sizetype);
3046 size = convert_to_mode (size_mode, size, 1);
3047 size = copy_to_mode_reg (size_mode, size);
3048 size_tree = make_tree (sizetype, size);
3049
3050 /* It is incorrect to use the libcall calling conventions for calls to
3051 memset because it can be provided by the user. */
3052 fn = builtin_decl_implicit (BUILT_IN_MEMSET);
3053 call_expr = build_call_expr (fn, 3, object_tree, val_tree, size_tree);
3054 CALL_EXPR_TAILCALL (call_expr) = tailcall;
3055
3056 return expand_call (call_expr, NULL_RTX, false);
3057 }
3058 \f
3059 /* Expand a setmem pattern; return true if successful. */
3060
3061 bool
3062 set_storage_via_setmem (rtx object, rtx size, rtx val, unsigned int align,
3063 unsigned int expected_align, HOST_WIDE_INT expected_size,
3064 unsigned HOST_WIDE_INT min_size,
3065 unsigned HOST_WIDE_INT max_size,
3066 unsigned HOST_WIDE_INT probable_max_size)
3067 {
3068 /* Try the most limited insn first, because there's no point
3069 including more than one in the machine description unless
3070 the more limited one has some advantage. */
3071
3072 if (expected_align < align)
3073 expected_align = align;
3074 if (expected_size != -1)
3075 {
3076 if ((unsigned HOST_WIDE_INT)expected_size > max_size)
3077 expected_size = max_size;
3078 if ((unsigned HOST_WIDE_INT)expected_size < min_size)
3079 expected_size = min_size;
3080 }
3081
3082 opt_scalar_int_mode mode_iter;
3083 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
3084 {
3085 scalar_int_mode mode = mode_iter.require ();
3086 enum insn_code code = direct_optab_handler (setmem_optab, mode);
3087
3088 if (code != CODE_FOR_nothing
3089 /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
3090 here, because if SIZE is less than the mode mask as computed
3091 by the macro (truncated to a host-wide integer), it is
3092 certainly less than the actual mode mask. Since SIZE is
3093 within the Pmode address space, we limit MODE to Pmode. */
3094 && ((CONST_INT_P (size)
3095 && ((unsigned HOST_WIDE_INT) INTVAL (size)
3096 <= (GET_MODE_MASK (mode) >> 1)))
3097 || max_size <= (GET_MODE_MASK (mode) >> 1)
3098 || GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode)))
3099 {
3100 struct expand_operand ops[9];
3101 unsigned int nops;
3102
3103 nops = insn_data[(int) code].n_generator_args;
3104 gcc_assert (nops == 4 || nops == 6 || nops == 8 || nops == 9);
3105
3106 create_fixed_operand (&ops[0], object);
3107 /* The check above guarantees that this size conversion is valid. */
3108 create_convert_operand_to (&ops[1], size, mode, true);
3109 create_convert_operand_from (&ops[2], val, byte_mode, true);
3110 create_integer_operand (&ops[3], align / BITS_PER_UNIT);
3111 if (nops >= 6)
3112 {
3113 create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
3114 create_integer_operand (&ops[5], expected_size);
3115 }
3116 if (nops >= 8)
3117 {
3118 create_integer_operand (&ops[6], min_size);
3119 /* If we cannot represent the maximal size,
3120 pass NULL for the parameter. */
3121 if ((HOST_WIDE_INT) max_size != -1)
3122 create_integer_operand (&ops[7], max_size);
3123 else
3124 create_fixed_operand (&ops[7], NULL);
3125 }
3126 if (nops == 9)
3127 {
3128 /* If we cannot represent the maximal size,
3129 pass NULL for the parameter. */
3130 if ((HOST_WIDE_INT) probable_max_size != -1)
3131 create_integer_operand (&ops[8], probable_max_size);
3132 else
3133 create_fixed_operand (&ops[8], NULL);
3134 }
3135 if (maybe_expand_insn (code, nops, ops))
3136 return true;
3137 }
3138 }
3139
3140 return false;
3141 }
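
/* For illustration: the operand layout parallels
emit_block_move_via_movmem above, except that operand 1 is the length
and operand 2 the fill value (converted to byte_mode), and the same
4/6/8/9-operand progression delivers the same size and alignment
hints to setmem patterns as to movmem patterns. */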
3142
3143 \f
3144 /* Write to one of the components of the complex value CPLX. Write VAL to
3145 the real part if IMAG_P is false, and the imaginary part if it's true. */
3146
3147 void
3148 write_complex_part (rtx cplx, rtx val, bool imag_p)
3149 {
3150 machine_mode cmode;
3151 scalar_mode imode;
3152 unsigned ibitsize;
3153
3154 if (GET_CODE (cplx) == CONCAT)
3155 {
3156 emit_move_insn (XEXP (cplx, imag_p), val);
3157 return;
3158 }
3159
3160 cmode = GET_MODE (cplx);
3161 imode = GET_MODE_INNER (cmode);
3162 ibitsize = GET_MODE_BITSIZE (imode);
3163
3164 /* For MEMs simplify_gen_subreg may generate an invalid new address
3165 because, e.g., the original address is considered mode-dependent
3166 by the target, which restricts simplify_subreg from invoking
3167 adjust_address_nv. Instead of preparing fallback support for an
3168 invalid address, we call adjust_address_nv directly. */
3169 if (MEM_P (cplx))
3170 {
3171 emit_move_insn (adjust_address_nv (cplx, imode,
3172 imag_p ? GET_MODE_SIZE (imode) : 0),
3173 val);
3174 return;
3175 }
3176
3177 /* If the sub-object is at least word sized, then we know that subregging
3178 will work. This special case is important, since store_bit_field
3179 wants to operate on integer modes, and there's rarely an OImode to
3180 correspond to TCmode. */
3181 if (ibitsize >= BITS_PER_WORD
3182 /* For hard regs we have exact predicates. Assume we can split
3183 the original object if it spans an even number of hard regs.
3184 This special case is important for SCmode on 64-bit platforms
3185 where the natural size of floating-point regs is 32-bit. */
3186 || (REG_P (cplx)
3187 && REGNO (cplx) < FIRST_PSEUDO_REGISTER
3188 && REG_NREGS (cplx) % 2 == 0))
3189 {
3190 rtx part = simplify_gen_subreg (imode, cplx, cmode,
3191 imag_p ? GET_MODE_SIZE (imode) : 0);
3192 if (part)
3193 {
3194 emit_move_insn (part, val);
3195 return;
3196 }
3197 else
3198 /* simplify_gen_subreg may fail for sub-word MEMs. */
3199 gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
3200 }
3201
3202 store_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0, 0, 0, imode, val,
3203 false);
3204 }
3205
3206 /* Extract one of the components of the complex value CPLX. Extract the
3207 real part if IMAG_P is false, and the imaginary part if it's true. */
3208
3209 rtx
3210 read_complex_part (rtx cplx, bool imag_p)
3211 {
3212 machine_mode cmode;
3213 scalar_mode imode;
3214 unsigned ibitsize;
3215
3216 if (GET_CODE (cplx) == CONCAT)
3217 return XEXP (cplx, imag_p);
3218
3219 cmode = GET_MODE (cplx);
3220 imode = GET_MODE_INNER (cmode);
3221 ibitsize = GET_MODE_BITSIZE (imode);
3222
3223 /* Special case reads from complex constants that got spilled to memory. */
3224 if (MEM_P (cplx) && GET_CODE (XEXP (cplx, 0)) == SYMBOL_REF)
3225 {
3226 tree decl = SYMBOL_REF_DECL (XEXP (cplx, 0));
3227 if (decl && TREE_CODE (decl) == COMPLEX_CST)
3228 {
3229 tree part = imag_p ? TREE_IMAGPART (decl) : TREE_REALPART (decl);
3230 if (CONSTANT_CLASS_P (part))
3231 return expand_expr (part, NULL_RTX, imode, EXPAND_NORMAL);
3232 }
3233 }
3234
3235 /* For MEMs simplify_gen_subreg may generate an invalid new address
3236 because, e.g., the original address is considered mode-dependent
3237 by the target, which restricts simplify_subreg from invoking
3238 adjust_address_nv. Instead of preparing fallback support for an
3239 invalid address, we call adjust_address_nv directly. */
3240 if (MEM_P (cplx))
3241 return adjust_address_nv (cplx, imode,
3242 imag_p ? GET_MODE_SIZE (imode) : 0);
3243
3244 /* If the sub-object is at least word sized, then we know that subregging
3245 will work. This special case is important, since extract_bit_field
3246 wants to operate on integer modes, and there's rarely an OImode to
3247 correspond to TCmode. */
3248 if (ibitsize >= BITS_PER_WORD
3249 /* For hard regs we have exact predicates. Assume we can split
3250 the original object if it spans an even number of hard regs.
3251 This special case is important for SCmode on 64-bit platforms
3252 where the natural size of floating-point regs is 32-bit. */
3253 || (REG_P (cplx)
3254 && REGNO (cplx) < FIRST_PSEUDO_REGISTER
3255 && REG_NREGS (cplx) % 2 == 0))
3256 {
3257 rtx ret = simplify_gen_subreg (imode, cplx, cmode,
3258 imag_p ? GET_MODE_SIZE (imode) : 0);
3259 if (ret)
3260 return ret;
3261 else
3262 /* simplify_gen_subreg may fail for sub-word MEMs. */
3263 gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
3264 }
3265
3266 return extract_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0,
3267 true, NULL_RTX, imode, imode, false, NULL);
3268 }
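
/* For illustration: read_complex_part and write_complex_part above
share a three-tier strategy -- a CONCAT hands over XEXP (cplx, imag_p)
directly; a MEM is accessed at byte offset 0 or GET_MODE_SIZE (imode)
via adjust_address_nv; anything else first tries a subreg and falls
back to the bit-field routines only when the subreg cannot be
formed. */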
3269 \f
3270 /* A subroutine of emit_move_insn_1. Yet another lowpart generator.
3271 NEW_MODE and OLD_MODE are the same size. Return NULL if X cannot be
3272 represented in NEW_MODE. If FORCE is true, this will never happen, as
3273 we'll force-create a SUBREG if needed. */
3274
3275 static rtx
3276 emit_move_change_mode (machine_mode new_mode,
3277 machine_mode old_mode, rtx x, bool force)
3278 {
3279 rtx ret;
3280
3281 if (push_operand (x, GET_MODE (x)))
3282 {
3283 ret = gen_rtx_MEM (new_mode, XEXP (x, 0));
3284 MEM_COPY_ATTRIBUTES (ret, x);
3285 }
3286 else if (MEM_P (x))
3287 {
3288 /* We don't have to worry about changing the address since the
3289 size in bytes is supposed to be the same. */
3290 if (reload_in_progress)
3291 {
3292 /* Copy the MEM to change the mode and move any
3293 substitutions from the old MEM to the new one. */
3294 ret = adjust_address_nv (x, new_mode, 0);
3295 copy_replacements (x, ret);
3296 }
3297 else
3298 ret = adjust_address (x, new_mode, 0);
3299 }
3300 else
3301 {
3302 /* Note that we do want simplify_subreg's behavior of validating
3303 that the new mode is ok for a hard register. If we were to use
3304 simplify_gen_subreg, we would create the subreg, but would
3305 probably run into the target not being able to implement it. */
3306 /* Except, of course, when FORCE is true, when this is exactly what
3307 we want; that is needed for CCmodes on some targets. */
3308 if (force)
3309 ret = simplify_gen_subreg (new_mode, x, old_mode, 0);
3310 else
3311 ret = simplify_subreg (new_mode, x, old_mode, 0);
3312 }
3313
3314 return ret;
3315 }
3316
3317 /* A subroutine of emit_move_insn_1. Generate a move from Y into X using
3318 an integer mode of the same size as MODE. Returns the instruction
3319 emitted, or NULL if such a move could not be generated. */
3320
3321 static rtx_insn *
3322 emit_move_via_integer (machine_mode mode, rtx x, rtx y, bool force)
3323 {
3324 scalar_int_mode imode;
3325 enum insn_code code;
3326
3327 /* There must exist a mode of the exact size we require. */
3328 if (!int_mode_for_mode (mode).exists (&imode))
3329 return NULL;
3330
3331 /* The target must support moves in this mode. */
3332 code = optab_handler (mov_optab, imode);
3333 if (code == CODE_FOR_nothing)
3334 return NULL;
3335
3336 x = emit_move_change_mode (imode, mode, x, force);
3337 if (x == NULL_RTX)
3338 return NULL;
3339 y = emit_move_change_mode (imode, mode, y, force);
3340 if (y == NULL_RTX)
3341 return NULL;
3342 return emit_insn (GEN_FCN (code) (x, y));
3343 }
3344
3345 /* A subroutine of emit_move_insn_1. X is a push_operand in MODE.
3346 Return an equivalent MEM that does not use an auto-increment. */
3347
3348 rtx
3349 emit_move_resolve_push (machine_mode mode, rtx x)
3350 {
3351 enum rtx_code code = GET_CODE (XEXP (x, 0));
3352 rtx temp;
3353
3354 poly_int64 adjust = GET_MODE_SIZE (mode);
3355 #ifdef PUSH_ROUNDING
3356 adjust = PUSH_ROUNDING (adjust);
3357 #endif
3358 if (code == PRE_DEC || code == POST_DEC)
3359 adjust = -adjust;
3360 else if (code == PRE_MODIFY || code == POST_MODIFY)
3361 {
3362 rtx expr = XEXP (XEXP (x, 0), 1);
3363
3364 gcc_assert (GET_CODE (expr) == PLUS || GET_CODE (expr) == MINUS);
3365 poly_int64 val = rtx_to_poly_int64 (XEXP (expr, 1));
3366 if (GET_CODE (expr) == MINUS)
3367 val = -val;
3368 gcc_assert (known_eq (adjust, val) || known_eq (adjust, -val));
3369 adjust = val;
3370 }
3371
3372 /* Do not use anti_adjust_stack, since we don't want to update
3373 stack_pointer_delta. */
3374 temp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
3375 gen_int_mode (adjust, Pmode), stack_pointer_rtx,
3376 0, OPTAB_LIB_WIDEN);
3377 if (temp != stack_pointer_rtx)
3378 emit_move_insn (stack_pointer_rtx, temp);
3379
3380 switch (code)
3381 {
3382 case PRE_INC:
3383 case PRE_DEC:
3384 case PRE_MODIFY:
3385 temp = stack_pointer_rtx;
3386 break;
3387 case POST_INC:
3388 case POST_DEC:
3389 case POST_MODIFY:
3390 temp = plus_constant (Pmode, stack_pointer_rtx, -adjust);
3391 break;
3392 default:
3393 gcc_unreachable ();
3394 }
3395
3396 return replace_equiv_address (x, temp);
3397 }
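
/* A worked example (sizes illustrative): for a PRE_DEC push of an
8-byte mode this emits an explicit "sp = sp + (-8)" (after any
PUSH_ROUNDING) and returns a MEM at the new stack pointer; for
POST_INC the returned MEM sits at sp - 8, i.e. at the address the
auto-increment form would have written before bumping the pointer. */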
3398
3399 /* A subroutine of emit_move_complex. Generate a move from Y into X.
3400 X is known to satisfy push_operand, and MODE is known to be complex.
3401 Returns the last instruction emitted. */
3402
3403 rtx_insn *
3404 emit_move_complex_push (machine_mode mode, rtx x, rtx y)
3405 {
3406 scalar_mode submode = GET_MODE_INNER (mode);
3407 bool imag_first;
3408
3409 #ifdef PUSH_ROUNDING
3410 poly_int64 submodesize = GET_MODE_SIZE (submode);
3411
3412 /* If we push to the stack but the machine cannot push exactly this
3413 size (PUSH_ROUNDING pads the push), we need to use move instructions. */
3414 if (maybe_ne (PUSH_ROUNDING (submodesize), submodesize))
3415 {
3416 x = emit_move_resolve_push (mode, x);
3417 return emit_move_insn (x, y);
3418 }
3419 #endif
3420
3421 /* Note that the real part always precedes the imag part in memory
3422 regardless of the machine's endianness. */
3423 switch (GET_CODE (XEXP (x, 0)))
3424 {
3425 case PRE_DEC:
3426 case POST_DEC:
3427 imag_first = true;
3428 break;
3429 case PRE_INC:
3430 case POST_INC:
3431 imag_first = false;
3432 break;
3433 default:
3434 gcc_unreachable ();
3435 }
3436
3437 emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
3438 read_complex_part (y, imag_first));
3439 return emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
3440 read_complex_part (y, !imag_first));
3441 }
3442
3443 /* A subroutine of emit_move_complex. Perform the move from Y to X
3444 via two moves of the parts. Returns the last instruction emitted. */
3445
3446 rtx_insn *
3447 emit_move_complex_parts (rtx x, rtx y)
3448 {
3449 /* Show the output dies here. This is necessary for SUBREGs
3450 of pseudos since we cannot track their lifetimes correctly;
3451 hard regs shouldn't appear here except as return values. */
3452 if (!reload_completed && !reload_in_progress
3453 && REG_P (x) && !reg_overlap_mentioned_p (x, y))
3454 emit_clobber (x);
3455
3456 write_complex_part (x, read_complex_part (y, false), false);
3457 write_complex_part (x, read_complex_part (y, true), true);
3458
3459 return get_last_insn ();
3460 }
3461
3462 /* A subroutine of emit_move_insn_1. Generate a move from Y into X.
3463 MODE is known to be complex. Returns the last instruction emitted. */
3464
3465 static rtx_insn *
3466 emit_move_complex (machine_mode mode, rtx x, rtx y)
3467 {
3468 bool try_int;
3469
3470 /* Need to take special care for pushes, to maintain proper ordering
3471 of the data, and possibly extra padding. */
3472 if (push_operand (x, mode))
3473 return emit_move_complex_push (mode, x, y);
3474
3475 /* See if we can coerce the target into moving both values at once, except
3476 for floating point where we favor moving as parts if this is easy. */
3477 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3478 && optab_handler (mov_optab, GET_MODE_INNER (mode)) != CODE_FOR_nothing
3479 && !(REG_P (x)
3480 && HARD_REGISTER_P (x)
3481 && REG_NREGS (x) == 1)
3482 && !(REG_P (y)
3483 && HARD_REGISTER_P (y)
3484 && REG_NREGS (y) == 1))
3485 try_int = false;
3486 /* Not possible if the values are inherently not adjacent. */
3487 else if (GET_CODE (x) == CONCAT || GET_CODE (y) == CONCAT)
3488 try_int = false;
3489 /* Is possible if both are registers (or subregs of registers). */
3490 else if (register_operand (x, mode) && register_operand (y, mode))
3491 try_int = true;
3492 /* If one of the operands is a memory, and alignment constraints
3493 are friendly enough, we may be able to do combined memory operations.
3494 We do not attempt this if Y is a constant because that combination is
3495 usually better with the by-parts thing below. */
3496 else if ((MEM_P (x) ? !CONSTANT_P (y) : MEM_P (y))
3497 && (!STRICT_ALIGNMENT
3498 || get_mode_alignment (mode) == BIGGEST_ALIGNMENT))
3499 try_int = true;
3500 else
3501 try_int = false;
3502
3503 if (try_int)
3504 {
3505 rtx_insn *ret;
3506
3507 /* For memory to memory moves, optimal behavior can be had with the
3508 existing block move logic. */
3509 if (MEM_P (x) && MEM_P (y))
3510 {
3511 emit_block_move (x, y, gen_int_mode (GET_MODE_SIZE (mode), Pmode),
3512 BLOCK_OP_NO_LIBCALL);
3513 return get_last_insn ();
3514 }
3515
3516 ret = emit_move_via_integer (mode, x, y, true);
3517 if (ret)
3518 return ret;
3519 }
3520
3521 return emit_move_complex_parts (x, y);
3522 }
3523
3524 /* A subroutine of emit_move_insn_1. Generate a move from Y into X.
3525 MODE is known to be MODE_CC. Returns the last instruction emitted. */
3526
3527 static rtx_insn *
3528 emit_move_ccmode (machine_mode mode, rtx x, rtx y)
3529 {
3530 rtx_insn *ret;
3531
3532 /* Assume all MODE_CC modes are equivalent; if we have movcc, use it. */
3533 if (mode != CCmode)
3534 {
3535 enum insn_code code = optab_handler (mov_optab, CCmode);
3536 if (code != CODE_FOR_nothing)
3537 {
3538 x = emit_move_change_mode (CCmode, mode, x, true);
3539 y = emit_move_change_mode (CCmode, mode, y, true);
3540 return emit_insn (GEN_FCN (code) (x, y));
3541 }
3542 }
3543
3544 /* Otherwise, find the MODE_INT mode of the same width. */
3545 ret = emit_move_via_integer (mode, x, y, false);
3546 gcc_assert (ret != NULL);
3547 return ret;
3548 }
3549
3550 /* Return true if word I of OP lies entirely in the
3551 undefined bits of a paradoxical subreg. */
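
/* E.g. for a paradoxical (subreg:TI (reg:DI X) 0) on a little-endian
   64-bit target, word 1 lies entirely in the undefined upper half,
   so no move needs to be generated for it.  */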
3552
3553 static bool
3554 undefined_operand_subword_p (const_rtx op, int i)
3555 {
3556 if (GET_CODE (op) != SUBREG)
3557 return false;
3558 machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
3559 poly_int64 offset = i * UNITS_PER_WORD + subreg_memory_offset (op);
3560 return (known_ge (offset, GET_MODE_SIZE (innermostmode))
3561 || known_le (offset, -UNITS_PER_WORD));
3562 }
3563
3564 /* A subroutine of emit_move_insn_1. Generate a move from Y into X.
3565 MODE is any multi-word or full-word mode that lacks a move_insn
3566 pattern. Note that you will get better code if you define such
3567 patterns, even if they must turn into multiple assembler instructions. */
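
/* For instance, a TImode move on a 64-bit target that lacks a movti
   pattern is emitted as two word_mode moves, one per 8-byte half.  */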
3568
3569 static rtx_insn *
3570 emit_move_multi_word (machine_mode mode, rtx x, rtx y)
3571 {
3572 rtx_insn *last_insn = 0;
3573 rtx_insn *seq;
3574 rtx inner;
3575 bool need_clobber;
3576 int i, mode_size;
3577
3578 /* This function can only handle cases where the number of words is
3579 known at compile time. */
3580 mode_size = GET_MODE_SIZE (mode).to_constant ();
3581 gcc_assert (mode_size >= UNITS_PER_WORD);
3582
3583 /* If X is a push on the stack, do the push now and replace
3584 X with a reference to the stack pointer. */
3585 if (push_operand (x, mode))
3586 x = emit_move_resolve_push (mode, x);
3587
3588 /* If we are in reload, see if either operand is a MEM whose address
3589 is scheduled for replacement. */
3590 if (reload_in_progress && MEM_P (x)
3591 && (inner = find_replacement (&XEXP (x, 0))) != XEXP (x, 0))
3592 x = replace_equiv_address_nv (x, inner);
3593 if (reload_in_progress && MEM_P (y)
3594 && (inner = find_replacement (&XEXP (y, 0))) != XEXP (y, 0))
3595 y = replace_equiv_address_nv (y, inner);
3596
3597 start_sequence ();
3598
3599 need_clobber = false;
3600 for (i = 0; i < CEIL (mode_size, UNITS_PER_WORD); i++)
3601 {
3602 rtx xpart = operand_subword (x, i, 1, mode);
3603 rtx ypart;
3604
3605 /* Do not generate code for a move if it would come entirely
3606 from the undefined bits of a paradoxical subreg. */
3607 if (undefined_operand_subword_p (y, i))
3608 continue;
3609
3610 ypart = operand_subword (y, i, 1, mode);
3611
3612 /* If we can't get a part of Y, put Y into memory if it is a
3613 constant. Otherwise, force it into a register. Then we must
3614 be able to get a part of Y. */
3615 if (ypart == 0 && CONSTANT_P (y))
3616 {
3617 y = use_anchored_address (force_const_mem (mode, y));
3618 ypart = operand_subword (y, i, 1, mode);
3619 }
3620 else if (ypart == 0)
3621 ypart = operand_subword_force (y, i, mode);
3622
3623 gcc_assert (xpart && ypart);
3624
3625 need_clobber |= (GET_CODE (xpart) == SUBREG);
3626
3627 last_insn = emit_move_insn (xpart, ypart);
3628 }
3629
3630 seq = get_insns ();
3631 end_sequence ();
3632
3633 /* Show the output dies here. This is necessary for SUBREGs
3634 of pseudos since we cannot track their lifetimes correctly;
3635 hard regs shouldn't appear here except as return values.
3636 We never want to emit such a clobber after reload. */
3637 if (x != y
3638 && ! (reload_in_progress || reload_completed)
3639 && need_clobber != 0)
3640 emit_clobber (x);
3641
3642 emit_insn (seq);
3643
3644 return last_insn;
3645 }
3646
3647 /* Low level part of emit_move_insn.
3648 Called just like emit_move_insn, but assumes X and Y
3649 are basically valid. */
3650
3651 rtx_insn *
3652 emit_move_insn_1 (rtx x, rtx y)
3653 {
3654 machine_mode mode = GET_MODE (x);
3655 enum insn_code code;
3656
3657 gcc_assert ((unsigned int) mode < (unsigned int) MAX_MACHINE_MODE);
3658
3659 code = optab_handler (mov_optab, mode);
3660 if (code != CODE_FOR_nothing)
3661 return emit_insn (GEN_FCN (code) (x, y));
3662
3663 /* Expand complex moves by moving real part and imag part. */
3664 if (COMPLEX_MODE_P (mode))
3665 return emit_move_complex (mode, x, y);
3666
3667 if (GET_MODE_CLASS (mode) == MODE_DECIMAL_FLOAT
3668 || ALL_FIXED_POINT_MODE_P (mode))
3669 {
3670 rtx_insn *result = emit_move_via_integer (mode, x, y, true);
3671
3672 /* If we can't find an integer mode, fall back to a multi-word move. */
3673 if (result)
3674 return result;
3675 else
3676 return emit_move_multi_word (mode, x, y);
3677 }
3678
3679 if (GET_MODE_CLASS (mode) == MODE_CC)
3680 return emit_move_ccmode (mode, x, y);
3681
3682 /* Try using a move pattern for the corresponding integer mode. This is
3683 only safe when simplify_subreg can convert MODE constants into integer
3684 constants. At present, it can only do this reliably if the value
3685 fits within a HOST_WIDE_INT. */
3686 if (!CONSTANT_P (y)
3687 || known_le (GET_MODE_BITSIZE (mode), HOST_BITS_PER_WIDE_INT))
3688 {
3689 rtx_insn *ret = emit_move_via_integer (mode, x, y, lra_in_progress);
3690
3691 if (ret)
3692 {
3693 if (! lra_in_progress || recog (PATTERN (ret), ret, 0) >= 0)
3694 return ret;
3695 }
3696 }
3697
3698 return emit_move_multi_word (mode, x, y);
3699 }
3700
3701 /* Generate code to copy Y into X.
3702 Both Y and X must have the same mode, except that
3703 Y can be a constant with VOIDmode.
3704 This mode cannot be BLKmode; use emit_block_move for that.
3705
3706 Return the last instruction emitted. */
3707
3708 rtx_insn *
3709 emit_move_insn (rtx x, rtx y)
3710 {
3711 machine_mode mode = GET_MODE (x);
3712 rtx y_cst = NULL_RTX;
3713 rtx_insn *last_insn;
3714 rtx set;
3715
3716 gcc_assert (mode != BLKmode
3717 && (GET_MODE (y) == mode || GET_MODE (y) == VOIDmode));
3718
3719 if (CONSTANT_P (y))
3720 {
3721 if (optimize
3722 && SCALAR_FLOAT_MODE_P (GET_MODE (x))
3723 && (last_insn = compress_float_constant (x, y)))
3724 return last_insn;
3725
3726 y_cst = y;
3727
3728 if (!targetm.legitimate_constant_p (mode, y))
3729 {
3730 y = force_const_mem (mode, y);
3731
3732 /* If the target's cannot_force_const_mem prevented the spill,
3733 assume that the target's move expanders will also take care
3734 of the non-legitimate constant. */
3735 if (!y)
3736 y = y_cst;
3737 else
3738 y = use_anchored_address (y);
3739 }
3740 }
3741
3742 /* If X or Y are memory references, verify that their addresses are valid
3743 for the machine. */
3744 if (MEM_P (x)
3745 && (! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
3746 MEM_ADDR_SPACE (x))
3747 && ! push_operand (x, GET_MODE (x))))
3748 x = validize_mem (x);
3749
3750 if (MEM_P (y)
3751 && ! memory_address_addr_space_p (GET_MODE (y), XEXP (y, 0),
3752 MEM_ADDR_SPACE (y)))
3753 y = validize_mem (y);
3754
3755 gcc_assert (mode != BLKmode);
3756
3757 last_insn = emit_move_insn_1 (x, y);
3758
3759 if (y_cst && REG_P (x)
3760 && (set = single_set (last_insn)) != NULL_RTX
3761 && SET_DEST (set) == x
3762 && ! rtx_equal_p (y_cst, SET_SRC (set)))
3763 set_unique_reg_note (last_insn, REG_EQUAL, copy_rtx (y_cst));
3764
3765 return last_insn;
3766 }
3767
3768 /* Generate the body of an instruction to copy Y into X.
3769 It may be a list of insns, if one insn isn't enough. */
3770
3771 rtx_insn *
3772 gen_move_insn (rtx x, rtx y)
3773 {
3774 rtx_insn *seq;
3775
3776 start_sequence ();
3777 emit_move_insn_1 (x, y);
3778 seq = get_insns ();
3779 end_sequence ();
3780 return seq;
3781 }
3782
3783 /* If Y is representable exactly in a narrower mode, and the target can
3784 perform the extension directly from constant or memory, then emit the
3785 move as an extension. */
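
/* For example, a move of the DFmode constant 1.0 can be emitted as a
   float_extend of the SFmode constant 1.0, since the truncation of
   1.0 to SFmode is exact and, on suitable targets, the extension is
   cheaper than loading the DFmode constant from memory.  */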
3786
3787 static rtx_insn *
3788 compress_float_constant (rtx x, rtx y)
3789 {
3790 machine_mode dstmode = GET_MODE (x);
3791 machine_mode orig_srcmode = GET_MODE (y);
3792 machine_mode srcmode;
3793 const REAL_VALUE_TYPE *r;
3794 int oldcost, newcost;
3795 bool speed = optimize_insn_for_speed_p ();
3796
3797 r = CONST_DOUBLE_REAL_VALUE (y);
3798
3799 if (targetm.legitimate_constant_p (dstmode, y))
3800 oldcost = set_src_cost (y, orig_srcmode, speed);
3801 else
3802 oldcost = set_src_cost (force_const_mem (dstmode, y), dstmode, speed);
3803
3804 FOR_EACH_MODE_UNTIL (srcmode, orig_srcmode)
3805 {
3806 enum insn_code ic;
3807 rtx trunc_y;
3808 rtx_insn *last_insn;
3809
3810 /* Skip if the target can't extend this way. */
3811 ic = can_extend_p (dstmode, srcmode, 0);
3812 if (ic == CODE_FOR_nothing)
3813 continue;
3814
3815 /* Skip if the narrowed value isn't exact. */
3816 if (! exact_real_truncate (srcmode, r))
3817 continue;
3818
3819 trunc_y = const_double_from_real_value (*r, srcmode);
3820
3821 if (targetm.legitimate_constant_p (srcmode, trunc_y))
3822 {
3823 /* Skip if the target needs extra instructions to perform
3824 the extension. */
3825 if (!insn_operand_matches (ic, 1, trunc_y))
3826 continue;
3827 /* This is valid, but may not be cheaper than the original. */
3828 newcost = set_src_cost (gen_rtx_FLOAT_EXTEND (dstmode, trunc_y),
3829 dstmode, speed);
3830 if (oldcost < newcost)
3831 continue;
3832 }
3833 else if (float_extend_from_mem[dstmode][srcmode])
3834 {
3835 trunc_y = force_const_mem (srcmode, trunc_y);
3836 /* This is valid, but may not be cheaper than the original. */
3837 newcost = set_src_cost (gen_rtx_FLOAT_EXTEND (dstmode, trunc_y),
3838 dstmode, speed);
3839 if (oldcost < newcost)
3840 continue;
3841 trunc_y = validize_mem (trunc_y);
3842 }
3843 else
3844 continue;
3845
3846 /* For CSE's benefit, force the compressed constant pool entry
3847 into a new pseudo. This constant may be used in different modes,
3848 and if not, combine will put things back together for us. */
3849 trunc_y = force_reg (srcmode, trunc_y);
3850
3851 /* If x is a hard register, perform the extension into a pseudo,
3852 so that e.g. stack realignment code is aware of it. */
3853 rtx target = x;
3854 if (REG_P (x) && HARD_REGISTER_P (x))
3855 target = gen_reg_rtx (dstmode);
3856
3857 emit_unop_insn (ic, target, trunc_y, UNKNOWN);
3858 last_insn = get_last_insn ();
3859
3860 if (REG_P (target))
3861 set_unique_reg_note (last_insn, REG_EQUAL, y);
3862
3863 if (target != x)
3864 return emit_move_insn (x, target);
3865 return last_insn;
3866 }
3867
3868 return NULL;
3869 }
3870 \f
3871 /* Pushing data onto the stack. */
3872
3873 /* Push a block of length SIZE (perhaps variable)
3874 and return an rtx to address the beginning of the block.
3875 The value may be virtual_outgoing_args_rtx.
3876
3877 EXTRA is the number of bytes of padding to push in addition to SIZE.
3878 BELOW nonzero means this padding comes at low addresses;
3879 otherwise, the padding comes at high addresses. */
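
/* For instance, SIZE == 16 with EXTRA == 4 and BELOW == 0 allocates
   20 bytes of stack, with the 4 padding bytes at the high-address
   side of the block.  */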
3880
3881 rtx
3882 push_block (rtx size, poly_int64 extra, int below)
3883 {
3884 rtx temp;
3885
3886 size = convert_modes (Pmode, ptr_mode, size, 1);
3887 if (CONSTANT_P (size))
3888 anti_adjust_stack (plus_constant (Pmode, size, extra));
3889 else if (REG_P (size) && known_eq (extra, 0))
3890 anti_adjust_stack (size);
3891 else
3892 {
3893 temp = copy_to_mode_reg (Pmode, size);
3894 if (maybe_ne (extra, 0))
3895 temp = expand_binop (Pmode, add_optab, temp,
3896 gen_int_mode (extra, Pmode),
3897 temp, 0, OPTAB_LIB_WIDEN);
3898 anti_adjust_stack (temp);
3899 }
3900
3901 if (STACK_GROWS_DOWNWARD)
3902 {
3903 temp = virtual_outgoing_args_rtx;
3904 if (maybe_ne (extra, 0) && below)
3905 temp = plus_constant (Pmode, temp, extra);
3906 }
3907 else
3908 {
3909 if (CONST_INT_P (size))
3910 temp = plus_constant (Pmode, virtual_outgoing_args_rtx,
3911 -INTVAL (size) - (below ? 0 : extra));
3912 else if (maybe_ne (extra, 0) && !below)
3913 temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
3914 negate_rtx (Pmode, plus_constant (Pmode, size,
3915 extra)));
3916 else
3917 temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
3918 negate_rtx (Pmode, size));
3919 }
3920
3921 return memory_address (NARROWEST_INT_MODE, temp);
3922 }
3923
3924 /* A utility routine that returns the base of an auto-inc memory, or NULL. */
3925
3926 static rtx
3927 mem_autoinc_base (rtx mem)
3928 {
3929 if (MEM_P (mem))
3930 {
3931 rtx addr = XEXP (mem, 0);
3932 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
3933 return XEXP (addr, 0);
3934 }
3935 return NULL;
3936 }
3937
3938 /* A utility routine used here, in reload, and in try_split. The insns
3939 after PREV up to and including LAST are known to adjust the stack,
3940 with a final value of END_ARGS_SIZE. Iterate backward from LAST
3941 placing notes as appropriate. PREV may be NULL, indicating the
3942 entire insn sequence prior to LAST should be scanned.
3943
3944 The set of allowed stack pointer modifications is small:
3945 (1) One or more auto-inc style memory references (aka pushes),
3946 (2) One or more addition/subtraction with the SP as destination,
3947 (3) A single move insn with the SP as destination,
3948 (4) A call_pop insn,
3949 (5) Noreturn call insns if !ACCUMULATE_OUTGOING_ARGS.
3950
3951 Insns in the sequence that do not modify the SP are ignored,
3952 except for noreturn calls.
3953
3954 The return value is the amount of adjustment that can be trivially
3955 verified, via immediate operand or auto-inc. If the adjustment
3956 cannot be trivially extracted, the return value is HOST_WIDE_INT_MIN. */
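
/* For example, both of

	(set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -4)))
	(set (mem:SI (pre_dec:SI (reg:SI sp))) (reg:SI 0))

   are recognized as trivial adjustments of -4.  */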
3957
3958 poly_int64
3959 find_args_size_adjust (rtx_insn *insn)
3960 {
3961 rtx dest, set, pat;
3962 int i;
3963
3964 pat = PATTERN (insn);
3965 set = NULL;
3966
3967 /* Look for a call_pop pattern. */
3968 if (CALL_P (insn))
3969 {
3970 /* We have to allow non-call_pop patterns for the case
3971 of emit_single_push_insn of a TLS address. */
3972 if (GET_CODE (pat) != PARALLEL)
3973 return 0;
3974
3975 /* All call_pop have a stack pointer adjust in the parallel.
3976 The call itself is always first, and the stack adjust is
3977 usually last, so search from the end. */
3978 for (i = XVECLEN (pat, 0) - 1; i > 0; --i)
3979 {
3980 set = XVECEXP (pat, 0, i);
3981 if (GET_CODE (set) != SET)
3982 continue;
3983 dest = SET_DEST (set);
3984 if (dest == stack_pointer_rtx)
3985 break;
3986 }
3987 /* We'd better have found the stack pointer adjust. */
3988 if (i == 0)
3989 return 0;
3990 /* Fall through to process the extracted SET and DEST
3991 as if it were a standalone insn. */
3992 }
3993 else if (GET_CODE (pat) == SET)
3994 set = pat;
3995 else if ((set = single_set (insn)) != NULL)
3996 ;
3997 else if (GET_CODE (pat) == PARALLEL)
3998 {
3999 /* ??? Some older ports use a parallel with a stack adjust
4000 and a store for a PUSH_ROUNDING pattern, rather than a
4001 PRE/POST_MODIFY rtx. Don't force them to update yet... */
4002 /* ??? See h8300 and m68k, pushqi1. */
4003 for (i = XVECLEN (pat, 0) - 1; i >= 0; --i)
4004 {
4005 set = XVECEXP (pat, 0, i);
4006 if (GET_CODE (set) != SET)
4007 continue;
4008 dest = SET_DEST (set);
4009 if (dest == stack_pointer_rtx)
4010 break;
4011
4012 /* We do not expect an auto-inc of the sp in the parallel. */
4013 gcc_checking_assert (mem_autoinc_base (dest) != stack_pointer_rtx);
4014 gcc_checking_assert (mem_autoinc_base (SET_SRC (set))
4015 != stack_pointer_rtx);
4016 }
4017 if (i < 0)
4018 return 0;
4019 }
4020 else
4021 return 0;
4022
4023 dest = SET_DEST (set);
4024
4025 /* Look for direct modifications of the stack pointer. */
4026 if (REG_P (dest) && REGNO (dest) == STACK_POINTER_REGNUM)
4027 {
4028 /* Look for a trivial adjustment, otherwise assume nothing. */
4029 /* Note that the SPU restore_stack_block pattern refers to
4030 the stack pointer in V4SImode. Consider that non-trivial. */
4031 if (SCALAR_INT_MODE_P (GET_MODE (dest))
4032 && GET_CODE (SET_SRC (set)) == PLUS
4033 && XEXP (SET_SRC (set), 0) == stack_pointer_rtx
4034 && CONST_INT_P (XEXP (SET_SRC (set), 1)))
4035 return INTVAL (XEXP (SET_SRC (set), 1));
4036 /* ??? Reload can generate no-op moves, which will be cleaned
4037 up later. Recognize it and continue searching. */
4038 else if (rtx_equal_p (dest, SET_SRC (set)))
4039 return 0;
4040 else
4041 return HOST_WIDE_INT_MIN;
4042 }
4043 else
4044 {
4045 rtx mem, addr;
4046
4047 /* Otherwise only think about autoinc patterns. */
4048 if (mem_autoinc_base (dest) == stack_pointer_rtx)
4049 {
4050 mem = dest;
4051 gcc_checking_assert (mem_autoinc_base (SET_SRC (set))
4052 != stack_pointer_rtx);
4053 }
4054 else if (mem_autoinc_base (SET_SRC (set)) == stack_pointer_rtx)
4055 mem = SET_SRC (set);
4056 else
4057 return 0;
4058
4059 addr = XEXP (mem, 0);
4060 switch (GET_CODE (addr))
4061 {
4062 case PRE_INC:
4063 case POST_INC:
4064 return GET_MODE_SIZE (GET_MODE (mem));
4065 case PRE_DEC:
4066 case POST_DEC:
4067 return -GET_MODE_SIZE (GET_MODE (mem));
4068 case PRE_MODIFY:
4069 case POST_MODIFY:
4070 addr = XEXP (addr, 1);
4071 gcc_assert (GET_CODE (addr) == PLUS);
4072 gcc_assert (XEXP (addr, 0) == stack_pointer_rtx);
4073 gcc_assert (CONST_INT_P (XEXP (addr, 1)));
4074 return INTVAL (XEXP (addr, 1));
4075 default:
4076 gcc_unreachable ();
4077 }
4078 }
4079 }
4080
4081 poly_int64
4082 fixup_args_size_notes (rtx_insn *prev, rtx_insn *last,
4083 poly_int64 end_args_size)
4084 {
4085 poly_int64 args_size = end_args_size;
4086 bool saw_unknown = false;
4087 rtx_insn *insn;
4088
4089 for (insn = last; insn != prev; insn = PREV_INSN (insn))
4090 {
4091 if (!NONDEBUG_INSN_P (insn))
4092 continue;
4093
4094 /* We might have existing REG_ARGS_SIZE notes, e.g. when pushing
4095 a call argument containing a TLS address that itself requires
4096 a call to __tls_get_addr. The handling of stack_pointer_delta
4097 in emit_single_push_insn is supposed to ensure that any such
4098 notes are already correct. */
4099 rtx note = find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX);
4100 gcc_assert (!note || known_eq (args_size, get_args_size (note)));
4101
4102 poly_int64 this_delta = find_args_size_adjust (insn);
4103 if (known_eq (this_delta, 0))
4104 {
4105 if (!CALL_P (insn)
4106 || ACCUMULATE_OUTGOING_ARGS
4107 || find_reg_note (insn, REG_NORETURN, NULL_RTX) == NULL_RTX)
4108 continue;
4109 }
4110
4111 gcc_assert (!saw_unknown);
4112 if (known_eq (this_delta, HOST_WIDE_INT_MIN))
4113 saw_unknown = true;
4114
4115 if (!note)
4116 add_args_size_note (insn, args_size);
4117 if (STACK_GROWS_DOWNWARD)
4118 this_delta = -poly_uint64 (this_delta);
4119
4120 if (saw_unknown)
4121 args_size = HOST_WIDE_INT_MIN;
4122 else
4123 args_size -= this_delta;
4124 }
4125
4126 return args_size;
4127 }
4128
4129 #ifdef PUSH_ROUNDING
4130 /* Emit a single push insn. */
4131
4132 static void
4133 emit_single_push_insn_1 (machine_mode mode, rtx x, tree type)
4134 {
4135 rtx dest_addr;
4136 poly_int64 rounded_size = PUSH_ROUNDING (GET_MODE_SIZE (mode));
4137 rtx dest;
4138 enum insn_code icode;
4139
4140 /* If there is a push pattern, use it. Otherwise try the old way of
4141 throwing a MEM representing the push operation to the move expander. */
4142 icode = optab_handler (push_optab, mode);
4143 if (icode != CODE_FOR_nothing)
4144 {
4145 struct expand_operand ops[1];
4146
4147 create_input_operand (&ops[0], x, mode);
4148 if (maybe_expand_insn (icode, 1, ops))
4149 return;
4150 }
4151 if (known_eq (GET_MODE_SIZE (mode), rounded_size))
4152 dest_addr = gen_rtx_fmt_e (STACK_PUSH_CODE, Pmode, stack_pointer_rtx);
4153 /* If we are to pad downward, adjust the stack pointer first and
4154 then store X into the stack location using an offset. This is
4155 because emit_move_insn does not know how to pad; it does not have
4156 access to the type. */
4157 else if (targetm.calls.function_arg_padding (mode, type) == PAD_DOWNWARD)
4158 {
4159 emit_move_insn (stack_pointer_rtx,
4160 expand_binop (Pmode,
4161 STACK_GROWS_DOWNWARD ? sub_optab
4162 : add_optab,
4163 stack_pointer_rtx,
4164 gen_int_mode (rounded_size, Pmode),
4165 NULL_RTX, 0, OPTAB_LIB_WIDEN));
4166
4167 poly_int64 offset = rounded_size - GET_MODE_SIZE (mode);
4168 if (STACK_GROWS_DOWNWARD && STACK_PUSH_CODE == POST_DEC)
4169 /* We have already decremented the stack pointer, so get the
4170 previous value. */
4171 offset += rounded_size;
4172
4173 if (!STACK_GROWS_DOWNWARD && STACK_PUSH_CODE == POST_INC)
4174 /* We have already incremented the stack pointer, so get the
4175 previous value. */
4176 offset -= rounded_size;
4177
4178 dest_addr = plus_constant (Pmode, stack_pointer_rtx, offset);
4179 }
4180 else
4181 {
4182 if (STACK_GROWS_DOWNWARD)
4183 /* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC. */
4184 dest_addr = plus_constant (Pmode, stack_pointer_rtx, -rounded_size);
4185 else
4186 /* ??? This seems wrong if STACK_PUSH_CODE == POST_INC. */
4187 dest_addr = plus_constant (Pmode, stack_pointer_rtx, rounded_size);
4188
4189 dest_addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, dest_addr);
4190 }
4191
4192 dest = gen_rtx_MEM (mode, dest_addr);
4193
4194 if (type != 0)
4195 {
4196 set_mem_attributes (dest, type, 1);
4197
4198 if (cfun->tail_call_marked)
4199 /* Function incoming arguments may overlap with sibling call
4200 outgoing arguments and we cannot allow reordering of reads
4201 from function arguments with stores to outgoing arguments
4202 of sibling calls. */
4203 set_mem_alias_set (dest, 0);
4204 }
4205 emit_move_insn (dest, x);
4206 }
4207
4208 /* Emit and annotate a single push insn. */
4209
4210 static void
4211 emit_single_push_insn (machine_mode mode, rtx x, tree type)
4212 {
4213 poly_int64 delta, old_delta = stack_pointer_delta;
4214 rtx_insn *prev = get_last_insn ();
4215 rtx_insn *last;
4216
4217 emit_single_push_insn_1 (mode, x, type);
4218
4219 /* Adjust stack_pointer_delta to describe the situation after the push
4220 we just performed. Note that we must do this after the push rather
4221 than before the push in case calculating X needs pushes and pops of
4222 its own (e.g. if calling __tls_get_addr). The REG_ARGS_SIZE notes
4223 for such pushes and pops must not include the effect of the future
4224 push of X. */
4225 stack_pointer_delta += PUSH_ROUNDING (GET_MODE_SIZE (mode));
4226
4227 last = get_last_insn ();
4228
4229 /* Notice the common case where we emitted exactly one insn. */
4230 if (PREV_INSN (last) == prev)
4231 {
4232 add_args_size_note (last, stack_pointer_delta);
4233 return;
4234 }
4235
4236 delta = fixup_args_size_notes (prev, last, stack_pointer_delta);
4237 gcc_assert (known_eq (delta, HOST_WIDE_INT_MIN)
4238 || known_eq (delta, old_delta));
4239 }
4240 #endif
4241
4242 /* If reading SIZE bytes from X will end up reading from
4243 Y, return the number of bytes that overlap. Return -1
4244 if there is no overlap, or -2 if we can't determine the overlap
4245 (for example when X and Y have different base registers). */
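
/* E.g. reading SIZE == 16 bytes from X == sp+8 covers bytes sp+8
   through sp+23, which overlaps Y == sp+16 by 8 bytes, so 8 is
   returned.  */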
4246
4247 static int
4248 memory_load_overlap (rtx x, rtx y, HOST_WIDE_INT size)
4249 {
4250 rtx tmp = plus_constant (Pmode, x, size);
4251 rtx sub = simplify_gen_binary (MINUS, Pmode, tmp, y);
4252
4253 if (!CONST_INT_P (sub))
4254 return -2;
4255
4256 HOST_WIDE_INT val = INTVAL (sub);
4257
4258 return IN_RANGE (val, 1, size) ? val : -1;
4259 }
4260
4261 /* Generate code to push X onto the stack, assuming it has mode MODE and
4262 type TYPE.
4263 MODE is redundant except when X is a CONST_INT (since they don't
4264 carry mode info).
4265 SIZE is an rtx for the size of data to be copied (in bytes),
4266 needed only if X is BLKmode.
4267 Return true if successful. May return false if asked to push a
4268 partial argument during a sibcall optimization (as specified by
4269 SIBCALL_P) and the incoming and outgoing pointers cannot be shown
4270 to not overlap.
4271
4272 ALIGN (in bits) is maximum alignment we can assume.
4273
4274 If PARTIAL and REG are both nonzero, then copy that many of the first
4275 bytes of X into registers starting with REG, and push the rest of X.
4276 The amount of space pushed is decreased by PARTIAL bytes.
4277 REG must be a hard register in this case.
4278 If REG is zero but PARTIAL is not, take all other actions for an
4279 argument partially in registers, but do not actually load any
4280 registers.
4281
4282 EXTRA is the amount in bytes of extra space to leave next to this arg.
4283 This is ignored if an argument block has already been allocated.
4284
4285 On a machine that lacks real push insns, ARGS_ADDR is the address of
4286 the bottom of the argument block for this call. We use indexing off there
4287 to store the arg. On machines with push insns, ARGS_ADDR is 0 when an
4288 argument block has not been preallocated.
4289
4290 ARGS_SO_FAR is the size of args previously pushed for this call.
4291
4292 REG_PARM_STACK_SPACE is nonzero if functions require stack space
4293 for arguments passed in registers. If nonzero, it will be the number
4294 of bytes required. */
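
/* For example, PARTIAL == 8 on a 32-bit target means that the first
   two words of X are passed in REG and the following hard register,
   and only the remainder of X is pushed onto the stack.  */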
4295
4296 bool
4297 emit_push_insn (rtx x, machine_mode mode, tree type, rtx size,
4298 unsigned int align, int partial, rtx reg, poly_int64 extra,
4299 rtx args_addr, rtx args_so_far, int reg_parm_stack_space,
4300 rtx alignment_pad, bool sibcall_p)
4301 {
4302 rtx xinner;
4303 pad_direction stack_direction
4304 = STACK_GROWS_DOWNWARD ? PAD_DOWNWARD : PAD_UPWARD;
4305
4306 /* Decide where to pad the argument: PAD_DOWNWARD for below,
4307 PAD_UPWARD for above, or PAD_NONE for don't pad it.
4308 Default is below for small data on big-endian machines; else above. */
4309 pad_direction where_pad = targetm.calls.function_arg_padding (mode, type);
4310
4311 /* Invert direction if stack is post-decrement.
4312 FIXME: why? */
4313 if (STACK_PUSH_CODE == POST_DEC)
4314 if (where_pad != PAD_NONE)
4315 where_pad = (where_pad == PAD_DOWNWARD ? PAD_UPWARD : PAD_DOWNWARD);
4316
4317 xinner = x;
4318
4319 int nregs = partial / UNITS_PER_WORD;
4320 rtx *tmp_regs = NULL;
4321 int overlapping = 0;
4322
4323 if (mode == BLKmode
4324 || (STRICT_ALIGNMENT && align < GET_MODE_ALIGNMENT (mode)))
4325 {
4326 /* Copy a block into the stack, entirely or partially. */
4327
4328 rtx temp;
4329 int used;
4330 int offset;
4331 int skip;
4332
4333 offset = partial % (PARM_BOUNDARY / BITS_PER_UNIT);
4334 used = partial - offset;
4335
4336 if (mode != BLKmode)
4337 {
4338 /* A value is to be stored in an insufficiently aligned
4339 stack slot; copy via a suitably aligned slot if
4340 necessary. */
4341 size = gen_int_mode (GET_MODE_SIZE (mode), Pmode);
4342 if (!MEM_P (xinner))
4343 {
4344 temp = assign_temp (type, 1, 1);
4345 emit_move_insn (temp, xinner);
4346 xinner = temp;
4347 }
4348 }
4349
4350 gcc_assert (size);
4351
4352 /* USED is now the # of bytes we need not copy to the stack
4353 because registers will take care of them. */
4354
4355 if (partial != 0)
4356 xinner = adjust_address (xinner, BLKmode, used);
4357
4358 /* If the partial register-part of the arg counts in its stack size,
4359 skip the part of stack space corresponding to the registers.
4360 Otherwise, start copying to the beginning of the stack space,
4361 by setting SKIP to 0. */
4362 skip = (reg_parm_stack_space == 0) ? 0 : used;
4363
4364 #ifdef PUSH_ROUNDING
4365 /* Do it with several push insns if that doesn't take lots of insns
4366 and if there is no difficulty with push insns that skip bytes
4367 on the stack for alignment purposes. */
4368 if (args_addr == 0
4369 && PUSH_ARGS
4370 && CONST_INT_P (size)
4371 && skip == 0
4372 && MEM_ALIGN (xinner) >= align
4373 && can_move_by_pieces ((unsigned) INTVAL (size) - used, align)
4374 /* Here we avoid the case of a structure whose weak alignment
4375 forces many pushes of a small amount of data,
4376 and such small pushes do rounding that causes trouble. */
4377 && ((!targetm.slow_unaligned_access (word_mode, align))
4378 || align >= BIGGEST_ALIGNMENT
4379 || known_eq (PUSH_ROUNDING (align / BITS_PER_UNIT),
4380 align / BITS_PER_UNIT))
4381 && known_eq (PUSH_ROUNDING (INTVAL (size)), INTVAL (size)))
4382 {
4383 /* Push padding now if padding above and stack grows down,
4384 or if padding below and stack grows up.
4385 But if space already allocated, this has already been done. */
4386 if (maybe_ne (extra, 0)
4387 && args_addr == 0
4388 && where_pad != PAD_NONE
4389 && where_pad != stack_direction)
4390 anti_adjust_stack (gen_int_mode (extra, Pmode));
4391
4392 move_by_pieces (NULL, xinner, INTVAL (size) - used, align, 0);
4393 }
4394 else
4395 #endif /* PUSH_ROUNDING */
4396 {
4397 rtx target;
4398
4399 /* Otherwise make space on the stack and copy the data
4400 to the address of that space. */
4401
4402 /* Deduct words put into registers from the size we must copy. */
4403 if (partial != 0)
4404 {
4405 if (CONST_INT_P (size))
4406 size = GEN_INT (INTVAL (size) - used);
4407 else
4408 size = expand_binop (GET_MODE (size), sub_optab, size,
4409 gen_int_mode (used, GET_MODE (size)),
4410 NULL_RTX, 0, OPTAB_LIB_WIDEN);
4411 }
4412
4413 /* Get the address of the stack space.
4414 In this case, we do not deal with EXTRA separately.
4415 A single stack adjust will do. */
4416 if (! args_addr)
4417 {
4418 temp = push_block (size, extra, where_pad == PAD_DOWNWARD);
4419 extra = 0;
4420 }
4421 else if (CONST_INT_P (args_so_far))
4422 temp = memory_address (BLKmode,
4423 plus_constant (Pmode, args_addr,
4424 skip + INTVAL (args_so_far)));
4425 else
4426 temp = memory_address (BLKmode,
4427 plus_constant (Pmode,
4428 gen_rtx_PLUS (Pmode,
4429 args_addr,
4430 args_so_far),
4431 skip));
4432
4433 if (!ACCUMULATE_OUTGOING_ARGS)
4434 {
4435 /* If the source is referenced relative to the stack pointer,
4436 copy it to another register to stabilize it. We do not need
4437 to do this if we know that we won't be changing sp. */
4438
4439 if (reg_mentioned_p (virtual_stack_dynamic_rtx, temp)
4440 || reg_mentioned_p (virtual_outgoing_args_rtx, temp))
4441 temp = copy_to_reg (temp);
4442 }
4443
4444 target = gen_rtx_MEM (BLKmode, temp);
4445
4446 /* We do *not* set_mem_attributes here, because incoming arguments
4447 may overlap with sibling call outgoing arguments and we cannot
4448 allow reordering of reads from function arguments with stores
4449 to outgoing arguments of sibling calls. We do, however, want
4450 to record the alignment of the stack slot. */
4451 /* ALIGN may well be better aligned than TYPE, e.g. due to
4452 PARM_BOUNDARY. Assume the caller isn't lying. */
4453 set_mem_align (target, align);
4454
4455 /* If part should go in registers and pushing to that part would
4456 overwrite some of the values that need to go into regs, load the
4457 overlapping values into temporary pseudos to be moved into the hard
4458 regs at the end after the stack pushing has completed.
4459 We cannot load them directly into the hard regs here because
4460 they can be clobbered by the block move expansions.
4461 See PR 65358. */
4462
4463 if (partial > 0 && reg != 0 && mode == BLKmode
4464 && GET_CODE (reg) != PARALLEL)
4465 {
4466 overlapping = memory_load_overlap (XEXP (x, 0), temp, partial);
4467 if (overlapping > 0)
4468 {
4469 gcc_assert (overlapping % UNITS_PER_WORD == 0);
4470 overlapping /= UNITS_PER_WORD;
4471
4472 tmp_regs = XALLOCAVEC (rtx, overlapping);
4473
4474 for (int i = 0; i < overlapping; i++)
4475 tmp_regs[i] = gen_reg_rtx (word_mode);
4476
4477 for (int i = 0; i < overlapping; i++)
4478 emit_move_insn (tmp_regs[i],
4479 operand_subword_force (target, i, mode));
4480 }
4481 else if (overlapping == -1)
4482 overlapping = 0;
4483 /* Could not determine whether there is overlap.
4484 Fail the sibcall. */
4485 else
4486 {
4487 overlapping = 0;
4488 if (sibcall_p)
4489 return false;
4490 }
4491 }
4492 emit_block_move (target, xinner, size, BLOCK_OP_CALL_PARM);
4493 }
4494 }
4495 else if (partial > 0)
4496 {
4497 /* Scalar partly in registers. This case is only supported
4498 for fixed-width modes. */
4499 int size = GET_MODE_SIZE (mode).to_constant ();
4500 size /= UNITS_PER_WORD;
4501 int i;
4502 int not_stack;
4503 /* # bytes of start of argument
4504 that we must make space for but need not store. */
4505 int offset = partial % (PARM_BOUNDARY / BITS_PER_UNIT);
4506 int args_offset = INTVAL (args_so_far);
4507 int skip;
4508
4509 /* Push padding now if padding above and stack grows down,
4510 or if padding below and stack grows up.
4511 But if space already allocated, this has already been done. */
4512 if (maybe_ne (extra, 0)
4513 && args_addr == 0
4514 && where_pad != PAD_NONE
4515 && where_pad != stack_direction)
4516 anti_adjust_stack (gen_int_mode (extra, Pmode));
4517
4518 /* If we make space by pushing it, we might as well push
4519 the real data. Otherwise, we can leave OFFSET nonzero
4520 and leave the space uninitialized. */
4521 if (args_addr == 0)
4522 offset = 0;
4523
4524 /* Now NOT_STACK gets the number of words that we don't need to
4525 allocate on the stack. Convert OFFSET to words too. */
4526 not_stack = (partial - offset) / UNITS_PER_WORD;
4527 offset /= UNITS_PER_WORD;
4528
4529 /* If the partial register-part of the arg counts in its stack size,
4530 skip the part of stack space corresponding to the registers.
4531 Otherwise, start copying to the beginning of the stack space,
4532 by setting SKIP to 0. */
4533 skip = (reg_parm_stack_space == 0) ? 0 : not_stack;
4534
4535 if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x))
4536 x = validize_mem (force_const_mem (mode, x));
4537
4538 /* If X is a hard register in a non-integer mode, copy it into a pseudo;
4539 SUBREGs of such registers are not allowed. */
4540 if ((REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
4541 && GET_MODE_CLASS (GET_MODE (x)) != MODE_INT))
4542 x = copy_to_reg (x);
4543
4544 /* Loop over all the words allocated on the stack for this arg. */
4545 /* We can do it by words, because any scalar bigger than a word
4546 has a size a multiple of a word. */
4547 for (i = size - 1; i >= not_stack; i--)
4548 if (i >= not_stack + offset)
4549 if (!emit_push_insn (operand_subword_force (x, i, mode),
4550 word_mode, NULL_TREE, NULL_RTX, align, 0, NULL_RTX,
4551 0, args_addr,
4552 GEN_INT (args_offset + ((i - not_stack + skip)
4553 * UNITS_PER_WORD)),
4554 reg_parm_stack_space, alignment_pad, sibcall_p))
4555 return false;
4556 }
4557 else
4558 {
4559 rtx addr;
4560 rtx dest;
4561
4562 /* Push padding now if padding above and stack grows down,
4563 or if padding below and stack grows up.
4564 But if space already allocated, this has already been done. */
4565 if (maybe_ne (extra, 0)
4566 && args_addr == 0
4567 && where_pad != PAD_NONE
4568 && where_pad != stack_direction)
4569 anti_adjust_stack (gen_int_mode (extra, Pmode));
4570
4571 #ifdef PUSH_ROUNDING
4572 if (args_addr == 0 && PUSH_ARGS)
4573 emit_single_push_insn (mode, x, type);
4574 else
4575 #endif
4576 {
4577 addr = simplify_gen_binary (PLUS, Pmode, args_addr, args_so_far);
4578 dest = gen_rtx_MEM (mode, memory_address (mode, addr));
4579
4580 /* We do *not* set_mem_attributes here, because incoming arguments
4581 may overlap with sibling call outgoing arguments and we cannot
4582 allow reordering of reads from function arguments with stores
4583 to outgoing arguments of sibling calls. We do, however, want
4584 to record the alignment of the stack slot. */
4585 /* ALIGN may well be better aligned than TYPE, e.g. due to
4586 PARM_BOUNDARY. Assume the caller isn't lying. */
4587 set_mem_align (dest, align);
4588
4589 emit_move_insn (dest, x);
4590 }
4591 }
4592
4593 /* Move the partial arguments into the registers and any overlapping
4594 values that we moved into the pseudos in tmp_regs. */
4595 if (partial > 0 && reg != 0)
4596 {
4597 /* Handle calls that pass values in multiple non-contiguous locations.
4598 The Irix 6 ABI has examples of this. */
4599 if (GET_CODE (reg) == PARALLEL)
4600 emit_group_load (reg, x, type, -1);
4601 else
4602 {
4603 gcc_assert (partial % UNITS_PER_WORD == 0);
4604 move_block_to_reg (REGNO (reg), x, nregs - overlapping, mode);
4605
4606 for (int i = 0; i < overlapping; i++)
4607 emit_move_insn (gen_rtx_REG (word_mode, REGNO (reg)
4608 + nregs - overlapping + i),
4609 tmp_regs[i]);
4610
4611 }
4612 }
4613
4614 if (maybe_ne (extra, 0) && args_addr == 0 && where_pad == stack_direction)
4615 anti_adjust_stack (gen_int_mode (extra, Pmode));
4616
4617 if (alignment_pad && args_addr == 0)
4618 anti_adjust_stack (alignment_pad);
4619
4620 return true;
4621 }
4622 \f
4623 /* Return X if X can be used as a subtarget in a sequence of arithmetic
4624 operations. */
4625
4626 static rtx
4627 get_subtarget (rtx x)
4628 {
4629 return (optimize
4630 || x == 0
4631 /* Only registers can be subtargets. */
4632 || !REG_P (x)
4633 /* Don't use hard regs to avoid extending their life. */
4634 || REGNO (x) < FIRST_PSEUDO_REGISTER
4635 ? 0 : x);
4636 }
4637
4638 /* A subroutine of expand_assignment. Optimize FIELD op= VAL, where
4639 FIELD is a bitfield. Returns true if the optimization was successful,
4640 and there's nothing else to do. */
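
/* For instance, for a one-bit bitfield F, "x.f ^= 1" can be done
   with a single XOR of the word containing F rather than an
   extract/modify/insert sequence.  */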
4641
4642 static bool
4643 optimize_bitfield_assignment_op (poly_uint64 pbitsize,
4644 poly_uint64 pbitpos,
4645 poly_uint64 pbitregion_start,
4646 poly_uint64 pbitregion_end,
4647 machine_mode mode1, rtx str_rtx,
4648 tree to, tree src, bool reverse)
4649 {
4650 /* str_mode is not guaranteed to be a scalar mode. */
4651 machine_mode str_mode = GET_MODE (str_rtx);
4652 unsigned int str_bitsize;
4653 tree op0, op1;
4654 rtx value, result;
4655 optab binop;
4656 gimple *srcstmt;
4657 enum tree_code code;
4658
4659 unsigned HOST_WIDE_INT bitsize, bitpos, bitregion_start, bitregion_end;
4660 if (mode1 != VOIDmode
4661 || !pbitsize.is_constant (&bitsize)
4662 || !pbitpos.is_constant (&bitpos)
4663 || !pbitregion_start.is_constant (&bitregion_start)
4664 || !pbitregion_end.is_constant (&bitregion_end)
4665 || bitsize >= BITS_PER_WORD
4666 || !GET_MODE_BITSIZE (str_mode).is_constant (&str_bitsize)
4667 || str_bitsize > BITS_PER_WORD
4668 || TREE_SIDE_EFFECTS (to)
4669 || TREE_THIS_VOLATILE (to))
4670 return false;
4671
4672 STRIP_NOPS (src);
4673 if (TREE_CODE (src) != SSA_NAME)
4674 return false;
4675 if (TREE_CODE (TREE_TYPE (src)) != INTEGER_TYPE)
4676 return false;
4677
4678 srcstmt = get_gimple_for_ssa_name (src);
4679 if (!srcstmt
4680 || TREE_CODE_CLASS (gimple_assign_rhs_code (srcstmt)) != tcc_binary)
4681 return false;
4682
4683 code = gimple_assign_rhs_code (srcstmt);
4684
4685 op0 = gimple_assign_rhs1 (srcstmt);
4686
4687 /* If OP0 is an SSA_NAME, then we want to walk the use-def chain
4688 to find its initialization. Hopefully the initialization will
4689 be from a bitfield load. */
4690 if (TREE_CODE (op0) == SSA_NAME)
4691 {
4692 gimple *op0stmt = get_gimple_for_ssa_name (op0);
4693
4694 /* We want to eventually have OP0 be the same as TO, which
4695 should be a bitfield. */
4696 if (!op0stmt
4697 || !is_gimple_assign (op0stmt)
4698 || gimple_assign_rhs_code (op0stmt) != TREE_CODE (to))
4699 return false;
4700 op0 = gimple_assign_rhs1 (op0stmt);
4701 }
4702
4703 op1 = gimple_assign_rhs2 (srcstmt);
4704
4705 if (!operand_equal_p (to, op0, 0))
4706 return false;
4707
4708 if (MEM_P (str_rtx))
4709 {
4710 unsigned HOST_WIDE_INT offset1;
4711
4712 if (str_bitsize == 0 || str_bitsize > BITS_PER_WORD)
4713 str_bitsize = BITS_PER_WORD;
4714
4715 scalar_int_mode best_mode;
4716 if (!get_best_mode (bitsize, bitpos, bitregion_start, bitregion_end,
4717 MEM_ALIGN (str_rtx), str_bitsize, false, &best_mode))
4718 return false;
4719 str_mode = best_mode;
4720 str_bitsize = GET_MODE_BITSIZE (best_mode);
4721
4722 offset1 = bitpos;
4723 bitpos %= str_bitsize;
4724 offset1 = (offset1 - bitpos) / BITS_PER_UNIT;
4725 str_rtx = adjust_address (str_rtx, str_mode, offset1);
4726 }
4727 else if (!REG_P (str_rtx) && GET_CODE (str_rtx) != SUBREG)
4728 return false;
4729 else
4730 gcc_assert (!reverse);
4731
4732 /* If the bit field covers the whole REG/MEM, store_field
4733 will likely generate better code. */
4734 if (bitsize >= str_bitsize)
4735 return false;
4736
4737 /* We can't handle fields split across multiple entities. */
4738 if (bitpos + bitsize > str_bitsize)
4739 return false;
4740
4741 if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
4742 bitpos = str_bitsize - bitpos - bitsize;
4743
4744 switch (code)
4745 {
4746 case PLUS_EXPR:
4747 case MINUS_EXPR:
4748 /* For now, just optimize the case of the topmost bitfield
4749 where we don't need to do any masking and also
4750 1 bit bitfields where xor can be used.
4751 We might win by one instruction for the other bitfields
4752 too if insv/extv instructions aren't used, so that
4753 can be added later. */
4754 if ((reverse || bitpos + bitsize != str_bitsize)
4755 && (bitsize != 1 || TREE_CODE (op1) != INTEGER_CST))
4756 break;
4757
4758 value = expand_expr (op1, NULL_RTX, str_mode, EXPAND_NORMAL);
4759 value = convert_modes (str_mode,
4760 TYPE_MODE (TREE_TYPE (op1)), value,
4761 TYPE_UNSIGNED (TREE_TYPE (op1)));
4762
4763 /* We may be accessing data outside the field, which means
4764 we can alias adjacent data. */
4765 if (MEM_P (str_rtx))
4766 {
4767 str_rtx = shallow_copy_rtx (str_rtx);
4768 set_mem_alias_set (str_rtx, 0);
4769 set_mem_expr (str_rtx, 0);
4770 }
4771
4772 if (bitsize == 1 && (reverse || bitpos + bitsize != str_bitsize))
4773 {
4774 value = expand_and (str_mode, value, const1_rtx, NULL);
4775 binop = xor_optab;
4776 }
4777 else
4778 binop = code == PLUS_EXPR ? add_optab : sub_optab;
4779
4780 value = expand_shift (LSHIFT_EXPR, str_mode, value, bitpos, NULL_RTX, 1);
4781 if (reverse)
4782 value = flip_storage_order (str_mode, value);
4783 result = expand_binop (str_mode, binop, str_rtx,
4784 value, str_rtx, 1, OPTAB_WIDEN);
4785 if (result != str_rtx)
4786 emit_move_insn (str_rtx, result);
4787 return true;
4788
4789 case BIT_IOR_EXPR:
4790 case BIT_XOR_EXPR:
4791 if (TREE_CODE (op1) != INTEGER_CST)
4792 break;
4793 value = expand_expr (op1, NULL_RTX, str_mode, EXPAND_NORMAL);
4794 value = convert_modes (str_mode,
4795 TYPE_MODE (TREE_TYPE (op1)), value,
4796 TYPE_UNSIGNED (TREE_TYPE (op1)));
4797
4798 /* We may be accessing data outside the field, which means
4799 we can alias adjacent data. */
4800 if (MEM_P (str_rtx))
4801 {
4802 str_rtx = shallow_copy_rtx (str_rtx);
4803 set_mem_alias_set (str_rtx, 0);
4804 set_mem_expr (str_rtx, 0);
4805 }
4806
4807 binop = code == BIT_IOR_EXPR ? ior_optab : xor_optab;
4808 if (bitpos + bitsize != str_bitsize)
4809 {
4810 rtx mask = gen_int_mode ((HOST_WIDE_INT_1U << bitsize) - 1,
4811 str_mode);
4812 value = expand_and (str_mode, value, mask, NULL_RTX);
4813 }
4814 value = expand_shift (LSHIFT_EXPR, str_mode, value, bitpos, NULL_RTX, 1);
4815 if (reverse)
4816 value = flip_storage_order (str_mode, value);
4817 result = expand_binop (str_mode, binop, str_rtx,
4818 value, str_rtx, 1, OPTAB_WIDEN);
4819 if (result != str_rtx)
4820 emit_move_insn (str_rtx, result);
4821 return true;
4822
4823 default:
4824 break;
4825 }
4826
4827 return false;
4828 }
4829
4830 /* In the C++ memory model, consecutive bit fields in a structure are
4831 considered one memory location.
4832
4833 Given a COMPONENT_REF EXP at position (BITPOS, OFFSET), this function
4834 returns the bit range of consecutive bits in which this COMPONENT_REF
4835 belongs. The values are returned in *BITSTART and *BITEND. *BITPOS
4836 and *OFFSET may be adjusted in the process.
4837
4838 If the access does not need to be restricted, 0 is returned in both
4839 *BITSTART and *BITEND. */
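
/* For example, given

	struct S { char a; int b : 7; int c : 9; char d; };

   the bit fields B and C share one representative, so a store to C
   is confined to the bit region covering B and C and must not touch
   A or D.  */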
4840
4841 void
4842 get_bit_range (poly_uint64_pod *bitstart, poly_uint64_pod *bitend, tree exp,
4843 poly_int64_pod *bitpos, tree *offset)
4844 {
4845 poly_int64 bitoffset;
4846 tree field, repr;
4847
4848 gcc_assert (TREE_CODE (exp) == COMPONENT_REF);
4849
4850 field = TREE_OPERAND (exp, 1);
4851 repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
4852 /* If we do not have a DECL_BIT_FIELD_REPRESENTATIVE there is no
4853 need to limit the range we can access. */
4854 if (!repr)
4855 {
4856 *bitstart = *bitend = 0;
4857 return;
4858 }
4859
4860 /* If we have a DECL_BIT_FIELD_REPRESENTATIVE but the enclosing record is
4861 part of a larger bit field, then the representative does not serve any
4862 useful purpose. This can occur in Ada. */
4863 if (handled_component_p (TREE_OPERAND (exp, 0)))
4864 {
4865 machine_mode rmode;
4866 poly_int64 rbitsize, rbitpos;
4867 tree roffset;
4868 int unsignedp, reversep, volatilep = 0;
4869 get_inner_reference (TREE_OPERAND (exp, 0), &rbitsize, &rbitpos,
4870 &roffset, &rmode, &unsignedp, &reversep,
4871 &volatilep);
4872 if (!multiple_p (rbitpos, BITS_PER_UNIT))
4873 {
4874 *bitstart = *bitend = 0;
4875 return;
4876 }
4877 }
4878
4879 /* Compute the adjustment to bitpos from the offset of the field
4880 relative to the representative. DECL_FIELD_OFFSET of field and
4881 repr are the same by construction if they are not constants,
4882 see finish_bitfield_layout. */
4883 poly_uint64 field_offset, repr_offset;
4884 if (poly_int_tree_p (DECL_FIELD_OFFSET (field), &field_offset)
4885 && poly_int_tree_p (DECL_FIELD_OFFSET (repr), &repr_offset))
4886 bitoffset = (field_offset - repr_offset) * BITS_PER_UNIT;
4887 else
4888 bitoffset = 0;
4889 bitoffset += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
4890 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
4891
4892 /* If the adjustment is larger than bitpos, we would have a negative bit
4893 position for the lower bound and this may wreak havoc later. Adjust
4894 offset and bitpos to make the lower bound non-negative in that case. */
4895 if (maybe_gt (bitoffset, *bitpos))
4896 {
4897 poly_int64 adjust_bits = upper_bound (bitoffset, *bitpos) - *bitpos;
4898 poly_int64 adjust_bytes = exact_div (adjust_bits, BITS_PER_UNIT);
4899
4900 *bitpos += adjust_bits;
4901 if (*offset == NULL_TREE)
4902 *offset = size_int (-adjust_bytes);
4903 else
4904 *offset = size_binop (MINUS_EXPR, *offset, size_int (adjust_bytes));
4905 *bitstart = 0;
4906 }
4907 else
4908 *bitstart = *bitpos - bitoffset;
4909
4910 *bitend = *bitstart + tree_to_uhwi (DECL_SIZE (repr)) - 1;
4911 }
4912
4913 /* Returns true if ADDR is an ADDR_EXPR of a DECL that does not reside
4914 in memory and has non-BLKmode. DECL_RTL must not be a MEM; if
4915 DECL_RTL was not set yet, return NORTL. */
4916
4917 static inline bool
4918 addr_expr_of_non_mem_decl_p_1 (tree addr, bool nortl)
4919 {
4920 if (TREE_CODE (addr) != ADDR_EXPR)
4921 return false;
4922
4923 tree base = TREE_OPERAND (addr, 0);
4924
4925 if (!DECL_P (base)
4926 || TREE_ADDRESSABLE (base)
4927 || DECL_MODE (base) == BLKmode)
4928 return false;
4929
4930 if (!DECL_RTL_SET_P (base))
4931 return nortl;
4932
4933 return (!MEM_P (DECL_RTL (base)));
4934 }
4935
4936 /* Returns true if the MEM_REF REF refers to an object that does not
4937 reside in memory and has non-BLKmode. */
4938
4939 static inline bool
4940 mem_ref_refers_to_non_mem_p (tree ref)
4941 {
4942 tree base = TREE_OPERAND (ref, 0);
4943 return addr_expr_of_non_mem_decl_p_1 (base, false);
4944 }
4945
4946 /* Expand an assignment that stores the value of FROM into TO. If NONTEMPORAL
4947 is true, try generating a nontemporal store. */
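
/* For example, a store through a misaligned MEM_REF on a
   strict-alignment target is handled below via the target's
   movmisalign<mode> pattern when available, or via store_bit_field
   otherwise.  */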
4948
4949 void
4950 expand_assignment (tree to, tree from, bool nontemporal)
4951 {
4952 rtx to_rtx = 0;
4953 rtx result;
4954 machine_mode mode;
4955 unsigned int align;
4956 enum insn_code icode;
4957
4958 /* Don't crash if the lhs of the assignment was erroneous. */
4959 if (TREE_CODE (to) == ERROR_MARK)
4960 {
4961 expand_normal (from);
4962 return;
4963 }
4964
4965 /* Optimize away no-op moves without side-effects. */
4966 if (operand_equal_p (to, from, 0))
4967 return;
4968
4969 /* Handle misaligned stores. */
4970 mode = TYPE_MODE (TREE_TYPE (to));
4971 if ((TREE_CODE (to) == MEM_REF
4972 || TREE_CODE (to) == TARGET_MEM_REF)
4973 && mode != BLKmode
4974 && !mem_ref_refers_to_non_mem_p (to)
4975 && ((align = get_object_alignment (to))
4976 < GET_MODE_ALIGNMENT (mode))
4977 && (((icode = optab_handler (movmisalign_optab, mode))
4978 != CODE_FOR_nothing)
4979 || targetm.slow_unaligned_access (mode, align)))
4980 {
4981 rtx reg, mem;
4982
4983 reg = expand_expr (from, NULL_RTX, VOIDmode, EXPAND_NORMAL);
4984 reg = force_not_mem (reg);
4985 mem = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
4986 if (TREE_CODE (to) == MEM_REF && REF_REVERSE_STORAGE_ORDER (to))
4987 reg = flip_storage_order (mode, reg);
4988
4989 if (icode != CODE_FOR_nothing)
4990 {
4991 struct expand_operand ops[2];
4992
4993 create_fixed_operand (&ops[0], mem);
4994 create_input_operand (&ops[1], reg, mode);
4995 /* The movmisalign<mode> pattern cannot fail, else the assignment
4996 would silently be omitted. */
4997 expand_insn (icode, 2, ops);
4998 }
4999 else
5000 store_bit_field (mem, GET_MODE_BITSIZE (mode), 0, 0, 0, mode, reg,
5001 false);
5002 return;
5003 }
5004
5005 /* Assignment of a structure component needs special treatment
5006 if the structure component's rtx is not simply a MEM.
5007 Assignment of an array element at a constant index, and assignment of
5008 an array element in an unaligned packed structure field, has the same
5009 problem. Same for (partially) storing into a non-memory object. */
5010 if (handled_component_p (to)
5011 || (TREE_CODE (to) == MEM_REF
5012 && (REF_REVERSE_STORAGE_ORDER (to)
5013 || mem_ref_refers_to_non_mem_p (to)))
5014 || TREE_CODE (TREE_TYPE (to)) == ARRAY_TYPE)
5015 {
5016 machine_mode mode1;
5017 poly_int64 bitsize, bitpos;
5018 poly_uint64 bitregion_start = 0;
5019 poly_uint64 bitregion_end = 0;
5020 tree offset;
5021 int unsignedp, reversep, volatilep = 0;
5022 tree tem;
5023
5024 push_temp_slots ();
5025 tem = get_inner_reference (to, &bitsize, &bitpos, &offset, &mode1,
5026 &unsignedp, &reversep, &volatilep);
5027
5028 /* Make sure bitpos is not negative, it can wreak havoc later. */
5029 if (maybe_lt (bitpos, 0))
5030 {
5031 gcc_assert (offset == NULL_TREE);
5032 offset = size_int (bits_to_bytes_round_down (bitpos));
5033 bitpos = num_trailing_bits (bitpos);
5034 }
5035
5036 if (TREE_CODE (to) == COMPONENT_REF
5037 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (to, 1)))
5038 get_bit_range (&bitregion_start, &bitregion_end, to, &bitpos, &offset);
5039 /* The C++ memory model naturally applies to byte-aligned fields.
5040 However, if we do not have a DECL_BIT_FIELD_TYPE but BITPOS or
5041 BITSIZE are not byte-aligned, there is no need to limit the range
5042 we can access. This can occur with packed structures in Ada. */
5043 else if (maybe_gt (bitsize, 0)
5044 && multiple_p (bitsize, BITS_PER_UNIT)
5045 && multiple_p (bitpos, BITS_PER_UNIT))
5046 {
5047 bitregion_start = bitpos;
5048 bitregion_end = bitpos + bitsize - 1;
5049 }
5050
5051 to_rtx = expand_expr (tem, NULL_RTX, VOIDmode, EXPAND_WRITE);
5052
5053 /* If the field has a mode, we want to access it in the
5054 field's mode, not the computed mode.
5055 If a MEM has VOIDmode (external with incomplete type),
5056 use BLKmode for it instead. */
5057 if (MEM_P (to_rtx))
5058 {
5059 if (mode1 != VOIDmode)
5060 to_rtx = adjust_address (to_rtx, mode1, 0);
5061 else if (GET_MODE (to_rtx) == VOIDmode)
5062 to_rtx = adjust_address (to_rtx, BLKmode, 0);
5063 }
5064
5065 if (offset != 0)
5066 {
5067 machine_mode address_mode;
5068 rtx offset_rtx;
5069
5070 if (!MEM_P (to_rtx))
5071 {
5072 /* We can get constant negative offsets into arrays with broken
5073 user code. Translate this to a trap instead of ICEing. */
5074 gcc_assert (TREE_CODE (offset) == INTEGER_CST);
5075 expand_builtin_trap ();
5076 to_rtx = gen_rtx_MEM (BLKmode, const0_rtx);
5077 }
5078
5079 offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode, EXPAND_SUM);
5080 address_mode = get_address_mode (to_rtx);
5081 if (GET_MODE (offset_rtx) != address_mode)
5082 {
5083 /* We cannot be sure that the RTL in offset_rtx is valid outside
5084 of a memory address context, so force it into a register
5085 before attempting to convert it to the desired mode. */
5086 offset_rtx = force_operand (offset_rtx, NULL_RTX);
5087 offset_rtx = convert_to_mode (address_mode, offset_rtx, 0);
5088 }
5089
5090 /* If we have an expression in OFFSET_RTX and a non-zero
5091 byte offset in BITPOS, adding the byte offset before the
5092 OFFSET_RTX results in better intermediate code, which makes
5093 later rtl optimization passes perform better.
5094
5095 We prefer intermediate code like this:
5096
5097 r124:DI=r123:DI+0x18
5098 [r124:DI]=r121:DI
5099
5100 ... instead of ...
5101
5102 r124:DI=r123:DI+0x10
5103 [r124:DI+0x8]=r121:DI
5104
5105 This is only done for aligned data values, as these can
5106 be expected to result in single move instructions. */
5107 poly_int64 bytepos;
5108 if (mode1 != VOIDmode
5109 && maybe_ne (bitpos, 0)
5110 && maybe_gt (bitsize, 0)
5111 && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
5112 && multiple_p (bitpos, bitsize)
5113 && multiple_p (bitsize, GET_MODE_ALIGNMENT (mode1))
5114 && MEM_ALIGN (to_rtx) >= GET_MODE_ALIGNMENT (mode1))
5115 {
5116 to_rtx = adjust_address (to_rtx, mode1, bytepos);
5117 bitregion_start = 0;
5118 if (known_ge (bitregion_end, poly_uint64 (bitpos)))
5119 bitregion_end -= bitpos;
5120 bitpos = 0;
5121 }
5122
5123 to_rtx = offset_address (to_rtx, offset_rtx,
5124 highest_pow2_factor_for_target (to,
5125 offset));
5126 }
5127
5128 /* No action is needed if the target is not a memory and the field
5129 lies completely outside that target. This can occur if the source
5130 code contains an out-of-bounds access to a small array. */
5131 if (!MEM_P (to_rtx)
5132 && GET_MODE (to_rtx) != BLKmode
5133 && known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (to_rtx))))
5134 {
5135 expand_normal (from);
5136 result = NULL;
5137 }
5138 /* Handle expand_expr of a complex value returning a CONCAT. */
5139 else if (GET_CODE (to_rtx) == CONCAT)
5140 {
5141 machine_mode to_mode = GET_MODE (to_rtx);
5142 gcc_checking_assert (COMPLEX_MODE_P (to_mode));
5143 poly_int64 mode_bitsize = GET_MODE_BITSIZE (to_mode);
5144 unsigned short inner_bitsize = GET_MODE_UNIT_BITSIZE (to_mode);
5145 if (TYPE_MODE (TREE_TYPE (from)) == GET_MODE (to_rtx)
5146 && COMPLEX_MODE_P (GET_MODE (to_rtx))
5147 && known_eq (bitpos, 0)
5148 && known_eq (bitsize, mode_bitsize))
5149 result = store_expr (from, to_rtx, false, nontemporal, reversep);
5150 else if (known_eq (bitsize, inner_bitsize)
5151 && (known_eq (bitpos, 0)
5152 || known_eq (bitpos, inner_bitsize)))
5153 result = store_expr (from, XEXP (to_rtx, maybe_ne (bitpos, 0)),
5154 false, nontemporal, reversep);
5155 else if (known_le (bitpos + bitsize, inner_bitsize))
5156 result = store_field (XEXP (to_rtx, 0), bitsize, bitpos,
5157 bitregion_start, bitregion_end,
5158 mode1, from, get_alias_set (to),
5159 nontemporal, reversep);
5160 else if (known_ge (bitpos, inner_bitsize))
5161 result = store_field (XEXP (to_rtx, 1), bitsize,
5162 bitpos - inner_bitsize,
5163 bitregion_start, bitregion_end,
5164 mode1, from, get_alias_set (to),
5165 nontemporal, reversep);
5166 else if (known_eq (bitpos, 0) && known_eq (bitsize, mode_bitsize))
5167 {
5168 result = expand_normal (from);
5169 if (GET_CODE (result) == CONCAT)
5170 {
5171 to_mode = GET_MODE_INNER (to_mode);
5172 machine_mode from_mode = GET_MODE_INNER (GET_MODE (result));
5173 rtx from_real
5174 = simplify_gen_subreg (to_mode, XEXP (result, 0),
5175 from_mode, 0);
5176 rtx from_imag
5177 = simplify_gen_subreg (to_mode, XEXP (result, 1),
5178 from_mode, 0);
5179 if (!from_real || !from_imag)
5180 goto concat_store_slow;
5181 emit_move_insn (XEXP (to_rtx, 0), from_real);
5182 emit_move_insn (XEXP (to_rtx, 1), from_imag);
5183 }
5184 else
5185 {
5186 rtx from_rtx
5187 = simplify_gen_subreg (to_mode, result,
5188 TYPE_MODE (TREE_TYPE (from)), 0);
5189 if (from_rtx)
5190 {
5191 emit_move_insn (XEXP (to_rtx, 0),
5192 read_complex_part (from_rtx, false));
5193 emit_move_insn (XEXP (to_rtx, 1),
5194 read_complex_part (from_rtx, true));
5195 }
5196 else
5197 {
5198 machine_mode to_mode
5199 = GET_MODE_INNER (GET_MODE (to_rtx));
5200 rtx from_real
5201 = simplify_gen_subreg (to_mode, result,
5202 TYPE_MODE (TREE_TYPE (from)),
5203 0);
5204 rtx from_imag
5205 = simplify_gen_subreg (to_mode, result,
5206 TYPE_MODE (TREE_TYPE (from)),
5207 GET_MODE_SIZE (to_mode));
5208 if (!from_real || !from_imag)
5209 goto concat_store_slow;
5210 emit_move_insn (XEXP (to_rtx, 0), from_real);
5211 emit_move_insn (XEXP (to_rtx, 1), from_imag);
5212 }
5213 }
5214 }
5215 else
5216 {
5217 concat_store_slow:;
5218 rtx temp = assign_stack_temp (to_mode,
5219 GET_MODE_SIZE (GET_MODE (to_rtx)));
5220 write_complex_part (temp, XEXP (to_rtx, 0), false);
5221 write_complex_part (temp, XEXP (to_rtx, 1), true);
5222 result = store_field (temp, bitsize, bitpos,
5223 bitregion_start, bitregion_end,
5224 mode1, from, get_alias_set (to),
5225 nontemporal, reversep);
5226 emit_move_insn (XEXP (to_rtx, 0), read_complex_part (temp, false));
5227 emit_move_insn (XEXP (to_rtx, 1), read_complex_part (temp, true));
5228 }
5229 }
5230 else
5231 {
5232 if (MEM_P (to_rtx))
5233 {
5234 /* If the field is at offset zero, we could have been given the
5235 DECL_RTX of the parent struct. Don't munge it. */
5236 to_rtx = shallow_copy_rtx (to_rtx);
5237 set_mem_attributes_minus_bitpos (to_rtx, to, 0, bitpos);
5238 if (volatilep)
5239 MEM_VOLATILE_P (to_rtx) = 1;
5240 }
5241
5242 if (optimize_bitfield_assignment_op (bitsize, bitpos,
5243 bitregion_start, bitregion_end,
5244 mode1, to_rtx, to, from,
5245 reversep))
5246 result = NULL;
5247 else
5248 result = store_field (to_rtx, bitsize, bitpos,
5249 bitregion_start, bitregion_end,
5250 mode1, from, get_alias_set (to),
5251 nontemporal, reversep);
5252 }
5253
5254 if (result)
5255 preserve_temp_slots (result);
5256 pop_temp_slots ();
5257 return;
5258 }
5259
5260 /* If the rhs is a function call and its value is not an aggregate,
5261 call the function before we start to compute the lhs.
5262 This is needed for correct code for cases such as
5263 val = setjmp (buf) on machines where reference to val
5264 requires loading up part of an address in a separate insn.
5265
5266 Don't do this if TO is a VAR_DECL or PARM_DECL whose DECL_RTL is REG
5267 since it might be a promoted variable where the zero- or sign- extension
5268 needs to be done. Handling this in the normal way is safe because no
5269 computation is done before the call. The same is true for SSA names. */
5270 if (TREE_CODE (from) == CALL_EXPR && ! aggregate_value_p (from, from)
5271 && COMPLETE_TYPE_P (TREE_TYPE (from))
5272 && TREE_CODE (TYPE_SIZE (TREE_TYPE (from))) == INTEGER_CST
5273 && ! (((VAR_P (to)
5274 || TREE_CODE (to) == PARM_DECL
5275 || TREE_CODE (to) == RESULT_DECL)
5276 && REG_P (DECL_RTL (to)))
5277 || TREE_CODE (to) == SSA_NAME))
5278 {
5279 rtx value;
5280 rtx bounds;
5281
5282 push_temp_slots ();
5283 value = expand_normal (from);
5284
5285 /* Split value and bounds to store them separately. */
5286 chkp_split_slot (value, &value, &bounds);
5287
5288 if (to_rtx == 0)
5289 to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
5290
5291 /* Handle calls that return values in multiple non-contiguous locations.
5292 The Irix 6 ABI has examples of this. */
5293 if (GET_CODE (to_rtx) == PARALLEL)
5294 {
5295 if (GET_CODE (value) == PARALLEL)
5296 emit_group_move (to_rtx, value);
5297 else
5298 emit_group_load (to_rtx, value, TREE_TYPE (from),
5299 int_size_in_bytes (TREE_TYPE (from)));
5300 }
5301 else if (GET_CODE (value) == PARALLEL)
5302 emit_group_store (to_rtx, value, TREE_TYPE (from),
5303 int_size_in_bytes (TREE_TYPE (from)));
5304 else if (GET_MODE (to_rtx) == BLKmode)
5305 {
5306 /* Handle calls that return BLKmode values in registers. */
5307 if (REG_P (value))
5308 copy_blkmode_from_reg (to_rtx, value, TREE_TYPE (from));
5309 else
5310 emit_block_move (to_rtx, value, expr_size (from), BLOCK_OP_NORMAL);
5311 }
5312 else
5313 {
5314 if (POINTER_TYPE_P (TREE_TYPE (to)))
5315 value = convert_memory_address_addr_space
5316 (as_a <scalar_int_mode> (GET_MODE (to_rtx)), value,
5317 TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (to))));
5318
5319 emit_move_insn (to_rtx, value);
5320 }
5321
5322 /* Store bounds if required. */
5323 if (bounds
5324 && (BOUNDED_P (to) || chkp_type_has_pointer (TREE_TYPE (to))))
5325 {
5326 gcc_assert (MEM_P (to_rtx));
5327 chkp_emit_bounds_store (bounds, value, to_rtx);
5328 }
5329
5330 preserve_temp_slots (to_rtx);
5331 pop_temp_slots ();
5332 return;
5333 }
5334
5335 /* Ordinary treatment. Expand TO to get a REG or MEM rtx. */
5336 to_rtx = expand_expr (to, NULL_RTX, VOIDmode, EXPAND_WRITE);
5337
5338 /* Don't move directly into a return register. */
5339 if (TREE_CODE (to) == RESULT_DECL
5340 && (REG_P (to_rtx) || GET_CODE (to_rtx) == PARALLEL))
5341 {
5342 rtx temp;
5343
5344 push_temp_slots ();
5345
5346 /* If the source is itself a return value, it is still in a pseudo at
5347 this point so we can move it back to the return register directly. */
5348 if (REG_P (to_rtx)
5349 && TYPE_MODE (TREE_TYPE (from)) == BLKmode
5350 && TREE_CODE (from) != CALL_EXPR)
5351 temp = copy_blkmode_to_reg (GET_MODE (to_rtx), from);
5352 else
5353 temp = expand_expr (from, NULL_RTX, GET_MODE (to_rtx), EXPAND_NORMAL);
5354
5355 /* Handle calls that return values in multiple non-contiguous locations.
5356 The Irix 6 ABI has examples of this. */
5357 if (GET_CODE (to_rtx) == PARALLEL)
5358 {
5359 if (GET_CODE (temp) == PARALLEL)
5360 emit_group_move (to_rtx, temp);
5361 else
5362 emit_group_load (to_rtx, temp, TREE_TYPE (from),
5363 int_size_in_bytes (TREE_TYPE (from)));
5364 }
5365 else if (temp)
5366 emit_move_insn (to_rtx, temp);
5367
5368 preserve_temp_slots (to_rtx);
5369 pop_temp_slots ();
5370 return;
5371 }
5372
5373 /* In case we are returning the contents of an object which overlaps
5374 the place the value is being stored, use a safe function when copying
5375 a value through a pointer into a structure value return block. */
5376 if (TREE_CODE (to) == RESULT_DECL
5377 && TREE_CODE (from) == INDIRECT_REF
5378 && ADDR_SPACE_GENERIC_P
5379 (TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (from, 0)))))
5380 && refs_may_alias_p (to, from)
5381 && cfun->returns_struct
5382 && !cfun->returns_pcc_struct)
5383 {
5384 rtx from_rtx, size;
5385
5386 push_temp_slots ();
5387 size = expr_size (from);
5388 from_rtx = expand_normal (from);
5389
5390 emit_block_move_via_libcall (XEXP (to_rtx, 0), XEXP (from_rtx, 0), size);
5391
5392 preserve_temp_slots (to_rtx);
5393 pop_temp_slots ();
5394 return;
5395 }
5396
5397 /* Compute FROM and store the value in the rtx we got. */
5398
5399 push_temp_slots ();
5400 result = store_expr_with_bounds (from, to_rtx, 0, nontemporal, false, to);
5401 preserve_temp_slots (result);
5402 pop_temp_slots ();
5403 return;
5404 }
5405
5406 /* Emits a nontemporal store insn that moves FROM to TO. Returns true if this
5407 succeeded, false otherwise. */
5408
5409 bool
5410 emit_storent_insn (rtx to, rtx from)
5411 {
5412 struct expand_operand ops[2];
5413 machine_mode mode = GET_MODE (to);
5414 enum insn_code code = optab_handler (storent_optab, mode);
5415
5416 if (code == CODE_FOR_nothing)
5417 return false;
5418
5419 create_fixed_operand (&ops[0], to);
5420 create_input_operand (&ops[1], from, mode);
5421 return maybe_expand_insn (code, 2, ops);
5422 }
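/* A hedged usage sketch, mirroring the caller later in this file:

     if (nontemporal && emit_storent_insn (target, temp))
       ;  /* done: the target provided a nontemporal store pattern */
     else
       emit_move_insn (target, temp);

   A false return simply means "no storent pattern for this mode, use the
   ordinary move path".  Targets that provide storent_optab typically map
   it to non-temporal stores such as x86's movnti/movntps.  */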
5423
5424 /* Generate code for computing expression EXP,
5425 and storing the value into TARGET.
5426
5427 If the mode is BLKmode then we may return TARGET itself.
5428 It turns out that in BLKmode it doesn't cause a problem,
5429 because C has no operators that could combine two different
5430 assignments into the same BLKmode object with different values
5431 with no sequence point. Will other languages need this to
5432 be more thorough?
5433
5434 If CALL_PARAM_P is nonzero, this is a store into a call param on the
5435 stack, and block moves may need to be treated specially.
5436
5437 If NONTEMPORAL is true, try using a nontemporal store instruction.
5438
5439 If REVERSE is true, the store is to be done in reverse order.
5440
5441 If BTARGET is not NULL then computed bounds of EXP are
5442 associated with BTARGET. */
5443
5444 rtx
5445 store_expr_with_bounds (tree exp, rtx target, int call_param_p,
5446 bool nontemporal, bool reverse, tree btarget)
5447 {
5448 rtx temp;
5449 rtx alt_rtl = NULL_RTX;
5450 location_t loc = curr_insn_location ();
5451
5452 if (VOID_TYPE_P (TREE_TYPE (exp)))
5453 {
5454 /* C++ can generate ?: expressions with a throw expression in one
5455 branch and an rvalue in the other. Here, we resolve attempts to
5456 store the throw expression's nonexistent result. */
5457 gcc_assert (!call_param_p);
5458 expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL);
5459 return NULL_RTX;
5460 }
5461 if (TREE_CODE (exp) == COMPOUND_EXPR)
5462 {
5463 /* Perform first part of compound expression, then assign from second
5464 part. */
5465 expand_expr (TREE_OPERAND (exp, 0), const0_rtx, VOIDmode,
5466 call_param_p ? EXPAND_STACK_PARM : EXPAND_NORMAL);
5467 return store_expr_with_bounds (TREE_OPERAND (exp, 1), target,
5468 call_param_p, nontemporal, reverse,
5469 btarget);
5470 }
5471 else if (TREE_CODE (exp) == COND_EXPR && GET_MODE (target) == BLKmode)
5472 {
5473 /* For conditional expression, get safe form of the target. Then
5474 test the condition, doing the appropriate assignment on either
5475 side. This avoids the creation of unnecessary temporaries.
5476 For non-BLKmode, it is more efficient not to do this. */
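/* A hedged sketch of the control flow emitted below:

     if (!cond) goto lab1;
     <store TREE_OPERAND (exp, 1) into TARGET>; goto lab2;
   lab1:
     <store TREE_OPERAND (exp, 2) into TARGET>;
   lab2:

   Each arm writes straight into TARGET, so no temporary is needed.  */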
5477
5478 rtx_code_label *lab1 = gen_label_rtx (), *lab2 = gen_label_rtx ();
5479
5480 do_pending_stack_adjust ();
5481 NO_DEFER_POP;
5482 jumpifnot (TREE_OPERAND (exp, 0), lab1,
5483 profile_probability::uninitialized ());
5484 store_expr_with_bounds (TREE_OPERAND (exp, 1), target, call_param_p,
5485 nontemporal, reverse, btarget);
5486 emit_jump_insn (targetm.gen_jump (lab2));
5487 emit_barrier ();
5488 emit_label (lab1);
5489 store_expr_with_bounds (TREE_OPERAND (exp, 2), target, call_param_p,
5490 nontemporal, reverse, btarget);
5491 emit_label (lab2);
5492 OK_DEFER_POP;
5493
5494 return NULL_RTX;
5495 }
5496 else if (GET_CODE (target) == SUBREG && SUBREG_PROMOTED_VAR_P (target))
5497 /* If this is a scalar in a register that is stored in a wider mode
5498 than the declared mode, compute the result into its declared mode
5499 and then convert to the wider mode. Our value is the computed
5500 expression. */
5501 {
5502 rtx inner_target = 0;
5503 scalar_int_mode outer_mode = subreg_unpromoted_mode (target);
5504 scalar_int_mode inner_mode = subreg_promoted_mode (target);
5505
5506 /* We can do the conversion inside EXP, which will often result
5507 in some optimizations. Do the conversion in two steps: first
5508 change the signedness, if needed, then the extend. But don't
5509 do this if the type of EXP is a subtype of something else
5510 since then the conversion might involve more than just
5511 converting modes. */
5512 if (INTEGRAL_TYPE_P (TREE_TYPE (exp))
5513 && TREE_TYPE (TREE_TYPE (exp)) == 0
5514 && GET_MODE_PRECISION (outer_mode)
5515 == TYPE_PRECISION (TREE_TYPE (exp)))
5516 {
5517 if (!SUBREG_CHECK_PROMOTED_SIGN (target,
5518 TYPE_UNSIGNED (TREE_TYPE (exp))))
5519 {
5520 /* Some types, e.g. Fortran's logical*4, won't have a signed
5521 version, so use the mode instead. */
5522 tree ntype
5523 = (signed_or_unsigned_type_for
5524 (SUBREG_PROMOTED_SIGN (target), TREE_TYPE (exp)));
5525 if (ntype == NULL)
5526 ntype = lang_hooks.types.type_for_mode
5527 (TYPE_MODE (TREE_TYPE (exp)),
5528 SUBREG_PROMOTED_SIGN (target));
5529
5530 exp = fold_convert_loc (loc, ntype, exp);
5531 }
5532
5533 exp = fold_convert_loc (loc, lang_hooks.types.type_for_mode
5534 (inner_mode, SUBREG_PROMOTED_SIGN (target)),
5535 exp);
5536
5537 inner_target = SUBREG_REG (target);
5538 }
5539
5540 temp = expand_expr (exp, inner_target, VOIDmode,
5541 call_param_p ? EXPAND_STACK_PARM : EXPAND_NORMAL);
5542
5543 /* Handle bounds returned by call. */
5544 if (TREE_CODE (exp) == CALL_EXPR)
5545 {
5546 rtx bounds;
5547 chkp_split_slot (temp, &temp, &bounds);
5548 if (bounds && btarget)
5549 {
5550 gcc_assert (TREE_CODE (btarget) == SSA_NAME);
5551 rtx tmp = targetm.calls.load_returned_bounds (bounds);
5552 chkp_set_rtl_bounds (btarget, tmp);
5553 }
5554 }
5555
5556 /* If TEMP is a VOIDmode constant, use convert_modes to make
5557 sure that we properly convert it. */
5558 if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode)
5559 {
5560 temp = convert_modes (outer_mode, TYPE_MODE (TREE_TYPE (exp)),
5561 temp, SUBREG_PROMOTED_SIGN (target));
5562 temp = convert_modes (inner_mode, outer_mode, temp,
5563 SUBREG_PROMOTED_SIGN (target));
5564 }
5565
5566 convert_move (SUBREG_REG (target), temp,
5567 SUBREG_PROMOTED_SIGN (target));
5568
5569 return NULL_RTX;
5570 }
5571 else if ((TREE_CODE (exp) == STRING_CST
5572 || (TREE_CODE (exp) == MEM_REF
5573 && TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
5574 && TREE_CODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
5575 == STRING_CST
5576 && integer_zerop (TREE_OPERAND (exp, 1))))
5577 && !nontemporal && !call_param_p
5578 && MEM_P (target))
5579 {
5580 /* Optimize initialization of an array with a STRING_CST. */
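/* E.g. (a hedged example): "char buf[16] = "abc";" stores the string
   bytes with store_by_pieces and clears the remaining tail with
   clear_storage, provided can_store_by_pieces agrees that the target's
   alignment allows it.  */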
5581 HOST_WIDE_INT exp_len, str_copy_len;
5582 rtx dest_mem;
5583 tree str = TREE_CODE (exp) == STRING_CST
5584 ? exp : TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
5585
5586 exp_len = int_expr_size (exp);
5587 if (exp_len <= 0)
5588 goto normal_expr;
5589
5590 if (TREE_STRING_LENGTH (str) <= 0)
5591 goto normal_expr;
5592
5593 str_copy_len = strlen (TREE_STRING_POINTER (str));
5594 if (str_copy_len < TREE_STRING_LENGTH (str) - 1)
5595 goto normal_expr;
5596
5597 str_copy_len = TREE_STRING_LENGTH (str);
5598 if ((STORE_MAX_PIECES & (STORE_MAX_PIECES - 1)) == 0
5599 && TREE_STRING_POINTER (str)[TREE_STRING_LENGTH (str) - 1] == '\0')
5600 {
5601 str_copy_len += STORE_MAX_PIECES - 1;
5602 str_copy_len &= ~(STORE_MAX_PIECES - 1);
5603 }
5604 str_copy_len = MIN (str_copy_len, exp_len);
5605 if (!can_store_by_pieces (str_copy_len, builtin_strncpy_read_str,
5606 CONST_CAST (char *, TREE_STRING_POINTER (str)),
5607 MEM_ALIGN (target), false))
5608 goto normal_expr;
5609
5610 dest_mem = target;
5611
5612 dest_mem = store_by_pieces (dest_mem,
5613 str_copy_len, builtin_strncpy_read_str,
5614 CONST_CAST (char *,
5615 TREE_STRING_POINTER (str)),
5616 MEM_ALIGN (target), false,
5617 exp_len > str_copy_len ? 1 : 0);
5618 if (exp_len > str_copy_len)
5619 clear_storage (adjust_address (dest_mem, BLKmode, 0),
5620 GEN_INT (exp_len - str_copy_len),
5621 BLOCK_OP_NORMAL);
5622 return NULL_RTX;
5623 }
5624 else
5625 {
5626 rtx tmp_target;
5627
5628 normal_expr:
5629 /* If we want to use a nontemporal or a reverse order store, force the
5630 value into a register first. */
5631 tmp_target = nontemporal || reverse ? NULL_RTX : target;
5632 temp = expand_expr_real (exp, tmp_target, GET_MODE (target),
5633 (call_param_p
5634 ? EXPAND_STACK_PARM : EXPAND_NORMAL),
5635 &alt_rtl, false);
5636
5637 /* Handle bounds returned by call. */
5638 if (TREE_CODE (exp) == CALL_EXPR)
5639 {
5640 rtx bounds;
5641 chkp_split_slot (temp, &temp, &bounds);
5642 if (bounds && btarget)
5643 {
5644 gcc_assert (TREE_CODE (btarget) == SSA_NAME);
5645 rtx tmp = targetm.calls.load_returned_bounds (bounds);
5646 chkp_set_rtl_bounds (btarget, tmp);
5647 }
5648 }
5649 }
5650
5651 /* If TEMP is a VOIDmode constant and the mode of the type of EXP is not
5652 the same as that of TARGET, adjust the constant. This is needed, for
5653 example, in case it is a CONST_DOUBLE or CONST_WIDE_INT and we want
5654 only a word-sized value. */
5655 if (CONSTANT_P (temp) && GET_MODE (temp) == VOIDmode
5656 && TREE_CODE (exp) != ERROR_MARK
5657 && GET_MODE (target) != TYPE_MODE (TREE_TYPE (exp)))
5658 {
5659 if (GET_MODE_CLASS (GET_MODE (target))
5660 != GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (exp)))
5661 && known_eq (GET_MODE_BITSIZE (GET_MODE (target)),
5662 GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (exp)))))
5663 {
5664 rtx t = simplify_gen_subreg (GET_MODE (target), temp,
5665 TYPE_MODE (TREE_TYPE (exp)), 0);
5666 if (t)
5667 temp = t;
5668 }
5669 if (GET_MODE (temp) == VOIDmode)
5670 temp = convert_modes (GET_MODE (target), TYPE_MODE (TREE_TYPE (exp)),
5671 temp, TYPE_UNSIGNED (TREE_TYPE (exp)));
5672 }
5673
5674 /* If value was not generated in the target, store it there.
5675 Convert the value to TARGET's type first if necessary and emit the
5676 pending increments that have been queued when expanding EXP.
5677 Note that we cannot emit the whole queue blindly because this will
5678 effectively disable the POST_INC optimization later.
5679
5680 If TEMP and TARGET compare equal according to rtx_equal_p, but
5681 one or both of them are volatile memory refs, we have to distinguish
5682 two cases:
5683 - expand_expr has used TARGET. In this case, we must not generate
5684 another copy. This can be detected by TARGET being equal according
5685 to == .
5686 - expand_expr has not used TARGET - that means that the source just
5687 happens to have the same RTX form. Since temp will have been created
5688 by expand_expr, it will compare unequal according to == .
5689 We must generate a copy in this case, to reach the correct number
5690 of volatile memory references. */
5691
5692 if ((! rtx_equal_p (temp, target)
5693 || (temp != target && (side_effects_p (temp)
5694 || side_effects_p (target))))
5695 && TREE_CODE (exp) != ERROR_MARK
5696 /* If store_expr stores a DECL whose DECL_RTL(exp) == TARGET,
5697 but TARGET is not valid memory reference, TEMP will differ
5698 from TARGET although it is really the same location. */
5699 && !(alt_rtl
5700 && rtx_equal_p (alt_rtl, target)
5701 && !side_effects_p (alt_rtl)
5702 && !side_effects_p (target))
5703 /* If there's nothing to copy, don't bother. Don't call
5704 expr_size unless necessary, because some front-ends' (C++)
5705 expr_size hook must not be given objects that are not
5706 supposed to be bit-copied or bit-initialized. */
5707 && expr_size (exp) != const0_rtx)
5708 {
5709 if (GET_MODE (temp) != GET_MODE (target) && GET_MODE (temp) != VOIDmode)
5710 {
5711 if (GET_MODE (target) == BLKmode)
5712 {
5713 /* Handle calls that return BLKmode values in registers. */
5714 if (REG_P (temp) && TREE_CODE (exp) == CALL_EXPR)
5715 copy_blkmode_from_reg (target, temp, TREE_TYPE (exp));
5716 else
5717 store_bit_field (target,
5718 INTVAL (expr_size (exp)) * BITS_PER_UNIT,
5719 0, 0, 0, GET_MODE (temp), temp, reverse);
5720 }
5721 else
5722 convert_move (target, temp, TYPE_UNSIGNED (TREE_TYPE (exp)));
5723 }
5724
5725 else if (GET_MODE (temp) == BLKmode && TREE_CODE (exp) == STRING_CST)
5726 {
5727 /* Handle copying a string constant into an array. The string
5728 constant may be shorter than the array. So copy just the string's
5729 actual length, and clear the rest. First get the size of the data
5730 type of the string, which is actually the size of the target. */
5731 rtx size = expr_size (exp);
5732
5733 if (CONST_INT_P (size)
5734 && INTVAL (size) < TREE_STRING_LENGTH (exp))
5735 emit_block_move (target, temp, size,
5736 (call_param_p
5737 ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
5738 else
5739 {
5740 machine_mode pointer_mode
5741 = targetm.addr_space.pointer_mode (MEM_ADDR_SPACE (target));
5742 machine_mode address_mode = get_address_mode (target);
5743
5744 /* Compute the size of the data to copy from the string. */
5745 tree copy_size
5746 = size_binop_loc (loc, MIN_EXPR,
5747 make_tree (sizetype, size),
5748 size_int (TREE_STRING_LENGTH (exp)));
5749 rtx copy_size_rtx
5750 = expand_expr (copy_size, NULL_RTX, VOIDmode,
5751 (call_param_p
5752 ? EXPAND_STACK_PARM : EXPAND_NORMAL));
5753 rtx_code_label *label = 0;
5754
5755 /* Copy that much. */
5756 copy_size_rtx = convert_to_mode (pointer_mode, copy_size_rtx,
5757 TYPE_UNSIGNED (sizetype));
5758 emit_block_move (target, temp, copy_size_rtx,
5759 (call_param_p
5760 ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
5761
5762 /* Figure out how much is left in TARGET that we have to clear.
5763 Do all calculations in pointer_mode. */
5764 if (CONST_INT_P (copy_size_rtx))
5765 {
5766 size = plus_constant (address_mode, size,
5767 -INTVAL (copy_size_rtx));
5768 target = adjust_address (target, BLKmode,
5769 INTVAL (copy_size_rtx));
5770 }
5771 else
5772 {
5773 size = expand_binop (TYPE_MODE (sizetype), sub_optab, size,
5774 copy_size_rtx, NULL_RTX, 0,
5775 OPTAB_LIB_WIDEN);
5776
5777 if (GET_MODE (copy_size_rtx) != address_mode)
5778 copy_size_rtx = convert_to_mode (address_mode,
5779 copy_size_rtx,
5780 TYPE_UNSIGNED (sizetype));
5781
5782 target = offset_address (target, copy_size_rtx,
5783 highest_pow2_factor (copy_size));
5784 label = gen_label_rtx ();
5785 emit_cmp_and_jump_insns (size, const0_rtx, LT, NULL_RTX,
5786 GET_MODE (size), 0, label);
5787 }
5788
5789 if (size != const0_rtx)
5790 clear_storage (target, size, BLOCK_OP_NORMAL);
5791
5792 if (label)
5793 emit_label (label);
5794 }
5795 }
5796 /* Handle calls that return values in multiple non-contiguous locations.
5797 The Irix 6 ABI has examples of this. */
5798 else if (GET_CODE (target) == PARALLEL)
5799 {
5800 if (GET_CODE (temp) == PARALLEL)
5801 emit_group_move (target, temp);
5802 else
5803 emit_group_load (target, temp, TREE_TYPE (exp),
5804 int_size_in_bytes (TREE_TYPE (exp)));
5805 }
5806 else if (GET_CODE (temp) == PARALLEL)
5807 emit_group_store (target, temp, TREE_TYPE (exp),
5808 int_size_in_bytes (TREE_TYPE (exp)));
5809 else if (GET_MODE (temp) == BLKmode)
5810 emit_block_move (target, temp, expr_size (exp),
5811 (call_param_p
5812 ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
5813 /* If we emit a nontemporal store, there is nothing else to do. */
5814 else if (nontemporal && emit_storent_insn (target, temp))
5815 ;
5816 else
5817 {
5818 if (reverse)
5819 temp = flip_storage_order (GET_MODE (target), temp);
5820 temp = force_operand (temp, target);
5821 if (temp != target)
5822 emit_move_insn (target, temp);
5823 }
5824 }
5825
5826 return NULL_RTX;
5827 }
5828
5829 /* Same as store_expr_with_bounds but ignoring bounds of EXP. */
5830 rtx
5831 store_expr (tree exp, rtx target, int call_param_p, bool nontemporal,
5832 bool reverse)
5833 {
5834 return store_expr_with_bounds (exp, target, call_param_p, nontemporal,
5835 reverse, NULL);
5836 }
5837 \f
5838 /* Return true if field F of structure TYPE is a flexible array member. */
5839
5840 static bool
5841 flexible_array_member_p (const_tree f, const_tree type)
5842 {
5843 const_tree tf;
5844
5845 tf = TREE_TYPE (f);
5846 return (DECL_CHAIN (f) == NULL
5847 && TREE_CODE (tf) == ARRAY_TYPE
5848 && TYPE_DOMAIN (tf)
5849 && TYPE_MIN_VALUE (TYPE_DOMAIN (tf))
5850 && integer_zerop (TYPE_MIN_VALUE (TYPE_DOMAIN (tf)))
5851 && !TYPE_MAX_VALUE (TYPE_DOMAIN (tf))
5852 && int_size_in_bytes (type) >= 0);
5853 }
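/* For illustration (a hedged example, not from the sources): the C99 form
   accepted here is

     struct msg { int len; char data[]; };

   DATA is the last field, its array domain has a zero minimum and no
   maximum, and the enclosing struct still has a known size.  */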
5854
5855 /* If FOR_CTOR_P, return the number of top-level elements that a constructor
5856 must have in order for it to completely initialize a value of type TYPE.
5857 Return -1 if the number isn't known.
5858
5859 If !FOR_CTOR_P, return an estimate of the number of scalars in TYPE. */
5860
5861 static HOST_WIDE_INT
5862 count_type_elements (const_tree type, bool for_ctor_p)
5863 {
5864 switch (TREE_CODE (type))
5865 {
5866 case ARRAY_TYPE:
5867 {
5868 tree nelts;
5869
5870 nelts = array_type_nelts (type);
5871 if (nelts && tree_fits_uhwi_p (nelts))
5872 {
5873 unsigned HOST_WIDE_INT n;
5874
5875 n = tree_to_uhwi (nelts) + 1;
5876 if (n == 0 || for_ctor_p)
5877 return n;
5878 else
5879 return n * count_type_elements (TREE_TYPE (type), false);
5880 }
5881 return for_ctor_p ? -1 : 1;
5882 }
5883
5884 case RECORD_TYPE:
5885 {
5886 unsigned HOST_WIDE_INT n;
5887 tree f;
5888
5889 n = 0;
5890 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
5891 if (TREE_CODE (f) == FIELD_DECL)
5892 {
5893 if (!for_ctor_p)
5894 n += count_type_elements (TREE_TYPE (f), false);
5895 else if (!flexible_array_member_p (f, type))
5896 /* Don't count flexible arrays, which are not supposed
5897 to be initialized. */
5898 n += 1;
5899 }
5900
5901 return n;
5902 }
5903
5904 case UNION_TYPE:
5905 case QUAL_UNION_TYPE:
5906 {
5907 tree f;
5908 HOST_WIDE_INT n, m;
5909
5910 gcc_assert (!for_ctor_p);
5911 /* Estimate the number of scalars in each field and pick the
5912 maximum. Other estimates would do instead; the idea is simply
5913 to make sure that the estimate is not sensitive to the ordering
5914 of the fields. */
5915 n = 1;
5916 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
5917 if (TREE_CODE (f) == FIELD_DECL)
5918 {
5919 m = count_type_elements (TREE_TYPE (f), false);
5920 /* If the field doesn't span the whole union, add an extra
5921 scalar for the rest. */
5922 if (simple_cst_equal (TYPE_SIZE (TREE_TYPE (f)),
5923 TYPE_SIZE (type)) != 1)
5924 m++;
5925 if (n < m)
5926 n = m;
5927 }
5928 return n;
5929 }
5930
5931 case COMPLEX_TYPE:
5932 return 2;
5933
5934 case VECTOR_TYPE:
5935 {
5936 unsigned HOST_WIDE_INT nelts;
5937 if (TYPE_VECTOR_SUBPARTS (type).is_constant (&nelts))
5938 return nelts;
5939 else
5940 return -1;
5941 }
5942
5943 case INTEGER_TYPE:
5944 case REAL_TYPE:
5945 case FIXED_POINT_TYPE:
5946 case ENUMERAL_TYPE:
5947 case BOOLEAN_TYPE:
5948 case POINTER_TYPE:
5949 case OFFSET_TYPE:
5950 case REFERENCE_TYPE:
5951 case NULLPTR_TYPE:
5952 return 1;
5953
5954 case ERROR_MARK:
5955 return 0;
5956
5957 case VOID_TYPE:
5958 case METHOD_TYPE:
5959 case FUNCTION_TYPE:
5960 case LANG_TYPE:
5961 default:
5962 gcc_unreachable ();
5963 }
5964 }
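/* A hedged worked example: for "struct s { int a; short b[4]; }",
   count_type_elements (S, true) is 2 (the two top-level fields a complete
   constructor must cover), while count_type_elements (S, false) is 5
   (one scalar for A plus four for B).  */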
5965
5966 /* Helper for categorize_ctor_elements. Identical interface. */
5967
5968 static bool
5969 categorize_ctor_elements_1 (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
5970 HOST_WIDE_INT *p_init_elts, bool *p_complete)
5971 {
5972 unsigned HOST_WIDE_INT idx;
5973 HOST_WIDE_INT nz_elts, init_elts, num_fields;
5974 tree value, purpose, elt_type;
5975
5976 /* Whether CTOR is a valid constant initializer, in accordance with what
5977 initializer_constant_valid_p does. If inferred from the constructor
5978 elements, true until proven otherwise. */
5979 bool const_from_elts_p = constructor_static_from_elts_p (ctor);
5980 bool const_p = const_from_elts_p ? true : TREE_STATIC (ctor);
5981
5982 nz_elts = 0;
5983 init_elts = 0;
5984 num_fields = 0;
5985 elt_type = NULL_TREE;
5986
5987 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), idx, purpose, value)
5988 {
5989 HOST_WIDE_INT mult = 1;
5990
5991 if (purpose && TREE_CODE (purpose) == RANGE_EXPR)
5992 {
5993 tree lo_index = TREE_OPERAND (purpose, 0);
5994 tree hi_index = TREE_OPERAND (purpose, 1);
5995
5996 if (tree_fits_uhwi_p (lo_index) && tree_fits_uhwi_p (hi_index))
5997 mult = (tree_to_uhwi (hi_index)
5998 - tree_to_uhwi (lo_index) + 1);
5999 }
6000 num_fields += mult;
6001 elt_type = TREE_TYPE (value);
6002
6003 switch (TREE_CODE (value))
6004 {
6005 case CONSTRUCTOR:
6006 {
6007 HOST_WIDE_INT nz = 0, ic = 0;
6008
6009 bool const_elt_p = categorize_ctor_elements_1 (value, &nz, &ic,
6010 p_complete);
6011
6012 nz_elts += mult * nz;
6013 init_elts += mult * ic;
6014
6015 if (const_from_elts_p && const_p)
6016 const_p = const_elt_p;
6017 }
6018 break;
6019
6020 case INTEGER_CST:
6021 case REAL_CST:
6022 case FIXED_CST:
6023 if (!initializer_zerop (value))
6024 nz_elts += mult;
6025 init_elts += mult;
6026 break;
6027
6028 case STRING_CST:
6029 nz_elts += mult * TREE_STRING_LENGTH (value);
6030 init_elts += mult * TREE_STRING_LENGTH (value);
6031 break;
6032
6033 case COMPLEX_CST:
6034 if (!initializer_zerop (TREE_REALPART (value)))
6035 nz_elts += mult;
6036 if (!initializer_zerop (TREE_IMAGPART (value)))
6037 nz_elts += mult;
6038 init_elts += mult;
6039 break;
6040
6041 case VECTOR_CST:
6042 {
6043 /* We can only construct constant-length vectors using
6044 CONSTRUCTOR. */
6045 unsigned int nunits = VECTOR_CST_NELTS (value).to_constant ();
6046 for (unsigned int i = 0; i < nunits; ++i)
6047 {
6048 tree v = VECTOR_CST_ELT (value, i);
6049 if (!initializer_zerop (v))
6050 nz_elts += mult;
6051 init_elts += mult;
6052 }
6053 }
6054 break;
6055
6056 default:
6057 {
6058 HOST_WIDE_INT tc = count_type_elements (elt_type, false);
6059 nz_elts += mult * tc;
6060 init_elts += mult * tc;
6061
6062 if (const_from_elts_p && const_p)
6063 const_p
6064 = initializer_constant_valid_p (value,
6065 elt_type,
6066 TYPE_REVERSE_STORAGE_ORDER
6067 (TREE_TYPE (ctor)))
6068 != NULL_TREE;
6069 }
6070 break;
6071 }
6072 }
6073
6074 if (*p_complete && !complete_ctor_at_level_p (TREE_TYPE (ctor),
6075 num_fields, elt_type))
6076 *p_complete = false;
6077
6078 *p_nz_elts += nz_elts;
6079 *p_init_elts += init_elts;
6080
6081 return const_p;
6082 }
6083
6084 /* Examine CTOR to discover:
6085 * how many scalar fields are set to nonzero values,
6086 and place it in *P_NZ_ELTS;
6087 * how many scalar fields in total are in CTOR,
6088 and place it in *P_INIT_ELTS;
6089 * whether the constructor is complete -- in the sense that every
6090 meaningful byte is explicitly given a value --
6091 and place it in *P_COMPLETE.
6092
6093 Return whether or not CTOR is a valid static constant initializer, the same
6094 as "initializer_constant_valid_p (CTOR, TREE_TYPE (CTOR)) != 0". */
6095
6096 bool
6097 categorize_ctor_elements (const_tree ctor, HOST_WIDE_INT *p_nz_elts,
6098 HOST_WIDE_INT *p_init_elts, bool *p_complete)
6099 {
6100 *p_nz_elts = 0;
6101 *p_init_elts = 0;
6102 *p_complete = true;
6103
6104 return categorize_ctor_elements_1 (ctor, p_nz_elts, p_init_elts, p_complete);
6105 }
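/* A hedged worked example, assuming the front end keeps all four explicit
   elements: for "int v[4] = { 7, 0, 0, 9 };" this sets *P_NZ_ELTS to 2,
   *P_INIT_ELTS to 4 and *P_COMPLETE to true, and returns true since every
   element is a constant.  */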
6106
6107 /* TYPE is initialized by a constructor with NUM_ELTS elements, the last
6108 of which had type LAST_TYPE. Each element was itself a complete
6109 initializer, in the sense that every meaningful byte was explicitly
6110 given a value. Return true if the same is true for the constructor
6111 as a whole. */
6112
6113 bool
6114 complete_ctor_at_level_p (const_tree type, HOST_WIDE_INT num_elts,
6115 const_tree last_type)
6116 {
6117 if (TREE_CODE (type) == UNION_TYPE
6118 || TREE_CODE (type) == QUAL_UNION_TYPE)
6119 {
6120 if (num_elts == 0)
6121 return false;
6122
6123 gcc_assert (num_elts == 1 && last_type);
6124
6125 /* ??? We could look at each element of the union, and find the
6126 largest element, which would avoid comparing the size of the
6127 initialized element against any tail padding in the union.
6128 Doesn't seem worth the effort... */
6129 return simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (last_type)) == 1;
6130 }
6131
6132 return count_type_elements (type, true) == num_elts;
6133 }
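/* For example (hedged): "union u { char c; int i; } x = { .i = 42 };"
   is complete because the single initialized member has the same size as
   the union, whereas initializing only C would leave the tail uncovered
   and yield false.  */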
6134
6135 /* Return 1 if EXP contains mostly (at least 3/4) zeros. */
6136
6137 static int
6138 mostly_zeros_p (const_tree exp)
6139 {
6140 if (TREE_CODE (exp) == CONSTRUCTOR)
6141 {
6142 HOST_WIDE_INT nz_elts, init_elts;
6143 bool complete_p;
6144
6145 categorize_ctor_elements (exp, &nz_elts, &init_elts, &complete_p);
6146 return !complete_p || nz_elts < init_elts / 4;
6147 }
6148
6149 return initializer_zerop (exp);
6150 }
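/* A hedged worked example: for "int a[8] = { 0, 0, 0, 0, 0, 0, 0, 5 };"
   we get nz_elts == 1 and init_elts == 8, so 1 < 8 / 4 holds and the
   constructor counts as mostly zeros.  */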
6151
6152 /* Return 1 if EXP contains all zeros. */
6153
6154 static int
6155 all_zeros_p (const_tree exp)
6156 {
6157 if (TREE_CODE (exp) == CONSTRUCTOR)
6158 {
6159 HOST_WIDE_INT nz_elts, init_elts;
6160 bool complete_p;
6161
6162 categorize_ctor_elements (exp, &nz_elts, &init_elts, &complete_p);
6163 return nz_elts == 0;
6164 }
6165
6166 return initializer_zerop (exp);
6167 }
6168 \f
6169 /* Helper function for store_constructor.
6170 TARGET, BITSIZE, BITPOS, MODE, EXP are as for store_field.
6171 CLEARED is as for store_constructor.
6172 ALIAS_SET is the alias set to use for any stores.
6173 If REVERSE is true, the store is to be done in reverse order.
6174
6175 This provides a recursive shortcut back to store_constructor when it isn't
6176 necessary to go through store_field. This is so that we can pass through
6177 the cleared field to let store_constructor know that we may not have to
6178 clear a substructure if the outer structure has already been cleared. */
6179
6180 static void
6181 store_constructor_field (rtx target, poly_uint64 bitsize, poly_int64 bitpos,
6182 poly_uint64 bitregion_start,
6183 poly_uint64 bitregion_end,
6184 machine_mode mode,
6185 tree exp, int cleared,
6186 alias_set_type alias_set, bool reverse)
6187 {
6188 poly_int64 bytepos;
6189 poly_uint64 bytesize;
6190 if (TREE_CODE (exp) == CONSTRUCTOR
6191 /* We can only call store_constructor recursively if the size and
6192 bit position are on a byte boundary. */
6193 && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
6194 && maybe_ne (bitsize, 0U)
6195 && multiple_p (bitsize, BITS_PER_UNIT, &bytesize)
6196 /* If we have a nonzero bitpos for a register target, then we just
6197 let store_field do the bitfield handling. This is unlikely to
6198 generate unnecessary clear instructions anyway. */
6199 && (known_eq (bitpos, 0) || MEM_P (target)))
6200 {
6201 if (MEM_P (target))
6202 {
6203 machine_mode target_mode = GET_MODE (target);
6204 if (target_mode != BLKmode
6205 && !multiple_p (bitpos, GET_MODE_ALIGNMENT (target_mode)))
6206 target_mode = BLKmode;
6207 target = adjust_address (target, target_mode, bytepos);
6208 }
6209
6211 /* Update the alias set, if required. */
6212 if (MEM_P (target) && ! MEM_KEEP_ALIAS_SET_P (target)
6213 && MEM_ALIAS_SET (target) != 0)
6214 {
6215 target = copy_rtx (target);
6216 set_mem_alias_set (target, alias_set);
6217 }
6218
6219 store_constructor (exp, target, cleared, bytesize, reverse);
6220 }
6221 else
6222 store_field (target, bitsize, bitpos, bitregion_start, bitregion_end, mode,
6223 exp, alias_set, false, reverse);
6224 }
6225
6226
6227 /* Returns the number of FIELD_DECLs in TYPE. */
6228
6229 static int
6230 fields_length (const_tree type)
6231 {
6232 tree t = TYPE_FIELDS (type);
6233 int count = 0;
6234
6235 for (; t; t = DECL_CHAIN (t))
6236 if (TREE_CODE (t) == FIELD_DECL)
6237 ++count;
6238
6239 return count;
6240 }
6241
6242
6243 /* Store the value of constructor EXP into the rtx TARGET.
6244 TARGET is either a REG or a MEM; we know it cannot conflict, since
6245 safe_from_p has been called.
6246 CLEARED is true if TARGET is known to have been zero'd.
6247 SIZE is the number of bytes of TARGET we are allowed to modify: this
6248 may not be the same as the size of EXP if we are assigning to a field
6249 which has been packed to exclude padding bits.
6250 If REVERSE is true, the store is to be done in reverse order. */
6251
6252 static void
6253 store_constructor (tree exp, rtx target, int cleared, poly_int64 size,
6254 bool reverse)
6255 {
6256 tree type = TREE_TYPE (exp);
6257 HOST_WIDE_INT exp_size = int_size_in_bytes (type);
6258 poly_int64 bitregion_end = known_gt (size, 0) ? size * BITS_PER_UNIT - 1 : 0;
6259
6260 switch (TREE_CODE (type))
6261 {
6262 case RECORD_TYPE:
6263 case UNION_TYPE:
6264 case QUAL_UNION_TYPE:
6265 {
6266 unsigned HOST_WIDE_INT idx;
6267 tree field, value;
6268
6269 /* The storage order is specified for every aggregate type. */
6270 reverse = TYPE_REVERSE_STORAGE_ORDER (type);
6271
6272 /* If size is zero or the target is already cleared, do nothing. */
6273 if (known_eq (size, 0) || cleared)
6274 cleared = 1;
6275 /* We either clear the aggregate or indicate the value is dead. */
6276 else if ((TREE_CODE (type) == UNION_TYPE
6277 || TREE_CODE (type) == QUAL_UNION_TYPE)
6278 && ! CONSTRUCTOR_ELTS (exp))
6279 /* If the constructor is empty, clear the union. */
6280 {
6281 clear_storage (target, expr_size (exp), BLOCK_OP_NORMAL);
6282 cleared = 1;
6283 }
6284
6285 /* If we are building a static constructor into a register,
6286 set the initial value as zero so we can fold the value into
6287 a constant. But if more than one register is involved,
6288 this probably loses. */
6289 else if (REG_P (target) && TREE_STATIC (exp)
6290 && known_le (GET_MODE_SIZE (GET_MODE (target)),
6291 REGMODE_NATURAL_SIZE (GET_MODE (target))))
6292 {
6293 emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
6294 cleared = 1;
6295 }
6296
6297 /* If the constructor has fewer fields than the structure or
6298 if we are initializing the structure to mostly zeros, clear
6299 the whole structure first. Don't do this if TARGET is a
6300 register whose mode size isn't equal to SIZE since
6301 clear_storage can't handle this case. */
6302 else if (known_size_p (size)
6303 && (((int) CONSTRUCTOR_NELTS (exp) != fields_length (type))
6304 || mostly_zeros_p (exp))
6305 && (!REG_P (target)
6306 || known_eq (GET_MODE_SIZE (GET_MODE (target)), size)))
6307 {
6308 clear_storage (target, gen_int_mode (size, Pmode),
6309 BLOCK_OP_NORMAL);
6310 cleared = 1;
6311 }
6312
6313 if (REG_P (target) && !cleared)
6314 emit_clobber (target);
6315
6316 /* Store each element of the constructor into the
6317 corresponding field of TARGET. */
6318 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), idx, field, value)
6319 {
6320 machine_mode mode;
6321 HOST_WIDE_INT bitsize;
6322 HOST_WIDE_INT bitpos = 0;
6323 tree offset;
6324 rtx to_rtx = target;
6325
6326 /* Just ignore missing fields. We cleared the whole
6327 structure, above, if any fields are missing. */
6328 if (field == 0)
6329 continue;
6330
6331 if (cleared && initializer_zerop (value))
6332 continue;
6333
6334 if (tree_fits_uhwi_p (DECL_SIZE (field)))
6335 bitsize = tree_to_uhwi (DECL_SIZE (field));
6336 else
6337 gcc_unreachable ();
6338
6339 mode = DECL_MODE (field);
6340 if (DECL_BIT_FIELD (field))
6341 mode = VOIDmode;
6342
6343 offset = DECL_FIELD_OFFSET (field);
6344 if (tree_fits_shwi_p (offset)
6345 && tree_fits_shwi_p (bit_position (field)))
6346 {
6347 bitpos = int_bit_position (field);
6348 offset = NULL_TREE;
6349 }
6350 else
6351 gcc_unreachable ();
6352
6353 /* If this initializes a field that is smaller than a
6354 word, at the start of a word, try to widen it to a full
6355 word. This special case allows us to output C++ member
6356 function initializations in a form that the optimizers
6357 can understand. */
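/* A hedged illustration: on a 64-bit WORD_REGISTER_OPERATIONS target,
   a 16-bit INTEGER_CST field at bit position 0 of a register-held
   struct is widened below to a full word_mode store, masked to its
   original BITSIZE (and shifted up for big-endian) so that later
   elements need not re-clear the rest of the word.  */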
6358 if (WORD_REGISTER_OPERATIONS
6359 && REG_P (target)
6360 && bitsize < BITS_PER_WORD
6361 && bitpos % BITS_PER_WORD == 0
6362 && GET_MODE_CLASS (mode) == MODE_INT
6363 && TREE_CODE (value) == INTEGER_CST
6364 && exp_size >= 0
6365 && bitpos + BITS_PER_WORD <= exp_size * BITS_PER_UNIT)
6366 {
6367 tree type = TREE_TYPE (value);
6368
6369 if (TYPE_PRECISION (type) < BITS_PER_WORD)
6370 {
6371 type = lang_hooks.types.type_for_mode
6372 (word_mode, TYPE_UNSIGNED (type));
6373 value = fold_convert (type, value);
6374 /* Make sure the bits beyond the original bitsize are zero
6375 so that we can correctly avoid extra zeroing stores in
6376 later constructor elements. */
6377 tree bitsize_mask
6378 = wide_int_to_tree (type, wi::mask (bitsize, false,
6379 BITS_PER_WORD));
6380 value = fold_build2 (BIT_AND_EXPR, type, value, bitsize_mask);
6381 }
6382
6383 if (BYTES_BIG_ENDIAN)
6384 value
6385 = fold_build2 (LSHIFT_EXPR, type, value,
6386 build_int_cst (type,
6387 BITS_PER_WORD - bitsize));
6388 bitsize = BITS_PER_WORD;
6389 mode = word_mode;
6390 }
6391
6392 if (MEM_P (to_rtx) && !MEM_KEEP_ALIAS_SET_P (to_rtx)
6393 && DECL_NONADDRESSABLE_P (field))
6394 {
6395 to_rtx = copy_rtx (to_rtx);
6396 MEM_KEEP_ALIAS_SET_P (to_rtx) = 1;
6397 }
6398
6399 store_constructor_field (to_rtx, bitsize, bitpos,
6400 0, bitregion_end, mode,
6401 value, cleared,
6402 get_alias_set (TREE_TYPE (field)),
6403 reverse);
6404 }
6405 break;
6406 }
6407 case ARRAY_TYPE:
6408 {
6409 tree value, index;
6410 unsigned HOST_WIDE_INT i;
6411 int need_to_clear;
6412 tree domain;
6413 tree elttype = TREE_TYPE (type);
6414 int const_bounds_p;
6415 HOST_WIDE_INT minelt = 0;
6416 HOST_WIDE_INT maxelt = 0;
6417
6418 /* The storage order is specified for every aggregate type. */
6419 reverse = TYPE_REVERSE_STORAGE_ORDER (type);
6420
6421 domain = TYPE_DOMAIN (type);
6422 const_bounds_p = (TYPE_MIN_VALUE (domain)
6423 && TYPE_MAX_VALUE (domain)
6424 && tree_fits_shwi_p (TYPE_MIN_VALUE (domain))
6425 && tree_fits_shwi_p (TYPE_MAX_VALUE (domain)));
6426
6427 /* If we have constant bounds for the range of the type, get them. */
6428 if (const_bounds_p)
6429 {
6430 minelt = tree_to_shwi (TYPE_MIN_VALUE (domain));
6431 maxelt = tree_to_shwi (TYPE_MAX_VALUE (domain));
6432 }
6433
6434 /* If the constructor has fewer elements than the array, clear
6435 the whole array first. Similarly if this is a static
6436 constructor of a non-BLKmode object. */
6437 if (cleared)
6438 need_to_clear = 0;
6439 else if (REG_P (target) && TREE_STATIC (exp))
6440 need_to_clear = 1;
6441 else
6442 {
6443 unsigned HOST_WIDE_INT idx;
6444 tree index, value;
6445 HOST_WIDE_INT count = 0, zero_count = 0;
6446 need_to_clear = ! const_bounds_p;
6447
6448 /* This loop is a more accurate version of the loop in
6449 mostly_zeros_p (it handles RANGE_EXPR in an index). It
6450 is also needed to check for missing elements. */
6451 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), idx, index, value)
6452 {
6453 HOST_WIDE_INT this_node_count;
6454
6455 if (need_to_clear)
6456 break;
6457
6458 if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR)
6459 {
6460 tree lo_index = TREE_OPERAND (index, 0);
6461 tree hi_index = TREE_OPERAND (index, 1);
6462
6463 if (! tree_fits_uhwi_p (lo_index)
6464 || ! tree_fits_uhwi_p (hi_index))
6465 {
6466 need_to_clear = 1;
6467 break;
6468 }
6469
6470 this_node_count = (tree_to_uhwi (hi_index)
6471 - tree_to_uhwi (lo_index) + 1);
6472 }
6473 else
6474 this_node_count = 1;
6475
6476 count += this_node_count;
6477 if (mostly_zeros_p (value))
6478 zero_count += this_node_count;
6479 }
6480
6481 /* Clear the entire array first if there are any missing
6482 elements, or if the incidence of zero elements is >=
6483 75%. */
6484 if (! need_to_clear
6485 && (count < maxelt - minelt + 1
6486 || 4 * zero_count >= 3 * count))
6487 need_to_clear = 1;
6488 }
6489
6490 if (need_to_clear && maybe_gt (size, 0))
6491 {
6492 if (REG_P (target))
6493 emit_move_insn (target, CONST0_RTX (GET_MODE (target)));
6494 else
6495 clear_storage (target, gen_int_mode (size, Pmode),
6496 BLOCK_OP_NORMAL);
6497 cleared = 1;
6498 }
6499
6500 if (!cleared && REG_P (target))
6501 /* Inform later passes that the old value is dead. */
6502 emit_clobber (target);
6503
6504 /* Store each element of the constructor into the
6505 corresponding element of TARGET, determined by counting the
6506 elements. */
6507 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (exp), i, index, value)
6508 {
6509 machine_mode mode;
6510 poly_int64 bitsize;
6511 HOST_WIDE_INT bitpos;
6512 rtx xtarget = target;
6513
6514 if (cleared && initializer_zerop (value))
6515 continue;
6516
6517 mode = TYPE_MODE (elttype);
6518 if (mode == BLKmode)
6519 bitsize = (tree_fits_uhwi_p (TYPE_SIZE (elttype))
6520 ? tree_to_uhwi (TYPE_SIZE (elttype))
6521 : -1);
6522 else
6523 bitsize = GET_MODE_BITSIZE (mode);
6524
6525 if (index != NULL_TREE && TREE_CODE (index) == RANGE_EXPR)
6526 {
6527 tree lo_index = TREE_OPERAND (index, 0);
6528 tree hi_index = TREE_OPERAND (index, 1);
6529 rtx index_r, pos_rtx;
6530 HOST_WIDE_INT lo, hi, count;
6531 tree position;
6532
6533 /* If the range is constant and "small", unroll the loop. */
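/* E.g. (hedged, using the GNU designated-range extension):
   "int a[32] = { [0 ... 3] = v };" is unrolled into four element
   stores, while a range whose total size exceeds the 40-byte
   heuristic below (for MEM targets) falls through to the loop
   code instead.  */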
6534 if (const_bounds_p
6535 && tree_fits_shwi_p (lo_index)
6536 && tree_fits_shwi_p (hi_index)
6537 && (lo = tree_to_shwi (lo_index),
6538 hi = tree_to_shwi (hi_index),
6539 count = hi - lo + 1,
6540 (!MEM_P (target)
6541 || count <= 2
6542 || (tree_fits_uhwi_p (TYPE_SIZE (elttype))
6543 && (tree_to_uhwi (TYPE_SIZE (elttype)) * count
6544 <= 40 * 8)))))
6545 {
6546 lo -= minelt; hi -= minelt;
6547 for (; lo <= hi; lo++)
6548 {
6549 bitpos = lo * tree_to_shwi (TYPE_SIZE (elttype));
6550
6551 if (MEM_P (target)
6552 && !MEM_KEEP_ALIAS_SET_P (target)
6553 && TREE_CODE (type) == ARRAY_TYPE
6554 && TYPE_NONALIASED_COMPONENT (type))
6555 {
6556 target = copy_rtx (target);
6557 MEM_KEEP_ALIAS_SET_P (target) = 1;
6558 }
6559
6560 store_constructor_field
6561 (target, bitsize, bitpos, 0, bitregion_end,
6562 mode, value, cleared,
6563 get_alias_set (elttype), reverse);
6564 }
6565 }
6566 else
6567 {
6568 rtx_code_label *loop_start = gen_label_rtx ();
6569 rtx_code_label *loop_end = gen_label_rtx ();
6570 tree exit_cond;
6571
6572 expand_normal (hi_index);
6573
6574 index = build_decl (EXPR_LOCATION (exp),
6575 VAR_DECL, NULL_TREE, domain);
6576 index_r = gen_reg_rtx (promote_decl_mode (index, NULL));
6577 SET_DECL_RTL (index, index_r);
6578 store_expr (lo_index, index_r, 0, false, reverse);
6579
6580 /* Build the head of the loop. */
6581 do_pending_stack_adjust ();
6582 emit_label (loop_start);
6583
6584 /* Assign value to element index. */
6585 position =
6586 fold_convert (ssizetype,
6587 fold_build2 (MINUS_EXPR,
6588 TREE_TYPE (index),
6589 index,
6590 TYPE_MIN_VALUE (domain)));
6591
6592 position =
6593 size_binop (MULT_EXPR, position,
6594 fold_convert (ssizetype,
6595 TYPE_SIZE_UNIT (elttype)));
6596
6597 pos_rtx = expand_normal (position);
6598 xtarget = offset_address (target, pos_rtx,
6599 highest_pow2_factor (position));
6600 xtarget = adjust_address (xtarget, mode, 0);
6601 if (TREE_CODE (value) == CONSTRUCTOR)
6602 store_constructor (value, xtarget, cleared,
6603 exact_div (bitsize, BITS_PER_UNIT),
6604 reverse);
6605 else
6606 store_expr (value, xtarget, 0, false, reverse);
6607
6608 /* Generate a conditional jump to exit the loop. */
6609 exit_cond = build2 (LT_EXPR, integer_type_node,
6610 index, hi_index);
6611 jumpif (exit_cond, loop_end,
6612 profile_probability::uninitialized ());
6613
6614 /* Update the loop counter, and jump to the head of
6615 the loop. */
6616 expand_assignment (index,
6617 build2 (PLUS_EXPR, TREE_TYPE (index),
6618 index, integer_one_node),
6619 false);
6620
6621 emit_jump (loop_start);
6622
6623 /* Build the end of the loop. */
6624 emit_label (loop_end);
6625 }
6626 }
6627 else if ((index != 0 && ! tree_fits_shwi_p (index))
6628 || ! tree_fits_uhwi_p (TYPE_SIZE (elttype)))
6629 {
6630 tree position;
6631
6632 if (index == 0)
6633 index = ssize_int (1);
6634
6635 if (minelt)
6636 index = fold_convert (ssizetype,
6637 fold_build2 (MINUS_EXPR,
6638 TREE_TYPE (index),
6639 index,
6640 TYPE_MIN_VALUE (domain)));
6641
6642 position =
6643 size_binop (MULT_EXPR, index,
6644 fold_convert (ssizetype,
6645 TYPE_SIZE_UNIT (elttype)));
6646 xtarget = offset_address (target,
6647 expand_normal (position),
6648 highest_pow2_factor (position));
6649 xtarget = adjust_address (xtarget, mode, 0);
6650 store_expr (value, xtarget, 0, false, reverse);
6651 }
6652 else
6653 {
6654 if (index != 0)
6655 bitpos = ((tree_to_shwi (index) - minelt)
6656 * tree_to_uhwi (TYPE_SIZE (elttype)));
6657 else
6658 bitpos = (i * tree_to_uhwi (TYPE_SIZE (elttype)));
6659
6660 if (MEM_P (target) && !MEM_KEEP_ALIAS_SET_P (target)
6661 && TREE_CODE (type) == ARRAY_TYPE
6662 && TYPE_NONALIASED_COMPONENT (type))
6663 {
6664 target = copy_rtx (target);
6665 MEM_KEEP_ALIAS_SET_P (target) = 1;
6666 }
6667 store_constructor_field (target, bitsize, bitpos, 0,
6668 bitregion_end, mode, value,
6669 cleared, get_alias_set (elttype),
6670 reverse);
6671 }
6672 }
6673 break;
6674 }
6675
6676 case VECTOR_TYPE:
6677 {
6678 unsigned HOST_WIDE_INT idx;
6679 constructor_elt *ce;
6680 int i;
6681 int need_to_clear;
6682 insn_code icode = CODE_FOR_nothing;
6683 tree elt;
6684 tree elttype = TREE_TYPE (type);
6685 int elt_size = tree_to_uhwi (TYPE_SIZE (elttype));
6686 machine_mode eltmode = TYPE_MODE (elttype);
6687 HOST_WIDE_INT bitsize;
6688 HOST_WIDE_INT bitpos;
6689 rtvec vector = NULL;
6690 poly_uint64 n_elts;
6691 unsigned HOST_WIDE_INT const_n_elts;
6692 alias_set_type alias;
6693 bool vec_vec_init_p = false;
6694 machine_mode mode = GET_MODE (target);
6695
6696 gcc_assert (eltmode != BLKmode);
6697
6698 /* Try using vec_duplicate_optab for uniform vectors. */
6699 if (!TREE_SIDE_EFFECTS (exp)
6700 && VECTOR_MODE_P (mode)
6701 && eltmode == GET_MODE_INNER (mode)
6702 && ((icode = optab_handler (vec_duplicate_optab, mode))
6703 != CODE_FOR_nothing)
6704 && (elt = uniform_vector_p (exp)))
6705 {
6706 struct expand_operand ops[2];
6707 create_output_operand (&ops[0], target, mode);
6708 create_input_operand (&ops[1], expand_normal (elt), eltmode);
6709 expand_insn (icode, 2, ops);
6710 if (!rtx_equal_p (target, ops[0].value))
6711 emit_move_insn (target, ops[0].value);
6712 break;
6713 }
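/* A hedged example: a constructor such as { c, c, c, c } for a V4SI
   target is recognized by uniform_vector_p above and expanded as a
   single vec_duplicate of C rather than four element stores.  */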
6714
6715 n_elts = TYPE_VECTOR_SUBPARTS (type);
6716 if (REG_P (target)
6717 && VECTOR_MODE_P (mode)
6718 && n_elts.is_constant (&const_n_elts))
6719 {
6720 machine_mode emode = eltmode;
6721
6722 if (CONSTRUCTOR_NELTS (exp)
6723 && (TREE_CODE (TREE_TYPE (CONSTRUCTOR_ELT (exp, 0)->value))
6724 == VECTOR_TYPE))
6725 {
6726 tree etype = TREE_TYPE (CONSTRUCTOR_ELT (exp, 0)->value);
6727 gcc_assert (known_eq (CONSTRUCTOR_NELTS (exp)
6728 * TYPE_VECTOR_SUBPARTS (etype),
6729 n_elts));
6730 emode = TYPE_MODE (etype);
6731 }
6732 icode = convert_optab_handler (vec_init_optab, mode, emode);
6733 if (icode != CODE_FOR_nothing)
6734 {
6735 unsigned int i, n = const_n_elts;
6736
6737 if (emode != eltmode)
6738 {
6739 n = CONSTRUCTOR_NELTS (exp);
6740 vec_vec_init_p = true;
6741 }
6742 vector = rtvec_alloc (n);
6743 for (i = 0; i < n; i++)
6744 RTVEC_ELT (vector, i) = CONST0_RTX (emode);
6745 }
6746 }
6747
6748 /* If the constructor has fewer elements than the vector,
6749 clear the whole vector first. Similarly if this is a static
6750 constructor of a non-BLKmode object. */
6751 if (cleared)
6752 need_to_clear = 0;
6753 else if (REG_P (target) && TREE_STATIC (exp))
6754 need_to_clear = 1;
6755 else
6756 {
6757 unsigned HOST_WIDE_INT count = 0, zero_count = 0;
6758 tree value;
6759
6760 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), idx, value)
6761 {
6762 tree sz = TYPE_SIZE (TREE_TYPE (value));
6763 int n_elts_here
6764 = tree_to_uhwi (int_const_binop (TRUNC_DIV_EXPR, sz,
6765 TYPE_SIZE (elttype)));
6766
6767 count += n_elts_here;
6768 if (mostly_zeros_p (value))
6769 zero_count += n_elts_here;
6770 }
6771
6772 /* Clear the entire vector first if there are any missing elements,
6773 or if the incidence of zero elements is >= 75%. */
6774 need_to_clear = (maybe_lt (count, n_elts)
6775 || 4 * zero_count >= 3 * count);
6776 }
6777
6778 if (need_to_clear && maybe_gt (size, 0) && !vector)
6779 {
6780 if (REG_P (target))
6781 emit_move_insn (target, CONST0_RTX (mode));
6782 else
6783 clear_storage (target, gen_int_mode (size, Pmode),
6784 BLOCK_OP_NORMAL);
6785 cleared = 1;
6786 }
6787
6788 /* Inform later passes that the old value is dead. */
6789 if (!cleared && !vector && REG_P (target))
6790 emit_move_insn (target, CONST0_RTX (mode));
6791
6792 if (MEM_P (target))
6793 alias = MEM_ALIAS_SET (target);
6794 else
6795 alias = get_alias_set (elttype);
6796
6797 /* Store each element of the constructor into the corresponding
6798 element of TARGET, determined by counting the elements. */
6799 for (idx = 0, i = 0;
6800 vec_safe_iterate (CONSTRUCTOR_ELTS (exp), idx, &ce);
6801 idx++, i += bitsize / elt_size)
6802 {
6803 HOST_WIDE_INT eltpos;
6804 tree value = ce->value;
6805
6806 bitsize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (value)));
6807 if (cleared && initializer_zerop (value))
6808 continue;
6809
6810 if (ce->index)
6811 eltpos = tree_to_uhwi (ce->index);
6812 else
6813 eltpos = i;
6814
6815 if (vector)
6816 {
6817 if (vec_vec_init_p)
6818 {
6819 gcc_assert (ce->index == NULL_TREE);
6820 gcc_assert (TREE_CODE (TREE_TYPE (value)) == VECTOR_TYPE);
6821 eltpos = idx;
6822 }
6823 else
6824 gcc_assert (TREE_CODE (TREE_TYPE (value)) != VECTOR_TYPE);
6825 RTVEC_ELT (vector, eltpos) = expand_normal (value);
6826 }
6827 else
6828 {
6829 machine_mode value_mode
6830 = (TREE_CODE (TREE_TYPE (value)) == VECTOR_TYPE
6831 ? TYPE_MODE (TREE_TYPE (value)) : eltmode);
6832 bitpos = eltpos * elt_size;
6833 store_constructor_field (target, bitsize, bitpos, 0,
6834 bitregion_end, value_mode,
6835 value, cleared, alias, reverse);
6836 }
6837 }
6838
6839 if (vector)
6840 emit_insn (GEN_FCN (icode) (target,
6841 gen_rtx_PARALLEL (mode, vector)));
6842 break;
6843 }
6844
6845 default:
6846 gcc_unreachable ();
6847 }
6848 }
6849
6850 /* Store the value of EXP (an expression tree)
6851 into a subfield of TARGET which has mode MODE and occupies
6852 BITSIZE bits, starting BITPOS bits from the start of TARGET.
6853 If MODE is VOIDmode, it means that we are storing into a bit-field.
6854
6855 BITREGION_START is the bitpos of the first bitfield in this region.
6856 BITREGION_END is the bitpos of the ending bitfield in this region.
6857 These two fields are 0 if the C++ memory model does not apply,
6858 or we are not interested in keeping track of bitfield regions.
6859
6860 Always return const0_rtx unless we have something particular to
6861 return.
6862
6863 ALIAS_SET is the alias set for the destination. This value will
6864 (in general) be different from that for TARGET, since TARGET is a
6865 reference to the containing structure.
6866
6867 If NONTEMPORAL is true, try generating a nontemporal store.
6868
6869 If REVERSE is true, the store is to be done in reverse order. */
6870
6871 static rtx
6872 store_field (rtx target, poly_int64 bitsize, poly_int64 bitpos,
6873 poly_uint64 bitregion_start, poly_uint64 bitregion_end,
6874 machine_mode mode, tree exp,
6875 alias_set_type alias_set, bool nontemporal, bool reverse)
6876 {
6877 if (TREE_CODE (exp) == ERROR_MARK)
6878 return const0_rtx;
6879
6880 /* If we have nothing to store, do nothing unless the expression has
6881 side-effects. Don't do that for a zero-sized addressable lhs of
6882 calls. */
6883 if (known_eq (bitsize, 0)
6884 && (!TREE_ADDRESSABLE (TREE_TYPE (exp))
6885 || TREE_CODE (exp) != CALL_EXPR))
6886 return expand_expr (exp, const0_rtx, VOIDmode, EXPAND_NORMAL);
6887
6888 if (GET_CODE (target) == CONCAT)
6889 {
6890 /* We're storing into a struct containing a single __complex. */
6891
6892 gcc_assert (known_eq (bitpos, 0));
6893 return store_expr (exp, target, 0, nontemporal, reverse);
6894 }
6895
6896 /* If the structure is in a register or if the component
6897 is a bit field, we cannot use addressing to access it.
6898 Use bit-field techniques or SUBREG to store in it. */
6899
6900 poly_int64 decl_bitsize;
6901 if (mode == VOIDmode
6902 || (mode != BLKmode && ! direct_store[(int) mode]
6903 && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
6904 && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
6905 || REG_P (target)
6906 || GET_CODE (target) == SUBREG
6907 /* If the field isn't aligned enough to store as an ordinary memref,
6908 store it as a bit field. */
6909 || (mode != BLKmode
6910 && ((((MEM_ALIGN (target) < GET_MODE_ALIGNMENT (mode))
6911 || !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode)))
6912 && targetm.slow_unaligned_access (mode, MEM_ALIGN (target)))
6913 || !multiple_p (bitpos, BITS_PER_UNIT)))
6914 || (known_size_p (bitsize)
6915 && mode != BLKmode
6916 && maybe_gt (GET_MODE_BITSIZE (mode), bitsize))
6917 /* If the RHS and field are a constant size and the size of the
6918 RHS isn't the same size as the bitfield, we must use bitfield
6919 operations. */
6920 || (known_size_p (bitsize)
6921 && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (exp)))
6922 && maybe_ne (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (exp))),
6923 bitsize)
6924 /* Except for initialization of full bytes from a CONSTRUCTOR, which
6925 we will handle specially below. */
6926 && !(TREE_CODE (exp) == CONSTRUCTOR
6927 && multiple_p (bitsize, BITS_PER_UNIT))
6928 /* And except for bitwise copying of TREE_ADDRESSABLE types,
6929 where the FIELD_DECL has the right bitsize, but TREE_TYPE (exp)
6930 includes some extra padding. store_expr / expand_expr will in
6931 that case call get_inner_reference that will have the bitsize
6932 we check here and thus the block move will not clobber the
6933 padding that shouldn't be clobbered. In the future we could
6934 replace the TREE_ADDRESSABLE check with a check that
6935 get_base_address needs to live in memory. */
6936 && (!TREE_ADDRESSABLE (TREE_TYPE (exp))
6937 || TREE_CODE (exp) != COMPONENT_REF
6938 || !multiple_p (bitsize, BITS_PER_UNIT)
6939 || !multiple_p (bitpos, BITS_PER_UNIT)
6940 || !poly_int_tree_p (DECL_SIZE (TREE_OPERAND (exp, 1)),
6941 &decl_bitsize)
6942 || maybe_ne (decl_bitsize, bitsize)))
6943 /* If we are expanding a MEM_REF of a non-BLKmode non-addressable
6944 decl we must use bitfield operations. */
6945 || (known_size_p (bitsize)
6946 && TREE_CODE (exp) == MEM_REF
6947 && TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR
6948 && DECL_P (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
6949 && !TREE_ADDRESSABLE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0))
6950 && DECL_MODE (TREE_OPERAND (TREE_OPERAND (exp, 0), 0)) != BLKmode))
6951 {
6952 rtx temp;
6953 gimple *nop_def;
6954
6955 /* If EXP is a NOP_EXPR of precision less than its mode, then that
6956 implies a mask operation. If the precision is the same size as
6957 the field we're storing into, that mask is redundant. This is
6958 particularly common with bit field assignments generated by the
6959 C front end. */
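/* For instance (hedged): assigning to "unsigned b : 3;" makes the C
   front end wrap the RHS in a conversion to a 3-bit type; since exactly
   3 bits are stored anyway, the masking implied by that conversion is
   redundant and the unconverted operand can be expanded instead.  */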
6960 nop_def = get_def_for_expr (exp, NOP_EXPR);
6961 if (nop_def)
6962 {
6963 tree type = TREE_TYPE (exp);
6964 if (INTEGRAL_TYPE_P (type)
6965 && maybe_ne (TYPE_PRECISION (type),
6966 GET_MODE_BITSIZE (TYPE_MODE (type)))
6967 && known_eq (bitsize, TYPE_PRECISION (type)))
6968 {
6969 tree op = gimple_assign_rhs1 (nop_def);
6970 type = TREE_TYPE (op);
6971 if (INTEGRAL_TYPE_P (type)
6972 && known_ge (TYPE_PRECISION (type), bitsize))
6973 exp = op;
6974 }
6975 }
6976
6977 temp = expand_normal (exp);
6978
6979 /* We don't support variable-sized BLKmode bitfields, since our
6980 handling of BLKmode is bound up with the ability to break
6981 things into words. */
6982 gcc_assert (mode != BLKmode || bitsize.is_constant ());
6983
6984 /* Handle calls that return values in multiple non-contiguous locations.
6985 The Irix 6 ABI has examples of this. */
6986 if (GET_CODE (temp) == PARALLEL)
6987 {
6988 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
6989 scalar_int_mode temp_mode
6990 = smallest_int_mode_for_size (size * BITS_PER_UNIT);
6991 rtx temp_target = gen_reg_rtx (temp_mode);
6992 emit_group_store (temp_target, temp, TREE_TYPE (exp), size);
6993 temp = temp_target;
6994 }
6995
6996 /* Handle calls that return BLKmode values in registers. */
6997 else if (mode == BLKmode && REG_P (temp) && TREE_CODE (exp) == CALL_EXPR)
6998 {
6999 rtx temp_target = gen_reg_rtx (GET_MODE (temp));
7000 copy_blkmode_from_reg (temp_target, temp, TREE_TYPE (exp));
7001 temp = temp_target;
7002 }
7003
7004 /* If the value has aggregate type and an integral mode then, if BITSIZE
7005 is narrower than this mode and this is for big-endian data, we first
7006 need to put the value into the low-order bits for store_bit_field,
7007 except when MODE is BLKmode and BITSIZE larger than the word size
7008 (see the handling of fields larger than a word in store_bit_field).
6909 Moreover, the field may not be aligned on a byte boundary; in this
7010 case, if it has reverse storage order, it needs to be accessed as a
7011 scalar field with reverse storage order and we must first put the
7012 value into target order. */
7013 scalar_int_mode temp_mode;
7014 if (AGGREGATE_TYPE_P (TREE_TYPE (exp))
7015 && is_int_mode (GET_MODE (temp), &temp_mode))
7016 {
7017 HOST_WIDE_INT size = GET_MODE_BITSIZE (temp_mode);
7018
7019 reverse = TYPE_REVERSE_STORAGE_ORDER (TREE_TYPE (exp));
7020
7021 if (reverse)
7022 temp = flip_storage_order (temp_mode, temp);
7023
7024 gcc_checking_assert (known_le (bitsize, size));
7025 if (maybe_lt (bitsize, size)
7026 && reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN
7027 /* Use of to_constant for BLKmode was checked above. */
7028 && !(mode == BLKmode && bitsize.to_constant () > BITS_PER_WORD))
7029 temp = expand_shift (RSHIFT_EXPR, temp_mode, temp,
7030 size - bitsize, NULL_RTX, 1);
7031 }
7032
7033 /* Unless MODE is VOIDmode or BLKmode, convert TEMP to MODE. */
7034 if (mode != VOIDmode && mode != BLKmode
7035 && mode != TYPE_MODE (TREE_TYPE (exp)))
7036 temp = convert_modes (mode, TYPE_MODE (TREE_TYPE (exp)), temp, 1);
7037
7038 /* If the mode of TEMP and TARGET is BLKmode, both must be in memory
7039 and BITPOS must be aligned on a byte boundary. If so, we simply do
7040 a block copy. Likewise for a BLKmode-like TARGET. */
7041 if (GET_MODE (temp) == BLKmode
7042 && (GET_MODE (target) == BLKmode
7043 || (MEM_P (target)
7044 && GET_MODE_CLASS (GET_MODE (target)) == MODE_INT
7045 && multiple_p (bitpos, BITS_PER_UNIT)
7046 && multiple_p (bitsize, BITS_PER_UNIT))))
7047 {
7048 gcc_assert (MEM_P (target) && MEM_P (temp));
7049 poly_int64 bytepos = exact_div (bitpos, BITS_PER_UNIT);
7050 poly_int64 bytesize = bits_to_bytes_round_up (bitsize);
7051
7052 target = adjust_address (target, VOIDmode, bytepos);
7053 emit_block_move (target, temp,
7054 gen_int_mode (bytesize, Pmode),
7055 BLOCK_OP_NORMAL);
7056
7057 return const0_rtx;
7058 }
7059
7060 /* If the mode of TEMP is still BLKmode and BITSIZE is not larger than the
7061 word size, we need to load the value (see again store_bit_field). */
7062 if (GET_MODE (temp) == BLKmode && known_le (bitsize, BITS_PER_WORD))
7063 {
7064 scalar_int_mode temp_mode = smallest_int_mode_for_size (bitsize);
7065 temp = extract_bit_field (temp, bitsize, 0, 1, NULL_RTX, temp_mode,
7066 temp_mode, false, NULL);
7067 }
7068
7069 /* Store the value in the bitfield. */
7070 store_bit_field (target, bitsize, bitpos,
7071 bitregion_start, bitregion_end,
7072 mode, temp, reverse);
7073
7074 return const0_rtx;
7075 }
7076 else
7077 {
7078 /* Now build a reference to just the desired component. */
7079 rtx to_rtx = adjust_address (target, mode,
7080 exact_div (bitpos, BITS_PER_UNIT));
7081
7082 if (to_rtx == target)
7083 to_rtx = copy_rtx (to_rtx);
7084
7085 if (!MEM_KEEP_ALIAS_SET_P (to_rtx) && MEM_ALIAS_SET (to_rtx) != 0)
7086 set_mem_alias_set (to_rtx, alias_set);
7087
7088 /* Above we avoided using bitfield operations for storing a CONSTRUCTOR
7089 into a target smaller than its type; handle that case now. */
7090 if (TREE_CODE (exp) == CONSTRUCTOR && known_size_p (bitsize))
7091 {
7092 poly_int64 bytesize = exact_div (bitsize, BITS_PER_UNIT);
7093 store_constructor (exp, to_rtx, 0, bytesize, reverse);
7094 return to_rtx;
7095 }
7096
7097 return store_expr (exp, to_rtx, 0, nontemporal, reverse);
7098 }
7099 }
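/* Illustrative sketch, not part of the original source: a plain C
   bit-field store such as

     struct s { unsigned int a : 3; unsigned int b : 5; } x;
     x.b = v;

   reaches store_field above with bitsize == 5 and, on a typical
   little-endian layout, bitpos == 3; since the field is not aligned on
   a byte boundary, the bit-field path is taken and the store ends in
   store_bit_field rather than in an ordinary memory move.  */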
7100 \f
7101 /* Given an expression EXP that may be a COMPONENT_REF, a BIT_FIELD_REF,
7102 an ARRAY_REF, or an ARRAY_RANGE_REF, look for nested operations of these
7103 codes and find the ultimate containing object, which we return.
7104
7105 We set *PBITSIZE to the size in bits that we want, *PBITPOS to the
7106 bit position, *PUNSIGNEDP to the signedness and *PREVERSEP to the
7107 storage order of the field.
7108 If the position of the field is variable, we store a tree
7109 giving the variable offset (in units) in *POFFSET.
7110 This offset is in addition to the bit position.
7111 If the position is not variable, we store 0 in *POFFSET.
7112
7113 If any of the extraction expressions is volatile,
7114 we store 1 in *PVOLATILEP. Otherwise we don't change that.
7115
7116 If the field is a non-BLKmode bit-field, *PMODE is set to VOIDmode.
7117 Otherwise, it is a mode that can be used to access the field.
7118
7119 If the field describes a variable-sized object, *PMODE is set to
7120 BLKmode and *PBITSIZE is set to -1. An access cannot be made in
7121 this case, but the address of the object can be found. */
7122
7123 tree
7124 get_inner_reference (tree exp, poly_int64_pod *pbitsize,
7125 poly_int64_pod *pbitpos, tree *poffset,
7126 machine_mode *pmode, int *punsignedp,
7127 int *preversep, int *pvolatilep)
7128 {
7129 tree size_tree = 0;
7130 machine_mode mode = VOIDmode;
7131 bool blkmode_bitfield = false;
7132 tree offset = size_zero_node;
7133 poly_offset_int bit_offset = 0;
7134
7135 /* First get the mode, signedness, storage order and size. We do this from
7136 just the outermost expression. */
7137 *pbitsize = -1;
7138 if (TREE_CODE (exp) == COMPONENT_REF)
7139 {
7140 tree field = TREE_OPERAND (exp, 1);
7141 size_tree = DECL_SIZE (field);
7142 if (flag_strict_volatile_bitfields > 0
7143 && TREE_THIS_VOLATILE (exp)
7144 && DECL_BIT_FIELD_TYPE (field)
7145 && DECL_MODE (field) != BLKmode)
7146 /* Volatile bitfields should be accessed in the mode of the
7147 field's type, not the mode computed based on the bit
7148 size. */
7149 mode = TYPE_MODE (DECL_BIT_FIELD_TYPE (field));
7150 else if (!DECL_BIT_FIELD (field))
7151 {
7152 mode = DECL_MODE (field);
7153 /* For vector fields re-check the target flags, as DECL_MODE
7154 could have been set with different target flags than
7155 the current function has. */
7156 if (mode == BLKmode
7157 && VECTOR_TYPE_P (TREE_TYPE (field))
7158 && VECTOR_MODE_P (TYPE_MODE_RAW (TREE_TYPE (field))))
7159 mode = TYPE_MODE (TREE_TYPE (field));
7160 }
7161 else if (DECL_MODE (field) == BLKmode)
7162 blkmode_bitfield = true;
7163
7164 *punsignedp = DECL_UNSIGNED (field);
7165 }
7166 else if (TREE_CODE (exp) == BIT_FIELD_REF)
7167 {
7168 size_tree = TREE_OPERAND (exp, 1);
7169 *punsignedp = (! INTEGRAL_TYPE_P (TREE_TYPE (exp))
7170 || TYPE_UNSIGNED (TREE_TYPE (exp)));
7171
7172 /* For vector types, with the correct size of access, use the mode of
7173 the inner type. */
7174 if (TREE_CODE (TREE_TYPE (TREE_OPERAND (exp, 0))) == VECTOR_TYPE
7175 && TREE_TYPE (exp) == TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0)))
7176 && tree_int_cst_equal (size_tree, TYPE_SIZE (TREE_TYPE (exp))))
7177 mode = TYPE_MODE (TREE_TYPE (exp));
7178 }
7179 else
7180 {
7181 mode = TYPE_MODE (TREE_TYPE (exp));
7182 *punsignedp = TYPE_UNSIGNED (TREE_TYPE (exp));
7183
7184 if (mode == BLKmode)
7185 size_tree = TYPE_SIZE (TREE_TYPE (exp));
7186 else
7187 *pbitsize = GET_MODE_BITSIZE (mode);
7188 }
7189
7190 if (size_tree != 0)
7191 {
7192 if (! tree_fits_uhwi_p (size_tree))
7193 mode = BLKmode, *pbitsize = -1;
7194 else
7195 *pbitsize = tree_to_uhwi (size_tree);
7196 }
7197
7198 *preversep = reverse_storage_order_for_component_p (exp);
7199
7200 /* Compute cumulative bit-offset for nested component-refs and array-refs,
7201 and find the ultimate containing object. */
7202 while (1)
7203 {
7204 switch (TREE_CODE (exp))
7205 {
7206 case BIT_FIELD_REF:
7207 bit_offset += wi::to_poly_offset (TREE_OPERAND (exp, 2));
7208 break;
7209
7210 case COMPONENT_REF:
7211 {
7212 tree field = TREE_OPERAND (exp, 1);
7213 tree this_offset = component_ref_field_offset (exp);
7214
7215 /* If this field hasn't been filled in yet, don't go past it.
7216 This should only happen when folding expressions made during
7217 type construction. */
7218 if (this_offset == 0)
7219 break;
7220
7221 offset = size_binop (PLUS_EXPR, offset, this_offset);
7222 bit_offset += wi::to_poly_offset (DECL_FIELD_BIT_OFFSET (field));
7223
7224 /* ??? Right now we don't do anything with DECL_OFFSET_ALIGN. */
7225 }
7226 break;
7227
7228 case ARRAY_REF:
7229 case ARRAY_RANGE_REF:
7230 {
7231 tree index = TREE_OPERAND (exp, 1);
7232 tree low_bound = array_ref_low_bound (exp);
7233 tree unit_size = array_ref_element_size (exp);
7234
7235 /* We assume all arrays have sizes that are a multiple of a byte.
7236 First subtract the lower bound, if any, in the type of the
7237 index, then convert to sizetype and multiply by the size of
7238 the array element. */
7239 if (! integer_zerop (low_bound))
7240 index = fold_build2 (MINUS_EXPR, TREE_TYPE (index),
7241 index, low_bound);
7242
7243 offset = size_binop (PLUS_EXPR, offset,
7244 size_binop (MULT_EXPR,
7245 fold_convert (sizetype, index),
7246 unit_size));
7247 }
7248 break;
7249
7250 case REALPART_EXPR:
7251 break;
7252
7253 case IMAGPART_EXPR:
7254 bit_offset += *pbitsize;
7255 break;
7256
7257 case VIEW_CONVERT_EXPR:
7258 break;
7259
7260 case MEM_REF:
7261 /* Hand back the decl for MEM[&decl, off]. */
7262 if (TREE_CODE (TREE_OPERAND (exp, 0)) == ADDR_EXPR)
7263 {
7264 tree off = TREE_OPERAND (exp, 1);
7265 if (!integer_zerop (off))
7266 {
7267 poly_offset_int boff = mem_ref_offset (exp);
7268 boff <<= LOG2_BITS_PER_UNIT;
7269 bit_offset += boff;
7270 }
7271 exp = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
7272 }
7273 goto done;
7274
7275 default:
7276 goto done;
7277 }
7278
7279 /* If any reference in the chain is volatile, the effect is volatile. */
7280 if (TREE_THIS_VOLATILE (exp))
7281 *pvolatilep = 1;
7282
7283 exp = TREE_OPERAND (exp, 0);
7284 }
7285 done:
7286
7287 /* If OFFSET is constant, see if we can return the whole thing as a
7288 constant bit position. Make sure to handle overflow during
7289 this conversion. */
7290 if (poly_int_tree_p (offset))
7291 {
7292 poly_offset_int tem = wi::sext (wi::to_poly_offset (offset),
7293 TYPE_PRECISION (sizetype));
7294 tem <<= LOG2_BITS_PER_UNIT;
7295 tem += bit_offset;
7296 if (tem.to_shwi (pbitpos))
7297 *poffset = offset = NULL_TREE;
7298 }
7299
7300 /* Otherwise, split it up. */
7301 if (offset)
7302 {
7303 /* Avoid returning a negative bitpos as this may wreak havoc later. */
7304 if (!bit_offset.to_shwi (pbitpos) || maybe_lt (*pbitpos, 0))
7305 {
7306 *pbitpos = num_trailing_bits (bit_offset.force_shwi ());
7307 poly_offset_int bytes = bits_to_bytes_round_down (bit_offset);
7308 offset = size_binop (PLUS_EXPR, offset,
7309 build_int_cst (sizetype, bytes.force_shwi ()));
7310 }
7311
7312 *poffset = offset;
7313 }
7314
7315 /* We can use BLKmode for a byte-aligned BLKmode bitfield. */
7316 if (mode == VOIDmode
7317 && blkmode_bitfield
7318 && multiple_p (*pbitpos, BITS_PER_UNIT)
7319 && multiple_p (*pbitsize, BITS_PER_UNIT))
7320 *pmode = BLKmode;
7321 else
7322 *pmode = mode;
7323
7324 return exp;
7325 }
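/* Illustrative sketch, not part of the original source: for a reference
   like p->arr[j] with

     struct t { int i; int arr[8]; } *p;

   the loop above walks the ARRAY_REF and COMPONENT_REF; assuming 4-byte
   ints, it returns the innermost MEM_REF for *p with *PBITSIZE == 32,
   *PBITPOS == 32 (the offset of arr), and *POFFSET set to a tree for
   (sizetype) j * 4, because the array index is variable.  */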
7326
7327 /* Alignment in bits the TARGET of an assignment may be assumed to have. */
7328
7329 static unsigned HOST_WIDE_INT
7330 target_align (const_tree target)
7331 {
7332 /* We might have a chain of nested references with intermediate
7333 misaligned bit-field components, so we need to recurse to find out. */
7334
7335 unsigned HOST_WIDE_INT this_align, outer_align;
7336
7337 switch (TREE_CODE (target))
7338 {
7339 case BIT_FIELD_REF:
7340 return 1;
7341
7342 case COMPONENT_REF:
7343 this_align = DECL_ALIGN (TREE_OPERAND (target, 1));
7344 outer_align = target_align (TREE_OPERAND (target, 0));
7345 return MIN (this_align, outer_align);
7346
7347 case ARRAY_REF:
7348 case ARRAY_RANGE_REF:
7349 this_align = TYPE_ALIGN (TREE_TYPE (target));
7350 outer_align = target_align (TREE_OPERAND (target, 0));
7351 return MIN (this_align, outer_align);
7352
7353 CASE_CONVERT:
7354 case NON_LVALUE_EXPR:
7355 case VIEW_CONVERT_EXPR:
7356 this_align = TYPE_ALIGN (TREE_TYPE (target));
7357 outer_align = target_align (TREE_OPERAND (target, 0));
7358 return MAX (this_align, outer_align);
7359
7360 default:
7361 return TYPE_ALIGN (TREE_TYPE (target));
7362 }
7363 }
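/* Illustrative sketch, not part of the original source: for a store
   target such as x.arr[i], target_align takes the MIN of the alignment
   of the array's element type (ARRAY_REF case) and DECL_ALIGN of the
   FIELD_DECL arr (enclosing COMPONENT_REF case), so a packed or
   under-aligned field correctly lowers the alignment assumed for the
   whole reference.  */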
7364
7365 \f
7366 /* Given an rtx VALUE that may contain additions and multiplications, return
7367 an equivalent value that just refers to a register, memory, or constant.
7368 This is done by generating instructions to perform the arithmetic and
7369 returning a pseudo-register containing the value.
7370
7371 The returned value may be a REG, SUBREG, MEM or constant. */
7372
7373 rtx
7374 force_operand (rtx value, rtx target)
7375 {
7376 rtx op1, op2;
7377 /* Use subtarget as the target for operand 0 of a binary operation. */
7378 rtx subtarget = get_subtarget (target);
7379 enum rtx_code code = GET_CODE (value);
7380
7381 /* Check for a subreg applied to an expression produced by the loop optimizer. */
7382 if (code == SUBREG
7383 && !REG_P (SUBREG_REG (value))
7384 && !MEM_P (SUBREG_REG (value)))
7385 {
7386 value
7387 = simplify_gen_subreg (GET_MODE (value),
7388 force_reg (GET_MODE (SUBREG_REG (value)),
7389 force_operand (SUBREG_REG (value),
7390 NULL_RTX)),
7391 GET_MODE (SUBREG_REG (value)),
7392 SUBREG_BYTE (value));
7393 code = GET_CODE (value);
7394 }
7395
7396 /* Check for a PIC address load. */
7397 if ((code == PLUS || code == MINUS)
7398 && XEXP (value, 0) == pic_offset_table_rtx
7399 && (GET_CODE (XEXP (value, 1)) == SYMBOL_REF
7400 || GET_CODE (XEXP (value, 1)) == LABEL_REF
7401 || GET_CODE (XEXP (value, 1)) == CONST))
7402 {
7403 if (!subtarget)
7404 subtarget = gen_reg_rtx (GET_MODE (value));
7405 emit_move_insn (subtarget, value);
7406 return subtarget;
7407 }
7408
7409 if (ARITHMETIC_P (value))
7410 {
7411 op2 = XEXP (value, 1);
7412 if (!CONSTANT_P (op2) && !(REG_P (op2) && op2 != subtarget))
7413 subtarget = 0;
7414 if (code == MINUS && CONST_INT_P (op2))
7415 {
7416 code = PLUS;
7417 op2 = negate_rtx (GET_MODE (value), op2);
7418 }
7419
7420 /* Check for an addition with OP2 a constant integer and our first
7421 operand a PLUS of a virtual register and something else. In that
7422 case, we want to emit the sum of the virtual register and the
7423 constant first and then add the other value. This allows virtual
7424 register instantiation to simply modify the constant rather than
7425 creating another one around this addition. */
7426 if (code == PLUS && CONST_INT_P (op2)
7427 && GET_CODE (XEXP (value, 0)) == PLUS
7428 && REG_P (XEXP (XEXP (value, 0), 0))
7429 && REGNO (XEXP (XEXP (value, 0), 0)) >= FIRST_VIRTUAL_REGISTER
7430 && REGNO (XEXP (XEXP (value, 0), 0)) <= LAST_VIRTUAL_REGISTER)
7431 {
7432 rtx temp = expand_simple_binop (GET_MODE (value), code,
7433 XEXP (XEXP (value, 0), 0), op2,
7434 subtarget, 0, OPTAB_LIB_WIDEN);
7435 return expand_simple_binop (GET_MODE (value), code, temp,
7436 force_operand (XEXP (XEXP (value,
7437 0), 1), 0),
7438 target, 0, OPTAB_LIB_WIDEN);
7439 }
7440
7441 op1 = force_operand (XEXP (value, 0), subtarget);
7442 op2 = force_operand (op2, NULL_RTX);
7443 switch (code)
7444 {
7445 case MULT:
7446 return expand_mult (GET_MODE (value), op1, op2, target, 1);
7447 case DIV:
7448 if (!INTEGRAL_MODE_P (GET_MODE (value)))
7449 return expand_simple_binop (GET_MODE (value), code, op1, op2,
7450 target, 1, OPTAB_LIB_WIDEN);
7451 else
7452 return expand_divmod (0,
7453 FLOAT_MODE_P (GET_MODE (value))
7454 ? RDIV_EXPR : TRUNC_DIV_EXPR,
7455 GET_MODE (value), op1, op2, target, 0);
7456 case MOD:
7457 return expand_divmod (1, TRUNC_MOD_EXPR, GET_MODE (value), op1, op2,
7458 target, 0);
7459 case UDIV:
7460 return expand_divmod (0, TRUNC_DIV_EXPR, GET_MODE (value), op1, op2,
7461 target, 1);
7462 case UMOD:
7463 return expand_divmod (1, TRUNC_MOD_EXPR, GET_MODE (value), op1, op2,
7464 target, 1);
7465 case ASHIFTRT:
7466 return expand_simple_binop (GET_MODE (value), code, op1, op2,
7467 target, 0, OPTAB_LIB_WIDEN);
7468 default:
7469 return expand_simple_binop (GET_MODE (value), code, op1, op2,
7470 target, 1, OPTAB_LIB_WIDEN);
7471 }
7472 }
7473 if (UNARY_P (value))
7474 {
7475 if (!target)
7476 target = gen_reg_rtx (GET_MODE (value));
7477 op1 = force_operand (XEXP (value, 0), NULL_RTX);
7478 switch (code)
7479 {
7480 case ZERO_EXTEND:
7481 case SIGN_EXTEND:
7482 case TRUNCATE:
7483 case FLOAT_EXTEND:
7484 case FLOAT_TRUNCATE:
7485 convert_move (target, op1, code == ZERO_EXTEND);
7486 return target;
7487
7488 case FIX:
7489 case UNSIGNED_FIX:
7490 expand_fix (target, op1, code == UNSIGNED_FIX);
7491 return target;
7492
7493 case FLOAT:
7494 case UNSIGNED_FLOAT:
7495 expand_float (target, op1, code == UNSIGNED_FLOAT);
7496 return target;
7497
7498 default:
7499 return expand_simple_unop (GET_MODE (value), code, op1, target, 0);
7500 }
7501 }
7502
7503 #ifdef INSN_SCHEDULING
7504 /* On machines that have insn scheduling, we want all memory references to be
7505 explicit, so we need to deal with such paradoxical SUBREGs. */
7506 if (paradoxical_subreg_p (value) && MEM_P (SUBREG_REG (value)))
7507 value
7508 = simplify_gen_subreg (GET_MODE (value),
7509 force_reg (GET_MODE (SUBREG_REG (value)),
7510 force_operand (SUBREG_REG (value),
7511 NULL_RTX)),
7512 GET_MODE (SUBREG_REG (value)),
7513 SUBREG_BYTE (value));
7514 #endif
7515
7516 return value;
7517 }
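/* Illustrative sketch with assumed RTL, not from the original source:
   given VALUE == (plus:SI (mult:SI (reg:SI 60) (const_int 4))
   (reg:SI 61)), force_operand first recurses on the MULT, which
   expand_mult turns into a pseudo (possibly via a shift), and then
   emits the addition with expand_simple_binop, so the caller gets back
   a single REG instead of a compound arithmetic expression.  */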
7518 \f
7519 /* Subroutine of expand_expr: return nonzero iff there is no way that
7520 EXP can reference X, which is being modified. TOP_P is nonzero if this
7521 call is going to be used to determine whether we need a temporary
7522 for EXP, as opposed to a recursive call to this function.
7523
7524 It is always safe for this routine to return zero since it merely
7525 searches for optimization opportunities. */
7526
7527 int
7528 safe_from_p (const_rtx x, tree exp, int top_p)
7529 {
7530 rtx exp_rtl = 0;
7531 int i, nops;
7532
7533 if (x == 0
7534 /* If EXP has varying size, we MUST use a target since we currently
7535 have no way of allocating temporaries of variable size
7536 (except for arrays that have TYPE_ARRAY_MAX_SIZE set).
7537 So we assume here that something at a higher level has prevented a
7538 clash. This is somewhat bogus, but the best we can do. Only
7539 do this when X is BLKmode and when we are at the top level. */
7540 || (top_p && TREE_TYPE (exp) != 0 && COMPLETE_TYPE_P (TREE_TYPE (exp))
7541 && TREE_CODE (TYPE_SIZE (TREE_TYPE (exp))) != INTEGER_CST
7542 && (TREE_CODE (TREE_TYPE (exp)) != ARRAY_TYPE
7543 || TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp)) == NULL_TREE
7544 || TREE_CODE (TYPE_ARRAY_MAX_SIZE (TREE_TYPE (exp)))
7545 != INTEGER_CST)
7546 && GET_MODE (x) == BLKmode)
7547 /* If X is in the outgoing argument area, it is always safe. */
7548 || (MEM_P (x)
7549 && (XEXP (x, 0) == virtual_outgoing_args_rtx
7550 || (GET_CODE (XEXP (x, 0)) == PLUS
7551 && XEXP (XEXP (x, 0), 0) == virtual_outgoing_args_rtx))))
7552 return 1;
7553
7554 /* If this is a subreg of a hard register, declare it unsafe, otherwise,
7555 find the underlying pseudo. */
7556 if (GET_CODE (x) == SUBREG)
7557 {
7558 x = SUBREG_REG (x);
7559 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
7560 return 0;
7561 }
7562
7563 /* Now look at our tree code and possibly recurse. */
7564 switch (TREE_CODE_CLASS (TREE_CODE (exp)))
7565 {
7566 case tcc_declaration:
7567 exp_rtl = DECL_RTL_IF_SET (exp);
7568 break;
7569
7570 case tcc_constant:
7571 return 1;
7572
7573 case tcc_exceptional:
7574 if (TREE_CODE (exp) == TREE_LIST)
7575 {
7576 while (1)
7577 {
7578 if (TREE_VALUE (exp) && !safe_from_p (x, TREE_VALUE (exp), 0))
7579 return 0;
7580 exp = TREE_CHAIN (exp);
7581 if (!exp)
7582 return 1;
7583 if (TREE_CODE (exp) != TREE_LIST)
7584 return safe_from_p (x, exp, 0);
7585 }
7586 }
7587 else if (TREE_CODE (exp) == CONSTRUCTOR)
7588 {
7589 constructor_elt *ce;
7590 unsigned HOST_WIDE_INT idx;
7591
7592 FOR_EACH_VEC_SAFE_ELT (CONSTRUCTOR_ELTS (exp), idx, ce)
7593 if ((ce->index != NULL_TREE && !safe_from_p (x, ce->index, 0))
7594 || !safe_from_p (x, ce->value, 0))
7595 return 0;
7596 return 1;
7597 }
7598 else if (TREE_CODE (exp) == ERROR_MARK)
7599 return 1; /* An already-visited SAVE_EXPR? */
7600 else
7601 return 0;
7602
7603 case tcc_statement:
7604 /* The only case we look at here is the DECL_INITIAL inside a
7605 DECL_EXPR. */
7606 return (TREE_CODE (exp) != DECL_EXPR
7607 || TREE_CODE (DECL_EXPR_DECL (exp)) != VAR_DECL
7608 || !DECL_INITIAL (DECL_EXPR_DECL (exp))
7609 || safe_from_p (x, DECL_INITIAL (DECL_EXPR_DECL (exp)), 0));
7610
7611 case tcc_binary:
7612 case tcc_comparison:
7613 if (!safe_from_p (x, TREE_OPERAND (exp, 1), 0))
7614 return 0;
7615 /* Fall through. */
7616
7617 case tcc_unary:
7618 return safe_from_p (x, TREE_OPERAND (exp, 0), 0);
7619
7620 case tcc_expression:
7621 case tcc_reference:
7622 case tcc_vl_exp:
7623 /* Now do code-specific tests. EXP_RTL is set to any rtx we find in
7624 the expression. If it is set, we conflict iff we are that rtx or
7625 both are in memory. Otherwise, we check all operands of the
7626 expression recursively. */
7627
7628 switch (TREE_CODE (exp))
7629 {
7630 case ADDR_EXPR:
7631 /* If the operand is static or we are static, we can't conflict.
7632 Likewise if we don't conflict with the operand at all. */
7633 if (staticp (TREE_OPERAND (exp, 0))
7634 || TREE_STATIC (exp)
7635 || safe_from_p (x, TREE_OPERAND (exp, 0), 0))
7636 return 1;
7637
7638 /* Otherwise, the only way this can conflict is if we are taking
7639 the address of a DECL and that address is part of X, which is
7640 very rare. */
7641 exp = TREE_OPERAND (exp, 0);
7642 if (DECL_P (exp))
7643 {
7644 if (!DECL_RTL_SET_P (exp)
7645 || !MEM_P (DECL_RTL (exp)))
7646 return 0;
7647 else
7648 exp_rtl = XEXP (DECL_RTL (exp), 0);
7649 }
7650 break;
7651
7652 case MEM_REF:
7653 if (MEM_P (x)
7654 && alias_sets_conflict_p (MEM_ALIAS_SET (x),
7655 get_alias_set (exp)))
7656 return 0;
7657 break;
7658
7659 case CALL_EXPR:
7660 /* Assume that the call will clobber all hard registers and
7661 all of memory. */
7662 if ((REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
7663 || MEM_P (x))
7664 return 0;
7665 break;
7666
7667 case WITH_CLEANUP_EXPR:
7668 case CLEANUP_POINT_EXPR:
7669 /* Lowered by gimplify.c. */
7670 gcc_unreachable ();
7671
7672 case SAVE_EXPR:
7673 return safe_from_p (x, TREE_OPERAND (exp, 0), 0);
7674
7675 default:
7676 break;
7677 }
7678
7679 /* If we have an rtx, we do not need to scan our operands. */
7680 if (exp_rtl)
7681 break;
7682
7683 nops = TREE_OPERAND_LENGTH (exp);
7684 for (i = 0; i < nops; i++)
7685 if (TREE_OPERAND (exp, i) != 0
7686 && ! safe_from_p (x, TREE_OPERAND (exp, i), 0))
7687 return 0;
7688
7689 break;
7690
7691 case tcc_type:
7692 /* Should never get a type here. */
7693 gcc_unreachable ();
7694 }
7695
7696 /* If we have an rtl, find any enclosed object. Then see if we conflict
7697 with it. */
7698 if (exp_rtl)
7699 {
7700 if (GET_CODE (exp_rtl) == SUBREG)
7701 {
7702 exp_rtl = SUBREG_REG (exp_rtl);
7703 if (REG_P (exp_rtl)
7704 && REGNO (exp_rtl) < FIRST_PSEUDO_REGISTER)
7705 return 0;
7706 }
7707
7708 /* If the rtl is X, then it is not safe. Otherwise, it is unless both
7709 are memory and they conflict. */
7710 return ! (rtx_equal_p (x, exp_rtl)
7711 || (MEM_P (x) && MEM_P (exp_rtl)
7712 && true_dependence (exp_rtl, VOIDmode, x)));
7713 }
7714
7715 /* If we reach here, it is safe. */
7716 return 1;
7717 }
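/* Illustrative sketch, not from the original source: when expanding an
   assignment x = y + 1 with TARGET holding x, safe_from_p (TARGET,
   <y + 1>, 1) recurses through the tcc_binary case into both operands;
   if neither can refer to TARGET, the routine returns 1 and the sum
   may be computed directly into TARGET without a temporary.  */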
7718
7719 \f
7720 /* Return the highest power of two that EXP is known to be a multiple of.
7721 This is used in updating alignment of MEMs in array references. */
7722
7723 unsigned HOST_WIDE_INT
7724 highest_pow2_factor (const_tree exp)
7725 {
7726 unsigned HOST_WIDE_INT ret;
7727 int trailing_zeros = tree_ctz (exp);
7728 if (trailing_zeros >= HOST_BITS_PER_WIDE_INT)
7729 return BIGGEST_ALIGNMENT;
7730 ret = HOST_WIDE_INT_1U << trailing_zeros;
7731 if (ret > BIGGEST_ALIGNMENT)
7732 return BIGGEST_ALIGNMENT;
7733 return ret;
7734 }
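/* Illustrative sketch, not from the original source: for an offset
   expression such as i * 8, tree_ctz reports at least 3 known trailing
   zero bits, so highest_pow2_factor returns 8 and a MEM indexed by
   that expression may be treated as 8-byte aligned, subject to the
   BIGGEST_ALIGNMENT cap above.  */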
7735
7736 /* Similar, except that the alignment requirements of TARGET are
7737 taken into account. Assume it is at least as aligned as its
7738 type, unless it is a COMPONENT_REF in which case the layout of
7739 the structure gives the alignment. */
7740
7741 static unsigned HOST_WIDE_INT
7742 highest_pow2_factor_for_target (const_tree target, const_tree exp)
7743 {
7744 unsigned HOST_WIDE_INT talign = target_align (target) / BITS_PER_UNIT;
7745 unsigned HOST_WIDE_INT factor = highest_pow2_factor (exp);
7746
7747 return MAX (factor, talign);
7748 }
7749 \f
7750 /* Convert the tree comparison code TCODE to the rtl one where the
7751 signedness is UNSIGNEDP. */
7752
7753 static enum rtx_code
7754 convert_tree_comp_to_rtx (enum tree_code tcode, int unsignedp)
7755 {
7756 enum rtx_code code;
7757 switch (tcode)
7758 {
7759 case EQ_EXPR:
7760 code = EQ;
7761 break;
7762 case NE_EXPR:
7763 code = NE;
7764 break;
7765 case LT_EXPR:
7766 code = unsignedp ? LTU : LT;
7767 break;
7768 case LE_EXPR:
7769 code = unsignedp ? LEU : LE;
7770 break;
7771 case GT_EXPR:
7772 code = unsignedp ? GTU : GT;
7773 break;
7774 case GE_EXPR:
7775 code = unsignedp ? GEU : GE;
7776 break;
7777 case UNORDERED_EXPR:
7778 code = UNORDERED;
7779 break;
7780 case ORDERED_EXPR:
7781 code = ORDERED;
7782 break;
7783 case UNLT_EXPR:
7784 code = UNLT;
7785 break;
7786 case UNLE_EXPR:
7787 code = UNLE;
7788 break;
7789 case UNGT_EXPR:
7790 code = UNGT;
7791 break;
7792 case UNGE_EXPR:
7793 code = UNGE;
7794 break;
7795 case UNEQ_EXPR:
7796 code = UNEQ;
7797 break;
7798 case LTGT_EXPR:
7799 code = LTGT;
7800 break;
7801
7802 default:
7803 gcc_unreachable ();
7804 }
7805 return code;
7806 }
7807
7808 /* Subroutine of expand_expr. Expand the two operands of a binary
7809 expression EXP0 and EXP1 placing the results in OP0 and OP1.
7810 The value may be stored in TARGET if TARGET is nonzero. The
7811 MODIFIER argument is as documented by expand_expr. */
7812
7813 void
7814 expand_operands (tree exp0, tree exp1, rtx target, rtx *op0, rtx *op1,
7815 enum expand_modifier modifier)
7816 {
7817 if (! safe_from_p (target, exp1, 1))
7818 target = 0;
7819 if (operand_equal_p (exp0, exp1, 0))
7820 {
7821 *op0 = expand_expr (exp0, target, VOIDmode, modifier);
7822 *op1 = copy_rtx (*op0);
7823 }
7824 else
7825 {
7826 *op0 = expand_expr (exp0, target, VOIDmode, modifier);
7827 *op1 = expand_expr (exp1, NULL_RTX, VOIDmode, modifier);
7828 }
7829 }
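/* Illustrative sketch, not from the original source: for an expression
   such as a * a, operand_equal_p detects the identical operands, so
   expand_operands expands `a' only once and returns a copy_rtx of that
   result as the second operand, avoiding redundant code for the
   re-expansion.  */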
7830
7831 \f
7832 /* Return a MEM that contains constant EXP. DEFER is as for
7833 output_constant_def and MODIFIER is as for expand_expr. */
7834
7835 static rtx
7836 expand_expr_constant (tree exp, int defer, enum expand_modifier modifier)
7837 {
7838 rtx mem;
7839
7840 mem = output_constant_def (exp, defer);
7841 if (modifier != EXPAND_INITIALIZER)
7842 mem = use_anchored_address (mem);
7843 return mem;
7844 }
7845
7846 /* A subroutine of expand_expr_addr_expr. Evaluate the address of EXP.
7847 The TARGET, TMODE and MODIFIER arguments are as for expand_expr. */
7848
7849 static rtx
7850 expand_expr_addr_expr_1 (tree exp, rtx target, scalar_int_mode tmode,
7851 enum expand_modifier modifier, addr_space_t as)
7852 {
7853 rtx result, subtarget;
7854 tree inner, offset;
7855 poly_int64 bitsize, bitpos;
7856 int unsignedp, reversep, volatilep = 0;
7857 machine_mode mode1;
7858
7859 /* If we are taking the address of a constant and are at the top level,
7860 we have to use output_constant_def since we can't call force_const_mem
7861 at top level. */
7862 /* ??? This should be considered a front-end bug. We should not be
7863 generating ADDR_EXPR of something that isn't an LVALUE. The only
7864 exception here is STRING_CST. */
7865 if (CONSTANT_CLASS_P (exp))
7866 {
7867 result = XEXP (expand_expr_constant (exp, 0, modifier), 0);
7868 if (modifier < EXPAND_SUM)
7869 result = force_operand (result, target);
7870 return result;
7871 }
7872
7873 /* Everything must be something allowed by is_gimple_addressable. */
7874 switch (TREE_CODE (exp))
7875 {
7876 case INDIRECT_REF:
7877 /* This case will happen via recursion for &a->b. */
7878 return expand_expr (TREE_OPERAND (exp, 0), target, tmode, modifier);
7879
7880 case MEM_REF:
7881 {
7882 tree tem = TREE_OPERAND (exp, 0);
7883 if (!integer_zerop (TREE_OPERAND (exp, 1)))
7884 tem = fold_build_pointer_plus (tem, TREE_OPERAND (exp, 1));
7885 return expand_expr (tem, target, tmode, modifier);
7886 }
7887
7888 case CONST_DECL:
7889 /* Expand the initializer like constants above. */
7890 result = XEXP (expand_expr_constant (DECL_INITIAL (exp),
7891 0, modifier), 0);
7892 if (modifier < EXPAND_SUM)
7893 result = force_operand (result, target);
7894 return result;
7895
7896 case REALPART_EXPR:
7897 /* The real part of the complex number is always first, therefore
7898 the address is the same as the address of the parent object. */
7899 offset = 0;
7900 bitpos = 0;
7901 inner = TREE_OPERAND (exp, 0);
7902 break;
7903
7904 case IMAGPART_EXPR:
7905 /* The imaginary part of the complex number is always second.
7906 The expression is therefore always offset by the size of the
7907 scalar type. */
7908 offset = 0;
7909 bitpos = GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (exp)));
7910 inner = TREE_OPERAND (exp, 0);
7911 break;
7912
7913 case COMPOUND_LITERAL_EXPR:
7914 /* Allow COMPOUND_LITERAL_EXPR in initializers or coming from
7915 initializers, if e.g. rtl_for_decl_init is called on DECL_INITIAL
7916 with COMPOUND_LITERAL_EXPRs in it, or ARRAY_REF on a const static
7917 array with address of COMPOUND_LITERAL_EXPR in DECL_INITIAL;
7918 the initializers aren't gimplified. */
7919 if (COMPOUND_LITERAL_EXPR_DECL (exp)
7920 && TREE_STATIC (COMPOUND_LITERAL_EXPR_DECL (exp)))
7921 return expand_expr_addr_expr_1 (COMPOUND_LITERAL_EXPR_DECL (exp),
7922 target, tmode, modifier, as);
7923 /* FALLTHRU */
7924 default:
7925 /* If the object is a DECL, then expand it for its rtl. Don't bypass
7926 expand_expr, as that can have various side effects; LABEL_DECLs for
7927 example, may not have their DECL_RTL set yet. Expand the rtl of
7928 CONSTRUCTORs too, which should yield a memory reference for the
7929 constructor's contents. Assume language specific tree nodes can
7930 be expanded in some interesting way. */
7931 gcc_assert (TREE_CODE (exp) < LAST_AND_UNUSED_TREE_CODE);
7932 if (DECL_P (exp)
7933 || TREE_CODE (exp) == CONSTRUCTOR
7934 || TREE_CODE (exp) == COMPOUND_LITERAL_EXPR)
7935 {
7936 result = expand_expr (exp, target, tmode,
7937 modifier == EXPAND_INITIALIZER
7938 ? EXPAND_INITIALIZER : EXPAND_CONST_ADDRESS);
7939
7940 /* If the DECL isn't in memory, then the DECL wasn't properly
7941 marked TREE_ADDRESSABLE, which will be either a front-end
7942 or a tree optimizer bug. */
7943
7944 gcc_assert (MEM_P (result));
7945 result = XEXP (result, 0);
7946
7947 /* ??? Is this needed anymore? */
7948 if (DECL_P (exp))
7949 TREE_USED (exp) = 1;
7950
7951 if (modifier != EXPAND_INITIALIZER
7952 && modifier != EXPAND_CONST_ADDRESS
7953 && modifier != EXPAND_SUM)
7954 result = force_operand (result, target);
7955 return result;
7956 }
7957
7958 /* Pass FALSE as the last argument to get_inner_reference although
7959 we are expanding to RTL. The rationale is that we know how to
7960 handle "aligning nodes" here: we can just bypass them because
7961 they won't change the final object whose address will be returned
7962 (they actually exist only for that purpose). */
7963 inner = get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode1,
7964 &unsignedp, &reversep, &volatilep);
7965 break;
7966 }
7967
7968 /* We must have made progress. */
7969 gcc_assert (inner != exp);
7970
7971 subtarget = offset || maybe_ne (bitpos, 0) ? NULL_RTX : target;
7972 /* For VIEW_CONVERT_EXPR, where the outer alignment is bigger than
7973 inner alignment, force the inner to be sufficiently aligned. */
7974 if (CONSTANT_CLASS_P (inner)
7975 && TYPE_ALIGN (TREE_TYPE (inner)) < TYPE_ALIGN (TREE_TYPE (exp)))
7976 {
7977 inner = copy_node (inner);
7978 TREE_TYPE (inner) = copy_node (TREE_TYPE (inner));
7979 SET_TYPE_ALIGN (TREE_TYPE (inner), TYPE_ALIGN (TREE_TYPE (exp)));
7980 TYPE_USER_ALIGN (TREE_TYPE (inner)) = 1;
7981 }
7982 result = expand_expr_addr_expr_1 (inner, subtarget, tmode, modifier, as);
7983
7984 if (offset)
7985 {
7986 rtx tmp;
7987
7988 if (modifier != EXPAND_NORMAL)
7989 result = force_operand (result, NULL);
7990 tmp = expand_expr (offset, NULL_RTX, tmode,
7991 modifier == EXPAND_INITIALIZER
7992 ? EXPAND_INITIALIZER : EXPAND_NORMAL);
7993
7994 /* expand_expr is allowed to return an object in a mode other
7995 than TMODE. If it did, we need to convert. */
7996 if (GET_MODE (tmp) != VOIDmode && tmode != GET_MODE (tmp))
7997 tmp = convert_modes (tmode, GET_MODE (tmp),
7998 tmp, TYPE_UNSIGNED (TREE_TYPE (offset)));
7999 result = convert_memory_address_addr_space (tmode, result, as);
8000 tmp = convert_memory_address_addr_space (tmode, tmp, as);
8001
8002 if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
8003 result = simplify_gen_binary (PLUS, tmode, result, tmp);
8004 else
8005 {
8006 subtarget = maybe_ne (bitpos, 0) ? NULL_RTX : target;
8007 result = expand_simple_binop (tmode, PLUS, result, tmp, subtarget,
8008 1, OPTAB_LIB_WIDEN);
8009 }
8010 }
8011
8012 if (maybe_ne (bitpos, 0))
8013 {
8014 /* Someone beforehand should have rejected taking the address
8015 of an object that isn't byte-aligned. */
8016 poly_int64 bytepos = exact_div (bitpos, BITS_PER_UNIT);
8017 result = convert_memory_address_addr_space (tmode, result, as);
8018 result = plus_constant (tmode, result, bytepos);
8019 if (modifier < EXPAND_SUM)
8020 result = force_operand (result, target);
8021 }
8022
8023 return result;
8024 }
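/* Illustrative sketch with assumed layout, not from the original
   source: for &s.f where the field f sits at byte offset 4,
   get_inner_reference yields INNER == s, OFFSET == NULL_TREE and
   BITPOS == 32; the recursion returns the address of s and the final
   block above adds bitpos / BITS_PER_UNIT == 4 via plus_constant.  */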
8025
8026 /* A subroutine of expand_expr. Evaluate EXP, which is an ADDR_EXPR.
8027 The TARGET, TMODE and MODIFIER arguments are as for expand_expr. */
8028
8029 static rtx
8030 expand_expr_addr_expr (tree exp, rtx target, machine_mode tmode,
8031 enum expand_modifier modifier)
8032 {
8033 addr_space_t as = ADDR_SPACE_GENERIC;
8034 scalar_int_mode address_mode = Pmode;
8035 scalar_int_mode pointer_mode = ptr_mode;
8036 machine_mode rmode;
8037 rtx result;
8038
8039 /* Target mode of VOIDmode says "whatever's natural". */
8040 if (tmode == VOIDmode)
8041 tmode = TYPE_MODE (TREE_TYPE (exp));
8042
8043 if (POINTER_TYPE_P (TREE_TYPE (exp)))
8044 {
8045 as = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (exp)));
8046 address_mode = targetm.addr_space.address_mode (as);
8047 pointer_mode = targetm.addr_space.pointer_mode (as);
8048 }
8049
8050 /* We can get called with some Weird Things if the user does silliness
8051 like "(short) &a". In that case, convert_memory_address won't do
8052 the right thing, so ignore the given target mode. */
8053 scalar_int_mode new_tmode = (tmode == pointer_mode
8054 ? pointer_mode
8055 : address_mode);
8056
8057 result = expand_expr_addr_expr_1 (TREE_OPERAND (exp, 0), target,
8058 new_tmode, modifier, as);
8059
8060 /* Despite expand_expr's claim that TMODE may be ignored when not
8061 strictly convenient, things break if we don't honor it. Note
8062 that combined with the above, we only do this for pointer modes. */
8063 rmode = GET_MODE (result);
8064 if (rmode == VOIDmode)
8065 rmode = new_tmode;
8066 if (rmode != new_tmode)
8067 result = convert_memory_address_addr_space (new_tmode, result, as);
8068
8069 return result;
8070 }
8071
8072 /* Generate code for computing CONSTRUCTOR EXP.
8073 An rtx for the computed value is returned. If AVOID_TEMP_MEM
8074 is TRUE, instead of creating a temporary variable in memory,
8075 NULL is returned and the caller needs to handle it differently. */
8076
8077 static rtx
8078 expand_constructor (tree exp, rtx target, enum expand_modifier modifier,
8079 bool avoid_temp_mem)
8080 {
8081 tree type = TREE_TYPE (exp);
8082 machine_mode mode = TYPE_MODE (type);
8083
8084 /* Try to avoid creating a temporary at all. This is possible
8085 if all of the initializer is zero.
8086 FIXME: try to handle all [0..255] initializers we can handle
8087 with memset. */
8088 if (TREE_STATIC (exp)
8089 && !TREE_ADDRESSABLE (exp)
8090 && target != 0 && mode == BLKmode
8091 && all_zeros_p (exp))
8092 {
8093 clear_storage (target, expr_size (exp), BLOCK_OP_NORMAL);
8094 return target;
8095 }
8096
8097 /* All elts simple constants => refer to a constant in memory. But
8098 if this is a non-BLKmode mode, let it store a field at a time
8099 since that should make a CONST_INT, CONST_WIDE_INT or
8100 CONST_DOUBLE when we fold. Likewise, if we have a target we can
8101 use, it is best to store directly into the target unless the type
8102 is large enough that memcpy will be used. If we are making an
8103 initializer and all operands are constant, put it in memory as
8104 well.
8105
8106 FIXME: Avoid trying to fill vector constructors piece-meal.
8107 Output them with output_constant_def below unless we're sure
8108 they're zeros. This should go away when vector initializers
8109 are treated like VECTOR_CST instead of arrays. */
8110 if ((TREE_STATIC (exp)
8111 && ((mode == BLKmode
8112 && ! (target != 0 && safe_from_p (target, exp, 1)))
8113 || TREE_ADDRESSABLE (exp)
8114 || (tree_fits_uhwi_p (TYPE_SIZE_UNIT (type))
8115 && (! can_move_by_pieces
8116 (tree_to_uhwi (TYPE_SIZE_UNIT (type)),
8117 TYPE_ALIGN (type)))
8118 && ! mostly_zeros_p (exp))))
8119 || ((modifier == EXPAND_INITIALIZER || modifier == EXPAND_CONST_ADDRESS)
8120 && TREE_CONSTANT (exp)))
8121 {
8122 rtx constructor;
8123
8124 if (avoid_temp_mem)
8125 return NULL_RTX;
8126
8127 constructor = expand_expr_constant (exp, 1, modifier);
8128
8129 if (modifier != EXPAND_CONST_ADDRESS
8130 && modifier != EXPAND_INITIALIZER
8131 && modifier != EXPAND_SUM)
8132 constructor = validize_mem (constructor);
8133
8134 return constructor;
8135 }
8136
8137 /* Handle calls that pass values in multiple non-contiguous
8138 locations. The Irix 6 ABI has examples of this. */
8139 if (target == 0 || ! safe_from_p (target, exp, 1)
8140 || GET_CODE (target) == PARALLEL || modifier == EXPAND_STACK_PARM)
8141 {
8142 if (avoid_temp_mem)
8143 return NULL_RTX;
8144
8145 target = assign_temp (type, TREE_ADDRESSABLE (exp), 1);
8146 }
8147
8148 store_constructor (exp, target, 0, int_expr_size (exp), false);
8149 return target;
8150 }
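/* Illustrative sketch, not from the original source: a constructor that
   is statically all zeros, e.g. for

     struct big { int a[64]; } v = { 0 };

   hits the all_zeros_p fast path above when TARGET is BLKmode memory
   and is expanded as one clear_storage call instead of a
   field-by-field store_constructor walk.  */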
8151
8152
8153 /* expand_expr: generate code for computing expression EXP.
8154 An rtx for the computed value is returned. The value is never null.
8155 In the case of a void EXP, const0_rtx is returned.
8156
8157 The value may be stored in TARGET if TARGET is nonzero.
8158 TARGET is just a suggestion; callers must assume that
8159 the rtx returned may not be the same as TARGET.
8160
8161 If TARGET is CONST0_RTX, it means that the value will be ignored.
8162
8163 If TMODE is not VOIDmode, it suggests generating the
8164 result in mode TMODE. But this is done only when convenient.
8165 Otherwise, TMODE is ignored and the value is generated in its natural mode.
8166 TMODE is just a suggestion; callers must assume that
8167 the rtx returned may not have mode TMODE.
8168
8169 Note that TARGET may have neither TMODE nor MODE. In that case, it
8170 probably will not be used.
8171
8172 If MODIFIER is EXPAND_SUM then when EXP is an addition
8173 we can return an rtx of the form (MULT (REG ...) (CONST_INT ...))
8174 or a nest of (PLUS ...) and (MINUS ...) where the terms are
8175 products as above, or REG or MEM, or constant.
8176 Ordinarily in such cases we would output mul or add instructions
8177 and then return a pseudo reg containing the sum.
8178
8179 EXPAND_INITIALIZER is much like EXPAND_SUM except that
8180 it also marks a label as absolutely required (it can't be dead).
8181 It also makes a ZERO_EXTEND or SIGN_EXTEND instead of emitting extend insns.
8182 This is used for outputting expressions used in initializers.
8183
8184 EXPAND_CONST_ADDRESS says that it is okay to return a MEM
8185 with a constant address even if that address is not normally legitimate.
8186 EXPAND_INITIALIZER and EXPAND_SUM also have this effect.
8187
8188 EXPAND_STACK_PARM is used when expanding to a TARGET on the stack for
8189 a call parameter. Such targets require special care as we haven't yet
8190 marked TARGET so that it's safe from being trashed by libcalls. We
8191 don't want to use TARGET for anything but the final result;
8192 intermediate values must go elsewhere. Additionally, calls to
8193 emit_block_move will be flagged with BLOCK_OP_CALL_PARM.
8194
8195 If EXP is a VAR_DECL whose DECL_RTL was a MEM with an invalid
8196 address, and ALT_RTL is non-NULL, then *ALT_RTL is set to the
8197 DECL_RTL of the VAR_DECL. *ALT_RTL is also set if EXP is a
8198 COMPOUND_EXPR whose second argument is such a VAR_DECL, and so on
8199 recursively.
8200
8201 If INNER_REFERENCE_P is true, we are expanding an inner reference.
8202 In this case, we don't adjust a returned MEM rtx that wouldn't be
8203 sufficiently aligned for its mode; instead, it's up to the caller
8204 to deal with it afterwards. This is used to make sure that unaligned
8205 base objects for which out-of-bounds accesses are supported, for
8206 example record types with trailing arrays, aren't realigned behind
8207 the back of the caller.
8208 The normal operating mode is to pass FALSE for this parameter. */
8209
8210 rtx
8211 expand_expr_real (tree exp, rtx target, machine_mode tmode,
8212 enum expand_modifier modifier, rtx *alt_rtl,
8213 bool inner_reference_p)
8214 {
8215 rtx ret;
8216
8217 /* Handle ERROR_MARK before anybody tries to access its type. */
8218 if (TREE_CODE (exp) == ERROR_MARK
8219 || (TREE_CODE (TREE_TYPE (exp)) == ERROR_MARK))
8220 {
8221 ret = CONST0_RTX (tmode);
8222 return ret ? ret : const0_rtx;
8223 }
8224
8225 ret = expand_expr_real_1 (exp, target, tmode, modifier, alt_rtl,
8226 inner_reference_p);
8227 return ret;
8228 }
8229
8230 /* Try to expand the conditional expression which is represented by
8231 TREEOP0 ? TREEOP1 : TREEOP2 using conditional moves. If it succeeds,
8232 return the rtl reg which represents the result. Otherwise return
8233 NULL_RTX. */
8234
8235 static rtx
8236 expand_cond_expr_using_cmove (tree treeop0 ATTRIBUTE_UNUSED,
8237 tree treeop1 ATTRIBUTE_UNUSED,
8238 tree treeop2 ATTRIBUTE_UNUSED)
8239 {
8240 rtx insn;
8241 rtx op00, op01, op1, op2;
8242 enum rtx_code comparison_code;
8243 machine_mode comparison_mode;
8244 gimple *srcstmt;
8245 rtx temp;
8246 tree type = TREE_TYPE (treeop1);
8247 int unsignedp = TYPE_UNSIGNED (type);
8248 machine_mode mode = TYPE_MODE (type);
8249 machine_mode orig_mode = mode;
8250 static bool expanding_cond_expr_using_cmove = false;
8251
8252 /* Conditional move expansion can end up TERing two operands which,
8253 when recursively hitting conditional expressions, can result in
8254 exponential behavior if the cmove expansion ultimately fails.
8255 It's hardly profitable to TER a cmove into a cmove so avoid doing
8256 that by failing early if we end up recursing. */
8257 if (expanding_cond_expr_using_cmove)
8258 return NULL_RTX;
8259
8260 /* If we cannot do a conditional move on the mode, try doing it
8261 with the promoted mode. */
8262 if (!can_conditionally_move_p (mode))
8263 {
8264 mode = promote_mode (type, mode, &unsignedp);
8265 if (!can_conditionally_move_p (mode))
8266 return NULL_RTX;
8267 temp = assign_temp (type, 0, 0); /* Use promoted mode for temp. */
8268 }
8269 else
8270 temp = assign_temp (type, 0, 1);
8271
8272 expanding_cond_expr_using_cmove = true;
8273 start_sequence ();
8274 expand_operands (treeop1, treeop2,
8275 temp, &op1, &op2, EXPAND_NORMAL);
8276
8277 if (TREE_CODE (treeop0) == SSA_NAME
8278 && (srcstmt = get_def_for_expr_class (treeop0, tcc_comparison)))
8279 {
8280 tree type = TREE_TYPE (gimple_assign_rhs1 (srcstmt));
8281 enum tree_code cmpcode = gimple_assign_rhs_code (srcstmt);
8282 op00 = expand_normal (gimple_assign_rhs1 (srcstmt));
8283 op01 = expand_normal (gimple_assign_rhs2 (srcstmt));
8284 comparison_mode = TYPE_MODE (type);
8285 unsignedp = TYPE_UNSIGNED (type);
8286 comparison_code = convert_tree_comp_to_rtx (cmpcode, unsignedp);
8287 }
8288 else if (COMPARISON_CLASS_P (treeop0))
8289 {
8290 tree type = TREE_TYPE (TREE_OPERAND (treeop0, 0));
8291 enum tree_code cmpcode = TREE_CODE (treeop0);
8292 op00 = expand_normal (TREE_OPERAND (treeop0, 0));
8293 op01 = expand_normal (TREE_OPERAND (treeop0, 1));
8294 unsignedp = TYPE_UNSIGNED (type);
8295 comparison_mode = TYPE_MODE (type);
8296 comparison_code = convert_tree_comp_to_rtx (cmpcode, unsignedp);
8297 }
8298 else
8299 {
8300 op00 = expand_normal (treeop0);
8301 op01 = const0_rtx;
8302 comparison_code = NE;
8303 comparison_mode = GET_MODE (op00);
8304 if (comparison_mode == VOIDmode)
8305 comparison_mode = TYPE_MODE (TREE_TYPE (treeop0));
8306 }
8307 expanding_cond_expr_using_cmove = false;
8308
8309 if (GET_MODE (op1) != mode)
8310 op1 = gen_lowpart (mode, op1);
8311
8312 if (GET_MODE (op2) != mode)
8313 op2 = gen_lowpart (mode, op2);
8314
8315 /* Try to emit the conditional move. */
8316 insn = emit_conditional_move (temp, comparison_code,
8317 op00, op01, comparison_mode,
8318 op1, op2, mode,
8319 unsignedp);
8320
8321 /* If we could do the conditional move, emit the sequence,
8322 and return. */
8323 if (insn)
8324 {
8325 rtx_insn *seq = get_insns ();
8326 end_sequence ();
8327 emit_insn (seq);
8328 return convert_modes (orig_mode, mode, temp, 0);
8329 }
8330
8331 /* Otherwise discard the sequence and fall back to code with
8332 branches. */
8333 end_sequence ();
8334 return NULL_RTX;
8335 }
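/* Illustrative sketch, not from the original source: for gimple like
   x = a < b ? c : d on a target with a conditional-move pattern, the
   code above expands c and d into op1/op2, derives comparison_code LT
   (LTU for unsigned operands) from the defining comparison of the
   predicate, and emits a single conditional move into TEMP, avoiding
   a branch.  */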
8336
8337 rtx
8338 expand_expr_real_2 (sepops ops, rtx target, machine_mode tmode,
8339 enum expand_modifier modifier)
8340 {
8341 rtx op0, op1, op2, temp;
8342 rtx_code_label *lab;
8343 tree type;
8344 int unsignedp;
8345 machine_mode mode;
8346 scalar_int_mode int_mode;
8347 enum tree_code code = ops->code;
8348 optab this_optab;
8349 rtx subtarget, original_target;
8350 int ignore;
8351 bool reduce_bit_field;
8352 location_t loc = ops->location;
8353 tree treeop0, treeop1, treeop2;
8354 #define REDUCE_BIT_FIELD(expr) (reduce_bit_field \
8355 ? reduce_to_bit_field_precision ((expr), \
8356 target, \
8357 type) \
8358 : (expr))
8359
8360 type = ops->type;
8361 mode = TYPE_MODE (type);
8362 unsignedp = TYPE_UNSIGNED (type);
8363
8364 treeop0 = ops->op0;
8365 treeop1 = ops->op1;
8366 treeop2 = ops->op2;
8367
8368 /* We should be called only on simple (binary or unary) expressions,
8369 exactly those that are valid in gimple expressions that aren't
8370 GIMPLE_SINGLE_RHS (or invalid). */
8371 gcc_assert (get_gimple_rhs_class (code) == GIMPLE_UNARY_RHS
8372 || get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS
8373 || get_gimple_rhs_class (code) == GIMPLE_TERNARY_RHS);
8374
8375 ignore = (target == const0_rtx
8376 || ((CONVERT_EXPR_CODE_P (code)
8377 || code == COND_EXPR || code == VIEW_CONVERT_EXPR)
8378 && TREE_CODE (type) == VOID_TYPE));
8379
8380 /* We should be called only if we need the result. */
8381 gcc_assert (!ignore);
8382
8383 /* An operation in what may be a bit-field type needs the
8384 result to be reduced to the precision of the bit-field type,
8385 which is narrower than that of the type's mode. */
8386 reduce_bit_field = (INTEGRAL_TYPE_P (type)
8387 && !type_has_mode_precision_p (type));
8388
8389 if (reduce_bit_field && modifier == EXPAND_STACK_PARM)
8390 target = 0;
8391
8392 /* Use subtarget as the target for operand 0 of a binary operation. */
8393 subtarget = get_subtarget (target);
8394 original_target = target;
8395
8396 switch (code)
8397 {
8398 case NON_LVALUE_EXPR:
8399 case PAREN_EXPR:
8400 CASE_CONVERT:
8401 if (treeop0 == error_mark_node)
8402 return const0_rtx;
8403
8404 if (TREE_CODE (type) == UNION_TYPE)
8405 {
8406 tree valtype = TREE_TYPE (treeop0);
8407
8408 /* If both input and output are BLKmode, this conversion isn't doing
8409 anything except possibly changing memory attributes. */
8410 if (mode == BLKmode && TYPE_MODE (valtype) == BLKmode)
8411 {
8412 rtx result = expand_expr (treeop0, target, tmode,
8413 modifier);
8414
8415 result = copy_rtx (result);
8416 set_mem_attributes (result, type, 0);
8417 return result;
8418 }
8419
8420 if (target == 0)
8421 {
8422 if (TYPE_MODE (type) != BLKmode)
8423 target = gen_reg_rtx (TYPE_MODE (type));
8424 else
8425 target = assign_temp (type, 1, 1);
8426 }
8427
8428 if (MEM_P (target))
8429 /* Store data into beginning of memory target. */
8430 store_expr (treeop0,
8431 adjust_address (target, TYPE_MODE (valtype), 0),
8432 modifier == EXPAND_STACK_PARM,
8433 false, TYPE_REVERSE_STORAGE_ORDER (type));
8434
8435 else
8436 {
8437 gcc_assert (REG_P (target)
8438 && !TYPE_REVERSE_STORAGE_ORDER (type));
8439
8440 /* Store this field into a union of the proper type. */
8441 poly_uint64 op0_size
8442 = tree_to_poly_uint64 (TYPE_SIZE (TREE_TYPE (treeop0)));
8443 poly_uint64 union_size = GET_MODE_BITSIZE (mode);
8444 store_field (target,
8445 /* The conversion must be constructed so that
8446 we know at compile time how many bits
8447 to preserve. */
8448 ordered_min (op0_size, union_size),
8449 0, 0, 0, TYPE_MODE (valtype), treeop0, 0,
8450 false, false);
8451 }
8452
8453 /* Return the entire union. */
8454 return target;
8455 }
8456
8457 if (mode == TYPE_MODE (TREE_TYPE (treeop0)))
8458 {
8459 op0 = expand_expr (treeop0, target, VOIDmode,
8460 modifier);
8461
8462 /* If the signedness of the conversion differs and OP0 is
8463 a promoted SUBREG, clear that indication since we now
8464 have to do the proper extension. */
8465 if (TYPE_UNSIGNED (TREE_TYPE (treeop0)) != unsignedp
8466 && GET_CODE (op0) == SUBREG)
8467 SUBREG_PROMOTED_VAR_P (op0) = 0;
8468
8469 return REDUCE_BIT_FIELD (op0);
8470 }
8471
8472 op0 = expand_expr (treeop0, NULL_RTX, mode,
8473 modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier);
8474 if (GET_MODE (op0) == mode)
8475 ;
8476
8477 /* If OP0 is a constant, just convert it into the proper mode. */
8478 else if (CONSTANT_P (op0))
8479 {
8480 tree inner_type = TREE_TYPE (treeop0);
8481 machine_mode inner_mode = GET_MODE (op0);
8482
8483 if (inner_mode == VOIDmode)
8484 inner_mode = TYPE_MODE (inner_type);
8485
8486 if (modifier == EXPAND_INITIALIZER)
8487 op0 = lowpart_subreg (mode, op0, inner_mode);
8488 else
8489 op0 = convert_modes (mode, inner_mode, op0,
8490 TYPE_UNSIGNED (inner_type));
8491 }
8492
8493 else if (modifier == EXPAND_INITIALIZER)
8494 op0 = gen_rtx_fmt_e (TYPE_UNSIGNED (TREE_TYPE (treeop0))
8495 ? ZERO_EXTEND : SIGN_EXTEND, mode, op0);
8496
8497 else if (target == 0)
8498 op0 = convert_to_mode (mode, op0,
8499 TYPE_UNSIGNED (TREE_TYPE
8500 (treeop0)));
8501 else
8502 {
8503 convert_move (target, op0,
8504 TYPE_UNSIGNED (TREE_TYPE (treeop0)));
8505 op0 = target;
8506 }
8507
8508 return REDUCE_BIT_FIELD (op0);
8509
8510 case ADDR_SPACE_CONVERT_EXPR:
8511 {
8512 tree treeop0_type = TREE_TYPE (treeop0);
8513
8514 gcc_assert (POINTER_TYPE_P (type));
8515 gcc_assert (POINTER_TYPE_P (treeop0_type));
8516
8517 addr_space_t as_to = TYPE_ADDR_SPACE (TREE_TYPE (type));
8518 addr_space_t as_from = TYPE_ADDR_SPACE (TREE_TYPE (treeop0_type));
8519
8520 /* Conversions between pointers to the same address space should
8521 have been implemented via CONVERT_EXPR / NOP_EXPR. */
8522 gcc_assert (as_to != as_from);
8523
8524 op0 = expand_expr (treeop0, NULL_RTX, VOIDmode, modifier);
8525
8526 /* Ask target code to handle conversion between pointers
8527 to overlapping address spaces. */
8528 if (targetm.addr_space.subset_p (as_to, as_from)
8529 || targetm.addr_space.subset_p (as_from, as_to))
8530 {
8531 op0 = targetm.addr_space.convert (op0, treeop0_type, type);
8532 }
8533 else
8534 {
8535 /* For disjoint address spaces, converting anything but a null
8536 pointer invokes undefined behavior. We truncate or extend the
8537 value as if we'd converted via integers, which handles 0 as
8538 required, and all others as the programmer likely expects. */
8539 #ifndef POINTERS_EXTEND_UNSIGNED
8540 const int POINTERS_EXTEND_UNSIGNED = 1;
8541 #endif
8542 op0 = convert_modes (mode, TYPE_MODE (treeop0_type),
8543 op0, POINTERS_EXTEND_UNSIGNED);
8544 }
8545 gcc_assert (op0);
8546 return op0;
8547 }
8548
8549 case POINTER_PLUS_EXPR:
8550 /* Even though the sizetype mode and the pointer's mode can be different,
8551 expand is able to handle this correctly and get the correct result out
8552 of the PLUS_EXPR code. */
8553 /* Make sure to sign-extend the sizetype offset in a POINTER_PLUS_EXPR
8554 if sizetype precision is smaller than pointer precision. */
8555 if (TYPE_PRECISION (sizetype) < TYPE_PRECISION (type))
8556 treeop1 = fold_convert_loc (loc, type,
8557 fold_convert_loc (loc, ssizetype,
8558 treeop1));
8559 /* If sizetype precision is larger than pointer precision, truncate the
8560 offset to have matching modes. */
8561 else if (TYPE_PRECISION (sizetype) > TYPE_PRECISION (type))
8562 treeop1 = fold_convert_loc (loc, type, treeop1);
8563 /* FALLTHRU */
8564
8565 case PLUS_EXPR:
8566 /* If we are adding a constant, a VAR_DECL that is sp, fp, or ap, and
8567 something else, make sure we add the register to the constant and
8568 then to the other thing. This case can occur during strength
8569 reduction and doing it this way will produce better code if the
8570 frame pointer or argument pointer is eliminated.
8571
8572 fold-const.c will ensure that the constant is always in the inner
8573 PLUS_EXPR, so the only case we need to do anything about is if
8574 sp, ap, or fp is our second argument, in which case we must swap
8575 the innermost first argument and our second argument. */
8576
8577 if (TREE_CODE (treeop0) == PLUS_EXPR
8578 && TREE_CODE (TREE_OPERAND (treeop0, 1)) == INTEGER_CST
8579 && VAR_P (treeop1)
8580 && (DECL_RTL (treeop1) == frame_pointer_rtx
8581 || DECL_RTL (treeop1) == stack_pointer_rtx
8582 || DECL_RTL (treeop1) == arg_pointer_rtx))
8583 {
8584 gcc_unreachable ();
8585 }
8586
8587 /* If the result is to be ptr_mode and we are adding an integer to
8588 something, we might be forming a constant. So try to use
8589 plus_constant. If it produces a sum and we can't accept it,
8590 use force_operand. This allows P = &ARR[const] to generate
8591 efficient code on machines where a SYMBOL_REF is not a valid
8592 address.
8593
8594 If this is an EXPAND_SUM call, always return the sum. */
8595 if (modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER
8596 || (mode == ptr_mode && (unsignedp || ! flag_trapv)))
8597 {
8598 if (modifier == EXPAND_STACK_PARM)
8599 target = 0;
8600 if (TREE_CODE (treeop0) == INTEGER_CST
8601 && HWI_COMPUTABLE_MODE_P (mode)
8602 && TREE_CONSTANT (treeop1))
8603 {
8604 rtx constant_part;
8605 HOST_WIDE_INT wc;
8606 machine_mode wmode = TYPE_MODE (TREE_TYPE (treeop1));
8607
8608 op1 = expand_expr (treeop1, subtarget, VOIDmode,
8609 EXPAND_SUM);
8610 /* Use wi::shwi to ensure that the constant is
8611 truncated according to the mode of OP1, then sign extended
8612 to a HOST_WIDE_INT. Using the constant directly can result
8613 in non-canonical RTL in a 64x32 cross compile. */
8614 wc = TREE_INT_CST_LOW (treeop0);
8615 constant_part =
8616 immed_wide_int_const (wi::shwi (wc, wmode), wmode);
8617 op1 = plus_constant (mode, op1, INTVAL (constant_part));
8618 if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
8619 op1 = force_operand (op1, target);
8620 return REDUCE_BIT_FIELD (op1);
8621 }
8622
8623 else if (TREE_CODE (treeop1) == INTEGER_CST
8624 && HWI_COMPUTABLE_MODE_P (mode)
8625 && TREE_CONSTANT (treeop0))
8626 {
8627 rtx constant_part;
8628 HOST_WIDE_INT wc;
8629 machine_mode wmode = TYPE_MODE (TREE_TYPE (treeop0));
8630
8631 op0 = expand_expr (treeop0, subtarget, VOIDmode,
8632 (modifier == EXPAND_INITIALIZER
8633 ? EXPAND_INITIALIZER : EXPAND_SUM));
8634 if (! CONSTANT_P (op0))
8635 {
8636 op1 = expand_expr (treeop1, NULL_RTX,
8637 VOIDmode, modifier);
8638 /* Return a PLUS if modifier says it's OK. */
8639 if (modifier == EXPAND_SUM
8640 || modifier == EXPAND_INITIALIZER)
8641 return simplify_gen_binary (PLUS, mode, op0, op1);
8642 goto binop2;
8643 }
8644 /* Use wi::shwi to ensure that the constant is
8645 truncated according to the mode of OP0, then sign extended
8646 to a HOST_WIDE_INT. Using the constant directly can result
8647 in non-canonical RTL in a 64x32 cross compile. */
8648 wc = TREE_INT_CST_LOW (treeop1);
8649 constant_part
8650 = immed_wide_int_const (wi::shwi (wc, wmode), wmode);
8651 op0 = plus_constant (mode, op0, INTVAL (constant_part));
8652 if (modifier != EXPAND_SUM && modifier != EXPAND_INITIALIZER)
8653 op0 = force_operand (op0, target);
8654 return REDUCE_BIT_FIELD (op0);
8655 }
8656 }
8657
8658 /* Use TER to expand pointer addition of a negated value
8659 as pointer subtraction. */
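/* E.g. if treeop1 is an SSA name defined by the gimple statement
   "n_2 = -m_1", then "p_3 = p_1 + n_2" is expanded as the pointer
   subtraction "p_1 - m_1" via the MINUS_EXPR path below (the SSA
   names here are illustrative). */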
8660 if ((POINTER_TYPE_P (TREE_TYPE (treeop0))
8661 || (TREE_CODE (TREE_TYPE (treeop0)) == VECTOR_TYPE
8662 && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (treeop0)))))
8663 && TREE_CODE (treeop1) == SSA_NAME
8664 && TYPE_MODE (TREE_TYPE (treeop0))
8665 == TYPE_MODE (TREE_TYPE (treeop1)))
8666 {
8667 gimple *def = get_def_for_expr (treeop1, NEGATE_EXPR);
8668 if (def)
8669 {
8670 treeop1 = gimple_assign_rhs1 (def);
8671 code = MINUS_EXPR;
8672 goto do_minus;
8673 }
8674 }
8675
8676 /* No sense saving up arithmetic to be done
8677 if it's all in the wrong mode to form part of an address.
8678 And force_operand won't know whether to sign-extend or
8679 zero-extend. */
8680 if (modifier != EXPAND_INITIALIZER
8681 && (modifier != EXPAND_SUM || mode != ptr_mode))
8682 {
8683 expand_operands (treeop0, treeop1,
8684 subtarget, &op0, &op1, modifier);
8685 if (op0 == const0_rtx)
8686 return op1;
8687 if (op1 == const0_rtx)
8688 return op0;
8689 goto binop2;
8690 }
8691
8692 expand_operands (treeop0, treeop1,
8693 subtarget, &op0, &op1, modifier);
8694 return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS, mode, op0, op1));
8695
8696 case MINUS_EXPR:
8697 case POINTER_DIFF_EXPR:
8698 do_minus:
8699 /* For initializers, we are allowed to return a MINUS of two
8700 symbolic constants. Here we handle all cases in which both
8701 operands are constant. */
8704 if ((modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
8705 && really_constant_p (treeop0)
8706 && really_constant_p (treeop1))
8707 {
8708 expand_operands (treeop0, treeop1,
8709 NULL_RTX, &op0, &op1, modifier);
8710 return simplify_gen_binary (MINUS, mode, op0, op1);
8711 }
8712
8713 /* No sense saving up arithmetic to be done
8714 if it's all in the wrong mode to form part of an address.
8715 And force_operand won't know whether to sign-extend or
8716 zero-extend. */
8717 if (modifier != EXPAND_INITIALIZER
8718 && (modifier != EXPAND_SUM || mode != ptr_mode))
8719 goto binop;
8720
8721 expand_operands (treeop0, treeop1,
8722 subtarget, &op0, &op1, modifier);
8723
8724 /* Convert A - const to A + (-const). */
8725 if (CONST_INT_P (op1))
8726 {
8727 op1 = negate_rtx (mode, op1);
8728 return REDUCE_BIT_FIELD (simplify_gen_binary (PLUS, mode, op0, op1));
8729 }
8730
8731 goto binop2;
8732
8733 case WIDEN_MULT_PLUS_EXPR:
8734 case WIDEN_MULT_MINUS_EXPR:
8735 expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
8736 op2 = expand_normal (treeop2);
8737 target = expand_widen_pattern_expr (ops, op0, op1, op2,
8738 target, unsignedp);
8739 return target;
8740
8741 case WIDEN_MULT_EXPR:
8742 /* If first operand is constant, swap them.
8743 Thus the following special case checks need only
8744 check the second operand. */
8745 if (TREE_CODE (treeop0) == INTEGER_CST)
8746 std::swap (treeop0, treeop1);
8747
8748 /* First, check if we have a multiplication of one signed and one
8749 unsigned operand. */
8750 if (TREE_CODE (treeop1) != INTEGER_CST
8751 && (TYPE_UNSIGNED (TREE_TYPE (treeop0))
8752 != TYPE_UNSIGNED (TREE_TYPE (treeop1))))
8753 {
8754 machine_mode innermode = TYPE_MODE (TREE_TYPE (treeop0));
8755 this_optab = usmul_widen_optab;
8756 if (find_widening_optab_handler (this_optab, mode, innermode)
8757 != CODE_FOR_nothing)
8758 {
8759 if (TYPE_UNSIGNED (TREE_TYPE (treeop0)))
8760 expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1,
8761 EXPAND_NORMAL);
8762 else
8763 expand_operands (treeop0, treeop1, NULL_RTX, &op1, &op0,
8764 EXPAND_NORMAL);
8765 /* op0 and op1 might still be constant, despite the above
8766 != INTEGER_CST check. Handle it. */
8767 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
8768 {
8769 op0 = convert_modes (innermode, mode, op0, true);
8770 op1 = convert_modes (innermode, mode, op1, false);
8771 return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1,
8772 target, unsignedp));
8773 }
8774 goto binop3;
8775 }
8776 }
8777 /* Check for a multiplication with matching signedness. */
8778 else if ((TREE_CODE (treeop1) == INTEGER_CST
8779 && int_fits_type_p (treeop1, TREE_TYPE (treeop0)))
8780 || (TYPE_UNSIGNED (TREE_TYPE (treeop1))
8781 == TYPE_UNSIGNED (TREE_TYPE (treeop0))))
8782 {
8783 tree op0type = TREE_TYPE (treeop0);
8784 machine_mode innermode = TYPE_MODE (op0type);
8785 bool zextend_p = TYPE_UNSIGNED (op0type);
8786 optab other_optab = zextend_p ? smul_widen_optab : umul_widen_optab;
8787 this_optab = zextend_p ? umul_widen_optab : smul_widen_optab;
8788
8789 if (TREE_CODE (treeop0) != INTEGER_CST)
8790 {
8791 if (find_widening_optab_handler (this_optab, mode, innermode)
8792 != CODE_FOR_nothing)
8793 {
8794 expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1,
8795 EXPAND_NORMAL);
8796 /* op0 and op1 might still be constant, despite the above
8797 != INTEGER_CST check. Handle it. */
8798 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
8799 {
8800 widen_mult_const:
8801 op0 = convert_modes (innermode, mode, op0, zextend_p);
8802 op1
8803 = convert_modes (innermode, mode, op1,
8804 TYPE_UNSIGNED (TREE_TYPE (treeop1)));
8805 return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1,
8806 target,
8807 unsignedp));
8808 }
8809 temp = expand_widening_mult (mode, op0, op1, target,
8810 unsignedp, this_optab);
8811 return REDUCE_BIT_FIELD (temp);
8812 }
8813 if (find_widening_optab_handler (other_optab, mode, innermode)
8814 != CODE_FOR_nothing
8815 && innermode == word_mode)
8816 {
8817 rtx htem, hipart;
8818 op0 = expand_normal (treeop0);
8819 if (TREE_CODE (treeop1) == INTEGER_CST)
8820 op1 = convert_modes (word_mode, mode,
8821 expand_normal (treeop1),
8822 TYPE_UNSIGNED (TREE_TYPE (treeop1)));
8823 else
8824 op1 = expand_normal (treeop1);
8825 /* op0 and op1 might still be constant, despite the above
8826 != INTEGER_CST check. Handle it. */
8827 if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
8828 goto widen_mult_const;
8829 temp = expand_binop (mode, other_optab, op0, op1, target,
8830 unsignedp, OPTAB_LIB_WIDEN);
8831 hipart = gen_highpart (word_mode, temp);
8832 htem = expand_mult_highpart_adjust (word_mode, hipart,
8833 op0, op1, hipart,
8834 zextend_p);
8835 if (htem != hipart)
8836 emit_move_insn (hipart, htem);
8837 return REDUCE_BIT_FIELD (temp);
8838 }
8839 }
8840 }
8841 treeop0 = fold_build1 (CONVERT_EXPR, type, treeop0);
8842 treeop1 = fold_build1 (CONVERT_EXPR, type, treeop1);
8843 expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
8844 return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1, target, unsignedp));
8845
8846 case FMA_EXPR:
8847 {
8848 optab opt = fma_optab;
8849 gimple *def0, *def2;
8850
8851 /* If there is no insn for FMA, emit it as __builtin_fma{,f,l}
8852 call. */
8853 if (optab_handler (fma_optab, mode) == CODE_FOR_nothing)
8854 {
8855 tree fn = mathfn_built_in (TREE_TYPE (treeop0), BUILT_IN_FMA);
8856 tree call_expr;
8857
8858 gcc_assert (fn != NULL_TREE);
8859 call_expr = build_call_expr (fn, 3, treeop0, treeop1, treeop2);
8860 return expand_builtin (call_expr, target, subtarget, mode, false);
8861 }
8862
8863 def0 = get_def_for_expr (treeop0, NEGATE_EXPR);
8864 /* The multiplication is commutative - look at its 2nd operand
8865 if the first isn't fed by a negate. */
8866 if (!def0)
8867 {
8868 def0 = get_def_for_expr (treeop1, NEGATE_EXPR);
8869 /* Swap operands if the 2nd operand is fed by a negate. */
8870 if (def0)
8871 std::swap (treeop0, treeop1);
8872 }
8873 def2 = get_def_for_expr (treeop2, NEGATE_EXPR);
8874
8875 op0 = op2 = NULL;
8876
8877 if (def0 && def2
8878 && optab_handler (fnms_optab, mode) != CODE_FOR_nothing)
8879 {
8880 opt = fnms_optab;
8881 op0 = expand_normal (gimple_assign_rhs1 (def0));
8882 op2 = expand_normal (gimple_assign_rhs1 (def2));
8883 }
8884 else if (def0
8885 && optab_handler (fnma_optab, mode) != CODE_FOR_nothing)
8886 {
8887 opt = fnma_optab;
8888 op0 = expand_normal (gimple_assign_rhs1 (def0));
8889 }
8890 else if (def2
8891 && optab_handler (fms_optab, mode) != CODE_FOR_nothing)
8892 {
8893 opt = fms_optab;
8894 op2 = expand_normal (gimple_assign_rhs1 (def2));
8895 }
8896
8897 if (op0 == NULL)
8898 op0 = expand_expr (treeop0, subtarget, VOIDmode, EXPAND_NORMAL);
8899 if (op2 == NULL)
8900 op2 = expand_normal (treeop2);
8901 op1 = expand_normal (treeop1);
8902
8903 return expand_ternary_op (TYPE_MODE (type), opt,
8904 op0, op1, op2, target, 0);
8905 }
8906
8907 case MULT_EXPR:
8908 /* If this is a fixed-point operation, then we cannot use the code
8909 below because "expand_mult" doesn't support sat/no-sat fixed-point
8910 multiplications. */
8911 if (ALL_FIXED_POINT_MODE_P (mode))
8912 goto binop;
8913
8914 /* If first operand is constant, swap them.
8915 Thus the following special case checks need only
8916 check the second operand. */
8917 if (TREE_CODE (treeop0) == INTEGER_CST)
8918 std::swap (treeop0, treeop1);
8919
8920 /* Attempt to return something suitable for generating an
8921 indexed address, for machines that support that. */
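/* A sketch: under EXPAND_SUM, an index expression such as "i * 4"
   may be returned as (mult:P (reg:P Ri) (const_int 4)) so that the
   caller can combine it into an indexed address like
   (plus:P base (mult:P Ri 4)); whether such an address is valid is
   up to the target's addressing modes. */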
8922
8923 if (modifier == EXPAND_SUM && mode == ptr_mode
8924 && tree_fits_shwi_p (treeop1))
8925 {
8926 tree exp1 = treeop1;
8927
8928 op0 = expand_expr (treeop0, subtarget, VOIDmode,
8929 EXPAND_SUM);
8930
8931 if (!REG_P (op0))
8932 op0 = force_operand (op0, NULL_RTX);
8933 if (!REG_P (op0))
8934 op0 = copy_to_mode_reg (mode, op0);
8935
8936 return REDUCE_BIT_FIELD (gen_rtx_MULT (mode, op0,
8937 gen_int_mode (tree_to_shwi (exp1),
8938 TYPE_MODE (TREE_TYPE (exp1)))));
8939 }
8940
8941 if (modifier == EXPAND_STACK_PARM)
8942 target = 0;
8943
8944 expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
8945 return REDUCE_BIT_FIELD (expand_mult (mode, op0, op1, target, unsignedp));
8946
8947 case TRUNC_MOD_EXPR:
8948 case FLOOR_MOD_EXPR:
8949 case CEIL_MOD_EXPR:
8950 case ROUND_MOD_EXPR:
8951
8952 case TRUNC_DIV_EXPR:
8953 case FLOOR_DIV_EXPR:
8954 case CEIL_DIV_EXPR:
8955 case ROUND_DIV_EXPR:
8956 case EXACT_DIV_EXPR:
8957 {
8958 /* If this is a fixed-point operation, then we cannot use the code
8959 below because "expand_divmod" doesn't support sat/no-sat fixed-point
8960 divisions. */
8961 if (ALL_FIXED_POINT_MODE_P (mode))
8962 goto binop;
8963
8964 if (modifier == EXPAND_STACK_PARM)
8965 target = 0;
8966 /* Possible optimization: compute the dividend with EXPAND_SUM;
8967 then, if the divisor is constant, we can optimize the case
8968 where some terms of the dividend have coefficients divisible by it. */
8969 expand_operands (treeop0, treeop1,
8970 subtarget, &op0, &op1, EXPAND_NORMAL);
8971 bool mod_p = code == TRUNC_MOD_EXPR || code == FLOOR_MOD_EXPR
8972 || code == CEIL_MOD_EXPR || code == ROUND_MOD_EXPR;
8973 if (SCALAR_INT_MODE_P (mode)
8974 && optimize >= 2
8975 && get_range_pos_neg (treeop0) == 1
8976 && get_range_pos_neg (treeop1) == 1)
8977 {
8978 /* If both arguments are known to be positive when interpreted
8979 as signed, we can expand it as both signed and unsigned
8980 division or modulo. Choose the cheaper sequence in that case. */
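/* For instance, if value ranges prove that both x and y in "x / y"
   are nonnegative, the division below is expanded twice, once as a
   signed and once as an unsigned divide, and the cheaper insn
   sequence wins (which one that is depends on the target). */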
8981 bool speed_p = optimize_insn_for_speed_p ();
8982 do_pending_stack_adjust ();
8983 start_sequence ();
8984 rtx uns_ret = expand_divmod (mod_p, code, mode, op0, op1, target, 1);
8985 rtx_insn *uns_insns = get_insns ();
8986 end_sequence ();
8987 start_sequence ();
8988 rtx sgn_ret = expand_divmod (mod_p, code, mode, op0, op1, target, 0);
8989 rtx_insn *sgn_insns = get_insns ();
8990 end_sequence ();
8991 unsigned uns_cost = seq_cost (uns_insns, speed_p);
8992 unsigned sgn_cost = seq_cost (sgn_insns, speed_p);
8993
8994 /* If the costs are the same, use the other cost factor
8995 (size vs. speed) as a tie breaker. */
8996 if (uns_cost == sgn_cost)
8997 {
8998 uns_cost = seq_cost (uns_insns, !speed_p);
8999 sgn_cost = seq_cost (sgn_insns, !speed_p);
9000 }
9001
9002 if (uns_cost < sgn_cost || (uns_cost == sgn_cost && unsignedp))
9003 {
9004 emit_insn (uns_insns);
9005 return uns_ret;
9006 }
9007 emit_insn (sgn_insns);
9008 return sgn_ret;
9009 }
9010 return expand_divmod (mod_p, code, mode, op0, op1, target, unsignedp);
9011 }
9012 case RDIV_EXPR:
9013 goto binop;
9014
9015 case MULT_HIGHPART_EXPR:
9016 expand_operands (treeop0, treeop1, subtarget, &op0, &op1, EXPAND_NORMAL);
9017 temp = expand_mult_highpart (mode, op0, op1, target, unsignedp);
9018 gcc_assert (temp);
9019 return temp;
9020
9021 case FIXED_CONVERT_EXPR:
9022 op0 = expand_normal (treeop0);
9023 if (target == 0 || modifier == EXPAND_STACK_PARM)
9024 target = gen_reg_rtx (mode);
9025
9026 if ((TREE_CODE (TREE_TYPE (treeop0)) == INTEGER_TYPE
9027 && TYPE_UNSIGNED (TREE_TYPE (treeop0)))
9028 || (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type)))
9029 expand_fixed_convert (target, op0, 1, TYPE_SATURATING (type));
9030 else
9031 expand_fixed_convert (target, op0, 0, TYPE_SATURATING (type));
9032 return target;
9033
9034 case FIX_TRUNC_EXPR:
9035 op0 = expand_normal (treeop0);
9036 if (target == 0 || modifier == EXPAND_STACK_PARM)
9037 target = gen_reg_rtx (mode);
9038 expand_fix (target, op0, unsignedp);
9039 return target;
9040
9041 case FLOAT_EXPR:
9042 op0 = expand_normal (treeop0);
9043 if (target == 0 || modifier == EXPAND_STACK_PARM)
9044 target = gen_reg_rtx (mode);
9045 /* expand_float can't figure out what to do if FROM has VOIDmode.
9046 So give it the correct mode. With -O, cse will optimize this. */
9047 if (GET_MODE (op0) == VOIDmode)
9048 op0 = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (treeop0)),
9049 op0);
9050 expand_float (target, op0,
9051 TYPE_UNSIGNED (TREE_TYPE (treeop0)));
9052 return target;
9053
9054 case NEGATE_EXPR:
9055 op0 = expand_expr (treeop0, subtarget,
9056 VOIDmode, EXPAND_NORMAL);
9057 if (modifier == EXPAND_STACK_PARM)
9058 target = 0;
9059 temp = expand_unop (mode,
9060 optab_for_tree_code (NEGATE_EXPR, type,
9061 optab_default),
9062 op0, target, 0);
9063 gcc_assert (temp);
9064 return REDUCE_BIT_FIELD (temp);
9065
9066 case ABS_EXPR:
9067 op0 = expand_expr (treeop0, subtarget,
9068 VOIDmode, EXPAND_NORMAL);
9069 if (modifier == EXPAND_STACK_PARM)
9070 target = 0;
9071
9072 /* ABS_EXPR is not valid for complex arguments. */
9073 gcc_assert (GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
9074 && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT);
9075
9076 /* Unsigned abs is simply the operand. Testing here means we don't
9077 risk generating incorrect code below. */
9078 if (TYPE_UNSIGNED (type))
9079 return op0;
9080
9081 return expand_abs (mode, op0, target, unsignedp,
9082 safe_from_p (target, treeop0, 1));
9083
9084 case MAX_EXPR:
9085 case MIN_EXPR:
9086 target = original_target;
9087 if (target == 0
9088 || modifier == EXPAND_STACK_PARM
9089 || (MEM_P (target) && MEM_VOLATILE_P (target))
9090 || GET_MODE (target) != mode
9091 || (REG_P (target)
9092 && REGNO (target) < FIRST_PSEUDO_REGISTER))
9093 target = gen_reg_rtx (mode);
9094 expand_operands (treeop0, treeop1,
9095 target, &op0, &op1, EXPAND_NORMAL);
9096
9097 /* First try to do it with a special MIN or MAX instruction.
9098 If that does not win, use a conditional jump to select the proper
9099 value. */
9100 this_optab = optab_for_tree_code (code, type, optab_default);
9101 temp = expand_binop (mode, this_optab, op0, op1, target, unsignedp,
9102 OPTAB_WIDEN);
9103 if (temp != 0)
9104 return temp;
9105
9106 /* For vector MIN <x, y>, expand it as VEC_COND_EXPR <x <= y, x, y>
9107 and similarly for MAX <x, y>. */
9108 if (VECTOR_TYPE_P (type))
9109 {
9110 tree t0 = make_tree (type, op0);
9111 tree t1 = make_tree (type, op1);
9112 tree comparison = build2 (code == MIN_EXPR ? LE_EXPR : GE_EXPR,
9113 type, t0, t1);
9114 return expand_vec_cond_expr (type, comparison, t0, t1,
9115 original_target);
9116 }
9117
9118 /* At this point, a MEM target is no longer useful; we will get better
9119 code without it. */
9120
9121 if (! REG_P (target))
9122 target = gen_reg_rtx (mode);
9123
9124 /* If op1 was placed in target, swap op0 and op1. */
9125 if (target != op0 && target == op1)
9126 std::swap (op0, op1);
9127
9128 /* We generate better code and avoid problems with op1 mentioning
9129 target by forcing op1 into a pseudo if it isn't a constant. */
9130 if (! CONSTANT_P (op1))
9131 op1 = force_reg (mode, op1);
9132
9133 {
9134 enum rtx_code comparison_code;
9135 rtx cmpop1 = op1;
9136
9137 if (code == MAX_EXPR)
9138 comparison_code = unsignedp ? GEU : GE;
9139 else
9140 comparison_code = unsignedp ? LEU : LE;
9141
9142 /* Canonicalize to comparisons against 0. */
9143 if (op1 == const1_rtx)
9144 {
9145 /* Converting (a >= 1 ? a : 1) into (a > 0 ? a : 1)
9146 or (a != 0 ? a : 1) for unsigned.
9147 For MIN we are safe converting (a <= 1 ? a : 1)
9148 into (a <= 0 ? a : 1). */
9149 cmpop1 = const0_rtx;
9150 if (code == MAX_EXPR)
9151 comparison_code = unsignedp ? NE : GT;
9152 }
9153 if (op1 == constm1_rtx && !unsignedp)
9154 {
9155 /* Converting (a >= -1 ? a : -1) into (a >= 0 ? a : -1)
9156 and (a <= -1 ? a : -1) into (a < 0 ? a : -1). */
9157 cmpop1 = const0_rtx;
9158 if (code == MIN_EXPR)
9159 comparison_code = LT;
9160 }
9161
9162 /* Use a conditional move if possible. */
9163 if (can_conditionally_move_p (mode))
9164 {
9165 rtx insn;
9166
9167 start_sequence ();
9168
9169 /* Try to emit the conditional move. */
9170 insn = emit_conditional_move (target, comparison_code,
9171 op0, cmpop1, mode,
9172 op0, op1, mode,
9173 unsignedp);
9174
9175 /* If we could do the conditional move, emit the sequence,
9176 and return. */
9177 if (insn)
9178 {
9179 rtx_insn *seq = get_insns ();
9180 end_sequence ();
9181 emit_insn (seq);
9182 return target;
9183 }
9184
9185 /* Otherwise discard the sequence and fall back to code with
9186 branches. */
9187 end_sequence ();
9188 }
9189
9190 if (target != op0)
9191 emit_move_insn (target, op0);
9192
9193 lab = gen_label_rtx ();
9194 do_compare_rtx_and_jump (target, cmpop1, comparison_code,
9195 unsignedp, mode, NULL_RTX, NULL, lab,
9196 profile_probability::uninitialized ());
9197 }
9198 emit_move_insn (target, op1);
9199 emit_label (lab);
9200 return target;
9201
9202 case BIT_NOT_EXPR:
9203 op0 = expand_expr (treeop0, subtarget,
9204 VOIDmode, EXPAND_NORMAL);
9205 if (modifier == EXPAND_STACK_PARM)
9206 target = 0;
9207 /* In case we have to reduce the result to bit-field precision
9208 for an unsigned bit-field, expand this as an XOR with a proper
9209 constant instead. */
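/* E.g. for an unsigned bit-field type of precision 3, ~x is expanded
   as "x ^ 7": the XOR with the low-three-bits mask both complements
   and keeps the result reduced to 3 bits in one operation. */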
9210 if (reduce_bit_field && TYPE_UNSIGNED (type))
9211 {
9212 int_mode = SCALAR_INT_TYPE_MODE (type);
9213 wide_int mask = wi::mask (TYPE_PRECISION (type),
9214 false, GET_MODE_PRECISION (int_mode));
9215
9216 temp = expand_binop (int_mode, xor_optab, op0,
9217 immed_wide_int_const (mask, int_mode),
9218 target, 1, OPTAB_LIB_WIDEN);
9219 }
9220 else
9221 temp = expand_unop (mode, one_cmpl_optab, op0, target, 1);
9222 gcc_assert (temp);
9223 return temp;
9224
9225 /* ??? Can optimize bitwise operations with one arg constant.
9226 Can optimize (a bitwise1 n) bitwise2 (a bitwise3 b)
9227 and (a bitwise1 b) bitwise2 b (etc)
9228 but that is probably not worth while. */
9229
9230 case BIT_AND_EXPR:
9231 case BIT_IOR_EXPR:
9232 case BIT_XOR_EXPR:
9233 goto binop;
9234
9235 case LROTATE_EXPR:
9236 case RROTATE_EXPR:
9237 gcc_assert (VECTOR_MODE_P (TYPE_MODE (type))
9238 || type_has_mode_precision_p (type));
9239 /* fall through */
9240
9241 case LSHIFT_EXPR:
9242 case RSHIFT_EXPR:
9243 {
9244 /* If this is a fixed-point operation, then we cannot use the code
9245 below because "expand_shift" doesn't support sat/no-sat fixed-point
9246 shifts. */
9247 if (ALL_FIXED_POINT_MODE_P (mode))
9248 goto binop;
9249
9250 if (! safe_from_p (subtarget, treeop1, 1))
9251 subtarget = 0;
9252 if (modifier == EXPAND_STACK_PARM)
9253 target = 0;
9254 op0 = expand_expr (treeop0, subtarget,
9255 VOIDmode, EXPAND_NORMAL);
9256
9257 /* Left shift optimization when shifting across the word_size boundary.
9258
9259 If mode == GET_MODE_WIDER_MODE (word_mode), then normally
9260 there is no native instruction to support a left shift in
9261 this wide mode. Given the scenario below:
9262
9263 Type A = (Type) B << C
9264
9265 |< T >|
9266 | dest_high | dest_low |
9267
9268 | word_size |
9269
9270 If the shift amount C causes B to be shifted across the word
9271 size boundary, i.e. part of B is shifted into the high half of
9272 the destination register and part of B remains in the low
9273 half, then GCC uses the following left shift expansion
9274 logic:
9275
9276 1. Initialize dest_low to B.
9277 2. Initialize every bit of dest_high to the sign bit of B.
9278 3. Logically left-shift dest_low by C bits to finalize dest_low.
9279 The value of dest_low before this shift is kept in a temp D.
9280 4. Logically left-shift dest_high by C bits.
9281 5. Logically right-shift D by (word_size - C) bits.
9282 6. Or the results of 4 and 5 to finalize dest_high.
9283
9284 However, by checking the gimple statements, if operand B
9285 comes from a sign extension, then we can simplify the above
9286 expansion logic into:
9287
9288 1. dest_high = src_low >> (word_size - C).
9289 2. dest_low = src_low << C.
9290
9291 A single arithmetic right shift accomplishes the work of
9292 steps 2, 4, 5 and 6, reducing the number of steps needed
9293 from 6 to 2.
9294
9295 The case is similar for zero extension, except that we
9296 initialize dest_high to zero rather than copies of the sign
9297 bit from B. Furthermore, we need to use a logical right shift
9298 in this case.
9299
9300 The choice of sign-extension versus zero-extension is
9301 determined entirely by whether or not B is signed and is
9302 independent of the current setting of unsignedp. */
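/* A concrete example, assuming a 32-bit word_mode and a 64-bit
   int_mode: for "long long a = (long long) b << 24;" with b a
   32-bit int, the expansion below can emit just

     dest_high = b >> (32 - 24);   (arithmetic right shift)
     dest_low  = b << 24;

   rather than the six-step sequence, since src_low is simply b
   (and the cost comparison below keeps the shorter sequence only
   when it is actually cheaper than the target's full-width shift). */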
9303
9304 temp = NULL_RTX;
9305 if (code == LSHIFT_EXPR
9306 && target
9307 && REG_P (target)
9308 && GET_MODE_2XWIDER_MODE (word_mode).exists (&int_mode)
9309 && mode == int_mode
9310 && TREE_CONSTANT (treeop1)
9311 && TREE_CODE (treeop0) == SSA_NAME)
9312 {
9313 gimple *def = SSA_NAME_DEF_STMT (treeop0);
9314 if (is_gimple_assign (def)
9315 && gimple_assign_rhs_code (def) == NOP_EXPR)
9316 {
9317 scalar_int_mode rmode = SCALAR_INT_TYPE_MODE
9318 (TREE_TYPE (gimple_assign_rhs1 (def)));
9319
9320 if (GET_MODE_SIZE (rmode) < GET_MODE_SIZE (int_mode)
9321 && TREE_INT_CST_LOW (treeop1) < GET_MODE_BITSIZE (word_mode)
9322 && ((TREE_INT_CST_LOW (treeop1) + GET_MODE_BITSIZE (rmode))
9323 >= GET_MODE_BITSIZE (word_mode)))
9324 {
9325 rtx_insn *seq, *seq_old;
9326 poly_uint64 high_off = subreg_highpart_offset (word_mode,
9327 int_mode);
9328 bool extend_unsigned
9329 = TYPE_UNSIGNED (TREE_TYPE (gimple_assign_rhs1 (def)));
9330 rtx low = lowpart_subreg (word_mode, op0, int_mode);
9331 rtx dest_low = lowpart_subreg (word_mode, target, int_mode);
9332 rtx dest_high = simplify_gen_subreg (word_mode, target,
9333 int_mode, high_off);
9334 HOST_WIDE_INT ramount = (BITS_PER_WORD
9335 - TREE_INT_CST_LOW (treeop1));
9336 tree rshift = build_int_cst (TREE_TYPE (treeop1), ramount);
9337
9338 start_sequence ();
9339 /* dest_high = src_low >> (word_size - C). */
9340 temp = expand_variable_shift (RSHIFT_EXPR, word_mode, low,
9341 rshift, dest_high,
9342 extend_unsigned);
9343 if (temp != dest_high)
9344 emit_move_insn (dest_high, temp);
9345
9346 /* dest_low = src_low << C. */
9347 temp = expand_variable_shift (LSHIFT_EXPR, word_mode, low,
9348 treeop1, dest_low, unsignedp);
9349 if (temp != dest_low)
9350 emit_move_insn (dest_low, temp);
9351
9352 seq = get_insns ();
9353 end_sequence ();
9354 temp = target;
9355
9356 if (have_insn_for (ASHIFT, int_mode))
9357 {
9358 bool speed_p = optimize_insn_for_speed_p ();
9359 start_sequence ();
9360 rtx ret_old = expand_variable_shift (code, int_mode,
9361 op0, treeop1,
9362 target,
9363 unsignedp);
9364
9365 seq_old = get_insns ();
9366 end_sequence ();
9367 if (seq_cost (seq, speed_p)
9368 >= seq_cost (seq_old, speed_p))
9369 {
9370 seq = seq_old;
9371 temp = ret_old;
9372 }
9373 }
9374 emit_insn (seq);
9375 }
9376 }
9377 }
9378
9379 if (temp == NULL_RTX)
9380 temp = expand_variable_shift (code, mode, op0, treeop1, target,
9381 unsignedp);
9382 if (code == LSHIFT_EXPR)
9383 temp = REDUCE_BIT_FIELD (temp);
9384 return temp;
9385 }
9386
9387 /* Could determine the answer when only additive constants differ. Also,
9388 the addition of one can be handled by changing the condition. */
9389 case LT_EXPR:
9390 case LE_EXPR:
9391 case GT_EXPR:
9392 case GE_EXPR:
9393 case EQ_EXPR:
9394 case NE_EXPR:
9395 case UNORDERED_EXPR:
9396 case ORDERED_EXPR:
9397 case UNLT_EXPR:
9398 case UNLE_EXPR:
9399 case UNGT_EXPR:
9400 case UNGE_EXPR:
9401 case UNEQ_EXPR:
9402 case LTGT_EXPR:
9403 {
9404 temp = do_store_flag (ops,
9405 modifier != EXPAND_STACK_PARM ? target : NULL_RTX,
9406 tmode != VOIDmode ? tmode : mode);
9407 if (temp)
9408 return temp;
9409
9410 /* Use a compare and a jump for BLKmode comparisons, or for function
9411 type comparisons if have_canonicalize_funcptr_for_compare. */
9412
9413 if ((target == 0
9414 || modifier == EXPAND_STACK_PARM
9415 || ! safe_from_p (target, treeop0, 1)
9416 || ! safe_from_p (target, treeop1, 1)
9417 /* Make sure we don't have a hard reg (such as function's return
9418 value) live across basic blocks, if not optimizing. */
9419 || (!optimize && REG_P (target)
9420 && REGNO (target) < FIRST_PSEUDO_REGISTER)))
9421 target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
9422
9423 emit_move_insn (target, const0_rtx);
9424
9425 rtx_code_label *lab1 = gen_label_rtx ();
9426 jumpifnot_1 (code, treeop0, treeop1, lab1,
9427 profile_probability::uninitialized ());
9428
9429 if (TYPE_PRECISION (type) == 1 && !TYPE_UNSIGNED (type))
9430 emit_move_insn (target, constm1_rtx);
9431 else
9432 emit_move_insn (target, const1_rtx);
9433
9434 emit_label (lab1);
9435 return target;
9436 }
9437 case COMPLEX_EXPR:
9438 /* Get the rtx code of the operands. */
9439 op0 = expand_normal (treeop0);
9440 op1 = expand_normal (treeop1);
9441
9442 if (!target)
9443 target = gen_reg_rtx (TYPE_MODE (type));
9444 else
9445 /* If target overlaps with op1, then either we need to force
9446 op1 into a pseudo (if target also overlaps with op0),
9447 or write the complex parts in reverse order. */
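/* For instance, if target is (concat r0 r1) and op1 happens to
   live in r0, storing the real part first would clobber op1; the
   cases below either store the imaginary part first or, when both
   halves overlap, copy op1 into a fresh pseudo. */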
9448 switch (GET_CODE (target))
9449 {
9450 case CONCAT:
9451 if (reg_overlap_mentioned_p (XEXP (target, 0), op1))
9452 {
9453 if (reg_overlap_mentioned_p (XEXP (target, 1), op0))
9454 {
9455 complex_expr_force_op1:
9456 temp = gen_reg_rtx (GET_MODE_INNER (GET_MODE (target)));
9457 emit_move_insn (temp, op1);
9458 op1 = temp;
9459 break;
9460 }
9461 complex_expr_swap_order:
9462 /* Move the imaginary (op1) and real (op0) parts to their
9463 locations. */
9464 write_complex_part (target, op1, true);
9465 write_complex_part (target, op0, false);
9466
9467 return target;
9468 }
9469 break;
9470 case MEM:
9471 temp = adjust_address_nv (target,
9472 GET_MODE_INNER (GET_MODE (target)), 0);
9473 if (reg_overlap_mentioned_p (temp, op1))
9474 {
9475 scalar_mode imode = GET_MODE_INNER (GET_MODE (target));
9476 temp = adjust_address_nv (target, imode,
9477 GET_MODE_SIZE (imode));
9478 if (reg_overlap_mentioned_p (temp, op0))
9479 goto complex_expr_force_op1;
9480 goto complex_expr_swap_order;
9481 }
9482 break;
9483 default:
9484 if (reg_overlap_mentioned_p (target, op1))
9485 {
9486 if (reg_overlap_mentioned_p (target, op0))
9487 goto complex_expr_force_op1;
9488 goto complex_expr_swap_order;
9489 }
9490 break;
9491 }
9492
9493 /* Move the real (op0) and imaginary (op1) parts to their locations. */
9494 write_complex_part (target, op0, false);
9495 write_complex_part (target, op1, true);
9496
9497 return target;
9498
9499 case WIDEN_SUM_EXPR:
9500 {
9501 tree oprnd0 = treeop0;
9502 tree oprnd1 = treeop1;
9503
9504 expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9505 target = expand_widen_pattern_expr (ops, op0, NULL_RTX, op1,
9506 target, unsignedp);
9507 return target;
9508 }
9509
9510 case VEC_UNPACK_HI_EXPR:
9511 case VEC_UNPACK_LO_EXPR:
9512 {
9513 op0 = expand_normal (treeop0);
9514 temp = expand_widen_pattern_expr (ops, op0, NULL_RTX, NULL_RTX,
9515 target, unsignedp);
9516 gcc_assert (temp);
9517 return temp;
9518 }
9519
9520 case VEC_UNPACK_FLOAT_HI_EXPR:
9521 case VEC_UNPACK_FLOAT_LO_EXPR:
9522 {
9523 op0 = expand_normal (treeop0);
9524 /* The signedness is determined from the input operand. */
9525 temp = expand_widen_pattern_expr
9526 (ops, op0, NULL_RTX, NULL_RTX,
9527 target, TYPE_UNSIGNED (TREE_TYPE (treeop0)));
9528
9529 gcc_assert (temp);
9530 return temp;
9531 }
9532
9533 case VEC_WIDEN_MULT_HI_EXPR:
9534 case VEC_WIDEN_MULT_LO_EXPR:
9535 case VEC_WIDEN_MULT_EVEN_EXPR:
9536 case VEC_WIDEN_MULT_ODD_EXPR:
9537 case VEC_WIDEN_LSHIFT_HI_EXPR:
9538 case VEC_WIDEN_LSHIFT_LO_EXPR:
9539 expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9540 target = expand_widen_pattern_expr (ops, op0, op1, NULL_RTX,
9541 target, unsignedp);
9542 gcc_assert (target);
9543 return target;
9544
9545 case VEC_PACK_TRUNC_EXPR:
9546 case VEC_PACK_SAT_EXPR:
9547 case VEC_PACK_FIX_TRUNC_EXPR:
9548 mode = TYPE_MODE (TREE_TYPE (treeop0));
9549 goto binop;
9550
9551 case VEC_PERM_EXPR:
9552 {
9553 expand_operands (treeop0, treeop1, target, &op0, &op1, EXPAND_NORMAL);
9554 vec_perm_builder sel;
9555 if (TREE_CODE (treeop2) == VECTOR_CST
9556 && tree_to_vec_perm_builder (&sel, treeop2))
9557 {
9558 machine_mode sel_mode = TYPE_MODE (TREE_TYPE (treeop2));
9559 temp = expand_vec_perm_const (mode, op0, op1, sel,
9560 sel_mode, target);
9561 }
9562 else
9563 {
9564 op2 = expand_normal (treeop2);
9565 temp = expand_vec_perm_var (mode, op0, op1, op2, target);
9566 }
9567 gcc_assert (temp);
9568 return temp;
9569 }
9570
9571 case DOT_PROD_EXPR:
9572 {
9573 tree oprnd0 = treeop0;
9574 tree oprnd1 = treeop1;
9575 tree oprnd2 = treeop2;
9576 rtx op2;
9577
9578 expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9579 op2 = expand_normal (oprnd2);
9580 target = expand_widen_pattern_expr (ops, op0, op1, op2,
9581 target, unsignedp);
9582 return target;
9583 }
9584
9585 case SAD_EXPR:
9586 {
9587 tree oprnd0 = treeop0;
9588 tree oprnd1 = treeop1;
9589 tree oprnd2 = treeop2;
9590 rtx op2;
9591
9592 expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9593 op2 = expand_normal (oprnd2);
9594 target = expand_widen_pattern_expr (ops, op0, op1, op2,
9595 target, unsignedp);
9596 return target;
9597 }
9598
9599 case REALIGN_LOAD_EXPR:
9600 {
9601 tree oprnd0 = treeop0;
9602 tree oprnd1 = treeop1;
9603 tree oprnd2 = treeop2;
9604 rtx op2;
9605
9606 this_optab = optab_for_tree_code (code, type, optab_default);
9607 expand_operands (oprnd0, oprnd1, NULL_RTX, &op0, &op1, EXPAND_NORMAL);
9608 op2 = expand_normal (oprnd2);
9609 temp = expand_ternary_op (mode, this_optab, op0, op1, op2,
9610 target, unsignedp);
9611 gcc_assert (temp);
9612 return temp;
9613 }
9614
9615 case COND_EXPR:
9616 {
9617 /* A COND_EXPR with its type being VOID_TYPE represents a
9618 conditional jump and is handled in
9619 expand_gimple_cond_expr. */
9620 gcc_assert (!VOID_TYPE_P (type));
9621
9622 /* Note that COND_EXPRs whose type is a structure or union
9623 are required to be constructed to contain assignments to
9624 a temporary variable, so that we can evaluate them here
9625 for side effects only. If the type is void, we must do likewise. */
9626
9627 gcc_assert (!TREE_ADDRESSABLE (type)
9628 && !ignore
9629 && TREE_TYPE (treeop1) != void_type_node
9630 && TREE_TYPE (treeop2) != void_type_node);
9631
9632 temp = expand_cond_expr_using_cmove (treeop0, treeop1, treeop2);
9633 if (temp)
9634 return temp;
9635
9636 /* If we are not to produce a result, we have no target. Otherwise,
9637 if a target was specified use it; it will not be used as an
9638 intermediate target unless it is safe. If no target, use a
9639 temporary. */
9640
9641 if (modifier != EXPAND_STACK_PARM
9642 && original_target
9643 && safe_from_p (original_target, treeop0, 1)
9644 && GET_MODE (original_target) == mode
9645 && !MEM_P (original_target))
9646 temp = original_target;
9647 else
9648 temp = assign_temp (type, 0, 1);
9649
9650 do_pending_stack_adjust ();
9651 NO_DEFER_POP;
9652 rtx_code_label *lab0 = gen_label_rtx ();
9653 rtx_code_label *lab1 = gen_label_rtx ();
9654 jumpifnot (treeop0, lab0,
9655 profile_probability::uninitialized ());
9656 store_expr (treeop1, temp,
9657 modifier == EXPAND_STACK_PARM,
9658 false, false);
9659
9660 emit_jump_insn (targetm.gen_jump (lab1));
9661 emit_barrier ();
9662 emit_label (lab0);
9663 store_expr (treeop2, temp,
9664 modifier == EXPAND_STACK_PARM,
9665 false, false);
9666
9667 emit_label (lab1);
9668 OK_DEFER_POP;
9669 return temp;
9670 }
9671
9672 case VEC_COND_EXPR:
9673 target = expand_vec_cond_expr (type, treeop0, treeop1, treeop2, target);
9674 return target;
9675
9676 case VEC_DUPLICATE_EXPR:
9677 op0 = expand_expr (treeop0, NULL_RTX, VOIDmode, modifier);
9678 target = expand_vector_broadcast (mode, op0);
9679 gcc_assert (target);
9680 return target;
9681
9682 case VEC_SERIES_EXPR:
9683 expand_operands (treeop0, treeop1, NULL_RTX, &op0, &op1, modifier);
9684 return expand_vec_series_expr (mode, op0, op1, target);
9685
9686 case BIT_INSERT_EXPR:
9687 {
9688 unsigned bitpos = tree_to_uhwi (treeop2);
9689 unsigned bitsize;
9690 if (INTEGRAL_TYPE_P (TREE_TYPE (treeop1)))
9691 bitsize = TYPE_PRECISION (TREE_TYPE (treeop1));
9692 else
9693 bitsize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (treeop1)));
9694 rtx op0 = expand_normal (treeop0);
9695 rtx op1 = expand_normal (treeop1);
9696 rtx dst = gen_reg_rtx (mode);
9697 emit_move_insn (dst, op0);
9698 store_bit_field (dst, bitsize, bitpos, 0, 0,
9699 TYPE_MODE (TREE_TYPE (treeop1)), op1, false);
9700 return dst;
9701 }
9702
9703 default:
9704 gcc_unreachable ();
9705 }
9706
9707 /* Here to do an ordinary binary operator. */
9708 binop:
9709 expand_operands (treeop0, treeop1,
9710 subtarget, &op0, &op1, EXPAND_NORMAL);
9711 binop2:
9712 this_optab = optab_for_tree_code (code, type, optab_default);
9713 binop3:
9714 if (modifier == EXPAND_STACK_PARM)
9715 target = 0;
9716 temp = expand_binop (mode, this_optab, op0, op1, target,
9717 unsignedp, OPTAB_LIB_WIDEN);
9718 gcc_assert (temp);
9719 /* Bitwise operations do not need bitfield reduction as we expect their
9720 operands to be properly truncated. */
9721 if (code == BIT_XOR_EXPR
9722 || code == BIT_AND_EXPR
9723 || code == BIT_IOR_EXPR)
9724 return temp;
9725 return REDUCE_BIT_FIELD (temp);
9726 }
9727 #undef REDUCE_BIT_FIELD
9728
9729
9730 /* Return TRUE if the statement STMT is suitable for replacement.
9731 Never consider memory loads as replaceable, because they never
9732 lead to constant expressions. */
9733
9734 static bool
9735 stmt_is_replaceable_p (gimple *stmt)
9736 {
9737 if (ssa_is_replaceable_p (stmt))
9738 {
9739 /* Don't move around loads. */
9740 if (!gimple_assign_single_p (stmt)
9741 || is_gimple_val (gimple_assign_rhs1 (stmt)))
9742 return true;
9743 }
9744 return false;
9745 }
9746
9747 rtx
9748 expand_expr_real_1 (tree exp, rtx target, machine_mode tmode,
9749 enum expand_modifier modifier, rtx *alt_rtl,
9750 bool inner_reference_p)
9751 {
9752 rtx op0, op1, temp, decl_rtl;
9753 tree type;
9754 int unsignedp;
9755 machine_mode mode, dmode;
9756 enum tree_code code = TREE_CODE (exp);
9757 rtx subtarget, original_target;
9758 int ignore;
9759 tree context;
9760 bool reduce_bit_field;
9761 location_t loc = EXPR_LOCATION (exp);
9762 struct separate_ops ops;
9763 tree treeop0, treeop1, treeop2;
9764 tree ssa_name = NULL_TREE;
9765 gimple *g;
9766
9767 type = TREE_TYPE (exp);
9768 mode = TYPE_MODE (type);
9769 unsignedp = TYPE_UNSIGNED (type);
9770
9771 treeop0 = treeop1 = treeop2 = NULL_TREE;
9772 if (!VL_EXP_CLASS_P (exp))
9773 switch (TREE_CODE_LENGTH (code))
9774 {
9775 default:
9776 case 3: treeop2 = TREE_OPERAND (exp, 2); /* FALLTHRU */
9777 case 2: treeop1 = TREE_OPERAND (exp, 1); /* FALLTHRU */
9778 case 1: treeop0 = TREE_OPERAND (exp, 0); /* FALLTHRU */
9779 case 0: break;
9780 }
9781 ops.code = code;
9782 ops.type = type;
9783 ops.op0 = treeop0;
9784 ops.op1 = treeop1;
9785 ops.op2 = treeop2;
9786 ops.location = loc;
9787
9788 ignore = (target == const0_rtx
9789 || ((CONVERT_EXPR_CODE_P (code)
9790 || code == COND_EXPR || code == VIEW_CONVERT_EXPR)
9791 && TREE_CODE (type) == VOID_TYPE));
9792
9793 /* An operation in what may be a bit-field type needs the
9794 result to be reduced to the precision of the bit-field type,
9795 which is narrower than that of the type's mode. */
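/* For example, a bit-field type with TYPE_PRECISION 3 typically has
   QImode as its mode; the result of an operation carried out in the
   full 8-bit mode must then be truncated or sign-extended back to
   3 bits (see reduce_to_bit_field_precision). */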
9796 reduce_bit_field = (!ignore
9797 && INTEGRAL_TYPE_P (type)
9798 && !type_has_mode_precision_p (type));
9799
9800 /* If we are going to ignore this result, we need only do something
9801 if there is a side-effect somewhere in the expression. If there
9802 is, short-circuit the most common cases here. Note that we must
9803 not call expand_expr with anything but const0_rtx in case this
9804 is an initial expansion of a size that contains a PLACEHOLDER_EXPR. */
9805
9806 if (ignore)
9807 {
9808 if (! TREE_SIDE_EFFECTS (exp))
9809 return const0_rtx;
9810
9811 /* Ensure we reference a volatile object even if value is ignored, but
9812 don't do this if all we are doing is taking its address. */
9813 if (TREE_THIS_VOLATILE (exp)
9814 && TREE_CODE (exp) != FUNCTION_DECL
9815 && mode != VOIDmode && mode != BLKmode
9816 && modifier != EXPAND_CONST_ADDRESS)
9817 {
9818 temp = expand_expr (exp, NULL_RTX, VOIDmode, modifier);
9819 if (MEM_P (temp))
9820 copy_to_reg (temp);
9821 return const0_rtx;
9822 }
9823
9824 if (TREE_CODE_CLASS (code) == tcc_unary
9825 || code == BIT_FIELD_REF
9826 || code == COMPONENT_REF
9827 || code == INDIRECT_REF)
9828 return expand_expr (treeop0, const0_rtx, VOIDmode,
9829 modifier);
9830
9831 else if (TREE_CODE_CLASS (code) == tcc_binary
9832 || TREE_CODE_CLASS (code) == tcc_comparison
9833 || code == ARRAY_REF || code == ARRAY_RANGE_REF)
9834 {
9835 expand_expr (treeop0, const0_rtx, VOIDmode, modifier);
9836 expand_expr (treeop1, const0_rtx, VOIDmode, modifier);
9837 return const0_rtx;
9838 }
9839
9840 target = 0;
9841 }
9842
9843 if (reduce_bit_field && modifier == EXPAND_STACK_PARM)
9844 target = 0;
9845
9846 /* Use subtarget as the target for operand 0 of a binary operation. */
9847 subtarget = get_subtarget (target);
9848 original_target = target;
9849
9850 switch (code)
9851 {
9852 case LABEL_DECL:
9853 {
9854 tree function = decl_function_context (exp);
9855
9856 temp = label_rtx (exp);
9857 temp = gen_rtx_LABEL_REF (Pmode, temp);
9858
9859 if (function != current_function_decl
9860 && function != 0)
9861 LABEL_REF_NONLOCAL_P (temp) = 1;
9862
9863 temp = gen_rtx_MEM (FUNCTION_MODE, temp);
9864 return temp;
9865 }
9866
9867 case SSA_NAME:
9868 /* ??? ivopts calls the expander without any preparation from
9869 out-of-ssa, so fake instructions as if this were an access to the
9870 base variable. This unnecessarily allocates a pseudo; see whether
9871 we can reuse it once partition base vars have it set already. */
9872 if (!currently_expanding_to_rtl)
9873 {
9874 tree var = SSA_NAME_VAR (exp);
9875 if (var && DECL_RTL_SET_P (var))
9876 return DECL_RTL (var);
9877 return gen_raw_REG (TYPE_MODE (TREE_TYPE (exp)),
9878 LAST_VIRTUAL_REGISTER + 1);
9879 }
9880
9881 g = get_gimple_for_ssa_name (exp);
9882 /* For EXPAND_INITIALIZER try harder to get something simpler. */
9883 if (g == NULL
9884 && modifier == EXPAND_INITIALIZER
9885 && !SSA_NAME_IS_DEFAULT_DEF (exp)
9886 && (optimize || !SSA_NAME_VAR (exp)
9887 || DECL_IGNORED_P (SSA_NAME_VAR (exp)))
9888 && stmt_is_replaceable_p (SSA_NAME_DEF_STMT (exp)))
9889 g = SSA_NAME_DEF_STMT (exp);
9890 if (g)
9891 {
9892 rtx r;
9893 location_t saved_loc = curr_insn_location ();
9894 location_t loc = gimple_location (g);
9895 if (loc != UNKNOWN_LOCATION)
9896 set_curr_insn_location (loc);
9897 ops.code = gimple_assign_rhs_code (g);
9898 switch (get_gimple_rhs_class (ops.code))
9899 {
9900 case GIMPLE_TERNARY_RHS:
9901 ops.op2 = gimple_assign_rhs3 (g);
9902 /* Fallthru */
9903 case GIMPLE_BINARY_RHS:
9904 ops.op1 = gimple_assign_rhs2 (g);
9905
9906 /* Try to expand a conditional compare. */
9907 if (targetm.gen_ccmp_first)
9908 {
9909 gcc_checking_assert (targetm.gen_ccmp_next != NULL);
9910 r = expand_ccmp_expr (g, mode);
9911 if (r)
9912 break;
9913 }
9914 /* Fallthru */
9915 case GIMPLE_UNARY_RHS:
9916 ops.op0 = gimple_assign_rhs1 (g);
9917 ops.type = TREE_TYPE (gimple_assign_lhs (g));
9918 ops.location = loc;
9919 r = expand_expr_real_2 (&ops, target, tmode, modifier);
9920 break;
9921 case GIMPLE_SINGLE_RHS:
9922 {
9923 r = expand_expr_real (gimple_assign_rhs1 (g), target,
9924 tmode, modifier, alt_rtl,
9925 inner_reference_p);
9926 break;
9927 }
9928 default:
9929 gcc_unreachable ();
9930 }
9931 set_curr_insn_location (saved_loc);
9932 if (REG_P (r) && !REG_EXPR (r))
9933 set_reg_attrs_for_decl_rtl (SSA_NAME_VAR (exp), r);
9934 return r;
9935 }
9936
9937 ssa_name = exp;
9938 decl_rtl = get_rtx_for_ssa_name (ssa_name);
9939 exp = SSA_NAME_VAR (ssa_name);
9940 goto expand_decl_rtl;
9941
9942 case PARM_DECL:
9943 case VAR_DECL:
9944 /* If a static var's type was incomplete when the decl was written,
9945 but the type is complete now, lay out the decl now. */
9946 if (DECL_SIZE (exp) == 0
9947 && COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (TREE_TYPE (exp))
9948 && (TREE_STATIC (exp) || DECL_EXTERNAL (exp)))
9949 layout_decl (exp, 0);
9950
9951 /* fall through */
9952
9953 case FUNCTION_DECL:
9954 case RESULT_DECL:
9955 decl_rtl = DECL_RTL (exp);
9956 expand_decl_rtl:
9957 gcc_assert (decl_rtl);
9958
9959 /* DECL_MODE might change when TYPE_MODE depends on attribute target
9960 settings for VECTOR_TYPE_P, which might switch for the function. */
9961 if (currently_expanding_to_rtl
9962 && code == VAR_DECL && MEM_P (decl_rtl)
9963 && VECTOR_TYPE_P (type) && exp && DECL_MODE (exp) != mode)
9964 decl_rtl = change_address (decl_rtl, TYPE_MODE (type), 0);
9965 else
9966 decl_rtl = copy_rtx (decl_rtl);
9967
9968 /* Record writes to register variables. */
9969 if (modifier == EXPAND_WRITE
9970 && REG_P (decl_rtl)
9971 && HARD_REGISTER_P (decl_rtl))
9972 add_to_hard_reg_set (&crtl->asm_clobbers,
9973 GET_MODE (decl_rtl), REGNO (decl_rtl));
9974
9975 /* Ensure the variable is marked as used even if it doesn't go through
9976 a parser. If it hasn't been used yet, write out an external
9977 definition. */
9978 if (exp)
9979 TREE_USED (exp) = 1;
9980
9981 /* Show we haven't gotten RTL for this yet. */
9982 temp = 0;
9983
9984 /* Variables inherited from containing functions should have
9985 been lowered by this point. */
9986 if (exp)
9987 context = decl_function_context (exp);
9988 gcc_assert (!exp
9989 || SCOPE_FILE_SCOPE_P (context)
9990 || context == current_function_decl
9991 || TREE_STATIC (exp)
9992 || DECL_EXTERNAL (exp)
9993 /* ??? C++ creates functions that are not TREE_STATIC. */
9994 || TREE_CODE (exp) == FUNCTION_DECL);
9995
9996 /* This is the case of an array whose size is to be determined
9997 from its initializer, while the initializer is still being parsed.
9998 ??? We aren't parsing while expanding anymore. */
9999
10000 if (MEM_P (decl_rtl) && REG_P (XEXP (decl_rtl, 0)))
10001 temp = validize_mem (decl_rtl);
10002
10003 /* If DECL_RTL is memory, we are in the normal case and the
10004 address is not valid, get the address into a register. */
10005
10006 else if (MEM_P (decl_rtl) && modifier != EXPAND_INITIALIZER)
10007 {
10008 if (alt_rtl)
10009 *alt_rtl = decl_rtl;
10010 decl_rtl = use_anchored_address (decl_rtl);
10011 if (modifier != EXPAND_CONST_ADDRESS
10012 && modifier != EXPAND_SUM
10013 && !memory_address_addr_space_p (exp ? DECL_MODE (exp)
10014 : GET_MODE (decl_rtl),
10015 XEXP (decl_rtl, 0),
10016 MEM_ADDR_SPACE (decl_rtl)))
10017 temp = replace_equiv_address (decl_rtl,
10018 copy_rtx (XEXP (decl_rtl, 0)));
10019 }
10020
10021 /* If we got something, return it. But first, set the alignment
10022 if the address is a register. */
10023 if (temp != 0)
10024 {
10025 if (exp && MEM_P (temp) && REG_P (XEXP (temp, 0)))
10026 mark_reg_pointer (XEXP (temp, 0), DECL_ALIGN (exp));
10027
10028 return temp;
10029 }
10030
10031 if (exp)
10032 dmode = DECL_MODE (exp);
10033 else
10034 dmode = TYPE_MODE (TREE_TYPE (ssa_name));
10035
10036 /* If the mode of DECL_RTL does not match that of the decl,
10037 there are two cases: we are dealing with a BLKmode value
10038 that is returned in a register, or we are dealing with
10039 a promoted value. In the latter case, return a SUBREG
10040 of the wanted mode, but mark it so that we know that it
10041 was already extended. */
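/* A sketch: on a target that promotes SImode variables to DImode
   registers, DECL_RTL may be (reg:DI Rn) while the decl's mode is
   SImode; we hand back (subreg:SI (reg:DI Rn) 0) with
   SUBREG_PROMOTED_VAR_P set (the subreg byte offset shown assumes
   little endian). */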
10042 if (REG_P (decl_rtl)
10043 && dmode != BLKmode
10044 && GET_MODE (decl_rtl) != dmode)
10045 {
10046 machine_mode pmode;
10047
10048 /* Get the signedness to be used for this variable. Ensure we get
10049 the same mode we got when the variable was declared. */
10050 if (code != SSA_NAME)
10051 pmode = promote_decl_mode (exp, &unsignedp);
10052 else if ((g = SSA_NAME_DEF_STMT (ssa_name))
10053 && gimple_code (g) == GIMPLE_CALL
10054 && !gimple_call_internal_p (g))
10055 pmode = promote_function_mode (type, mode, &unsignedp,
10056 gimple_call_fntype (g),
10057 2);
10058 else
10059 pmode = promote_ssa_mode (ssa_name, &unsignedp);
10060 gcc_assert (GET_MODE (decl_rtl) == pmode);
10061
10062 temp = gen_lowpart_SUBREG (mode, decl_rtl);
10063 SUBREG_PROMOTED_VAR_P (temp) = 1;
10064 SUBREG_PROMOTED_SET (temp, unsignedp);
10065 return temp;
10066 }
10067
10068 return decl_rtl;
10069
10070 case INTEGER_CST:
10071 {
10072 /* Given that TYPE_PRECISION (type) is not always equal to
10073 GET_MODE_PRECISION (TYPE_MODE (type)), we need to extend from
10074 the former to the latter according to the signedness of the
10075 type. */
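/* E.g. for a signed type of precision 1 in QImode, the value -1 is
   represented by the single bit 1 and must be sign-extended to the
   8-bit mode precision, giving (const_int -1) rather than
   (const_int 1). */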
10076 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
10077 temp = immed_wide_int_const
10078 (wi::to_wide (exp, GET_MODE_PRECISION (mode)), mode);
10079 return temp;
10080 }
10081
10082 case VECTOR_CST:
10083 {
10084 tree tmp = NULL_TREE;
10085 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
10086 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
10087 || GET_MODE_CLASS (mode) == MODE_VECTOR_FRACT
10088 || GET_MODE_CLASS (mode) == MODE_VECTOR_UFRACT
10089 || GET_MODE_CLASS (mode) == MODE_VECTOR_ACCUM
10090 || GET_MODE_CLASS (mode) == MODE_VECTOR_UACCUM)
10091 return const_vector_from_tree (exp);
10092 scalar_int_mode int_mode;
10093 if (is_int_mode (mode, &int_mode))
10094 {
10095 if (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (exp)))
10096 return const_scalar_mask_from_tree (int_mode, exp);
10097 else
10098 {
10099 tree type_for_mode
10100 = lang_hooks.types.type_for_mode (int_mode, 1);
10101 if (type_for_mode)
10102 tmp = fold_unary_loc (loc, VIEW_CONVERT_EXPR,
10103 type_for_mode, exp);
10104 }
10105 }
10106 if (!tmp)
10107 {
10108 vec<constructor_elt, va_gc> *v;
10109 /* Constructors need to be fixed-length. FIXME. */
10110 unsigned int nunits = VECTOR_CST_NELTS (exp).to_constant ();
10111 vec_alloc (v, nunits);
10112 for (unsigned int i = 0; i < nunits; ++i)
10113 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, VECTOR_CST_ELT (exp, i));
10114 tmp = build_constructor (type, v);
10115 }
10116 return expand_expr (tmp, ignore ? const0_rtx : target,
10117 tmode, modifier);
10118 }
10119
10120 case CONST_DECL:
10121 if (modifier == EXPAND_WRITE)
10122 {
10123 /* Writing into CONST_DECL is always invalid, but handle it
10124 gracefully. */
10125 addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (exp));
10126 scalar_int_mode address_mode = targetm.addr_space.address_mode (as);
10127 op0 = expand_expr_addr_expr_1 (exp, NULL_RTX, address_mode,
10128 EXPAND_NORMAL, as);
10129 op0 = memory_address_addr_space (mode, op0, as);
10130 temp = gen_rtx_MEM (mode, op0);
10131 set_mem_addr_space (temp, as);
10132 return temp;
10133 }
10134 return expand_expr (DECL_INITIAL (exp), target, VOIDmode, modifier);
10135
10136 case REAL_CST:
10137 /* If optimized, generate immediate CONST_DOUBLE
10138 which will be turned into memory by reload if necessary.
10139
10140 We used to force a register so that loop.c could see it. But
10141 this does not allow gen_* patterns to perform optimizations with
10142 the constants. It also produces two insns in cases like "x = 1.0;".
10143 On most machines, floating-point constants are not permitted in
10144 many insns, so we'd end up copying it to a register in any case.
10145
10146 Now, we do the copying in expand_binop, if appropriate. */
10147 return const_double_from_real_value (TREE_REAL_CST (exp),
10148 TYPE_MODE (TREE_TYPE (exp)));
10149
10150 case FIXED_CST:
10151 return CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (exp),
10152 TYPE_MODE (TREE_TYPE (exp)));
10153
10154 case COMPLEX_CST:
10155 /* Handle evaluating a complex constant in a CONCAT target. */
10156 if (original_target && GET_CODE (original_target) == CONCAT)
10157 {
10158 machine_mode mode = TYPE_MODE (TREE_TYPE (TREE_TYPE (exp)));
10159 rtx rtarg, itarg;
10160
10161 rtarg = XEXP (original_target, 0);
10162 itarg = XEXP (original_target, 1);
10163
10164 /* Move the real and imaginary parts separately. */
10165 op0 = expand_expr (TREE_REALPART (exp), rtarg, mode, EXPAND_NORMAL);
10166 op1 = expand_expr (TREE_IMAGPART (exp), itarg, mode, EXPAND_NORMAL);
10167
10168 if (op0 != rtarg)
10169 emit_move_insn (rtarg, op0);
10170 if (op1 != itarg)
10171 emit_move_insn (itarg, op1);
10172
10173 return original_target;
10174 }
10175
10176 /* fall through */
10177
10178 case STRING_CST:
10179 temp = expand_expr_constant (exp, 1, modifier);
10180
10181 /* temp contains a constant address.
10182 On RISC machines where a constant address isn't valid,
10183 make some insns to get that address into a register. */
10184 if (modifier != EXPAND_CONST_ADDRESS
10185 && modifier != EXPAND_INITIALIZER
10186 && modifier != EXPAND_SUM
10187 && ! memory_address_addr_space_p (mode, XEXP (temp, 0),
10188 MEM_ADDR_SPACE (temp)))
10189 return replace_equiv_address (temp,
10190 copy_rtx (XEXP (temp, 0)));
10191 return temp;
10192
10193 case POLY_INT_CST:
10194 return immed_wide_int_const (poly_int_cst_value (exp), mode);
10195
10196 case SAVE_EXPR:
10197 {
10198 tree val = treeop0;
10199 rtx ret = expand_expr_real_1 (val, target, tmode, modifier, alt_rtl,
10200 inner_reference_p);
10201
10202 if (!SAVE_EXPR_RESOLVED_P (exp))
10203 {
10204 /* We can indeed still hit this case, typically via builtin
10205 expanders calling save_expr immediately before expanding
10206 something. Assume this means that we only have to deal
10207 with non-BLKmode values. */
10208 gcc_assert (GET_MODE (ret) != BLKmode);
10209
10210 val = build_decl (curr_insn_location (),
10211 VAR_DECL, NULL, TREE_TYPE (exp));
10212 DECL_ARTIFICIAL (val) = 1;
10213 DECL_IGNORED_P (val) = 1;
10214 treeop0 = val;
10215 TREE_OPERAND (exp, 0) = treeop0;
10216 SAVE_EXPR_RESOLVED_P (exp) = 1;
10217
10218 if (!CONSTANT_P (ret))
10219 ret = copy_to_reg (ret);
10220 SET_DECL_RTL (val, ret);
10221 }
10222
10223 return ret;
10224 }
10225
10226
10227 case CONSTRUCTOR:
10228 /* If we don't need the result, just ensure we evaluate any
10229 subexpressions. */
10230 if (ignore)
10231 {
10232 unsigned HOST_WIDE_INT idx;
10233 tree value;
10234
10235 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), idx, value)
10236 expand_expr (value, const0_rtx, VOIDmode, EXPAND_NORMAL);
10237
10238 return const0_rtx;
10239 }
10240
10241 return expand_constructor (exp, target, modifier, false);
10242
10243 case TARGET_MEM_REF:
10244 {
10245 addr_space_t as
10246 = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
10247 enum insn_code icode;
10248 unsigned int align;
10249
10250 op0 = addr_for_mem_ref (exp, as, true);
10251 op0 = memory_address_addr_space (mode, op0, as);
10252 temp = gen_rtx_MEM (mode, op0);
10253 set_mem_attributes (temp, exp, 0);
10254 set_mem_addr_space (temp, as);
10255 align = get_object_alignment (exp);
10256 if (modifier != EXPAND_WRITE
10257 && modifier != EXPAND_MEMORY
10258 && mode != BLKmode
10259 && align < GET_MODE_ALIGNMENT (mode)
10260 /* If the target does not have special handling for unaligned
10261 loads of this mode, then it can use regular moves for them. */
10262 && ((icode = optab_handler (movmisalign_optab, mode))
10263 != CODE_FOR_nothing))
10264 {
10265 struct expand_operand ops[2];
10266
10267 /* We've already validated the memory, and we're creating a
10268 new pseudo destination. The predicates really can't fail,
10269 nor can the generator. */
10270 create_output_operand (&ops[0], NULL_RTX, mode);
10271 create_fixed_operand (&ops[1], temp);
10272 expand_insn (icode, 2, ops);
10273 temp = ops[0].value;
10274 }
10275 return temp;
10276 }
10277
10278 case MEM_REF:
10279 {
10280 const bool reverse = REF_REVERSE_STORAGE_ORDER (exp);
10281 addr_space_t as
10282 = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (TREE_OPERAND (exp, 0))));
10283 machine_mode address_mode;
10284 tree base = TREE_OPERAND (exp, 0);
10285 gimple *def_stmt;
10286 enum insn_code icode;
10287 unsigned align;
10288 /* Handle expansion of non-aliased memory with non-BLKmode. That
10289 might end up in a register. */
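/* E.g. if "v" is a non-addressable vector variable living in a
   register, a load such as MEM[(int *)&v + 4B] never touches
   memory; it is rewritten below as a BIT_FIELD_REF of v (or as a
   VIEW_CONVERT_EXPR when the whole of v is read at offset zero). */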
10290 if (mem_ref_refers_to_non_mem_p (exp))
10291 {
10292 poly_int64 offset = mem_ref_offset (exp).force_shwi ();
10293 base = TREE_OPERAND (base, 0);
10294 if (known_eq (offset, 0)
10295 && !reverse
10296 && tree_fits_uhwi_p (TYPE_SIZE (type))
10297 && known_eq (GET_MODE_BITSIZE (DECL_MODE (base)),
10298 tree_to_uhwi (TYPE_SIZE (type))))
10299 return expand_expr (build1 (VIEW_CONVERT_EXPR, type, base),
10300 target, tmode, modifier);
10301 if (TYPE_MODE (type) == BLKmode)
10302 {
10303 temp = assign_stack_temp (DECL_MODE (base),
10304 GET_MODE_SIZE (DECL_MODE (base)));
10305 store_expr (base, temp, 0, false, false);
10306 temp = adjust_address (temp, BLKmode, offset);
10307 set_mem_size (temp, int_size_in_bytes (type));
10308 return temp;
10309 }
10310 exp = build3 (BIT_FIELD_REF, type, base, TYPE_SIZE (type),
10311 bitsize_int (offset * BITS_PER_UNIT));
10312 REF_REVERSE_STORAGE_ORDER (exp) = reverse;
10313 return expand_expr (exp, target, tmode, modifier);
10314 }
10315 address_mode = targetm.addr_space.address_mode (as);
10316 base = TREE_OPERAND (exp, 0);
10317 if ((def_stmt = get_def_for_expr (base, BIT_AND_EXPR)))
10318 {
10319 tree mask = gimple_assign_rhs2 (def_stmt);
10320 base = build2 (BIT_AND_EXPR, TREE_TYPE (base),
10321 gimple_assign_rhs1 (def_stmt), mask);
10322 TREE_OPERAND (exp, 0) = base;
10323 }
10324 align = get_object_alignment (exp);
10325 op0 = expand_expr (base, NULL_RTX, VOIDmode, EXPAND_SUM);
10326 op0 = memory_address_addr_space (mode, op0, as);
10327 if (!integer_zerop (TREE_OPERAND (exp, 1)))
10328 {
10329 rtx off = immed_wide_int_const (mem_ref_offset (exp), address_mode);
10330 op0 = simplify_gen_binary (PLUS, address_mode, op0, off);
10331 op0 = memory_address_addr_space (mode, op0, as);
10332 }
10333 temp = gen_rtx_MEM (mode, op0);
10334 set_mem_attributes (temp, exp, 0);
10335 set_mem_addr_space (temp, as);
10336 if (TREE_THIS_VOLATILE (exp))
10337 MEM_VOLATILE_P (temp) = 1;
10338 if (modifier != EXPAND_WRITE
10339 && modifier != EXPAND_MEMORY
10340 && !inner_reference_p
10341 && mode != BLKmode
10342 && align < GET_MODE_ALIGNMENT (mode))
10343 {
10344 if ((icode = optab_handler (movmisalign_optab, mode))
10345 != CODE_FOR_nothing)
10346 {
10347 struct expand_operand ops[2];
10348
10349 /* We've already validated the memory, and we're creating a
10350 new pseudo destination. The predicates really can't fail,
10351 nor can the generator. */
10352 create_output_operand (&ops[0], NULL_RTX, mode);
10353 create_fixed_operand (&ops[1], temp);
10354 expand_insn (icode, 2, ops);
10355 temp = ops[0].value;
10356 }
10357 else if (targetm.slow_unaligned_access (mode, align))
10358 temp = extract_bit_field (temp, GET_MODE_BITSIZE (mode),
10359 0, TYPE_UNSIGNED (TREE_TYPE (exp)),
10360 (modifier == EXPAND_STACK_PARM
10361 ? NULL_RTX : target),
10362 mode, mode, false, alt_rtl);
10363 }
10364 if (reverse
10365 && modifier != EXPAND_MEMORY
10366 && modifier != EXPAND_WRITE)
10367 temp = flip_storage_order (mode, temp);
10368 return temp;
10369 }
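/* Illustrative sketch of the misaligned-access path above (hypothetical
   example, assuming a strict-alignment target where SImode is 32 bits
   wide and requires 32-bit alignment):

       int f (char *p) { return *(int *) p; }

   Here the MEM_REF *p has only byte alignment, so the expansion either
   uses the target's movmisalign pattern when one is available, or falls
   back to extract_bit_field to assemble the SImode value from the
   unaligned MEM.  */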
10370
10371 case ARRAY_REF:
10372
10373 {
10374 tree array = treeop0;
10375 tree index = treeop1;
10376 tree init;
10377
10378 /* Fold an expression like: "foo"[2].
10379 This is not done in fold so it won't happen inside &.
10380 Don't fold if this is for wide characters since it's too
10381 difficult to do correctly and this is a very rare case. */
10382
10383 if (modifier != EXPAND_CONST_ADDRESS
10384 && modifier != EXPAND_INITIALIZER
10385 && modifier != EXPAND_MEMORY)
10386 {
10387 tree t = fold_read_from_constant_string (exp);
10388
10389 if (t)
10390 return expand_expr (t, target, tmode, modifier);
10391 }
10392
10393 /* If this is a constant index into a constant array,
10394 just get the value from the array. Handle both the cases when
10395 we have an explicit constructor and when our operand is a variable
10396 that was declared const. */
10397
10398 if (modifier != EXPAND_CONST_ADDRESS
10399 && modifier != EXPAND_INITIALIZER
10400 && modifier != EXPAND_MEMORY
10401 && TREE_CODE (array) == CONSTRUCTOR
10402 && ! TREE_SIDE_EFFECTS (array)
10403 && TREE_CODE (index) == INTEGER_CST)
10404 {
10405 unsigned HOST_WIDE_INT ix;
10406 tree field, value;
10407
10408 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (array), ix,
10409 field, value)
10410 if (tree_int_cst_equal (field, index))
10411 {
10412 if (!TREE_SIDE_EFFECTS (value))
10413 return expand_expr (fold (value), target, tmode, modifier);
10414 break;
10415 }
10416 }
10417
10418 else if (optimize >= 1
10419 && modifier != EXPAND_CONST_ADDRESS
10420 && modifier != EXPAND_INITIALIZER
10421 && modifier != EXPAND_MEMORY
10422 && TREE_READONLY (array) && ! TREE_SIDE_EFFECTS (array)
10423 && TREE_CODE (index) == INTEGER_CST
10424 && (VAR_P (array) || TREE_CODE (array) == CONST_DECL)
10425 && (init = ctor_for_folding (array)) != error_mark_node)
10426 {
10427 if (init == NULL_TREE)
10428 {
10429 tree value = build_zero_cst (type);
10430 if (TREE_CODE (value) == CONSTRUCTOR)
10431 {
10432 /* If VALUE is a CONSTRUCTOR, this optimization is only
10433 useful if this doesn't store the CONSTRUCTOR into
10434 memory. If it does, it is more efficient to just
10435 load the data from the array directly. */
10436 rtx ret = expand_constructor (value, target,
10437 modifier, true);
10438 if (ret == NULL_RTX)
10439 value = NULL_TREE;
10440 }
10441
10442 if (value)
10443 return expand_expr (value, target, tmode, modifier);
10444 }
10445 else if (TREE_CODE (init) == CONSTRUCTOR)
10446 {
10447 unsigned HOST_WIDE_INT ix;
10448 tree field, value;
10449
10450 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (init), ix,
10451 field, value)
10452 if (tree_int_cst_equal (field, index))
10453 {
10454 if (TREE_SIDE_EFFECTS (value))
10455 break;
10456
10457 if (TREE_CODE (value) == CONSTRUCTOR)
10458 {
10459 /* If VALUE is a CONSTRUCTOR, this
10460 optimization is only useful if
10461 this doesn't store the CONSTRUCTOR
10462 into memory. If it does, it is more
10463 efficient to just load the data from
10464 the array directly. */
10465 rtx ret = expand_constructor (value, target,
10466 modifier, true);
10467 if (ret == NULL_RTX)
10468 break;
10469 }
10470
10471 return
10472 expand_expr (fold (value), target, tmode, modifier);
10473 }
10474 }
10475 else if (TREE_CODE (init) == STRING_CST)
10476 {
10477 tree low_bound = array_ref_low_bound (exp);
10478 tree index1 = fold_convert_loc (loc, sizetype, treeop1);
10479
10480 /* Optimize the special case of a zero lower bound.
10481
10482 We convert the lower bound to sizetype to avoid problems
10483 with constant folding. E.g. suppose the lower bound is
10484 1 and its mode is QI. Without the conversion
10485 (ARRAY + (INDEX - (unsigned char)1))
10486 becomes
10487 (ARRAY + (-(unsigned char)1) + INDEX)
10488 which becomes
10489 (ARRAY + 255 + INDEX). Oops! */
10490 if (!integer_zerop (low_bound))
10491 index1 = size_diffop_loc (loc, index1,
10492 fold_convert_loc (loc, sizetype,
10493 low_bound));
10494
10495 if (tree_fits_uhwi_p (index1)
10496 && compare_tree_int (index1, TREE_STRING_LENGTH (init)) < 0)
10497 {
10498 tree type = TREE_TYPE (TREE_TYPE (init));
10499 scalar_int_mode mode;
10500
10501 if (is_int_mode (TYPE_MODE (type), &mode)
10502 && GET_MODE_SIZE (mode) == 1)
10503 return gen_int_mode (TREE_STRING_POINTER (init)
10504 [TREE_INT_CST_LOW (index1)],
10505 mode);
10506 }
10507 }
10508 }
10509 }
10510 goto normal_inner_ref;
10511
10512 case COMPONENT_REF:
10513 /* If the operand is a CONSTRUCTOR, we can just extract the
10514 appropriate field if it is present. */
10515 if (TREE_CODE (treeop0) == CONSTRUCTOR)
10516 {
10517 unsigned HOST_WIDE_INT idx;
10518 tree field, value;
10519 scalar_int_mode field_mode;
10520
10521 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (treeop0),
10522 idx, field, value)
10523 if (field == treeop1
10524 /* We can normally use the value of the field in the
10525 CONSTRUCTOR. However, if this is a bitfield in
10526 an integral mode that we can fit in a HOST_WIDE_INT,
10527 we must mask only the number of bits in the bitfield,
10528 since this is done implicitly by the constructor. If
10529 the bitfield does not meet either of those conditions,
10530 we can't do this optimization. */
10531 && (! DECL_BIT_FIELD (field)
10532 || (is_int_mode (DECL_MODE (field), &field_mode)
10533 && (GET_MODE_PRECISION (field_mode)
10534 <= HOST_BITS_PER_WIDE_INT))))
10535 {
10536 if (DECL_BIT_FIELD (field)
10537 && modifier == EXPAND_STACK_PARM)
10538 target = 0;
10539 op0 = expand_expr (value, target, tmode, modifier);
10540 if (DECL_BIT_FIELD (field))
10541 {
10542 HOST_WIDE_INT bitsize = TREE_INT_CST_LOW (DECL_SIZE (field));
10543 scalar_int_mode imode
10544 = SCALAR_INT_TYPE_MODE (TREE_TYPE (field));
10545
10546 if (TYPE_UNSIGNED (TREE_TYPE (field)))
10547 {
10548 op1 = gen_int_mode ((HOST_WIDE_INT_1 << bitsize) - 1,
10549 imode);
10550 op0 = expand_and (imode, op0, op1, target);
10551 }
10552 else
10553 {
10554 int count = GET_MODE_PRECISION (imode) - bitsize;
10555
10556 op0 = expand_shift (LSHIFT_EXPR, imode, op0, count,
10557 target, 0);
10558 op0 = expand_shift (RSHIFT_EXPR, imode, op0, count,
10559 target, 0);
10560 }
10561 }
10562
10563 return op0;
10564 }
10565 }
10566 goto normal_inner_ref;
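/* Worked example for the bitfield handling above (hypothetical values,
   assuming a 32-bit SImode): for a signed 3-bit field taken from a
   CONSTRUCTOR, count = 32 - 3 = 29, so the pair of shifts computes

       (op0 << 29) >> 29

   with an arithmetic right shift, sign-extending the field; an
   unsigned field is instead masked with (1 << 3) - 1 = 7.  */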
10567
10568 case BIT_FIELD_REF:
10569 case ARRAY_RANGE_REF:
10570 normal_inner_ref:
10571 {
10572 machine_mode mode1, mode2;
10573 poly_int64 bitsize, bitpos, bytepos;
10574 tree offset;
10575 int reversep, volatilep = 0, must_force_mem;
10576 tree tem
10577 = get_inner_reference (exp, &bitsize, &bitpos, &offset, &mode1,
10578 &unsignedp, &reversep, &volatilep);
10579 rtx orig_op0, memloc;
10580 bool clear_mem_expr = false;
10581
10582 /* If we got back the original object, something is wrong. Perhaps
10583 we are evaluating an expression too early. In any event, don't
10584 infinitely recurse. */
10585 gcc_assert (tem != exp);
10586
10587 /* If TEM's type is a union of variable size, pass TARGET to the inner
10588 computation, since it will need a temporary and TARGET is known
10589 to be usable as one. This occurs in unchecked conversion in Ada. */
10590 orig_op0 = op0
10591 = expand_expr_real (tem,
10592 (TREE_CODE (TREE_TYPE (tem)) == UNION_TYPE
10593 && COMPLETE_TYPE_P (TREE_TYPE (tem))
10594 && (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem)))
10595 != INTEGER_CST)
10596 && modifier != EXPAND_STACK_PARM
10597 ? target : NULL_RTX),
10598 VOIDmode,
10599 modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier,
10600 NULL, true);
10601
10602 /* If the field has a mode, we want to access it in the
10603 field's mode, not the computed mode.
10604 If a MEM has VOIDmode (external with incomplete type),
10605 use BLKmode for it instead. */
10606 if (MEM_P (op0))
10607 {
10608 if (mode1 != VOIDmode)
10609 op0 = adjust_address (op0, mode1, 0);
10610 else if (GET_MODE (op0) == VOIDmode)
10611 op0 = adjust_address (op0, BLKmode, 0);
10612 }
10613
10614 mode2
10615 = CONSTANT_P (op0) ? TYPE_MODE (TREE_TYPE (tem)) : GET_MODE (op0);
10616
10617 /* If we have either an offset, a BLKmode result, or a reference
10618 outside the underlying object, we must force it to memory.
10619 Such a case can occur in Ada if we have unchecked conversion
10620 of an expression from a scalar type to an aggregate type or
10621 for an ARRAY_RANGE_REF whose type is BLKmode, or if we were
10622 passed a partially uninitialized object or a view-conversion
10623 to a larger size. */
10624 must_force_mem = (offset
10625 || mode1 == BLKmode
10626 || maybe_gt (bitpos + bitsize,
10627 GET_MODE_BITSIZE (mode2)));
10628
10629 /* Handle CONCAT first. */
10630 if (GET_CODE (op0) == CONCAT && !must_force_mem)
10631 {
10632 if (known_eq (bitpos, 0)
10633 && known_eq (bitsize, GET_MODE_BITSIZE (GET_MODE (op0)))
10634 && COMPLEX_MODE_P (mode1)
10635 && COMPLEX_MODE_P (GET_MODE (op0))
10636 && (GET_MODE_PRECISION (GET_MODE_INNER (mode1))
10637 == GET_MODE_PRECISION (GET_MODE_INNER (GET_MODE (op0)))))
10638 {
10639 if (reversep)
10640 op0 = flip_storage_order (GET_MODE (op0), op0);
10641 if (mode1 != GET_MODE (op0))
10642 {
10643 rtx parts[2];
10644 for (int i = 0; i < 2; i++)
10645 {
10646 rtx op = read_complex_part (op0, i != 0);
10647 if (GET_CODE (op) == SUBREG)
10648 op = force_reg (GET_MODE (op), op);
10649 rtx temp = gen_lowpart_common (GET_MODE_INNER (mode1),
10650 op);
10651 if (temp)
10652 op = temp;
10653 else
10654 {
10655 if (!REG_P (op) && !MEM_P (op))
10656 op = force_reg (GET_MODE (op), op);
10657 op = gen_lowpart (GET_MODE_INNER (mode1), op);
10658 }
10659 parts[i] = op;
10660 }
10661 op0 = gen_rtx_CONCAT (mode1, parts[0], parts[1]);
10662 }
10663 return op0;
10664 }
10665 if (known_eq (bitpos, 0)
10666 && known_eq (bitsize,
10667 GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0))))
10668 && maybe_ne (bitsize, 0))
10669 {
10670 op0 = XEXP (op0, 0);
10671 mode2 = GET_MODE (op0);
10672 }
10673 else if (known_eq (bitpos,
10674 GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 0))))
10675 && known_eq (bitsize,
10676 GET_MODE_BITSIZE (GET_MODE (XEXP (op0, 1))))
10677 && maybe_ne (bitpos, 0)
10678 && maybe_ne (bitsize, 0))
10679 {
10680 op0 = XEXP (op0, 1);
10681 bitpos = 0;
10682 mode2 = GET_MODE (op0);
10683 }
10684 else
10685 /* Otherwise force into memory. */
10686 must_force_mem = 1;
10687 }
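/* Sketch of the CONCAT cases above (hypothetical complex value): if
   op0 is (concat re im) and the reference covers exactly the first
   part (bitpos 0, bitsize equal to one part), we return re directly;
   if it covers exactly the second part, we return im and reset bitpos
   to 0.  Any other slice of the CONCAT forces it into memory.  */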
10688
10689 /* If this is a constant, put it in a register if it is a legitimate
10690 constant and we don't need a memory reference. */
10691 if (CONSTANT_P (op0)
10692 && mode2 != BLKmode
10693 && targetm.legitimate_constant_p (mode2, op0)
10694 && !must_force_mem)
10695 op0 = force_reg (mode2, op0);
10696
10697 /* Otherwise, if this is a constant, try to force it to the constant
10698 pool. Note that back-ends, e.g. MIPS, may refuse to do so if it
10699 is a legitimate constant. */
10700 else if (CONSTANT_P (op0) && (memloc = force_const_mem (mode2, op0)))
10701 op0 = validize_mem (memloc);
10702
10703 /* Otherwise, if this is a constant or the object is not in memory
10704 and needs to be, put it there. */
10705 else if (CONSTANT_P (op0) || (!MEM_P (op0) && must_force_mem))
10706 {
10707 memloc = assign_temp (TREE_TYPE (tem), 1, 1);
10708 emit_move_insn (memloc, op0);
10709 op0 = memloc;
10710 clear_mem_expr = true;
10711 }
10712
10713 if (offset)
10714 {
10715 machine_mode address_mode;
10716 rtx offset_rtx = expand_expr (offset, NULL_RTX, VOIDmode,
10717 EXPAND_SUM);
10718
10719 gcc_assert (MEM_P (op0));
10720
10721 address_mode = get_address_mode (op0);
10722 if (GET_MODE (offset_rtx) != address_mode)
10723 {
10724 /* We cannot be sure that the RTL in offset_rtx is valid outside
10725 of a memory address context, so force it into a register
10726 before attempting to convert it to the desired mode. */
10727 offset_rtx = force_operand (offset_rtx, NULL_RTX);
10728 offset_rtx = convert_to_mode (address_mode, offset_rtx, 0);
10729 }
10730
10731 /* See the comment in expand_assignment for the rationale. */
10732 if (mode1 != VOIDmode
10733 && maybe_ne (bitpos, 0)
10734 && maybe_gt (bitsize, 0)
10735 && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
10736 && multiple_p (bitpos, bitsize)
10737 && multiple_p (bitsize, GET_MODE_ALIGNMENT (mode1))
10738 && MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode1))
10739 {
10740 op0 = adjust_address (op0, mode1, bytepos);
10741 bitpos = 0;
10742 }
10743
10744 op0 = offset_address (op0, offset_rtx,
10745 highest_pow2_factor (offset));
10746 }
10747
10748 /* If OFFSET is making OP0 more aligned than BIGGEST_ALIGNMENT,
10749 record its alignment as BIGGEST_ALIGNMENT. */
10750 if (MEM_P (op0)
10751 && known_eq (bitpos, 0)
10752 && offset != 0
10753 && is_aligning_offset (offset, tem))
10754 set_mem_align (op0, BIGGEST_ALIGNMENT);
10755
10756 /* Don't forget about volatility even if this is a bitfield. */
10757 if (MEM_P (op0) && volatilep && ! MEM_VOLATILE_P (op0))
10758 {
10759 if (op0 == orig_op0)
10760 op0 = copy_rtx (op0);
10761
10762 MEM_VOLATILE_P (op0) = 1;
10763 }
10764
10765 /* In cases where an aligned union has an unaligned object
10766 as a field, we might be extracting a BLKmode value from
10767 an integer-mode (e.g., SImode) object. Handle this case
10768 by doing the extract into an object as wide as the field
10769 (which we know to be the width of a basic mode), then
10770 storing into memory, and changing the mode to BLKmode. */
10771 if (mode1 == VOIDmode
10772 || REG_P (op0) || GET_CODE (op0) == SUBREG
10773 || (mode1 != BLKmode && ! direct_load[(int) mode1]
10774 && GET_MODE_CLASS (mode) != MODE_COMPLEX_INT
10775 && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT
10776 && modifier != EXPAND_CONST_ADDRESS
10777 && modifier != EXPAND_INITIALIZER
10778 && modifier != EXPAND_MEMORY)
10779 /* If the bitfield is volatile and the bitsize
10780 is narrower than the access size of the bitfield,
10781 we need to extract bitfields from the access. */
10782 || (volatilep && TREE_CODE (exp) == COMPONENT_REF
10783 && DECL_BIT_FIELD_TYPE (TREE_OPERAND (exp, 1))
10784 && mode1 != BLKmode
10785 && maybe_lt (bitsize, GET_MODE_SIZE (mode1) * BITS_PER_UNIT))
10786 /* If the field isn't aligned enough to fetch as a memref,
10787 fetch it as a bit field. */
10788 || (mode1 != BLKmode
10789 && (((MEM_P (op0)
10790 ? MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode1)
10791 || !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode1))
10792 : TYPE_ALIGN (TREE_TYPE (tem)) < GET_MODE_ALIGNMENT (mode)
10793 || !multiple_p (bitpos, GET_MODE_ALIGNMENT (mode)))
10794 && modifier != EXPAND_MEMORY
10795 && ((modifier == EXPAND_CONST_ADDRESS
10796 || modifier == EXPAND_INITIALIZER)
10797 ? STRICT_ALIGNMENT
10798 : targetm.slow_unaligned_access (mode1,
10799 MEM_ALIGN (op0))))
10800 || !multiple_p (bitpos, BITS_PER_UNIT)))
10801 /* If the type and the field have constant sizes and the
10802 size of the type isn't the same as the size of the bitfield,
10803 we must use bitfield operations. */
10804 || (known_size_p (bitsize)
10805 && TYPE_SIZE (TREE_TYPE (exp))
10806 && poly_int_tree_p (TYPE_SIZE (TREE_TYPE (exp)))
10807 && maybe_ne (wi::to_poly_offset (TYPE_SIZE (TREE_TYPE (exp))),
10808 bitsize)))
10809 {
10810 machine_mode ext_mode = mode;
10811
10812 if (ext_mode == BLKmode
10813 && ! (target != 0 && MEM_P (op0)
10814 && MEM_P (target)
10815 && multiple_p (bitpos, BITS_PER_UNIT)))
10816 ext_mode = int_mode_for_size (bitsize, 1).else_blk ();
10817
10818 if (ext_mode == BLKmode)
10819 {
10820 if (target == 0)
10821 target = assign_temp (type, 1, 1);
10822
10823 /* ??? Unlike the similar test a few lines below, this one is
10824 very likely obsolete. */
10825 if (known_eq (bitsize, 0))
10826 return target;
10827
10828 /* In this case, BITPOS must start at a byte boundary and
10829 TARGET, if specified, must be a MEM. */
10830 gcc_assert (MEM_P (op0)
10831 && (!target || MEM_P (target)));
10832
10833 bytepos = exact_div (bitpos, BITS_PER_UNIT);
10834 poly_int64 bytesize = bits_to_bytes_round_up (bitsize);
10835 emit_block_move (target,
10836 adjust_address (op0, VOIDmode, bytepos),
10837 gen_int_mode (bytesize, Pmode),
10838 (modifier == EXPAND_STACK_PARM
10839 ? BLOCK_OP_CALL_PARM : BLOCK_OP_NORMAL));
10840
10841 return target;
10842 }
10843
10844 /* If we have nothing to extract, the result will be 0 for targets
10845 with SHIFT_COUNT_TRUNCATED == 0 and garbage otherwise. Always
10846 return 0 for the sake of consistency, as reading a zero-sized
10847 bitfield is valid in Ada and the value is fully specified. */
10848 if (known_eq (bitsize, 0))
10849 return const0_rtx;
10850
10851 op0 = validize_mem (op0);
10852
10853 if (MEM_P (op0) && REG_P (XEXP (op0, 0)))
10854 mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
10855
10856 /* If the result has a record type and the extraction is done in
10857 an integral mode, then the field may be not aligned on a byte
10858 boundary; in this case, if it has reverse storage order, it
10859 needs to be extracted as a scalar field with reverse storage
10860 order and put back into memory order afterwards. */
10861 if (TREE_CODE (type) == RECORD_TYPE
10862 && GET_MODE_CLASS (ext_mode) == MODE_INT)
10863 reversep = TYPE_REVERSE_STORAGE_ORDER (type);
10864
10865 op0 = extract_bit_field (op0, bitsize, bitpos, unsignedp,
10866 (modifier == EXPAND_STACK_PARM
10867 ? NULL_RTX : target),
10868 ext_mode, ext_mode, reversep, alt_rtl);
10869
10870 /* If the result has a record type and the mode of OP0 is an
10871 integral mode then, if BITSIZE is narrower than this mode
10872 and this is for big-endian data, we must put the field
10873 into the high-order bits. And we must also put it back
10874 into memory order if it has been previously reversed. */
10875 scalar_int_mode op0_mode;
10876 if (TREE_CODE (type) == RECORD_TYPE
10877 && is_int_mode (GET_MODE (op0), &op0_mode))
10878 {
10879 HOST_WIDE_INT size = GET_MODE_BITSIZE (op0_mode);
10880
10881 gcc_checking_assert (known_le (bitsize, size));
10882 if (maybe_lt (bitsize, size)
10883 && reversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
10884 op0 = expand_shift (LSHIFT_EXPR, op0_mode, op0,
10885 size - bitsize, op0, 1);
10886
10887 if (reversep)
10888 op0 = flip_storage_order (op0_mode, op0);
10889 }
10890
10891 /* If the result type is BLKmode, store the data into a temporary
10892 of the appropriate type, but with the mode corresponding to the
10893 mode for the data we have (op0's mode). */
10894 if (mode == BLKmode)
10895 {
10896 rtx new_rtx
10897 = assign_stack_temp_for_type (ext_mode,
10898 GET_MODE_BITSIZE (ext_mode),
10899 type);
10900 emit_move_insn (new_rtx, op0);
10901 op0 = copy_rtx (new_rtx);
10902 PUT_MODE (op0, BLKmode);
10903 }
10904
10905 return op0;
10906 }
10907
10908 /* If the result is BLKmode, use that to access the object
10909 now as well. */
10910 if (mode == BLKmode)
10911 mode1 = BLKmode;
10912
10913 /* Get a reference to just this component. */
10914 bytepos = bits_to_bytes_round_down (bitpos);
10915 if (modifier == EXPAND_CONST_ADDRESS
10916 || modifier == EXPAND_SUM || modifier == EXPAND_INITIALIZER)
10917 op0 = adjust_address_nv (op0, mode1, bytepos);
10918 else
10919 op0 = adjust_address (op0, mode1, bytepos);
10920
10921 if (op0 == orig_op0)
10922 op0 = copy_rtx (op0);
10923
10924 /* Don't set memory attributes if the base expression is
10925 SSA_NAME that got expanded as a MEM. In that case, we should
10926 just honor its original memory attributes. */
10927 if (TREE_CODE (tem) != SSA_NAME || !MEM_P (orig_op0))
10928 set_mem_attributes (op0, exp, 0);
10929
10930 if (REG_P (XEXP (op0, 0)))
10931 mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
10932
10933 /* If op0 is a temporary because the original expression was forced
10934 to memory, clear MEM_EXPR so that the original expression cannot
10935 be marked as addressable through MEM_EXPR of the temporary. */
10936 if (clear_mem_expr)
10937 set_mem_expr (op0, NULL_TREE);
10938
10939 MEM_VOLATILE_P (op0) |= volatilep;
10940
10941 if (reversep
10942 && modifier != EXPAND_MEMORY
10943 && modifier != EXPAND_WRITE)
10944 op0 = flip_storage_order (mode1, op0);
10945
10946 if (mode == mode1 || mode1 == BLKmode || mode1 == tmode
10947 || modifier == EXPAND_CONST_ADDRESS
10948 || modifier == EXPAND_INITIALIZER)
10949 return op0;
10950
10951 if (target == 0)
10952 target = gen_reg_rtx (tmode != VOIDmode ? tmode : mode);
10953
10954 convert_move (target, op0, unsignedp);
10955 return target;
10956 }
10957
10958 case OBJ_TYPE_REF:
10959 return expand_expr (OBJ_TYPE_REF_EXPR (exp), target, tmode, modifier);
10960
10961 case CALL_EXPR:
10962 /* All valid uses of __builtin_va_arg_pack () are removed during
10963 inlining. */
10964 if (CALL_EXPR_VA_ARG_PACK (exp))
10965 error ("%Kinvalid use of %<__builtin_va_arg_pack ()%>", exp);
10966 {
10967 tree fndecl = get_callee_fndecl (exp), attr;
10968
10969 if (fndecl
10970 && (attr = lookup_attribute ("error",
10971 DECL_ATTRIBUTES (fndecl))) != NULL)
10972 error ("%Kcall to %qs declared with attribute error: %s",
10973 exp, identifier_to_locale (lang_hooks.decl_printable_name (fndecl, 1)),
10974 TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr))));
10975 if (fndecl
10976 && (attr = lookup_attribute ("warning",
10977 DECL_ATTRIBUTES (fndecl))) != NULL)
10978 warning_at (tree_nonartificial_location (exp),
10979 0, "%Kcall to %qs declared with attribute warning: %s",
10980 exp, identifier_to_locale (lang_hooks.decl_printable_name (fndecl, 1)),
10981 TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr))));
10982
10983 /* Check for a built-in function. */
10984 if (fndecl && DECL_BUILT_IN (fndecl))
10985 {
10986 gcc_assert (DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_FRONTEND);
10987 if (CALL_WITH_BOUNDS_P (exp))
10988 return expand_builtin_with_bounds (exp, target, subtarget,
10989 tmode, ignore);
10990 else
10991 return expand_builtin (exp, target, subtarget, tmode, ignore);
10992 }
10993 }
10994 return expand_call (exp, target, ignore);
10995
10996 case VIEW_CONVERT_EXPR:
10997 op0 = NULL_RTX;
10998
10999 /* If we are converting to BLKmode, try to avoid an intermediate
11000 temporary by fetching an inner memory reference. */
11001 if (mode == BLKmode
11002 && poly_int_tree_p (TYPE_SIZE (type))
11003 && TYPE_MODE (TREE_TYPE (treeop0)) != BLKmode
11004 && handled_component_p (treeop0))
11005 {
11006 machine_mode mode1;
11007 poly_int64 bitsize, bitpos, bytepos;
11008 tree offset;
11009 int unsignedp, reversep, volatilep = 0;
11010 tree tem
11011 = get_inner_reference (treeop0, &bitsize, &bitpos, &offset, &mode1,
11012 &unsignedp, &reversep, &volatilep);
11013 rtx orig_op0;
11014
11015 /* ??? We should work harder and deal with non-zero offsets. */
11016 if (!offset
11017 && multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
11018 && !reversep
11019 && known_size_p (bitsize)
11020 && known_eq (wi::to_poly_offset (TYPE_SIZE (type)), bitsize))
11021 {
11022 /* See the normal_inner_ref case for the rationale. */
11023 orig_op0
11024 = expand_expr_real (tem,
11025 (TREE_CODE (TREE_TYPE (tem)) == UNION_TYPE
11026 && (TREE_CODE (TYPE_SIZE (TREE_TYPE (tem)))
11027 != INTEGER_CST)
11028 && modifier != EXPAND_STACK_PARM
11029 ? target : NULL_RTX),
11030 VOIDmode,
11031 modifier == EXPAND_SUM ? EXPAND_NORMAL : modifier,
11032 NULL, true);
11033
11034 if (MEM_P (orig_op0))
11035 {
11036 op0 = orig_op0;
11037
11038 /* Get a reference to just this component. */
11039 if (modifier == EXPAND_CONST_ADDRESS
11040 || modifier == EXPAND_SUM
11041 || modifier == EXPAND_INITIALIZER)
11042 op0 = adjust_address_nv (op0, mode, bytepos);
11043 else
11044 op0 = adjust_address (op0, mode, bytepos);
11045
11046 if (op0 == orig_op0)
11047 op0 = copy_rtx (op0);
11048
11049 set_mem_attributes (op0, treeop0, 0);
11050 if (REG_P (XEXP (op0, 0)))
11051 mark_reg_pointer (XEXP (op0, 0), MEM_ALIGN (op0));
11052
11053 MEM_VOLATILE_P (op0) |= volatilep;
11054 }
11055 }
11056 }
11057
11058 if (!op0)
11059 op0 = expand_expr_real (treeop0, NULL_RTX, VOIDmode, modifier,
11060 NULL, inner_reference_p);
11061
11062 /* If the input and output modes are both the same, we are done. */
11063 if (mode == GET_MODE (op0))
11064 ;
11065 /* If neither mode is BLKmode, and both modes are the same size
11066 then we can use gen_lowpart. */
11067 else if (mode != BLKmode
11068 && GET_MODE (op0) != BLKmode
11069 && known_eq (GET_MODE_PRECISION (mode),
11070 GET_MODE_PRECISION (GET_MODE (op0)))
11071 && !COMPLEX_MODE_P (GET_MODE (op0)))
11072 {
11073 if (GET_CODE (op0) == SUBREG)
11074 op0 = force_reg (GET_MODE (op0), op0);
11075 temp = gen_lowpart_common (mode, op0);
11076 if (temp)
11077 op0 = temp;
11078 else
11079 {
11080 if (!REG_P (op0) && !MEM_P (op0))
11081 op0 = force_reg (GET_MODE (op0), op0);
11082 op0 = gen_lowpart (mode, op0);
11083 }
11084 }
11085 /* If both types are integral, convert from one mode to the other. */
11086 else if (INTEGRAL_TYPE_P (type) && INTEGRAL_TYPE_P (TREE_TYPE (treeop0)))
11087 op0 = convert_modes (mode, GET_MODE (op0), op0,
11088 TYPE_UNSIGNED (TREE_TYPE (treeop0)));
11089 /* If the output type is a bit-field type, do an extraction. */
11090 else if (reduce_bit_field)
11091 return extract_bit_field (op0, TYPE_PRECISION (type), 0,
11092 TYPE_UNSIGNED (type), NULL_RTX,
11093 mode, mode, false, NULL);
11094 /* As a last resort, spill op0 to memory, and reload it in a
11095 different mode. */
11096 else if (!MEM_P (op0))
11097 {
11098 /* If the operand is not a MEM, force it into memory. Since we
11099 are going to be changing the mode of the MEM, don't call
11100 force_const_mem for constants because we don't allow pool
11101 constants to change mode. */
11102 tree inner_type = TREE_TYPE (treeop0);
11103
11104 gcc_assert (!TREE_ADDRESSABLE (exp));
11105
11106 if (target == 0 || GET_MODE (target) != TYPE_MODE (inner_type))
11107 target
11108 = assign_stack_temp_for_type
11109 (TYPE_MODE (inner_type),
11110 GET_MODE_SIZE (TYPE_MODE (inner_type)), inner_type);
11111
11112 emit_move_insn (target, op0);
11113 op0 = target;
11114 }
11115
11116 /* If OP0 is (now) a MEM, we need to deal with alignment issues. If the
11117 output type is such that the operand is known to be aligned, indicate
11118 that it is. Otherwise, we need only be concerned about alignment for
11119 non-BLKmode results. */
11120 if (MEM_P (op0))
11121 {
11122 enum insn_code icode;
11123
11124 if (modifier != EXPAND_WRITE
11125 && modifier != EXPAND_MEMORY
11126 && !inner_reference_p
11127 && mode != BLKmode
11128 && MEM_ALIGN (op0) < GET_MODE_ALIGNMENT (mode))
11129 {
11130 /* If the target has special handling for unaligned
11131 loads of this mode, use it. */
11132 if ((icode = optab_handler (movmisalign_optab, mode))
11133 != CODE_FOR_nothing)
11134 {
11135 rtx reg;
11136
11137 op0 = adjust_address (op0, mode, 0);
11138 /* We've already validated the memory, and we're creating a
11139 new pseudo destination. The predicates really can't
11140 fail. */
11141 reg = gen_reg_rtx (mode);
11142
11143 /* Nor can the insn generator. */
11144 rtx_insn *insn = GEN_FCN (icode) (reg, op0);
11145 emit_insn (insn);
11146 return reg;
11147 }
11148 else if (STRICT_ALIGNMENT)
11149 {
11150 tree inner_type = TREE_TYPE (treeop0);
11151 poly_uint64 mode_size = GET_MODE_SIZE (mode);
11152 poly_uint64 op0_size
11153 = tree_to_poly_uint64 (TYPE_SIZE_UNIT (inner_type));
11154 poly_int64 temp_size = upper_bound (op0_size, mode_size);
11155 rtx new_rtx
11156 = assign_stack_temp_for_type (mode, temp_size, type);
11157 rtx new_with_op0_mode
11158 = adjust_address (new_rtx, GET_MODE (op0), 0);
11159
11160 gcc_assert (!TREE_ADDRESSABLE (exp));
11161
11162 if (GET_MODE (op0) == BLKmode)
11163 {
11164 rtx size_rtx = gen_int_mode (mode_size, Pmode);
11165 emit_block_move (new_with_op0_mode, op0, size_rtx,
11166 (modifier == EXPAND_STACK_PARM
11167 ? BLOCK_OP_CALL_PARM
11168 : BLOCK_OP_NORMAL));
11169 }
11170 else
11171 emit_move_insn (new_with_op0_mode, op0);
11172
11173 op0 = new_rtx;
11174 }
11175 }
11176
11177 op0 = adjust_address (op0, mode, 0);
11178 }
11179
11180 return op0;
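/* Sketch of the same-size case above (hypothetical example, assuming
   SFmode and SImode both have 32-bit precision): for

       float f;  int i = VIEW_CONVERT_EXPR<int>(f);

   gen_lowpart reinterprets the SFmode value as SImode without going
   through memory; only when no lowpart can be formed does the code
   spill op0 to a stack temporary and reload it in the new mode.  */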
11181
11182 case MODIFY_EXPR:
11183 {
11184 tree lhs = treeop0;
11185 tree rhs = treeop1;
11186 gcc_assert (ignore);
11187
11188 /* Check for |= or &= of a bitfield of size 1 into another bitfield
11189 of size 1. In this case, (unless we need the result of the
11190 assignment) we can do this more efficiently with a
11191 test followed by an assignment, if necessary.
11192
11193 ??? At this point, we can't get a BIT_FIELD_REF. But if
11194 things change so that we do, this code should be enhanced to
11195 support it. */
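/* For instance (hypothetical 1-bit bitfields b and c):

       s.b |= t.c;

   expands as "if (t.c) s.b = 1;", i.e. a jump on t.c followed by a
   plain store of the constant, instead of a read-modify-write of
   s.b.  */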
11196 if (TREE_CODE (lhs) == COMPONENT_REF
11197 && (TREE_CODE (rhs) == BIT_IOR_EXPR
11198 || TREE_CODE (rhs) == BIT_AND_EXPR)
11199 && TREE_OPERAND (rhs, 0) == lhs
11200 && TREE_CODE (TREE_OPERAND (rhs, 1)) == COMPONENT_REF
11201 && integer_onep (DECL_SIZE (TREE_OPERAND (lhs, 1)))
11202 && integer_onep (DECL_SIZE (TREE_OPERAND (TREE_OPERAND (rhs, 1), 1))))
11203 {
11204 rtx_code_label *label = gen_label_rtx ();
11205 int value = TREE_CODE (rhs) == BIT_IOR_EXPR;
11206 do_jump (TREE_OPERAND (rhs, 1),
11207 value ? label : 0,
11208 value ? 0 : label,
11209 profile_probability::uninitialized ());
11210 expand_assignment (lhs, build_int_cst (TREE_TYPE (rhs), value),
11211 false);
11212 do_pending_stack_adjust ();
11213 emit_label (label);
11214 return const0_rtx;
11215 }
11216
11217 expand_assignment (lhs, rhs, false);
11218 return const0_rtx;
11219 }
11220
11221 case ADDR_EXPR:
11222 return expand_expr_addr_expr (exp, target, tmode, modifier);
11223
11224 case REALPART_EXPR:
11225 op0 = expand_normal (treeop0);
11226 return read_complex_part (op0, false);
11227
11228 case IMAGPART_EXPR:
11229 op0 = expand_normal (treeop0);
11230 return read_complex_part (op0, true);
11231
11232 case RETURN_EXPR:
11233 case LABEL_EXPR:
11234 case GOTO_EXPR:
11235 case SWITCH_EXPR:
11236 case ASM_EXPR:
11237 /* Expanded in cfgexpand.c. */
11238 gcc_unreachable ();
11239
11240 case TRY_CATCH_EXPR:
11241 case CATCH_EXPR:
11242 case EH_FILTER_EXPR:
11243 case TRY_FINALLY_EXPR:
11244 /* Lowered by tree-eh.c. */
11245 gcc_unreachable ();
11246
11247 case WITH_CLEANUP_EXPR:
11248 case CLEANUP_POINT_EXPR:
11249 case TARGET_EXPR:
11250 case CASE_LABEL_EXPR:
11251 case VA_ARG_EXPR:
11252 case BIND_EXPR:
11253 case INIT_EXPR:
11254 case CONJ_EXPR:
11255 case COMPOUND_EXPR:
11256 case PREINCREMENT_EXPR:
11257 case PREDECREMENT_EXPR:
11258 case POSTINCREMENT_EXPR:
11259 case POSTDECREMENT_EXPR:
11260 case LOOP_EXPR:
11261 case EXIT_EXPR:
11262 case COMPOUND_LITERAL_EXPR:
11263 /* Lowered by gimplify.c. */
11264 gcc_unreachable ();
11265
11266 case FDESC_EXPR:
11267 /* Function descriptors are not valid except for as
11268 initialization constants, and should not be expanded. */
11269 gcc_unreachable ();
11270
11271 case WITH_SIZE_EXPR:
11272 /* WITH_SIZE_EXPR expands to its first argument. The caller should
11273 have pulled out the size to use in whatever context it needed. */
11274 return expand_expr_real (treeop0, original_target, tmode,
11275 modifier, alt_rtl, inner_reference_p);
11276
11277 default:
11278 return expand_expr_real_2 (&ops, target, tmode, modifier);
11279 }
11280 }
11281 \f
11282 /* Subroutine of above: reduce EXP to the precision of TYPE (in the
11283 signedness of TYPE), possibly returning the result in TARGET.
11284 TYPE is known to be a partial integer type. */
11285 static rtx
11286 reduce_to_bit_field_precision (rtx exp, rtx target, tree type)
11287 {
11288 HOST_WIDE_INT prec = TYPE_PRECISION (type);
11289 if (target && GET_MODE (target) != GET_MODE (exp))
11290 target = 0;
11291 /* For constant values, reduce using build_int_cst_type. */
11292 if (CONST_INT_P (exp))
11293 {
11294 HOST_WIDE_INT value = INTVAL (exp);
11295 tree t = build_int_cst_type (type, value);
11296 return expand_expr (t, target, VOIDmode, EXPAND_NORMAL);
11297 }
11298 else if (TYPE_UNSIGNED (type))
11299 {
11300 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (exp));
11301 rtx mask = immed_wide_int_const
11302 (wi::mask (prec, false, GET_MODE_PRECISION (mode)), mode);
11303 return expand_and (mode, exp, mask, target);
11304 }
11305 else
11306 {
11307 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (exp));
11308 int count = GET_MODE_PRECISION (mode) - prec;
11309 exp = expand_shift (LSHIFT_EXPR, mode, exp, count, target, 0);
11310 return expand_shift (RSHIFT_EXPR, mode, exp, count, target, 0);
11311 }
11312 }
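/* Worked example (hypothetical, assuming a 32-bit SImode): reducing a
   value to a 5-bit unsigned type computes

       exp & 0x1f

   while the 5-bit signed case uses count = 32 - 5 = 27 and computes

       (exp << 27) >> 27

   with an arithmetic right shift, so the sign bit of the field is
   propagated into the upper bits.  */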
11313 \f
11314 /* Subroutine of above: returns 1 if OFFSET corresponds to an offset that
11315 when applied to the address of EXP produces an address known to be
11316 aligned to more than BIGGEST_ALIGNMENT. */
11317
11318 static int
11319 is_aligning_offset (const_tree offset, const_tree exp)
11320 {
11321 /* Strip off any conversions. */
11322 while (CONVERT_EXPR_P (offset))
11323 offset = TREE_OPERAND (offset, 0);
11324
11325 /* We must now have a BIT_AND_EXPR with a constant that is one less than
11326 a power of 2 and larger than BIGGEST_ALIGNMENT / BITS_PER_UNIT. */
11327 if (TREE_CODE (offset) != BIT_AND_EXPR
11328 || !tree_fits_uhwi_p (TREE_OPERAND (offset, 1))
11329 || compare_tree_int (TREE_OPERAND (offset, 1),
11330 BIGGEST_ALIGNMENT / BITS_PER_UNIT) <= 0
11331 || !pow2p_hwi (tree_to_uhwi (TREE_OPERAND (offset, 1)) + 1))
11332 return 0;
11333
11334 /* Look at the first operand of BIT_AND_EXPR and strip any conversion.
11335 It must be NEGATE_EXPR. Then strip any more conversions. */
11336 offset = TREE_OPERAND (offset, 0);
11337 while (CONVERT_EXPR_P (offset))
11338 offset = TREE_OPERAND (offset, 0);
11339
11340 if (TREE_CODE (offset) != NEGATE_EXPR)
11341 return 0;
11342
11343 offset = TREE_OPERAND (offset, 0);
11344 while (CONVERT_EXPR_P (offset))
11345 offset = TREE_OPERAND (offset, 0);
11346
11347 /* This must now be the address of EXP. */
11348 return TREE_CODE (offset) == ADDR_EXPR && TREE_OPERAND (offset, 0) == exp;
11349 }
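/* The pattern recognized above is the usual idiom for over-aligning an
   object, e.g. (hypothetical source, ALIGN a power of 2 whose byte
   count exceeds BIGGEST_ALIGNMENT / BITS_PER_UNIT):

       (char *) &exp + ((- (uintptr_t) &exp) & (ALIGN - 1))

   where the BIT_AND_EXPR of the negated address with ALIGN - 1 rounds
   the address of EXP up to an ALIGN-byte boundary.  */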
11350 \f
11351 /* Return the tree node if ARG corresponds to a string constant, or zero
11352 if it doesn't. If we return nonzero, set *PTR_OFFSET to the offset
11353 in bytes within the string that ARG is accessing. The type of the
11354 offset will be `sizetype'. */
11355
11356 tree
11357 string_constant (tree arg, tree *ptr_offset)
11358 {
11359 tree array, offset, lower_bound;
11360 STRIP_NOPS (arg);
11361
11362 if (TREE_CODE (arg) == ADDR_EXPR)
11363 {
11364 if (TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST)
11365 {
11366 *ptr_offset = size_zero_node;
11367 return TREE_OPERAND (arg, 0);
11368 }
11369 else if (TREE_CODE (TREE_OPERAND (arg, 0)) == VAR_DECL)
11370 {
11371 array = TREE_OPERAND (arg, 0);
11372 offset = size_zero_node;
11373 }
11374 else if (TREE_CODE (TREE_OPERAND (arg, 0)) == ARRAY_REF)
11375 {
11376 array = TREE_OPERAND (TREE_OPERAND (arg, 0), 0);
11377 offset = TREE_OPERAND (TREE_OPERAND (arg, 0), 1);
11378 if (TREE_CODE (array) != STRING_CST && !VAR_P (array))
11379 return 0;
11380
11381 /* Check if the array has a nonzero lower bound. */
11382 lower_bound = array_ref_low_bound (TREE_OPERAND (arg, 0));
11383 if (!integer_zerop (lower_bound))
11384 {
11385 /* If the offset and base aren't both constants, return 0. */
11386 if (TREE_CODE (lower_bound) != INTEGER_CST)
11387 return 0;
11388 if (TREE_CODE (offset) != INTEGER_CST)
11389 return 0;
11390 /* Adjust offset by the lower bound. */
11391 offset = size_diffop (fold_convert (sizetype, offset),
11392 fold_convert (sizetype, lower_bound));
11393 }
11394 }
11395 else if (TREE_CODE (TREE_OPERAND (arg, 0)) == MEM_REF)
11396 {
11397 array = TREE_OPERAND (TREE_OPERAND (arg, 0), 0);
11398 offset = TREE_OPERAND (TREE_OPERAND (arg, 0), 1);
11399 if (TREE_CODE (array) != ADDR_EXPR)
11400 return 0;
11401 array = TREE_OPERAND (array, 0);
11402 if (TREE_CODE (array) != STRING_CST && !VAR_P (array))
11403 return 0;
11404 }
11405 else
11406 return 0;
11407 }
11408 else if (TREE_CODE (arg) == PLUS_EXPR || TREE_CODE (arg) == POINTER_PLUS_EXPR)
11409 {
11410 tree arg0 = TREE_OPERAND (arg, 0);
11411 tree arg1 = TREE_OPERAND (arg, 1);
11412
11413 STRIP_NOPS (arg0);
11414 STRIP_NOPS (arg1);
11415
11416 if (TREE_CODE (arg0) == ADDR_EXPR
11417 && (TREE_CODE (TREE_OPERAND (arg0, 0)) == STRING_CST
11418 || TREE_CODE (TREE_OPERAND (arg0, 0)) == VAR_DECL))
11419 {
11420 array = TREE_OPERAND (arg0, 0);
11421 offset = arg1;
11422 }
11423 else if (TREE_CODE (arg1) == ADDR_EXPR
11424 && (TREE_CODE (TREE_OPERAND (arg1, 0)) == STRING_CST
11425 || TREE_CODE (TREE_OPERAND (arg1, 0)) == VAR_DECL))
11426 {
11427 array = TREE_OPERAND (arg1, 0);
11428 offset = arg0;
11429 }
11430 else
11431 return 0;
11432 }
11433 else
11434 return 0;
11435
11436 if (TREE_CODE (array) == STRING_CST)
11437 {
11438 *ptr_offset = fold_convert (sizetype, offset);
11439 return array;
11440 }
11441 else if (VAR_P (array) || TREE_CODE (array) == CONST_DECL)
11442 {
11443 int length;
11444 tree init = ctor_for_folding (array);
11445
11446 /* Variables initialized to string literals can be handled too. */
11447 if (init == error_mark_node
11448 || !init
11449 || TREE_CODE (init) != STRING_CST)
11450 return 0;
11451
11452 /* Avoid const char foo[4] = "abcde"; */
11453 if (DECL_SIZE_UNIT (array) == NULL_TREE
11454 || TREE_CODE (DECL_SIZE_UNIT (array)) != INTEGER_CST
11455 || (length = TREE_STRING_LENGTH (init)) <= 0
11456 || compare_tree_int (DECL_SIZE_UNIT (array), length) < 0)
11457 return 0;
11458
11459 /* If the variable is bigger than the string literal, OFFSET must be
11460 constant and within the bounds of the string literal. */
11461 offset = fold_convert (sizetype, offset);
11462 if (compare_tree_int (DECL_SIZE_UNIT (array), length) > 0
11463 && (! tree_fits_uhwi_p (offset)
11464 || compare_tree_int (offset, length) >= 0))
11465 return 0;
11466
11467 *ptr_offset = offset;
11468 return init;
11469 }
11470
11471 return 0;
11472 }
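/* For example (hypothetical arguments): given ARG representing
   &"hello"[2], this returns the STRING_CST "hello" and sets
   *PTR_OFFSET to a sizetype constant 2; given the address of a const
   char array initialized from a string literal, it returns that
   literal instead, subject to the bounds checks above.  */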
11473 \f
11474 /* Generate code to calculate the value of OPS, an exploded comparison
11475 expression, using a store-flag instruction, and return an rtx for
11476 the result.
11477
11478 If TARGET is nonzero, store the result there if convenient.
11479
11480 Return zero if there is no suitable set-flag instruction
11481 available on this machine.
11482
11483 Once expand_expr has been called on the arguments of the comparison,
11484 we are committed to doing the store flag, since it is not safe to
11485 re-evaluate the expression. We emit the store-flag insn by calling
11486 emit_store_flag, but only expand the arguments if we have a reason
11487 to believe that emit_store_flag will be successful. If we think that
11488 it will, but it isn't, we have to simulate the store-flag with a
11489 set/jump/set sequence. */
11490
11491 static rtx
11492 do_store_flag (sepops ops, rtx target, machine_mode mode)
11493 {
11494 enum rtx_code code;
11495 tree arg0, arg1, type;
11496 machine_mode operand_mode;
11497 int unsignedp;
11498 rtx op0, op1;
11499 rtx subtarget = target;
11500 location_t loc = ops->location;
11501
11502 arg0 = ops->op0;
11503 arg1 = ops->op1;
11504
11505 /* Don't crash if the comparison was erroneous. */
11506 if (arg0 == error_mark_node || arg1 == error_mark_node)
11507 return const0_rtx;
11508
11509 type = TREE_TYPE (arg0);
11510 operand_mode = TYPE_MODE (type);
11511 unsignedp = TYPE_UNSIGNED (type);
11512
11513 /* We won't bother with BLKmode store-flag operations because it would mean
11514 passing a lot of information to emit_store_flag. */
11515 if (operand_mode == BLKmode)
11516 return 0;
11517
11518 /* We won't bother with store-flag operations involving function pointers
11519 when function pointers must be canonicalized before comparisons. */
11520 if (targetm.have_canonicalize_funcptr_for_compare ()
11521 && ((TREE_CODE (TREE_TYPE (arg0)) == POINTER_TYPE
11522 && (TREE_CODE (TREE_TYPE (TREE_TYPE (arg0)))
11523 == FUNCTION_TYPE))
11524 || (TREE_CODE (TREE_TYPE (arg1)) == POINTER_TYPE
11525 && (TREE_CODE (TREE_TYPE (TREE_TYPE (arg1)))
11526 == FUNCTION_TYPE))))
11527 return 0;
11528
11529 STRIP_NOPS (arg0);
11530 STRIP_NOPS (arg1);
11531
11532 /* For vector typed comparisons emit code to generate the desired
11533 all-ones or all-zeros mask. Conveniently use the VEC_COND_EXPR
11534 expander for this. */
11535 if (TREE_CODE (ops->type) == VECTOR_TYPE)
11536 {
11537 tree ifexp = build2 (ops->code, ops->type, arg0, arg1);
11538 if (VECTOR_BOOLEAN_TYPE_P (ops->type)
11539 && expand_vec_cmp_expr_p (TREE_TYPE (arg0), ops->type, ops->code))
11540 return expand_vec_cmp_expr (ops->type, ifexp, target);
11541 else
11542 {
11543 tree if_true = constant_boolean_node (true, ops->type);
11544 tree if_false = constant_boolean_node (false, ops->type);
11545 return expand_vec_cond_expr (ops->type, ifexp, if_true,
11546 if_false, target);
11547 }
11548 }
11549
11550 /* Get the rtx comparison code to use. We know that EXP is a comparison
11551 operation of some type. Some comparisons against 1 and -1 can be
11552 converted to comparisons with zero. Do so here so that the tests
11553 below will be aware that we have a comparison with zero. These
11554 tests will not catch constants in the first operand, but constants
11555 are rarely passed as the first operand. */
11556
11557 switch (ops->code)
11558 {
11559 case EQ_EXPR:
11560 code = EQ;
11561 break;
11562 case NE_EXPR:
11563 code = NE;
11564 break;
11565 case LT_EXPR:
11566 if (integer_onep (arg1))
11567 arg1 = integer_zero_node, code = unsignedp ? LEU : LE;
11568 else
11569 code = unsignedp ? LTU : LT;
11570 break;
11571 case LE_EXPR:
11572 if (! unsignedp && integer_all_onesp (arg1))
11573 arg1 = integer_zero_node, code = LT;
11574 else
11575 code = unsignedp ? LEU : LE;
11576 break;
11577 case GT_EXPR:
11578 if (! unsignedp && integer_all_onesp (arg1))
11579 arg1 = integer_zero_node, code = GE;
11580 else
11581 code = unsignedp ? GTU : GT;
11582 break;
11583 case GE_EXPR:
11584 if (integer_onep (arg1))
11585 arg1 = integer_zero_node, code = unsignedp ? GTU : GT;
11586 else
11587 code = unsignedp ? GEU : GE;
11588 break;
11589
11590 case UNORDERED_EXPR:
11591 code = UNORDERED;
11592 break;
11593 case ORDERED_EXPR:
11594 code = ORDERED;
11595 break;
11596 case UNLT_EXPR:
11597 code = UNLT;
11598 break;
11599 case UNLE_EXPR:
11600 code = UNLE;
11601 break;
11602 case UNGT_EXPR:
11603 code = UNGT;
11604 break;
11605 case UNGE_EXPR:
11606 code = UNGE;
11607 break;
11608 case UNEQ_EXPR:
11609 code = UNEQ;
11610 break;
11611 case LTGT_EXPR:
11612 code = LTGT;
11613 break;
11614
11615 default:
11616 gcc_unreachable ();
11617 }
11618
11619 /* Put a constant second. */
11620 if (TREE_CODE (arg0) == REAL_CST || TREE_CODE (arg0) == INTEGER_CST
11621 || TREE_CODE (arg0) == FIXED_CST)
11622 {
11623 std::swap (arg0, arg1);
11624 code = swap_condition (code);
11625 }
11626
11627 /* If this is an equality or inequality test of a single bit, we can
11628 do this by shifting the bit being tested to the low-order bit and
11629 masking the result with the constant 1. If the condition was EQ,
11630 we xor it with 1. This does not require an scc insn and is faster
11631 than an scc insn even if we have it.
11632
11633 The code to make this transformation was moved into fold_single_bit_test,
11634 so we just call into the folder and expand its result. */
11635
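/* For example (hypothetical operands): "(x & 8) != 0" is folded by
   fold_single_bit_test into "(x >> 3) & 1", and "(x & 8) == 0" into
   "((x >> 3) & 1) ^ 1", which are then expanded normally.  */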
11636 if ((code == NE || code == EQ)
11637 && integer_zerop (arg1)
11638 && (TYPE_PRECISION (ops->type) != 1 || TYPE_UNSIGNED (ops->type)))
11639 {
11640 gimple *srcstmt = get_def_for_expr (arg0, BIT_AND_EXPR);
11641 if (srcstmt
11642 && integer_pow2p (gimple_assign_rhs2 (srcstmt)))
11643 {
11644 enum tree_code tcode = code == NE ? NE_EXPR : EQ_EXPR;
11645 tree type = lang_hooks.types.type_for_mode (mode, unsignedp);
11646 tree temp = fold_build2_loc (loc, BIT_AND_EXPR, TREE_TYPE (arg1),
11647 gimple_assign_rhs1 (srcstmt),
11648 gimple_assign_rhs2 (srcstmt));
11649 temp = fold_single_bit_test (loc, tcode, temp, arg1, type);
11650 if (temp)
11651 return expand_expr (temp, target, VOIDmode, EXPAND_NORMAL);
11652 }
11653 }
11654
11655 if (! get_subtarget (target)
11656 || GET_MODE (subtarget) != operand_mode)
11657 subtarget = 0;
11658
11659 expand_operands (arg0, arg1, subtarget, &op0, &op1, EXPAND_NORMAL);
11660
11661 if (target == 0)
11662 target = gen_reg_rtx (mode);
11663
11664 /* Try a cstore if possible. */
11665 return emit_store_flag_force (target, code, op0, op1,
11666 operand_mode, unsignedp,
11667 (TYPE_PRECISION (ops->type) == 1
11668 && !TYPE_UNSIGNED (ops->type)) ? -1 : 1);
11669 }
11670 \f
11671 /* Attempt to generate a casesi instruction. Returns 1 if successful,
11672 0 otherwise (i.e. if there is no casesi instruction).
11673
11674 DEFAULT_PROBABILITY is the probability of jumping to the default
11675 label. */
11676 int
11677 try_casesi (tree index_type, tree index_expr, tree minval, tree range,
11678 rtx table_label, rtx default_label, rtx fallback_label,
11679 profile_probability default_probability)
11680 {
11681 struct expand_operand ops[5];
11682 scalar_int_mode index_mode = SImode;
11683 rtx op1, op2, index;
11684
11685 if (! targetm.have_casesi ())
11686 return 0;
11687
11688 /* The index must be some form of integer. Convert it to SImode. */
11689 scalar_int_mode omode = SCALAR_INT_TYPE_MODE (index_type);
11690 if (GET_MODE_BITSIZE (omode) > GET_MODE_BITSIZE (index_mode))
11691 {
11692 rtx rangertx = expand_normal (range);
11693
11694 /* We must handle the endpoints in the original mode. */
11695 index_expr = build2 (MINUS_EXPR, index_type,
11696 index_expr, minval);
11697 minval = integer_zero_node;
11698 index = expand_normal (index_expr);
11699 if (default_label)
11700 emit_cmp_and_jump_insns (rangertx, index, LTU, NULL_RTX,
11701 omode, 1, default_label,
11702 default_probability);
11703 /* Now we can safely truncate. */
11704 index = convert_to_mode (index_mode, index, 0);
11705 }
11706 else
11707 {
11708 if (omode != index_mode)
11709 {
11710 index_type = lang_hooks.types.type_for_mode (index_mode, 0);
11711 index_expr = fold_convert (index_type, index_expr);
11712 }
11713
11714 index = expand_normal (index_expr);
11715 }
11716
11717 do_pending_stack_adjust ();
11718
11719 op1 = expand_normal (minval);
11720 op2 = expand_normal (range);
11721
11722 create_input_operand (&ops[0], index, index_mode);
11723 create_convert_operand_from_type (&ops[1], op1, TREE_TYPE (minval));
11724 create_convert_operand_from_type (&ops[2], op2, TREE_TYPE (range));
11725 create_fixed_operand (&ops[3], table_label);
11726 create_fixed_operand (&ops[4], (default_label
11727 ? default_label
11728 : fallback_label));
11729 expand_jump_insn (targetm.code_for_casesi, 5, ops);
11730 return 1;
11731 }
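/* Hypothetical illustration: for

       switch (x) { case 5: ... case 10: ... }

   the caller passes MINVAL = 5 and RANGE = 5, and the casesi pattern
   receives the (possibly SImode-converted) index, the lower bound, the
   range, the table label and the out-of-range label, performing the
   bounds check and the indexed jump itself.  */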
11732
11733 /* Attempt to generate a tablejump instruction; same concept as casesi.
11734 Subroutine of try_tablejump below.
11735
11736 INDEX is the value being switched on, with the lowest value
11737 in the table already subtracted.
11738 MODE is its expected mode (needed if INDEX is constant).
11739 RANGE is the length of the jump table.
11740 TABLE_LABEL is a CODE_LABEL rtx for the table itself.
11741
11742 DEFAULT_LABEL is a CODE_LABEL rtx to jump to if the
11743 index value is out of range.
11744 DEFAULT_PROBABILITY is the probability of jumping to
11745 the default label. */
11746
11747 static void
11748 do_tablejump (rtx index, machine_mode mode, rtx range, rtx table_label,
11749 rtx default_label, profile_probability default_probability)
11750 {
11751 rtx temp, vector;
11752
11753 if (INTVAL (range) > cfun->cfg->max_jumptable_ents)
11754 cfun->cfg->max_jumptable_ents = INTVAL (range);
11755
11756 /* Do an unsigned comparison (in the proper mode) between the index
11757 expression and the value which represents the length of the range.
11758 Since we just finished subtracting the lower bound of the range
11759 from the index expression, this comparison allows us to simultaneously
11760 check that the original index expression value is both greater than
11761 or equal to the minimum value of the range and less than or equal to
11762 the maximum value of the range. */
11763
11764 if (default_label)
11765 emit_cmp_and_jump_insns (index, range, GTU, NULL_RTX, mode, 1,
11766 default_label, default_probability);
11767
11768
11769 /* If index is in range, it must fit in Pmode.
11770 Convert to Pmode so we can index with it. */
11771 if (mode != Pmode)
11772 index = convert_to_mode (Pmode, index, 1);
11773
11774 /* Don't let a MEM slip through, because then INDEX that comes
11775 out of PIC_CASE_VECTOR_ADDRESS won't be a valid address,
11776 and break_out_memory_refs will go to work on it and mess it up. */
11777 #ifdef PIC_CASE_VECTOR_ADDRESS
11778 if (flag_pic && !REG_P (index))
11779 index = copy_to_mode_reg (Pmode, index);
11780 #endif
11781
11782 /* ??? The only correct use of CASE_VECTOR_MODE is the one inside the
11783 GET_MODE_SIZE, because this indicates how large insns are. The other
11784 uses should all be Pmode, because they are addresses. This code
11785 could fail if addresses and insns are not the same size. */
11786 index = simplify_gen_binary (MULT, Pmode, index,
11787 gen_int_mode (GET_MODE_SIZE (CASE_VECTOR_MODE),
11788 Pmode));
11789 index = simplify_gen_binary (PLUS, Pmode, index,
11790 gen_rtx_LABEL_REF (Pmode, table_label));
11791
11792 #ifdef PIC_CASE_VECTOR_ADDRESS
11793 if (flag_pic)
11794 index = PIC_CASE_VECTOR_ADDRESS (index);
11795 else
11796 #endif
11797 index = memory_address (CASE_VECTOR_MODE, index);
11798 temp = gen_reg_rtx (CASE_VECTOR_MODE);
11799 vector = gen_const_mem (CASE_VECTOR_MODE, index);
11800 convert_move (temp, vector, 0);
11801
11802 emit_jump_insn (targetm.gen_tablejump (temp, table_label));
11803
11804 /* If we are generating PIC code or if the table is PC-relative, the
11805 table and JUMP_INSN must be adjacent, so don't output a BARRIER. */
11806 if (! CASE_VECTOR_PC_RELATIVE && ! flag_pic)
11807 emit_barrier ();
11808 }
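/* In other words, the address computed above (before any PIC
   adjustment) is

       table_label + index * GET_MODE_SIZE (CASE_VECTOR_MODE)

   i.e. a byte offset into the dispatch table, from which the target
   label is loaded and then jumped to.  */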
11809
11810 int
11811 try_tablejump (tree index_type, tree index_expr, tree minval, tree range,
11812 rtx table_label, rtx default_label,
11813 profile_probability default_probability)
11814 {
11815 rtx index;
11816
11817 if (! targetm.have_tablejump ())
11818 return 0;
11819
11820 index_expr = fold_build2 (MINUS_EXPR, index_type,
11821 fold_convert (index_type, index_expr),
11822 fold_convert (index_type, minval));
11823 index = expand_normal (index_expr);
11824 do_pending_stack_adjust ();
11825
11826 do_tablejump (index, TYPE_MODE (index_type),
11827 convert_modes (TYPE_MODE (index_type),
11828 TYPE_MODE (TREE_TYPE (range)),
11829 expand_normal (range),
11830 TYPE_UNSIGNED (TREE_TYPE (range))),
11831 table_label, default_label, default_probability);
11832 return 1;
11833 }
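/* Note the contrast with try_casesi above: the casesi pattern does its
   own bounds check, whereas here the range check is emitted explicitly
   by do_tablejump before the indexed jump.  */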
11834
11835 /* Return a CONST_VECTOR rtx representing the vector mask for
11836 a VECTOR_CST of booleans. */
11837 static rtx
11838 const_vector_mask_from_tree (tree exp)
11839 {
11840 machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
11841 machine_mode inner = GET_MODE_INNER (mode);
11842
11843 rtx_vector_builder builder (mode, VECTOR_CST_NPATTERNS (exp),
11844 VECTOR_CST_NELTS_PER_PATTERN (exp));
11845 unsigned int count = builder.encoded_nelts ();
11846 for (unsigned int i = 0; i < count; ++i)
11847 {
11848 tree elt = VECTOR_CST_ELT (exp, i);
11849 gcc_assert (TREE_CODE (elt) == INTEGER_CST);
11850 if (integer_zerop (elt))
11851 builder.quick_push (CONST0_RTX (inner));
11852 else if (integer_onep (elt)
11853 || integer_minus_onep (elt))
11854 builder.quick_push (CONSTM1_RTX (inner));
11855 else
11856 gcc_unreachable ();
11857 }
11858 return builder.build ();
11859 }
11860
11861 /* EXP is a VECTOR_CST in which each element is either all-zeros or all-ones.
11862 Return a constant scalar rtx of mode MODE in which bit X is set if element
11863 X of EXP is nonzero. */
11864 static rtx
11865 const_scalar_mask_from_tree (scalar_int_mode mode, tree exp)
11866 {
11867 wide_int res = wi::zero (GET_MODE_PRECISION (mode));
11868 tree elt;
11869
11870 /* The result has a fixed number of bits so the input must too. */
11871 unsigned int nunits = VECTOR_CST_NELTS (exp).to_constant ();
11872 for (unsigned int i = 0; i < nunits; ++i)
11873 {
11874 elt = VECTOR_CST_ELT (exp, i);
11875 gcc_assert (TREE_CODE (elt) == INTEGER_CST);
11876 if (integer_all_onesp (elt))
11877 res = wi::set_bit (res, i);
11878 else
11879 gcc_assert (integer_zerop (elt));
11880 }
11881
11882 return immed_wide_int_const (res, mode);
11883 }
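/* For example (hypothetical 4-lane boolean vector): a VECTOR_CST with
   lanes {true, false, true, true} becomes, via
   const_vector_mask_from_tree, a CONST_VECTOR {-1, 0, -1, -1} with an
   all-ones element per true lane, and via const_scalar_mask_from_tree
   an integer with bits 0, 2 and 3 set, i.e. 0xd.  */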
11884
11885 /* Return a CONST_VECTOR rtx for a VECTOR_CST tree. */
11886 static rtx
11887 const_vector_from_tree (tree exp)
11888 {
11889 machine_mode mode = TYPE_MODE (TREE_TYPE (exp));
11890
11891 if (initializer_zerop (exp))
11892 return CONST0_RTX (mode);
11893
11894 if (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (exp)))
11895 return const_vector_mask_from_tree (exp);
11896
11897 machine_mode inner = GET_MODE_INNER (mode);
11898
11899 rtx_vector_builder builder (mode, VECTOR_CST_NPATTERNS (exp),
11900 VECTOR_CST_NELTS_PER_PATTERN (exp));
11901 unsigned int count = builder.encoded_nelts ();
11902 for (unsigned int i = 0; i < count; ++i)
11903 {
11904 tree elt = VECTOR_CST_ELT (exp, i);
11905 if (TREE_CODE (elt) == REAL_CST)
11906 builder.quick_push (const_double_from_real_value (TREE_REAL_CST (elt),
11907 inner));
11908 else if (TREE_CODE (elt) == FIXED_CST)
11909 builder.quick_push (CONST_FIXED_FROM_FIXED_VALUE (TREE_FIXED_CST (elt),
11910 inner));
11911 else
11912 builder.quick_push (immed_wide_int_const (wi::to_poly_wide (elt),
11913 inner));
11914 }
11915 return builder.build ();
11916 }
11917
11918 /* Build a decl for a personality function given a language prefix. */
11919
11920 tree
11921 build_personality_function (const char *lang)
11922 {
11923 const char *unwind_and_version;
11924 tree decl, type;
11925 char *name;
11926
11927 switch (targetm_common.except_unwind_info (&global_options))
11928 {
11929 case UI_NONE:
11930 return NULL;
11931 case UI_SJLJ:
11932 unwind_and_version = "_sj0";
11933 break;
11934 case UI_DWARF2:
11935 case UI_TARGET:
11936 unwind_and_version = "_v0";
11937 break;
11938 case UI_SEH:
11939 unwind_and_version = "_seh0";
11940 break;
11941 default:
11942 gcc_unreachable ();
11943 }
11944
11945 name = ACONCAT (("__", lang, "_personality", unwind_and_version, NULL));
11946
11947 type = build_function_type_list (integer_type_node, integer_type_node,
11948 long_long_unsigned_type_node,
11949 ptr_type_node, ptr_type_node, NULL_TREE);
11950 decl = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
11951 get_identifier (name), type);
11952 DECL_ARTIFICIAL (decl) = 1;
11953 DECL_EXTERNAL (decl) = 1;
11954 TREE_PUBLIC (decl) = 1;
11955
11956 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
11957 are the flags assigned by targetm.encode_section_info. */
11958 SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);
11959
11960 return decl;
11961 }
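/* For example (hypothetical language prefix): with DWARF2 unwind info,
   LANG "gxx" yields an extern, public declaration named
   "__gxx_personality_v0"; with SJLJ unwinding the name would be
   "__gxx_personality_sj0".  */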
11962
11963 /* Extracts the personality function of DECL and returns the corresponding
11964 libfunc. */
11965
11966 rtx
11967 get_personality_function (tree decl)
11968 {
11969 tree personality = DECL_FUNCTION_PERSONALITY (decl);
11970 enum eh_personality_kind pk;
11971
11972 pk = function_needs_eh_personality (DECL_STRUCT_FUNCTION (decl));
11973 if (pk == eh_personality_none)
11974 return NULL;
11975
11976 if (!personality
11977 && pk == eh_personality_any)
11978 personality = lang_hooks.eh_personality ();
11979
11980 if (pk == eh_personality_lang)
11981 gcc_assert (personality != NULL_TREE);
11982
11983 return XEXP (DECL_RTL (personality), 0);
11984 }
11985
11986 /* Returns a tree for the size of EXP in bytes. */
11987
11988 static tree
11989 tree_expr_size (const_tree exp)
11990 {
11991 if (DECL_P (exp)
11992 && DECL_SIZE_UNIT (exp) != 0)
11993 return DECL_SIZE_UNIT (exp);
11994 else
11995 return size_in_bytes (TREE_TYPE (exp));
11996 }
11997
11998 /* Return an rtx for the size in bytes of the value of EXP. */
11999
12000 rtx
12001 expr_size (tree exp)
12002 {
12003 tree size;
12004
12005 if (TREE_CODE (exp) == WITH_SIZE_EXPR)
12006 size = TREE_OPERAND (exp, 1);
12007 else
12008 {
12009 size = tree_expr_size (exp);
12010 gcc_assert (size);
12011 gcc_assert (size == SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, exp));
12012 }
12013
12014 return expand_expr (size, NULL_RTX, TYPE_MODE (sizetype), EXPAND_NORMAL);
12015 }
12016
12017 /* Return a wide integer for the size in bytes of the value of EXP, or -1
12018 if the size can vary or doesn't fit in a HOST_WIDE_INT. */
12019
12020 static HOST_WIDE_INT
12021 int_expr_size (tree exp)
12022 {
12023 tree size;
12024
12025 if (TREE_CODE (exp) == WITH_SIZE_EXPR)
12026 size = TREE_OPERAND (exp, 1);
12027 else
12028 {
12029 size = tree_expr_size (exp);
12030 gcc_assert (size);
12031 }
12032
12033 if (size == 0 || !tree_fits_shwi_p (size))
12034 return -1;
12035
12036 return tree_to_shwi (size);
12037 }