1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987-2021 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
35 #include "diagnostic-core.h"
36 #include "rtx-vector-builder.h"
38 /* Include insn-config.h before expr.h so that HAVE_conditional_move
39 is properly defined. */
40 #include "stor-layout.h"
45 #include "optabs-tree.h"
47 #include "internal-fn.h"
48 #include "langhooks.h"
50 static void prepare_float_lib_cmp (rtx
, rtx
, enum rtx_code
, rtx
*,
52 static rtx
expand_unop_direct (machine_mode
, optab
, rtx
, rtx
, int);
53 static void emit_libcall_block_1 (rtx_insn
*, rtx
, rtx
, rtx
, bool);
55 /* Debug facility for use in GDB. */
56 void debug_optab_libfuncs (void);
58 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
59 the result of operation CODE applied to OP0 (and OP1 if it is a binary
60 operation). OP0_MODE is OP0's mode.
62 If the last insn does not set TARGET, don't do anything, but return 1.
64 If the last insn or a previous insn sets TARGET and TARGET is one of OP0
65 or OP1, don't add the REG_EQUAL note but return 0. Our caller can then
66 try again, ensuring that TARGET is not one of the operands. */
69 add_equal_note (rtx_insn
*insns
, rtx target
, enum rtx_code code
, rtx op0
,
70 rtx op1
, machine_mode op0_mode
)
76 gcc_assert (insns
&& INSN_P (insns
) && NEXT_INSN (insns
));
78 if (GET_RTX_CLASS (code
) != RTX_COMM_ARITH
79 && GET_RTX_CLASS (code
) != RTX_BIN_ARITH
80 && GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
81 && GET_RTX_CLASS (code
) != RTX_COMPARE
82 && GET_RTX_CLASS (code
) != RTX_UNARY
)
85 if (GET_CODE (target
) == ZERO_EXTRACT
)
88 for (last_insn
= insns
;
89 NEXT_INSN (last_insn
) != NULL_RTX
;
90 last_insn
= NEXT_INSN (last_insn
))
93 /* If TARGET is in OP0 or OP1, punt. We'd end up with a note referencing
94 a value changing in the insn, so the note would be invalid for CSE. */
95 if (reg_overlap_mentioned_p (target
, op0
)
96 || (op1
&& reg_overlap_mentioned_p (target
, op1
)))
99 && (rtx_equal_p (target
, op0
)
100 || (op1
&& rtx_equal_p (target
, op1
))))
102 /* For MEM target, with MEM = MEM op X, prefer no REG_EQUAL note
103 over expanding it as temp = MEM op X, MEM = temp. If the target
104 supports MEM = MEM op X instructions, it is sometimes too hard
105 to reconstruct that form later, especially if X is also a memory,
106 and due to multiple occurrences of addresses the address might
107 be forced into register unnecessarily.
108 Note that not emitting the REG_EQUIV note might inhibit
109 CSE in some cases. */
110 set
= single_set (last_insn
);
112 && GET_CODE (SET_SRC (set
)) == code
113 && MEM_P (SET_DEST (set
))
114 && (rtx_equal_p (SET_DEST (set
), XEXP (SET_SRC (set
), 0))
115 || (op1
&& rtx_equal_p (SET_DEST (set
),
116 XEXP (SET_SRC (set
), 1)))))
122 set
= set_for_reg_notes (last_insn
);
126 if (! rtx_equal_p (SET_DEST (set
), target
)
127 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
128 && (GET_CODE (SET_DEST (set
)) != STRICT_LOW_PART
129 || ! rtx_equal_p (XEXP (SET_DEST (set
), 0), target
)))
132 if (GET_RTX_CLASS (code
) == RTX_UNARY
)
142 if (op0_mode
!= VOIDmode
&& GET_MODE (target
) != op0_mode
)
144 note
= gen_rtx_fmt_e (code
, op0_mode
, copy_rtx (op0
));
145 if (GET_MODE_UNIT_SIZE (op0_mode
)
146 > GET_MODE_UNIT_SIZE (GET_MODE (target
)))
147 note
= simplify_gen_unary (TRUNCATE
, GET_MODE (target
),
150 note
= simplify_gen_unary (ZERO_EXTEND
, GET_MODE (target
),
156 note
= gen_rtx_fmt_e (code
, GET_MODE (target
), copy_rtx (op0
));
160 note
= gen_rtx_fmt_ee (code
, GET_MODE (target
), copy_rtx (op0
), copy_rtx (op1
));
162 set_unique_reg_note (last_insn
, REG_EQUAL
, note
);
167 /* Given two input operands, OP0 and OP1, determine what the correct from_mode
168 for a widening operation would be. In most cases this would be OP0, but if
169 that's a constant it'll be VOIDmode, which isn't useful. */
172 widened_mode (machine_mode to_mode
, rtx op0
, rtx op1
)
174 machine_mode m0
= GET_MODE (op0
);
175 machine_mode m1
= GET_MODE (op1
);
178 if (m0
== VOIDmode
&& m1
== VOIDmode
)
180 else if (m0
== VOIDmode
|| GET_MODE_UNIT_SIZE (m0
) < GET_MODE_UNIT_SIZE (m1
))
185 if (GET_MODE_UNIT_SIZE (result
) > GET_MODE_UNIT_SIZE (to_mode
))
191 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
192 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
193 not actually do a sign-extend or zero-extend, but can leave the
194 higher-order bits of the result rtx undefined, for example, in the case
195 of logical operations, but not right shifts. */
198 widen_operand (rtx op
, machine_mode mode
, machine_mode oldmode
,
199 int unsignedp
, int no_extend
)
202 scalar_int_mode int_mode
;
204 /* If we don't have to extend and this is a constant, return it. */
205 if (no_extend
&& GET_MODE (op
) == VOIDmode
)
208 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
209 extend since it will be more efficient to do so unless the signedness of
210 a promoted object differs from our extension. */
212 || !is_a
<scalar_int_mode
> (mode
, &int_mode
)
213 || (GET_CODE (op
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op
)
214 && SUBREG_CHECK_PROMOTED_SIGN (op
, unsignedp
)))
215 return convert_modes (mode
, oldmode
, op
, unsignedp
);
217 /* If MODE is no wider than a single word, we return a lowpart or paradoxical
219 if (GET_MODE_SIZE (int_mode
) <= UNITS_PER_WORD
)
220 return gen_lowpart (int_mode
, force_reg (GET_MODE (op
), op
));
222 /* Otherwise, get an object of MODE, clobber it, and set the low-order
225 result
= gen_reg_rtx (int_mode
);
226 emit_clobber (result
);
227 emit_move_insn (gen_lowpart (GET_MODE (op
), result
), op
);
231 /* Expand vector widening operations.
233 There are two different classes of operations handled here:
234 1) Operations whose result is wider than all the arguments to the operation.
235 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
236 In this case OP0 and optionally OP1 would be initialized,
237 but WIDE_OP wouldn't (not relevant for this case).
238 2) Operations whose result is of the same size as the last argument to the
239 operation, but wider than all the other arguments to the operation.
240 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
241 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
243 E.g, when called to expand the following operations, this is how
244 the arguments will be initialized:
246 widening-sum 2 oprnd0 - oprnd1
247 widening-dot-product 3 oprnd0 oprnd1 oprnd2
248 widening-mult 2 oprnd0 oprnd1 -
249 type-promotion (vec-unpack) 1 oprnd0 - - */
252 expand_widen_pattern_expr (sepops ops
, rtx op0
, rtx op1
, rtx wide_op
,
253 rtx target
, int unsignedp
)
255 class expand_operand eops
[4];
256 tree oprnd0
, oprnd1
, oprnd2
;
257 machine_mode wmode
= VOIDmode
, tmode0
, tmode1
= VOIDmode
;
258 optab widen_pattern_optab
;
259 enum insn_code icode
;
260 int nops
= TREE_CODE_LENGTH (ops
->code
);
265 tmode0
= TYPE_MODE (TREE_TYPE (oprnd0
));
266 if (ops
->code
== VEC_UNPACK_FIX_TRUNC_HI_EXPR
267 || ops
->code
== VEC_UNPACK_FIX_TRUNC_LO_EXPR
)
268 /* The sign is from the result type rather than operand's type
271 = optab_for_tree_code (ops
->code
, ops
->type
, optab_default
);
272 else if ((ops
->code
== VEC_UNPACK_HI_EXPR
273 || ops
->code
== VEC_UNPACK_LO_EXPR
)
274 && VECTOR_BOOLEAN_TYPE_P (ops
->type
)
275 && VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (oprnd0
))
276 && TYPE_MODE (ops
->type
) == TYPE_MODE (TREE_TYPE (oprnd0
))
277 && SCALAR_INT_MODE_P (TYPE_MODE (ops
->type
)))
279 /* For VEC_UNPACK_{LO,HI}_EXPR if the mode of op0 and result is
280 the same scalar mode for VECTOR_BOOLEAN_TYPE_P vectors, use
281 vec_unpacks_sbool_{lo,hi}_optab, so that we can pass in
282 the pattern number of elements in the wider vector. */
284 = (ops
->code
== VEC_UNPACK_HI_EXPR
285 ? vec_unpacks_sbool_hi_optab
: vec_unpacks_sbool_lo_optab
);
290 = optab_for_tree_code (ops
->code
, TREE_TYPE (oprnd0
), optab_default
);
291 if (ops
->code
== WIDEN_MULT_PLUS_EXPR
292 || ops
->code
== WIDEN_MULT_MINUS_EXPR
)
293 icode
= find_widening_optab_handler (widen_pattern_optab
,
294 TYPE_MODE (TREE_TYPE (ops
->op2
)),
297 icode
= optab_handler (widen_pattern_optab
, tmode0
);
298 gcc_assert (icode
!= CODE_FOR_nothing
);
303 tmode1
= TYPE_MODE (TREE_TYPE (oprnd1
));
308 op1
= GEN_INT (TYPE_VECTOR_SUBPARTS (TREE_TYPE (oprnd0
)).to_constant ());
312 /* The last operand is of a wider mode than the rest of the operands. */
317 gcc_assert (tmode1
== tmode0
);
320 wmode
= TYPE_MODE (TREE_TYPE (oprnd2
));
324 create_output_operand (&eops
[op
++], target
, TYPE_MODE (ops
->type
));
325 create_convert_operand_from (&eops
[op
++], op0
, tmode0
, unsignedp
);
327 create_convert_operand_from (&eops
[op
++], op1
, tmode1
, unsignedp
);
329 create_convert_operand_from (&eops
[op
++], wide_op
, wmode
, unsignedp
);
330 expand_insn (icode
, op
, eops
);
331 return eops
[0].value
;
334 /* Generate code to perform an operation specified by TERNARY_OPTAB
335 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
337 UNSIGNEDP is for the case where we have to widen the operands
338 to perform the operation. It says to use zero-extension.
340 If TARGET is nonzero, the value
341 is generated there, if it is convenient to do so.
342 In all cases an rtx is returned for the locus of the value;
343 this may or may not be TARGET. */
346 expand_ternary_op (machine_mode mode
, optab ternary_optab
, rtx op0
,
347 rtx op1
, rtx op2
, rtx target
, int unsignedp
)
349 class expand_operand ops
[4];
350 enum insn_code icode
= optab_handler (ternary_optab
, mode
);
352 gcc_assert (optab_handler (ternary_optab
, mode
) != CODE_FOR_nothing
);
354 create_output_operand (&ops
[0], target
, mode
);
355 create_convert_operand_from (&ops
[1], op0
, mode
, unsignedp
);
356 create_convert_operand_from (&ops
[2], op1
, mode
, unsignedp
);
357 create_convert_operand_from (&ops
[3], op2
, mode
, unsignedp
);
358 expand_insn (icode
, 4, ops
);
363 /* Like expand_binop, but return a constant rtx if the result can be
364 calculated at compile time. The arguments and return value are
365 otherwise the same as for expand_binop. */
368 simplify_expand_binop (machine_mode mode
, optab binoptab
,
369 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
370 enum optab_methods methods
)
372 if (CONSTANT_P (op0
) && CONSTANT_P (op1
))
374 rtx x
= simplify_binary_operation (optab_to_code (binoptab
),
380 return expand_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
);
383 /* Like simplify_expand_binop, but always put the result in TARGET.
384 Return true if the expansion succeeded. */
387 force_expand_binop (machine_mode mode
, optab binoptab
,
388 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
389 enum optab_methods methods
)
391 rtx x
= simplify_expand_binop (mode
, binoptab
, op0
, op1
,
392 target
, unsignedp
, methods
);
396 emit_move_insn (target
, x
);
400 /* Create a new vector value in VMODE with all elements set to OP. The
401 mode of OP must be the element mode of VMODE. If OP is a constant,
402 then the return value will be a constant. */
405 expand_vector_broadcast (machine_mode vmode
, rtx op
)
410 gcc_checking_assert (VECTOR_MODE_P (vmode
));
412 if (valid_for_const_vector_p (vmode
, op
))
413 return gen_const_vec_duplicate (vmode
, op
);
415 insn_code icode
= optab_handler (vec_duplicate_optab
, vmode
);
416 if (icode
!= CODE_FOR_nothing
)
418 class expand_operand ops
[2];
419 create_output_operand (&ops
[0], NULL_RTX
, vmode
);
420 create_input_operand (&ops
[1], op
, GET_MODE (op
));
421 expand_insn (icode
, 2, ops
);
425 if (!GET_MODE_NUNITS (vmode
).is_constant (&n
))
428 /* ??? If the target doesn't have a vec_init, then we have no easy way
429 of performing this operation. Most of this sort of generic support
430 is hidden away in the vector lowering support in gimple. */
431 icode
= convert_optab_handler (vec_init_optab
, vmode
,
432 GET_MODE_INNER (vmode
));
433 if (icode
== CODE_FOR_nothing
)
436 vec
= rtvec_alloc (n
);
437 for (int i
= 0; i
< n
; ++i
)
438 RTVEC_ELT (vec
, i
) = op
;
439 rtx ret
= gen_reg_rtx (vmode
);
440 emit_insn (GEN_FCN (icode
) (ret
, gen_rtx_PARALLEL (vmode
, vec
)));
445 /* This subroutine of expand_doubleword_shift handles the cases in which
446 the effective shift value is >= BITS_PER_WORD. The arguments and return
447 value are the same as for the parent routine, except that SUPERWORD_OP1
448 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
449 INTO_TARGET may be null if the caller has decided to calculate it. */
452 expand_superword_shift (optab binoptab
, rtx outof_input
, rtx superword_op1
,
453 rtx outof_target
, rtx into_target
,
454 int unsignedp
, enum optab_methods methods
)
456 if (into_target
!= 0)
457 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, superword_op1
,
458 into_target
, unsignedp
, methods
))
461 if (outof_target
!= 0)
463 /* For a signed right shift, we must fill OUTOF_TARGET with copies
464 of the sign bit, otherwise we must fill it with zeros. */
465 if (binoptab
!= ashr_optab
)
466 emit_move_insn (outof_target
, CONST0_RTX (word_mode
));
468 if (!force_expand_binop (word_mode
, binoptab
, outof_input
,
469 gen_int_shift_amount (word_mode
,
471 outof_target
, unsignedp
, methods
))
477 /* This subroutine of expand_doubleword_shift handles the cases in which
478 the effective shift value is < BITS_PER_WORD. The arguments and return
479 value are the same as for the parent routine. */
482 expand_subword_shift (scalar_int_mode op1_mode
, optab binoptab
,
483 rtx outof_input
, rtx into_input
, rtx op1
,
484 rtx outof_target
, rtx into_target
,
485 int unsignedp
, enum optab_methods methods
,
486 unsigned HOST_WIDE_INT shift_mask
)
488 optab reverse_unsigned_shift
, unsigned_shift
;
491 reverse_unsigned_shift
= (binoptab
== ashl_optab
? lshr_optab
: ashl_optab
);
492 unsigned_shift
= (binoptab
== ashl_optab
? ashl_optab
: lshr_optab
);
494 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
495 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
496 the opposite direction to BINOPTAB. */
497 if (CONSTANT_P (op1
) || shift_mask
>= BITS_PER_WORD
)
499 carries
= outof_input
;
500 tmp
= immed_wide_int_const (wi::shwi (BITS_PER_WORD
,
501 op1_mode
), op1_mode
);
502 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
507 /* We must avoid shifting by BITS_PER_WORD bits since that is either
508 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
509 has unknown behavior. Do a single shift first, then shift by the
510 remainder. It's OK to use ~OP1 as the remainder if shift counts
511 are truncated to the mode size. */
512 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
513 outof_input
, const1_rtx
, 0, unsignedp
, methods
);
514 if (shift_mask
== BITS_PER_WORD
- 1)
516 tmp
= immed_wide_int_const
517 (wi::minus_one (GET_MODE_PRECISION (op1_mode
)), op1_mode
);
518 tmp
= simplify_expand_binop (op1_mode
, xor_optab
, op1
, tmp
,
523 tmp
= immed_wide_int_const (wi::shwi (BITS_PER_WORD
- 1,
524 op1_mode
), op1_mode
);
525 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
529 if (tmp
== 0 || carries
== 0)
531 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
532 carries
, tmp
, 0, unsignedp
, methods
);
536 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
537 so the result can go directly into INTO_TARGET if convenient. */
538 tmp
= expand_binop (word_mode
, unsigned_shift
, into_input
, op1
,
539 into_target
, unsignedp
, methods
);
543 /* Now OR in the bits carried over from OUTOF_INPUT. */
544 if (!force_expand_binop (word_mode
, ior_optab
, tmp
, carries
,
545 into_target
, unsignedp
, methods
))
548 /* Use a standard word_mode shift for the out-of half. */
549 if (outof_target
!= 0)
550 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
551 outof_target
, unsignedp
, methods
))
558 /* Try implementing expand_doubleword_shift using conditional moves.
559 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
560 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
561 are the shift counts to use in the former and latter case. All other
562 arguments are the same as the parent routine. */
565 expand_doubleword_shift_condmove (scalar_int_mode op1_mode
, optab binoptab
,
566 enum rtx_code cmp_code
, rtx cmp1
, rtx cmp2
,
567 rtx outof_input
, rtx into_input
,
568 rtx subword_op1
, rtx superword_op1
,
569 rtx outof_target
, rtx into_target
,
570 int unsignedp
, enum optab_methods methods
,
571 unsigned HOST_WIDE_INT shift_mask
)
573 rtx outof_superword
, into_superword
;
575 /* Put the superword version of the output into OUTOF_SUPERWORD and
577 outof_superword
= outof_target
!= 0 ? gen_reg_rtx (word_mode
) : 0;
578 if (outof_target
!= 0 && subword_op1
== superword_op1
)
580 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
581 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
582 into_superword
= outof_target
;
583 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
584 outof_superword
, 0, unsignedp
, methods
))
589 into_superword
= gen_reg_rtx (word_mode
);
590 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
591 outof_superword
, into_superword
,
596 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
597 if (!expand_subword_shift (op1_mode
, binoptab
,
598 outof_input
, into_input
, subword_op1
,
599 outof_target
, into_target
,
600 unsignedp
, methods
, shift_mask
))
603 /* Select between them. Do the INTO half first because INTO_SUPERWORD
604 might be the current value of OUTOF_TARGET. */
605 if (!emit_conditional_move (into_target
, cmp_code
, cmp1
, cmp2
, op1_mode
,
606 into_target
, into_superword
, word_mode
, false))
609 if (outof_target
!= 0)
610 if (!emit_conditional_move (outof_target
, cmp_code
, cmp1
, cmp2
, op1_mode
,
611 outof_target
, outof_superword
,
618 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
619 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
620 input operand; the shift moves bits in the direction OUTOF_INPUT->
621 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
622 of the target. OP1 is the shift count and OP1_MODE is its mode.
623 If OP1 is constant, it will have been truncated as appropriate
624 and is known to be nonzero.
626 If SHIFT_MASK is zero, the result of word shifts is undefined when the
627 shift count is outside the range [0, BITS_PER_WORD). This routine must
628 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
630 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
631 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
632 fill with zeros or sign bits as appropriate.
634 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
635 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
636 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
637 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
640 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
641 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
642 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
643 function wants to calculate it itself.
645 Return true if the shift could be successfully synthesized. */
648 expand_doubleword_shift (scalar_int_mode op1_mode
, optab binoptab
,
649 rtx outof_input
, rtx into_input
, rtx op1
,
650 rtx outof_target
, rtx into_target
,
651 int unsignedp
, enum optab_methods methods
,
652 unsigned HOST_WIDE_INT shift_mask
)
654 rtx superword_op1
, tmp
, cmp1
, cmp2
;
655 enum rtx_code cmp_code
;
657 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
658 fill the result with sign or zero bits as appropriate. If so, the value
659 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
660 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
661 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
663 This isn't worthwhile for constant shifts since the optimizers will
664 cope better with in-range shift counts. */
665 if (shift_mask
>= BITS_PER_WORD
667 && !CONSTANT_P (op1
))
669 if (!expand_doubleword_shift (op1_mode
, binoptab
,
670 outof_input
, into_input
, op1
,
672 unsignedp
, methods
, shift_mask
))
674 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
675 outof_target
, unsignedp
, methods
))
680 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
681 is true when the effective shift value is less than BITS_PER_WORD.
682 Set SUPERWORD_OP1 to the shift count that should be used to shift
683 OUTOF_INPUT into INTO_TARGET when the condition is false. */
684 tmp
= immed_wide_int_const (wi::shwi (BITS_PER_WORD
, op1_mode
), op1_mode
);
685 if (!CONSTANT_P (op1
) && shift_mask
== BITS_PER_WORD
- 1)
687 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
688 is a subword shift count. */
689 cmp1
= simplify_expand_binop (op1_mode
, and_optab
, op1
, tmp
,
691 cmp2
= CONST0_RTX (op1_mode
);
697 /* Set CMP1 to OP1 - BITS_PER_WORD. */
698 cmp1
= simplify_expand_binop (op1_mode
, sub_optab
, op1
, tmp
,
700 cmp2
= CONST0_RTX (op1_mode
);
702 superword_op1
= cmp1
;
707 /* If we can compute the condition at compile time, pick the
708 appropriate subroutine. */
709 tmp
= simplify_relational_operation (cmp_code
, SImode
, op1_mode
, cmp1
, cmp2
);
710 if (tmp
!= 0 && CONST_INT_P (tmp
))
712 if (tmp
== const0_rtx
)
713 return expand_superword_shift (binoptab
, outof_input
, superword_op1
,
714 outof_target
, into_target
,
717 return expand_subword_shift (op1_mode
, binoptab
,
718 outof_input
, into_input
, op1
,
719 outof_target
, into_target
,
720 unsignedp
, methods
, shift_mask
);
723 /* Try using conditional moves to generate straight-line code. */
724 if (HAVE_conditional_move
)
726 rtx_insn
*start
= get_last_insn ();
727 if (expand_doubleword_shift_condmove (op1_mode
, binoptab
,
728 cmp_code
, cmp1
, cmp2
,
729 outof_input
, into_input
,
731 outof_target
, into_target
,
732 unsignedp
, methods
, shift_mask
))
734 delete_insns_since (start
);
737 /* As a last resort, use branches to select the correct alternative. */
738 rtx_code_label
*subword_label
= gen_label_rtx ();
739 rtx_code_label
*done_label
= gen_label_rtx ();
742 do_compare_rtx_and_jump (cmp1
, cmp2
, cmp_code
, false, op1_mode
,
744 profile_probability::uninitialized ());
747 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
748 outof_target
, into_target
,
752 emit_jump_insn (targetm
.gen_jump (done_label
));
754 emit_label (subword_label
);
756 if (!expand_subword_shift (op1_mode
, binoptab
,
757 outof_input
, into_input
, op1
,
758 outof_target
, into_target
,
759 unsignedp
, methods
, shift_mask
))
762 emit_label (done_label
);
766 /* Subroutine of expand_binop. Perform a double word multiplication of
767 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
768 as the target's word_mode. This function return NULL_RTX if anything
769 goes wrong, in which case it may have already emitted instructions
770 which need to be deleted.
772 If we want to multiply two two-word values and have normal and widening
773 multiplies of single-word values, we can do this with three smaller
776 The multiplication proceeds as follows:
777 _______________________
778 [__op0_high_|__op0_low__]
779 _______________________
780 * [__op1_high_|__op1_low__]
781 _______________________________________________
782 _______________________
783 (1) [__op0_low__*__op1_low__]
784 _______________________
785 (2a) [__op0_low__*__op1_high_]
786 _______________________
787 (2b) [__op0_high_*__op1_low__]
788 _______________________
789 (3) [__op0_high_*__op1_high_]
792 This gives a 4-word result. Since we are only interested in the
793 lower 2 words, partial result (3) and the upper words of (2a) and
794 (2b) don't need to be calculated. Hence (2a) and (2b) can be
795 calculated using non-widening multiplication.
797 (1), however, needs to be calculated with an unsigned widening
798 multiplication. If this operation is not directly supported we
799 try using a signed widening multiplication and adjust the result.
800 This adjustment works as follows:
802 If both operands are positive then no adjustment is needed.
804 If the operands have different signs, for example op0_low < 0 and
805 op1_low >= 0, the instruction treats the most significant bit of
806 op0_low as a sign bit instead of a bit with significance
807 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
808 with 2**BITS_PER_WORD - op0_low, and two's complements the
809 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
812 Similarly, if both operands are negative, we need to add
813 (op0_low + op1_low) * 2**BITS_PER_WORD.
815 We use a trick to adjust quickly. We logically shift op0_low right
816 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
817 op0_high (op1_high) before it is used to calculate 2b (2a). If no
818 logical shift exists, we do an arithmetic right shift and subtract
822 expand_doubleword_mult (machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
823 bool umulp
, enum optab_methods methods
)
825 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
826 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
827 rtx wordm1
= (umulp
? NULL_RTX
828 : gen_int_shift_amount (word_mode
, BITS_PER_WORD
- 1));
829 rtx product
, adjust
, product_high
, temp
;
831 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
832 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
833 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
834 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
836 /* If we're using an unsigned multiply to directly compute the product
837 of the low-order words of the operands and perform any required
838 adjustments of the operands, we begin by trying two more multiplications
839 and then computing the appropriate sum.
841 We have checked above that the required addition is provided.
842 Full-word addition will normally always succeed, especially if
843 it is provided at all, so we don't worry about its failure. The
844 multiplication may well fail, however, so we do handle that. */
848 /* ??? This could be done with emit_store_flag where available. */
849 temp
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
850 NULL_RTX
, 1, methods
);
852 op0_high
= expand_binop (word_mode
, add_optab
, op0_high
, temp
,
853 NULL_RTX
, 0, OPTAB_DIRECT
);
856 temp
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
857 NULL_RTX
, 0, methods
);
860 op0_high
= expand_binop (word_mode
, sub_optab
, op0_high
, temp
,
861 NULL_RTX
, 0, OPTAB_DIRECT
);
868 adjust
= expand_binop (word_mode
, smul_optab
, op0_high
, op1_low
,
869 NULL_RTX
, 0, OPTAB_DIRECT
);
873 /* OP0_HIGH should now be dead. */
877 /* ??? This could be done with emit_store_flag where available. */
878 temp
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
879 NULL_RTX
, 1, methods
);
881 op1_high
= expand_binop (word_mode
, add_optab
, op1_high
, temp
,
882 NULL_RTX
, 0, OPTAB_DIRECT
);
885 temp
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
886 NULL_RTX
, 0, methods
);
889 op1_high
= expand_binop (word_mode
, sub_optab
, op1_high
, temp
,
890 NULL_RTX
, 0, OPTAB_DIRECT
);
897 temp
= expand_binop (word_mode
, smul_optab
, op1_high
, op0_low
,
898 NULL_RTX
, 0, OPTAB_DIRECT
);
902 /* OP1_HIGH should now be dead. */
904 adjust
= expand_binop (word_mode
, add_optab
, adjust
, temp
,
905 NULL_RTX
, 0, OPTAB_DIRECT
);
907 if (target
&& !REG_P (target
))
910 /* *_widen_optab needs to determine operand mode, make sure at least
911 one operand has non-VOID mode. */
912 if (GET_MODE (op0_low
) == VOIDmode
&& GET_MODE (op1_low
) == VOIDmode
)
913 op0_low
= force_reg (word_mode
, op0_low
);
916 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
917 target
, 1, OPTAB_DIRECT
);
919 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
920 target
, 1, OPTAB_DIRECT
);
925 product_high
= operand_subword (product
, high
, 1, mode
);
926 adjust
= expand_binop (word_mode
, add_optab
, product_high
, adjust
,
927 NULL_RTX
, 0, OPTAB_DIRECT
);
928 emit_move_insn (product_high
, adjust
);
932 /* Subroutine of expand_binop. Optimize unsigned double-word OP0 % OP1 for
933 constant OP1. If for some bit in [BITS_PER_WORD / 2, BITS_PER_WORD] range
934 (prefer higher bits) ((1w << bit) % OP1) == 1, then the modulo can be
935 computed in word-mode as ((OP0 & (bit - 1)) + ((OP0 >> bit) & (bit - 1))
936 + (OP0 >> (2 * bit))) % OP1. Whether we need to sum 2, 3 or 4 values
937 depends on the bit value, if 2, then carry from the addition needs to be
938 added too, i.e. like:
939 sum += __builtin_add_overflow (low, high, &sum)
941 Optimize signed double-word OP0 % OP1 similarly, just apply some correction
942 factor to the sum before doing unsigned remainder, in the form of
943 sum += (((signed) OP0 >> (2 * BITS_PER_WORD - 1)) & const);
944 then perform unsigned
945 remainder = sum % OP1;
947 remainder += ((signed) OP0 >> (2 * BITS_PER_WORD - 1)) & (1 - OP1); */
950 expand_doubleword_mod (machine_mode mode
, rtx op0
, rtx op1
, bool unsignedp
)
952 if (INTVAL (op1
) <= 1 || (INTVAL (op1
) & 1) == 0)
955 rtx_insn
*last
= get_last_insn ();
956 for (int bit
= BITS_PER_WORD
; bit
>= BITS_PER_WORD
/ 2; bit
--)
958 wide_int w
= wi::shifted_mask (bit
, 1, false, 2 * BITS_PER_WORD
);
959 if (wi::ne_p (wi::umod_trunc (w
, INTVAL (op1
)), 1))
961 rtx sum
= NULL_RTX
, mask
= NULL_RTX
;
962 if (bit
== BITS_PER_WORD
)
964 /* For signed modulo we need to add correction to the sum
965 and that might again overflow. */
968 if (optab_handler (uaddv4_optab
, word_mode
) == CODE_FOR_nothing
)
970 tree wtype
= lang_hooks
.types
.type_for_mode (word_mode
, 1);
971 if (wtype
== NULL_TREE
)
973 tree ctype
= build_complex_type (wtype
);
974 if (TYPE_MODE (ctype
) != GET_MODE_COMPLEX_MODE (word_mode
))
976 machine_mode cmode
= TYPE_MODE (ctype
);
977 rtx op00
= operand_subword_force (op0
, 0, mode
);
978 rtx op01
= operand_subword_force (op0
, 1, mode
);
979 rtx cres
= gen_rtx_CONCAT (cmode
, gen_reg_rtx (word_mode
),
980 gen_reg_rtx (word_mode
));
981 tree lhs
= make_tree (ctype
, cres
);
982 tree arg0
= make_tree (wtype
, op00
);
983 tree arg1
= make_tree (wtype
, op01
);
984 expand_addsub_overflow (UNKNOWN_LOCATION
, PLUS_EXPR
, lhs
, arg0
,
985 arg1
, true, true, true, false, NULL
);
986 sum
= expand_simple_binop (word_mode
, PLUS
, XEXP (cres
, 0),
987 XEXP (cres
, 1), NULL_RTX
, 1,
994 /* Code below uses GEN_INT, so we need the masks to be representable
995 in HOST_WIDE_INTs. */
996 if (bit
>= HOST_BITS_PER_WIDE_INT
)
998 /* If op0 is e.g. -1 or -2 unsigned, then the 2 additions might
999 overflow. Consider 64-bit -1ULL for word size 32, if we add
1000 0x7fffffffU + 0x7fffffffU + 3U, it wraps around to 1. */
1001 if (bit
== BITS_PER_WORD
- 1)
1004 int count
= (2 * BITS_PER_WORD
+ bit
- 1) / bit
;
1005 rtx sum_corr
= NULL_RTX
;
1009 /* For signed modulo, compute it as unsigned modulo of
1010 sum with a correction added to it if OP0 is negative,
1011 such that the result can be computed as unsigned
1012 remainder + ((OP1 >> (2 * BITS_PER_WORD - 1)) & (1 - OP1). */
1013 w
= wi::min_value (2 * BITS_PER_WORD
, SIGNED
);
1014 wide_int wmod1
= wi::umod_trunc (w
, INTVAL (op1
));
1015 wide_int wmod2
= wi::smod_trunc (w
, INTVAL (op1
));
1016 /* wmod2 == -wmod1. */
1017 wmod2
= wmod2
+ (INTVAL (op1
) - 1);
1018 if (wi::ne_p (wmod1
, wmod2
))
1020 wide_int wcorr
= wmod2
- wmod1
;
1022 wcorr
= wcorr
+ INTVAL (op1
);
1023 /* Now verify if the count sums can't overflow, and punt
1025 w
= wi::mask (bit
, false, 2 * BITS_PER_WORD
);
1026 w
= w
* (count
- 1);
1027 w
= w
+ wi::mask (2 * BITS_PER_WORD
- (count
- 1) * bit
,
1028 false, 2 * BITS_PER_WORD
);
1030 w
= wi::lrshift (w
, BITS_PER_WORD
);
1031 if (wi::ne_p (w
, 0))
1034 mask
= operand_subword_force (op0
, WORDS_BIG_ENDIAN
? 0 : 1,
1036 mask
= expand_simple_binop (word_mode
, ASHIFTRT
, mask
,
1037 GEN_INT (BITS_PER_WORD
- 1),
1038 NULL_RTX
, 0, OPTAB_DIRECT
);
1039 if (mask
== NULL_RTX
)
1041 sum_corr
= immed_wide_int_const (wcorr
, word_mode
);
1042 sum_corr
= expand_simple_binop (word_mode
, AND
, mask
,
1043 sum_corr
, NULL_RTX
, 1,
1045 if (sum_corr
== NULL_RTX
)
1050 for (int i
= 0; i
< count
; i
++)
1054 v
= expand_simple_binop (mode
, LSHIFTRT
, v
, GEN_INT (i
* bit
),
1055 NULL_RTX
, 1, OPTAB_DIRECT
);
1058 v
= lowpart_subreg (word_mode
, v
, mode
);
1062 v
= expand_simple_binop (word_mode
, AND
, v
,
1063 GEN_INT ((HOST_WIDE_INT_1U
<< bit
)
1068 if (sum
== NULL_RTX
)
1071 sum
= expand_simple_binop (word_mode
, PLUS
, sum
, v
, NULL_RTX
,
1073 if (sum
== NULL_RTX
)
1078 sum
= expand_simple_binop (word_mode
, PLUS
, sum
, sum_corr
,
1079 NULL_RTX
, 1, OPTAB_DIRECT
);
1080 if (sum
== NULL_RTX
)
1084 rtx remainder
= expand_divmod (1, TRUNC_MOD_EXPR
, word_mode
, sum
,
1085 gen_int_mode (INTVAL (op1
), word_mode
),
1086 NULL_RTX
, 1, OPTAB_DIRECT
);
1087 if (remainder
== NULL_RTX
)
1092 if (mask
== NULL_RTX
)
1094 mask
= operand_subword_force (op0
, WORDS_BIG_ENDIAN
? 0 : 1,
1096 mask
= expand_simple_binop (word_mode
, ASHIFTRT
, mask
,
1097 GEN_INT (BITS_PER_WORD
- 1),
1098 NULL_RTX
, 0, OPTAB_DIRECT
);
1099 if (mask
== NULL_RTX
)
1102 mask
= expand_simple_binop (word_mode
, AND
, mask
,
1103 gen_int_mode (1 - INTVAL (op1
),
1105 NULL_RTX
, 1, OPTAB_DIRECT
);
1106 if (mask
== NULL_RTX
)
1108 remainder
= expand_simple_binop (word_mode
, PLUS
, remainder
,
1109 mask
, NULL_RTX
, 1, OPTAB_DIRECT
);
1110 if (remainder
== NULL_RTX
)
1114 remainder
= convert_modes (mode
, word_mode
, remainder
, unsignedp
);
1115 /* Punt if we need any library calls. */
1116 for (; last
; last
= NEXT_INSN (last
))
1124 /* Similarly to the above function, but compute both quotient and remainder.
1125 Quotient can be computed from the remainder as:
1126 rem = op0 % op1; // Handled using expand_doubleword_mod
1127 quot = (op0 - rem) * inv; // inv is multiplicative inverse of op1 modulo
1128 // 2 * BITS_PER_WORD
1130 We can also handle cases where op1 is a multiple of power of two constant
1131 and constant handled by expand_doubleword_mod.
1132 op11 = 1 << __builtin_ctz (op1);
1134 rem1 = op0 % op12; // Handled using expand_doubleword_mod
1135 quot1 = (op0 - rem1) * inv; // inv is multiplicative inverse of op12 modulo
1136 // 2 * BITS_PER_WORD
1137 rem = (quot1 % op11) * op12 + rem1;
1138 quot = quot1 / op11; */
1141 expand_doubleword_divmod (machine_mode mode
, rtx op0
, rtx op1
, rtx
*rem
,
1146 /* Negative dividend should have been optimized into positive,
1147 similarly modulo by 1 and modulo by power of two is optimized
1149 if (INTVAL (op1
) <= 1 || pow2p_hwi (INTVAL (op1
)))
1152 rtx op11
= const1_rtx
;
1154 if ((INTVAL (op1
) & 1) == 0)
1156 int bit
= ctz_hwi (INTVAL (op1
));
1157 op11
= GEN_INT (HOST_WIDE_INT_1
<< bit
);
1158 op12
= GEN_INT (INTVAL (op1
) >> bit
);
1161 rtx rem1
= expand_doubleword_mod (mode
, op0
, op12
, unsignedp
);
1162 if (rem1
== NULL_RTX
)
1165 int prec
= 2 * BITS_PER_WORD
;
1166 wide_int a
= wide_int::from (INTVAL (op12
), prec
+ 1, UNSIGNED
);
1167 wide_int b
= wi::shifted_mask (prec
, 1, false, prec
+ 1);
1168 wide_int m
= wide_int::from (wi::mod_inv (a
, b
), prec
, UNSIGNED
);
1169 rtx inv
= immed_wide_int_const (m
, mode
);
1171 rtx_insn
*last
= get_last_insn ();
1172 rtx quot1
= expand_simple_binop (mode
, MINUS
, op0
, rem1
,
1173 NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1174 if (quot1
== NULL_RTX
)
1177 quot1
= expand_simple_binop (mode
, MULT
, quot1
, inv
,
1178 NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1179 if (quot1
== NULL_RTX
)
1182 if (op11
!= const1_rtx
)
1184 rtx rem2
= expand_divmod (1, TRUNC_MOD_EXPR
, mode
, quot1
, op11
,
1185 NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1186 if (rem2
== NULL_RTX
)
1189 rem2
= expand_simple_binop (mode
, MULT
, rem2
, op12
, NULL_RTX
,
1190 unsignedp
, OPTAB_DIRECT
);
1191 if (rem2
== NULL_RTX
)
1194 rem2
= expand_simple_binop (mode
, PLUS
, rem2
, rem1
, NULL_RTX
,
1195 unsignedp
, OPTAB_DIRECT
);
1196 if (rem2
== NULL_RTX
)
1199 rtx quot2
= expand_divmod (0, TRUNC_DIV_EXPR
, mode
, quot1
, op11
,
1200 NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1201 if (quot2
== NULL_RTX
)
1208 /* Punt if we need any library calls. */
1209 for (; last
; last
= NEXT_INSN (last
))
1217 /* Wrapper around expand_binop which takes an rtx code to specify
1218 the operation to perform, not an optab pointer. All other
1219 arguments are the same. */
1221 expand_simple_binop (machine_mode mode
, enum rtx_code code
, rtx op0
,
1222 rtx op1
, rtx target
, int unsignedp
,
1223 enum optab_methods methods
)
1225 optab binop
= code_to_optab (code
);
1228 return expand_binop (mode
, binop
, op0
, op1
, target
, unsignedp
, methods
);
1231 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1232 binop. Order them according to commutative_operand_precedence and, if
1233 possible, try to put TARGET or a pseudo first. */
1235 swap_commutative_operands_with_target (rtx target
, rtx op0
, rtx op1
)
1237 int op0_prec
= commutative_operand_precedence (op0
);
1238 int op1_prec
= commutative_operand_precedence (op1
);
1240 if (op0_prec
< op1_prec
)
1243 if (op0_prec
> op1_prec
)
1246 /* With equal precedence, both orders are ok, but it is better if the
1247 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1248 if (target
== 0 || REG_P (target
))
1249 return (REG_P (op1
) && !REG_P (op0
)) || target
== op1
;
1251 return rtx_equal_p (op1
, target
);
1254 /* Return true if BINOPTAB implements a shift operation. */
1257 shift_optab_p (optab binoptab
)
1259 switch (optab_to_code (binoptab
))
1275 /* Return true if BINOPTAB implements a commutative binary operation. */
1278 commutative_optab_p (optab binoptab
)
1280 return (GET_RTX_CLASS (optab_to_code (binoptab
)) == RTX_COMM_ARITH
1281 || binoptab
== smul_widen_optab
1282 || binoptab
== umul_widen_optab
1283 || binoptab
== smul_highpart_optab
1284 || binoptab
== umul_highpart_optab
);
1287 /* X is to be used in mode MODE as operand OPN to BINOPTAB. If we're
1288 optimizing, and if the operand is a constant that costs more than
1289 1 instruction, force the constant into a register and return that
1290 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */
1293 avoid_expensive_constant (machine_mode mode
, optab binoptab
,
1294 int opn
, rtx x
, bool unsignedp
)
1296 bool speed
= optimize_insn_for_speed_p ();
1298 if (mode
!= VOIDmode
1301 && (rtx_cost (x
, mode
, optab_to_code (binoptab
), opn
, speed
)
1302 > set_src_cost (x
, mode
, speed
)))
1304 if (CONST_INT_P (x
))
1306 HOST_WIDE_INT intval
= trunc_int_for_mode (INTVAL (x
), mode
);
1307 if (intval
!= INTVAL (x
))
1308 x
= GEN_INT (intval
);
1311 x
= convert_modes (mode
, VOIDmode
, x
, unsignedp
);
1312 x
= force_reg (mode
, x
);
1317 /* Helper function for expand_binop: handle the case where there
1318 is an insn ICODE that directly implements the indicated operation.
1319 Returns null if this is not possible. */
1321 expand_binop_directly (enum insn_code icode
, machine_mode mode
, optab binoptab
,
1323 rtx target
, int unsignedp
, enum optab_methods methods
,
1326 machine_mode xmode0
= insn_data
[(int) icode
].operand
[1].mode
;
1327 machine_mode xmode1
= insn_data
[(int) icode
].operand
[2].mode
;
1328 machine_mode mode0
, mode1
, tmp_mode
;
1329 class expand_operand ops
[3];
1332 rtx xop0
= op0
, xop1
= op1
;
1333 bool canonicalize_op1
= false;
1335 /* If it is a commutative operator and the modes would match
1336 if we would swap the operands, we can save the conversions. */
1337 commutative_p
= commutative_optab_p (binoptab
);
1339 && GET_MODE (xop0
) != xmode0
&& GET_MODE (xop1
) != xmode1
1340 && GET_MODE (xop0
) == xmode1
&& GET_MODE (xop1
) == xmode0
)
1341 std::swap (xop0
, xop1
);
1343 /* If we are optimizing, force expensive constants into a register. */
1344 xop0
= avoid_expensive_constant (xmode0
, binoptab
, 0, xop0
, unsignedp
);
1345 if (!shift_optab_p (binoptab
))
1346 xop1
= avoid_expensive_constant (xmode1
, binoptab
, 1, xop1
, unsignedp
);
1348 /* Shifts and rotates often use a different mode for op1 from op0;
1349 for VOIDmode constants we don't know the mode, so force it
1350 to be canonicalized using convert_modes. */
1351 canonicalize_op1
= true;
1353 /* In case the insn wants input operands in modes different from
1354 those of the actual operands, convert the operands. It would
1355 seem that we don't need to convert CONST_INTs, but we do, so
1356 that they're properly zero-extended, sign-extended or truncated
1359 mode0
= GET_MODE (xop0
) != VOIDmode
? GET_MODE (xop0
) : mode
;
1360 if (xmode0
!= VOIDmode
&& xmode0
!= mode0
)
1362 xop0
= convert_modes (xmode0
, mode0
, xop0
, unsignedp
);
1366 mode1
= ((GET_MODE (xop1
) != VOIDmode
|| canonicalize_op1
)
1367 ? GET_MODE (xop1
) : mode
);
1368 if (xmode1
!= VOIDmode
&& xmode1
!= mode1
)
1370 xop1
= convert_modes (xmode1
, mode1
, xop1
, unsignedp
);
1374 /* If operation is commutative,
1375 try to make the first operand a register.
1376 Even better, try to make it the same as the target.
1377 Also try to make the last operand a constant. */
1379 && swap_commutative_operands_with_target (target
, xop0
, xop1
))
1380 std::swap (xop0
, xop1
);
1382 /* Now, if insn's predicates don't allow our operands, put them into
1385 if (binoptab
== vec_pack_trunc_optab
1386 || binoptab
== vec_pack_usat_optab
1387 || binoptab
== vec_pack_ssat_optab
1388 || binoptab
== vec_pack_ufix_trunc_optab
1389 || binoptab
== vec_pack_sfix_trunc_optab
1390 || binoptab
== vec_packu_float_optab
1391 || binoptab
== vec_packs_float_optab
)
1393 /* The mode of the result is different then the mode of the
1395 tmp_mode
= insn_data
[(int) icode
].operand
[0].mode
;
1396 if (VECTOR_MODE_P (mode
)
1397 && maybe_ne (GET_MODE_NUNITS (tmp_mode
), 2 * GET_MODE_NUNITS (mode
)))
1399 delete_insns_since (last
);
1406 create_output_operand (&ops
[0], target
, tmp_mode
);
1407 create_input_operand (&ops
[1], xop0
, mode0
);
1408 create_input_operand (&ops
[2], xop1
, mode1
);
1409 pat
= maybe_gen_insn (icode
, 3, ops
);
1412 /* If PAT is composed of more than one insn, try to add an appropriate
1413 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1414 operand, call expand_binop again, this time without a target. */
1415 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
1416 && ! add_equal_note (pat
, ops
[0].value
,
1417 optab_to_code (binoptab
),
1418 ops
[1].value
, ops
[2].value
, mode0
))
1420 delete_insns_since (last
);
1421 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
1422 unsignedp
, methods
);
1426 return ops
[0].value
;
1428 delete_insns_since (last
);
1432 /* Generate code to perform an operation specified by BINOPTAB
1433 on operands OP0 and OP1, with result having machine-mode MODE.
1435 UNSIGNEDP is for the case where we have to widen the operands
1436 to perform the operation. It says to use zero-extension.
1438 If TARGET is nonzero, the value
1439 is generated there, if it is convenient to do so.
1440 In all cases an rtx is returned for the locus of the value;
1441 this may or may not be TARGET. */
1444 expand_binop (machine_mode mode
, optab binoptab
, rtx op0
, rtx op1
,
1445 rtx target
, int unsignedp
, enum optab_methods methods
)
1447 enum optab_methods next_methods
1448 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
1449 ? OPTAB_WIDEN
: methods
);
1450 enum mode_class mclass
;
1451 enum insn_code icode
;
1452 machine_mode wider_mode
;
1453 scalar_int_mode int_mode
;
1456 rtx_insn
*entry_last
= get_last_insn ();
1459 mclass
= GET_MODE_CLASS (mode
);
1461 /* If subtracting an integer constant, convert this into an addition of
1462 the negated constant. */
1464 if (binoptab
== sub_optab
&& CONST_INT_P (op1
))
1466 op1
= negate_rtx (mode
, op1
);
1467 binoptab
= add_optab
;
1469 /* For shifts, constant invalid op1 might be expanded from different
1470 mode than MODE. As those are invalid, force them to a register
1471 to avoid further problems during expansion. */
1472 else if (CONST_INT_P (op1
)
1473 && shift_optab_p (binoptab
)
1474 && UINTVAL (op1
) >= GET_MODE_BITSIZE (GET_MODE_INNER (mode
)))
1476 op1
= gen_int_mode (INTVAL (op1
), GET_MODE_INNER (mode
));
1477 op1
= force_reg (GET_MODE_INNER (mode
), op1
);
1480 /* Record where to delete back to if we backtrack. */
1481 last
= get_last_insn ();
1483 /* If we can do it with a three-operand insn, do so. */
1485 if (methods
!= OPTAB_MUST_WIDEN
)
1487 if (convert_optab_p (binoptab
))
1489 machine_mode from_mode
= widened_mode (mode
, op0
, op1
);
1490 icode
= find_widening_optab_handler (binoptab
, mode
, from_mode
);
1493 icode
= optab_handler (binoptab
, mode
);
1494 if (icode
!= CODE_FOR_nothing
)
1496 temp
= expand_binop_directly (icode
, mode
, binoptab
, op0
, op1
,
1497 target
, unsignedp
, methods
, last
);
1503 /* If we were trying to rotate, and that didn't work, try rotating
1504 the other direction before falling back to shifts and bitwise-or. */
1505 if (((binoptab
== rotl_optab
1506 && (icode
= optab_handler (rotr_optab
, mode
)) != CODE_FOR_nothing
)
1507 || (binoptab
== rotr_optab
1508 && (icode
= optab_handler (rotl_optab
, mode
)) != CODE_FOR_nothing
))
1509 && is_int_mode (mode
, &int_mode
))
1511 optab otheroptab
= (binoptab
== rotl_optab
? rotr_optab
: rotl_optab
);
1513 unsigned int bits
= GET_MODE_PRECISION (int_mode
);
1515 if (CONST_INT_P (op1
))
1516 newop1
= gen_int_shift_amount (int_mode
, bits
- INTVAL (op1
));
1517 else if (targetm
.shift_truncation_mask (int_mode
) == bits
- 1)
1518 newop1
= negate_rtx (GET_MODE (op1
), op1
);
1520 newop1
= expand_binop (GET_MODE (op1
), sub_optab
,
1521 gen_int_mode (bits
, GET_MODE (op1
)), op1
,
1522 NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1524 temp
= expand_binop_directly (icode
, int_mode
, otheroptab
, op0
, newop1
,
1525 target
, unsignedp
, methods
, last
);
1530 /* If this is a multiply, see if we can do a widening operation that
1531 takes operands of this mode and makes a wider mode. */
1533 if (binoptab
== smul_optab
1534 && GET_MODE_2XWIDER_MODE (mode
).exists (&wider_mode
)
1535 && (convert_optab_handler ((unsignedp
1537 : smul_widen_optab
),
1538 wider_mode
, mode
) != CODE_FOR_nothing
))
1540 /* *_widen_optab needs to determine operand mode, make sure at least
1541 one operand has non-VOID mode. */
1542 if (GET_MODE (op0
) == VOIDmode
&& GET_MODE (op1
) == VOIDmode
)
1543 op0
= force_reg (mode
, op0
);
1544 temp
= expand_binop (wider_mode
,
1545 unsignedp
? umul_widen_optab
: smul_widen_optab
,
1546 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1550 if (GET_MODE_CLASS (mode
) == MODE_INT
1551 && TRULY_NOOP_TRUNCATION_MODES_P (mode
, GET_MODE (temp
)))
1552 return gen_lowpart (mode
, temp
);
1554 return convert_to_mode (mode
, temp
, unsignedp
);
1558 /* If this is a vector shift by a scalar, see if we can do a vector
1559 shift by a vector. If so, broadcast the scalar into a vector. */
1560 if (mclass
== MODE_VECTOR_INT
)
1562 optab otheroptab
= unknown_optab
;
1564 if (binoptab
== ashl_optab
)
1565 otheroptab
= vashl_optab
;
1566 else if (binoptab
== ashr_optab
)
1567 otheroptab
= vashr_optab
;
1568 else if (binoptab
== lshr_optab
)
1569 otheroptab
= vlshr_optab
;
1570 else if (binoptab
== rotl_optab
)
1571 otheroptab
= vrotl_optab
;
1572 else if (binoptab
== rotr_optab
)
1573 otheroptab
= vrotr_optab
;
1576 && (icode
= optab_handler (otheroptab
, mode
)) != CODE_FOR_nothing
)
1578 /* The scalar may have been extended to be too wide. Truncate
1579 it back to the proper size to fit in the broadcast vector. */
1580 scalar_mode inner_mode
= GET_MODE_INNER (mode
);
1581 if (!CONST_INT_P (op1
)
1582 && (GET_MODE_BITSIZE (as_a
<scalar_int_mode
> (GET_MODE (op1
)))
1583 > GET_MODE_BITSIZE (inner_mode
)))
1584 op1
= force_reg (inner_mode
,
1585 simplify_gen_unary (TRUNCATE
, inner_mode
, op1
,
1587 rtx vop1
= expand_vector_broadcast (mode
, op1
);
1590 temp
= expand_binop_directly (icode
, mode
, otheroptab
, op0
, vop1
,
1591 target
, unsignedp
, methods
, last
);
1598 /* Look for a wider mode of the same class for which we think we
1599 can open-code the operation. Check for a widening multiply at the
1600 wider mode as well. */
1602 if (CLASS_HAS_WIDER_MODES_P (mclass
)
1603 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
1604 FOR_EACH_WIDER_MODE (wider_mode
, mode
)
1606 machine_mode next_mode
;
1607 if (optab_handler (binoptab
, wider_mode
) != CODE_FOR_nothing
1608 || (binoptab
== smul_optab
1609 && GET_MODE_WIDER_MODE (wider_mode
).exists (&next_mode
)
1610 && (find_widening_optab_handler ((unsignedp
1612 : smul_widen_optab
),
1614 != CODE_FOR_nothing
)))
1616 rtx xop0
= op0
, xop1
= op1
;
1619 /* For certain integer operations, we need not actually extend
1620 the narrow operands, as long as we will truncate
1621 the results to the same narrowness. */
1623 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1624 || binoptab
== xor_optab
1625 || binoptab
== add_optab
|| binoptab
== sub_optab
1626 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1627 && mclass
== MODE_INT
)
1630 xop0
= avoid_expensive_constant (mode
, binoptab
, 0,
1632 if (binoptab
!= ashl_optab
)
1633 xop1
= avoid_expensive_constant (mode
, binoptab
, 1,
1637 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
1639 /* The second operand of a shift must always be extended. */
1640 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1641 no_extend
&& binoptab
!= ashl_optab
);
1643 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1644 unsignedp
, OPTAB_DIRECT
);
1647 if (mclass
!= MODE_INT
1648 || !TRULY_NOOP_TRUNCATION_MODES_P (mode
, wider_mode
))
1651 target
= gen_reg_rtx (mode
);
1652 convert_move (target
, temp
, 0);
1656 return gen_lowpart (mode
, temp
);
1659 delete_insns_since (last
);
1663 /* If operation is commutative,
1664 try to make the first operand a register.
1665 Even better, try to make it the same as the target.
1666 Also try to make the last operand a constant. */
1667 if (commutative_optab_p (binoptab
)
1668 && swap_commutative_operands_with_target (target
, op0
, op1
))
1669 std::swap (op0
, op1
);
1671 /* These can be done a word at a time. */
1672 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
1673 && is_int_mode (mode
, &int_mode
)
1674 && GET_MODE_SIZE (int_mode
) > UNITS_PER_WORD
1675 && optab_handler (binoptab
, word_mode
) != CODE_FOR_nothing
)
1680 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1681 won't be accurate, so use a new target. */
1685 || reg_overlap_mentioned_p (target
, op0
)
1686 || reg_overlap_mentioned_p (target
, op1
)
1687 || !valid_multiword_target_p (target
))
1688 target
= gen_reg_rtx (int_mode
);
1692 /* Do the actual arithmetic. */
1693 machine_mode op0_mode
= GET_MODE (op0
);
1694 machine_mode op1_mode
= GET_MODE (op1
);
1695 if (op0_mode
== VOIDmode
)
1696 op0_mode
= int_mode
;
1697 if (op1_mode
== VOIDmode
)
1698 op1_mode
= int_mode
;
1699 for (i
= 0; i
< GET_MODE_BITSIZE (int_mode
) / BITS_PER_WORD
; i
++)
1701 rtx target_piece
= operand_subword (target
, i
, 1, int_mode
);
1702 rtx x
= expand_binop (word_mode
, binoptab
,
1703 operand_subword_force (op0
, i
, op0_mode
),
1704 operand_subword_force (op1
, i
, op1_mode
),
1705 target_piece
, unsignedp
, next_methods
);
1710 if (target_piece
!= x
)
1711 emit_move_insn (target_piece
, x
);
1714 insns
= get_insns ();
1717 if (i
== GET_MODE_BITSIZE (int_mode
) / BITS_PER_WORD
)
1724 /* Synthesize double word shifts from single word shifts. */
1725 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
1726 || binoptab
== ashr_optab
)
1727 && is_int_mode (mode
, &int_mode
)
1728 && (CONST_INT_P (op1
) || optimize_insn_for_speed_p ())
1729 && GET_MODE_SIZE (int_mode
) == 2 * UNITS_PER_WORD
1730 && GET_MODE_PRECISION (int_mode
) == GET_MODE_BITSIZE (int_mode
)
1731 && optab_handler (binoptab
, word_mode
) != CODE_FOR_nothing
1732 && optab_handler (ashl_optab
, word_mode
) != CODE_FOR_nothing
1733 && optab_handler (lshr_optab
, word_mode
) != CODE_FOR_nothing
)
1735 unsigned HOST_WIDE_INT shift_mask
, double_shift_mask
;
1736 scalar_int_mode op1_mode
;
1738 double_shift_mask
= targetm
.shift_truncation_mask (int_mode
);
1739 shift_mask
= targetm
.shift_truncation_mask (word_mode
);
1740 op1_mode
= (GET_MODE (op1
) != VOIDmode
1741 ? as_a
<scalar_int_mode
> (GET_MODE (op1
))
1744 /* Apply the truncation to constant shifts. */
1745 if (double_shift_mask
> 0 && CONST_INT_P (op1
))
1746 op1
= gen_int_mode (INTVAL (op1
) & double_shift_mask
, op1_mode
);
1748 if (op1
== CONST0_RTX (op1_mode
))
1751 /* Make sure that this is a combination that expand_doubleword_shift
1752 can handle. See the comments there for details. */
1753 if (double_shift_mask
== 0
1754 || (shift_mask
== BITS_PER_WORD
- 1
1755 && double_shift_mask
== BITS_PER_WORD
* 2 - 1))
1758 rtx into_target
, outof_target
;
1759 rtx into_input
, outof_input
;
1760 int left_shift
, outof_word
;
1762 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1763 won't be accurate, so use a new target. */
1767 || reg_overlap_mentioned_p (target
, op0
)
1768 || reg_overlap_mentioned_p (target
, op1
)
1769 || !valid_multiword_target_p (target
))
1770 target
= gen_reg_rtx (int_mode
);
1774 /* OUTOF_* is the word we are shifting bits away from, and
1775 INTO_* is the word that we are shifting bits towards, thus
1776 they differ depending on the direction of the shift and
1777 WORDS_BIG_ENDIAN. */
1779 left_shift
= binoptab
== ashl_optab
;
1780 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1782 outof_target
= operand_subword (target
, outof_word
, 1, int_mode
);
1783 into_target
= operand_subword (target
, 1 - outof_word
, 1, int_mode
);
1785 outof_input
= operand_subword_force (op0
, outof_word
, int_mode
);
1786 into_input
= operand_subword_force (op0
, 1 - outof_word
, int_mode
);
1788 if (expand_doubleword_shift (op1_mode
, binoptab
,
1789 outof_input
, into_input
, op1
,
1790 outof_target
, into_target
,
1791 unsignedp
, next_methods
, shift_mask
))
1793 insns
= get_insns ();
1803 /* Synthesize double word rotates from single word shifts. */
1804 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1805 && is_int_mode (mode
, &int_mode
)
1806 && CONST_INT_P (op1
)
1807 && GET_MODE_PRECISION (int_mode
) == 2 * BITS_PER_WORD
1808 && optab_handler (ashl_optab
, word_mode
) != CODE_FOR_nothing
1809 && optab_handler (lshr_optab
, word_mode
) != CODE_FOR_nothing
)
1812 rtx into_target
, outof_target
;
1813 rtx into_input
, outof_input
;
1815 int shift_count
, left_shift
, outof_word
;
1817 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1818 won't be accurate, so use a new target. Do this also if target is not
1819 a REG, first because having a register instead may open optimization
1820 opportunities, and second because if target and op0 happen to be MEMs
1821 designating the same location, we would risk clobbering it too early
1822 in the code sequence we generate below. */
1827 || reg_overlap_mentioned_p (target
, op0
)
1828 || reg_overlap_mentioned_p (target
, op1
)
1829 || !valid_multiword_target_p (target
))
1830 target
= gen_reg_rtx (int_mode
);
1834 shift_count
= INTVAL (op1
);
1836 /* OUTOF_* is the word we are shifting bits away from, and
1837 INTO_* is the word that we are shifting bits towards, thus
1838 they differ depending on the direction of the shift and
1839 WORDS_BIG_ENDIAN. */
1841 left_shift
= (binoptab
== rotl_optab
);
1842 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1844 outof_target
= operand_subword (target
, outof_word
, 1, int_mode
);
1845 into_target
= operand_subword (target
, 1 - outof_word
, 1, int_mode
);
1847 outof_input
= operand_subword_force (op0
, outof_word
, int_mode
);
1848 into_input
= operand_subword_force (op0
, 1 - outof_word
, int_mode
);
1850 if (shift_count
== BITS_PER_WORD
)
1852 /* This is just a word swap. */
1853 emit_move_insn (outof_target
, into_input
);
1854 emit_move_insn (into_target
, outof_input
);
1859 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1860 HOST_WIDE_INT first_shift_count
, second_shift_count
;
1861 optab reverse_unsigned_shift
, unsigned_shift
;
1863 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1864 ? lshr_optab
: ashl_optab
);
1866 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1867 ? ashl_optab
: lshr_optab
);
1869 if (shift_count
> BITS_PER_WORD
)
1871 first_shift_count
= shift_count
- BITS_PER_WORD
;
1872 second_shift_count
= 2 * BITS_PER_WORD
- shift_count
;
1876 first_shift_count
= BITS_PER_WORD
- shift_count
;
1877 second_shift_count
= shift_count
;
1879 rtx first_shift_count_rtx
1880 = gen_int_shift_amount (word_mode
, first_shift_count
);
1881 rtx second_shift_count_rtx
1882 = gen_int_shift_amount (word_mode
, second_shift_count
);
1884 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1885 outof_input
, first_shift_count_rtx
,
1886 NULL_RTX
, unsignedp
, next_methods
);
1887 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1888 into_input
, second_shift_count_rtx
,
1889 NULL_RTX
, unsignedp
, next_methods
);
1891 if (into_temp1
!= 0 && into_temp2
!= 0)
1892 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1893 into_target
, unsignedp
, next_methods
);
1897 if (inter
!= 0 && inter
!= into_target
)
1898 emit_move_insn (into_target
, inter
);
1900 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1901 into_input
, first_shift_count_rtx
,
1902 NULL_RTX
, unsignedp
, next_methods
);
1903 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1904 outof_input
, second_shift_count_rtx
,
1905 NULL_RTX
, unsignedp
, next_methods
);
1907 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1908 inter
= expand_binop (word_mode
, ior_optab
,
1909 outof_temp1
, outof_temp2
,
1910 outof_target
, unsignedp
, next_methods
);
1912 if (inter
!= 0 && inter
!= outof_target
)
1913 emit_move_insn (outof_target
, inter
);
1916 insns
= get_insns ();
1926 /* These can be done a word at a time by propagating carries. */
1927 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
1928 && is_int_mode (mode
, &int_mode
)
1929 && GET_MODE_SIZE (int_mode
) >= 2 * UNITS_PER_WORD
1930 && optab_handler (binoptab
, word_mode
) != CODE_FOR_nothing
)
1933 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
1934 const unsigned int nwords
= GET_MODE_BITSIZE (int_mode
) / BITS_PER_WORD
;
1935 rtx carry_in
= NULL_RTX
, carry_out
= NULL_RTX
;
1936 rtx xop0
, xop1
, xtarget
;
1938 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1939 value is one of those, use it. Otherwise, use 1 since it is the
1940 one easiest to get. */
1941 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1942 int normalizep
= STORE_FLAG_VALUE
;
1947 /* Prepare the operands. */
1948 xop0
= force_reg (int_mode
, op0
);
1949 xop1
= force_reg (int_mode
, op1
);
1951 xtarget
= gen_reg_rtx (int_mode
);
1953 if (target
== 0 || !REG_P (target
) || !valid_multiword_target_p (target
))
1956 /* Indicate for flow that the entire target reg is being set. */
1958 emit_clobber (xtarget
);
1960 /* Do the actual arithmetic. */
1961 for (i
= 0; i
< nwords
; i
++)
1963 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
1964 rtx target_piece
= operand_subword (xtarget
, index
, 1, int_mode
);
1965 rtx op0_piece
= operand_subword_force (xop0
, index
, int_mode
);
1966 rtx op1_piece
= operand_subword_force (xop1
, index
, int_mode
);
1969 /* Main add/subtract of the input operands. */
1970 x
= expand_binop (word_mode
, binoptab
,
1971 op0_piece
, op1_piece
,
1972 target_piece
, unsignedp
, next_methods
);
1978 /* Store carry from main add/subtract. */
1979 carry_out
= gen_reg_rtx (word_mode
);
1980 carry_out
= emit_store_flag_force (carry_out
,
1981 (binoptab
== add_optab
1984 word_mode
, 1, normalizep
);
1991 /* Add/subtract previous carry to main result. */
1992 newx
= expand_binop (word_mode
,
1993 normalizep
== 1 ? binoptab
: otheroptab
,
1995 NULL_RTX
, 1, next_methods
);
1999 /* Get out carry from adding/subtracting carry in. */
2000 rtx carry_tmp
= gen_reg_rtx (word_mode
);
2001 carry_tmp
= emit_store_flag_force (carry_tmp
,
2002 (binoptab
== add_optab
2005 word_mode
, 1, normalizep
);
2007 /* Logical-ior the two poss. carry together. */
2008 carry_out
= expand_binop (word_mode
, ior_optab
,
2009 carry_out
, carry_tmp
,
2010 carry_out
, 0, next_methods
);
2014 emit_move_insn (target_piece
, newx
);
2018 if (x
!= target_piece
)
2019 emit_move_insn (target_piece
, x
);
2022 carry_in
= carry_out
;
2025 if (i
== GET_MODE_BITSIZE (int_mode
) / (unsigned) BITS_PER_WORD
)
2027 if (optab_handler (mov_optab
, int_mode
) != CODE_FOR_nothing
2028 || ! rtx_equal_p (target
, xtarget
))
2030 rtx_insn
*temp
= emit_move_insn (target
, xtarget
);
2032 set_dst_reg_note (temp
, REG_EQUAL
,
2033 gen_rtx_fmt_ee (optab_to_code (binoptab
),
2034 int_mode
, copy_rtx (xop0
),
2045 delete_insns_since (last
);
2048 /* Attempt to synthesize double word multiplies using a sequence of word
2049 mode multiplications. We first attempt to generate a sequence using a
2050 more efficient unsigned widening multiply, and if that fails we then
2051 try using a signed widening multiply. */
2053 if (binoptab
== smul_optab
2054 && is_int_mode (mode
, &int_mode
)
2055 && GET_MODE_SIZE (int_mode
) == 2 * UNITS_PER_WORD
2056 && optab_handler (smul_optab
, word_mode
) != CODE_FOR_nothing
2057 && optab_handler (add_optab
, word_mode
) != CODE_FOR_nothing
)
2059 rtx product
= NULL_RTX
;
2060 if (convert_optab_handler (umul_widen_optab
, int_mode
, word_mode
)
2061 != CODE_FOR_nothing
)
2063 product
= expand_doubleword_mult (int_mode
, op0
, op1
, target
,
2066 delete_insns_since (last
);
2069 if (product
== NULL_RTX
2070 && (convert_optab_handler (smul_widen_optab
, int_mode
, word_mode
)
2071 != CODE_FOR_nothing
))
2073 product
= expand_doubleword_mult (int_mode
, op0
, op1
, target
,
2076 delete_insns_since (last
);
2079 if (product
!= NULL_RTX
)
2081 if (optab_handler (mov_optab
, int_mode
) != CODE_FOR_nothing
)
2083 rtx_insn
*move
= emit_move_insn (target
? target
: product
,
2085 set_dst_reg_note (move
,
2087 gen_rtx_fmt_ee (MULT
, int_mode
,
2090 target
? target
: product
);
2096 /* Attempt to synthetize double word modulo by constant divisor. */
2097 if ((binoptab
== umod_optab
2098 || binoptab
== smod_optab
2099 || binoptab
== udiv_optab
2100 || binoptab
== sdiv_optab
)
2102 && CONST_INT_P (op1
)
2103 && is_int_mode (mode
, &int_mode
)
2104 && GET_MODE_SIZE (int_mode
) == 2 * UNITS_PER_WORD
2105 && optab_handler ((binoptab
== umod_optab
|| binoptab
== udiv_optab
)
2106 ? udivmod_optab
: sdivmod_optab
,
2107 int_mode
) == CODE_FOR_nothing
2108 && optab_handler (and_optab
, word_mode
) != CODE_FOR_nothing
2109 && optab_handler (add_optab
, word_mode
) != CODE_FOR_nothing
2110 && optimize_insn_for_speed_p ())
2113 if ((binoptab
== umod_optab
|| binoptab
== smod_optab
)
2114 && (INTVAL (op1
) & 1) == 0)
2115 res
= expand_doubleword_mod (int_mode
, op0
, op1
,
2116 binoptab
== umod_optab
);
2119 rtx quot
= expand_doubleword_divmod (int_mode
, op0
, op1
, &res
,
2120 binoptab
== umod_optab
2121 || binoptab
== udiv_optab
);
2122 if (quot
== NULL_RTX
)
2124 else if (binoptab
== udiv_optab
|| binoptab
== sdiv_optab
)
2127 if (res
!= NULL_RTX
)
2129 if (optab_handler (mov_optab
, int_mode
) != CODE_FOR_nothing
)
2131 rtx_insn
*move
= emit_move_insn (target
? target
: res
,
2133 set_dst_reg_note (move
, REG_EQUAL
,
2134 gen_rtx_fmt_ee (optab_to_code (binoptab
),
2135 int_mode
, copy_rtx (op0
), op1
),
2136 target
? target
: res
);
2141 delete_insns_since (last
);
2144 /* It can't be open-coded in this mode.
2145 Use a library call if one is available and caller says that's ok. */
2147 libfunc
= optab_libfunc (binoptab
, mode
);
2149 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
2153 machine_mode op1_mode
= mode
;
2158 if (shift_optab_p (binoptab
))
2160 op1_mode
= targetm
.libgcc_shift_count_mode ();
2161 /* Specify unsigned here,
2162 since negative shift counts are meaningless. */
2163 op1x
= convert_to_mode (op1_mode
, op1
, 1);
2166 if (GET_MODE (op0
) != VOIDmode
2167 && GET_MODE (op0
) != mode
)
2168 op0
= convert_to_mode (mode
, op0
, unsignedp
);
2170 /* Pass 1 for NO_QUEUE so we don't lose any increments
2171 if the libcall is cse'd or moved. */
2172 value
= emit_library_call_value (libfunc
,
2173 NULL_RTX
, LCT_CONST
, mode
,
2174 op0
, mode
, op1x
, op1_mode
);
2176 insns
= get_insns ();
2179 bool trapv
= trapv_binoptab_p (binoptab
);
2180 target
= gen_reg_rtx (mode
);
2181 emit_libcall_block_1 (insns
, target
, value
,
2183 : gen_rtx_fmt_ee (optab_to_code (binoptab
),
2184 mode
, op0
, op1
), trapv
);
2189 delete_insns_since (last
);
2191 /* It can't be done in this mode. Can we do it in a wider mode? */
2193 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
2194 || methods
== OPTAB_MUST_WIDEN
))
2196 /* Caller says, don't even try. */
2197 delete_insns_since (entry_last
);
2201 /* Compute the value of METHODS to pass to recursive calls.
2202 Don't allow widening to be tried recursively. */
2204 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
2206 /* Look for a wider mode of the same class for which it appears we can do
2209 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2211 /* This code doesn't make sense for conversion optabs, since we
2212 wouldn't then want to extend the operands to be the same size
2214 gcc_assert (!convert_optab_p (binoptab
));
2215 FOR_EACH_WIDER_MODE (wider_mode
, mode
)
2217 if (optab_handler (binoptab
, wider_mode
)
2218 || (methods
== OPTAB_LIB
2219 && optab_libfunc (binoptab
, wider_mode
)))
2221 rtx xop0
= op0
, xop1
= op1
;
2224 /* For certain integer operations, we need not actually extend
2225 the narrow operands, as long as we will truncate
2226 the results to the same narrowness. */
2228 if ((binoptab
== ior_optab
|| binoptab
== and_optab
2229 || binoptab
== xor_optab
2230 || binoptab
== add_optab
|| binoptab
== sub_optab
2231 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
2232 && mclass
== MODE_INT
)
2235 xop0
= widen_operand (xop0
, wider_mode
, mode
,
2236 unsignedp
, no_extend
);
2238 /* The second operand of a shift must always be extended. */
2239 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
2240 no_extend
&& binoptab
!= ashl_optab
);
2242 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
2243 unsignedp
, methods
);
2246 if (mclass
!= MODE_INT
2247 || !TRULY_NOOP_TRUNCATION_MODES_P (mode
, wider_mode
))
2250 target
= gen_reg_rtx (mode
);
2251 convert_move (target
, temp
, 0);
2255 return gen_lowpart (mode
, temp
);
2258 delete_insns_since (last
);
2263 delete_insns_since (entry_last
);
2267 /* Expand a binary operator which has both signed and unsigned forms.
2268 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2271 If we widen unsigned operands, we may use a signed wider operation instead
2272 of an unsigned wider operation, since the result would be the same. */
2275 sign_expand_binop (machine_mode mode
, optab uoptab
, optab soptab
,
2276 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
2277 enum optab_methods methods
)
2280 optab direct_optab
= unsignedp
? uoptab
: soptab
;
2283 /* Do it without widening, if possible. */
2284 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
2285 unsignedp
, OPTAB_DIRECT
);
2286 if (temp
|| methods
== OPTAB_DIRECT
)
2289 /* Try widening to a signed int. Disable any direct use of any
2290 signed insn in the current mode. */
2291 save_enable
= swap_optab_enable (soptab
, mode
, false);
2293 temp
= expand_binop (mode
, soptab
, op0
, op1
, target
,
2294 unsignedp
, OPTAB_WIDEN
);
2296 /* For unsigned operands, try widening to an unsigned int. */
2297 if (!temp
&& unsignedp
)
2298 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
2299 unsignedp
, OPTAB_WIDEN
);
2300 if (temp
|| methods
== OPTAB_WIDEN
)
2303 /* Use the right width libcall if that exists. */
2304 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
2305 unsignedp
, OPTAB_LIB
);
2306 if (temp
|| methods
== OPTAB_LIB
)
2309 /* Must widen and use a libcall, use either signed or unsigned. */
2310 temp
= expand_binop (mode
, soptab
, op0
, op1
, target
,
2311 unsignedp
, methods
);
2312 if (!temp
&& unsignedp
)
2313 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
2314 unsignedp
, methods
);
2317 /* Undo the fiddling above. */
2319 swap_optab_enable (soptab
, mode
, true);
2323 /* Generate code to perform an operation specified by UNOPPTAB
2324 on operand OP0, with two results to TARG0 and TARG1.
2325 We assume that the order of the operands for the instruction
2326 is TARG0, TARG1, OP0.
2328 Either TARG0 or TARG1 may be zero, but what that means is that
2329 the result is not actually wanted. We will generate it into
2330 a dummy pseudo-reg and discard it. They may not both be zero.
2332 Returns 1 if this operation can be performed; 0 if not. */
2335 expand_twoval_unop (optab unoptab
, rtx op0
, rtx targ0
, rtx targ1
,
2338 machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2339 enum mode_class mclass
;
2340 machine_mode wider_mode
;
2341 rtx_insn
*entry_last
= get_last_insn ();
2344 mclass
= GET_MODE_CLASS (mode
);
2347 targ0
= gen_reg_rtx (mode
);
2349 targ1
= gen_reg_rtx (mode
);
2351 /* Record where to go back to if we fail. */
2352 last
= get_last_insn ();
2354 if (optab_handler (unoptab
, mode
) != CODE_FOR_nothing
)
2356 class expand_operand ops
[3];
2357 enum insn_code icode
= optab_handler (unoptab
, mode
);
2359 create_fixed_operand (&ops
[0], targ0
);
2360 create_fixed_operand (&ops
[1], targ1
);
2361 create_convert_operand_from (&ops
[2], op0
, mode
, unsignedp
);
2362 if (maybe_expand_insn (icode
, 3, ops
))
2366 /* It can't be done in this mode. Can we do it in a wider mode? */
2368 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2370 FOR_EACH_WIDER_MODE (wider_mode
, mode
)
2372 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
)
2374 rtx t0
= gen_reg_rtx (wider_mode
);
2375 rtx t1
= gen_reg_rtx (wider_mode
);
2376 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2378 if (expand_twoval_unop (unoptab
, cop0
, t0
, t1
, unsignedp
))
2380 convert_move (targ0
, t0
, unsignedp
);
2381 convert_move (targ1
, t1
, unsignedp
);
2385 delete_insns_since (last
);
2390 delete_insns_since (entry_last
);
2394 /* Generate code to perform an operation specified by BINOPTAB
2395 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2396 We assume that the order of the operands for the instruction
2397 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2398 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2400 Either TARG0 or TARG1 may be zero, but what that means is that
2401 the result is not actually wanted. We will generate it into
2402 a dummy pseudo-reg and discard it. They may not both be zero.
2404 Returns 1 if this operation can be performed; 0 if not. */
2407 expand_twoval_binop (optab binoptab
, rtx op0
, rtx op1
, rtx targ0
, rtx targ1
,
2410 machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
2411 enum mode_class mclass
;
2412 machine_mode wider_mode
;
2413 rtx_insn
*entry_last
= get_last_insn ();
2416 mclass
= GET_MODE_CLASS (mode
);
2419 targ0
= gen_reg_rtx (mode
);
2421 targ1
= gen_reg_rtx (mode
);
2423 /* Record where to go back to if we fail. */
2424 last
= get_last_insn ();
2426 if (optab_handler (binoptab
, mode
) != CODE_FOR_nothing
)
2428 class expand_operand ops
[4];
2429 enum insn_code icode
= optab_handler (binoptab
, mode
);
2430 machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2431 machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
2432 rtx xop0
= op0
, xop1
= op1
;
2434 /* If we are optimizing, force expensive constants into a register. */
2435 xop0
= avoid_expensive_constant (mode0
, binoptab
, 0, xop0
, unsignedp
);
2436 xop1
= avoid_expensive_constant (mode1
, binoptab
, 1, xop1
, unsignedp
);
2438 create_fixed_operand (&ops
[0], targ0
);
2439 create_convert_operand_from (&ops
[1], xop0
, mode
, unsignedp
);
2440 create_convert_operand_from (&ops
[2], xop1
, mode
, unsignedp
);
2441 create_fixed_operand (&ops
[3], targ1
);
2442 if (maybe_expand_insn (icode
, 4, ops
))
2444 delete_insns_since (last
);
2447 /* It can't be done in this mode. Can we do it in a wider mode? */
2449 if (CLASS_HAS_WIDER_MODES_P (mclass
))
2451 FOR_EACH_WIDER_MODE (wider_mode
, mode
)
2453 if (optab_handler (binoptab
, wider_mode
) != CODE_FOR_nothing
)
2455 rtx t0
= gen_reg_rtx (wider_mode
);
2456 rtx t1
= gen_reg_rtx (wider_mode
);
2457 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
2458 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
2460 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
2463 convert_move (targ0
, t0
, unsignedp
);
2464 convert_move (targ1
, t1
, unsignedp
);
2468 delete_insns_since (last
);
2473 delete_insns_since (entry_last
);
2477 /* Expand the two-valued library call indicated by BINOPTAB, but
2478 preserve only one of the values. If TARG0 is non-NULL, the first
2479 value is placed into TARG0; otherwise the second value is placed
2480 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2481 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2482 This routine assumes that the value returned by the library call is
2483 as if the return value was of an integral mode twice as wide as the
2484 mode of OP0. Returns 1 if the call was successful. */
2487 expand_twoval_binop_libfunc (optab binoptab
, rtx op0
, rtx op1
,
2488 rtx targ0
, rtx targ1
, enum rtx_code code
)
2491 machine_mode libval_mode
;
2496 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2497 gcc_assert (!targ0
!= !targ1
);
2499 mode
= GET_MODE (op0
);
2500 libfunc
= optab_libfunc (binoptab
, mode
);
2504 /* The value returned by the library function will have twice as
2505 many bits as the nominal MODE. */
2506 libval_mode
= smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode
));
2508 libval
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
2512 /* Get the part of VAL containing the value that we want. */
2513 libval
= simplify_gen_subreg (mode
, libval
, libval_mode
,
2514 targ0
? 0 : GET_MODE_SIZE (mode
));
2515 insns
= get_insns ();
2517 /* Move the into the desired location. */
2518 emit_libcall_block (insns
, targ0
? targ0
: targ1
, libval
,
2519 gen_rtx_fmt_ee (code
, mode
, op0
, op1
));
2525 /* Wrapper around expand_unop which takes an rtx code to specify
2526 the operation to perform, not an optab pointer. All other
2527 arguments are the same. */
2529 expand_simple_unop (machine_mode mode
, enum rtx_code code
, rtx op0
,
2530 rtx target
, int unsignedp
)
2532 optab unop
= code_to_optab (code
);
2535 return expand_unop (mode
, unop
, op0
, target
, unsignedp
);
2541 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)).
2543 A similar operation can be used for clrsb. UNOPTAB says which operation
2544 we are trying to expand. */
2546 widen_leading (scalar_int_mode mode
, rtx op0
, rtx target
, optab unoptab
)
2548 opt_scalar_int_mode wider_mode_iter
;
2549 FOR_EACH_WIDER_MODE (wider_mode_iter
, mode
)
2551 scalar_int_mode wider_mode
= wider_mode_iter
.require ();
2552 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
)
2557 last
= get_last_insn ();
2560 target
= gen_reg_rtx (mode
);
2561 xop0
= widen_operand (op0
, wider_mode
, mode
,
2562 unoptab
!= clrsb_optab
, false);
2563 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2564 unoptab
!= clrsb_optab
);
2567 (wider_mode
, sub_optab
, temp
,
2568 gen_int_mode (GET_MODE_PRECISION (wider_mode
)
2569 - GET_MODE_PRECISION (mode
),
2571 target
, true, OPTAB_DIRECT
);
2573 delete_insns_since (last
);
2581 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2582 quantities, choosing which based on whether the high word is nonzero. */
2584 expand_doubleword_clz (scalar_int_mode mode
, rtx op0
, rtx target
)
2586 rtx xop0
= force_reg (mode
, op0
);
2587 rtx subhi
= gen_highpart (word_mode
, xop0
);
2588 rtx sublo
= gen_lowpart (word_mode
, xop0
);
2589 rtx_code_label
*hi0_label
= gen_label_rtx ();
2590 rtx_code_label
*after_label
= gen_label_rtx ();
2594 /* If we were not given a target, use a word_mode register, not a
2595 'mode' register. The result will fit, and nobody is expecting
2596 anything bigger (the return type of __builtin_clz* is int). */
2598 target
= gen_reg_rtx (word_mode
);
2600 /* In any case, write to a word_mode scratch in both branches of the
2601 conditional, so we can ensure there is a single move insn setting
2602 'target' to tag a REG_EQUAL note on. */
2603 result
= gen_reg_rtx (word_mode
);
2607 /* If the high word is not equal to zero,
2608 then clz of the full value is clz of the high word. */
2609 emit_cmp_and_jump_insns (subhi
, CONST0_RTX (word_mode
), EQ
, 0,
2610 word_mode
, true, hi0_label
);
2612 temp
= expand_unop_direct (word_mode
, clz_optab
, subhi
, result
, true);
2617 convert_move (result
, temp
, true);
2619 emit_jump_insn (targetm
.gen_jump (after_label
));
2622 /* Else clz of the full value is clz of the low word plus the number
2623 of bits in the high word. */
2624 emit_label (hi0_label
);
2626 temp
= expand_unop_direct (word_mode
, clz_optab
, sublo
, 0, true);
2629 temp
= expand_binop (word_mode
, add_optab
, temp
,
2630 gen_int_mode (GET_MODE_BITSIZE (word_mode
), word_mode
),
2631 result
, true, OPTAB_DIRECT
);
2635 convert_move (result
, temp
, true);
2637 emit_label (after_label
);
2638 convert_move (target
, result
, true);
2643 add_equal_note (seq
, target
, CLZ
, xop0
, NULL_RTX
, mode
);
2652 /* Try calculating popcount of a double-word quantity as two popcount's of
2653 word-sized quantities and summing up the results. */
2655 expand_doubleword_popcount (scalar_int_mode mode
, rtx op0
, rtx target
)
2662 t0
= expand_unop_direct (word_mode
, popcount_optab
,
2663 operand_subword_force (op0
, 0, mode
), NULL_RTX
,
2665 t1
= expand_unop_direct (word_mode
, popcount_optab
,
2666 operand_subword_force (op0
, 1, mode
), NULL_RTX
,
2674 /* If we were not given a target, use a word_mode register, not a
2675 'mode' register. The result will fit, and nobody is expecting
2676 anything bigger (the return type of __builtin_popcount* is int). */
2678 target
= gen_reg_rtx (word_mode
);
2680 t
= expand_binop (word_mode
, add_optab
, t0
, t1
, target
, 0, OPTAB_DIRECT
);
2685 add_equal_note (seq
, t
, POPCOUNT
, op0
, NULL_RTX
, mode
);
2693 (parity:narrow (low (x) ^ high (x))) */
2695 expand_doubleword_parity (scalar_int_mode mode
, rtx op0
, rtx target
)
2697 rtx t
= expand_binop (word_mode
, xor_optab
,
2698 operand_subword_force (op0
, 0, mode
),
2699 operand_subword_force (op0
, 1, mode
),
2700 NULL_RTX
, 0, OPTAB_DIRECT
);
2701 return expand_unop (word_mode
, parity_optab
, t
, target
, true);
2707 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2709 widen_bswap (scalar_int_mode mode
, rtx op0
, rtx target
)
2713 opt_scalar_int_mode wider_mode_iter
;
2715 FOR_EACH_WIDER_MODE (wider_mode_iter
, mode
)
2716 if (optab_handler (bswap_optab
, wider_mode_iter
.require ())
2717 != CODE_FOR_nothing
)
2720 if (!wider_mode_iter
.exists ())
2723 scalar_int_mode wider_mode
= wider_mode_iter
.require ();
2724 last
= get_last_insn ();
2726 x
= widen_operand (op0
, wider_mode
, mode
, true, true);
2727 x
= expand_unop (wider_mode
, bswap_optab
, x
, NULL_RTX
, true);
2729 gcc_assert (GET_MODE_PRECISION (wider_mode
) == GET_MODE_BITSIZE (wider_mode
)
2730 && GET_MODE_PRECISION (mode
) == GET_MODE_BITSIZE (mode
));
2732 x
= expand_shift (RSHIFT_EXPR
, wider_mode
, x
,
2733 GET_MODE_BITSIZE (wider_mode
)
2734 - GET_MODE_BITSIZE (mode
),
2740 target
= gen_reg_rtx (mode
);
2741 emit_move_insn (target
, gen_lowpart (mode
, x
));
2744 delete_insns_since (last
);
2749 /* Try calculating bswap as two bswaps of two word-sized operands. */
2752 expand_doubleword_bswap (machine_mode mode
, rtx op
, rtx target
)
2756 t1
= expand_unop (word_mode
, bswap_optab
,
2757 operand_subword_force (op
, 0, mode
), NULL_RTX
, true);
2758 t0
= expand_unop (word_mode
, bswap_optab
,
2759 operand_subword_force (op
, 1, mode
), NULL_RTX
, true);
2761 if (target
== 0 || !valid_multiword_target_p (target
))
2762 target
= gen_reg_rtx (mode
);
2764 emit_clobber (target
);
2765 emit_move_insn (operand_subword (target
, 0, 1, mode
), t0
);
2766 emit_move_insn (operand_subword (target
, 1, 1, mode
), t1
);
2771 /* Try calculating (parity x) as (and (popcount x) 1), where
2772 popcount can also be done in a wider mode. */
2774 expand_parity (scalar_int_mode mode
, rtx op0
, rtx target
)
2776 enum mode_class mclass
= GET_MODE_CLASS (mode
);
2777 opt_scalar_int_mode wider_mode_iter
;
2778 FOR_EACH_MODE_FROM (wider_mode_iter
, mode
)
2780 scalar_int_mode wider_mode
= wider_mode_iter
.require ();
2781 if (optab_handler (popcount_optab
, wider_mode
) != CODE_FOR_nothing
)
2786 last
= get_last_insn ();
2788 if (target
== 0 || GET_MODE (target
) != wider_mode
)
2789 target
= gen_reg_rtx (wider_mode
);
2791 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2792 temp
= expand_unop (wider_mode
, popcount_optab
, xop0
, NULL_RTX
,
2795 temp
= expand_binop (wider_mode
, and_optab
, temp
, const1_rtx
,
2796 target
, true, OPTAB_DIRECT
);
2800 if (mclass
!= MODE_INT
2801 || !TRULY_NOOP_TRUNCATION_MODES_P (mode
, wider_mode
))
2802 return convert_to_mode (mode
, temp
, 0);
2804 return gen_lowpart (mode
, temp
);
2807 delete_insns_since (last
);
2813 /* Try calculating ctz(x) as K - clz(x & -x) ,
2814 where K is GET_MODE_PRECISION(mode) - 1.
2816 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2817 don't have to worry about what the hardware does in that case. (If
2818 the clz instruction produces the usual value at 0, which is K, the
2819 result of this code sequence will be -1; expand_ffs, below, relies
2820 on this. It might be nice to have it be K instead, for consistency
2821 with the (very few) processors that provide a ctz with a defined
2822 value, but that would take one more instruction, and it would be
2823 less convenient for expand_ffs anyway. */
2826 expand_ctz (scalar_int_mode mode
, rtx op0
, rtx target
)
2831 if (optab_handler (clz_optab
, mode
) == CODE_FOR_nothing
)
2836 temp
= expand_unop_direct (mode
, neg_optab
, op0
, NULL_RTX
, true);
2838 temp
= expand_binop (mode
, and_optab
, op0
, temp
, NULL_RTX
,
2839 true, OPTAB_DIRECT
);
2841 temp
= expand_unop_direct (mode
, clz_optab
, temp
, NULL_RTX
, true);
2843 temp
= expand_binop (mode
, sub_optab
,
2844 gen_int_mode (GET_MODE_PRECISION (mode
) - 1, mode
),
2846 true, OPTAB_DIRECT
);
2856 add_equal_note (seq
, temp
, CTZ
, op0
, NULL_RTX
, mode
);
2862 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2863 else with the sequence used by expand_clz.
2865 The ffs builtin promises to return zero for a zero value and ctz/clz
2866 may have an undefined value in that case. If they do not give us a
2867 convenient value, we have to generate a test and branch. */
2869 expand_ffs (scalar_int_mode mode
, rtx op0
, rtx target
)
2871 HOST_WIDE_INT val
= 0;
2872 bool defined_at_zero
= false;
2876 if (optab_handler (ctz_optab
, mode
) != CODE_FOR_nothing
)
2880 temp
= expand_unop_direct (mode
, ctz_optab
, op0
, 0, true);
2884 defined_at_zero
= (CTZ_DEFINED_VALUE_AT_ZERO (mode
, val
) == 2);
2886 else if (optab_handler (clz_optab
, mode
) != CODE_FOR_nothing
)
2889 temp
= expand_ctz (mode
, op0
, 0);
2893 if (CLZ_DEFINED_VALUE_AT_ZERO (mode
, val
) == 2)
2895 defined_at_zero
= true;
2896 val
= (GET_MODE_PRECISION (mode
) - 1) - val
;
2902 if (defined_at_zero
&& val
== -1)
2903 /* No correction needed at zero. */;
2906 /* We don't try to do anything clever with the situation found
2907 on some processors (eg Alpha) where ctz(0:mode) ==
2908 bitsize(mode). If someone can think of a way to send N to -1
2909 and leave alone all values in the range 0..N-1 (where N is a
2910 power of two), cheaper than this test-and-branch, please add it.
2912 The test-and-branch is done after the operation itself, in case
2913 the operation sets condition codes that can be recycled for this.
2914 (This is true on i386, for instance.) */
2916 rtx_code_label
*nonzero_label
= gen_label_rtx ();
2917 emit_cmp_and_jump_insns (op0
, CONST0_RTX (mode
), NE
, 0,
2918 mode
, true, nonzero_label
);
2920 convert_move (temp
, GEN_INT (-1), false);
2921 emit_label (nonzero_label
);
2924 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2925 to produce a value in the range 0..bitsize. */
2926 temp
= expand_binop (mode
, add_optab
, temp
, gen_int_mode (1, mode
),
2927 target
, false, OPTAB_DIRECT
);
2934 add_equal_note (seq
, temp
, FFS
, op0
, NULL_RTX
, mode
);
2943 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2944 conditions, VAL may already be a SUBREG against which we cannot generate
2945 a further SUBREG. In this case, we expect forcing the value into a
2946 register will work around the situation. */
2949 lowpart_subreg_maybe_copy (machine_mode omode
, rtx val
,
2953 ret
= lowpart_subreg (omode
, val
, imode
);
2956 val
= force_reg (imode
, val
);
2957 ret
= lowpart_subreg (omode
, val
, imode
);
2958 gcc_assert (ret
!= NULL
);
2963 /* Expand a floating point absolute value or negation operation via a
2964 logical operation on the sign bit. */
2967 expand_absneg_bit (enum rtx_code code
, scalar_float_mode mode
,
2968 rtx op0
, rtx target
)
2970 const struct real_format
*fmt
;
2971 int bitpos
, word
, nwords
, i
;
2972 scalar_int_mode imode
;
2976 /* The format has to have a simple sign bit. */
2977 fmt
= REAL_MODE_FORMAT (mode
);
2981 bitpos
= fmt
->signbit_rw
;
2985 /* Don't create negative zeros if the format doesn't support them. */
2986 if (code
== NEG
&& !fmt
->has_signed_zero
)
2989 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2991 if (!int_mode_for_mode (mode
).exists (&imode
))
3000 if (FLOAT_WORDS_BIG_ENDIAN
)
3001 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3003 word
= bitpos
/ BITS_PER_WORD
;
3004 bitpos
= bitpos
% BITS_PER_WORD
;
3005 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
3008 wide_int mask
= wi::set_bit_in_zero (bitpos
, GET_MODE_PRECISION (imode
));
3014 || reg_overlap_mentioned_p (target
, op0
)
3015 || (nwords
> 1 && !valid_multiword_target_p (target
)))
3016 target
= gen_reg_rtx (mode
);
3022 for (i
= 0; i
< nwords
; ++i
)
3024 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
3025 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
3029 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
3031 immed_wide_int_const (mask
, imode
),
3032 targ_piece
, 1, OPTAB_LIB_WIDEN
);
3033 if (temp
!= targ_piece
)
3034 emit_move_insn (targ_piece
, temp
);
3037 emit_move_insn (targ_piece
, op0_piece
);
3040 insns
= get_insns ();
3047 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
3048 gen_lowpart (imode
, op0
),
3049 immed_wide_int_const (mask
, imode
),
3050 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
3051 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
3053 set_dst_reg_note (get_last_insn (), REG_EQUAL
,
3054 gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
)),
3061 /* As expand_unop, but will fail rather than attempt the operation in a
3062 different mode or with a libcall. */
3064 expand_unop_direct (machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
3067 if (optab_handler (unoptab
, mode
) != CODE_FOR_nothing
)
3069 class expand_operand ops
[2];
3070 enum insn_code icode
= optab_handler (unoptab
, mode
);
3071 rtx_insn
*last
= get_last_insn ();
3074 create_output_operand (&ops
[0], target
, mode
);
3075 create_convert_operand_from (&ops
[1], op0
, mode
, unsignedp
);
3076 pat
= maybe_gen_insn (icode
, 2, ops
);
3079 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
3080 && ! add_equal_note (pat
, ops
[0].value
,
3081 optab_to_code (unoptab
),
3082 ops
[1].value
, NULL_RTX
, mode
))
3084 delete_insns_since (last
);
3085 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
3090 return ops
[0].value
;
3096 /* Generate code to perform an operation specified by UNOPTAB
3097 on operand OP0, with result having machine-mode MODE.
3099 UNSIGNEDP is for the case where we have to widen the operands
3100 to perform the operation. It says to use zero-extension.
3102 If TARGET is nonzero, the value
3103 is generated there, if it is convenient to do so.
3104 In all cases an rtx is returned for the locus of the value;
3105 this may or may not be TARGET. */
3108 expand_unop (machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
3111 enum mode_class mclass
= GET_MODE_CLASS (mode
);
3112 machine_mode wider_mode
;
3113 scalar_int_mode int_mode
;
3114 scalar_float_mode float_mode
;
3118 temp
= expand_unop_direct (mode
, unoptab
, op0
, target
, unsignedp
);
3122 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3124 /* Widening (or narrowing) clz needs special treatment. */
3125 if (unoptab
== clz_optab
)
3127 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
3129 temp
= widen_leading (int_mode
, op0
, target
, unoptab
);
3133 if (GET_MODE_SIZE (int_mode
) == 2 * UNITS_PER_WORD
3134 && optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
)
3136 temp
= expand_doubleword_clz (int_mode
, op0
, target
);
3145 if (unoptab
== clrsb_optab
)
3147 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
3149 temp
= widen_leading (int_mode
, op0
, target
, unoptab
);
3156 if (unoptab
== popcount_optab
3157 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
3158 && GET_MODE_SIZE (int_mode
) == 2 * UNITS_PER_WORD
3159 && optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
3160 && optimize_insn_for_speed_p ())
3162 temp
= expand_doubleword_popcount (int_mode
, op0
, target
);
3167 if (unoptab
== parity_optab
3168 && is_a
<scalar_int_mode
> (mode
, &int_mode
)
3169 && GET_MODE_SIZE (int_mode
) == 2 * UNITS_PER_WORD
3170 && (optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
3171 || optab_handler (popcount_optab
, word_mode
) != CODE_FOR_nothing
)
3172 && optimize_insn_for_speed_p ())
3174 temp
= expand_doubleword_parity (int_mode
, op0
, target
);
3179 /* Widening (or narrowing) bswap needs special treatment. */
3180 if (unoptab
== bswap_optab
)
3182 /* HImode is special because in this mode BSWAP is equivalent to ROTATE
3183 or ROTATERT. First try these directly; if this fails, then try the
3184 obvious pair of shifts with allowed widening, as this will probably
3185 be always more efficient than the other fallback methods. */
3191 if (optab_handler (rotl_optab
, mode
) != CODE_FOR_nothing
)
3193 temp
= expand_binop (mode
, rotl_optab
, op0
,
3194 gen_int_shift_amount (mode
, 8),
3195 target
, unsignedp
, OPTAB_DIRECT
);
3200 if (optab_handler (rotr_optab
, mode
) != CODE_FOR_nothing
)
3202 temp
= expand_binop (mode
, rotr_optab
, op0
,
3203 gen_int_shift_amount (mode
, 8),
3204 target
, unsignedp
, OPTAB_DIRECT
);
3209 last
= get_last_insn ();
3211 temp1
= expand_binop (mode
, ashl_optab
, op0
,
3212 gen_int_shift_amount (mode
, 8), NULL_RTX
,
3213 unsignedp
, OPTAB_WIDEN
);
3214 temp2
= expand_binop (mode
, lshr_optab
, op0
,
3215 gen_int_shift_amount (mode
, 8), NULL_RTX
,
3216 unsignedp
, OPTAB_WIDEN
);
3219 temp
= expand_binop (mode
, ior_optab
, temp1
, temp2
, target
,
3220 unsignedp
, OPTAB_WIDEN
);
3225 delete_insns_since (last
);
3228 if (is_a
<scalar_int_mode
> (mode
, &int_mode
))
3230 temp
= widen_bswap (int_mode
, op0
, target
);
3234 /* We do not provide a 128-bit bswap in libgcc so force the use of
3235 a double bswap for 64-bit targets. */
3236 if (GET_MODE_SIZE (int_mode
) == 2 * UNITS_PER_WORD
3237 && (UNITS_PER_WORD
== 8
3238 || optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
))
3240 temp
= expand_doubleword_bswap (mode
, op0
, target
);
3249 if (CLASS_HAS_WIDER_MODES_P (mclass
))
3250 FOR_EACH_WIDER_MODE (wider_mode
, mode
)
3252 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
)
3255 rtx_insn
*last
= get_last_insn ();
3257 /* For certain operations, we need not actually extend
3258 the narrow operand, as long as we will truncate the
3259 results to the same narrowness. */
3261 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
3262 (unoptab
== neg_optab
3263 || unoptab
== one_cmpl_optab
)
3264 && mclass
== MODE_INT
);
3266 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
3271 if (mclass
!= MODE_INT
3272 || !TRULY_NOOP_TRUNCATION_MODES_P (mode
, wider_mode
))
3275 target
= gen_reg_rtx (mode
);
3276 convert_move (target
, temp
, 0);
3280 return gen_lowpart (mode
, temp
);
3283 delete_insns_since (last
);
3287 /* These can be done a word at a time. */
3288 if (unoptab
== one_cmpl_optab
3289 && is_int_mode (mode
, &int_mode
)
3290 && GET_MODE_SIZE (int_mode
) > UNITS_PER_WORD
3291 && optab_handler (unoptab
, word_mode
) != CODE_FOR_nothing
)
3298 || reg_overlap_mentioned_p (target
, op0
)
3299 || !valid_multiword_target_p (target
))
3300 target
= gen_reg_rtx (int_mode
);
3304 /* Do the actual arithmetic. */
3305 for (i
= 0; i
< GET_MODE_BITSIZE (int_mode
) / BITS_PER_WORD
; i
++)
3307 rtx target_piece
= operand_subword (target
, i
, 1, int_mode
);
3308 rtx x
= expand_unop (word_mode
, unoptab
,
3309 operand_subword_force (op0
, i
, int_mode
),
3310 target_piece
, unsignedp
);
3312 if (target_piece
!= x
)
3313 emit_move_insn (target_piece
, x
);
3316 insns
= get_insns ();
3323 /* Emit ~op0 as op0 ^ -1. */
3324 if (unoptab
== one_cmpl_optab
3325 && (SCALAR_INT_MODE_P (mode
) || GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
3326 && optab_handler (xor_optab
, mode
) != CODE_FOR_nothing
)
3328 temp
= expand_binop (mode
, xor_optab
, op0
, CONSTM1_RTX (mode
),
3329 target
, unsignedp
, OPTAB_DIRECT
);
3334 if (optab_to_code (unoptab
) == NEG
)
3336 /* Try negating floating point values by flipping the sign bit. */
3337 if (is_a
<scalar_float_mode
> (mode
, &float_mode
))
3339 temp
= expand_absneg_bit (NEG
, float_mode
, op0
, target
);
3344 /* If there is no negation pattern, and we have no negative zero,
3345 try subtracting from zero. */
3346 if (!HONOR_SIGNED_ZEROS (mode
))
3348 temp
= expand_binop (mode
, (unoptab
== negv_optab
3349 ? subv_optab
: sub_optab
),
3350 CONST0_RTX (mode
), op0
, target
,
3351 unsignedp
, OPTAB_DIRECT
);
3357 /* Try calculating parity (x) as popcount (x) % 2. */
3358 if (unoptab
== parity_optab
&& is_a
<scalar_int_mode
> (mode
, &int_mode
))
3360 temp
= expand_parity (int_mode
, op0
, target
);
3365 /* Try implementing ffs (x) in terms of clz (x). */
3366 if (unoptab
== ffs_optab
&& is_a
<scalar_int_mode
> (mode
, &int_mode
))
3368 temp
= expand_ffs (int_mode
, op0
, target
);
3373 /* Try implementing ctz (x) in terms of clz (x). */
3374 if (unoptab
== ctz_optab
&& is_a
<scalar_int_mode
> (mode
, &int_mode
))
3376 temp
= expand_ctz (int_mode
, op0
, target
);
3382 /* Now try a library call in this mode. */
3383 libfunc
= optab_libfunc (unoptab
, mode
);
3389 machine_mode outmode
= mode
;
3391 /* All of these functions return small values. Thus we choose to
3392 have them return something that isn't a double-word. */
3393 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
3394 || unoptab
== clrsb_optab
|| unoptab
== popcount_optab
3395 || unoptab
== parity_optab
)
3397 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
),
3398 optab_libfunc (unoptab
, mode
)));
3402 /* Pass 1 for NO_QUEUE so we don't lose any increments
3403 if the libcall is cse'd or moved. */
3404 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
, outmode
,
3406 insns
= get_insns ();
3409 target
= gen_reg_rtx (outmode
);
3410 bool trapv
= trapv_unoptab_p (unoptab
);
3412 eq_value
= NULL_RTX
;
3415 eq_value
= gen_rtx_fmt_e (optab_to_code (unoptab
), mode
, op0
);
3416 if (GET_MODE_UNIT_SIZE (outmode
) < GET_MODE_UNIT_SIZE (mode
))
3417 eq_value
= simplify_gen_unary (TRUNCATE
, outmode
, eq_value
, mode
);
3418 else if (GET_MODE_UNIT_SIZE (outmode
) > GET_MODE_UNIT_SIZE (mode
))
3419 eq_value
= simplify_gen_unary (ZERO_EXTEND
,
3420 outmode
, eq_value
, mode
);
3422 emit_libcall_block_1 (insns
, target
, value
, eq_value
, trapv
);
3427 /* It can't be done in this mode. Can we do it in a wider mode? */
3429 if (CLASS_HAS_WIDER_MODES_P (mclass
))
3431 FOR_EACH_WIDER_MODE (wider_mode
, mode
)
3433 if (optab_handler (unoptab
, wider_mode
) != CODE_FOR_nothing
3434 || optab_libfunc (unoptab
, wider_mode
))
3437 rtx_insn
*last
= get_last_insn ();
3439 /* For certain operations, we need not actually extend
3440 the narrow operand, as long as we will truncate the
3441 results to the same narrowness. */
3442 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
3443 (unoptab
== neg_optab
3444 || unoptab
== one_cmpl_optab
3445 || unoptab
== bswap_optab
)
3446 && mclass
== MODE_INT
);
3448 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
3451 /* If we are generating clz using wider mode, adjust the
3452 result. Similarly for clrsb. */
3453 if ((unoptab
== clz_optab
|| unoptab
== clrsb_optab
)
3456 scalar_int_mode wider_int_mode
3457 = as_a
<scalar_int_mode
> (wider_mode
);
3458 int_mode
= as_a
<scalar_int_mode
> (mode
);
3460 (wider_mode
, sub_optab
, temp
,
3461 gen_int_mode (GET_MODE_PRECISION (wider_int_mode
)
3462 - GET_MODE_PRECISION (int_mode
),
3464 target
, true, OPTAB_DIRECT
);
3467 /* Likewise for bswap. */
3468 if (unoptab
== bswap_optab
&& temp
!= 0)
3470 scalar_int_mode wider_int_mode
3471 = as_a
<scalar_int_mode
> (wider_mode
);
3472 int_mode
= as_a
<scalar_int_mode
> (mode
);
3473 gcc_assert (GET_MODE_PRECISION (wider_int_mode
)
3474 == GET_MODE_BITSIZE (wider_int_mode
)
3475 && GET_MODE_PRECISION (int_mode
)
3476 == GET_MODE_BITSIZE (int_mode
));
3478 temp
= expand_shift (RSHIFT_EXPR
, wider_int_mode
, temp
,
3479 GET_MODE_BITSIZE (wider_int_mode
)
3480 - GET_MODE_BITSIZE (int_mode
),
3486 if (mclass
!= MODE_INT
)
3489 target
= gen_reg_rtx (mode
);
3490 convert_move (target
, temp
, 0);
3494 return gen_lowpart (mode
, temp
);
3497 delete_insns_since (last
);
3502 /* One final attempt at implementing negation via subtraction,
3503 this time allowing widening of the operand. */
3504 if (optab_to_code (unoptab
) == NEG
&& !HONOR_SIGNED_ZEROS (mode
))
3507 temp
= expand_binop (mode
,
3508 unoptab
== negv_optab
? subv_optab
: sub_optab
,
3509 CONST0_RTX (mode
), op0
,
3510 target
, unsignedp
, OPTAB_LIB_WIDEN
);
3518 /* Emit code to compute the absolute value of OP0, with result to
3519 TARGET if convenient. (TARGET may be 0.) The return value says
3520 where the result actually is to be found.
3522 MODE is the mode of the operand; the mode of the result is
3523 different but can be deduced from MODE.
3528 expand_abs_nojump (machine_mode mode
, rtx op0
, rtx target
,
3529 int result_unsignedp
)
3533 if (GET_MODE_CLASS (mode
) != MODE_INT
3535 result_unsignedp
= 1;
3537 /* First try to do it with a special abs instruction. */
3538 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
3543 /* For floating point modes, try clearing the sign bit. */
3544 scalar_float_mode float_mode
;
3545 if (is_a
<scalar_float_mode
> (mode
, &float_mode
))
3547 temp
= expand_absneg_bit (ABS
, float_mode
, op0
, target
);
3552 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3553 if (optab_handler (smax_optab
, mode
) != CODE_FOR_nothing
3554 && !HONOR_SIGNED_ZEROS (mode
))
3556 rtx_insn
*last
= get_last_insn ();
3558 temp
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
3561 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
3567 delete_insns_since (last
);
3570 /* If this machine has expensive jumps, we can do integer absolute
3571 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3572 where W is the width of MODE. */
3574 scalar_int_mode int_mode
;
3575 if (is_int_mode (mode
, &int_mode
)
3576 && BRANCH_COST (optimize_insn_for_speed_p (),
3579 rtx extended
= expand_shift (RSHIFT_EXPR
, int_mode
, op0
,
3580 GET_MODE_PRECISION (int_mode
) - 1,
3583 temp
= expand_binop (int_mode
, xor_optab
, extended
, op0
, target
, 0,
3586 temp
= expand_binop (int_mode
,
3587 result_unsignedp
? sub_optab
: subv_optab
,
3588 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
3598 expand_abs (machine_mode mode
, rtx op0
, rtx target
,
3599 int result_unsignedp
, int safe
)
3602 rtx_code_label
*op1
;
3604 if (GET_MODE_CLASS (mode
) != MODE_INT
3606 result_unsignedp
= 1;
3608 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
3612 /* If that does not win, use conditional jump and negate. */
3614 /* It is safe to use the target if it is the same
3615 as the source if this is also a pseudo register */
3616 if (op0
== target
&& REG_P (op0
)
3617 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
3620 op1
= gen_label_rtx ();
3621 if (target
== 0 || ! safe
3622 || GET_MODE (target
) != mode
3623 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
3625 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
3626 target
= gen_reg_rtx (mode
);
3628 emit_move_insn (target
, op0
);
3631 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
3632 NULL_RTX
, NULL
, op1
,
3633 profile_probability::uninitialized ());
3635 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
3638 emit_move_insn (target
, op0
);
3644 /* Emit code to compute the one's complement absolute value of OP0
3645 (if (OP0 < 0) OP0 = ~OP0), with result to TARGET if convenient.
3646 (TARGET may be NULL_RTX.) The return value says where the result
3647 actually is to be found.
3649 MODE is the mode of the operand; the mode of the result is
3650 different but can be deduced from MODE. */
3653 expand_one_cmpl_abs_nojump (machine_mode mode
, rtx op0
, rtx target
)
3657 /* Not applicable for floating point modes. */
3658 if (FLOAT_MODE_P (mode
))
3661 /* If we have a MAX insn, we can do this as MAX (x, ~x). */
3662 if (optab_handler (smax_optab
, mode
) != CODE_FOR_nothing
)
3664 rtx_insn
*last
= get_last_insn ();
3666 temp
= expand_unop (mode
, one_cmpl_optab
, op0
, NULL_RTX
, 0);
3668 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
3674 delete_insns_since (last
);
3677 /* If this machine has expensive jumps, we can do one's complement
3678 absolute value of X as (((signed) x >> (W-1)) ^ x). */
3680 scalar_int_mode int_mode
;
3681 if (is_int_mode (mode
, &int_mode
)
3682 && BRANCH_COST (optimize_insn_for_speed_p (),
3685 rtx extended
= expand_shift (RSHIFT_EXPR
, int_mode
, op0
,
3686 GET_MODE_PRECISION (int_mode
) - 1,
3689 temp
= expand_binop (int_mode
, xor_optab
, extended
, op0
, target
, 0,
3699 /* A subroutine of expand_copysign, perform the copysign operation using the
3700 abs and neg primitives advertised to exist on the target. The assumption
3701 is that we have a split register file, and leaving op0 in fp registers,
3702 and not playing with subregs so much, will help the register allocator. */
3705 expand_copysign_absneg (scalar_float_mode mode
, rtx op0
, rtx op1
, rtx target
,
3706 int bitpos
, bool op0_is_abs
)
3708 scalar_int_mode imode
;
3709 enum insn_code icode
;
3711 rtx_code_label
*label
;
3716 /* Check if the back end provides an insn that handles signbit for the
3718 icode
= optab_handler (signbit_optab
, mode
);
3719 if (icode
!= CODE_FOR_nothing
)
3721 imode
= as_a
<scalar_int_mode
> (insn_data
[(int) icode
].operand
[0].mode
);
3722 sign
= gen_reg_rtx (imode
);
3723 emit_unop_insn (icode
, sign
, op1
, UNKNOWN
);
3727 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
3729 if (!int_mode_for_mode (mode
).exists (&imode
))
3731 op1
= gen_lowpart (imode
, op1
);
3738 if (FLOAT_WORDS_BIG_ENDIAN
)
3739 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3741 word
= bitpos
/ BITS_PER_WORD
;
3742 bitpos
= bitpos
% BITS_PER_WORD
;
3743 op1
= operand_subword_force (op1
, word
, mode
);
3746 wide_int mask
= wi::set_bit_in_zero (bitpos
, GET_MODE_PRECISION (imode
));
3747 sign
= expand_binop (imode
, and_optab
, op1
,
3748 immed_wide_int_const (mask
, imode
),
3749 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3754 op0
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
3761 if (target
== NULL_RTX
)
3762 target
= copy_to_reg (op0
);
3764 emit_move_insn (target
, op0
);
3767 label
= gen_label_rtx ();
3768 emit_cmp_and_jump_insns (sign
, const0_rtx
, EQ
, NULL_RTX
, imode
, 1, label
);
3770 if (CONST_DOUBLE_AS_FLOAT_P (op0
))
3771 op0
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
3773 op0
= expand_unop (mode
, neg_optab
, op0
, target
, 0);
3775 emit_move_insn (target
, op0
);
3783 /* A subroutine of expand_copysign, perform the entire copysign operation
3784 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3785 is true if op0 is known to have its sign bit clear. */
3788 expand_copysign_bit (scalar_float_mode mode
, rtx op0
, rtx op1
, rtx target
,
3789 int bitpos
, bool op0_is_abs
)
3791 scalar_int_mode imode
;
3792 int word
, nwords
, i
;
3796 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
3798 if (!int_mode_for_mode (mode
).exists (&imode
))
3807 if (FLOAT_WORDS_BIG_ENDIAN
)
3808 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
3810 word
= bitpos
/ BITS_PER_WORD
;
3811 bitpos
= bitpos
% BITS_PER_WORD
;
3812 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
3815 wide_int mask
= wi::set_bit_in_zero (bitpos
, GET_MODE_PRECISION (imode
));
3820 || reg_overlap_mentioned_p (target
, op0
)
3821 || reg_overlap_mentioned_p (target
, op1
)
3822 || (nwords
> 1 && !valid_multiword_target_p (target
)))
3823 target
= gen_reg_rtx (mode
);
3829 for (i
= 0; i
< nwords
; ++i
)
3831 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
3832 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
3838 = expand_binop (imode
, and_optab
, op0_piece
,
3839 immed_wide_int_const (~mask
, imode
),
3840 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3841 op1
= expand_binop (imode
, and_optab
,
3842 operand_subword_force (op1
, i
, mode
),
3843 immed_wide_int_const (mask
, imode
),
3844 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3846 temp
= expand_binop (imode
, ior_optab
, op0_piece
, op1
,
3847 targ_piece
, 1, OPTAB_LIB_WIDEN
);
3848 if (temp
!= targ_piece
)
3849 emit_move_insn (targ_piece
, temp
);
3852 emit_move_insn (targ_piece
, op0_piece
);
3855 insns
= get_insns ();
3862 op1
= expand_binop (imode
, and_optab
, gen_lowpart (imode
, op1
),
3863 immed_wide_int_const (mask
, imode
),
3864 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3866 op0
= gen_lowpart (imode
, op0
);
3868 op0
= expand_binop (imode
, and_optab
, op0
,
3869 immed_wide_int_const (~mask
, imode
),
3870 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3872 temp
= expand_binop (imode
, ior_optab
, op0
, op1
,
3873 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
3874 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
3880 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3881 scalar floating point mode. Return NULL if we do not know how to
3882 expand the operation inline. */
3885 expand_copysign (rtx op0
, rtx op1
, rtx target
)
3887 scalar_float_mode mode
;
3888 const struct real_format
*fmt
;
3892 mode
= as_a
<scalar_float_mode
> (GET_MODE (op0
));
3893 gcc_assert (GET_MODE (op1
) == mode
);
3895 /* First try to do it with a special instruction. */
3896 temp
= expand_binop (mode
, copysign_optab
, op0
, op1
,
3897 target
, 0, OPTAB_DIRECT
);
3901 fmt
= REAL_MODE_FORMAT (mode
);
3902 if (fmt
== NULL
|| !fmt
->has_signed_zero
)
3906 if (CONST_DOUBLE_AS_FLOAT_P (op0
))
3908 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0
)))
3909 op0
= simplify_unary_operation (ABS
, mode
, op0
, mode
);
3913 if (fmt
->signbit_ro
>= 0
3914 && (CONST_DOUBLE_AS_FLOAT_P (op0
)
3915 || (optab_handler (neg_optab
, mode
) != CODE_FOR_nothing
3916 && optab_handler (abs_optab
, mode
) != CODE_FOR_nothing
)))
3918 temp
= expand_copysign_absneg (mode
, op0
, op1
, target
,
3919 fmt
->signbit_ro
, op0_is_abs
);
3924 if (fmt
->signbit_rw
< 0)
3926 return expand_copysign_bit (mode
, op0
, op1
, target
,
3927 fmt
->signbit_rw
, op0_is_abs
);
3930 /* Generate an instruction whose insn-code is INSN_CODE,
3931 with two operands: an output TARGET and an input OP0.
3932 TARGET *must* be nonzero, and the output is always stored there.
3933 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3934 the value that is stored into TARGET.
3936 Return false if expansion failed. */
3939 maybe_emit_unop_insn (enum insn_code icode
, rtx target
, rtx op0
,
3942 class expand_operand ops
[2];
3945 create_output_operand (&ops
[0], target
, GET_MODE (target
));
3946 create_input_operand (&ops
[1], op0
, GET_MODE (op0
));
3947 pat
= maybe_gen_insn (icode
, 2, ops
);
3951 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
3953 add_equal_note (pat
, ops
[0].value
, code
, ops
[1].value
, NULL_RTX
,
3958 if (ops
[0].value
!= target
)
3959 emit_move_insn (target
, ops
[0].value
);
3962 /* Generate an instruction whose insn-code is INSN_CODE,
3963 with two operands: an output TARGET and an input OP0.
3964 TARGET *must* be nonzero, and the output is always stored there.
3965 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3966 the value that is stored into TARGET. */
3969 emit_unop_insn (enum insn_code icode
, rtx target
, rtx op0
, enum rtx_code code
)
3971 bool ok
= maybe_emit_unop_insn (icode
, target
, op0
, code
);
3975 struct no_conflict_data
3978 rtx_insn
*first
, *insn
;
3982 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3983 the currently examined clobber / store has to stay in the list of
3984 insns that constitute the actual libcall block. */
3986 no_conflict_move_test (rtx dest
, const_rtx set
, void *p0
)
3988 struct no_conflict_data
*p
= (struct no_conflict_data
*) p0
;
3990 /* If this inns directly contributes to setting the target, it must stay. */
3991 if (reg_overlap_mentioned_p (p
->target
, dest
))
3992 p
->must_stay
= true;
3993 /* If we haven't committed to keeping any other insns in the list yet,
3994 there is nothing more to check. */
3995 else if (p
->insn
== p
->first
)
3997 /* If this insn sets / clobbers a register that feeds one of the insns
3998 already in the list, this insn has to stay too. */
3999 else if (reg_overlap_mentioned_p (dest
, PATTERN (p
->first
))
4000 || (CALL_P (p
->first
) && (find_reg_fusage (p
->first
, USE
, dest
)))
4001 || reg_used_between_p (dest
, p
->first
, p
->insn
)
4002 /* Likewise if this insn depends on a register set by a previous
4003 insn in the list, or if it sets a result (presumably a hard
4004 register) that is set or clobbered by a previous insn.
4005 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
4006 SET_DEST perform the former check on the address, and the latter
4007 check on the MEM. */
4008 || (GET_CODE (set
) == SET
4009 && (modified_in_p (SET_SRC (set
), p
->first
)
4010 || modified_in_p (SET_DEST (set
), p
->first
)
4011 || modified_between_p (SET_SRC (set
), p
->first
, p
->insn
)
4012 || modified_between_p (SET_DEST (set
), p
->first
, p
->insn
))))
4013 p
->must_stay
= true;
4017 /* Emit code to make a call to a constant function or a library call.
4019 INSNS is a list containing all insns emitted in the call.
4020 These insns leave the result in RESULT. Our block is to copy RESULT
4021 to TARGET, which is logically equivalent to EQUIV.
4023 We first emit any insns that set a pseudo on the assumption that these are
4024 loading constants into registers; doing so allows them to be safely cse'ed
4025 between blocks. Then we emit all the other insns in the block, followed by
4026 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
4027 note with an operand of EQUIV. */
4030 emit_libcall_block_1 (rtx_insn
*insns
, rtx target
, rtx result
, rtx equiv
,
4031 bool equiv_may_trap
)
4033 rtx final_dest
= target
;
4034 rtx_insn
*next
, *last
, *insn
;
4036 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
4037 into a MEM later. Protect the libcall block from this change. */
4038 if (! REG_P (target
) || REG_USERVAR_P (target
))
4039 target
= gen_reg_rtx (GET_MODE (target
));
4041 /* If we're using non-call exceptions, a libcall corresponding to an
4042 operation that may trap may also trap. */
4043 /* ??? See the comment in front of make_reg_eh_region_note. */
4044 if (cfun
->can_throw_non_call_exceptions
4045 && (equiv_may_trap
|| may_trap_p (equiv
)))
4047 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
4050 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
4053 int lp_nr
= INTVAL (XEXP (note
, 0));
4054 if (lp_nr
== 0 || lp_nr
== INT_MIN
)
4055 remove_note (insn
, note
);
4061 /* Look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
4062 reg note to indicate that this call cannot throw or execute a nonlocal
4063 goto (unless there is already a REG_EH_REGION note, in which case
4065 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
4067 make_reg_eh_region_note_nothrow_nononlocal (insn
);
4070 /* First emit all insns that set pseudos. Remove them from the list as
4071 we go. Avoid insns that set pseudos which were referenced in previous
4072 insns. These can be generated by move_by_pieces, for example,
4073 to update an address. Similarly, avoid insns that reference things
4074 set in previous insns. */
4076 for (insn
= insns
; insn
; insn
= next
)
4078 rtx set
= single_set (insn
);
4080 next
= NEXT_INSN (insn
);
4082 if (set
!= 0 && REG_P (SET_DEST (set
))
4083 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
)
4085 struct no_conflict_data data
;
4087 data
.target
= const0_rtx
;
4091 note_stores (insn
, no_conflict_move_test
, &data
);
4092 if (! data
.must_stay
)
4094 if (PREV_INSN (insn
))
4095 SET_NEXT_INSN (PREV_INSN (insn
)) = next
;
4100 SET_PREV_INSN (next
) = PREV_INSN (insn
);
4106 /* Some ports use a loop to copy large arguments onto the stack.
4107 Don't move anything outside such a loop. */
4112 /* Write the remaining insns followed by the final copy. */
4113 for (insn
= insns
; insn
; insn
= next
)
4115 next
= NEXT_INSN (insn
);
4120 last
= emit_move_insn (target
, result
);
4122 set_dst_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
), target
);
4124 if (final_dest
!= target
)
4125 emit_move_insn (final_dest
, target
);
4129 emit_libcall_block (rtx_insn
*insns
, rtx target
, rtx result
, rtx equiv
)
4131 emit_libcall_block_1 (insns
, target
, result
, equiv
, false);
4134 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
4135 PURPOSE describes how this comparison will be used. CODE is the rtx
4136 comparison code we will be using.
4138 ??? Actually, CODE is slightly weaker than that. A target is still
4139 required to implement all of the normal bcc operations, but not
4140 required to implement all (or any) of the unordered bcc operations. */
4143 can_compare_p (enum rtx_code code
, machine_mode mode
,
4144 enum can_compare_purpose purpose
)
4147 test
= gen_rtx_fmt_ee (code
, mode
, const0_rtx
, const0_rtx
);
4150 enum insn_code icode
;
4152 if (purpose
== ccp_jump
4153 && (icode
= optab_handler (cbranch_optab
, mode
)) != CODE_FOR_nothing
4154 && insn_operand_matches (icode
, 0, test
))
4156 if (purpose
== ccp_store_flag
4157 && (icode
= optab_handler (cstore_optab
, mode
)) != CODE_FOR_nothing
4158 && insn_operand_matches (icode
, 1, test
))
4160 if (purpose
== ccp_cmov
4161 && optab_handler (cmov_optab
, mode
) != CODE_FOR_nothing
)
4164 mode
= GET_MODE_WIDER_MODE (mode
).else_void ();
4165 PUT_MODE (test
, mode
);
4167 while (mode
!= VOIDmode
);
4172 /* Return whether RTL code CODE corresponds to an unsigned optab. */
4175 unsigned_optab_p (enum rtx_code code
)
4177 return code
== LTU
|| code
== LEU
|| code
== GTU
|| code
== GEU
;
4180 /* Return whether the backend-emitted comparison for code CODE, comparing
4181 operands of mode VALUE_MODE and producing a result with MASK_MODE, matches
4182 operand OPNO of pattern ICODE. */
4185 insn_predicate_matches_p (enum insn_code icode
, unsigned int opno
,
4186 enum rtx_code code
, machine_mode mask_mode
,
4187 machine_mode value_mode
)
4189 rtx reg1
= alloca_raw_REG (value_mode
, LAST_VIRTUAL_REGISTER
+ 1);
4190 rtx reg2
= alloca_raw_REG (value_mode
, LAST_VIRTUAL_REGISTER
+ 2);
4191 rtx test
= alloca_rtx_fmt_ee (code
, mask_mode
, reg1
, reg2
);
4192 return insn_operand_matches (icode
, opno
, test
);
4195 /* Return whether the backend can emit a vector comparison (vec_cmp/vec_cmpu)
4196 for code CODE, comparing operands of mode VALUE_MODE and producing a result
4200 can_vec_cmp_compare_p (enum rtx_code code
, machine_mode value_mode
,
4201 machine_mode mask_mode
)
4203 enum insn_code icode
4204 = get_vec_cmp_icode (value_mode
, mask_mode
, unsigned_optab_p (code
));
4205 if (icode
== CODE_FOR_nothing
)
4208 return insn_predicate_matches_p (icode
, 1, code
, mask_mode
, value_mode
);
4211 /* Return whether the backend can emit a vector comparison (vcond/vcondu) for
4212 code CODE, comparing operands of mode CMP_OP_MODE and producing a result
4216 can_vcond_compare_p (enum rtx_code code
, machine_mode value_mode
,
4217 machine_mode cmp_op_mode
)
4219 enum insn_code icode
4220 = get_vcond_icode (value_mode
, cmp_op_mode
, unsigned_optab_p (code
));
4221 if (icode
== CODE_FOR_nothing
)
4224 return insn_predicate_matches_p (icode
, 3, code
, value_mode
, cmp_op_mode
);
4227 /* Return whether the backend can emit vector set instructions for inserting
4228 element into vector at variable index position. */
4231 can_vec_set_var_idx_p (machine_mode vec_mode
)
4233 if (!VECTOR_MODE_P (vec_mode
))
4236 machine_mode inner_mode
= GET_MODE_INNER (vec_mode
);
4237 rtx reg1
= alloca_raw_REG (vec_mode
, LAST_VIRTUAL_REGISTER
+ 1);
4238 rtx reg2
= alloca_raw_REG (inner_mode
, LAST_VIRTUAL_REGISTER
+ 2);
4239 rtx reg3
= alloca_raw_REG (VOIDmode
, LAST_VIRTUAL_REGISTER
+ 3);
4241 enum insn_code icode
= optab_handler (vec_set_optab
, vec_mode
);
4243 return icode
!= CODE_FOR_nothing
&& insn_operand_matches (icode
, 0, reg1
)
4244 && insn_operand_matches (icode
, 1, reg2
)
4245 && insn_operand_matches (icode
, 2, reg3
);
4248 /* This function is called when we are going to emit a compare instruction that
4249 compares the values found in X and Y, using the rtl operator COMPARISON.
4251 If they have mode BLKmode, then SIZE specifies the size of both operands.
4253 UNSIGNEDP nonzero says that the operands are unsigned;
4254 this matters if they need to be widened (as given by METHODS).
4256 *PTEST is where the resulting comparison RTX is returned or NULL_RTX
4257 if we failed to produce one.
4259 *PMODE is the mode of the inputs (in case they are const_int).
4261 This function performs all the setup necessary so that the caller only has
4262 to emit a single comparison insn. This setup can involve doing a BLKmode
4263 comparison or emitting a library call to perform the comparison if no insn
4264 is available to handle it.
4265 The values which are passed in through pointers can be modified; the caller
4266 should perform the comparison on the modified values. Constant
4267 comparisons must have already been folded. */
4270 prepare_cmp_insn (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
4271 int unsignedp
, enum optab_methods methods
,
4272 rtx
*ptest
, machine_mode
*pmode
)
4274 machine_mode mode
= *pmode
;
4276 machine_mode cmp_mode
;
4277 enum mode_class mclass
;
4279 /* The other methods are not needed. */
4280 gcc_assert (methods
== OPTAB_DIRECT
|| methods
== OPTAB_WIDEN
4281 || methods
== OPTAB_LIB_WIDEN
);
4283 if (CONST_SCALAR_INT_P (y
))
4284 canonicalize_comparison (mode
, &comparison
, &y
);
4286 /* If we are optimizing, force expensive constants into a register. */
4287 if (CONSTANT_P (x
) && optimize
4288 && (rtx_cost (x
, mode
, COMPARE
, 0, optimize_insn_for_speed_p ())
4289 > COSTS_N_INSNS (1)))
4290 x
= force_reg (mode
, x
);
4292 if (CONSTANT_P (y
) && optimize
4293 && (rtx_cost (y
, mode
, COMPARE
, 1, optimize_insn_for_speed_p ())
4294 > COSTS_N_INSNS (1)))
4295 y
= force_reg (mode
, y
);
4297 /* Don't let both operands fail to indicate the mode. */
4298 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
4299 x
= force_reg (mode
, x
);
4300 if (mode
== VOIDmode
)
4301 mode
= GET_MODE (x
) != VOIDmode
? GET_MODE (x
) : GET_MODE (y
);
4303 /* Handle all BLKmode compares. */
4305 if (mode
== BLKmode
)
4307 machine_mode result_mode
;
4308 enum insn_code cmp_code
;
4311 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
4315 /* Try to use a memory block compare insn - either cmpstr
4316 or cmpmem will do. */
4317 opt_scalar_int_mode cmp_mode_iter
;
4318 FOR_EACH_MODE_IN_CLASS (cmp_mode_iter
, MODE_INT
)
4320 scalar_int_mode cmp_mode
= cmp_mode_iter
.require ();
4321 cmp_code
= direct_optab_handler (cmpmem_optab
, cmp_mode
);
4322 if (cmp_code
== CODE_FOR_nothing
)
4323 cmp_code
= direct_optab_handler (cmpstr_optab
, cmp_mode
);
4324 if (cmp_code
== CODE_FOR_nothing
)
4325 cmp_code
= direct_optab_handler (cmpstrn_optab
, cmp_mode
);
4326 if (cmp_code
== CODE_FOR_nothing
)
4329 /* Must make sure the size fits the insn's mode. */
4330 if (CONST_INT_P (size
)
4331 ? UINTVAL (size
) > GET_MODE_MASK (cmp_mode
)
4332 : (GET_MODE_BITSIZE (as_a
<scalar_int_mode
> (GET_MODE (size
)))
4333 > GET_MODE_BITSIZE (cmp_mode
)))
4336 result_mode
= insn_data
[cmp_code
].operand
[0].mode
;
4337 result
= gen_reg_rtx (result_mode
);
4338 size
= convert_to_mode (cmp_mode
, size
, 1);
4339 emit_insn (GEN_FCN (cmp_code
) (result
, x
, y
, size
, opalign
));
4341 *ptest
= gen_rtx_fmt_ee (comparison
, VOIDmode
, result
, const0_rtx
);
4342 *pmode
= result_mode
;
4346 if (methods
!= OPTAB_LIB
&& methods
!= OPTAB_LIB_WIDEN
)
4349 /* Otherwise call a library function. */
4350 result
= emit_block_comp_via_libcall (x
, y
, size
);
4354 mode
= TYPE_MODE (integer_type_node
);
4355 methods
= OPTAB_LIB_WIDEN
;
4359 /* Don't allow operands to the compare to trap, as that can put the
4360 compare and branch in different basic blocks. */
4361 if (cfun
->can_throw_non_call_exceptions
)
4364 x
= copy_to_reg (x
);
4366 y
= copy_to_reg (y
);
4369 if (GET_MODE_CLASS (mode
) == MODE_CC
)
4371 enum insn_code icode
= optab_handler (cbranch_optab
, CCmode
);
4372 test
= gen_rtx_fmt_ee (comparison
, VOIDmode
, x
, y
);
4373 gcc_assert (icode
!= CODE_FOR_nothing
4374 && insn_operand_matches (icode
, 0, test
));
4379 mclass
= GET_MODE_CLASS (mode
);
4380 test
= gen_rtx_fmt_ee (comparison
, VOIDmode
, x
, y
);
4381 FOR_EACH_MODE_FROM (cmp_mode
, mode
)
4383 enum insn_code icode
;
4384 icode
= optab_handler (cbranch_optab
, cmp_mode
);
4385 if (icode
!= CODE_FOR_nothing
4386 && insn_operand_matches (icode
, 0, test
))
4388 rtx_insn
*last
= get_last_insn ();
4389 rtx op0
= prepare_operand (icode
, x
, 1, mode
, cmp_mode
, unsignedp
);
4390 rtx op1
= prepare_operand (icode
, y
, 2, mode
, cmp_mode
, unsignedp
);
4392 && insn_operand_matches (icode
, 1, op0
)
4393 && insn_operand_matches (icode
, 2, op1
))
4395 XEXP (test
, 0) = op0
;
4396 XEXP (test
, 1) = op1
;
4401 delete_insns_since (last
);
4404 if (methods
== OPTAB_DIRECT
|| !CLASS_HAS_WIDER_MODES_P (mclass
))
4408 if (methods
!= OPTAB_LIB_WIDEN
)
4411 if (SCALAR_FLOAT_MODE_P (mode
))
4413 /* Small trick if UNORDERED isn't implemented by the hardware. */
4414 if (comparison
== UNORDERED
&& rtx_equal_p (x
, y
))
4416 prepare_cmp_insn (x
, y
, UNLT
, NULL_RTX
, unsignedp
, OPTAB_WIDEN
,
4422 prepare_float_lib_cmp (x
, y
, comparison
, ptest
, pmode
);
4427 machine_mode ret_mode
;
4429 /* Handle a libcall just for the mode we are using. */
4430 libfunc
= optab_libfunc (cmp_optab
, mode
);
4431 gcc_assert (libfunc
);
4433 /* If we want unsigned, and this mode has a distinct unsigned
4434 comparison routine, use that. */
4437 rtx ulibfunc
= optab_libfunc (ucmp_optab
, mode
);
4442 ret_mode
= targetm
.libgcc_cmp_return_mode ();
4443 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4444 ret_mode
, x
, mode
, y
, mode
);
4446 /* There are two kinds of comparison routines. Biased routines
4447 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4448 of gcc expect that the comparison operation is equivalent
4449 to the modified comparison. For signed comparisons compare the
4450 result against 1 in the biased case, and zero in the unbiased
4451 case. For unsigned comparisons always compare against 1 after
4452 biasing the unbiased result by adding 1. This gives us a way to
4454 The comparisons in the fixed-point helper library are always
4459 if (!TARGET_LIB_INT_CMP_BIASED
&& !ALL_FIXED_POINT_MODE_P (mode
))
4462 x
= plus_constant (ret_mode
, result
, 1);
4468 prepare_cmp_insn (x
, y
, comparison
, NULL_RTX
, unsignedp
, methods
,
4478 /* Before emitting an insn with code ICODE, make sure that X, which is going
4479 to be used for operand OPNUM of the insn, is converted from mode MODE to
4480 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4481 that it is accepted by the operand predicate. Return the new value. */
4484 prepare_operand (enum insn_code icode
, rtx x
, int opnum
, machine_mode mode
,
4485 machine_mode wider_mode
, int unsignedp
)
4487 if (mode
!= wider_mode
)
4488 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
4490 if (!insn_operand_matches (icode
, opnum
, x
))
4492 machine_mode op_mode
= insn_data
[(int) icode
].operand
[opnum
].mode
;
4493 if (reload_completed
)
4495 if (GET_MODE (x
) != op_mode
&& GET_MODE (x
) != VOIDmode
)
4497 x
= copy_to_mode_reg (op_mode
, x
);
4503 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4504 we can do the branch. */
4507 emit_cmp_and_jump_insn_1 (rtx test
, machine_mode mode
, rtx label
,
4508 profile_probability prob
)
4510 machine_mode optab_mode
;
4511 enum mode_class mclass
;
4512 enum insn_code icode
;
4515 mclass
= GET_MODE_CLASS (mode
);
4516 optab_mode
= (mclass
== MODE_CC
) ? CCmode
: mode
;
4517 icode
= optab_handler (cbranch_optab
, optab_mode
);
4519 gcc_assert (icode
!= CODE_FOR_nothing
);
4520 gcc_assert (insn_operand_matches (icode
, 0, test
));
4521 insn
= emit_jump_insn (GEN_FCN (icode
) (test
, XEXP (test
, 0),
4522 XEXP (test
, 1), label
));
4523 if (prob
.initialized_p ()
4524 && profile_status_for_fn (cfun
) != PROFILE_ABSENT
4527 && any_condjump_p (insn
)
4528 && !find_reg_note (insn
, REG_BR_PROB
, 0))
4529 add_reg_br_prob_note (insn
, prob
);
4532 /* Generate code to compare X with Y so that the condition codes are
4533 set and to jump to LABEL if the condition is true. If X is a
4534 constant and Y is not a constant, then the comparison is swapped to
4535 ensure that the comparison RTL has the canonical form.
4537 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4538 need to be widened. UNSIGNEDP is also used to select the proper
4539 branch condition code.
4541 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4543 MODE is the mode of the inputs (in case they are const_int).
4545 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
4546 It will be potentially converted into an unsigned variant based on
4547 UNSIGNEDP to select a proper jump instruction.
4549 PROB is the probability of jumping to LABEL. */
4552 emit_cmp_and_jump_insns (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
4553 machine_mode mode
, int unsignedp
, rtx label
,
4554 profile_probability prob
)
4556 rtx op0
= x
, op1
= y
;
4559 /* Swap operands and condition to ensure canonical RTL. */
4560 if (swap_commutative_operands_p (x
, y
)
4561 && can_compare_p (swap_condition (comparison
), mode
, ccp_jump
))
4564 comparison
= swap_condition (comparison
);
4567 /* If OP0 is still a constant, then both X and Y must be constants
4568 or the opposite comparison is not supported. Force X into a register
4569 to create canonical RTL. */
4570 if (CONSTANT_P (op0
))
4571 op0
= force_reg (mode
, op0
);
4574 comparison
= unsigned_condition (comparison
);
4576 prepare_cmp_insn (op0
, op1
, comparison
, size
, unsignedp
, OPTAB_LIB_WIDEN
,
4578 emit_cmp_and_jump_insn_1 (test
, mode
, label
, prob
);
4582 /* Emit a library call comparison between floating point X and Y.
4583 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4586 prepare_float_lib_cmp (rtx x
, rtx y
, enum rtx_code comparison
,
4587 rtx
*ptest
, machine_mode
*pmode
)
4589 enum rtx_code swapped
= swap_condition (comparison
);
4590 enum rtx_code reversed
= reverse_condition_maybe_unordered (comparison
);
4591 machine_mode orig_mode
= GET_MODE (x
);
4593 rtx true_rtx
, false_rtx
;
4594 rtx value
, target
, equiv
;
4597 bool reversed_p
= false;
4598 scalar_int_mode cmp_mode
= targetm
.libgcc_cmp_return_mode ();
4600 FOR_EACH_MODE_FROM (mode
, orig_mode
)
4602 if (code_to_optab (comparison
)
4603 && (libfunc
= optab_libfunc (code_to_optab (comparison
), mode
)))
4606 if (code_to_optab (swapped
)
4607 && (libfunc
= optab_libfunc (code_to_optab (swapped
), mode
)))
4610 comparison
= swapped
;
4614 if (code_to_optab (reversed
)
4615 && (libfunc
= optab_libfunc (code_to_optab (reversed
), mode
)))
4617 comparison
= reversed
;
4623 gcc_assert (mode
!= VOIDmode
);
4625 if (mode
!= orig_mode
)
4627 x
= convert_to_mode (mode
, x
, 0);
4628 y
= convert_to_mode (mode
, y
, 0);
4631 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4632 the RTL. The allows the RTL optimizers to delete the libcall if the
4633 condition can be determined at compile-time. */
4634 if (comparison
== UNORDERED
4635 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4637 true_rtx
= const_true_rtx
;
4638 false_rtx
= const0_rtx
;
4645 true_rtx
= const0_rtx
;
4646 false_rtx
= const_true_rtx
;
4650 true_rtx
= const_true_rtx
;
4651 false_rtx
= const0_rtx
;
4655 true_rtx
= const1_rtx
;
4656 false_rtx
= const0_rtx
;
4660 true_rtx
= const0_rtx
;
4661 false_rtx
= constm1_rtx
;
4665 true_rtx
= constm1_rtx
;
4666 false_rtx
= const0_rtx
;
4670 true_rtx
= const0_rtx
;
4671 false_rtx
= const1_rtx
;
4679 if (comparison
== UNORDERED
)
4681 rtx temp
= simplify_gen_relational (NE
, cmp_mode
, mode
, x
, x
);
4682 equiv
= simplify_gen_relational (NE
, cmp_mode
, mode
, y
, y
);
4683 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, cmp_mode
, cmp_mode
,
4684 temp
, const_true_rtx
, equiv
);
4688 equiv
= simplify_gen_relational (comparison
, cmp_mode
, mode
, x
, y
);
4689 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
4690 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, cmp_mode
, cmp_mode
,
4691 equiv
, true_rtx
, false_rtx
);
4695 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4696 cmp_mode
, x
, mode
, y
, mode
);
4697 insns
= get_insns ();
4700 target
= gen_reg_rtx (cmp_mode
);
4701 emit_libcall_block (insns
, target
, value
, equiv
);
4703 if (comparison
== UNORDERED
4704 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
)
4706 *ptest
= gen_rtx_fmt_ee (reversed_p
? EQ
: NE
, VOIDmode
, target
, false_rtx
);
4708 *ptest
= gen_rtx_fmt_ee (comparison
, VOIDmode
, target
, const0_rtx
);
4713 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4716 emit_indirect_jump (rtx loc
)
4718 if (!targetm
.have_indirect_jump ())
4719 sorry ("indirect jumps are not available on this target");
4722 class expand_operand ops
[1];
4723 create_address_operand (&ops
[0], loc
);
4724 expand_jump_insn (targetm
.code_for_indirect_jump
, 1, ops
);
4730 /* Emit a conditional move instruction if the machine supports one for that
4731 condition and machine mode.
4733 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4734 the mode to use should they be constants. If it is VOIDmode, they cannot
4737 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4738 should be stored there. MODE is the mode to use should they be constants.
4739 If it is VOIDmode, they cannot both be constants.
4741 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4742 is not supported. */
4745 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4746 machine_mode cmode
, rtx op2
, rtx op3
,
4747 machine_mode mode
, int unsignedp
)
4751 enum insn_code icode
;
4752 enum rtx_code reversed
;
4754 /* If the two source operands are identical, that's just a move. */
4756 if (rtx_equal_p (op2
, op3
))
4759 target
= gen_reg_rtx (mode
);
4761 emit_move_insn (target
, op3
);
4765 /* If one operand is constant, make it the second one. Only do this
4766 if the other operand is not constant as well. */
4768 if (swap_commutative_operands_p (op0
, op1
))
4770 std::swap (op0
, op1
);
4771 code
= swap_condition (code
);
4774 /* get_condition will prefer to generate LT and GT even if the old
4775 comparison was against zero, so undo that canonicalization here since
4776 comparisons against zero are cheaper. */
4777 if (code
== LT
&& op1
== const1_rtx
)
4778 code
= LE
, op1
= const0_rtx
;
4779 else if (code
== GT
&& op1
== constm1_rtx
)
4780 code
= GE
, op1
= const0_rtx
;
4782 if (cmode
== VOIDmode
)
4783 cmode
= GET_MODE (op0
);
4785 enum rtx_code orig_code
= code
;
4786 bool swapped
= false;
4787 if (swap_commutative_operands_p (op2
, op3
)
4788 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
4791 std::swap (op2
, op3
);
4796 if (mode
== VOIDmode
)
4797 mode
= GET_MODE (op2
);
4799 icode
= direct_optab_handler (movcc_optab
, mode
);
4801 if (icode
== CODE_FOR_nothing
)
4805 target
= gen_reg_rtx (mode
);
4807 for (int pass
= 0; ; pass
++)
4809 code
= unsignedp
? unsigned_condition (code
) : code
;
4810 comparison
= simplify_gen_relational (code
, VOIDmode
, cmode
, op0
, op1
);
4812 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4813 punt and let the caller figure out how best to deal with this
4815 if (COMPARISON_P (comparison
))
4817 saved_pending_stack_adjust save
;
4818 save_pending_stack_adjust (&save
);
4819 last
= get_last_insn ();
4820 do_pending_stack_adjust ();
4821 machine_mode cmpmode
= cmode
;
4822 prepare_cmp_insn (XEXP (comparison
, 0), XEXP (comparison
, 1),
4823 GET_CODE (comparison
), NULL_RTX
, unsignedp
,
4824 OPTAB_WIDEN
, &comparison
, &cmpmode
);
4827 class expand_operand ops
[4];
4829 create_output_operand (&ops
[0], target
, mode
);
4830 create_fixed_operand (&ops
[1], comparison
);
4831 create_input_operand (&ops
[2], op2
, mode
);
4832 create_input_operand (&ops
[3], op3
, mode
);
4833 if (maybe_expand_insn (icode
, 4, ops
))
4835 if (ops
[0].value
!= target
)
4836 convert_move (target
, ops
[0].value
, false);
4840 delete_insns_since (last
);
4841 restore_pending_stack_adjust (&save
);
4847 /* If the preferred op2/op3 order is not usable, retry with other
4848 operand order, perhaps it will expand successfully. */
4851 else if ((reversed
= reversed_comparison_code_parts (orig_code
, op0
, op1
,
4857 std::swap (op2
, op3
);
4862 /* Emit a conditional negate or bitwise complement using the
4863 negcc or notcc optabs if available. Return NULL_RTX if such operations
4864 are not available. Otherwise return the RTX holding the result.
4865 TARGET is the desired destination of the result. COMP is the comparison
4866 on which to negate. If COND is true move into TARGET the negation
4867 or bitwise complement of OP1. Otherwise move OP2 into TARGET.
4868 CODE is either NEG or NOT. MODE is the machine mode in which the
4869 operation is performed. */
4872 emit_conditional_neg_or_complement (rtx target
, rtx_code code
,
4873 machine_mode mode
, rtx cond
, rtx op1
,
4876 optab op
= unknown_optab
;
4879 else if (code
== NOT
)
4884 insn_code icode
= direct_optab_handler (op
, mode
);
4886 if (icode
== CODE_FOR_nothing
)
4890 target
= gen_reg_rtx (mode
);
4892 rtx_insn
*last
= get_last_insn ();
4893 class expand_operand ops
[4];
4895 create_output_operand (&ops
[0], target
, mode
);
4896 create_fixed_operand (&ops
[1], cond
);
4897 create_input_operand (&ops
[2], op1
, mode
);
4898 create_input_operand (&ops
[3], op2
, mode
);
4900 if (maybe_expand_insn (icode
, 4, ops
))
4902 if (ops
[0].value
!= target
)
4903 convert_move (target
, ops
[0].value
, false);
4907 delete_insns_since (last
);
4911 /* Emit a conditional addition instruction if the machine supports one for that
4912 condition and machine mode.
4914 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4915 the mode to use should they be constants. If it is VOIDmode, they cannot
4918 OP2 should be stored in TARGET if the comparison is false, otherwise OP2+OP3
4919 should be stored there. MODE is the mode to use should they be constants.
4920 If it is VOIDmode, they cannot both be constants.
4922 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4923 is not supported. */
4926 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
4927 machine_mode cmode
, rtx op2
, rtx op3
,
4928 machine_mode mode
, int unsignedp
)
4932 enum insn_code icode
;
4934 /* If one operand is constant, make it the second one. Only do this
4935 if the other operand is not constant as well. */
4937 if (swap_commutative_operands_p (op0
, op1
))
4939 std::swap (op0
, op1
);
4940 code
= swap_condition (code
);
4943 /* get_condition will prefer to generate LT and GT even if the old
4944 comparison was against zero, so undo that canonicalization here since
4945 comparisons against zero are cheaper. */
4946 if (code
== LT
&& op1
== const1_rtx
)
4947 code
= LE
, op1
= const0_rtx
;
4948 else if (code
== GT
&& op1
== constm1_rtx
)
4949 code
= GE
, op1
= const0_rtx
;
4951 if (cmode
== VOIDmode
)
4952 cmode
= GET_MODE (op0
);
4954 if (mode
== VOIDmode
)
4955 mode
= GET_MODE (op2
);
4957 icode
= optab_handler (addcc_optab
, mode
);
4959 if (icode
== CODE_FOR_nothing
)
4963 target
= gen_reg_rtx (mode
);
4965 code
= unsignedp
? unsigned_condition (code
) : code
;
4966 comparison
= simplify_gen_relational (code
, VOIDmode
, cmode
, op0
, op1
);
4968 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4969 return NULL and let the caller figure out how best to deal with this
4971 if (!COMPARISON_P (comparison
))
4974 do_pending_stack_adjust ();
4975 last
= get_last_insn ();
4976 prepare_cmp_insn (XEXP (comparison
, 0), XEXP (comparison
, 1),
4977 GET_CODE (comparison
), NULL_RTX
, unsignedp
, OPTAB_WIDEN
,
4978 &comparison
, &cmode
);
4981 class expand_operand ops
[4];
4983 create_output_operand (&ops
[0], target
, mode
);
4984 create_fixed_operand (&ops
[1], comparison
);
4985 create_input_operand (&ops
[2], op2
, mode
);
4986 create_input_operand (&ops
[3], op3
, mode
);
4987 if (maybe_expand_insn (icode
, 4, ops
))
4989 if (ops
[0].value
!= target
)
4990 convert_move (target
, ops
[0].value
, false);
4994 delete_insns_since (last
);
4998 /* These functions attempt to generate an insn body, rather than
4999 emitting the insn, but if the gen function already emits them, we
5000 make no attempt to turn them back into naked patterns. */
5002 /* Generate and return an insn body to add Y to X. */
5005 gen_add2_insn (rtx x
, rtx y
)
5007 enum insn_code icode
= optab_handler (add_optab
, GET_MODE (x
));
5009 gcc_assert (insn_operand_matches (icode
, 0, x
));
5010 gcc_assert (insn_operand_matches (icode
, 1, x
));
5011 gcc_assert (insn_operand_matches (icode
, 2, y
));
5013 return GEN_FCN (icode
) (x
, x
, y
);
5016 /* Generate and return an insn body to add r1 and c,
5017 storing the result in r0. */
5020 gen_add3_insn (rtx r0
, rtx r1
, rtx c
)
5022 enum insn_code icode
= optab_handler (add_optab
, GET_MODE (r0
));
5024 if (icode
== CODE_FOR_nothing
5025 || !insn_operand_matches (icode
, 0, r0
)
5026 || !insn_operand_matches (icode
, 1, r1
)
5027 || !insn_operand_matches (icode
, 2, c
))
5030 return GEN_FCN (icode
) (r0
, r1
, c
);
5034 have_add2_insn (rtx x
, rtx y
)
5036 enum insn_code icode
;
5038 gcc_assert (GET_MODE (x
) != VOIDmode
);
5040 icode
= optab_handler (add_optab
, GET_MODE (x
));
5042 if (icode
== CODE_FOR_nothing
)
5045 if (!insn_operand_matches (icode
, 0, x
)
5046 || !insn_operand_matches (icode
, 1, x
)
5047 || !insn_operand_matches (icode
, 2, y
))
5053 /* Generate and return an insn body to add Y to X. */
5056 gen_addptr3_insn (rtx x
, rtx y
, rtx z
)
5058 enum insn_code icode
= optab_handler (addptr3_optab
, GET_MODE (x
));
5060 gcc_assert (insn_operand_matches (icode
, 0, x
));
5061 gcc_assert (insn_operand_matches (icode
, 1, y
));
5062 gcc_assert (insn_operand_matches (icode
, 2, z
));
5064 return GEN_FCN (icode
) (x
, y
, z
);
5067 /* Return true if the target implements an addptr pattern and X, Y,
5068 and Z are valid for the pattern predicates. */
5071 have_addptr3_insn (rtx x
, rtx y
, rtx z
)
5073 enum insn_code icode
;
5075 gcc_assert (GET_MODE (x
) != VOIDmode
);
5077 icode
= optab_handler (addptr3_optab
, GET_MODE (x
));
5079 if (icode
== CODE_FOR_nothing
)
5082 if (!insn_operand_matches (icode
, 0, x
)
5083 || !insn_operand_matches (icode
, 1, y
)
5084 || !insn_operand_matches (icode
, 2, z
))
5090 /* Generate and return an insn body to subtract Y from X. */
5093 gen_sub2_insn (rtx x
, rtx y
)
5095 enum insn_code icode
= optab_handler (sub_optab
, GET_MODE (x
));
5097 gcc_assert (insn_operand_matches (icode
, 0, x
));
5098 gcc_assert (insn_operand_matches (icode
, 1, x
));
5099 gcc_assert (insn_operand_matches (icode
, 2, y
));
5101 return GEN_FCN (icode
) (x
, x
, y
);
5104 /* Generate and return an insn body to subtract r1 and c,
5105 storing the result in r0. */
5108 gen_sub3_insn (rtx r0
, rtx r1
, rtx c
)
5110 enum insn_code icode
= optab_handler (sub_optab
, GET_MODE (r0
));
5112 if (icode
== CODE_FOR_nothing
5113 || !insn_operand_matches (icode
, 0, r0
)
5114 || !insn_operand_matches (icode
, 1, r1
)
5115 || !insn_operand_matches (icode
, 2, c
))
5118 return GEN_FCN (icode
) (r0
, r1
, c
);
5122 have_sub2_insn (rtx x
, rtx y
)
5124 enum insn_code icode
;
5126 gcc_assert (GET_MODE (x
) != VOIDmode
);
5128 icode
= optab_handler (sub_optab
, GET_MODE (x
));
5130 if (icode
== CODE_FOR_nothing
)
5133 if (!insn_operand_matches (icode
, 0, x
)
5134 || !insn_operand_matches (icode
, 1, x
)
5135 || !insn_operand_matches (icode
, 2, y
))
5141 /* Generate the body of an insn to extend Y (with mode MFROM)
5142 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
5145 gen_extend_insn (rtx x
, rtx y
, machine_mode mto
,
5146 machine_mode mfrom
, int unsignedp
)
5148 enum insn_code icode
= can_extend_p (mto
, mfrom
, unsignedp
);
5149 return GEN_FCN (icode
) (x
, y
);
5152 /* Generate code to convert FROM to floating point
5153 and store in TO. FROM must be fixed point and not VOIDmode.
5154 UNSIGNEDP nonzero means regard FROM as unsigned.
5155 Normally this is done by correcting the final value
5156 if it is negative. */
5159 expand_float (rtx to
, rtx from
, int unsignedp
)
5161 enum insn_code icode
;
5163 scalar_mode from_mode
, to_mode
;
5164 machine_mode fmode
, imode
;
5165 bool can_do_signed
= false;
5167 /* Crash now, because we won't be able to decide which mode to use. */
5168 gcc_assert (GET_MODE (from
) != VOIDmode
);
5170 /* Look for an insn to do the conversion. Do it in the specified
5171 modes if possible; otherwise convert either input, output or both to
5172 wider mode. If the integer mode is wider than the mode of FROM,
5173 we can do the conversion signed even if the input is unsigned. */
5175 FOR_EACH_MODE_FROM (fmode
, GET_MODE (to
))
5176 FOR_EACH_MODE_FROM (imode
, GET_MODE (from
))
5178 int doing_unsigned
= unsignedp
;
5180 if (fmode
!= GET_MODE (to
)
5181 && (significand_size (fmode
)
5182 < GET_MODE_UNIT_PRECISION (GET_MODE (from
))))
5185 icode
= can_float_p (fmode
, imode
, unsignedp
);
5186 if (icode
== CODE_FOR_nothing
&& unsignedp
)
5188 enum insn_code scode
= can_float_p (fmode
, imode
, 0);
5189 if (scode
!= CODE_FOR_nothing
)
5190 can_do_signed
= true;
5191 if (imode
!= GET_MODE (from
))
5192 icode
= scode
, doing_unsigned
= 0;
5195 if (icode
!= CODE_FOR_nothing
)
5197 if (imode
!= GET_MODE (from
))
5198 from
= convert_to_mode (imode
, from
, unsignedp
);
5200 if (fmode
!= GET_MODE (to
))
5201 target
= gen_reg_rtx (fmode
);
5203 emit_unop_insn (icode
, target
, from
,
5204 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
5207 convert_move (to
, target
, 0);
5212 /* Unsigned integer, and no way to convert directly. Convert as signed,
5213 then unconditionally adjust the result. */
5216 && is_a
<scalar_mode
> (GET_MODE (to
), &to_mode
)
5217 && is_a
<scalar_mode
> (GET_MODE (from
), &from_mode
))
5219 opt_scalar_mode fmode_iter
;
5220 rtx_code_label
*label
= gen_label_rtx ();
5222 REAL_VALUE_TYPE offset
;
5224 /* Look for a usable floating mode FMODE wider than the source and at
5225 least as wide as the target. Using FMODE will avoid rounding woes
5226 with unsigned values greater than the signed maximum value. */
5228 FOR_EACH_MODE_FROM (fmode_iter
, to_mode
)
5230 scalar_mode fmode
= fmode_iter
.require ();
5231 if (GET_MODE_PRECISION (from_mode
) < GET_MODE_BITSIZE (fmode
)
5232 && can_float_p (fmode
, from_mode
, 0) != CODE_FOR_nothing
)
5236 if (!fmode_iter
.exists (&fmode
))
5238 /* There is no such mode. Pretend the target is wide enough. */
5241 /* Avoid double-rounding when TO is narrower than FROM. */
5242 if ((significand_size (fmode
) + 1)
5243 < GET_MODE_PRECISION (from_mode
))
5246 rtx_code_label
*neglabel
= gen_label_rtx ();
5248 /* Don't use TARGET if it isn't a register, is a hard register,
5249 or is the wrong mode. */
5251 || REGNO (target
) < FIRST_PSEUDO_REGISTER
5252 || GET_MODE (target
) != fmode
)
5253 target
= gen_reg_rtx (fmode
);
5256 do_pending_stack_adjust ();
5258 /* Test whether the sign bit is set. */
5259 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
5262 /* The sign bit is not set. Convert as signed. */
5263 expand_float (target
, from
, 0);
5264 emit_jump_insn (targetm
.gen_jump (label
));
5267 /* The sign bit is set.
5268 Convert to a usable (positive signed) value by shifting right
5269 one bit, while remembering if a nonzero bit was shifted
5270 out; i.e., compute (from & 1) | (from >> 1). */
5272 emit_label (neglabel
);
5273 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
5274 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
5275 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, 1, NULL_RTX
, 1);
5276 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
5278 expand_float (target
, temp
, 0);
5280 /* Multiply by 2 to undo the shift above. */
5281 temp
= expand_binop (fmode
, add_optab
, target
, target
,
5282 target
, 0, OPTAB_LIB_WIDEN
);
5284 emit_move_insn (target
, temp
);
5286 do_pending_stack_adjust ();
5292 /* If we are about to do some arithmetic to correct for an
5293 unsigned operand, do it in a pseudo-register. */
5295 if (to_mode
!= fmode
5296 || !REG_P (to
) || REGNO (to
) < FIRST_PSEUDO_REGISTER
)
5297 target
= gen_reg_rtx (fmode
);
5299 /* Convert as signed integer to floating. */
5300 expand_float (target
, from
, 0);
5302 /* If FROM is negative (and therefore TO is negative),
5303 correct its value by 2**bitwidth. */
5305 do_pending_stack_adjust ();
5306 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, from_mode
,
5310 real_2expN (&offset
, GET_MODE_PRECISION (from_mode
), fmode
);
5311 temp
= expand_binop (fmode
, add_optab
, target
,
5312 const_double_from_real_value (offset
, fmode
),
5313 target
, 0, OPTAB_LIB_WIDEN
);
5315 emit_move_insn (target
, temp
);
5317 do_pending_stack_adjust ();
5322 /* No hardware instruction available; call a library routine. */
5327 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
5329 if (is_narrower_int_mode (GET_MODE (from
), SImode
))
5330 from
= convert_to_mode (SImode
, from
, unsignedp
);
5332 libfunc
= convert_optab_libfunc (tab
, GET_MODE (to
), GET_MODE (from
));
5333 gcc_assert (libfunc
);
5337 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
5338 GET_MODE (to
), from
, GET_MODE (from
));
5339 insns
= get_insns ();
5342 emit_libcall_block (insns
, target
, value
,
5343 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FLOAT
: FLOAT
,
5344 GET_MODE (to
), from
));
5349 /* Copy result to requested destination
5350 if we have been computing in a temp location. */
5354 if (GET_MODE (target
) == GET_MODE (to
))
5355 emit_move_insn (to
, target
);
5357 convert_move (to
, target
, 0);
5361 /* Generate code to convert FROM to fixed point and store in TO. FROM
5362 must be floating point. */
5365 expand_fix (rtx to
, rtx from
, int unsignedp
)
5367 enum insn_code icode
;
5369 machine_mode fmode
, imode
;
5370 opt_scalar_mode fmode_iter
;
5371 bool must_trunc
= false;
5373 /* We first try to find a pair of modes, one real and one integer, at
5374 least as wide as FROM and TO, respectively, in which we can open-code
5375 this conversion. If the integer mode is wider than the mode of TO,
5376 we can do the conversion either signed or unsigned. */
5378 FOR_EACH_MODE_FROM (fmode
, GET_MODE (from
))
5379 FOR_EACH_MODE_FROM (imode
, GET_MODE (to
))
5381 int doing_unsigned
= unsignedp
;
5383 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
5384 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
5385 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
5387 if (icode
!= CODE_FOR_nothing
)
5389 rtx_insn
*last
= get_last_insn ();
5390 if (fmode
!= GET_MODE (from
))
5391 from
= convert_to_mode (fmode
, from
, 0);
5395 rtx temp
= gen_reg_rtx (GET_MODE (from
));
5396 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
5400 if (imode
!= GET_MODE (to
))
5401 target
= gen_reg_rtx (imode
);
5403 if (maybe_emit_unop_insn (icode
, target
, from
,
5404 doing_unsigned
? UNSIGNED_FIX
: FIX
))
5407 convert_move (to
, target
, unsignedp
);
5410 delete_insns_since (last
);
5414 /* For an unsigned conversion, there is one more way to do it.
5415 If we have a signed conversion, we generate code that compares
5416 the real value to the largest representable positive number. If if
5417 is smaller, the conversion is done normally. Otherwise, subtract
5418 one plus the highest signed number, convert, and add it back.
5420 We only need to check all real modes, since we know we didn't find
5421 anything with a wider integer mode.
5423 This code used to extend FP value into mode wider than the destination.
5424 This is needed for decimal float modes which cannot accurately
5425 represent one plus the highest signed number of the same size, but
5426 not for binary modes. Consider, for instance conversion from SFmode
5429 The hot path through the code is dealing with inputs smaller than 2^63
5430 and doing just the conversion, so there is no bits to lose.
5432 In the other path we know the value is positive in the range 2^63..2^64-1
5433 inclusive. (as for other input overflow happens and result is undefined)
5434 So we know that the most important bit set in mantissa corresponds to
5435 2^63. The subtraction of 2^63 should not generate any rounding as it
5436 simply clears out that bit. The rest is trivial. */
5438 scalar_int_mode to_mode
;
5440 && is_a
<scalar_int_mode
> (GET_MODE (to
), &to_mode
)
5441 && HWI_COMPUTABLE_MODE_P (to_mode
))
5442 FOR_EACH_MODE_FROM (fmode_iter
, as_a
<scalar_mode
> (GET_MODE (from
)))
5444 scalar_mode fmode
= fmode_iter
.require ();
5445 if (CODE_FOR_nothing
!= can_fix_p (to_mode
, fmode
,
5447 && (!DECIMAL_FLOAT_MODE_P (fmode
)
5448 || (GET_MODE_BITSIZE (fmode
) > GET_MODE_PRECISION (to_mode
))))
5451 REAL_VALUE_TYPE offset
;
5453 rtx_code_label
*lab1
, *lab2
;
5456 bitsize
= GET_MODE_PRECISION (to_mode
);
5457 real_2expN (&offset
, bitsize
- 1, fmode
);
5458 limit
= const_double_from_real_value (offset
, fmode
);
5459 lab1
= gen_label_rtx ();
5460 lab2
= gen_label_rtx ();
5462 if (fmode
!= GET_MODE (from
))
5463 from
= convert_to_mode (fmode
, from
, 0);
5465 /* See if we need to do the subtraction. */
5466 do_pending_stack_adjust ();
5467 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
,
5468 GET_MODE (from
), 0, lab1
);
5470 /* If not, do the signed "fix" and branch around fixup code. */
5471 expand_fix (to
, from
, 0);
5472 emit_jump_insn (targetm
.gen_jump (lab2
));
5475 /* Otherwise, subtract 2**(N-1), convert to signed number,
5476 then add 2**(N-1). Do the addition using XOR since this
5477 will often generate better code. */
5479 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
5480 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
5481 expand_fix (to
, target
, 0);
5482 target
= expand_binop (to_mode
, xor_optab
, to
,
5484 (HOST_WIDE_INT_1
<< (bitsize
- 1),
5486 to
, 1, OPTAB_LIB_WIDEN
);
5489 emit_move_insn (to
, target
);
5493 if (optab_handler (mov_optab
, to_mode
) != CODE_FOR_nothing
)
5495 /* Make a place for a REG_NOTE and add it. */
5496 insn
= emit_move_insn (to
, to
);
5497 set_dst_reg_note (insn
, REG_EQUAL
,
5498 gen_rtx_fmt_e (UNSIGNED_FIX
, to_mode
,
5507 /* We can't do it with an insn, so use a library call. But first ensure
5508 that the mode of TO is at least as wide as SImode, since those are the
5509 only library calls we know about. */
5511 if (is_narrower_int_mode (GET_MODE (to
), SImode
))
5513 target
= gen_reg_rtx (SImode
);
5515 expand_fix (target
, from
, unsignedp
);
5523 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
5524 libfunc
= convert_optab_libfunc (tab
, GET_MODE (to
), GET_MODE (from
));
5525 gcc_assert (libfunc
);
5529 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
5530 GET_MODE (to
), from
, GET_MODE (from
));
5531 insns
= get_insns ();
5534 emit_libcall_block (insns
, target
, value
,
5535 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
5536 GET_MODE (to
), from
));
5541 if (GET_MODE (to
) == GET_MODE (target
))
5542 emit_move_insn (to
, target
);
5544 convert_move (to
, target
, 0);
5549 /* Promote integer arguments for a libcall if necessary.
5550 emit_library_call_value cannot do the promotion because it does not
5551 know if it should do a signed or unsigned promotion. This is because
5552 there are no tree types defined for libcalls. */
5555 prepare_libcall_arg (rtx arg
, int uintp
)
5557 scalar_int_mode mode
;
5558 machine_mode arg_mode
;
5559 if (is_a
<scalar_int_mode
> (GET_MODE (arg
), &mode
))
5561 /* If we need to promote the integer function argument we need to do
5562 it here instead of inside emit_library_call_value because in
5563 emit_library_call_value we don't know if we should do a signed or
5564 unsigned promotion. */
5567 arg_mode
= promote_function_mode (NULL_TREE
, mode
,
5568 &unsigned_p
, NULL_TREE
, 0);
5569 if (arg_mode
!= mode
)
5570 return convert_to_mode (arg_mode
, arg
, uintp
);
5575 /* Generate code to convert FROM or TO a fixed-point.
5576 If UINTP is true, either TO or FROM is an unsigned integer.
5577 If SATP is true, we need to saturate the result. */
5580 expand_fixed_convert (rtx to
, rtx from
, int uintp
, int satp
)
5582 machine_mode to_mode
= GET_MODE (to
);
5583 machine_mode from_mode
= GET_MODE (from
);
5585 enum rtx_code this_code
;
5586 enum insn_code code
;
5591 if (to_mode
== from_mode
)
5593 emit_move_insn (to
, from
);
5599 tab
= satp
? satfractuns_optab
: fractuns_optab
;
5600 this_code
= satp
? UNSIGNED_SAT_FRACT
: UNSIGNED_FRACT_CONVERT
;
5604 tab
= satp
? satfract_optab
: fract_optab
;
5605 this_code
= satp
? SAT_FRACT
: FRACT_CONVERT
;
5607 code
= convert_optab_handler (tab
, to_mode
, from_mode
);
5608 if (code
!= CODE_FOR_nothing
)
5610 emit_unop_insn (code
, to
, from
, this_code
);
5614 libfunc
= convert_optab_libfunc (tab
, to_mode
, from_mode
);
5615 gcc_assert (libfunc
);
5617 from
= prepare_libcall_arg (from
, uintp
);
5618 from_mode
= GET_MODE (from
);
5621 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
, to_mode
,
5623 insns
= get_insns ();
5626 emit_libcall_block (insns
, to
, value
,
5627 gen_rtx_fmt_e (optab_to_code (tab
), to_mode
, from
));
5630 /* Generate code to convert FROM to fixed point and store in TO. FROM
5631 must be floating point, TO must be signed. Use the conversion optab
5632 TAB to do the conversion. */
5635 expand_sfix_optab (rtx to
, rtx from
, convert_optab tab
)
5637 enum insn_code icode
;
5639 machine_mode fmode
, imode
;
5641 /* We first try to find a pair of modes, one real and one integer, at
5642 least as wide as FROM and TO, respectively, in which we can open-code
5643 this conversion. If the integer mode is wider than the mode of TO,
5644 we can do the conversion either signed or unsigned. */
5646 FOR_EACH_MODE_FROM (fmode
, GET_MODE (from
))
5647 FOR_EACH_MODE_FROM (imode
, GET_MODE (to
))
5649 icode
= convert_optab_handler (tab
, imode
, fmode
);
5650 if (icode
!= CODE_FOR_nothing
)
5652 rtx_insn
*last
= get_last_insn ();
5653 if (fmode
!= GET_MODE (from
))
5654 from
= convert_to_mode (fmode
, from
, 0);
5656 if (imode
!= GET_MODE (to
))
5657 target
= gen_reg_rtx (imode
);
5659 if (!maybe_emit_unop_insn (icode
, target
, from
, UNKNOWN
))
5661 delete_insns_since (last
);
5665 convert_move (to
, target
, 0);
5673 /* Report whether we have an instruction to perform the operation
5674 specified by CODE on operands of mode MODE. */
5676 have_insn_for (enum rtx_code code
, machine_mode mode
)
5678 return (code_to_optab (code
)
5679 && (optab_handler (code_to_optab (code
), mode
)
5680 != CODE_FOR_nothing
));
5683 /* Print information about the current contents of the optabs on
5687 debug_optab_libfuncs (void)
5691 /* Dump the arithmetic optabs. */
5692 for (i
= FIRST_NORM_OPTAB
; i
<= LAST_NORMLIB_OPTAB
; ++i
)
5693 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5695 rtx l
= optab_libfunc ((optab
) i
, (machine_mode
) j
);
5698 gcc_assert (GET_CODE (l
) == SYMBOL_REF
);
5699 fprintf (stderr
, "%s\t%s:\t%s\n",
5700 GET_RTX_NAME (optab_to_code ((optab
) i
)),
5706 /* Dump the conversion optabs. */
5707 for (i
= FIRST_CONV_OPTAB
; i
<= LAST_CONVLIB_OPTAB
; ++i
)
5708 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5709 for (k
= 0; k
< NUM_MACHINE_MODES
; ++k
)
5711 rtx l
= convert_optab_libfunc ((optab
) i
, (machine_mode
) j
,
5715 gcc_assert (GET_CODE (l
) == SYMBOL_REF
);
5716 fprintf (stderr
, "%s\t%s\t%s:\t%s\n",
5717 GET_RTX_NAME (optab_to_code ((optab
) i
)),
5725 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5726 CODE. Return 0 on failure. */
5729 gen_cond_trap (enum rtx_code code
, rtx op1
, rtx op2
, rtx tcode
)
5731 machine_mode mode
= GET_MODE (op1
);
5732 enum insn_code icode
;
5736 if (mode
== VOIDmode
)
5739 icode
= optab_handler (ctrap_optab
, mode
);
5740 if (icode
== CODE_FOR_nothing
)
5743 /* Some targets only accept a zero trap code. */
5744 if (!insn_operand_matches (icode
, 3, tcode
))
5747 do_pending_stack_adjust ();
5749 prepare_cmp_insn (op1
, op2
, code
, NULL_RTX
, false, OPTAB_DIRECT
,
5754 insn
= GEN_FCN (icode
) (trap_rtx
, XEXP (trap_rtx
, 0), XEXP (trap_rtx
, 1),
5757 /* If that failed, then give up. */
5765 insn
= get_insns ();
5770 /* Return rtx code for TCODE or UNKNOWN. Use UNSIGNEDP to select signed
5771 or unsigned operation code. */
5774 get_rtx_code_1 (enum tree_code tcode
, bool unsignedp
)
5786 code
= unsignedp
? LTU
: LT
;
5789 code
= unsignedp
? LEU
: LE
;
5792 code
= unsignedp
? GTU
: GT
;
5795 code
= unsignedp
? GEU
: GE
;
5798 case UNORDERED_EXPR
:
5838 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5839 or unsigned operation code. */
5842 get_rtx_code (enum tree_code tcode
, bool unsignedp
)
5844 enum rtx_code code
= get_rtx_code_1 (tcode
, unsignedp
);
5845 gcc_assert (code
!= UNKNOWN
);
5849 /* Return a comparison rtx of mode CMP_MODE for COND. Use UNSIGNEDP to
5850 select signed or unsigned operators. OPNO holds the index of the
5851 first comparison operand for insn ICODE. Do not generate the
5852 compare instruction itself. */
5855 vector_compare_rtx (machine_mode cmp_mode
, enum tree_code tcode
,
5856 tree t_op0
, tree t_op1
, bool unsignedp
,
5857 enum insn_code icode
, unsigned int opno
)
5859 class expand_operand ops
[2];
5860 rtx rtx_op0
, rtx_op1
;
5861 machine_mode m0
, m1
;
5862 enum rtx_code rcode
= get_rtx_code (tcode
, unsignedp
);
5864 gcc_assert (TREE_CODE_CLASS (tcode
) == tcc_comparison
);
5866 /* Expand operands. For vector types with scalar modes, e.g. where int64x1_t
5867 has mode DImode, this can produce a constant RTX of mode VOIDmode; in such
5868 cases, use the original mode. */
5869 rtx_op0
= expand_expr (t_op0
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op0
)),
5871 m0
= GET_MODE (rtx_op0
);
5873 m0
= TYPE_MODE (TREE_TYPE (t_op0
));
5875 rtx_op1
= expand_expr (t_op1
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op1
)),
5877 m1
= GET_MODE (rtx_op1
);
5879 m1
= TYPE_MODE (TREE_TYPE (t_op1
));
5881 create_input_operand (&ops
[0], rtx_op0
, m0
);
5882 create_input_operand (&ops
[1], rtx_op1
, m1
);
5883 if (!maybe_legitimize_operands (icode
, opno
, 2, ops
))
5885 return gen_rtx_fmt_ee (rcode
, cmp_mode
, ops
[0].value
, ops
[1].value
);
5888 /* Check if vec_perm mask SEL is a constant equivalent to a shift of
5889 the first vec_perm operand, assuming the second operand (for left shift
5890 first operand) is a constant vector of zeros. Return the shift distance
5891 in bits if so, or NULL_RTX if the vec_perm is not a shift. MODE is the
5892 mode of the value being shifted. SHIFT_OPTAB is vec_shr_optab for right
5893 shift or vec_shl_optab for left shift. */
5895 shift_amt_for_vec_perm_mask (machine_mode mode
, const vec_perm_indices
&sel
,
5898 unsigned int bitsize
= GET_MODE_UNIT_BITSIZE (mode
);
5899 poly_int64 first
= sel
[0];
5900 if (maybe_ge (sel
[0], GET_MODE_NUNITS (mode
)))
5903 if (shift_optab
== vec_shl_optab
)
5906 if (!GET_MODE_NUNITS (mode
).is_constant (&nelt
))
5908 unsigned firstidx
= 0;
5909 for (unsigned int i
= 0; i
< nelt
; i
++)
5911 if (known_eq (sel
[i
], nelt
))
5913 if (i
== 0 || firstidx
)
5918 ? maybe_ne (sel
[i
], nelt
+ i
- firstidx
)
5919 : maybe_ge (sel
[i
], nelt
))
5927 else if (!sel
.series_p (0, 1, first
, 1))
5930 if (!GET_MODE_NUNITS (mode
).is_constant (&nelt
))
5932 for (unsigned int i
= 1; i
< nelt
; i
++)
5934 poly_int64 expected
= i
+ first
;
5935 /* Indices into the second vector are all equivalent. */
5936 if (maybe_lt (sel
[i
], nelt
)
5937 ? maybe_ne (sel
[i
], expected
)
5938 : maybe_lt (expected
, nelt
))
5943 return gen_int_shift_amount (mode
, first
* bitsize
);
5946 /* A subroutine of expand_vec_perm_var for expanding one vec_perm insn. */
5949 expand_vec_perm_1 (enum insn_code icode
, rtx target
,
5950 rtx v0
, rtx v1
, rtx sel
)
5952 machine_mode tmode
= GET_MODE (target
);
5953 machine_mode smode
= GET_MODE (sel
);
5954 class expand_operand ops
[4];
5956 gcc_assert (GET_MODE_CLASS (smode
) == MODE_VECTOR_INT
5957 || related_int_vector_mode (tmode
).require () == smode
);
5958 create_output_operand (&ops
[0], target
, tmode
);
5959 create_input_operand (&ops
[3], sel
, smode
);
5961 /* Make an effort to preserve v0 == v1. The target expander is able to
5962 rely on this to determine if we're permuting a single input operand. */
5963 if (rtx_equal_p (v0
, v1
))
5965 if (!insn_operand_matches (icode
, 1, v0
))
5966 v0
= force_reg (tmode
, v0
);
5967 gcc_checking_assert (insn_operand_matches (icode
, 1, v0
));
5968 gcc_checking_assert (insn_operand_matches (icode
, 2, v0
));
5970 create_fixed_operand (&ops
[1], v0
);
5971 create_fixed_operand (&ops
[2], v0
);
5975 create_input_operand (&ops
[1], v0
, tmode
);
5976 create_input_operand (&ops
[2], v1
, tmode
);
5979 if (maybe_expand_insn (icode
, 4, ops
))
5980 return ops
[0].value
;
5984 /* Implement a permutation of vectors v0 and v1 using the permutation
5985 vector in SEL and return the result. Use TARGET to hold the result
5986 if nonnull and convenient.
5988 MODE is the mode of the vectors being permuted (V0 and V1). SEL_MODE
5989 is the TYPE_MODE associated with SEL, or BLKmode if SEL isn't known
5990 to have a particular mode. */
5993 expand_vec_perm_const (machine_mode mode
, rtx v0
, rtx v1
,
5994 const vec_perm_builder
&sel
, machine_mode sel_mode
,
5997 if (!target
|| !register_operand (target
, mode
))
5998 target
= gen_reg_rtx (mode
);
6000 /* Set QIMODE to a different vector mode with byte elements.
6001 If no such mode, or if MODE already has byte elements, use VOIDmode. */
6002 machine_mode qimode
;
6003 if (!qimode_for_vec_perm (mode
).exists (&qimode
))
6006 rtx_insn
*last
= get_last_insn ();
6008 bool single_arg_p
= rtx_equal_p (v0
, v1
);
6009 /* Always specify two input vectors here and leave the target to handle
6010 cases in which the inputs are equal. Not all backends can cope with
6011 the single-input representation when testing for a double-input
6012 target instruction. */
6013 vec_perm_indices
indices (sel
, 2, GET_MODE_NUNITS (mode
));
6015 /* See if this can be handled with a vec_shr or vec_shl. We only do this
6016 if the second (for vec_shr) or first (for vec_shl) vector is all
6018 insn_code shift_code
= CODE_FOR_nothing
;
6019 insn_code shift_code_qi
= CODE_FOR_nothing
;
6020 optab shift_optab
= unknown_optab
;
6022 if (v1
== CONST0_RTX (GET_MODE (v1
)))
6023 shift_optab
= vec_shr_optab
;
6024 else if (v0
== CONST0_RTX (GET_MODE (v0
)))
6026 shift_optab
= vec_shl_optab
;
6029 if (shift_optab
!= unknown_optab
)
6031 shift_code
= optab_handler (shift_optab
, mode
);
6032 shift_code_qi
= ((qimode
!= VOIDmode
&& qimode
!= mode
)
6033 ? optab_handler (shift_optab
, qimode
)
6034 : CODE_FOR_nothing
);
6036 if (shift_code
!= CODE_FOR_nothing
|| shift_code_qi
!= CODE_FOR_nothing
)
6038 rtx shift_amt
= shift_amt_for_vec_perm_mask (mode
, indices
, shift_optab
);
6041 class expand_operand ops
[3];
6042 if (shift_amt
== const0_rtx
)
6044 if (shift_code
!= CODE_FOR_nothing
)
6046 create_output_operand (&ops
[0], target
, mode
);
6047 create_input_operand (&ops
[1], v2
, mode
);
6048 create_convert_operand_from_type (&ops
[2], shift_amt
, sizetype
);
6049 if (maybe_expand_insn (shift_code
, 3, ops
))
6050 return ops
[0].value
;
6052 if (shift_code_qi
!= CODE_FOR_nothing
)
6054 rtx tmp
= gen_reg_rtx (qimode
);
6055 create_output_operand (&ops
[0], tmp
, qimode
);
6056 create_input_operand (&ops
[1], gen_lowpart (qimode
, v2
), qimode
);
6057 create_convert_operand_from_type (&ops
[2], shift_amt
, sizetype
);
6058 if (maybe_expand_insn (shift_code_qi
, 3, ops
))
6059 return gen_lowpart (mode
, ops
[0].value
);
6064 if (targetm
.vectorize
.vec_perm_const
!= NULL
)
6069 if (targetm
.vectorize
.vec_perm_const (mode
, target
, v0
, v1
, indices
))
6073 /* Fall back to a constant byte-based permutation. */
6074 vec_perm_indices qimode_indices
;
6075 rtx target_qi
= NULL_RTX
, v0_qi
= NULL_RTX
, v1_qi
= NULL_RTX
;
6076 if (qimode
!= VOIDmode
)
6078 qimode_indices
.new_expanded_vector (indices
, GET_MODE_UNIT_SIZE (mode
));
6079 target_qi
= gen_reg_rtx (qimode
);
6080 v0_qi
= gen_lowpart (qimode
, v0
);
6081 v1_qi
= gen_lowpart (qimode
, v1
);
6082 if (targetm
.vectorize
.vec_perm_const
!= NULL
6083 && targetm
.vectorize
.vec_perm_const (qimode
, target_qi
, v0_qi
,
6084 v1_qi
, qimode_indices
))
6085 return gen_lowpart (mode
, target_qi
);
6088 v0
= force_reg (mode
, v0
);
6091 v1
= force_reg (mode
, v1
);
6093 /* Otherwise expand as a fully variable permuation. */
6095 /* The optabs are only defined for selectors with the same width
6096 as the values being permuted. */
6097 machine_mode required_sel_mode
;
6098 if (!related_int_vector_mode (mode
).exists (&required_sel_mode
))
6100 delete_insns_since (last
);
6104 /* We know that it is semantically valid to treat SEL as having SEL_MODE.
6105 If that isn't the mode we want then we need to prove that using
6106 REQUIRED_SEL_MODE is OK. */
6107 if (sel_mode
!= required_sel_mode
)
6109 if (!selector_fits_mode_p (required_sel_mode
, indices
))
6111 delete_insns_since (last
);
6114 sel_mode
= required_sel_mode
;
6117 insn_code icode
= direct_optab_handler (vec_perm_optab
, mode
);
6118 if (icode
!= CODE_FOR_nothing
)
6120 rtx sel_rtx
= vec_perm_indices_to_rtx (sel_mode
, indices
);
6121 rtx tmp
= expand_vec_perm_1 (icode
, target
, v0
, v1
, sel_rtx
);
6126 if (qimode
!= VOIDmode
6127 && selector_fits_mode_p (qimode
, qimode_indices
))
6129 icode
= direct_optab_handler (vec_perm_optab
, qimode
);
6130 if (icode
!= CODE_FOR_nothing
)
6132 rtx sel_qi
= vec_perm_indices_to_rtx (qimode
, qimode_indices
);
6133 rtx tmp
= expand_vec_perm_1 (icode
, target_qi
, v0_qi
, v1_qi
, sel_qi
);
6135 return gen_lowpart (mode
, tmp
);
6139 delete_insns_since (last
);
6143 /* Implement a permutation of vectors v0 and v1 using the permutation
6144 vector in SEL and return the result. Use TARGET to hold the result
6145 if nonnull and convenient.
6147 MODE is the mode of the vectors being permuted (V0 and V1).
6148 SEL must have the integer equivalent of MODE and is known to be
6149 unsuitable for permutes with a constant permutation vector. */
6152 expand_vec_perm_var (machine_mode mode
, rtx v0
, rtx v1
, rtx sel
, rtx target
)
6154 enum insn_code icode
;
6158 u
= GET_MODE_UNIT_SIZE (mode
);
6160 if (!target
|| GET_MODE (target
) != mode
)
6161 target
= gen_reg_rtx (mode
);
6163 icode
= direct_optab_handler (vec_perm_optab
, mode
);
6164 if (icode
!= CODE_FOR_nothing
)
6166 tmp
= expand_vec_perm_1 (icode
, target
, v0
, v1
, sel
);
6171 /* As a special case to aid several targets, lower the element-based
6172 permutation to a byte-based permutation and try again. */
6173 machine_mode qimode
;
6174 if (!qimode_for_vec_perm (mode
).exists (&qimode
)
6175 || maybe_gt (GET_MODE_NUNITS (qimode
), GET_MODE_MASK (QImode
) + 1))
6177 icode
= direct_optab_handler (vec_perm_optab
, qimode
);
6178 if (icode
== CODE_FOR_nothing
)
6181 /* Multiply each element by its byte size. */
6182 machine_mode selmode
= GET_MODE (sel
);
6184 sel
= expand_simple_binop (selmode
, PLUS
, sel
, sel
,
6185 NULL
, 0, OPTAB_DIRECT
);
6187 sel
= expand_simple_binop (selmode
, ASHIFT
, sel
,
6188 gen_int_shift_amount (selmode
, exact_log2 (u
)),
6189 NULL
, 0, OPTAB_DIRECT
);
6190 gcc_assert (sel
!= NULL
);
6192 /* Broadcast the low byte each element into each of its bytes.
6193 The encoding has U interleaved stepped patterns, one for each
6194 byte of an element. */
6195 vec_perm_builder
const_sel (GET_MODE_SIZE (mode
), u
, 3);
6196 unsigned int low_byte_in_u
= BYTES_BIG_ENDIAN
? u
- 1 : 0;
6197 for (i
= 0; i
< 3; ++i
)
6198 for (unsigned int j
= 0; j
< u
; ++j
)
6199 const_sel
.quick_push (i
* u
+ low_byte_in_u
);
6200 sel
= gen_lowpart (qimode
, sel
);
6201 sel
= expand_vec_perm_const (qimode
, sel
, sel
, const_sel
, qimode
, NULL
);
6202 gcc_assert (sel
!= NULL
);
6204 /* Add the byte offset to each byte element. */
6205 /* Note that the definition of the indicies here is memory ordering,
6206 so there should be no difference between big and little endian. */
6207 rtx_vector_builder
byte_indices (qimode
, u
, 1);
6208 for (i
= 0; i
< u
; ++i
)
6209 byte_indices
.quick_push (GEN_INT (i
));
6210 tmp
= byte_indices
.build ();
6211 sel_qi
= expand_simple_binop (qimode
, PLUS
, sel
, tmp
,
6212 sel
, 0, OPTAB_DIRECT
);
6213 gcc_assert (sel_qi
!= NULL
);
6215 tmp
= mode
!= qimode
? gen_reg_rtx (qimode
) : target
;
6216 tmp
= expand_vec_perm_1 (icode
, tmp
, gen_lowpart (qimode
, v0
),
6217 gen_lowpart (qimode
, v1
), sel_qi
);
6219 tmp
= gen_lowpart (mode
, tmp
);
6223 /* Generate VEC_SERIES_EXPR <OP0, OP1>, returning a value of mode VMODE.
6224 Use TARGET for the result if nonnull and convenient. */
6227 expand_vec_series_expr (machine_mode vmode
, rtx op0
, rtx op1
, rtx target
)
6229 class expand_operand ops
[3];
6230 enum insn_code icode
;
6231 machine_mode emode
= GET_MODE_INNER (vmode
);
6233 icode
= direct_optab_handler (vec_series_optab
, vmode
);
6234 gcc_assert (icode
!= CODE_FOR_nothing
);
6236 create_output_operand (&ops
[0], target
, vmode
);
6237 create_input_operand (&ops
[1], op0
, emode
);
6238 create_input_operand (&ops
[2], op1
, emode
);
6240 expand_insn (icode
, 3, ops
);
6241 return ops
[0].value
;
6244 /* Generate insns for a vector comparison into a mask. */
6247 expand_vec_cmp_expr (tree type
, tree exp
, rtx target
)
6249 class expand_operand ops
[4];
6250 enum insn_code icode
;
6252 machine_mode mask_mode
= TYPE_MODE (type
);
6256 enum tree_code tcode
;
6258 op0a
= TREE_OPERAND (exp
, 0);
6259 op0b
= TREE_OPERAND (exp
, 1);
6260 tcode
= TREE_CODE (exp
);
6262 unsignedp
= TYPE_UNSIGNED (TREE_TYPE (op0a
));
6263 vmode
= TYPE_MODE (TREE_TYPE (op0a
));
6265 icode
= get_vec_cmp_icode (vmode
, mask_mode
, unsignedp
);
6266 if (icode
== CODE_FOR_nothing
)
6268 if (tcode
== EQ_EXPR
|| tcode
== NE_EXPR
)
6269 icode
= get_vec_cmp_eq_icode (vmode
, mask_mode
);
6270 if (icode
== CODE_FOR_nothing
)
6274 comparison
= vector_compare_rtx (mask_mode
, tcode
, op0a
, op0b
,
6275 unsignedp
, icode
, 2);
6276 create_output_operand (&ops
[0], target
, mask_mode
);
6277 create_fixed_operand (&ops
[1], comparison
);
6278 create_fixed_operand (&ops
[2], XEXP (comparison
, 0));
6279 create_fixed_operand (&ops
[3], XEXP (comparison
, 1));
6280 expand_insn (icode
, 4, ops
);
6281 return ops
[0].value
;
6284 /* Expand a highpart multiply. */
6287 expand_mult_highpart (machine_mode mode
, rtx op0
, rtx op1
,
6288 rtx target
, bool uns_p
)
6290 class expand_operand eops
[3];
6291 enum insn_code icode
;
6297 method
= can_mult_highpart_p (mode
, uns_p
);
6303 tab1
= uns_p
? umul_highpart_optab
: smul_highpart_optab
;
6304 return expand_binop (mode
, tab1
, op0
, op1
, target
, uns_p
,
6307 tab1
= uns_p
? vec_widen_umult_even_optab
: vec_widen_smult_even_optab
;
6308 tab2
= uns_p
? vec_widen_umult_odd_optab
: vec_widen_smult_odd_optab
;
6311 tab1
= uns_p
? vec_widen_umult_lo_optab
: vec_widen_smult_lo_optab
;
6312 tab2
= uns_p
? vec_widen_umult_hi_optab
: vec_widen_smult_hi_optab
;
6313 if (BYTES_BIG_ENDIAN
)
6314 std::swap (tab1
, tab2
);
6320 icode
= optab_handler (tab1
, mode
);
6321 wmode
= insn_data
[icode
].operand
[0].mode
;
6322 gcc_checking_assert (known_eq (2 * GET_MODE_NUNITS (wmode
),
6323 GET_MODE_NUNITS (mode
)));
6324 gcc_checking_assert (known_eq (GET_MODE_SIZE (wmode
), GET_MODE_SIZE (mode
)));
6326 create_output_operand (&eops
[0], gen_reg_rtx (wmode
), wmode
);
6327 create_input_operand (&eops
[1], op0
, mode
);
6328 create_input_operand (&eops
[2], op1
, mode
);
6329 expand_insn (icode
, 3, eops
);
6330 m1
= gen_lowpart (mode
, eops
[0].value
);
6332 create_output_operand (&eops
[0], gen_reg_rtx (wmode
), wmode
);
6333 create_input_operand (&eops
[1], op0
, mode
);
6334 create_input_operand (&eops
[2], op1
, mode
);
6335 expand_insn (optab_handler (tab2
, mode
), 3, eops
);
6336 m2
= gen_lowpart (mode
, eops
[0].value
);
6338 vec_perm_builder sel
;
6341 /* The encoding has 2 interleaved stepped patterns. */
6342 sel
.new_vector (GET_MODE_NUNITS (mode
), 2, 3);
6343 for (i
= 0; i
< 6; ++i
)
6344 sel
.quick_push (!BYTES_BIG_ENDIAN
+ (i
& ~1)
6345 + ((i
& 1) ? GET_MODE_NUNITS (mode
) : 0));
6349 /* The encoding has a single interleaved stepped pattern. */
6350 sel
.new_vector (GET_MODE_NUNITS (mode
), 1, 3);
6351 for (i
= 0; i
< 3; ++i
)
6352 sel
.quick_push (2 * i
+ (BYTES_BIG_ENDIAN
? 0 : 1));
6355 return expand_vec_perm_const (mode
, m1
, m2
, sel
, BLKmode
, target
);
6358 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
6362 find_cc_set (rtx x
, const_rtx pat
, void *data
)
6364 if (REG_P (x
) && GET_MODE_CLASS (GET_MODE (x
)) == MODE_CC
6365 && GET_CODE (pat
) == SET
)
6367 rtx
*p_cc_reg
= (rtx
*) data
;
6368 gcc_assert (!*p_cc_reg
);
6373 /* This is a helper function for the other atomic operations. This function
6374 emits a loop that contains SEQ that iterates until a compare-and-swap
6375 operation at the end succeeds. MEM is the memory to be modified. SEQ is
6376 a set of instructions that takes a value from OLD_REG as an input and
6377 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
6378 set to the current contents of MEM. After SEQ, a compare-and-swap will
6379 attempt to update MEM with NEW_REG. The function returns true when the
6380 loop was generated successfully. */
6383 expand_compare_and_swap_loop (rtx mem
, rtx old_reg
, rtx new_reg
, rtx seq
)
6385 machine_mode mode
= GET_MODE (mem
);
6386 rtx_code_label
*label
;
6387 rtx cmp_reg
, success
, oldval
;
6389 /* The loop we want to generate looks like
6395 (success, cmp_reg) = compare-and-swap(mem, old_reg, new_reg)
6399 Note that we only do the plain load from memory once. Subsequent
6400 iterations use the value loaded by the compare-and-swap pattern. */
6402 label
= gen_label_rtx ();
6403 cmp_reg
= gen_reg_rtx (mode
);
6405 emit_move_insn (cmp_reg
, mem
);
6407 emit_move_insn (old_reg
, cmp_reg
);
6413 if (!expand_atomic_compare_and_swap (&success
, &oldval
, mem
, old_reg
,
6414 new_reg
, false, MEMMODEL_SYNC_SEQ_CST
,
6418 if (oldval
!= cmp_reg
)
6419 emit_move_insn (cmp_reg
, oldval
);
6421 /* Mark this jump predicted not taken. */
6422 emit_cmp_and_jump_insns (success
, const0_rtx
, EQ
, const0_rtx
,
6423 GET_MODE (success
), 1, label
,
6424 profile_probability::guessed_never ());
6429 /* This function tries to emit an atomic_exchange intruction. VAL is written
6430 to *MEM using memory model MODEL. The previous contents of *MEM are returned,
6431 using TARGET if possible. */
6434 maybe_emit_atomic_exchange (rtx target
, rtx mem
, rtx val
, enum memmodel model
)
6436 machine_mode mode
= GET_MODE (mem
);
6437 enum insn_code icode
;
6439 /* If the target supports the exchange directly, great. */
6440 icode
= direct_optab_handler (atomic_exchange_optab
, mode
);
6441 if (icode
!= CODE_FOR_nothing
)
6443 class expand_operand ops
[4];
6445 create_output_operand (&ops
[0], target
, mode
);
6446 create_fixed_operand (&ops
[1], mem
);
6447 create_input_operand (&ops
[2], val
, mode
);
6448 create_integer_operand (&ops
[3], model
);
6449 if (maybe_expand_insn (icode
, 4, ops
))
6450 return ops
[0].value
;
6456 /* This function tries to implement an atomic exchange operation using
6457 __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
6458 The previous contents of *MEM are returned, using TARGET if possible.
6459 Since this instructionn is an acquire barrier only, stronger memory
6460 models may require additional barriers to be emitted. */
6463 maybe_emit_sync_lock_test_and_set (rtx target
, rtx mem
, rtx val
,
6464 enum memmodel model
)
6466 machine_mode mode
= GET_MODE (mem
);
6467 enum insn_code icode
;
6468 rtx_insn
*last_insn
= get_last_insn ();
6470 icode
= optab_handler (sync_lock_test_and_set_optab
, mode
);
6472 /* Legacy sync_lock_test_and_set is an acquire barrier. If the pattern
6473 exists, and the memory model is stronger than acquire, add a release
6474 barrier before the instruction. */
6476 if (is_mm_seq_cst (model
) || is_mm_release (model
) || is_mm_acq_rel (model
))
6477 expand_mem_thread_fence (model
);
6479 if (icode
!= CODE_FOR_nothing
)
6481 class expand_operand ops
[3];
6482 create_output_operand (&ops
[0], target
, mode
);
6483 create_fixed_operand (&ops
[1], mem
);
6484 create_input_operand (&ops
[2], val
, mode
);
6485 if (maybe_expand_insn (icode
, 3, ops
))
6486 return ops
[0].value
;
6489 /* If an external test-and-set libcall is provided, use that instead of
6490 any external compare-and-swap that we might get from the compare-and-
6491 swap-loop expansion later. */
6492 if (!can_compare_and_swap_p (mode
, false))
6494 rtx libfunc
= optab_libfunc (sync_lock_test_and_set_optab
, mode
);
6495 if (libfunc
!= NULL
)
6499 addr
= convert_memory_address (ptr_mode
, XEXP (mem
, 0));
6500 return emit_library_call_value (libfunc
, NULL_RTX
, LCT_NORMAL
,
6501 mode
, addr
, ptr_mode
,
6506 /* If the test_and_set can't be emitted, eliminate any barrier that might
6507 have been emitted. */
6508 delete_insns_since (last_insn
);
6512 /* This function tries to implement an atomic exchange operation using a
6513 compare_and_swap loop. VAL is written to *MEM. The previous contents of
6514 *MEM are returned, using TARGET if possible. No memory model is required
6515 since a compare_and_swap loop is seq-cst. */
6518 maybe_emit_compare_and_swap_exchange_loop (rtx target
, rtx mem
, rtx val
)
6520 machine_mode mode
= GET_MODE (mem
);
6522 if (can_compare_and_swap_p (mode
, true))
6524 if (!target
|| !register_operand (target
, mode
))
6525 target
= gen_reg_rtx (mode
);
6526 if (expand_compare_and_swap_loop (mem
, target
, val
, NULL_RTX
))
6533 /* This function tries to implement an atomic test-and-set operation
6534 using the atomic_test_and_set instruction pattern. A boolean value
6535 is returned from the operation, using TARGET if possible. */
6538 maybe_emit_atomic_test_and_set (rtx target
, rtx mem
, enum memmodel model
)
6540 machine_mode pat_bool_mode
;
6541 class expand_operand ops
[3];
6543 if (!targetm
.have_atomic_test_and_set ())
6546 /* While we always get QImode from __atomic_test_and_set, we get
6547 other memory modes from __sync_lock_test_and_set. Note that we
6548 use no endian adjustment here. This matches the 4.6 behavior
6549 in the Sparc backend. */
6550 enum insn_code icode
= targetm
.code_for_atomic_test_and_set
;
6551 gcc_checking_assert (insn_data
[icode
].operand
[1].mode
== QImode
);
6552 if (GET_MODE (mem
) != QImode
)
6553 mem
= adjust_address_nv (mem
, QImode
, 0);
6555 pat_bool_mode
= insn_data
[icode
].operand
[0].mode
;
6556 create_output_operand (&ops
[0], target
, pat_bool_mode
);
6557 create_fixed_operand (&ops
[1], mem
);
6558 create_integer_operand (&ops
[2], model
);
6560 if (maybe_expand_insn (icode
, 3, ops
))
6561 return ops
[0].value
;
6565 /* This function expands the legacy _sync_lock test_and_set operation which is
6566 generally an atomic exchange. Some limited targets only allow the
6567 constant 1 to be stored. This is an ACQUIRE operation.
6569 TARGET is an optional place to stick the return value.
6570 MEM is where VAL is stored. */
6573 expand_sync_lock_test_and_set (rtx target
, rtx mem
, rtx val
)
6577 /* Try an atomic_exchange first. */
6578 ret
= maybe_emit_atomic_exchange (target
, mem
, val
, MEMMODEL_SYNC_ACQUIRE
);
6582 ret
= maybe_emit_sync_lock_test_and_set (target
, mem
, val
,
6583 MEMMODEL_SYNC_ACQUIRE
);
6587 ret
= maybe_emit_compare_and_swap_exchange_loop (target
, mem
, val
);
6591 /* If there are no other options, try atomic_test_and_set if the value
6592 being stored is 1. */
6593 if (val
== const1_rtx
)
6594 ret
= maybe_emit_atomic_test_and_set (target
, mem
, MEMMODEL_SYNC_ACQUIRE
);
6599 /* This function expands the atomic test_and_set operation:
6600 atomically store a boolean TRUE into MEM and return the previous value.
6602 MEMMODEL is the memory model variant to use.
6603 TARGET is an optional place to stick the return value. */
6606 expand_atomic_test_and_set (rtx target
, rtx mem
, enum memmodel model
)
6608 machine_mode mode
= GET_MODE (mem
);
6609 rtx ret
, trueval
, subtarget
;
6611 ret
= maybe_emit_atomic_test_and_set (target
, mem
, model
);
6615 /* Be binary compatible with non-default settings of trueval, and different
6616 cpu revisions. E.g. one revision may have atomic-test-and-set, but
6617 another only has atomic-exchange. */
6618 if (targetm
.atomic_test_and_set_trueval
== 1)
6620 trueval
= const1_rtx
;
6621 subtarget
= target
? target
: gen_reg_rtx (mode
);
6625 trueval
= gen_int_mode (targetm
.atomic_test_and_set_trueval
, mode
);
6626 subtarget
= gen_reg_rtx (mode
);
6629 /* Try the atomic-exchange optab... */
6630 ret
= maybe_emit_atomic_exchange (subtarget
, mem
, trueval
, model
);
6632 /* ... then an atomic-compare-and-swap loop ... */
6634 ret
= maybe_emit_compare_and_swap_exchange_loop (subtarget
, mem
, trueval
);
6636 /* ... before trying the vaguely defined legacy lock_test_and_set. */
6638 ret
= maybe_emit_sync_lock_test_and_set (subtarget
, mem
, trueval
, model
);
6640 /* Recall that the legacy lock_test_and_set optab was allowed to do magic
6641 things with the value 1. Thus we try again without trueval. */
6642 if (!ret
&& targetm
.atomic_test_and_set_trueval
!= 1)
6643 ret
= maybe_emit_sync_lock_test_and_set (subtarget
, mem
, const1_rtx
, model
);
6645 /* Failing all else, assume a single threaded environment and simply
6646 perform the operation. */
6649 /* If the result is ignored skip the move to target. */
6650 if (subtarget
!= const0_rtx
)
6651 emit_move_insn (subtarget
, mem
);
6653 emit_move_insn (mem
, trueval
);
6657 /* Recall that have to return a boolean value; rectify if trueval
6658 is not exactly one. */
6659 if (targetm
.atomic_test_and_set_trueval
!= 1)
6660 ret
= emit_store_flag_force (target
, NE
, ret
, const0_rtx
, mode
, 0, 1);
6665 /* This function expands the atomic exchange operation:
6666 atomically store VAL in MEM and return the previous value in MEM.
6668 MEMMODEL is the memory model variant to use.
6669 TARGET is an optional place to stick the return value. */
6672 expand_atomic_exchange (rtx target
, rtx mem
, rtx val
, enum memmodel model
)
6674 machine_mode mode
= GET_MODE (mem
);
6677 /* If loads are not atomic for the required size and we are not called to
6678 provide a __sync builtin, do not do anything so that we stay consistent
6679 with atomic loads of the same size. */
6680 if (!can_atomic_load_p (mode
) && !is_mm_sync (model
))
6683 ret
= maybe_emit_atomic_exchange (target
, mem
, val
, model
);
6685 /* Next try a compare-and-swap loop for the exchange. */
6687 ret
= maybe_emit_compare_and_swap_exchange_loop (target
, mem
, val
);
6692 /* This function expands the atomic compare exchange operation:
6694 *PTARGET_BOOL is an optional place to store the boolean success/failure.
6695 *PTARGET_OVAL is an optional place to store the old value from memory.
6696 Both target parameters may be NULL or const0_rtx to indicate that we do
6697 not care about that return value. Both target parameters are updated on
6698 success to the actual location of the corresponding result.
6700 MEMMODEL is the memory model variant to use.
6702 The return value of the function is true for success. */
6705 expand_atomic_compare_and_swap (rtx
*ptarget_bool
, rtx
*ptarget_oval
,
6706 rtx mem
, rtx expected
, rtx desired
,
6707 bool is_weak
, enum memmodel succ_model
,
6708 enum memmodel fail_model
)
6710 machine_mode mode
= GET_MODE (mem
);
6711 class expand_operand ops
[8];
6712 enum insn_code icode
;
6713 rtx target_oval
, target_bool
= NULL_RTX
;
6716 /* If loads are not atomic for the required size and we are not called to
6717 provide a __sync builtin, do not do anything so that we stay consistent
6718 with atomic loads of the same size. */
6719 if (!can_atomic_load_p (mode
) && !is_mm_sync (succ_model
))
6722 /* Load expected into a register for the compare and swap. */
6723 if (MEM_P (expected
))
6724 expected
= copy_to_reg (expected
);
6726 /* Make sure we always have some place to put the return oldval.
6727 Further, make sure that place is distinct from the input expected,
6728 just in case we need that path down below. */
6729 if (ptarget_oval
&& *ptarget_oval
== const0_rtx
)
6730 ptarget_oval
= NULL
;
6732 if (ptarget_oval
== NULL
6733 || (target_oval
= *ptarget_oval
) == NULL
6734 || reg_overlap_mentioned_p (expected
, target_oval
))
6735 target_oval
= gen_reg_rtx (mode
);
6737 icode
= direct_optab_handler (atomic_compare_and_swap_optab
, mode
);
6738 if (icode
!= CODE_FOR_nothing
)
6740 machine_mode bool_mode
= insn_data
[icode
].operand
[0].mode
;
6742 if (ptarget_bool
&& *ptarget_bool
== const0_rtx
)
6743 ptarget_bool
= NULL
;
6745 /* Make sure we always have a place for the bool operand. */
6746 if (ptarget_bool
== NULL
6747 || (target_bool
= *ptarget_bool
) == NULL
6748 || GET_MODE (target_bool
) != bool_mode
)
6749 target_bool
= gen_reg_rtx (bool_mode
);
6751 /* Emit the compare_and_swap. */
6752 create_output_operand (&ops
[0], target_bool
, bool_mode
);
6753 create_output_operand (&ops
[1], target_oval
, mode
);
6754 create_fixed_operand (&ops
[2], mem
);
6755 create_input_operand (&ops
[3], expected
, mode
);
6756 create_input_operand (&ops
[4], desired
, mode
);
6757 create_integer_operand (&ops
[5], is_weak
);
6758 create_integer_operand (&ops
[6], succ_model
);
6759 create_integer_operand (&ops
[7], fail_model
);
6760 if (maybe_expand_insn (icode
, 8, ops
))
6762 /* Return success/failure. */
6763 target_bool
= ops
[0].value
;
6764 target_oval
= ops
[1].value
;
6769 /* Otherwise fall back to the original __sync_val_compare_and_swap
6770 which is always seq-cst. */
6771 icode
= optab_handler (sync_compare_and_swap_optab
, mode
);
6772 if (icode
!= CODE_FOR_nothing
)
6776 create_output_operand (&ops
[0], target_oval
, mode
);
6777 create_fixed_operand (&ops
[1], mem
);
6778 create_input_operand (&ops
[2], expected
, mode
);
6779 create_input_operand (&ops
[3], desired
, mode
);
6780 if (!maybe_expand_insn (icode
, 4, ops
))
6783 target_oval
= ops
[0].value
;
6785 /* If the caller isn't interested in the boolean return value,
6786 skip the computation of it. */
6787 if (ptarget_bool
== NULL
)
6790 /* Otherwise, work out if the compare-and-swap succeeded. */
6792 if (have_insn_for (COMPARE
, CCmode
))
6793 note_stores (get_last_insn (), find_cc_set
, &cc_reg
);
6796 target_bool
= emit_store_flag_force (target_bool
, EQ
, cc_reg
,
6797 const0_rtx
, VOIDmode
, 0, 1);
6800 goto success_bool_from_val
;
6803 /* Also check for library support for __sync_val_compare_and_swap. */
6804 libfunc
= optab_libfunc (sync_compare_and_swap_optab
, mode
);
6805 if (libfunc
!= NULL
)
6807 rtx addr
= convert_memory_address (ptr_mode
, XEXP (mem
, 0));
6808 rtx target
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_NORMAL
,
6809 mode
, addr
, ptr_mode
,
6810 expected
, mode
, desired
, mode
);
6811 emit_move_insn (target_oval
, target
);
6813 /* Compute the boolean return value only if requested. */
6815 goto success_bool_from_val
;
6823 success_bool_from_val
:
6824 target_bool
= emit_store_flag_force (target_bool
, EQ
, target_oval
,
6825 expected
, VOIDmode
, 1, 1);
6827 /* Make sure that the oval output winds up where the caller asked. */
6829 *ptarget_oval
= target_oval
;
6831 *ptarget_bool
= target_bool
;
6835 /* Generate asm volatile("" : : : "memory") as the memory blockage. */
6838 expand_asm_memory_blockage (void)
6842 asm_op
= gen_rtx_ASM_OPERANDS (VOIDmode
, "", "", 0,
6843 rtvec_alloc (0), rtvec_alloc (0),
6844 rtvec_alloc (0), UNKNOWN_LOCATION
);
6845 MEM_VOLATILE_P (asm_op
) = 1;
6847 clob
= gen_rtx_SCRATCH (VOIDmode
);
6848 clob
= gen_rtx_MEM (BLKmode
, clob
);
6849 clob
= gen_rtx_CLOBBER (VOIDmode
, clob
);
6851 emit_insn (gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, asm_op
, clob
)));
6854 /* Do not propagate memory accesses across this point. */
6857 expand_memory_blockage (void)
6859 if (targetm
.have_memory_blockage ())
6860 emit_insn (targetm
.gen_memory_blockage ());
6862 expand_asm_memory_blockage ();
6865 /* Generate asm volatile("" : : : "memory") as a memory blockage, at the
6866 same time clobbering the register set specified by REGS. */
6869 expand_asm_reg_clobber_mem_blockage (HARD_REG_SET regs
)
6871 rtx asm_op
, clob_mem
;
6873 unsigned int num_of_regs
= 0;
6874 for (unsigned int i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
6875 if (TEST_HARD_REG_BIT (regs
, i
))
6878 asm_op
= gen_rtx_ASM_OPERANDS (VOIDmode
, "", "", 0,
6879 rtvec_alloc (0), rtvec_alloc (0),
6880 rtvec_alloc (0), UNKNOWN_LOCATION
);
6881 MEM_VOLATILE_P (asm_op
) = 1;
6883 rtvec v
= rtvec_alloc (num_of_regs
+ 2);
6885 clob_mem
= gen_rtx_SCRATCH (VOIDmode
);
6886 clob_mem
= gen_rtx_MEM (BLKmode
, clob_mem
);
6887 clob_mem
= gen_rtx_CLOBBER (VOIDmode
, clob_mem
);
6889 RTVEC_ELT (v
, 0) = asm_op
;
6890 RTVEC_ELT (v
, 1) = clob_mem
;
6892 if (num_of_regs
> 0)
6895 for (unsigned int i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
6896 if (TEST_HARD_REG_BIT (regs
, i
))
6898 RTVEC_ELT (v
, j
) = gen_rtx_CLOBBER (VOIDmode
, regno_reg_rtx
[i
]);
6901 gcc_assert (j
== (num_of_regs
+ 2));
6904 emit_insn (gen_rtx_PARALLEL (VOIDmode
, v
));
6907 /* This routine will either emit the mem_thread_fence pattern or issue a
6908 sync_synchronize to generate a fence for memory model MEMMODEL. */
6911 expand_mem_thread_fence (enum memmodel model
)
6913 if (is_mm_relaxed (model
))
6915 if (targetm
.have_mem_thread_fence ())
6917 emit_insn (targetm
.gen_mem_thread_fence (GEN_INT (model
)));
6918 expand_memory_blockage ();
6920 else if (targetm
.have_memory_barrier ())
6921 emit_insn (targetm
.gen_memory_barrier ());
6922 else if (synchronize_libfunc
!= NULL_RTX
)
6923 emit_library_call (synchronize_libfunc
, LCT_NORMAL
, VOIDmode
);
6925 expand_memory_blockage ();
6928 /* Emit a signal fence with given memory model. */
6931 expand_mem_signal_fence (enum memmodel model
)
6933 /* No machine barrier is required to implement a signal fence, but
6934 a compiler memory barrier must be issued, except for relaxed MM. */
6935 if (!is_mm_relaxed (model
))
6936 expand_memory_blockage ();
6939 /* This function expands the atomic load operation:
6940 return the atomically loaded value in MEM.
6942 MEMMODEL is the memory model variant to use.
6943 TARGET is an option place to stick the return value. */
6946 expand_atomic_load (rtx target
, rtx mem
, enum memmodel model
)
6948 machine_mode mode
= GET_MODE (mem
);
6949 enum insn_code icode
;
6951 /* If the target supports the load directly, great. */
6952 icode
= direct_optab_handler (atomic_load_optab
, mode
);
6953 if (icode
!= CODE_FOR_nothing
)
6955 class expand_operand ops
[3];
6956 rtx_insn
*last
= get_last_insn ();
6957 if (is_mm_seq_cst (model
))
6958 expand_memory_blockage ();
6960 create_output_operand (&ops
[0], target
, mode
);
6961 create_fixed_operand (&ops
[1], mem
);
6962 create_integer_operand (&ops
[2], model
);
6963 if (maybe_expand_insn (icode
, 3, ops
))
6965 if (!is_mm_relaxed (model
))
6966 expand_memory_blockage ();
6967 return ops
[0].value
;
6969 delete_insns_since (last
);
6972 /* If the size of the object is greater than word size on this target,
6973 then we assume that a load will not be atomic. We could try to
6974 emulate a load with a compare-and-swap operation, but the store that
6975 doing this could result in would be incorrect if this is a volatile
6976 atomic load or targetting read-only-mapped memory. */
6977 if (maybe_gt (GET_MODE_PRECISION (mode
), BITS_PER_WORD
))
6978 /* If there is no atomic load, leave the library call. */
6981 /* Otherwise assume loads are atomic, and emit the proper barriers. */
6982 if (!target
|| target
== const0_rtx
)
6983 target
= gen_reg_rtx (mode
);
6985 /* For SEQ_CST, emit a barrier before the load. */
6986 if (is_mm_seq_cst (model
))
6987 expand_mem_thread_fence (model
);
6989 emit_move_insn (target
, mem
);
6991 /* Emit the appropriate barrier after the load. */
6992 expand_mem_thread_fence (model
);
6997 /* This function expands the atomic store operation:
6998 Atomically store VAL in MEM.
6999 MEMMODEL is the memory model variant to use.
7000 USE_RELEASE is true if __sync_lock_release can be used as a fall back.
7001 function returns const0_rtx if a pattern was emitted. */
7004 expand_atomic_store (rtx mem
, rtx val
, enum memmodel model
, bool use_release
)
7006 machine_mode mode
= GET_MODE (mem
);
7007 enum insn_code icode
;
7008 class expand_operand ops
[3];
7010 /* If the target supports the store directly, great. */
7011 icode
= direct_optab_handler (atomic_store_optab
, mode
);
7012 if (icode
!= CODE_FOR_nothing
)
7014 rtx_insn
*last
= get_last_insn ();
7015 if (!is_mm_relaxed (model
))
7016 expand_memory_blockage ();
7017 create_fixed_operand (&ops
[0], mem
);
7018 create_input_operand (&ops
[1], val
, mode
);
7019 create_integer_operand (&ops
[2], model
);
7020 if (maybe_expand_insn (icode
, 3, ops
))
7022 if (is_mm_seq_cst (model
))
7023 expand_memory_blockage ();
7026 delete_insns_since (last
);
7029 /* If using __sync_lock_release is a viable alternative, try it.
7030 Note that this will not be set to true if we are expanding a generic
7031 __atomic_store_n. */
7034 icode
= direct_optab_handler (sync_lock_release_optab
, mode
);
7035 if (icode
!= CODE_FOR_nothing
)
7037 create_fixed_operand (&ops
[0], mem
);
7038 create_input_operand (&ops
[1], const0_rtx
, mode
);
7039 if (maybe_expand_insn (icode
, 2, ops
))
7041 /* lock_release is only a release barrier. */
7042 if (is_mm_seq_cst (model
))
7043 expand_mem_thread_fence (model
);
7049 /* If the size of the object is greater than word size on this target,
7050 a default store will not be atomic. */
7051 if (maybe_gt (GET_MODE_PRECISION (mode
), BITS_PER_WORD
))
7053 /* If loads are atomic or we are called to provide a __sync builtin,
7054 we can try a atomic_exchange and throw away the result. Otherwise,
7055 don't do anything so that we do not create an inconsistency between
7056 loads and stores. */
7057 if (can_atomic_load_p (mode
) || is_mm_sync (model
))
7059 rtx target
= maybe_emit_atomic_exchange (NULL_RTX
, mem
, val
, model
);
7061 target
= maybe_emit_compare_and_swap_exchange_loop (NULL_RTX
, mem
,
7069 /* Otherwise assume stores are atomic, and emit the proper barriers. */
7070 expand_mem_thread_fence (model
);
7072 emit_move_insn (mem
, val
);
7074 /* For SEQ_CST, also emit a barrier after the store. */
7075 if (is_mm_seq_cst (model
))
7076 expand_mem_thread_fence (model
);
7082 /* Structure containing the pointers and values required to process the
7083 various forms of the atomic_fetch_op and atomic_op_fetch builtins. */
7085 struct atomic_op_functions
7087 direct_optab mem_fetch_before
;
7088 direct_optab mem_fetch_after
;
7089 direct_optab mem_no_result
;
7092 direct_optab no_result
;
7093 enum rtx_code reverse_code
;
7097 /* Fill in structure pointed to by OP with the various optab entries for an
7098 operation of type CODE. */
7101 get_atomic_op_for_code (struct atomic_op_functions
*op
, enum rtx_code code
)
7103 gcc_assert (op
!= NULL
);
7105 /* If SWITCHABLE_TARGET is defined, then subtargets can be switched
7106 in the source code during compilation, and the optab entries are not
7107 computable until runtime. Fill in the values at runtime. */
7111 op
->mem_fetch_before
= atomic_fetch_add_optab
;
7112 op
->mem_fetch_after
= atomic_add_fetch_optab
;
7113 op
->mem_no_result
= atomic_add_optab
;
7114 op
->fetch_before
= sync_old_add_optab
;
7115 op
->fetch_after
= sync_new_add_optab
;
7116 op
->no_result
= sync_add_optab
;
7117 op
->reverse_code
= MINUS
;
7120 op
->mem_fetch_before
= atomic_fetch_sub_optab
;
7121 op
->mem_fetch_after
= atomic_sub_fetch_optab
;
7122 op
->mem_no_result
= atomic_sub_optab
;
7123 op
->fetch_before
= sync_old_sub_optab
;
7124 op
->fetch_after
= sync_new_sub_optab
;
7125 op
->no_result
= sync_sub_optab
;
7126 op
->reverse_code
= PLUS
;
7129 op
->mem_fetch_before
= atomic_fetch_xor_optab
;
7130 op
->mem_fetch_after
= atomic_xor_fetch_optab
;
7131 op
->mem_no_result
= atomic_xor_optab
;
7132 op
->fetch_before
= sync_old_xor_optab
;
7133 op
->fetch_after
= sync_new_xor_optab
;
7134 op
->no_result
= sync_xor_optab
;
7135 op
->reverse_code
= XOR
;
7138 op
->mem_fetch_before
= atomic_fetch_and_optab
;
7139 op
->mem_fetch_after
= atomic_and_fetch_optab
;
7140 op
->mem_no_result
= atomic_and_optab
;
7141 op
->fetch_before
= sync_old_and_optab
;
7142 op
->fetch_after
= sync_new_and_optab
;
7143 op
->no_result
= sync_and_optab
;
7144 op
->reverse_code
= UNKNOWN
;
7147 op
->mem_fetch_before
= atomic_fetch_or_optab
;
7148 op
->mem_fetch_after
= atomic_or_fetch_optab
;
7149 op
->mem_no_result
= atomic_or_optab
;
7150 op
->fetch_before
= sync_old_ior_optab
;
7151 op
->fetch_after
= sync_new_ior_optab
;
7152 op
->no_result
= sync_ior_optab
;
7153 op
->reverse_code
= UNKNOWN
;
7156 op
->mem_fetch_before
= atomic_fetch_nand_optab
;
7157 op
->mem_fetch_after
= atomic_nand_fetch_optab
;
7158 op
->mem_no_result
= atomic_nand_optab
;
7159 op
->fetch_before
= sync_old_nand_optab
;
7160 op
->fetch_after
= sync_new_nand_optab
;
7161 op
->no_result
= sync_nand_optab
;
7162 op
->reverse_code
= UNKNOWN
;
7169 /* See if there is a more optimal way to implement the operation "*MEM CODE VAL"
7170 using memory order MODEL. If AFTER is true the operation needs to return
7171 the value of *MEM after the operation, otherwise the previous value.
7172 TARGET is an optional place to place the result. The result is unused if
7174 Return the result if there is a better sequence, otherwise NULL_RTX. */
7177 maybe_optimize_fetch_op (rtx target
, rtx mem
, rtx val
, enum rtx_code code
,
7178 enum memmodel model
, bool after
)
7180 /* If the value is prefetched, or not used, it may be possible to replace
7181 the sequence with a native exchange operation. */
7182 if (!after
|| target
== const0_rtx
)
7184 /* fetch_and (&x, 0, m) can be replaced with exchange (&x, 0, m). */
7185 if (code
== AND
&& val
== const0_rtx
)
7187 if (target
== const0_rtx
)
7188 target
= gen_reg_rtx (GET_MODE (mem
));
7189 return maybe_emit_atomic_exchange (target
, mem
, val
, model
);
7192 /* fetch_or (&x, -1, m) can be replaced with exchange (&x, -1, m). */
7193 if (code
== IOR
&& val
== constm1_rtx
)
7195 if (target
== const0_rtx
)
7196 target
= gen_reg_rtx (GET_MODE (mem
));
7197 return maybe_emit_atomic_exchange (target
, mem
, val
, model
);
7204 /* Try to emit an instruction for a specific operation varaition.
7205 OPTAB contains the OP functions.
7206 TARGET is an optional place to return the result. const0_rtx means unused.
7207 MEM is the memory location to operate on.
7208 VAL is the value to use in the operation.
7209 USE_MEMMODEL is TRUE if the variation with a memory model should be tried.
7210 MODEL is the memory model, if used.
7211 AFTER is true if the returned result is the value after the operation. */
7214 maybe_emit_op (const struct atomic_op_functions
*optab
, rtx target
, rtx mem
,
7215 rtx val
, bool use_memmodel
, enum memmodel model
, bool after
)
7217 machine_mode mode
= GET_MODE (mem
);
7218 class expand_operand ops
[4];
7219 enum insn_code icode
;
7223 /* Check to see if there is a result returned. */
7224 if (target
== const0_rtx
)
7228 icode
= direct_optab_handler (optab
->mem_no_result
, mode
);
7229 create_integer_operand (&ops
[2], model
);
7234 icode
= direct_optab_handler (optab
->no_result
, mode
);
7238 /* Otherwise, we need to generate a result. */
7243 icode
= direct_optab_handler (after
? optab
->mem_fetch_after
7244 : optab
->mem_fetch_before
, mode
);
7245 create_integer_operand (&ops
[3], model
);
7250 icode
= optab_handler (after
? optab
->fetch_after
7251 : optab
->fetch_before
, mode
);
7254 create_output_operand (&ops
[op_counter
++], target
, mode
);
7256 if (icode
== CODE_FOR_nothing
)
7259 create_fixed_operand (&ops
[op_counter
++], mem
);
7260 /* VAL may have been promoted to a wider mode. Shrink it if so. */
7261 create_convert_operand_to (&ops
[op_counter
++], val
, mode
, true);
7263 if (maybe_expand_insn (icode
, num_ops
, ops
))
7264 return (target
== const0_rtx
? const0_rtx
: ops
[0].value
);
7270 /* This function expands an atomic fetch_OP or OP_fetch operation:
7271 TARGET is an option place to stick the return value. const0_rtx indicates
7272 the result is unused.
7273 atomically fetch MEM, perform the operation with VAL and return it to MEM.
7274 CODE is the operation being performed (OP)
7275 MEMMODEL is the memory model variant to use.
7276 AFTER is true to return the result of the operation (OP_fetch).
7277 AFTER is false to return the value before the operation (fetch_OP).
7279 This function will *only* generate instructions if there is a direct
7280 optab. No compare and swap loops or libcalls will be generated. */
7283 expand_atomic_fetch_op_no_fallback (rtx target
, rtx mem
, rtx val
,
7284 enum rtx_code code
, enum memmodel model
,
7287 machine_mode mode
= GET_MODE (mem
);
7288 struct atomic_op_functions optab
;
7290 bool unused_result
= (target
== const0_rtx
);
7292 get_atomic_op_for_code (&optab
, code
);
7294 /* Check to see if there are any better instructions. */
7295 result
= maybe_optimize_fetch_op (target
, mem
, val
, code
, model
, after
);
7299 /* Check for the case where the result isn't used and try those patterns. */
7302 /* Try the memory model variant first. */
7303 result
= maybe_emit_op (&optab
, target
, mem
, val
, true, model
, true);
7307 /* Next try the old style withuot a memory model. */
7308 result
= maybe_emit_op (&optab
, target
, mem
, val
, false, model
, true);
7312 /* There is no no-result pattern, so try patterns with a result. */
7316 /* Try the __atomic version. */
7317 result
= maybe_emit_op (&optab
, target
, mem
, val
, true, model
, after
);
7321 /* Try the older __sync version. */
7322 result
= maybe_emit_op (&optab
, target
, mem
, val
, false, model
, after
);
7326 /* If the fetch value can be calculated from the other variation of fetch,
7327 try that operation. */
7328 if (after
|| unused_result
|| optab
.reverse_code
!= UNKNOWN
)
7330 /* Try the __atomic version, then the older __sync version. */
7331 result
= maybe_emit_op (&optab
, target
, mem
, val
, true, model
, !after
);
7333 result
= maybe_emit_op (&optab
, target
, mem
, val
, false, model
, !after
);
7337 /* If the result isn't used, no need to do compensation code. */
7341 /* Issue compensation code. Fetch_after == fetch_before OP val.
7342 Fetch_before == after REVERSE_OP val. */
7344 code
= optab
.reverse_code
;
7347 result
= expand_simple_binop (mode
, AND
, result
, val
, NULL_RTX
,
7348 true, OPTAB_LIB_WIDEN
);
7349 result
= expand_simple_unop (mode
, NOT
, result
, target
, true);
7352 result
= expand_simple_binop (mode
, code
, result
, val
, target
,
7353 true, OPTAB_LIB_WIDEN
);
7358 /* No direct opcode can be generated. */
7364 /* This function expands an atomic fetch_OP or OP_fetch operation:
7365 TARGET is an option place to stick the return value. const0_rtx indicates
7366 the result is unused.
7367 atomically fetch MEM, perform the operation with VAL and return it to MEM.
7368 CODE is the operation being performed (OP)
7369 MEMMODEL is the memory model variant to use.
7370 AFTER is true to return the result of the operation (OP_fetch).
7371 AFTER is false to return the value before the operation (fetch_OP). */
7373 expand_atomic_fetch_op (rtx target
, rtx mem
, rtx val
, enum rtx_code code
,
7374 enum memmodel model
, bool after
)
7376 machine_mode mode
= GET_MODE (mem
);
7378 bool unused_result
= (target
== const0_rtx
);
7380 /* If loads are not atomic for the required size and we are not called to
7381 provide a __sync builtin, do not do anything so that we stay consistent
7382 with atomic loads of the same size. */
7383 if (!can_atomic_load_p (mode
) && !is_mm_sync (model
))
7386 result
= expand_atomic_fetch_op_no_fallback (target
, mem
, val
, code
, model
,
7392 /* Add/sub can be implemented by doing the reverse operation with -(val). */
7393 if (code
== PLUS
|| code
== MINUS
)
7396 enum rtx_code reverse
= (code
== PLUS
? MINUS
: PLUS
);
7399 tmp
= expand_simple_unop (mode
, NEG
, val
, NULL_RTX
, true);
7400 result
= expand_atomic_fetch_op_no_fallback (target
, mem
, tmp
, reverse
,
7404 /* PLUS worked so emit the insns and return. */
7411 /* PLUS did not work, so throw away the negation code and continue. */
7415 /* Try the __sync libcalls only if we can't do compare-and-swap inline. */
7416 if (!can_compare_and_swap_p (mode
, false))
7420 enum rtx_code orig_code
= code
;
7421 struct atomic_op_functions optab
;
7423 get_atomic_op_for_code (&optab
, code
);
7424 libfunc
= optab_libfunc (after
? optab
.fetch_after
7425 : optab
.fetch_before
, mode
);
7427 && (after
|| unused_result
|| optab
.reverse_code
!= UNKNOWN
))
7431 code
= optab
.reverse_code
;
7432 libfunc
= optab_libfunc (after
? optab
.fetch_before
7433 : optab
.fetch_after
, mode
);
7435 if (libfunc
!= NULL
)
7437 rtx addr
= convert_memory_address (ptr_mode
, XEXP (mem
, 0));
7438 result
= emit_library_call_value (libfunc
, NULL
, LCT_NORMAL
, mode
,
7439 addr
, ptr_mode
, val
, mode
);
7441 if (!unused_result
&& fixup
)
7442 result
= expand_simple_binop (mode
, code
, result
, val
, target
,
7443 true, OPTAB_LIB_WIDEN
);
7447 /* We need the original code for any further attempts. */
7451 /* If nothing else has succeeded, default to a compare and swap loop. */
7452 if (can_compare_and_swap_p (mode
, true))
7455 rtx t0
= gen_reg_rtx (mode
), t1
;
7459 /* If the result is used, get a register for it. */
7462 if (!target
|| !register_operand (target
, mode
))
7463 target
= gen_reg_rtx (mode
);
7464 /* If fetch_before, copy the value now. */
7466 emit_move_insn (target
, t0
);
7469 target
= const0_rtx
;
7474 t1
= expand_simple_binop (mode
, AND
, t1
, val
, NULL_RTX
,
7475 true, OPTAB_LIB_WIDEN
);
7476 t1
= expand_simple_unop (mode
, code
, t1
, NULL_RTX
, true);
7479 t1
= expand_simple_binop (mode
, code
, t1
, val
, NULL_RTX
, true,
7482 /* For after, copy the value now. */
7483 if (!unused_result
&& after
)
7484 emit_move_insn (target
, t1
);
7485 insn
= get_insns ();
7488 if (t1
!= NULL
&& expand_compare_and_swap_loop (mem
, t0
, t1
, insn
))
7495 /* Return true if OPERAND is suitable for operand number OPNO of
7496 instruction ICODE. */
7499 insn_operand_matches (enum insn_code icode
, unsigned int opno
, rtx operand
)
7501 return (!insn_data
[(int) icode
].operand
[opno
].predicate
7502 || (insn_data
[(int) icode
].operand
[opno
].predicate
7503 (operand
, insn_data
[(int) icode
].operand
[opno
].mode
)));
7506 /* TARGET is a target of a multiword operation that we are going to
7507 implement as a series of word-mode operations. Return true if
7508 TARGET is suitable for this purpose. */
7511 valid_multiword_target_p (rtx target
)
7516 mode
= GET_MODE (target
);
7517 if (!GET_MODE_SIZE (mode
).is_constant (&size
))
7519 for (i
= 0; i
< size
; i
+= UNITS_PER_WORD
)
7520 if (!validate_subreg (word_mode
, mode
, target
, i
))
7525 /* Make OP describe an input operand that has value INTVAL and that has
7526 no inherent mode. This function should only be used for operands that
7527 are always expand-time constants. The backend may request that INTVAL
7528 be copied into a different kind of rtx, but it must specify the mode
7529 of that rtx if so. */
7532 create_integer_operand (class expand_operand
*op
, poly_int64 intval
)
7534 create_expand_operand (op
, EXPAND_INTEGER
,
7535 gen_int_mode (intval
, MAX_MODE_INT
),
7536 VOIDmode
, false, intval
);
7539 /* Like maybe_legitimize_operand, but do not change the code of the
7540 current rtx value. */
7543 maybe_legitimize_operand_same_code (enum insn_code icode
, unsigned int opno
,
7544 class expand_operand
*op
)
7546 /* See if the operand matches in its current form. */
7547 if (insn_operand_matches (icode
, opno
, op
->value
))
7550 /* If the operand is a memory whose address has no side effects,
7551 try forcing the address into a non-virtual pseudo register.
7552 The check for side effects is important because copy_to_mode_reg
7553 cannot handle things like auto-modified addresses. */
7554 if (insn_data
[(int) icode
].operand
[opno
].allows_mem
&& MEM_P (op
->value
))
7559 addr
= XEXP (mem
, 0);
7560 if (!(REG_P (addr
) && REGNO (addr
) > LAST_VIRTUAL_REGISTER
)
7561 && !side_effects_p (addr
))
7566 last
= get_last_insn ();
7567 mode
= get_address_mode (mem
);
7568 mem
= replace_equiv_address (mem
, copy_to_mode_reg (mode
, addr
));
7569 if (insn_operand_matches (icode
, opno
, mem
))
7574 delete_insns_since (last
);
7581 /* Try to make OP match operand OPNO of instruction ICODE. Return true
7582 on success, storing the new operand value back in OP. */
7585 maybe_legitimize_operand (enum insn_code icode
, unsigned int opno
,
7586 class expand_operand
*op
)
7588 machine_mode mode
, imode
, tmode
;
7595 temporary_volatile_ok
v (true);
7596 return maybe_legitimize_operand_same_code (icode
, opno
, op
);
7600 gcc_assert (mode
!= VOIDmode
);
7602 && op
->value
!= const0_rtx
7603 && GET_MODE (op
->value
) == mode
7604 && maybe_legitimize_operand_same_code (icode
, opno
, op
))
7607 op
->value
= gen_reg_rtx (mode
);
7613 gcc_assert (mode
!= VOIDmode
);
7614 gcc_assert (GET_MODE (op
->value
) == VOIDmode
7615 || GET_MODE (op
->value
) == mode
);
7616 if (maybe_legitimize_operand_same_code (icode
, opno
, op
))
7619 op
->value
= copy_to_mode_reg (mode
, op
->value
);
7622 case EXPAND_CONVERT_TO
:
7623 gcc_assert (mode
!= VOIDmode
);
7624 op
->value
= convert_to_mode (mode
, op
->value
, op
->unsigned_p
);
7627 case EXPAND_CONVERT_FROM
:
7628 if (GET_MODE (op
->value
) != VOIDmode
)
7629 mode
= GET_MODE (op
->value
);
7631 /* The caller must tell us what mode this value has. */
7632 gcc_assert (mode
!= VOIDmode
);
7634 imode
= insn_data
[(int) icode
].operand
[opno
].mode
;
7635 tmode
= (VECTOR_MODE_P (imode
) && !VECTOR_MODE_P (mode
)
7636 ? GET_MODE_INNER (imode
) : imode
);
7637 if (tmode
!= VOIDmode
&& tmode
!= mode
)
7639 op
->value
= convert_modes (tmode
, mode
, op
->value
, op
->unsigned_p
);
7642 if (imode
!= VOIDmode
&& imode
!= mode
)
7644 gcc_assert (VECTOR_MODE_P (imode
) && !VECTOR_MODE_P (mode
));
7645 op
->value
= expand_vector_broadcast (imode
, op
->value
);
7650 case EXPAND_ADDRESS
:
7651 op
->value
= convert_memory_address (as_a
<scalar_int_mode
> (mode
),
7655 case EXPAND_INTEGER
:
7656 mode
= insn_data
[(int) icode
].operand
[opno
].mode
;
7657 if (mode
!= VOIDmode
7658 && known_eq (trunc_int_for_mode (op
->int_value
, mode
),
7661 op
->value
= gen_int_mode (op
->int_value
, mode
);
7666 return insn_operand_matches (icode
, opno
, op
->value
);
7669 /* Make OP describe an input operand that should have the same value
7670 as VALUE, after any mode conversion that the target might request.
7671 TYPE is the type of VALUE. */
7674 create_convert_operand_from_type (class expand_operand
*op
,
7675 rtx value
, tree type
)
7677 create_convert_operand_from (op
, value
, TYPE_MODE (type
),
7678 TYPE_UNSIGNED (type
));
7681 /* Return true if the requirements on operands OP1 and OP2 of instruction
7682 ICODE are similar enough for the result of legitimizing OP1 to be
7683 reusable for OP2. OPNO1 and OPNO2 are the operand numbers associated
7684 with OP1 and OP2 respectively. */
7687 can_reuse_operands_p (enum insn_code icode
,
7688 unsigned int opno1
, unsigned int opno2
,
7689 const class expand_operand
*op1
,
7690 const class expand_operand
*op2
)
7692 /* Check requirements that are common to all types. */
7693 if (op1
->type
!= op2
->type
7694 || op1
->mode
!= op2
->mode
7695 || (insn_data
[(int) icode
].operand
[opno1
].mode
7696 != insn_data
[(int) icode
].operand
[opno2
].mode
))
7699 /* Check the requirements for specific types. */
7703 /* Outputs must remain distinct. */
7708 case EXPAND_ADDRESS
:
7709 case EXPAND_INTEGER
:
7712 case EXPAND_CONVERT_TO
:
7713 case EXPAND_CONVERT_FROM
:
7714 return op1
->unsigned_p
== op2
->unsigned_p
;
7719 /* Try to make operands [OPS, OPS + NOPS) match operands [OPNO, OPNO + NOPS)
7720 of instruction ICODE. Return true on success, leaving the new operand
7721 values in the OPS themselves. Emit no code on failure. */
7724 maybe_legitimize_operands (enum insn_code icode
, unsigned int opno
,
7725 unsigned int nops
, class expand_operand
*ops
)
7727 rtx_insn
*last
= get_last_insn ();
7728 rtx
*orig_values
= XALLOCAVEC (rtx
, nops
);
7729 for (unsigned int i
= 0; i
< nops
; i
++)
7731 orig_values
[i
] = ops
[i
].value
;
7733 /* First try reusing the result of an earlier legitimization.
7734 This avoids duplicate rtl and ensures that tied operands
7737 This search is linear, but NOPS is bounded at compile time
7738 to a small number (current a single digit). */
7741 if (can_reuse_operands_p (icode
, opno
+ j
, opno
+ i
, &ops
[j
], &ops
[i
])
7742 && rtx_equal_p (orig_values
[j
], orig_values
[i
])
7744 && insn_operand_matches (icode
, opno
+ i
, ops
[j
].value
))
7746 ops
[i
].value
= copy_rtx (ops
[j
].value
);
7750 /* Otherwise try legitimizing the operand on its own. */
7751 if (j
== i
&& !maybe_legitimize_operand (icode
, opno
+ i
, &ops
[i
]))
7753 delete_insns_since (last
);
7760 /* Try to generate instruction ICODE, using operands [OPS, OPS + NOPS)
7761 as its operands. Return the instruction pattern on success,
7762 and emit any necessary set-up code. Return null and emit no
7766 maybe_gen_insn (enum insn_code icode
, unsigned int nops
,
7767 class expand_operand
*ops
)
7769 gcc_assert (nops
== (unsigned int) insn_data
[(int) icode
].n_generator_args
);
7770 if (!maybe_legitimize_operands (icode
, 0, nops
, ops
))
7776 return GEN_FCN (icode
) (ops
[0].value
);
7778 return GEN_FCN (icode
) (ops
[0].value
, ops
[1].value
);
7780 return GEN_FCN (icode
) (ops
[0].value
, ops
[1].value
, ops
[2].value
);
7782 return GEN_FCN (icode
) (ops
[0].value
, ops
[1].value
, ops
[2].value
,
7785 return GEN_FCN (icode
) (ops
[0].value
, ops
[1].value
, ops
[2].value
,
7786 ops
[3].value
, ops
[4].value
);
7788 return GEN_FCN (icode
) (ops
[0].value
, ops
[1].value
, ops
[2].value
,
7789 ops
[3].value
, ops
[4].value
, ops
[5].value
);
7791 return GEN_FCN (icode
) (ops
[0].value
, ops
[1].value
, ops
[2].value
,
7792 ops
[3].value
, ops
[4].value
, ops
[5].value
,
7795 return GEN_FCN (icode
) (ops
[0].value
, ops
[1].value
, ops
[2].value
,
7796 ops
[3].value
, ops
[4].value
, ops
[5].value
,
7797 ops
[6].value
, ops
[7].value
);
7799 return GEN_FCN (icode
) (ops
[0].value
, ops
[1].value
, ops
[2].value
,
7800 ops
[3].value
, ops
[4].value
, ops
[5].value
,
7801 ops
[6].value
, ops
[7].value
, ops
[8].value
);
7806 /* Try to emit instruction ICODE, using operands [OPS, OPS + NOPS)
7807 as its operands. Return true on success and emit no code on failure. */
7810 maybe_expand_insn (enum insn_code icode
, unsigned int nops
,
7811 class expand_operand
*ops
)
7813 rtx_insn
*pat
= maybe_gen_insn (icode
, nops
, ops
);
7822 /* Like maybe_expand_insn, but for jumps. */
7825 maybe_expand_jump_insn (enum insn_code icode
, unsigned int nops
,
7826 class expand_operand
*ops
)
7828 rtx_insn
*pat
= maybe_gen_insn (icode
, nops
, ops
);
7831 emit_jump_insn (pat
);
7837 /* Emit instruction ICODE, using operands [OPS, OPS + NOPS)
7841 expand_insn (enum insn_code icode
, unsigned int nops
,
7842 class expand_operand
*ops
)
7844 if (!maybe_expand_insn (icode
, nops
, ops
))
7848 /* Like expand_insn, but for jumps. */
7851 expand_jump_insn (enum insn_code icode
, unsigned int nops
,
7852 class expand_operand
*ops
)
7854 if (!maybe_expand_jump_insn (icode
, nops
, ops
))