1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
25 #include "coretypes.h"
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
45 #include "basic-block.h"
/* NOTE(review): this chunk is a lossy extraction of optabs.c -- many
   original lines (braces, declarations, parts of comments) are missing.
   Code below is preserved byte-for-byte; only comments are added.  */
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
55 See expr.h for documentation of these optabs. */
57 optab optab_table
[OTI_MAX
];
/* Table of library-call rtxes, indexed by LTI_* codes (bounded by
   LTI_MAX).  NOTE(review): original comment lost in extraction.  */
59 rtx libfunc_table
[LTI_MAX
];
61 /* Tables of patterns for converting one mode to another. */
62 convert_optab convert_optab_table
[CTI_MAX
];
64 /* Contains the optab used for each rtx code. */
65 optab code_to_optab
[NUM_RTX_CODE
+ 1];
67 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
68 gives the gen_function to make a branch to test that condition. */
70 rtxfun bcc_gen_fctn
[NUM_RTX_CODE
];
72 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
73 gives the insn code to make a store-condition insn
74 to test that condition. */
76 enum insn_code setcc_gen_code
[NUM_RTX_CODE
];
78 #ifdef HAVE_conditional_move
79 /* Indexed by the machine mode, gives the insn code to make a conditional
80 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
81 setcc_gen_code to cut down on the number of named patterns. Consider a day
82 when a lot more rtx codes are conditional (eg: for the ARM). */
84 enum insn_code movcc_gen_code
[NUM_MACHINE_MODES
];
87 /* Indexed by the machine mode, gives the insn code for vector conditional
   operations -- remainder of the original comment (and its closing) was
   lost in extraction; completed here so the tables below parse as code.  */
90 enum insn_code vcond_gen_code
[NUM_MACHINE_MODES
];
91 enum insn_code vcondu_gen_code
[NUM_MACHINE_MODES
];
93 /* The insn generating function can not take an rtx_code argument.
94 TRAP_RTX is used as an rtx argument. Its code is replaced with
95 the code to be used in the trap insn and all other fields are ignored. */
96 static GTY(()) rtx trap_rtx
;
/* Forward declarations of subroutines local to this file.
   NOTE(review): lossy extraction -- several prototype continuation
   lines are missing, so some declarations below are incomplete.  */
98 static int add_equal_note (rtx
, rtx
, enum rtx_code
, rtx
, rtx
);
99 static rtx
widen_operand (rtx
, enum machine_mode
, enum machine_mode
, int,
101 static void prepare_cmp_insn (rtx
*, rtx
*, enum rtx_code
*, rtx
,
102 enum machine_mode
*, int *,
103 enum can_compare_purpose
);
104 static enum insn_code
can_fix_p (enum machine_mode
, enum machine_mode
, int,
106 static enum insn_code
can_float_p (enum machine_mode
, enum machine_mode
, int);
107 static optab
new_optab (void);
108 static convert_optab
new_convert_optab (void);
109 static inline optab
init_optab (enum rtx_code
);
110 static inline optab
init_optabv (enum rtx_code
);
111 static inline convert_optab
init_convert_optab (enum rtx_code
);
112 static void init_libfuncs (optab
, int, int, const char *, int);
113 static void init_integral_libfuncs (optab
, const char *, int);
114 static void init_floating_libfuncs (optab
, const char *, int);
115 static void init_interclass_conv_libfuncs (convert_optab
, const char *,
116 enum mode_class
, enum mode_class
);
117 static void init_intraclass_conv_libfuncs (convert_optab
, const char *,
118 enum mode_class
, bool);
119 static void emit_cmp_and_jump_insn_1 (rtx
, rtx
, enum machine_mode
,
120 enum rtx_code
, int, rtx
);
121 static void prepare_float_lib_cmp (rtx
*, rtx
*, enum rtx_code
*,
122 enum machine_mode
*, int *);
123 static rtx
widen_clz (enum machine_mode
, rtx
, rtx
);
124 static rtx
expand_parity (enum machine_mode
, rtx
, rtx
);
125 static enum rtx_code
get_rtx_code (enum tree_code
, bool);
126 static rtx
vector_compare_rtx (tree
, bool, enum insn_code
);
/* Targets without a conditional-trap pattern get a stub that aborts.  */
128 #ifndef HAVE_conditional_trap
129 #define HAVE_conditional_trap 0
130 #define gen_conditional_trap(a,b) (abort (), NULL_RTX)
/* NOTE(review): lossy extraction -- the bodies of several early-return
   branches, the braces and some declarations of this function are
   missing below.  Code is preserved byte-for-byte; only comments are
   added.  */
133 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
134 the result of operation CODE applied to OP0 (and OP1 if it is a binary
137 If the last insn does not set TARGET, don't do anything, but return 1.
139 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
140 don't add the REG_EQUAL note but return 0. Our caller can then try
141 again, ensuring that TARGET is not one of the operands. */
144 add_equal_note (rtx insns
, rtx target
, enum rtx_code code
, rtx op0
, rtx op1
)
146 rtx last_insn
, insn
, set
;
/* Bail out on degenerate sequences (fewer than two insns).  */
151 || NEXT_INSN (insns
) == NULL_RTX
)
/* Only arithmetic, comparison and unary codes get a REG_EQUAL note.  */
154 if (GET_RTX_CLASS (code
) != RTX_COMM_ARITH
155 && GET_RTX_CLASS (code
) != RTX_BIN_ARITH
156 && GET_RTX_CLASS (code
) != RTX_COMM_COMPARE
157 && GET_RTX_CLASS (code
) != RTX_COMPARE
158 && GET_RTX_CLASS (code
) != RTX_UNARY
)
161 if (GET_CODE (target
) == ZERO_EXTRACT
)
/* Walk to the last insn of the sequence.  */
164 for (last_insn
= insns
;
165 NEXT_INSN (last_insn
) != NULL_RTX
;
166 last_insn
= NEXT_INSN (last_insn
))
169 set
= single_set (last_insn
);
173 if (! rtx_equal_p (SET_DEST (set
), target
)
174 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
175 && (GET_CODE (SET_DEST (set
)) != STRICT_LOW_PART
176 || ! rtx_equal_p (XEXP (SET_DEST (set
), 0), target
)))
179 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
180 besides the last insn. */
181 if (reg_overlap_mentioned_p (target
, op0
)
182 || (op1
&& reg_overlap_mentioned_p (target
, op1
)))
184 insn
= PREV_INSN (last_insn
);
185 while (insn
!= NULL_RTX
)
187 if (reg_set_p (target
, insn
))
190 insn
= PREV_INSN (insn
);
/* Build the (CODE OP0 [OP1]) expression and attach it as a unique
   REG_EQUAL note on the last insn.  */
194 if (GET_RTX_CLASS (code
) == RTX_UNARY
)
195 note
= gen_rtx_fmt_e (code
, GET_MODE (target
), copy_rtx (op0
));
197 note
= gen_rtx_fmt_ee (code
, GET_MODE (target
), copy_rtx (op0
), copy_rtx (op1
));
199 set_unique_reg_note (last_insn
, REG_EQUAL
, note
);
/* NOTE(review): lossy extraction -- the declaration of `result', the
   braces, the final return and parts of two comments are missing below.
   Code preserved byte-for-byte; only comments are added/completed.  */
204 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
205 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
206 not actually do a sign-extend or zero-extend, but can leave the
207 higher-order bits of the result rtx undefined, for example, in the case
208 of logical operations, but not right shifts. */
211 widen_operand (rtx op
, enum machine_mode mode
, enum machine_mode oldmode
,
212 int unsignedp
, int no_extend
)
216 /* If we don't have to extend and this is a constant, return it. */
217 if (no_extend
&& GET_MODE (op
) == VOIDmode
)
220 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
221 extend since it will be more efficient to do so unless the signedness of
222 a promoted object differs from our extension. */
224 || (GET_CODE (op
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op
)
225 && SUBREG_PROMOTED_UNSIGNED_P (op
) == unsignedp
))
226 return convert_modes (mode
, oldmode
, op
, unsignedp
);
228 /* If MODE is no wider than a single word, we return a paradoxical
   SUBREG -- remainder of the original comment lost in extraction.  */
230 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
231 return gen_rtx_SUBREG (mode
, force_reg (GET_MODE (op
), op
), 0);
233 /* Otherwise, get an object of MODE, clobber it, and set the low-order
   part to OP -- remainder of the original comment lost in extraction.  */
236 result
= gen_reg_rtx (mode
);
237 emit_insn (gen_rtx_CLOBBER (VOIDmode
, result
));
238 emit_move_insn (gen_lowpart (GET_MODE (op
), result
), op
);
/* NOTE(review): lossy extraction -- the `switch'/`case' scaffolding,
   braces and the default return of this function are largely missing
   below.  Code preserved byte-for-byte; only comments are added.  */
242 /* Return the optab used for computing the operation given by
243 the tree code, CODE. This function is not always usable (for
244 example, it cannot give complete results for multiplication
245 or division) but probably ought to be relied on more widely
246 throughout the expander. */
248 optab_for_tree_code (enum tree_code code
, tree type
)
260 return one_cmpl_optab
;
/* For the cases below, the signedness of TYPE selects between the
   unsigned and signed variant of each optab.  */
269 return TYPE_UNSIGNED (type
) ? umod_optab
: smod_optab
;
277 return TYPE_UNSIGNED (type
) ? udiv_optab
: sdiv_optab
;
283 return TYPE_UNSIGNED (type
) ? lshr_optab
: ashr_optab
;
292 return TYPE_UNSIGNED (type
) ? umax_optab
: smax_optab
;
295 return TYPE_UNSIGNED (type
) ? umin_optab
: smin_optab
;
297 case REALIGN_LOAD_EXPR
:
298 return vec_realign_load_optab
;
/* -ftrapv (trapping overflow) only applies to signed integral types;
   for those, pick the trapping variant of the optab below.  */
304 trapv
= flag_trapv
&& INTEGRAL_TYPE_P (type
) && !TYPE_UNSIGNED (type
);
308 return trapv
? addv_optab
: add_optab
;
311 return trapv
? subv_optab
: sub_optab
;
314 return trapv
? smulv_optab
: smul_optab
;
317 return trapv
? negv_optab
: neg_optab
;
320 return trapv
? absv_optab
: abs_optab
;
/* NOTE(review): lossy extraction -- the declarations of `temp' and
   `pat', braces, the emit of PAT and the final return are missing
   below.  Code preserved byte-for-byte; comments added/completed.  */
328 /* Generate code to perform an operation specified by TERNARY_OPTAB
329 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
331 UNSIGNEDP is for the case where we have to widen the operands
332 to perform the operation. It says to use zero-extension.
334 If TARGET is nonzero, the value
335 is generated there, if it is convenient to do so.
336 In all cases an rtx is returned for the locus of the value;
337 this may or may not be TARGET. */
340 expand_ternary_op (enum machine_mode mode
, optab ternary_optab
, rtx op0
,
341 rtx op1
, rtx op2
, rtx target
, int unsignedp
)
/* Insn code and the input-operand modes of the target's pattern.  */
343 int icode
= (int) ternary_optab
->handlers
[(int) mode
].insn_code
;
344 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
345 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
346 enum machine_mode mode2
= insn_data
[icode
].operand
[3].mode
;
349 rtx xop0
= op0
, xop1
= op1
, xop2
= op2
;
351 if (ternary_optab
->handlers
[(int) mode
].insn_code
== CODE_FOR_nothing
)
/* Use a fresh pseudo when TARGET is absent or rejected by the
   pattern's output predicate.  */
355 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, mode
))
356 temp
= gen_reg_rtx (mode
);
360 /* In case the insn wants input operands in modes different from
361 those of the actual operands, convert the operands. It would
362 seem that we don't need to convert CONST_INTs, but we do, so
363 that they're properly zero-extended, sign-extended or truncated
   for their mode -- closing of the original comment lost in
   extraction.  */
366 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
367 xop0
= convert_modes (mode0
,
368 GET_MODE (op0
) != VOIDmode
373 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
374 xop1
= convert_modes (mode1
,
375 GET_MODE (op1
) != VOIDmode
380 if (GET_MODE (op2
) != mode2
&& mode2
!= VOIDmode
)
381 xop2
= convert_modes (mode2
,
382 GET_MODE (op2
) != VOIDmode
387 /* Now, if insn's predicates don't allow our operands, put them into
   pseudo registers -- closing of the original comment lost in
   extraction.  */
390 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
)
391 && mode0
!= VOIDmode
)
392 xop0
= copy_to_mode_reg (mode0
, xop0
);
394 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
)
395 && mode1
!= VOIDmode
)
396 xop1
= copy_to_mode_reg (mode1
, xop1
);
398 if (! (*insn_data
[icode
].operand
[3].predicate
) (xop2
, mode2
)
399 && mode2
!= VOIDmode
)
400 xop2
= copy_to_mode_reg (mode2
, xop2
);
/* Generate the pattern for the ternary operation.  */
402 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
, xop2
);
/* NOTE(review): lossy extraction -- the return-type line, braces and an
   `else' are missing below.  Code preserved byte-for-byte.  */
409 /* Like expand_binop, but return a constant rtx if the result can be
410 calculated at compile time. The arguments and return value are
411 otherwise the same as for expand_binop. */
414 simplify_expand_binop (enum machine_mode mode
, optab binoptab
,
415 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
416 enum optab_methods methods
)
/* If both operands are constant, fold at compile time instead of
   emitting insns.  */
418 if (CONSTANT_P (op0
) && CONSTANT_P (op1
))
419 return simplify_gen_binary (binoptab
->code
, mode
, op0
, op1
);
421 return expand_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
);
/* NOTE(review): lossy extraction -- the return-type line, braces, the
   failure check and the success/failure returns are missing below.
   Code preserved byte-for-byte.  */
424 /* Like simplify_expand_binop, but always put the result in TARGET.
425 Return true if the expansion succeeded. */
428 force_expand_binop (enum machine_mode mode
, optab binoptab
,
429 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
430 enum optab_methods methods
)
432 rtx x
= simplify_expand_binop (mode
, binoptab
, op0
, op1
,
433 target
, unsignedp
, methods
);
/* Copy the result into TARGET when the expansion placed it elsewhere.  */
437 emit_move_insn (target
, x
);
/* NOTE(review): lossy extraction -- braces and the success/failure
   return statements are missing below.  Code preserved byte-for-byte.  */
441 /* This subroutine of expand_doubleword_shift handles the cases in which
442 the effective shift value is >= BITS_PER_WORD. The arguments and return
443 value are the same as for the parent routine, except that SUPERWORD_OP1
444 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
445 INTO_TARGET may be null if the caller has decided to calculate it. */
448 expand_superword_shift (optab binoptab
, rtx outof_input
, rtx superword_op1
,
449 rtx outof_target
, rtx into_target
,
450 int unsignedp
, enum optab_methods methods
)
452 if (into_target
!= 0)
453 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, superword_op1
,
454 into_target
, unsignedp
, methods
))
457 if (outof_target
!= 0)
459 /* For a signed right shift, we must fill OUTOF_TARGET with copies
460 of the sign bit, otherwise we must fill it with zeros. */
461 if (binoptab
!= ashr_optab
)
462 emit_move_insn (outof_target
, CONST0_RTX (word_mode
));
/* Arithmetic shift by BITS_PER_WORD - 1 replicates the sign bit
   across the whole word.  */
464 if (!force_expand_binop (word_mode
, binoptab
,
465 outof_input
, GEN_INT (BITS_PER_WORD
- 1),
466 outof_target
, unsignedp
, methods
))
/* NOTE(review): lossy extraction -- the declarations of `carries' and
   `tmp', braces, `else' keywords and the return statements are missing
   below.  Code preserved byte-for-byte; only comments are added.  */
472 /* This subroutine of expand_doubleword_shift handles the cases in which
473 the effective shift value is < BITS_PER_WORD. The arguments and return
474 value are the same as for the parent routine. */
477 expand_subword_shift (enum machine_mode op1_mode
, optab binoptab
,
478 rtx outof_input
, rtx into_input
, rtx op1
,
479 rtx outof_target
, rtx into_target
,
480 int unsignedp
, enum optab_methods methods
,
481 unsigned HOST_WIDE_INT shift_mask
)
483 optab reverse_unsigned_shift
, unsigned_shift
;
/* Pick the unsigned shift optabs for the opposite and same direction
   as BINOPTAB; the carried bits always move with unsigned shifts.  */
486 reverse_unsigned_shift
= (binoptab
== ashl_optab
? lshr_optab
: ashl_optab
);
487 unsigned_shift
= (binoptab
== ashl_optab
? ashl_optab
: lshr_optab
);
489 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
490 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
491 the opposite direction to BINOPTAB. */
492 if (CONSTANT_P (op1
) || shift_mask
>= BITS_PER_WORD
)
494 carries
= outof_input
;
495 tmp
= immed_double_const (BITS_PER_WORD
, 0, op1_mode
);
496 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
501 /* We must avoid shifting by BITS_PER_WORD bits since that is either
502 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
503 has unknown behavior. Do a single shift first, then shift by the
504 remainder. It's OK to use ~OP1 as the remainder if shift counts
505 are truncated to the mode size. */
506 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
507 outof_input
, const1_rtx
, 0, unsignedp
, methods
);
508 if (shift_mask
== BITS_PER_WORD
- 1)
/* Compute ~OP1 by XORing with an all-ones constant.  */
510 tmp
= immed_double_const (-1, -1, op1_mode
);
511 tmp
= simplify_expand_binop (op1_mode
, xor_optab
, op1
, tmp
,
516 tmp
= immed_double_const (BITS_PER_WORD
- 1, 0, op1_mode
);
517 tmp
= simplify_expand_binop (op1_mode
, sub_optab
, tmp
, op1
,
521 if (tmp
== 0 || carries
== 0)
523 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
524 carries
, tmp
, 0, unsignedp
, methods
);
528 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
529 so the result can go directly into INTO_TARGET if convenient. */
530 tmp
= expand_binop (word_mode
, unsigned_shift
, into_input
, op1
,
531 into_target
, unsignedp
, methods
);
535 /* Now OR in the bits carried over from OUTOF_INPUT. */
536 if (!force_expand_binop (word_mode
, ior_optab
, tmp
, carries
,
537 into_target
, unsignedp
, methods
))
540 /* Use a standard word_mode shift for the out-of half. */
541 if (outof_target
!= 0)
542 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
543 outof_target
, unsignedp
, methods
))
/* NOTE(review): lossy extraction -- braces, `else' keywords, some call
   arguments and the return statements are missing below.  Code
   preserved byte-for-byte; only comments are added/completed.  */
550 #ifdef HAVE_conditional_move
551 /* Try implementing expand_doubleword_shift using conditional moves.
552 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
553 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
554 are the shift counts to use in the former and latter case. All other
555 arguments are the same as the parent routine. */
558 expand_doubleword_shift_condmove (enum machine_mode op1_mode
, optab binoptab
,
559 enum rtx_code cmp_code
, rtx cmp1
, rtx cmp2
,
560 rtx outof_input
, rtx into_input
,
561 rtx subword_op1
, rtx superword_op1
,
562 rtx outof_target
, rtx into_target
,
563 int unsignedp
, enum optab_methods methods
,
564 unsigned HOST_WIDE_INT shift_mask
)
566 rtx outof_superword
, into_superword
;
568 /* Put the superword version of the output into OUTOF_SUPERWORD and
   INTO_SUPERWORD -- closing of the original comment lost in
   extraction.  */
570 outof_superword
= outof_target
!= 0 ? gen_reg_rtx (word_mode
) : 0;
571 if (outof_target
!= 0 && subword_op1
== superword_op1
)
573 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
574 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
575 into_superword
= outof_target
;
576 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
577 outof_superword
, 0, unsignedp
, methods
))
582 into_superword
= gen_reg_rtx (word_mode
);
583 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
584 outof_superword
, into_superword
,
589 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
590 if (!expand_subword_shift (op1_mode
, binoptab
,
591 outof_input
, into_input
, subword_op1
,
592 outof_target
, into_target
,
593 unsignedp
, methods
, shift_mask
))
596 /* Select between them. Do the INTO half first because INTO_SUPERWORD
597 might be the current value of OUTOF_TARGET. */
598 if (!emit_conditional_move (into_target
, cmp_code
, cmp1
, cmp2
, op1_mode
,
599 into_target
, into_superword
, word_mode
, false))
602 if (outof_target
!= 0)
603 if (!emit_conditional_move (outof_target
, cmp_code
, cmp1
, cmp2
, op1_mode
,
604 outof_target
, outof_superword
,
/* NOTE(review): lossy extraction -- braces, `else' keywords, the
   assignment of CMP_CODE, some call arguments and the return
   statements are missing below.  Code preserved byte-for-byte; only
   comments are added.  */
612 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
613 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
614 input operand; the shift moves bits in the direction OUTOF_INPUT->
615 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
616 of the target. OP1 is the shift count and OP1_MODE is its mode.
617 If OP1 is constant, it will have been truncated as appropriate
618 and is known to be nonzero.
620 If SHIFT_MASK is zero, the result of word shifts is undefined when the
621 shift count is outside the range [0, BITS_PER_WORD). This routine must
622 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
624 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
625 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
626 fill with zeros or sign bits as appropriate.
628 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
629 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
630 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
631 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
634 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
635 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
636 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
637 function wants to calculate it itself.
639 Return true if the shift could be successfully synthesized. */
642 expand_doubleword_shift (enum machine_mode op1_mode
, optab binoptab
,
643 rtx outof_input
, rtx into_input
, rtx op1
,
644 rtx outof_target
, rtx into_target
,
645 int unsignedp
, enum optab_methods methods
,
646 unsigned HOST_WIDE_INT shift_mask
)
648 rtx superword_op1
, tmp
, cmp1
, cmp2
;
649 rtx subword_label
, done_label
;
650 enum rtx_code cmp_code
;
652 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
653 fill the result with sign or zero bits as appropriate. If so, the value
654 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
655 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
656 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
658 This isn't worthwhile for constant shifts since the optimizers will
659 cope better with in-range shift counts. */
660 if (shift_mask
>= BITS_PER_WORD
662 && !CONSTANT_P (op1
))
664 if (!expand_doubleword_shift (op1_mode
, binoptab
,
665 outof_input
, into_input
, op1
,
667 unsignedp
, methods
, shift_mask
))
669 if (!force_expand_binop (word_mode
, binoptab
, outof_input
, op1
,
670 outof_target
, unsignedp
, methods
))
675 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
676 is true when the effective shift value is less than BITS_PER_WORD.
677 Set SUPERWORD_OP1 to the shift count that should be used to shift
678 OUTOF_INPUT into INTO_TARGET when the condition is false. */
679 tmp
= immed_double_const (BITS_PER_WORD
, 0, op1_mode
);
680 if (!CONSTANT_P (op1
) && shift_mask
== BITS_PER_WORD
- 1)
682 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
683 is a subword shift count. */
684 cmp1
= simplify_expand_binop (op1_mode
, and_optab
, op1
, tmp
,
686 cmp2
= CONST0_RTX (op1_mode
);
692 /* Set CMP1 to OP1 - BITS_PER_WORD. */
693 cmp1
= simplify_expand_binop (op1_mode
, sub_optab
, op1
, tmp
,
695 cmp2
= CONST0_RTX (op1_mode
);
697 superword_op1
= cmp1
;
702 /* If we can compute the condition at compile time, pick the
703 appropriate subroutine. */
704 tmp
= simplify_relational_operation (cmp_code
, SImode
, op1_mode
, cmp1
, cmp2
);
705 if (tmp
!= 0 && GET_CODE (tmp
) == CONST_INT
)
707 if (tmp
== const0_rtx
)
708 return expand_superword_shift (binoptab
, outof_input
, superword_op1
,
709 outof_target
, into_target
,
712 return expand_subword_shift (op1_mode
, binoptab
,
713 outof_input
, into_input
, op1
,
714 outof_target
, into_target
,
715 unsignedp
, methods
, shift_mask
);
718 #ifdef HAVE_conditional_move
719 /* Try using conditional moves to generate straight-line code. */
721 rtx start
= get_last_insn ();
722 if (expand_doubleword_shift_condmove (op1_mode
, binoptab
,
723 cmp_code
, cmp1
, cmp2
,
724 outof_input
, into_input
,
726 outof_target
, into_target
,
727 unsignedp
, methods
, shift_mask
))
/* The condmove attempt failed; remove any insns it emitted.  */
729 delete_insns_since (start
);
733 /* As a last resort, use branches to select the correct alternative. */
734 subword_label
= gen_label_rtx ();
735 done_label
= gen_label_rtx ();
737 do_compare_rtx_and_jump (cmp1
, cmp2
, cmp_code
, false, op1_mode
,
738 0, 0, subword_label
);
740 if (!expand_superword_shift (binoptab
, outof_input
, superword_op1
,
741 outof_target
, into_target
,
745 emit_jump_insn (gen_jump (done_label
));
747 emit_label (subword_label
);
749 if (!expand_subword_shift (op1_mode
, binoptab
,
750 outof_input
, into_input
, op1
,
751 outof_target
, into_target
,
752 unsignedp
, methods
, shift_mask
))
755 emit_label (done_label
);
/* NOTE(review): lossy extraction -- braces, `if'/`else' keywords around
   the umulp paths, several null-checks and the final return are missing
   below.  Code preserved byte-for-byte; only comments are
   added/completed.  */
759 /* Subroutine of expand_binop. Perform a double word multiplication of
760 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
761 as the target's word_mode. This function return NULL_RTX if anything
762 goes wrong, in which case it may have already emitted instructions
763 which need to be deleted.
765 If we want to multiply two two-word values and have normal and widening
766 multiplies of single-word values, we can do this with three smaller
767 multiplications. Note that we do not make a REG_NO_CONFLICT block here
768 because we are not operating on one word at a time.
770 The multiplication proceeds as follows:
771 _______________________
772 [__op0_high_|__op0_low__]
773 _______________________
774 * [__op1_high_|__op1_low__]
775 _______________________________________________
776 _______________________
777 (1) [__op0_low__*__op1_low__]
778 _______________________
779 (2a) [__op0_low__*__op1_high_]
780 _______________________
781 (2b) [__op0_high_*__op1_low__]
782 _______________________
783 (3) [__op0_high_*__op1_high_]
786 This gives a 4-word result. Since we are only interested in the
787 lower 2 words, partial result (3) and the upper words of (2a) and
788 (2b) don't need to be calculated. Hence (2a) and (2b) can be
789 calculated using non-widening multiplication.
791 (1), however, needs to be calculated with an unsigned widening
792 multiplication. If this operation is not directly supported we
793 try using a signed widening multiplication and adjust the result.
794 This adjustment works as follows:
796 If both operands are positive then no adjustment is needed.
798 If the operands have different signs, for example op0_low < 0 and
799 op1_low >= 0, the instruction treats the most significant bit of
800 op0_low as a sign bit instead of a bit with significance
801 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
802 with 2**BITS_PER_WORD - op0_low, and two's complements the
803 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
806 Similarly, if both operands are negative, we need to add
807 (op0_low + op1_low) * 2**BITS_PER_WORD.
809 We use a trick to adjust quickly. We logically shift op0_low right
810 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
811 op0_high (op1_high) before it is used to calculate 2b (2a). If no
812 logical shift exists, we do an arithmetic right shift and subtract
   the result -- closing of the original comment lost in extraction.  */
816 expand_doubleword_mult (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
817 bool umulp
, enum optab_methods methods
)
/* Word indices of the low/high halves, honoring target endianness.  */
819 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
820 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
821 rtx wordm1
= umulp
? NULL_RTX
: GEN_INT (BITS_PER_WORD
- 1);
822 rtx product
, adjust
, product_high
, temp
;
/* Word-sized halves of the two operands, forced into registers.  */
824 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
825 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
826 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
827 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
829 /* If we're using an unsigned multiply to directly compute the product
830 of the low-order words of the operands and perform any required
831 adjustments of the operands, we begin by trying two more multiplications
832 and then computing the appropriate sum.
834 We have checked above that the required addition is provided.
835 Full-word addition will normally always succeed, especially if
836 it is provided at all, so we don't worry about its failure. The
837 multiplication may well fail, however, so we do handle that. */
841 /* ??? This could be done with emit_store_flag where available. */
842 temp
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
843 NULL_RTX
, 1, methods
);
845 op0_high
= expand_binop (word_mode
, add_optab
, op0_high
, temp
,
846 NULL_RTX
, 0, OPTAB_DIRECT
);
/* No logical shift: arithmetic shift gives 0 or -1; subtract it.  */
849 temp
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
850 NULL_RTX
, 0, methods
);
853 op0_high
= expand_binop (word_mode
, sub_optab
, op0_high
, temp
,
854 NULL_RTX
, 0, OPTAB_DIRECT
);
/* Partial product (2b): op0_high * op1_low, non-widening.  */
861 adjust
= expand_binop (word_mode
, smul_optab
, op0_high
, op1_low
,
862 NULL_RTX
, 0, OPTAB_DIRECT
);
866 /* OP0_HIGH should now be dead. */
870 /* ??? This could be done with emit_store_flag where available. */
871 temp
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
872 NULL_RTX
, 1, methods
);
874 op1_high
= expand_binop (word_mode
, add_optab
, op1_high
, temp
,
875 NULL_RTX
, 0, OPTAB_DIRECT
);
878 temp
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
879 NULL_RTX
, 0, methods
);
882 op1_high
= expand_binop (word_mode
, sub_optab
, op1_high
, temp
,
883 NULL_RTX
, 0, OPTAB_DIRECT
);
/* Partial product (2a): op1_high * op0_low, non-widening.  */
890 temp
= expand_binop (word_mode
, smul_optab
, op1_high
, op0_low
,
891 NULL_RTX
, 0, OPTAB_DIRECT
);
895 /* OP1_HIGH should now be dead. */
/* Sum the two high-half partial products.  */
897 adjust
= expand_binop (word_mode
, add_optab
, adjust
, temp
,
898 adjust
, 0, OPTAB_DIRECT
);
900 if (target
&& !REG_P (target
))
/* Partial product (1): widening multiply of the low halves.  */
904 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
905 target
, 1, OPTAB_DIRECT
);
907 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
908 target
, 1, OPTAB_DIRECT
);
/* Fold the adjustment into the high word of the product.  */
913 product_high
= operand_subword (product
, high
, 1, mode
);
914 adjust
= expand_binop (word_mode
, add_optab
, product_high
, adjust
,
915 REG_P (product_high
) ? product_high
: adjust
,
917 emit_move_insn (product_high
, adjust
);
/* NOTE(review): lossy extraction -- the return-type line, braces and
   some intervening lines are missing below.  Code preserved
   byte-for-byte.  */
921 /* Wrapper around expand_binop which takes an rtx code to specify
922 the operation to perform, not an optab pointer. All other
923 arguments are the same. */
925 expand_simple_binop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
926 rtx op1
, rtx target
, int unsignedp
,
927 enum optab_methods methods
)
/* Map the rtx code to its optab via the global code_to_optab table.  */
929 optab binop
= code_to_optab
[(int) code
];
933 return expand_binop (mode
, binop
, op0
, op1
, target
, unsignedp
, methods
);
936 /* Generate code to perform an operation specified by BINOPTAB
937 on operands OP0 and OP1, with result having machine-mode MODE.
939 UNSIGNEDP is for the case where we have to widen the operands
940 to perform the operation. It says to use zero-extension.
942 If TARGET is nonzero, the value
943 is generated there, if it is convenient to do so.
944 In all cases an rtx is returned for the locus of the value;
945 this may or may not be TARGET. */
948 expand_binop (enum machine_mode mode
, optab binoptab
, rtx op0
, rtx op1
,
949 rtx target
, int unsignedp
, enum optab_methods methods
)
951 enum optab_methods next_methods
952 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
953 ? OPTAB_WIDEN
: methods
);
954 enum mode_class
class;
955 enum machine_mode wider_mode
;
957 int commutative_op
= 0;
958 int shift_op
= (binoptab
->code
== ASHIFT
959 || binoptab
->code
== ASHIFTRT
960 || binoptab
->code
== LSHIFTRT
961 || binoptab
->code
== ROTATE
962 || binoptab
->code
== ROTATERT
);
963 rtx entry_last
= get_last_insn ();
966 class = GET_MODE_CLASS (mode
);
970 /* Load duplicate non-volatile operands once. */
971 if (rtx_equal_p (op0
, op1
) && ! volatile_refs_p (op0
))
973 op0
= force_not_mem (op0
);
978 op0
= force_not_mem (op0
);
979 op1
= force_not_mem (op1
);
983 /* If subtracting an integer constant, convert this into an addition of
984 the negated constant. */
986 if (binoptab
== sub_optab
&& GET_CODE (op1
) == CONST_INT
)
988 op1
= negate_rtx (mode
, op1
);
989 binoptab
= add_optab
;
992 /* If we are inside an appropriately-short loop and we are optimizing,
993 force expensive constants into a register. */
994 if (CONSTANT_P (op0
) && optimize
995 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
997 if (GET_MODE (op0
) != VOIDmode
)
998 op0
= convert_modes (mode
, VOIDmode
, op0
, unsignedp
);
999 op0
= force_reg (mode
, op0
);
1002 if (CONSTANT_P (op1
) && optimize
1003 && ! shift_op
&& rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
1005 if (GET_MODE (op1
) != VOIDmode
)
1006 op1
= convert_modes (mode
, VOIDmode
, op1
, unsignedp
);
1007 op1
= force_reg (mode
, op1
);
1010 /* Record where to delete back to if we backtrack. */
1011 last
= get_last_insn ();
1013 /* If operation is commutative,
1014 try to make the first operand a register.
1015 Even better, try to make it the same as the target.
1016 Also try to make the last operand a constant. */
1017 if (GET_RTX_CLASS (binoptab
->code
) == RTX_COMM_ARITH
1018 || binoptab
== smul_widen_optab
1019 || binoptab
== umul_widen_optab
1020 || binoptab
== smul_highpart_optab
1021 || binoptab
== umul_highpart_optab
)
1025 if (((target
== 0 || REG_P (target
))
1029 : rtx_equal_p (op1
, target
))
1030 || GET_CODE (op0
) == CONST_INT
)
1038 /* If we can do it with a three-operand insn, do so. */
1040 if (methods
!= OPTAB_MUST_WIDEN
1041 && binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1043 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
1044 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
1045 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
1047 rtx xop0
= op0
, xop1
= op1
;
1052 temp
= gen_reg_rtx (mode
);
1054 /* If it is a commutative operator and the modes would match
1055 if we would swap the operands, we can save the conversions. */
1058 if (GET_MODE (op0
) != mode0
&& GET_MODE (op1
) != mode1
1059 && GET_MODE (op0
) == mode1
&& GET_MODE (op1
) == mode0
)
1063 tmp
= op0
; op0
= op1
; op1
= tmp
;
1064 tmp
= xop0
; xop0
= xop1
; xop1
= tmp
;
1068 /* In case the insn wants input operands in modes different from
1069 those of the actual operands, convert the operands. It would
1070 seem that we don't need to convert CONST_INTs, but we do, so
1071 that they're properly zero-extended, sign-extended or truncated
1074 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
1075 xop0
= convert_modes (mode0
,
1076 GET_MODE (op0
) != VOIDmode
1081 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
1082 xop1
= convert_modes (mode1
,
1083 GET_MODE (op1
) != VOIDmode
1088 /* Now, if insn's predicates don't allow our operands, put them into
1091 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
)
1092 && mode0
!= VOIDmode
)
1093 xop0
= copy_to_mode_reg (mode0
, xop0
);
1095 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
)
1096 && mode1
!= VOIDmode
)
1097 xop1
= copy_to_mode_reg (mode1
, xop1
);
1099 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
1100 temp
= gen_reg_rtx (mode
);
1102 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
);
1105 /* If PAT is composed of more than one insn, try to add an appropriate
1106 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1107 operand, call ourselves again, this time without a target. */
1108 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
1109 && ! add_equal_note (pat
, temp
, binoptab
->code
, xop0
, xop1
))
1111 delete_insns_since (last
);
1112 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
1113 unsignedp
, methods
);
1120 delete_insns_since (last
);
1123 /* If this is a multiply, see if we can do a widening operation that
1124 takes operands of this mode and makes a wider mode. */
1126 if (binoptab
== smul_optab
&& GET_MODE_WIDER_MODE (mode
) != VOIDmode
1127 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
1128 ->handlers
[(int) GET_MODE_WIDER_MODE (mode
)].insn_code
)
1129 != CODE_FOR_nothing
))
1131 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
1132 unsignedp
? umul_widen_optab
: smul_widen_optab
,
1133 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
1137 if (GET_MODE_CLASS (mode
) == MODE_INT
)
1138 return gen_lowpart (mode
, temp
);
1140 return convert_to_mode (mode
, temp
, unsignedp
);
1144 /* Look for a wider mode of the same class for which we think we
1145 can open-code the operation. Check for a widening multiply at the
1146 wider mode as well. */
1148 if ((class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1149 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
1150 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1151 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1153 if (binoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
1154 || (binoptab
== smul_optab
1155 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
1156 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
1157 ->handlers
[(int) GET_MODE_WIDER_MODE (wider_mode
)].insn_code
)
1158 != CODE_FOR_nothing
)))
1160 rtx xop0
= op0
, xop1
= op1
;
1163 /* For certain integer operations, we need not actually extend
1164 the narrow operands, as long as we will truncate
1165 the results to the same narrowness. */
1167 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1168 || binoptab
== xor_optab
1169 || binoptab
== add_optab
|| binoptab
== sub_optab
1170 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1171 && class == MODE_INT
)
1174 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
1176 /* The second operand of a shift must always be extended. */
1177 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1178 no_extend
&& binoptab
!= ashl_optab
);
1180 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1181 unsignedp
, OPTAB_DIRECT
);
1184 if (class != MODE_INT
)
1187 target
= gen_reg_rtx (mode
);
1188 convert_move (target
, temp
, 0);
1192 return gen_lowpart (mode
, temp
);
1195 delete_insns_since (last
);
1199 /* These can be done a word at a time. */
1200 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
1201 && class == MODE_INT
1202 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
1203 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1209 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1210 won't be accurate, so use a new target. */
1211 if (target
== 0 || target
== op0
|| target
== op1
)
1212 target
= gen_reg_rtx (mode
);
1216 /* Do the actual arithmetic. */
1217 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
1219 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
1220 rtx x
= expand_binop (word_mode
, binoptab
,
1221 operand_subword_force (op0
, i
, mode
),
1222 operand_subword_force (op1
, i
, mode
),
1223 target_piece
, unsignedp
, next_methods
);
1228 if (target_piece
!= x
)
1229 emit_move_insn (target_piece
, x
);
1232 insns
= get_insns ();
1235 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
1237 if (binoptab
->code
!= UNKNOWN
)
1239 = gen_rtx_fmt_ee (binoptab
->code
, mode
,
1240 copy_rtx (op0
), copy_rtx (op1
));
1244 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1249 /* Synthesize double word shifts from single word shifts. */
1250 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
1251 || binoptab
== ashr_optab
)
1252 && class == MODE_INT
1253 && (GET_CODE (op1
) == CONST_INT
|| !optimize_size
)
1254 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1255 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1256 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1257 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1259 unsigned HOST_WIDE_INT shift_mask
, double_shift_mask
;
1260 enum machine_mode op1_mode
;
1262 double_shift_mask
= targetm
.shift_truncation_mask (mode
);
1263 shift_mask
= targetm
.shift_truncation_mask (word_mode
);
1264 op1_mode
= GET_MODE (op1
) != VOIDmode
? GET_MODE (op1
) : word_mode
;
1266 /* Apply the truncation to constant shifts. */
1267 if (double_shift_mask
> 0 && GET_CODE (op1
) == CONST_INT
)
1268 op1
= GEN_INT (INTVAL (op1
) & double_shift_mask
);
1270 if (op1
== CONST0_RTX (op1_mode
))
1273 /* Make sure that this is a combination that expand_doubleword_shift
1274 can handle. See the comments there for details. */
1275 if (double_shift_mask
== 0
1276 || (shift_mask
== BITS_PER_WORD
- 1
1277 && double_shift_mask
== BITS_PER_WORD
* 2 - 1))
1279 rtx insns
, equiv_value
;
1280 rtx into_target
, outof_target
;
1281 rtx into_input
, outof_input
;
1282 int left_shift
, outof_word
;
1284 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1285 won't be accurate, so use a new target. */
1286 if (target
== 0 || target
== op0
|| target
== op1
)
1287 target
= gen_reg_rtx (mode
);
1291 /* OUTOF_* is the word we are shifting bits away from, and
1292 INTO_* is the word that we are shifting bits towards, thus
1293 they differ depending on the direction of the shift and
1294 WORDS_BIG_ENDIAN. */
1296 left_shift
= binoptab
== ashl_optab
;
1297 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1299 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1300 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1302 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1303 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1305 if (expand_doubleword_shift (op1_mode
, binoptab
,
1306 outof_input
, into_input
, op1
,
1307 outof_target
, into_target
,
1308 unsignedp
, methods
, shift_mask
))
1310 insns
= get_insns ();
1313 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1314 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1321 /* Synthesize double word rotates from single word shifts. */
1322 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
1323 && class == MODE_INT
1324 && GET_CODE (op1
) == CONST_INT
1325 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1326 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1327 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1329 rtx insns
, equiv_value
;
1330 rtx into_target
, outof_target
;
1331 rtx into_input
, outof_input
;
1333 int shift_count
, left_shift
, outof_word
;
1335 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1336 won't be accurate, so use a new target. Do this also if target is not
1337 a REG, first because having a register instead may open optimization
1338 opportunities, and second because if target and op0 happen to be MEMs
1339 designating the same location, we would risk clobbering it too early
1340 in the code sequence we generate below. */
1341 if (target
== 0 || target
== op0
|| target
== op1
|| ! REG_P (target
))
1342 target
= gen_reg_rtx (mode
);
1346 shift_count
= INTVAL (op1
);
1348 /* OUTOF_* is the word we are shifting bits away from, and
1349 INTO_* is the word that we are shifting bits towards, thus
1350 they differ depending on the direction of the shift and
1351 WORDS_BIG_ENDIAN. */
1353 left_shift
= (binoptab
== rotl_optab
);
1354 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
1356 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
1357 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
1359 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
1360 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
1362 if (shift_count
== BITS_PER_WORD
)
1364 /* This is just a word swap. */
1365 emit_move_insn (outof_target
, into_input
);
1366 emit_move_insn (into_target
, outof_input
);
1371 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
1372 rtx first_shift_count
, second_shift_count
;
1373 optab reverse_unsigned_shift
, unsigned_shift
;
1375 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1376 ? lshr_optab
: ashl_optab
);
1378 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
1379 ? ashl_optab
: lshr_optab
);
1381 if (shift_count
> BITS_PER_WORD
)
1383 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
1384 second_shift_count
= GEN_INT (2 * BITS_PER_WORD
- shift_count
);
1388 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
1389 second_shift_count
= GEN_INT (shift_count
);
1392 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
1393 outof_input
, first_shift_count
,
1394 NULL_RTX
, unsignedp
, next_methods
);
1395 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1396 into_input
, second_shift_count
,
1397 NULL_RTX
, unsignedp
, next_methods
);
1399 if (into_temp1
!= 0 && into_temp2
!= 0)
1400 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
1401 into_target
, unsignedp
, next_methods
);
1405 if (inter
!= 0 && inter
!= into_target
)
1406 emit_move_insn (into_target
, inter
);
1408 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
1409 into_input
, first_shift_count
,
1410 NULL_RTX
, unsignedp
, next_methods
);
1411 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
1412 outof_input
, second_shift_count
,
1413 NULL_RTX
, unsignedp
, next_methods
);
1415 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
1416 inter
= expand_binop (word_mode
, ior_optab
,
1417 outof_temp1
, outof_temp2
,
1418 outof_target
, unsignedp
, next_methods
);
1420 if (inter
!= 0 && inter
!= outof_target
)
1421 emit_move_insn (outof_target
, inter
);
1424 insns
= get_insns ();
1429 if (binoptab
->code
!= UNKNOWN
)
1430 equiv_value
= gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
);
1434 /* We can't make this a no conflict block if this is a word swap,
1435 because the word swap case fails if the input and output values
1436 are in the same register. */
1437 if (shift_count
!= BITS_PER_WORD
)
1438 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
1447 /* These can be done a word at a time by propagating carries. */
1448 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
1449 && class == MODE_INT
1450 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
1451 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1454 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
1455 const unsigned int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
1456 rtx carry_in
= NULL_RTX
, carry_out
= NULL_RTX
;
1457 rtx xop0
, xop1
, xtarget
;
1459 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1460 value is one of those, use it. Otherwise, use 1 since it is the
1461 one easiest to get. */
1462 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1463 int normalizep
= STORE_FLAG_VALUE
;
1468 /* Prepare the operands. */
1469 xop0
= force_reg (mode
, op0
);
1470 xop1
= force_reg (mode
, op1
);
1472 xtarget
= gen_reg_rtx (mode
);
1474 if (target
== 0 || !REG_P (target
))
1477 /* Indicate for flow that the entire target reg is being set. */
1479 emit_insn (gen_rtx_CLOBBER (VOIDmode
, xtarget
));
1481 /* Do the actual arithmetic. */
1482 for (i
= 0; i
< nwords
; i
++)
1484 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
1485 rtx target_piece
= operand_subword (xtarget
, index
, 1, mode
);
1486 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
1487 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
1490 /* Main add/subtract of the input operands. */
1491 x
= expand_binop (word_mode
, binoptab
,
1492 op0_piece
, op1_piece
,
1493 target_piece
, unsignedp
, next_methods
);
1499 /* Store carry from main add/subtract. */
1500 carry_out
= gen_reg_rtx (word_mode
);
1501 carry_out
= emit_store_flag_force (carry_out
,
1502 (binoptab
== add_optab
1505 word_mode
, 1, normalizep
);
1512 /* Add/subtract previous carry to main result. */
1513 newx
= expand_binop (word_mode
,
1514 normalizep
== 1 ? binoptab
: otheroptab
,
1516 NULL_RTX
, 1, next_methods
);
1520 /* Get out carry from adding/subtracting carry in. */
1521 rtx carry_tmp
= gen_reg_rtx (word_mode
);
1522 carry_tmp
= emit_store_flag_force (carry_tmp
,
1523 (binoptab
== add_optab
1526 word_mode
, 1, normalizep
);
1528 /* Logical-ior the two poss. carry together. */
1529 carry_out
= expand_binop (word_mode
, ior_optab
,
1530 carry_out
, carry_tmp
,
1531 carry_out
, 0, next_methods
);
1535 emit_move_insn (target_piece
, newx
);
1538 carry_in
= carry_out
;
1541 if (i
== GET_MODE_BITSIZE (mode
) / (unsigned) BITS_PER_WORD
)
1543 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
1544 || ! rtx_equal_p (target
, xtarget
))
1546 rtx temp
= emit_move_insn (target
, xtarget
);
1548 set_unique_reg_note (temp
,
1550 gen_rtx_fmt_ee (binoptab
->code
, mode
,
1561 delete_insns_since (last
);
1564 /* Attempt to synthesize double word multiplies using a sequence of word
1565 mode multiplications. We first attempt to generate a sequence using a
1566 more efficient unsigned widening multiply, and if that fails we then
1567 try using a signed widening multiply. */
1569 if (binoptab
== smul_optab
1570 && class == MODE_INT
1571 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1572 && smul_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1573 && add_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1575 rtx product
= NULL_RTX
;
1577 if (umul_widen_optab
->handlers
[(int) mode
].insn_code
1578 != CODE_FOR_nothing
)
1580 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
1583 delete_insns_since (last
);
1586 if (product
== NULL_RTX
1587 && smul_widen_optab
->handlers
[(int) mode
].insn_code
1588 != CODE_FOR_nothing
)
1590 product
= expand_doubleword_mult (mode
, op0
, op1
, target
,
1593 delete_insns_since (last
);
1596 if (product
!= NULL_RTX
)
1598 if (mov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1600 temp
= emit_move_insn (target
? target
: product
, product
);
1601 set_unique_reg_note (temp
,
1603 gen_rtx_fmt_ee (MULT
, mode
,
1611 /* It can't be open-coded in this mode.
1612 Use a library call if one is available and caller says that's ok. */
1614 if (binoptab
->handlers
[(int) mode
].libfunc
1615 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1619 enum machine_mode op1_mode
= mode
;
1626 op1_mode
= word_mode
;
1627 /* Specify unsigned here,
1628 since negative shift counts are meaningless. */
1629 op1x
= convert_to_mode (word_mode
, op1
, 1);
1632 if (GET_MODE (op0
) != VOIDmode
1633 && GET_MODE (op0
) != mode
)
1634 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1636 /* Pass 1 for NO_QUEUE so we don't lose any increments
1637 if the libcall is cse'd or moved. */
1638 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1639 NULL_RTX
, LCT_CONST
, mode
, 2,
1640 op0
, mode
, op1x
, op1_mode
);
1642 insns
= get_insns ();
1645 target
= gen_reg_rtx (mode
);
1646 emit_libcall_block (insns
, target
, value
,
1647 gen_rtx_fmt_ee (binoptab
->code
, mode
, op0
, op1
));
1652 delete_insns_since (last
);
1654 /* It can't be done in this mode. Can we do it in a wider mode? */
1656 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1657 || methods
== OPTAB_MUST_WIDEN
))
1659 /* Caller says, don't even try. */
1660 delete_insns_since (entry_last
);
1664 /* Compute the value of METHODS to pass to recursive calls.
1665 Don't allow widening to be tried recursively. */
1667 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1669 /* Look for a wider mode of the same class for which it appears we can do
1672 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1674 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1675 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1677 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
1678 != CODE_FOR_nothing
)
1679 || (methods
== OPTAB_LIB
1680 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
1682 rtx xop0
= op0
, xop1
= op1
;
1685 /* For certain integer operations, we need not actually extend
1686 the narrow operands, as long as we will truncate
1687 the results to the same narrowness. */
1689 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1690 || binoptab
== xor_optab
1691 || binoptab
== add_optab
|| binoptab
== sub_optab
1692 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1693 && class == MODE_INT
)
1696 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1697 unsignedp
, no_extend
);
1699 /* The second operand of a shift must always be extended. */
1700 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1701 no_extend
&& binoptab
!= ashl_optab
);
1703 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1704 unsignedp
, methods
);
1707 if (class != MODE_INT
)
1710 target
= gen_reg_rtx (mode
);
1711 convert_move (target
, temp
, 0);
1715 return gen_lowpart (mode
, temp
);
1718 delete_insns_since (last
);
1723 delete_insns_since (entry_last
);
1727 /* Expand a binary operator which has both signed and unsigned forms.
1728 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1731 If we widen unsigned operands, we may use a signed wider operation instead
1732 of an unsigned wider operation, since the result would be the same. */
1735 sign_expand_binop (enum machine_mode mode
, optab uoptab
, optab soptab
,
1736 rtx op0
, rtx op1
, rtx target
, int unsignedp
,
1737 enum optab_methods methods
)
1740 optab direct_optab
= unsignedp
? uoptab
: soptab
;
1741 struct optab wide_soptab
;
1743 /* Do it without widening, if possible. */
1744 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
1745 unsignedp
, OPTAB_DIRECT
);
1746 if (temp
|| methods
== OPTAB_DIRECT
)
1749 /* Try widening to a signed int. Make a fake signed optab that
1750 hides any signed insn for direct use. */
1751 wide_soptab
= *soptab
;
1752 wide_soptab
.handlers
[(int) mode
].insn_code
= CODE_FOR_nothing
;
1753 wide_soptab
.handlers
[(int) mode
].libfunc
= 0;
1755 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1756 unsignedp
, OPTAB_WIDEN
);
1758 /* For unsigned operands, try widening to an unsigned int. */
1759 if (temp
== 0 && unsignedp
)
1760 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
1761 unsignedp
, OPTAB_WIDEN
);
1762 if (temp
|| methods
== OPTAB_WIDEN
)
1765 /* Use the right width lib call if that exists. */
1766 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
1767 if (temp
|| methods
== OPTAB_LIB
)
1770 /* Must widen and use a lib call, use either signed or unsigned. */
1771 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1772 unsignedp
, methods
);
1776 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
1777 unsignedp
, methods
);
1781 /* Generate code to perform an operation specified by UNOPPTAB
1782 on operand OP0, with two results to TARG0 and TARG1.
1783 We assume that the order of the operands for the instruction
1784 is TARG0, TARG1, OP0.
1786 Either TARG0 or TARG1 may be zero, but what that means is that
1787 the result is not actually wanted. We will generate it into
1788 a dummy pseudo-reg and discard it. They may not both be zero.
1790 Returns 1 if this operation can be performed; 0 if not. */
1793 expand_twoval_unop (optab unoptab
, rtx op0
, rtx targ0
, rtx targ1
,
1796 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
1797 enum mode_class
class;
1798 enum machine_mode wider_mode
;
1799 rtx entry_last
= get_last_insn ();
1802 class = GET_MODE_CLASS (mode
);
1805 op0
= force_not_mem (op0
);
1808 targ0
= gen_reg_rtx (mode
);
1810 targ1
= gen_reg_rtx (mode
);
1812 /* Record where to go back to if we fail. */
1813 last
= get_last_insn ();
1815 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1817 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
1818 enum machine_mode mode0
= insn_data
[icode
].operand
[2].mode
;
1822 if (GET_MODE (xop0
) != VOIDmode
1823 && GET_MODE (xop0
) != mode0
)
1824 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
1826 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1827 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop0
, mode0
))
1828 xop0
= copy_to_mode_reg (mode0
, xop0
);
1830 /* We could handle this, but we should always be called with a pseudo
1831 for our targets and all insns should take them as outputs. */
1832 if (! (*insn_data
[icode
].operand
[0].predicate
) (targ0
, mode
)
1833 || ! (*insn_data
[icode
].operand
[1].predicate
) (targ1
, mode
))
1836 pat
= GEN_FCN (icode
) (targ0
, targ1
, xop0
);
1843 delete_insns_since (last
);
1846 /* It can't be done in this mode. Can we do it in a wider mode? */
1848 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1850 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1851 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1853 if (unoptab
->handlers
[(int) wider_mode
].insn_code
1854 != CODE_FOR_nothing
)
1856 rtx t0
= gen_reg_rtx (wider_mode
);
1857 rtx t1
= gen_reg_rtx (wider_mode
);
1858 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
1860 if (expand_twoval_unop (unoptab
, cop0
, t0
, t1
, unsignedp
))
1862 convert_move (targ0
, t0
, unsignedp
);
1863 convert_move (targ1
, t1
, unsignedp
);
1867 delete_insns_since (last
);
1872 delete_insns_since (entry_last
);
1876 /* Generate code to perform an operation specified by BINOPTAB
1877 on operands OP0 and OP1, with two results to TARG1 and TARG2.
1878 We assume that the order of the operands for the instruction
1879 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1880 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1882 Either TARG0 or TARG1 may be zero, but what that means is that
1883 the result is not actually wanted. We will generate it into
1884 a dummy pseudo-reg and discard it. They may not both be zero.
1886 Returns 1 if this operation can be performed; 0 if not. */
1889 expand_twoval_binop (optab binoptab
, rtx op0
, rtx op1
, rtx targ0
, rtx targ1
,
1892 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
1893 enum mode_class
class;
1894 enum machine_mode wider_mode
;
1895 rtx entry_last
= get_last_insn ();
1898 class = GET_MODE_CLASS (mode
);
1902 op0
= force_not_mem (op0
);
1903 op1
= force_not_mem (op1
);
1906 /* If we are inside an appropriately-short loop and we are optimizing,
1907 force expensive constants into a register. */
1908 if (CONSTANT_P (op0
) && optimize
1909 && rtx_cost (op0
, binoptab
->code
) > COSTS_N_INSNS (1))
1910 op0
= force_reg (mode
, op0
);
1912 if (CONSTANT_P (op1
) && optimize
1913 && rtx_cost (op1
, binoptab
->code
) > COSTS_N_INSNS (1))
1914 op1
= force_reg (mode
, op1
);
1917 targ0
= gen_reg_rtx (mode
);
1919 targ1
= gen_reg_rtx (mode
);
1921 /* Record where to go back to if we fail. */
1922 last
= get_last_insn ();
1924 if (binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1926 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
1927 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
1928 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
1930 rtx xop0
= op0
, xop1
= op1
;
1932 /* In case the insn wants input operands in modes different from
1933 those of the actual operands, convert the operands. It would
1934 seem that we don't need to convert CONST_INTs, but we do, so
1935 that they're properly zero-extended, sign-extended or truncated
1938 if (GET_MODE (op0
) != mode0
&& mode0
!= VOIDmode
)
1939 xop0
= convert_modes (mode0
,
1940 GET_MODE (op0
) != VOIDmode
1945 if (GET_MODE (op1
) != mode1
&& mode1
!= VOIDmode
)
1946 xop1
= convert_modes (mode1
,
1947 GET_MODE (op1
) != VOIDmode
1952 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1953 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
1954 xop0
= copy_to_mode_reg (mode0
, xop0
);
1956 if (! (*insn_data
[icode
].operand
[2].predicate
) (xop1
, mode1
))
1957 xop1
= copy_to_mode_reg (mode1
, xop1
);
1959 /* We could handle this, but we should always be called with a pseudo
1960 for our targets and all insns should take them as outputs. */
1961 if (! (*insn_data
[icode
].operand
[0].predicate
) (targ0
, mode
)
1962 || ! (*insn_data
[icode
].operand
[3].predicate
) (targ1
, mode
))
1965 pat
= GEN_FCN (icode
) (targ0
, xop0
, xop1
, targ1
);
1972 delete_insns_since (last
);
1975 /* It can't be done in this mode. Can we do it in a wider mode? */
1977 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1979 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1980 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1982 if (binoptab
->handlers
[(int) wider_mode
].insn_code
1983 != CODE_FOR_nothing
)
1985 rtx t0
= gen_reg_rtx (wider_mode
);
1986 rtx t1
= gen_reg_rtx (wider_mode
);
1987 rtx cop0
= convert_modes (wider_mode
, mode
, op0
, unsignedp
);
1988 rtx cop1
= convert_modes (wider_mode
, mode
, op1
, unsignedp
);
1990 if (expand_twoval_binop (binoptab
, cop0
, cop1
,
1993 convert_move (targ0
, t0
, unsignedp
);
1994 convert_move (targ1
, t1
, unsignedp
);
1998 delete_insns_since (last
);
2003 delete_insns_since (entry_last
);
2007 /* Expand the two-valued library call indicated by BINOPTAB, but
2008 preserve only one of the values. If TARG0 is non-NULL, the first
2009 value is placed into TARG0; otherwise the second value is placed
2010 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2011 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2012 This routine assumes that the value returned by the library call is
2013 as if the return value was of an integral mode twice as wide as the
2014 mode of OP0. Returns 1 if the call was successful. */
2017 expand_twoval_binop_libfunc (optab binoptab
, rtx op0
, rtx op1
,
2018 rtx targ0
, rtx targ1
, enum rtx_code code
)
2020 enum machine_mode mode
;
2021 enum machine_mode libval_mode
;
2025 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2026 if (!((targ0
!= NULL_RTX
) ^ (targ1
!= NULL_RTX
)))
2029 mode
= GET_MODE (op0
);
2030 if (!binoptab
->handlers
[(int) mode
].libfunc
)
2033 /* The value returned by the library function will have twice as
2034 many bits as the nominal MODE. */
2035 libval_mode
= smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode
),
2038 libval
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
2039 NULL_RTX
, LCT_CONST
,
2043 /* Get the part of VAL containing the value that we want. */
2044 libval
= simplify_gen_subreg (mode
, libval
, libval_mode
,
2045 targ0
? 0 : GET_MODE_SIZE (mode
));
2046 insns
= get_insns ();
2048 /* Move the into the desired location. */
2049 emit_libcall_block (insns
, targ0
? targ0
: targ1
, libval
,
2050 gen_rtx_fmt_ee (code
, mode
, op0
, op1
));
2056 /* Wrapper around expand_unop which takes an rtx code to specify
2057 the operation to perform, not an optab pointer. All other
2058 arguments are the same. */
2060 expand_simple_unop (enum machine_mode mode
, enum rtx_code code
, rtx op0
,
2061 rtx target
, int unsignedp
)
2063 optab unop
= code_to_optab
[(int) code
];
2067 return expand_unop (mode
, unop
, op0
, target
, unsignedp
);
2073 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2075 widen_clz (enum machine_mode mode
, rtx op0
, rtx target
)
2077 enum mode_class
class = GET_MODE_CLASS (mode
);
2078 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2080 enum machine_mode wider_mode
;
2081 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2082 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2084 if (clz_optab
->handlers
[(int) wider_mode
].insn_code
2085 != CODE_FOR_nothing
)
2087 rtx xop0
, temp
, last
;
2089 last
= get_last_insn ();
2092 target
= gen_reg_rtx (mode
);
2093 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2094 temp
= expand_unop (wider_mode
, clz_optab
, xop0
, NULL_RTX
, true);
2096 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2097 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2098 - GET_MODE_BITSIZE (mode
)),
2099 target
, true, OPTAB_DIRECT
);
2101 delete_insns_since (last
);
2110 /* Try calculating (parity x) as (and (popcount x) 1), where
2111 popcount can also be done in a wider mode. */
2113 expand_parity (enum machine_mode mode
, rtx op0
, rtx target
)
2115 enum mode_class
class = GET_MODE_CLASS (mode
);
2116 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2118 enum machine_mode wider_mode
;
2119 for (wider_mode
= mode
; wider_mode
!= VOIDmode
;
2120 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2122 if (popcount_optab
->handlers
[(int) wider_mode
].insn_code
2123 != CODE_FOR_nothing
)
2125 rtx xop0
, temp
, last
;
2127 last
= get_last_insn ();
2130 target
= gen_reg_rtx (mode
);
2131 xop0
= widen_operand (op0
, wider_mode
, mode
, true, false);
2132 temp
= expand_unop (wider_mode
, popcount_optab
, xop0
, NULL_RTX
,
2135 temp
= expand_binop (wider_mode
, and_optab
, temp
, const1_rtx
,
2136 target
, true, OPTAB_DIRECT
);
2138 delete_insns_since (last
);
2147 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2148 conditions, VAL may already be a SUBREG against which we cannot generate
2149 a further SUBREG. In this case, we expect forcing the value into a
2150 register will work around the situation. */
2153 lowpart_subreg_maybe_copy (enum machine_mode omode
, rtx val
,
2154 enum machine_mode imode
)
2157 ret
= lowpart_subreg (omode
, val
, imode
);
2160 val
= force_reg (imode
, val
);
2161 ret
= lowpart_subreg (omode
, val
, imode
);
2162 gcc_assert (ret
!= NULL
);
2167 /* Expand a floating point absolute value or negation operation via a
2168 logical operation on the sign bit. */
2171 expand_absneg_bit (enum rtx_code code
, enum machine_mode mode
,
2172 rtx op0
, rtx target
)
2174 const struct real_format
*fmt
;
2175 int bitpos
, word
, nwords
, i
;
2176 enum machine_mode imode
;
2177 HOST_WIDE_INT hi
, lo
;
2180 /* The format has to have a simple sign bit. */
2181 fmt
= REAL_MODE_FORMAT (mode
);
2185 bitpos
= fmt
->signbit_rw
;
2189 /* Don't create negative zeros if the format doesn't support them. */
2190 if (code
== NEG
&& !fmt
->has_signed_zero
)
2193 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2195 imode
= int_mode_for_mode (mode
);
2196 if (imode
== BLKmode
)
2205 if (FLOAT_WORDS_BIG_ENDIAN
)
2206 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2208 word
= bitpos
/ BITS_PER_WORD
;
2209 bitpos
= bitpos
% BITS_PER_WORD
;
2210 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2213 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2216 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2220 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2226 if (target
== 0 || target
== op0
)
2227 target
= gen_reg_rtx (mode
);
2233 for (i
= 0; i
< nwords
; ++i
)
2235 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2236 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
2240 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2242 immed_double_const (lo
, hi
, imode
),
2243 targ_piece
, 1, OPTAB_LIB_WIDEN
);
2244 if (temp
!= targ_piece
)
2245 emit_move_insn (targ_piece
, temp
);
2248 emit_move_insn (targ_piece
, op0_piece
);
2251 insns
= get_insns ();
2254 temp
= gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
));
2255 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
, temp
);
2259 temp
= expand_binop (imode
, code
== ABS
? and_optab
: xor_optab
,
2260 gen_lowpart (imode
, op0
),
2261 immed_double_const (lo
, hi
, imode
),
2262 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
2263 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
2265 set_unique_reg_note (get_last_insn (), REG_EQUAL
,
2266 gen_rtx_fmt_e (code
, mode
, copy_rtx (op0
)));
2272 /* Generate code to perform an operation specified by UNOPTAB
2273 on operand OP0, with result having machine-mode MODE.
2275 UNSIGNEDP is for the case where we have to widen the operands
2276 to perform the operation. It says to use zero-extension.
2278 If TARGET is nonzero, the value
2279 is generated there, if it is convenient to do so.
2280 In all cases an rtx is returned for the locus of the value;
2281 this may or may not be TARGET. */
2284 expand_unop (enum machine_mode mode
, optab unoptab
, rtx op0
, rtx target
,
2287 enum mode_class
class;
2288 enum machine_mode wider_mode
;
2290 rtx last
= get_last_insn ();
2293 class = GET_MODE_CLASS (mode
);
2296 op0
= force_not_mem (op0
);
2298 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2300 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
2301 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2307 temp
= gen_reg_rtx (mode
);
2309 if (GET_MODE (xop0
) != VOIDmode
2310 && GET_MODE (xop0
) != mode0
)
2311 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2313 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2315 if (! (*insn_data
[icode
].operand
[1].predicate
) (xop0
, mode0
))
2316 xop0
= copy_to_mode_reg (mode0
, xop0
);
2318 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, mode
))
2319 temp
= gen_reg_rtx (mode
);
2321 pat
= GEN_FCN (icode
) (temp
, xop0
);
2324 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
2325 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
2327 delete_insns_since (last
);
2328 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
2336 delete_insns_since (last
);
2339 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2341 /* Widening clz needs special treatment. */
2342 if (unoptab
== clz_optab
)
2344 temp
= widen_clz (mode
, op0
, target
);
2351 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2352 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2353 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2355 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2359 /* For certain operations, we need not actually extend
2360 the narrow operand, as long as we will truncate the
2361 results to the same narrowness. */
2363 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2364 (unoptab
== neg_optab
2365 || unoptab
== one_cmpl_optab
)
2366 && class == MODE_INT
);
2368 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2373 if (class != MODE_INT
)
2376 target
= gen_reg_rtx (mode
);
2377 convert_move (target
, temp
, 0);
2381 return gen_lowpart (mode
, temp
);
2384 delete_insns_since (last
);
2388 /* These can be done a word at a time. */
2389 if (unoptab
== one_cmpl_optab
2390 && class == MODE_INT
2391 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
2392 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
2397 if (target
== 0 || target
== op0
)
2398 target
= gen_reg_rtx (mode
);
2402 /* Do the actual arithmetic. */
2403 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
2405 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
2406 rtx x
= expand_unop (word_mode
, unoptab
,
2407 operand_subword_force (op0
, i
, mode
),
2408 target_piece
, unsignedp
);
2410 if (target_piece
!= x
)
2411 emit_move_insn (target_piece
, x
);
2414 insns
= get_insns ();
2417 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
2418 gen_rtx_fmt_e (unoptab
->code
, mode
,
2423 if (unoptab
->code
== NEG
)
2425 /* Try negating floating point values by flipping the sign bit. */
2426 if (class == MODE_FLOAT
)
2428 temp
= expand_absneg_bit (NEG
, mode
, op0
, target
);
2433 /* If there is no negation pattern, and we have no negative zero,
2434 try subtracting from zero. */
2435 if (!HONOR_SIGNED_ZEROS (mode
))
2437 temp
= expand_binop (mode
, (unoptab
== negv_optab
2438 ? subv_optab
: sub_optab
),
2439 CONST0_RTX (mode
), op0
, target
,
2440 unsignedp
, OPTAB_DIRECT
);
2446 /* Try calculating parity (x) as popcount (x) % 2. */
2447 if (unoptab
== parity_optab
)
2449 temp
= expand_parity (mode
, op0
, target
);
2455 /* Now try a library call in this mode. */
2456 if (unoptab
->handlers
[(int) mode
].libfunc
)
2460 enum machine_mode outmode
= mode
;
2462 /* All of these functions return small values. Thus we choose to
2463 have them return something that isn't a double-word. */
2464 if (unoptab
== ffs_optab
|| unoptab
== clz_optab
|| unoptab
== ctz_optab
2465 || unoptab
== popcount_optab
|| unoptab
== parity_optab
)
2467 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node
)));
2471 /* Pass 1 for NO_QUEUE so we don't lose any increments
2472 if the libcall is cse'd or moved. */
2473 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2474 NULL_RTX
, LCT_CONST
, outmode
,
2476 insns
= get_insns ();
2479 target
= gen_reg_rtx (outmode
);
2480 emit_libcall_block (insns
, target
, value
,
2481 gen_rtx_fmt_e (unoptab
->code
, mode
, op0
));
2486 /* It can't be done in this mode. Can we do it in a wider mode? */
2488 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2490 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2491 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2493 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2494 != CODE_FOR_nothing
)
2495 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2499 /* For certain operations, we need not actually extend
2500 the narrow operand, as long as we will truncate the
2501 results to the same narrowness. */
2503 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2504 (unoptab
== neg_optab
2505 || unoptab
== one_cmpl_optab
)
2506 && class == MODE_INT
);
2508 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2511 /* If we are generating clz using wider mode, adjust the
2513 if (unoptab
== clz_optab
&& temp
!= 0)
2514 temp
= expand_binop (wider_mode
, sub_optab
, temp
,
2515 GEN_INT (GET_MODE_BITSIZE (wider_mode
)
2516 - GET_MODE_BITSIZE (mode
)),
2517 target
, true, OPTAB_DIRECT
);
2521 if (class != MODE_INT
)
2524 target
= gen_reg_rtx (mode
);
2525 convert_move (target
, temp
, 0);
2529 return gen_lowpart (mode
, temp
);
2532 delete_insns_since (last
);
2537 /* One final attempt at implementing negation via subtraction,
2538 this time allowing widening of the operand. */
2539 if (unoptab
->code
== NEG
&& !HONOR_SIGNED_ZEROS (mode
))
2542 temp
= expand_binop (mode
,
2543 unoptab
== negv_optab
? subv_optab
: sub_optab
,
2544 CONST0_RTX (mode
), op0
,
2545 target
, unsignedp
, OPTAB_LIB_WIDEN
);
2553 /* Emit code to compute the absolute value of OP0, with result to
2554 TARGET if convenient. (TARGET may be 0.) The return value says
2555 where the result actually is to be found.
2557 MODE is the mode of the operand; the mode of the result is
2558 different but can be deduced from MODE.
2563 expand_abs_nojump (enum machine_mode mode
, rtx op0
, rtx target
,
2564 int result_unsignedp
)
2569 result_unsignedp
= 1;
2571 /* First try to do it with a special abs instruction. */
2572 temp
= expand_unop (mode
, result_unsignedp
? abs_optab
: absv_optab
,
2577 /* For floating point modes, try clearing the sign bit. */
2578 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
2580 temp
= expand_absneg_bit (ABS
, mode
, op0
, target
);
2585 /* If we have a MAX insn, we can do this as MAX (x, -x). */
2586 if (smax_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
2587 && !HONOR_SIGNED_ZEROS (mode
))
2589 rtx last
= get_last_insn ();
2591 temp
= expand_unop (mode
, neg_optab
, op0
, NULL_RTX
, 0);
2593 temp
= expand_binop (mode
, smax_optab
, op0
, temp
, target
, 0,
2599 delete_insns_since (last
);
2602 /* If this machine has expensive jumps, we can do integer absolute
2603 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2604 where W is the width of MODE. */
2606 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
2608 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2609 size_int (GET_MODE_BITSIZE (mode
) - 1),
2612 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2615 temp
= expand_binop (mode
, result_unsignedp
? sub_optab
: subv_optab
,
2616 temp
, extended
, target
, 0, OPTAB_LIB_WIDEN
);
2626 expand_abs (enum machine_mode mode
, rtx op0
, rtx target
,
2627 int result_unsignedp
, int safe
)
2632 result_unsignedp
= 1;
2634 temp
= expand_abs_nojump (mode
, op0
, target
, result_unsignedp
);
2638 /* If that does not win, use conditional jump and negate. */
2640 /* It is safe to use the target if it is the same
2641 as the source if this is also a pseudo register */
2642 if (op0
== target
&& REG_P (op0
)
2643 && REGNO (op0
) >= FIRST_PSEUDO_REGISTER
)
2646 op1
= gen_label_rtx ();
2647 if (target
== 0 || ! safe
2648 || GET_MODE (target
) != mode
2649 || (MEM_P (target
) && MEM_VOLATILE_P (target
))
2651 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2652 target
= gen_reg_rtx (mode
);
2654 emit_move_insn (target
, op0
);
2657 /* If this mode is an integer too wide to compare properly,
2658 compare word by word. Rely on CSE to optimize constant cases. */
2659 if (GET_MODE_CLASS (mode
) == MODE_INT
2660 && ! can_compare_p (GE
, mode
, ccp_jump
))
2661 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2664 do_compare_rtx_and_jump (target
, CONST0_RTX (mode
), GE
, 0, mode
,
2665 NULL_RTX
, NULL_RTX
, op1
);
2667 op0
= expand_unop (mode
, result_unsignedp
? neg_optab
: negv_optab
,
2670 emit_move_insn (target
, op0
);
2676 /* A subroutine of expand_copysign, perform the copysign operation using the
2677 abs and neg primitives advertised to exist on the target. The assumption
2678 is that we have a split register file, and leaving op0 in fp registers,
2679 and not playing with subregs so much, will help the register allocator. */
2682 expand_copysign_absneg (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
2683 int bitpos
, bool op0_is_abs
)
2685 enum machine_mode imode
;
2686 HOST_WIDE_INT hi
, lo
;
2695 op0
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
2702 if (target
== NULL_RTX
)
2703 target
= copy_to_reg (op0
);
2705 emit_move_insn (target
, op0
);
2708 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2710 imode
= int_mode_for_mode (mode
);
2711 if (imode
== BLKmode
)
2713 op1
= gen_lowpart (imode
, op1
);
2718 if (FLOAT_WORDS_BIG_ENDIAN
)
2719 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2721 word
= bitpos
/ BITS_PER_WORD
;
2722 bitpos
= bitpos
% BITS_PER_WORD
;
2723 op1
= operand_subword_force (op1
, word
, mode
);
2726 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2729 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2733 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2737 op1
= expand_binop (imode
, and_optab
, op1
,
2738 immed_double_const (lo
, hi
, imode
),
2739 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2741 label
= gen_label_rtx ();
2742 emit_cmp_and_jump_insns (op1
, const0_rtx
, EQ
, NULL_RTX
, imode
, 1, label
);
2744 if (GET_CODE (op0
) == CONST_DOUBLE
)
2745 op0
= simplify_unary_operation (NEG
, mode
, op0
, mode
);
2747 op0
= expand_unop (mode
, neg_optab
, op0
, target
, 0);
2749 emit_move_insn (target
, op0
);
2757 /* A subroutine of expand_copysign, perform the entire copysign operation
2758 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
2759 is true if op0 is known to have its sign bit clear. */
2762 expand_copysign_bit (enum machine_mode mode
, rtx op0
, rtx op1
, rtx target
,
2763 int bitpos
, bool op0_is_abs
)
2765 enum machine_mode imode
;
2766 HOST_WIDE_INT hi
, lo
;
2767 int word
, nwords
, i
;
2770 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
2772 imode
= int_mode_for_mode (mode
);
2773 if (imode
== BLKmode
)
2782 if (FLOAT_WORDS_BIG_ENDIAN
)
2783 word
= (GET_MODE_BITSIZE (mode
) - bitpos
) / BITS_PER_WORD
;
2785 word
= bitpos
/ BITS_PER_WORD
;
2786 bitpos
= bitpos
% BITS_PER_WORD
;
2787 nwords
= (GET_MODE_BITSIZE (mode
) + BITS_PER_WORD
- 1) / BITS_PER_WORD
;
2790 if (bitpos
< HOST_BITS_PER_WIDE_INT
)
2793 lo
= (HOST_WIDE_INT
) 1 << bitpos
;
2797 hi
= (HOST_WIDE_INT
) 1 << (bitpos
- HOST_BITS_PER_WIDE_INT
);
2801 if (target
== 0 || target
== op0
|| target
== op1
)
2802 target
= gen_reg_rtx (mode
);
2808 for (i
= 0; i
< nwords
; ++i
)
2810 rtx targ_piece
= operand_subword (target
, i
, 1, mode
);
2811 rtx op0_piece
= operand_subword_force (op0
, i
, mode
);
2816 op0_piece
= expand_binop (imode
, and_optab
, op0_piece
,
2817 immed_double_const (~lo
, ~hi
, imode
),
2818 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2820 op1
= expand_binop (imode
, and_optab
,
2821 operand_subword_force (op1
, i
, mode
),
2822 immed_double_const (lo
, hi
, imode
),
2823 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2825 temp
= expand_binop (imode
, ior_optab
, op0_piece
, op1
,
2826 targ_piece
, 1, OPTAB_LIB_WIDEN
);
2827 if (temp
!= targ_piece
)
2828 emit_move_insn (targ_piece
, temp
);
2831 emit_move_insn (targ_piece
, op0_piece
);
2834 insns
= get_insns ();
2837 emit_no_conflict_block (insns
, target
, op0
, op1
, NULL_RTX
);
2841 op1
= expand_binop (imode
, and_optab
, gen_lowpart (imode
, op1
),
2842 immed_double_const (lo
, hi
, imode
),
2843 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2845 op0
= gen_lowpart (imode
, op0
);
2847 op0
= expand_binop (imode
, and_optab
, op0
,
2848 immed_double_const (~lo
, ~hi
, imode
),
2849 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
2851 temp
= expand_binop (imode
, ior_optab
, op0
, op1
,
2852 gen_lowpart (imode
, target
), 1, OPTAB_LIB_WIDEN
);
2853 target
= lowpart_subreg_maybe_copy (mode
, temp
, imode
);
2859 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
2860 scalar floating point mode. Return NULL if we do not know how to
2861 expand the operation inline. */
2864 expand_copysign (rtx op0
, rtx op1
, rtx target
)
2866 enum machine_mode mode
= GET_MODE (op0
);
2867 const struct real_format
*fmt
;
2871 gcc_assert (SCALAR_FLOAT_MODE_P (mode
));
2872 gcc_assert (GET_MODE (op1
) == mode
);
2874 /* First try to do it with a special instruction. */
2875 temp
= expand_binop (mode
, copysign_optab
, op0
, op1
,
2876 target
, 0, OPTAB_DIRECT
);
2880 fmt
= REAL_MODE_FORMAT (mode
);
2881 if (fmt
== NULL
|| !fmt
->has_signed_zero
)
2885 if (GET_CODE (op0
) == CONST_DOUBLE
)
2887 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0
)))
2888 op0
= simplify_unary_operation (ABS
, mode
, op0
, mode
);
2892 if (fmt
->signbit_ro
>= 0
2893 && (GET_CODE (op0
) == CONST_DOUBLE
2894 || (neg_optab
->handlers
[mode
].insn_code
!= CODE_FOR_nothing
2895 && abs_optab
->handlers
[mode
].insn_code
!= CODE_FOR_nothing
)))
2897 temp
= expand_copysign_absneg (mode
, op0
, op1
, target
,
2898 fmt
->signbit_ro
, op0_is_abs
);
2903 if (fmt
->signbit_rw
< 0)
2905 return expand_copysign_bit (mode
, op0
, op1
, target
,
2906 fmt
->signbit_rw
, op0_is_abs
);
2909 /* Generate an instruction whose insn-code is INSN_CODE,
2910 with two operands: an output TARGET and an input OP0.
2911 TARGET *must* be nonzero, and the output is always stored there.
2912 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2913 the value that is stored into TARGET. */
2916 emit_unop_insn (int icode
, rtx target
, rtx op0
, enum rtx_code code
)
2919 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
2924 /* Sign and zero extension from memory is often done specially on
2925 RISC machines, so forcing into a register here can pessimize
2927 if (flag_force_mem
&& code
!= SIGN_EXTEND
&& code
!= ZERO_EXTEND
)
2928 op0
= force_not_mem (op0
);
2930 /* Now, if insn does not accept our operands, put them into pseudos. */
2932 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
2933 op0
= copy_to_mode_reg (mode0
, op0
);
2935 if (! (*insn_data
[icode
].operand
[0].predicate
) (temp
, GET_MODE (temp
))
2936 || (flag_force_mem
&& MEM_P (temp
)))
2937 temp
= gen_reg_rtx (GET_MODE (temp
));
2939 pat
= GEN_FCN (icode
) (temp
, op0
);
2941 if (INSN_P (pat
) && NEXT_INSN (pat
) != NULL_RTX
&& code
!= UNKNOWN
)
2942 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
2947 emit_move_insn (target
, temp
);
2950 /* Emit code to perform a series of operations on a multi-word quantity, one
2953 Such a block is preceded by a CLOBBER of the output, consists of multiple
2954 insns, each setting one word of the output, and followed by a SET copying
2955 the output to itself.
2957 Each of the insns setting words of the output receives a REG_NO_CONFLICT
2958 note indicating that it doesn't conflict with the (also multi-word)
2959 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
2962 INSNS is a block of code generated to perform the operation, not including
2963 the CLOBBER and final copy. All insns that compute intermediate values
2964 are first emitted, followed by the block as described above.
2966 TARGET, OP0, and OP1 are the output and inputs of the operations,
2967 respectively. OP1 may be zero for a unary operation.
2969 EQUIV, if nonzero, is an expression to be placed into a REG_EQUAL note
2972 If TARGET is not a register, INSNS is simply emitted with no special
2973 processing. Likewise if anything in INSNS is not an INSN or if
2974 there is a libcall block inside INSNS.
2976 The final insn emitted is returned. */
2979 emit_no_conflict_block (rtx insns
, rtx target
, rtx op0
, rtx op1
, rtx equiv
)
2981 rtx prev
, next
, first
, last
, insn
;
2983 if (!REG_P (target
) || reload_in_progress
)
2984 return emit_insn (insns
);
2986 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2987 if (!NONJUMP_INSN_P (insn
)
2988 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
2989 return emit_insn (insns
);
2991 /* First emit all insns that do not store into words of the output and remove
2992 these from the list. */
2993 for (insn
= insns
; insn
; insn
= next
)
2998 next
= NEXT_INSN (insn
);
3000 /* Some ports (cris) create a libcall regions at their own. We must
3001 avoid any potential nesting of LIBCALLs. */
3002 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3003 remove_note (insn
, note
);
3004 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3005 remove_note (insn
, note
);
3007 if (GET_CODE (PATTERN (insn
)) == SET
|| GET_CODE (PATTERN (insn
)) == USE
3008 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
3009 set
= PATTERN (insn
);
3010 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
3012 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
3013 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
3015 set
= XVECEXP (PATTERN (insn
), 0, i
);
3023 if (! reg_overlap_mentioned_p (target
, SET_DEST (set
)))
3025 if (PREV_INSN (insn
))
3026 NEXT_INSN (PREV_INSN (insn
)) = next
;
3031 PREV_INSN (next
) = PREV_INSN (insn
);
3037 prev
= get_last_insn ();
3039 /* Now write the CLOBBER of the output, followed by the setting of each
3040 of the words, followed by the final copy. */
3041 if (target
!= op0
&& target
!= op1
)
3042 emit_insn (gen_rtx_CLOBBER (VOIDmode
, target
));
3044 for (insn
= insns
; insn
; insn
= next
)
3046 next
= NEXT_INSN (insn
);
3049 if (op1
&& REG_P (op1
))
3050 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op1
,
3053 if (op0
&& REG_P (op0
))
3054 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_NO_CONFLICT
, op0
,
3058 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3059 != CODE_FOR_nothing
)
3061 last
= emit_move_insn (target
, target
);
3063 set_unique_reg_note (last
, REG_EQUAL
, equiv
);
3067 last
= get_last_insn ();
3069 /* Remove any existing REG_EQUAL note from "last", or else it will
3070 be mistaken for a note referring to the full contents of the
3071 alleged libcall value when found together with the REG_RETVAL
3072 note added below. An existing note can come from an insn
3073 expansion at "last". */
3074 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3078 first
= get_insns ();
3080 first
= NEXT_INSN (prev
);
3082 /* Encapsulate the block so it gets manipulated as a unit. */
3083 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3085 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
, REG_NOTES (last
));
3090 /* Emit code to make a call to a constant function or a library call.
3092 INSNS is a list containing all insns emitted in the call.
3093 These insns leave the result in RESULT. Our block is to copy RESULT
3094 to TARGET, which is logically equivalent to EQUIV.
3096 We first emit any insns that set a pseudo on the assumption that these are
3097 loading constants into registers; doing so allows them to be safely cse'ed
3098 between blocks. Then we emit all the other insns in the block, followed by
3099 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3100 note with an operand of EQUIV.
3102 Moving assignments to pseudos outside of the block is done to improve
3103 the generated code, but is not required to generate correct code,
3104 hence being unable to move an assignment is not grounds for not making
3105 a libcall block. There are two reasons why it is safe to leave these
3106 insns inside the block: First, we know that these pseudos cannot be
3107 used in generated RTL outside the block since they are created for
3108 temporary purposes within the block. Second, CSE will not record the
3109 values of anything set inside a libcall block, so we know they must
3110 be dead at the end of the block.
3112 Except for the first group of insns (the ones setting pseudos), the
3113 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
3116 emit_libcall_block (rtx insns
, rtx target
, rtx result
, rtx equiv
)
3118 rtx final_dest
= target
;
3119 rtx prev
, next
, first
, last
, insn
;
3121 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3122 into a MEM later. Protect the libcall block from this change. */
3123 if (! REG_P (target
) || REG_USERVAR_P (target
))
3124 target
= gen_reg_rtx (GET_MODE (target
));
3126 /* If we're using non-call exceptions, a libcall corresponding to an
3127 operation that may trap may also trap. */
3128 if (flag_non_call_exceptions
&& may_trap_p (equiv
))
3130 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3133 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3135 if (note
!= 0 && INTVAL (XEXP (note
, 0)) <= 0)
3136 remove_note (insn
, note
);
3140 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3141 reg note to indicate that this call cannot throw or execute a nonlocal
3142 goto (unless there is already a REG_EH_REGION note, in which case
3144 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
3147 rtx note
= find_reg_note (insn
, REG_EH_REGION
, NULL_RTX
);
3150 XEXP (note
, 0) = constm1_rtx
;
3152 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EH_REGION
, constm1_rtx
,
3156 /* First emit all insns that set pseudos. Remove them from the list as
3157 we go. Avoid insns that set pseudos which were referenced in previous
3158 insns. These can be generated by move_by_pieces, for example,
3159 to update an address. Similarly, avoid insns that reference things
3160 set in previous insns. */
3162 for (insn
= insns
; insn
; insn
= next
)
3164 rtx set
= single_set (insn
);
3167 /* Some ports (cris) create a libcall regions at their own. We must
3168 avoid any potential nesting of LIBCALLs. */
3169 if ((note
= find_reg_note (insn
, REG_LIBCALL
, NULL
)) != NULL
)
3170 remove_note (insn
, note
);
3171 if ((note
= find_reg_note (insn
, REG_RETVAL
, NULL
)) != NULL
)
3172 remove_note (insn
, note
);
3174 next
= NEXT_INSN (insn
);
3176 if (set
!= 0 && REG_P (SET_DEST (set
))
3177 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
3179 || ((! INSN_P(insns
)
3180 || ! reg_mentioned_p (SET_DEST (set
), PATTERN (insns
)))
3181 && ! reg_used_between_p (SET_DEST (set
), insns
, insn
)
3182 && ! modified_in_p (SET_SRC (set
), insns
)
3183 && ! modified_between_p (SET_SRC (set
), insns
, insn
))))
3185 if (PREV_INSN (insn
))
3186 NEXT_INSN (PREV_INSN (insn
)) = next
;
3191 PREV_INSN (next
) = PREV_INSN (insn
);
3196 /* Some ports use a loop to copy large arguments onto the stack.
3197 Don't move anything outside such a loop. */
3202 prev
= get_last_insn ();
3204 /* Write the remaining insns followed by the final copy. */
3206 for (insn
= insns
; insn
; insn
= next
)
3208 next
= NEXT_INSN (insn
);
3213 last
= emit_move_insn (target
, result
);
3214 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
3215 != CODE_FOR_nothing
)
3216 set_unique_reg_note (last
, REG_EQUAL
, copy_rtx (equiv
));
3219 /* Remove any existing REG_EQUAL note from "last", or else it will
3220 be mistaken for a note referring to the full contents of the
3221 libcall value when found together with the REG_RETVAL note added
3222 below. An existing note can come from an insn expansion at
3224 remove_note (last
, find_reg_note (last
, REG_EQUAL
, NULL_RTX
));
3227 if (final_dest
!= target
)
3228 emit_move_insn (final_dest
, target
);
3231 first
= get_insns ();
3233 first
= NEXT_INSN (prev
);
3235 /* Encapsulate the block so it gets manipulated as a unit. */
3236 if (!flag_non_call_exceptions
|| !may_trap_p (equiv
))
3238 /* We can't attach the REG_LIBCALL and REG_RETVAL notes
3239 when the encapsulated region would not be in one basic block,
3240 i.e. when there is a control_flow_insn_p insn between FIRST and LAST.
3242 bool attach_libcall_retval_notes
= true;
3243 next
= NEXT_INSN (last
);
3244 for (insn
= first
; insn
!= next
; insn
= NEXT_INSN (insn
))
3245 if (control_flow_insn_p (insn
))
3247 attach_libcall_retval_notes
= false;
3251 if (attach_libcall_retval_notes
)
3253 REG_NOTES (first
) = gen_rtx_INSN_LIST (REG_LIBCALL
, last
,
3255 REG_NOTES (last
) = gen_rtx_INSN_LIST (REG_RETVAL
, first
,
3261 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3262 PURPOSE describes how this comparison will be used. CODE is the rtx
3263 comparison code we will be using.
3265 ??? Actually, CODE is slightly weaker than that. A target is still
3266 required to implement all of the normal bcc operations, but not
3267 required to implement all (or any) of the unordered bcc operations. */
3270 can_compare_p (enum rtx_code code
, enum machine_mode mode
,
3271 enum can_compare_purpose purpose
)
3275 if (cmp_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3277 if (purpose
== ccp_jump
)
3278 return bcc_gen_fctn
[(int) code
] != NULL
;
3279 else if (purpose
== ccp_store_flag
)
3280 return setcc_gen_code
[(int) code
] != CODE_FOR_nothing
;
3282 /* There's only one cmov entry point, and it's allowed to fail. */
3285 if (purpose
== ccp_jump
3286 && cbranch_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3288 if (purpose
== ccp_cmov
3289 && cmov_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3291 if (purpose
== ccp_store_flag
3292 && cstore_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
3294 mode
= GET_MODE_WIDER_MODE (mode
);
3296 while (mode
!= VOIDmode
);
3301 /* This function is called when we are going to emit a compare instruction that
3302 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
3304 *PMODE is the mode of the inputs (in case they are const_int).
3305 *PUNSIGNEDP nonzero says that the operands are unsigned;
3306 this matters if they need to be widened.
3308 If they have mode BLKmode, then SIZE specifies the size of both operands.
3310 This function performs all the setup necessary so that the caller only has
3311 to emit a single comparison insn. This setup can involve doing a BLKmode
3312 comparison or emitting a library call to perform the comparison if no insn
3313 is available to handle it.
3314 The values which are passed in through pointers can be modified; the caller
3315 should perform the comparison on the modified values. */
3318 prepare_cmp_insn (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
, rtx size
,
3319 enum machine_mode
*pmode
, int *punsignedp
,
3320 enum can_compare_purpose purpose
)
3322 enum machine_mode mode
= *pmode
;
3323 rtx x
= *px
, y
= *py
;
3324 int unsignedp
= *punsignedp
;
3325 enum mode_class
class;
3327 class = GET_MODE_CLASS (mode
);
3329 /* They could both be VOIDmode if both args are immediate constants,
3330 but we should fold that at an earlier stage.
3331 With no special code here, this will call abort,
3332 reminding the programmer to implement such folding. */
3334 if (mode
!= BLKmode
&& flag_force_mem
)
3336 /* Load duplicate non-volatile operands once. */
3337 if (rtx_equal_p (x
, y
) && ! volatile_refs_p (x
))
3339 x
= force_not_mem (x
);
3344 x
= force_not_mem (x
);
3345 y
= force_not_mem (y
);
3349 /* If we are inside an appropriately-short loop and we are optimizing,
3350 force expensive constants into a register. */
3351 if (CONSTANT_P (x
) && optimize
3352 && rtx_cost (x
, COMPARE
) > COSTS_N_INSNS (1))
3353 x
= force_reg (mode
, x
);
3355 if (CONSTANT_P (y
) && optimize
3356 && rtx_cost (y
, COMPARE
) > COSTS_N_INSNS (1))
3357 y
= force_reg (mode
, y
);
3360 /* Abort if we have a non-canonical comparison. The RTL documentation
3361 states that canonical comparisons are required only for targets which
3363 if (CONSTANT_P (x
) && ! CONSTANT_P (y
))
3367 /* Don't let both operands fail to indicate the mode. */
3368 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
3369 x
= force_reg (mode
, x
);
3371 /* Handle all BLKmode compares. */
3373 if (mode
== BLKmode
)
3375 enum machine_mode cmp_mode
, result_mode
;
3376 enum insn_code cmp_code
;
3381 = GEN_INT (MIN (MEM_ALIGN (x
), MEM_ALIGN (y
)) / BITS_PER_UNIT
);
3386 /* Try to use a memory block compare insn - either cmpstr
3387 or cmpmem will do. */
3388 for (cmp_mode
= GET_CLASS_NARROWEST_MODE (MODE_INT
);
3389 cmp_mode
!= VOIDmode
;
3390 cmp_mode
= GET_MODE_WIDER_MODE (cmp_mode
))
3392 cmp_code
= cmpmem_optab
[cmp_mode
];
3393 if (cmp_code
== CODE_FOR_nothing
)
3394 cmp_code
= cmpstr_optab
[cmp_mode
];
3395 if (cmp_code
== CODE_FOR_nothing
)
3398 /* Must make sure the size fits the insn's mode. */
3399 if ((GET_CODE (size
) == CONST_INT
3400 && INTVAL (size
) >= (1 << GET_MODE_BITSIZE (cmp_mode
)))
3401 || (GET_MODE_BITSIZE (GET_MODE (size
))
3402 > GET_MODE_BITSIZE (cmp_mode
)))
3405 result_mode
= insn_data
[cmp_code
].operand
[0].mode
;
3406 result
= gen_reg_rtx (result_mode
);
3407 size
= convert_to_mode (cmp_mode
, size
, 1);
3408 emit_insn (GEN_FCN (cmp_code
) (result
, x
, y
, size
, opalign
));
3412 *pmode
= result_mode
;
3416 /* Otherwise call a library function, memcmp. */
3417 libfunc
= memcmp_libfunc
;
3418 length_type
= sizetype
;
3419 result_mode
= TYPE_MODE (integer_type_node
);
3420 cmp_mode
= TYPE_MODE (length_type
);
3421 size
= convert_to_mode (TYPE_MODE (length_type
), size
,
3422 TYPE_UNSIGNED (length_type
));
3424 result
= emit_library_call_value (libfunc
, 0, LCT_PURE_MAKE_BLOCK
,
3431 *pmode
= result_mode
;
3435 /* Don't allow operands to the compare to trap, as that can put the
3436 compare and branch in different basic blocks. */
3437 if (flag_non_call_exceptions
)
3440 x
= force_reg (mode
, x
);
3442 y
= force_reg (mode
, y
);
3447 if (can_compare_p (*pcomparison
, mode
, purpose
))
3450 /* Handle a lib call just for the mode we are using. */
3452 if (cmp_optab
->handlers
[(int) mode
].libfunc
&& class != MODE_FLOAT
)
3454 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
3457 /* If we want unsigned, and this mode has a distinct unsigned
3458 comparison routine, use that. */
3459 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
3460 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
3462 result
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST_MAKE_BLOCK
,
3463 word_mode
, 2, x
, mode
, y
, mode
);
3467 if (TARGET_LIB_INT_CMP_BIASED
)
3468 /* Integer comparison returns a result that must be compared
3469 against 1, so that even if we do an unsigned compare
3470 afterward, there is still a value that can represent the
3471 result "less than". */
3481 if (class == MODE_FLOAT
)
3482 prepare_float_lib_cmp (px
, py
, pcomparison
, pmode
, punsignedp
);
3488 /* Before emitting an insn with code ICODE, make sure that X, which is going
3489 to be used for operand OPNUM of the insn, is converted from mode MODE to
3490 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
3491 that it is accepted by the operand predicate. Return the new value. */
3494 prepare_operand (int icode
, rtx x
, int opnum
, enum machine_mode mode
,
3495 enum machine_mode wider_mode
, int unsignedp
)
3497 if (mode
!= wider_mode
)
3498 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
3500 if (! (*insn_data
[icode
].operand
[opnum
].predicate
)
3501 (x
, insn_data
[icode
].operand
[opnum
].mode
))
3505 x
= copy_to_mode_reg (insn_data
[icode
].operand
[opnum
].mode
, x
);
3511 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
3512 we can do the comparison.
3513 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
3514 be NULL_RTX which indicates that only a comparison is to be generated. */
3517 emit_cmp_and_jump_insn_1 (rtx x
, rtx y
, enum machine_mode mode
,
3518 enum rtx_code comparison
, int unsignedp
, rtx label
)
3520 rtx test
= gen_rtx_fmt_ee (comparison
, mode
, x
, y
);
3521 enum mode_class
class = GET_MODE_CLASS (mode
);
3522 enum machine_mode wider_mode
= mode
;
3524 /* Try combined insns first. */
3527 enum insn_code icode
;
3528 PUT_MODE (test
, wider_mode
);
3532 icode
= cbranch_optab
->handlers
[(int) wider_mode
].insn_code
;
3534 if (icode
!= CODE_FOR_nothing
3535 && (*insn_data
[icode
].operand
[0].predicate
) (test
, wider_mode
))
3537 x
= prepare_operand (icode
, x
, 1, mode
, wider_mode
, unsignedp
);
3538 y
= prepare_operand (icode
, y
, 2, mode
, wider_mode
, unsignedp
);
3539 emit_jump_insn (GEN_FCN (icode
) (test
, x
, y
, label
));
3544 /* Handle some compares against zero. */
3545 icode
= (int) tst_optab
->handlers
[(int) wider_mode
].insn_code
;
3546 if (y
== CONST0_RTX (mode
) && icode
!= CODE_FOR_nothing
)
3548 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3549 emit_insn (GEN_FCN (icode
) (x
));
3551 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3555 /* Handle compares for which there is a directly suitable insn. */
3557 icode
= (int) cmp_optab
->handlers
[(int) wider_mode
].insn_code
;
3558 if (icode
!= CODE_FOR_nothing
)
3560 x
= prepare_operand (icode
, x
, 0, mode
, wider_mode
, unsignedp
);
3561 y
= prepare_operand (icode
, y
, 1, mode
, wider_mode
, unsignedp
);
3562 emit_insn (GEN_FCN (icode
) (x
, y
));
3564 emit_jump_insn ((*bcc_gen_fctn
[(int) comparison
]) (label
));
3568 if (class != MODE_INT
&& class != MODE_FLOAT
3569 && class != MODE_COMPLEX_FLOAT
)
3572 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
);
3574 while (wider_mode
!= VOIDmode
);
3579 /* Generate code to compare X with Y so that the condition codes are
3580 set and to jump to LABEL if the condition is true. If X is a
3581 constant and Y is not a constant, then the comparison is swapped to
3582 ensure that the comparison RTL has the canonical form.
3584 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
3585 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
3586 the proper branch condition code.
3588 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
3590 MODE is the mode of the inputs (in case they are const_int).
3592 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
3593 be passed unchanged to emit_cmp_insn, then potentially converted into an
3594 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
3597 emit_cmp_and_jump_insns (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3598 enum machine_mode mode
, int unsignedp
, rtx label
)
3600 rtx op0
= x
, op1
= y
;
3602 /* Swap operands and condition to ensure canonical RTL. */
3603 if (swap_commutative_operands_p (x
, y
))
3605 /* If we're not emitting a branch, this means some caller
3611 comparison
= swap_condition (comparison
);
3615 /* If OP0 is still a constant, then both X and Y must be constants. Force
3616 X into a register to avoid aborting in emit_cmp_insn due to non-canonical
3618 if (CONSTANT_P (op0
))
3619 op0
= force_reg (mode
, op0
);
3623 comparison
= unsigned_condition (comparison
);
3625 prepare_cmp_insn (&op0
, &op1
, &comparison
, size
, &mode
, &unsignedp
,
3627 emit_cmp_and_jump_insn_1 (op0
, op1
, mode
, comparison
, unsignedp
, label
);
3630 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
3633 emit_cmp_insn (rtx x
, rtx y
, enum rtx_code comparison
, rtx size
,
3634 enum machine_mode mode
, int unsignedp
)
3636 emit_cmp_and_jump_insns (x
, y
, comparison
, size
, mode
, unsignedp
, 0);
3639 /* Emit a library call comparison between floating point X and Y.
3640 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
3643 prepare_float_lib_cmp (rtx
*px
, rtx
*py
, enum rtx_code
*pcomparison
,
3644 enum machine_mode
*pmode
, int *punsignedp
)
3646 enum rtx_code comparison
= *pcomparison
;
3647 enum rtx_code swapped
= swap_condition (comparison
);
3648 enum rtx_code reversed
= reverse_condition_maybe_unordered (comparison
);
3651 enum machine_mode orig_mode
= GET_MODE (x
);
3652 enum machine_mode mode
;
3653 rtx value
, target
, insns
, equiv
;
3655 bool reversed_p
= false;
3657 for (mode
= orig_mode
; mode
!= VOIDmode
; mode
= GET_MODE_WIDER_MODE (mode
))
3659 if ((libfunc
= code_to_optab
[comparison
]->handlers
[mode
].libfunc
))
3662 if ((libfunc
= code_to_optab
[swapped
]->handlers
[mode
].libfunc
))
3665 tmp
= x
; x
= y
; y
= tmp
;
3666 comparison
= swapped
;
3670 if ((libfunc
= code_to_optab
[reversed
]->handlers
[mode
].libfunc
)
3671 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, reversed
))
3673 comparison
= reversed
;
3679 if (mode
== VOIDmode
)
3682 if (mode
!= orig_mode
)
3684 x
= convert_to_mode (mode
, x
, 0);
3685 y
= convert_to_mode (mode
, y
, 0);
3688 /* Attach a REG_EQUAL note describing the semantics of the libcall to
3689 the RTL. The allows the RTL optimizers to delete the libcall if the
3690 condition can be determined at compile-time. */
3691 if (comparison
== UNORDERED
)
3693 rtx temp
= simplify_gen_relational (NE
, word_mode
, mode
, x
, x
);
3694 equiv
= simplify_gen_relational (NE
, word_mode
, mode
, y
, y
);
3695 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3696 temp
, const_true_rtx
, equiv
);
3700 equiv
= simplify_gen_relational (comparison
, word_mode
, mode
, x
, y
);
3701 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3703 rtx true_rtx
, false_rtx
;
3708 true_rtx
= const0_rtx
;
3709 false_rtx
= const_true_rtx
;
3713 true_rtx
= const_true_rtx
;
3714 false_rtx
= const0_rtx
;
3718 true_rtx
= const1_rtx
;
3719 false_rtx
= const0_rtx
;
3723 true_rtx
= const0_rtx
;
3724 false_rtx
= constm1_rtx
;
3728 true_rtx
= constm1_rtx
;
3729 false_rtx
= const0_rtx
;
3733 true_rtx
= const0_rtx
;
3734 false_rtx
= const1_rtx
;
3740 equiv
= simplify_gen_ternary (IF_THEN_ELSE
, word_mode
, word_mode
,
3741 equiv
, true_rtx
, false_rtx
);
3746 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
3747 word_mode
, 2, x
, mode
, y
, mode
);
3748 insns
= get_insns ();
3751 target
= gen_reg_rtx (word_mode
);
3752 emit_libcall_block (insns
, target
, value
, equiv
);
3754 if (comparison
== UNORDERED
3755 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode
, comparison
))
3756 comparison
= reversed_p
? EQ
: NE
;
3761 *pcomparison
= comparison
;
3765 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3768 emit_indirect_jump (rtx loc
)
3770 if (! ((*insn_data
[(int) CODE_FOR_indirect_jump
].operand
[0].predicate
)
3772 loc
= copy_to_mode_reg (Pmode
, loc
);
3774 emit_jump_insn (gen_indirect_jump (loc
));
3778 #ifdef HAVE_conditional_move
3780 /* Emit a conditional move instruction if the machine supports one for that
3781 condition and machine mode.
3783 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3784 the mode to use should they be constants. If it is VOIDmode, they cannot
3787 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3788 should be stored there. MODE is the mode to use should they be constants.
3789 If it is VOIDmode, they cannot both be constants.
3791 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3792 is not supported. */
3795 emit_conditional_move (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
3796 enum machine_mode cmode
, rtx op2
, rtx op3
,
3797 enum machine_mode mode
, int unsignedp
)
3799 rtx tem
, subtarget
, comparison
, insn
;
3800 enum insn_code icode
;
3801 enum rtx_code reversed
;
3803 /* If one operand is constant, make it the second one. Only do this
3804 if the other operand is not constant as well. */
3806 if (swap_commutative_operands_p (op0
, op1
))
3811 code
= swap_condition (code
);
3814 /* get_condition will prefer to generate LT and GT even if the old
3815 comparison was against zero, so undo that canonicalization here since
3816 comparisons against zero are cheaper. */
3817 if (code
== LT
&& op1
== const1_rtx
)
3818 code
= LE
, op1
= const0_rtx
;
3819 else if (code
== GT
&& op1
== constm1_rtx
)
3820 code
= GE
, op1
= const0_rtx
;
3822 if (cmode
== VOIDmode
)
3823 cmode
= GET_MODE (op0
);
3825 if (swap_commutative_operands_p (op2
, op3
)
3826 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
3835 if (mode
== VOIDmode
)
3836 mode
= GET_MODE (op2
);
3838 icode
= movcc_gen_code
[mode
];
3840 if (icode
== CODE_FOR_nothing
)
3845 op2
= force_not_mem (op2
);
3846 op3
= force_not_mem (op3
);
3850 target
= gen_reg_rtx (mode
);
3854 /* If the insn doesn't accept these operands, put them in pseudos. */
3856 if (! (*insn_data
[icode
].operand
[0].predicate
)
3857 (subtarget
, insn_data
[icode
].operand
[0].mode
))
3858 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
3860 if (! (*insn_data
[icode
].operand
[2].predicate
)
3861 (op2
, insn_data
[icode
].operand
[2].mode
))
3862 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
3864 if (! (*insn_data
[icode
].operand
[3].predicate
)
3865 (op3
, insn_data
[icode
].operand
[3].mode
))
3866 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
3868 /* Everything should now be in the suitable form, so emit the compare insn
3869 and then the conditional move. */
3872 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
3874 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3875 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
3876 return NULL and let the caller figure out how best to deal with this
3878 if (GET_CODE (comparison
) != code
)
3881 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
3883 /* If that failed, then give up. */
3889 if (subtarget
!= target
)
3890 convert_move (target
, subtarget
, 0);
3895 /* Return nonzero if a conditional move of mode MODE is supported.
3897 This function is for combine so it can tell whether an insn that looks
3898 like a conditional move is actually supported by the hardware. If we
3899 guess wrong we lose a bit on optimization, but that's it. */
3900 /* ??? sparc64 supports conditionally moving integers values based on fp
3901 comparisons, and vice versa. How do we handle them? */
3904 can_conditionally_move_p (enum machine_mode mode
)
3906 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
3912 #endif /* HAVE_conditional_move */
3914 /* Emit a conditional addition instruction if the machine supports one for that
3915 condition and machine mode.
3917 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3918 the mode to use should they be constants. If it is VOIDmode, they cannot
3921 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
3922 should be stored there. MODE is the mode to use should they be constants.
3923 If it is VOIDmode, they cannot both be constants.
3925 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3926 is not supported. */
3929 emit_conditional_add (rtx target
, enum rtx_code code
, rtx op0
, rtx op1
,
3930 enum machine_mode cmode
, rtx op2
, rtx op3
,
3931 enum machine_mode mode
, int unsignedp
)
3933 rtx tem
, subtarget
, comparison
, insn
;
3934 enum insn_code icode
;
3935 enum rtx_code reversed
;
3937 /* If one operand is constant, make it the second one. Only do this
3938 if the other operand is not constant as well. */
3940 if (swap_commutative_operands_p (op0
, op1
))
3945 code
= swap_condition (code
);
3948 /* get_condition will prefer to generate LT and GT even if the old
3949 comparison was against zero, so undo that canonicalization here since
3950 comparisons against zero are cheaper. */
3951 if (code
== LT
&& op1
== const1_rtx
)
3952 code
= LE
, op1
= const0_rtx
;
3953 else if (code
== GT
&& op1
== constm1_rtx
)
3954 code
= GE
, op1
= const0_rtx
;
3956 if (cmode
== VOIDmode
)
3957 cmode
= GET_MODE (op0
);
3959 if (swap_commutative_operands_p (op2
, op3
)
3960 && ((reversed
= reversed_comparison_code_parts (code
, op0
, op1
, NULL
))
3969 if (mode
== VOIDmode
)
3970 mode
= GET_MODE (op2
);
3972 icode
= addcc_optab
->handlers
[(int) mode
].insn_code
;
3974 if (icode
== CODE_FOR_nothing
)
3979 op2
= force_not_mem (op2
);
3980 op3
= force_not_mem (op3
);
3984 target
= gen_reg_rtx (mode
);
3986 /* If the insn doesn't accept these operands, put them in pseudos. */
3988 if (! (*insn_data
[icode
].operand
[0].predicate
)
3989 (target
, insn_data
[icode
].operand
[0].mode
))
3990 subtarget
= gen_reg_rtx (insn_data
[icode
].operand
[0].mode
);
3994 if (! (*insn_data
[icode
].operand
[2].predicate
)
3995 (op2
, insn_data
[icode
].operand
[2].mode
))
3996 op2
= copy_to_mode_reg (insn_data
[icode
].operand
[2].mode
, op2
);
3998 if (! (*insn_data
[icode
].operand
[3].predicate
)
3999 (op3
, insn_data
[icode
].operand
[3].mode
))
4000 op3
= copy_to_mode_reg (insn_data
[icode
].operand
[3].mode
, op3
);
4002 /* Everything should now be in the suitable form, so emit the compare insn
4003 and then the conditional move. */
4006 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
);
4008 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4009 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4010 return NULL and let the caller figure out how best to deal with this
4012 if (GET_CODE (comparison
) != code
)
4015 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
4017 /* If that failed, then give up. */
4023 if (subtarget
!= target
)
4024 convert_move (target
, subtarget
, 0);
4029 /* These functions attempt to generate an insn body, rather than
4030 emitting the insn, but if the gen function already emits them, we
4031 make no attempt to turn them back into naked patterns. */
4033 /* Generate and return an insn body to add Y to X. */
4036 gen_add2_insn (rtx x
, rtx y
)
4038 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4040 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4041 (x
, insn_data
[icode
].operand
[0].mode
))
4042 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4043 (x
, insn_data
[icode
].operand
[1].mode
))
4044 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4045 (y
, insn_data
[icode
].operand
[2].mode
)))
4048 return (GEN_FCN (icode
) (x
, x
, y
));
4051 /* Generate and return an insn body to add r1 and c,
4052 storing the result in r0. */
4054 gen_add3_insn (rtx r0
, rtx r1
, rtx c
)
4056 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4058 if (icode
== CODE_FOR_nothing
4059 || ! ((*insn_data
[icode
].operand
[0].predicate
)
4060 (r0
, insn_data
[icode
].operand
[0].mode
))
4061 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4062 (r1
, insn_data
[icode
].operand
[1].mode
))
4063 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4064 (c
, insn_data
[icode
].operand
[2].mode
)))
4067 return (GEN_FCN (icode
) (r0
, r1
, c
));
4071 have_add2_insn (rtx x
, rtx y
)
4075 if (GET_MODE (x
) == VOIDmode
)
4078 icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4080 if (icode
== CODE_FOR_nothing
)
4083 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4084 (x
, insn_data
[icode
].operand
[0].mode
))
4085 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4086 (x
, insn_data
[icode
].operand
[1].mode
))
4087 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4088 (y
, insn_data
[icode
].operand
[2].mode
)))
4094 /* Generate and return an insn body to subtract Y from X. */
4097 gen_sub2_insn (rtx x
, rtx y
)
4099 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4101 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4102 (x
, insn_data
[icode
].operand
[0].mode
))
4103 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4104 (x
, insn_data
[icode
].operand
[1].mode
))
4105 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4106 (y
, insn_data
[icode
].operand
[2].mode
)))
4109 return (GEN_FCN (icode
) (x
, x
, y
));
4112 /* Generate and return an insn body to subtract r1 and c,
4113 storing the result in r0. */
4115 gen_sub3_insn (rtx r0
, rtx r1
, rtx c
)
4117 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (r0
)].insn_code
;
4119 if (icode
== CODE_FOR_nothing
4120 || ! ((*insn_data
[icode
].operand
[0].predicate
)
4121 (r0
, insn_data
[icode
].operand
[0].mode
))
4122 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4123 (r1
, insn_data
[icode
].operand
[1].mode
))
4124 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4125 (c
, insn_data
[icode
].operand
[2].mode
)))
4128 return (GEN_FCN (icode
) (r0
, r1
, c
));
4132 have_sub2_insn (rtx x
, rtx y
)
4136 if (GET_MODE (x
) == VOIDmode
)
4139 icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
4141 if (icode
== CODE_FOR_nothing
)
4144 if (! ((*insn_data
[icode
].operand
[0].predicate
)
4145 (x
, insn_data
[icode
].operand
[0].mode
))
4146 || ! ((*insn_data
[icode
].operand
[1].predicate
)
4147 (x
, insn_data
[icode
].operand
[1].mode
))
4148 || ! ((*insn_data
[icode
].operand
[2].predicate
)
4149 (y
, insn_data
[icode
].operand
[2].mode
)))
4155 /* Generate the body of an instruction to copy Y into X.
4156 It may be a list of insns, if one insn isn't enough. */
4159 gen_move_insn (rtx x
, rtx y
)
4164 emit_move_insn_1 (x
, y
);
4170 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4171 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4172 no such operation exists, CODE_FOR_nothing will be returned. */
4175 can_extend_p (enum machine_mode to_mode
, enum machine_mode from_mode
,
4179 #ifdef HAVE_ptr_extend
4181 return CODE_FOR_ptr_extend
;
4184 tab
= unsignedp
? zext_optab
: sext_optab
;
4185 return tab
->handlers
[to_mode
][from_mode
].insn_code
;
4188 /* Generate the body of an insn to extend Y (with mode MFROM)
4189 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4192 gen_extend_insn (rtx x
, rtx y
, enum machine_mode mto
,
4193 enum machine_mode mfrom
, int unsignedp
)
4195 enum insn_code icode
= can_extend_p (mto
, mfrom
, unsignedp
);
4196 return GEN_FCN (icode
) (x
, y
);
4199 /* can_fix_p and can_float_p say whether the target machine
4200 can directly convert a given fixed point type to
4201 a given floating point type, or vice versa.
4202 The returned value is the CODE_FOR_... value to use,
4203 or CODE_FOR_nothing if these modes cannot be directly converted.
4205 *TRUNCP_PTR is set to 1 if it is necessary to output
4206 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4208 static enum insn_code
4209 can_fix_p (enum machine_mode fixmode
, enum machine_mode fltmode
,
4210 int unsignedp
, int *truncp_ptr
)
4213 enum insn_code icode
;
4215 tab
= unsignedp
? ufixtrunc_optab
: sfixtrunc_optab
;
4216 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
4217 if (icode
!= CODE_FOR_nothing
)
4223 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4224 for this to work. We need to rework the fix* and ftrunc* patterns
4225 and documentation. */
4226 tab
= unsignedp
? ufix_optab
: sfix_optab
;
4227 icode
= tab
->handlers
[fixmode
][fltmode
].insn_code
;
4228 if (icode
!= CODE_FOR_nothing
4229 && ftrunc_optab
->handlers
[fltmode
].insn_code
!= CODE_FOR_nothing
)
4236 return CODE_FOR_nothing
;
4239 static enum insn_code
4240 can_float_p (enum machine_mode fltmode
, enum machine_mode fixmode
,
4245 tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4246 return tab
->handlers
[fltmode
][fixmode
].insn_code
;
4249 /* Generate code to convert FROM to floating point
4250 and store in TO. FROM must be fixed point and not VOIDmode.
4251 UNSIGNEDP nonzero means regard FROM as unsigned.
4252 Normally this is done by correcting the final value
4253 if it is negative. */
4256 expand_float (rtx to
, rtx from
, int unsignedp
)
4258 enum insn_code icode
;
4260 enum machine_mode fmode
, imode
;
4262 /* Crash now, because we won't be able to decide which mode to use. */
4263 if (GET_MODE (from
) == VOIDmode
)
4266 /* Look for an insn to do the conversion. Do it in the specified
4267 modes if possible; otherwise convert either input, output or both to
4268 wider mode. If the integer mode is wider than the mode of FROM,
4269 we can do the conversion signed even if the input is unsigned. */
4271 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4272 fmode
= GET_MODE_WIDER_MODE (fmode
))
4273 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
4274 imode
= GET_MODE_WIDER_MODE (imode
))
4276 int doing_unsigned
= unsignedp
;
4278 if (fmode
!= GET_MODE (to
)
4279 && significand_size (fmode
) < GET_MODE_BITSIZE (GET_MODE (from
)))
4282 icode
= can_float_p (fmode
, imode
, unsignedp
);
4283 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (from
) && unsignedp
)
4284 icode
= can_float_p (fmode
, imode
, 0), doing_unsigned
= 0;
4286 if (icode
!= CODE_FOR_nothing
)
4288 if (imode
!= GET_MODE (from
))
4289 from
= convert_to_mode (imode
, from
, unsignedp
);
4291 if (fmode
!= GET_MODE (to
))
4292 target
= gen_reg_rtx (fmode
);
4294 emit_unop_insn (icode
, target
, from
,
4295 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
4298 convert_move (to
, target
, 0);
4303 /* Unsigned integer, and no way to convert directly.
4304 Convert as signed, then conditionally adjust the result. */
4307 rtx label
= gen_label_rtx ();
4309 REAL_VALUE_TYPE offset
;
4312 from
= force_not_mem (from
);
4314 /* Look for a usable floating mode FMODE wider than the source and at
4315 least as wide as the target. Using FMODE will avoid rounding woes
4316 with unsigned values greater than the signed maximum value. */
4318 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
4319 fmode
= GET_MODE_WIDER_MODE (fmode
))
4320 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
4321 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
4324 if (fmode
== VOIDmode
)
4326 /* There is no such mode. Pretend the target is wide enough. */
4327 fmode
= GET_MODE (to
);
4329 /* Avoid double-rounding when TO is narrower than FROM. */
4330 if ((significand_size (fmode
) + 1)
4331 < GET_MODE_BITSIZE (GET_MODE (from
)))
4334 rtx neglabel
= gen_label_rtx ();
4336 /* Don't use TARGET if it isn't a register, is a hard register,
4337 or is the wrong mode. */
4339 || REGNO (target
) < FIRST_PSEUDO_REGISTER
4340 || GET_MODE (target
) != fmode
)
4341 target
= gen_reg_rtx (fmode
);
4343 imode
= GET_MODE (from
);
4344 do_pending_stack_adjust ();
4346 /* Test whether the sign bit is set. */
4347 emit_cmp_and_jump_insns (from
, const0_rtx
, LT
, NULL_RTX
, imode
,
4350 /* The sign bit is not set. Convert as signed. */
4351 expand_float (target
, from
, 0);
4352 emit_jump_insn (gen_jump (label
));
4355 /* The sign bit is set.
4356 Convert to a usable (positive signed) value by shifting right
4357 one bit, while remembering if a nonzero bit was shifted
4358 out; i.e., compute (from & 1) | (from >> 1). */
4360 emit_label (neglabel
);
4361 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
4362 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
4363 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
4365 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
4367 expand_float (target
, temp
, 0);
4369 /* Multiply by 2 to undo the shift above. */
4370 temp
= expand_binop (fmode
, add_optab
, target
, target
,
4371 target
, 0, OPTAB_LIB_WIDEN
);
4373 emit_move_insn (target
, temp
);
4375 do_pending_stack_adjust ();
4381 /* If we are about to do some arithmetic to correct for an
4382 unsigned operand, do it in a pseudo-register. */
4384 if (GET_MODE (to
) != fmode
4385 || !REG_P (to
) || REGNO (to
) < FIRST_PSEUDO_REGISTER
)
4386 target
= gen_reg_rtx (fmode
);
4388 /* Convert as signed integer to floating. */
4389 expand_float (target
, from
, 0);
4391 /* If FROM is negative (and therefore TO is negative),
4392 correct its value by 2**bitwidth. */
4394 do_pending_stack_adjust ();
4395 emit_cmp_and_jump_insns (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
),
4399 real_2expN (&offset
, GET_MODE_BITSIZE (GET_MODE (from
)));
4400 temp
= expand_binop (fmode
, add_optab
, target
,
4401 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
4402 target
, 0, OPTAB_LIB_WIDEN
);
4404 emit_move_insn (target
, temp
);
4406 do_pending_stack_adjust ();
4411 /* No hardware instruction available; call a library routine. */
4416 convert_optab tab
= unsignedp
? ufloat_optab
: sfloat_optab
;
4418 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
4419 from
= convert_to_mode (SImode
, from
, unsignedp
);
4422 from
= force_not_mem (from
);
4424 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4430 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4431 GET_MODE (to
), 1, from
,
4433 insns
= get_insns ();
4436 emit_libcall_block (insns
, target
, value
,
4437 gen_rtx_FLOAT (GET_MODE (to
), from
));
4442 /* Copy result to requested destination
4443 if we have been computing in a temp location. */
4447 if (GET_MODE (target
) == GET_MODE (to
))
4448 emit_move_insn (to
, target
);
4450 convert_move (to
, target
, 0);
4454 /* Generate code to convert FROM to fixed point and store in TO. FROM
4455 must be floating point. */
4458 expand_fix (rtx to
, rtx from
, int unsignedp
)
4460 enum insn_code icode
;
4462 enum machine_mode fmode
, imode
;
4465 /* We first try to find a pair of modes, one real and one integer, at
4466 least as wide as FROM and TO, respectively, in which we can open-code
4467 this conversion. If the integer mode is wider than the mode of TO,
4468 we can do the conversion either signed or unsigned. */
4470 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4471 fmode
= GET_MODE_WIDER_MODE (fmode
))
4472 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
4473 imode
= GET_MODE_WIDER_MODE (imode
))
4475 int doing_unsigned
= unsignedp
;
4477 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
4478 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
4479 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
4481 if (icode
!= CODE_FOR_nothing
)
4483 if (fmode
!= GET_MODE (from
))
4484 from
= convert_to_mode (fmode
, from
, 0);
4488 rtx temp
= gen_reg_rtx (GET_MODE (from
));
4489 from
= expand_unop (GET_MODE (from
), ftrunc_optab
, from
,
4493 if (imode
!= GET_MODE (to
))
4494 target
= gen_reg_rtx (imode
);
4496 emit_unop_insn (icode
, target
, from
,
4497 doing_unsigned
? UNSIGNED_FIX
: FIX
);
4499 convert_move (to
, target
, unsignedp
);
4504 /* For an unsigned conversion, there is one more way to do it.
4505 If we have a signed conversion, we generate code that compares
4506 the real value to the largest representable positive number. If it
4507 is smaller, the conversion is done normally. Otherwise, subtract
4508 one plus the highest signed number, convert, and add it back.
4510 We only need to check all real modes, since we know we didn't find
4511 anything with a wider integer mode.
4513 This code used to extend FP value into mode wider than the destination.
4514 This is not needed. Consider, for instance conversion from SFmode
4517 The hot path through the code is dealing with inputs smaller than 2^63
4518 and doing just the conversion, so there is no bits to lose.
4520 In the other path we know the value is positive in the range 2^63..2^64-1
4521 inclusive. (as for other input overflow happens and result is undefined)
4522 So we know that the most important bit set in mantissa corresponds to
4523 2^63. The subtraction of 2^63 should not generate any rounding as it
4524 simply clears out that bit. The rest is trivial. */
4526 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
4527 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
4528 fmode
= GET_MODE_WIDER_MODE (fmode
))
4529 if (CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
4533 REAL_VALUE_TYPE offset
;
4534 rtx limit
, lab1
, lab2
, insn
;
4536 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
4537 real_2expN (&offset
, bitsize
- 1);
4538 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
4539 lab1
= gen_label_rtx ();
4540 lab2
= gen_label_rtx ();
4543 from
= force_not_mem (from
);
4545 if (fmode
!= GET_MODE (from
))
4546 from
= convert_to_mode (fmode
, from
, 0);
4548 /* See if we need to do the subtraction. */
4549 do_pending_stack_adjust ();
4550 emit_cmp_and_jump_insns (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
),
4553 /* If not, do the signed "fix" and branch around fixup code. */
4554 expand_fix (to
, from
, 0);
4555 emit_jump_insn (gen_jump (lab2
));
4558 /* Otherwise, subtract 2**(N-1), convert to signed number,
4559 then add 2**(N-1). Do the addition using XOR since this
4560 will often generate better code. */
4562 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
4563 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
4564 expand_fix (to
, target
, 0);
4565 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
4567 ((HOST_WIDE_INT
) 1 << (bitsize
- 1),
4569 to
, 1, OPTAB_LIB_WIDEN
);
4572 emit_move_insn (to
, target
);
4576 if (mov_optab
->handlers
[(int) GET_MODE (to
)].insn_code
4577 != CODE_FOR_nothing
)
4579 /* Make a place for a REG_NOTE and add it. */
4580 insn
= emit_move_insn (to
, to
);
4581 set_unique_reg_note (insn
,
4583 gen_rtx_fmt_e (UNSIGNED_FIX
,
4591 /* We can't do it with an insn, so use a library call. But first ensure
4592 that the mode of TO is at least as wide as SImode, since those are the
4593 only library calls we know about. */
4595 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
4597 target
= gen_reg_rtx (SImode
);
4599 expand_fix (target
, from
, unsignedp
);
4607 convert_optab tab
= unsignedp
? ufix_optab
: sfix_optab
;
4608 libfunc
= tab
->handlers
[GET_MODE (to
)][GET_MODE (from
)].libfunc
;
4613 from
= force_not_mem (from
);
4617 value
= emit_library_call_value (libfunc
, NULL_RTX
, LCT_CONST
,
4618 GET_MODE (to
), 1, from
,
4620 insns
= get_insns ();
4623 emit_libcall_block (insns
, target
, value
,
4624 gen_rtx_fmt_e (unsignedp
? UNSIGNED_FIX
: FIX
,
4625 GET_MODE (to
), from
));
4630 if (GET_MODE (to
) == GET_MODE (target
))
4631 emit_move_insn (to
, target
);
4633 convert_move (to
, target
, 0);
4637 /* Report whether we have an instruction to perform the operation
4638 specified by CODE on operands of mode MODE. */
4640 have_insn_for (enum rtx_code code
, enum machine_mode mode
)
4642 return (code_to_optab
[(int) code
] != 0
4643 && (code_to_optab
[(int) code
]->handlers
[(int) mode
].insn_code
4644 != CODE_FOR_nothing
));
4647 /* Create a blank optab. */
4652 optab op
= ggc_alloc (sizeof (struct optab
));
4653 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4655 op
->handlers
[i
].insn_code
= CODE_FOR_nothing
;
4656 op
->handlers
[i
].libfunc
= 0;
4662 static convert_optab
4663 new_convert_optab (void)
4666 convert_optab op
= ggc_alloc (sizeof (struct convert_optab
));
4667 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4668 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
4670 op
->handlers
[i
][j
].insn_code
= CODE_FOR_nothing
;
4671 op
->handlers
[i
][j
].libfunc
= 0;
4676 /* Same, but fill in its code as CODE, and write it into the
4677 code_to_optab table. */
4679 init_optab (enum rtx_code code
)
4681 optab op
= new_optab ();
4683 code_to_optab
[(int) code
] = op
;
4687 /* Same, but fill in its code as CODE, and do _not_ write it into
4688 the code_to_optab table. */
4690 init_optabv (enum rtx_code code
)
4692 optab op
= new_optab ();
4697 /* Conversion optabs never go in the code_to_optab table. */
4698 static inline convert_optab
4699 init_convert_optab (enum rtx_code code
)
4701 convert_optab op
= new_convert_optab ();
4706 /* Initialize the libfunc fields of an entire group of entries in some
4707 optab. Each entry is set equal to a string consisting of a leading
4708 pair of underscores followed by a generic operation name followed by
4709 a mode name (downshifted to lowercase) followed by a single character
4710 representing the number of operands for the given operation (which is
4711 usually one of the characters '2', '3', or '4').
4713 OPTABLE is the table in which libfunc fields are to be initialized.
4714 FIRST_MODE is the first machine mode index in the given optab to
4716 LAST_MODE is the last machine mode index in the given optab to
4718 OPNAME is the generic (string) name of the operation.
4719 SUFFIX is the character which specifies the number of operands for
4720 the given generic operation.
4724 init_libfuncs (optab optable
, int first_mode
, int last_mode
,
4725 const char *opname
, int suffix
)
4728 unsigned opname_len
= strlen (opname
);
4730 for (mode
= first_mode
; (int) mode
<= (int) last_mode
;
4731 mode
= (enum machine_mode
) ((int) mode
+ 1))
4733 const char *mname
= GET_MODE_NAME (mode
);
4734 unsigned mname_len
= strlen (mname
);
4735 char *libfunc_name
= alloca (2 + opname_len
+ mname_len
+ 1 + 1);
4742 for (q
= opname
; *q
; )
4744 for (q
= mname
; *q
; q
++)
4745 *p
++ = TOLOWER (*q
);
4749 optable
->handlers
[(int) mode
].libfunc
4750 = init_one_libfunc (ggc_alloc_string (libfunc_name
, p
- libfunc_name
));
4754 /* Initialize the libfunc fields of an entire group of entries in some
4755 optab which correspond to all integer mode operations. The parameters
4756 have the same meaning as similarly named ones for the `init_libfuncs'
4757 routine. (See above). */
4760 init_integral_libfuncs (optab optable
, const char *opname
, int suffix
)
4762 int maxsize
= 2*BITS_PER_WORD
;
4763 if (maxsize
< LONG_LONG_TYPE_SIZE
)
4764 maxsize
= LONG_LONG_TYPE_SIZE
;
4765 init_libfuncs (optable
, word_mode
,
4766 mode_for_size (maxsize
, MODE_INT
, 0),
4770 /* Initialize the libfunc fields of an entire group of entries in some
4771 optab which correspond to all real mode operations. The parameters
4772 have the same meaning as similarly named ones for the `init_libfuncs'
4773 routine. (See above). */
4776 init_floating_libfuncs (optab optable
, const char *opname
, int suffix
)
4778 init_libfuncs (optable
, MIN_MODE_FLOAT
, MAX_MODE_FLOAT
, opname
, suffix
);
4781 /* Initialize the libfunc fields of an entire group of entries of an
4782 inter-mode-class conversion optab. The string formation rules are
4783 similar to the ones for init_libfuncs, above, but instead of having
4784 a mode name and an operand count these functions have two mode names
4785 and no operand count. */
4787 init_interclass_conv_libfuncs (convert_optab tab
, const char *opname
,
4788 enum mode_class from_class
,
4789 enum mode_class to_class
)
4791 enum machine_mode first_from_mode
= GET_CLASS_NARROWEST_MODE (from_class
);
4792 enum machine_mode first_to_mode
= GET_CLASS_NARROWEST_MODE (to_class
);
4793 size_t opname_len
= strlen (opname
);
4794 size_t max_mname_len
= 0;
4796 enum machine_mode fmode
, tmode
;
4797 const char *fname
, *tname
;
4799 char *libfunc_name
, *suffix
;
4802 for (fmode
= first_from_mode
;
4804 fmode
= GET_MODE_WIDER_MODE (fmode
))
4805 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (fmode
)));
4807 for (tmode
= first_to_mode
;
4809 tmode
= GET_MODE_WIDER_MODE (tmode
))
4810 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (tmode
)));
4812 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
4813 libfunc_name
[0] = '_';
4814 libfunc_name
[1] = '_';
4815 memcpy (&libfunc_name
[2], opname
, opname_len
);
4816 suffix
= libfunc_name
+ opname_len
+ 2;
4818 for (fmode
= first_from_mode
; fmode
!= VOIDmode
;
4819 fmode
= GET_MODE_WIDER_MODE (fmode
))
4820 for (tmode
= first_to_mode
; tmode
!= VOIDmode
;
4821 tmode
= GET_MODE_WIDER_MODE (tmode
))
4823 fname
= GET_MODE_NAME (fmode
);
4824 tname
= GET_MODE_NAME (tmode
);
4827 for (q
= fname
; *q
; p
++, q
++)
4829 for (q
= tname
; *q
; p
++, q
++)
4834 tab
->handlers
[tmode
][fmode
].libfunc
4835 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
4840 /* Initialize the libfunc fields of an entire group of entries of an
4841 intra-mode-class conversion optab. The string formation rules are
4842 similar to the ones for init_libfunc, above. WIDENING says whether
4843 the optab goes from narrow to wide modes or vice versa. These functions
4844 have two mode names _and_ an operand count. */
4846 init_intraclass_conv_libfuncs (convert_optab tab
, const char *opname
,
4847 enum mode_class
class, bool widening
)
4849 enum machine_mode first_mode
= GET_CLASS_NARROWEST_MODE (class);
4850 size_t opname_len
= strlen (opname
);
4851 size_t max_mname_len
= 0;
4853 enum machine_mode nmode
, wmode
;
4854 const char *nname
, *wname
;
4856 char *libfunc_name
, *suffix
;
4859 for (nmode
= first_mode
; nmode
!= VOIDmode
;
4860 nmode
= GET_MODE_WIDER_MODE (nmode
))
4861 max_mname_len
= MAX (max_mname_len
, strlen (GET_MODE_NAME (nmode
)));
4863 libfunc_name
= alloca (2 + opname_len
+ 2*max_mname_len
+ 1 + 1);
4864 libfunc_name
[0] = '_';
4865 libfunc_name
[1] = '_';
4866 memcpy (&libfunc_name
[2], opname
, opname_len
);
4867 suffix
= libfunc_name
+ opname_len
+ 2;
4869 for (nmode
= first_mode
; nmode
!= VOIDmode
;
4870 nmode
= GET_MODE_WIDER_MODE (nmode
))
4871 for (wmode
= GET_MODE_WIDER_MODE (nmode
); wmode
!= VOIDmode
;
4872 wmode
= GET_MODE_WIDER_MODE (wmode
))
4874 nname
= GET_MODE_NAME (nmode
);
4875 wname
= GET_MODE_NAME (wmode
);
4878 for (q
= widening
? nname
: wname
; *q
; p
++, q
++)
4880 for (q
= widening
? wname
: nname
; *q
; p
++, q
++)
4886 tab
->handlers
[widening
? wmode
: nmode
]
4887 [widening
? nmode
: wmode
].libfunc
4888 = init_one_libfunc (ggc_alloc_string (libfunc_name
,
4895 init_one_libfunc (const char *name
)
4899 /* Create a FUNCTION_DECL that can be passed to
4900 targetm.encode_section_info. */
4901 /* ??? We don't have any type information except for this is
4902 a function. Pretend this is "int foo()". */
4903 tree decl
= build_decl (FUNCTION_DECL
, get_identifier (name
),
4904 build_function_type (integer_type_node
, NULL_TREE
));
4905 DECL_ARTIFICIAL (decl
) = 1;
4906 DECL_EXTERNAL (decl
) = 1;
4907 TREE_PUBLIC (decl
) = 1;
4909 symbol
= XEXP (DECL_RTL (decl
), 0);
4911 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
4912 are the flags assigned by targetm.encode_section_info. */
4913 SYMBOL_REF_DECL (symbol
) = 0;
4918 /* Call this to reset the function entry for one optab (OPTABLE) in mode
4919 MODE to NAME, which should be either 0 or a string constant. */
4921 set_optab_libfunc (optab optable
, enum machine_mode mode
, const char *name
)
4924 optable
->handlers
[mode
].libfunc
= init_one_libfunc (name
);
4926 optable
->handlers
[mode
].libfunc
= 0;
4929 /* Call this to reset the function entry for one conversion optab
4930 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
4931 either 0 or a string constant. */
4933 set_conv_libfunc (convert_optab optable
, enum machine_mode tmode
,
4934 enum machine_mode fmode
, const char *name
)
4937 optable
->handlers
[tmode
][fmode
].libfunc
= init_one_libfunc (name
);
4939 optable
->handlers
[tmode
][fmode
].libfunc
= 0;
4942 /* Call this once to initialize the contents of the optabs
4943 appropriately for the current target machine. */
4950 /* Start by initializing all tables to contain CODE_FOR_nothing. */
4952 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
4953 setcc_gen_code
[i
] = CODE_FOR_nothing
;
4955 #ifdef HAVE_conditional_move
4956 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4957 movcc_gen_code
[i
] = CODE_FOR_nothing
;
4960 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4962 vcond_gen_code
[i
] = CODE_FOR_nothing
;
4963 vcondu_gen_code
[i
] = CODE_FOR_nothing
;
4966 add_optab
= init_optab (PLUS
);
4967 addv_optab
= init_optabv (PLUS
);
4968 sub_optab
= init_optab (MINUS
);
4969 subv_optab
= init_optabv (MINUS
);
4970 smul_optab
= init_optab (MULT
);
4971 smulv_optab
= init_optabv (MULT
);
4972 smul_highpart_optab
= init_optab (UNKNOWN
);
4973 umul_highpart_optab
= init_optab (UNKNOWN
);
4974 smul_widen_optab
= init_optab (UNKNOWN
);
4975 umul_widen_optab
= init_optab (UNKNOWN
);
4976 sdiv_optab
= init_optab (DIV
);
4977 sdivv_optab
= init_optabv (DIV
);
4978 sdivmod_optab
= init_optab (UNKNOWN
);
4979 udiv_optab
= init_optab (UDIV
);
4980 udivmod_optab
= init_optab (UNKNOWN
);
4981 smod_optab
= init_optab (MOD
);
4982 umod_optab
= init_optab (UMOD
);
4983 fmod_optab
= init_optab (UNKNOWN
);
4984 drem_optab
= init_optab (UNKNOWN
);
4985 ftrunc_optab
= init_optab (UNKNOWN
);
4986 and_optab
= init_optab (AND
);
4987 ior_optab
= init_optab (IOR
);
4988 xor_optab
= init_optab (XOR
);
4989 ashl_optab
= init_optab (ASHIFT
);
4990 ashr_optab
= init_optab (ASHIFTRT
);
4991 lshr_optab
= init_optab (LSHIFTRT
);
4992 rotl_optab
= init_optab (ROTATE
);
4993 rotr_optab
= init_optab (ROTATERT
);
4994 smin_optab
= init_optab (SMIN
);
4995 smax_optab
= init_optab (SMAX
);
4996 umin_optab
= init_optab (UMIN
);
4997 umax_optab
= init_optab (UMAX
);
4998 pow_optab
= init_optab (UNKNOWN
);
4999 atan2_optab
= init_optab (UNKNOWN
);
5001 /* These three have codes assigned exclusively for the sake of
5003 mov_optab
= init_optab (SET
);
5004 movstrict_optab
= init_optab (STRICT_LOW_PART
);
5005 cmp_optab
= init_optab (COMPARE
);
5007 ucmp_optab
= init_optab (UNKNOWN
);
5008 tst_optab
= init_optab (UNKNOWN
);
5010 eq_optab
= init_optab (EQ
);
5011 ne_optab
= init_optab (NE
);
5012 gt_optab
= init_optab (GT
);
5013 ge_optab
= init_optab (GE
);
5014 lt_optab
= init_optab (LT
);
5015 le_optab
= init_optab (LE
);
5016 unord_optab
= init_optab (UNORDERED
);
5018 neg_optab
= init_optab (NEG
);
5019 negv_optab
= init_optabv (NEG
);
5020 abs_optab
= init_optab (ABS
);
5021 absv_optab
= init_optabv (ABS
);
5022 addcc_optab
= init_optab (UNKNOWN
);
5023 one_cmpl_optab
= init_optab (NOT
);
5024 ffs_optab
= init_optab (FFS
);
5025 clz_optab
= init_optab (CLZ
);
5026 ctz_optab
= init_optab (CTZ
);
5027 popcount_optab
= init_optab (POPCOUNT
);
5028 parity_optab
= init_optab (PARITY
);
5029 sqrt_optab
= init_optab (SQRT
);
5030 floor_optab
= init_optab (UNKNOWN
);
5031 ceil_optab
= init_optab (UNKNOWN
);
5032 round_optab
= init_optab (UNKNOWN
);
5033 btrunc_optab
= init_optab (UNKNOWN
);
5034 nearbyint_optab
= init_optab (UNKNOWN
);
5035 rint_optab
= init_optab (UNKNOWN
);
5036 sincos_optab
= init_optab (UNKNOWN
);
5037 sin_optab
= init_optab (UNKNOWN
);
5038 asin_optab
= init_optab (UNKNOWN
);
5039 cos_optab
= init_optab (UNKNOWN
);
5040 acos_optab
= init_optab (UNKNOWN
);
5041 exp_optab
= init_optab (UNKNOWN
);
5042 exp10_optab
= init_optab (UNKNOWN
);
5043 exp2_optab
= init_optab (UNKNOWN
);
5044 expm1_optab
= init_optab (UNKNOWN
);
5045 ldexp_optab
= init_optab (UNKNOWN
);
5046 logb_optab
= init_optab (UNKNOWN
);
5047 ilogb_optab
= init_optab (UNKNOWN
);
5048 log_optab
= init_optab (UNKNOWN
);
5049 log10_optab
= init_optab (UNKNOWN
);
5050 log2_optab
= init_optab (UNKNOWN
);
5051 log1p_optab
= init_optab (UNKNOWN
);
5052 tan_optab
= init_optab (UNKNOWN
);
5053 atan_optab
= init_optab (UNKNOWN
);
5054 copysign_optab
= init_optab (UNKNOWN
);
5056 strlen_optab
= init_optab (UNKNOWN
);
5057 cbranch_optab
= init_optab (UNKNOWN
);
5058 cmov_optab
= init_optab (UNKNOWN
);
5059 cstore_optab
= init_optab (UNKNOWN
);
5060 push_optab
= init_optab (UNKNOWN
);
5062 vec_extract_optab
= init_optab (UNKNOWN
);
5063 vec_set_optab
= init_optab (UNKNOWN
);
5064 vec_init_optab
= init_optab (UNKNOWN
);
5065 vec_realign_load_optab
= init_optab (UNKNOWN
);
5066 movmisalign_optab
= init_optab (UNKNOWN
);
5068 powi_optab
= init_optab (UNKNOWN
);
5071 sext_optab
= init_convert_optab (SIGN_EXTEND
);
5072 zext_optab
= init_convert_optab (ZERO_EXTEND
);
5073 trunc_optab
= init_convert_optab (TRUNCATE
);
5074 sfix_optab
= init_convert_optab (FIX
);
5075 ufix_optab
= init_convert_optab (UNSIGNED_FIX
);
5076 sfixtrunc_optab
= init_convert_optab (UNKNOWN
);
5077 ufixtrunc_optab
= init_convert_optab (UNKNOWN
);
5078 sfloat_optab
= init_convert_optab (FLOAT
);
5079 ufloat_optab
= init_convert_optab (UNSIGNED_FLOAT
);
5081 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
5083 movmem_optab
[i
] = CODE_FOR_nothing
;
5084 clrmem_optab
[i
] = CODE_FOR_nothing
;
5085 cmpstr_optab
[i
] = CODE_FOR_nothing
;
5086 cmpmem_optab
[i
] = CODE_FOR_nothing
;
5088 #ifdef HAVE_SECONDARY_RELOADS
5089 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
5093 /* Fill in the optabs with the insns we support. */
5096 /* Initialize the optabs with the names of the library functions. */
5097 init_integral_libfuncs (add_optab
, "add", '3');
5098 init_floating_libfuncs (add_optab
, "add", '3');
5099 init_integral_libfuncs (addv_optab
, "addv", '3');
5100 init_floating_libfuncs (addv_optab
, "add", '3');
5101 init_integral_libfuncs (sub_optab
, "sub", '3');
5102 init_floating_libfuncs (sub_optab
, "sub", '3');
5103 init_integral_libfuncs (subv_optab
, "subv", '3');
5104 init_floating_libfuncs (subv_optab
, "sub", '3');
5105 init_integral_libfuncs (smul_optab
, "mul", '3');
5106 init_floating_libfuncs (smul_optab
, "mul", '3');
5107 init_integral_libfuncs (smulv_optab
, "mulv", '3');
5108 init_floating_libfuncs (smulv_optab
, "mul", '3');
5109 init_integral_libfuncs (sdiv_optab
, "div", '3');
5110 init_floating_libfuncs (sdiv_optab
, "div", '3');
5111 init_integral_libfuncs (sdivv_optab
, "divv", '3');
5112 init_integral_libfuncs (udiv_optab
, "udiv", '3');
5113 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
5114 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
5115 init_integral_libfuncs (smod_optab
, "mod", '3');
5116 init_integral_libfuncs (umod_optab
, "umod", '3');
5117 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
5118 init_integral_libfuncs (and_optab
, "and", '3');
5119 init_integral_libfuncs (ior_optab
, "ior", '3');
5120 init_integral_libfuncs (xor_optab
, "xor", '3');
5121 init_integral_libfuncs (ashl_optab
, "ashl", '3');
5122 init_integral_libfuncs (ashr_optab
, "ashr", '3');
5123 init_integral_libfuncs (lshr_optab
, "lshr", '3');
5124 init_integral_libfuncs (smin_optab
, "min", '3');
5125 init_floating_libfuncs (smin_optab
, "min", '3');
5126 init_integral_libfuncs (smax_optab
, "max", '3');
5127 init_floating_libfuncs (smax_optab
, "max", '3');
5128 init_integral_libfuncs (umin_optab
, "umin", '3');
5129 init_integral_libfuncs (umax_optab
, "umax", '3');
5130 init_integral_libfuncs (neg_optab
, "neg", '2');
5131 init_floating_libfuncs (neg_optab
, "neg", '2');
5132 init_integral_libfuncs (negv_optab
, "negv", '2');
5133 init_floating_libfuncs (negv_optab
, "neg", '2');
5134 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
5135 init_integral_libfuncs (ffs_optab
, "ffs", '2');
5136 init_integral_libfuncs (clz_optab
, "clz", '2');
5137 init_integral_libfuncs (ctz_optab
, "ctz", '2');
5138 init_integral_libfuncs (popcount_optab
, "popcount", '2');
5139 init_integral_libfuncs (parity_optab
, "parity", '2');
5141 /* Comparison libcalls for integers MUST come in pairs,
5143 init_integral_libfuncs (cmp_optab
, "cmp", '2');
5144 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
5145 init_floating_libfuncs (cmp_optab
, "cmp", '2');
5147 /* EQ etc are floating point only. */
5148 init_floating_libfuncs (eq_optab
, "eq", '2');
5149 init_floating_libfuncs (ne_optab
, "ne", '2');
5150 init_floating_libfuncs (gt_optab
, "gt", '2');
5151 init_floating_libfuncs (ge_optab
, "ge", '2');
5152 init_floating_libfuncs (lt_optab
, "lt", '2');
5153 init_floating_libfuncs (le_optab
, "le", '2');
5154 init_floating_libfuncs (unord_optab
, "unord", '2');
5156 init_floating_libfuncs (powi_optab
, "powi", '2');
5159 init_interclass_conv_libfuncs (sfloat_optab
, "float",
5160 MODE_INT
, MODE_FLOAT
);
5161 init_interclass_conv_libfuncs (sfix_optab
, "fix",
5162 MODE_FLOAT
, MODE_INT
);
5163 init_interclass_conv_libfuncs (ufix_optab
, "fixuns",
5164 MODE_FLOAT
, MODE_INT
);
5166 /* sext_optab is also used for FLOAT_EXTEND. */
5167 init_intraclass_conv_libfuncs (sext_optab
, "extend", MODE_FLOAT
, true);
5168 init_intraclass_conv_libfuncs (trunc_optab
, "trunc", MODE_FLOAT
, false);
5170 /* Use cabs for double complex abs, since systems generally have cabs.
5171 Don't define any libcall for float complex, so that cabs will be used. */
5172 if (complex_double_type_node
)
5173 abs_optab
->handlers
[TYPE_MODE (complex_double_type_node
)].libfunc
5174 = init_one_libfunc ("cabs");
5176 /* The ffs function operates on `int'. */
5177 ffs_optab
->handlers
[(int) mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0)].libfunc
5178 = init_one_libfunc ("ffs");
5180 abort_libfunc
= init_one_libfunc ("abort");
5181 memcpy_libfunc
= init_one_libfunc ("memcpy");
5182 memmove_libfunc
= init_one_libfunc ("memmove");
5183 memcmp_libfunc
= init_one_libfunc ("memcmp");
5184 memset_libfunc
= init_one_libfunc ("memset");
5185 setbits_libfunc
= init_one_libfunc ("__setbits");
5187 unwind_resume_libfunc
= init_one_libfunc (USING_SJLJ_EXCEPTIONS
5188 ? "_Unwind_SjLj_Resume"
5189 : "_Unwind_Resume");
5190 #ifndef DONT_USE_BUILTIN_SETJMP
5191 setjmp_libfunc
= init_one_libfunc ("__builtin_setjmp");
5192 longjmp_libfunc
= init_one_libfunc ("__builtin_longjmp");
5194 setjmp_libfunc
= init_one_libfunc ("setjmp");
5195 longjmp_libfunc
= init_one_libfunc ("longjmp");
5197 unwind_sjlj_register_libfunc
= init_one_libfunc ("_Unwind_SjLj_Register");
5198 unwind_sjlj_unregister_libfunc
5199 = init_one_libfunc ("_Unwind_SjLj_Unregister");
5201 /* For function entry/exit instrumentation. */
5202 profile_function_entry_libfunc
5203 = init_one_libfunc ("__cyg_profile_func_enter");
5204 profile_function_exit_libfunc
5205 = init_one_libfunc ("__cyg_profile_func_exit");
5207 gcov_flush_libfunc
= init_one_libfunc ("__gcov_flush");
5209 if (HAVE_conditional_trap
)
5210 trap_rtx
= gen_rtx_fmt_ee (EQ
, VOIDmode
, NULL_RTX
, NULL_RTX
);
5212 /* Allow the target to add more libcalls or rename some, etc. */
5213 targetm
.init_libfuncs ();
5218 /* Print information about the current contents of the optabs on
5222 debug_optab_libfuncs (void)
5228 /* Dump the arithmetic optabs. */
5229 for (i
= 0; i
!= (int) OTI_MAX
; i
++)
5230 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5233 struct optab_handlers
*h
;
5236 h
= &o
->handlers
[j
];
5239 if (GET_CODE (h
->libfunc
) != SYMBOL_REF
)
5241 fprintf (stderr
, "%s\t%s:\t%s\n",
5242 GET_RTX_NAME (o
->code
),
5244 XSTR (h
->libfunc
, 0));
5248 /* Dump the conversion optabs. */
5249 for (i
= 0; i
< (int) CTI_MAX
; ++i
)
5250 for (j
= 0; j
< NUM_MACHINE_MODES
; ++j
)
5251 for (k
= 0; k
< NUM_MACHINE_MODES
; ++k
)
5254 struct optab_handlers
*h
;
5256 o
= &convert_optab_table
[i
];
5257 h
= &o
->handlers
[j
][k
];
5260 if (GET_CODE (h
->libfunc
) != SYMBOL_REF
)
5262 fprintf (stderr
, "%s\t%s\t%s:\t%s\n",
5263 GET_RTX_NAME (o
->code
),
5266 XSTR (h
->libfunc
, 0));
5274 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
5275 CODE. Return 0 on failure. */
5278 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED
, rtx op1
,
5279 rtx op2 ATTRIBUTE_UNUSED
, rtx tcode ATTRIBUTE_UNUSED
)
5281 enum machine_mode mode
= GET_MODE (op1
);
5282 enum insn_code icode
;
5285 if (!HAVE_conditional_trap
)
5288 if (mode
== VOIDmode
)
5291 icode
= cmp_optab
->handlers
[(int) mode
].insn_code
;
5292 if (icode
== CODE_FOR_nothing
)
5296 op1
= prepare_operand (icode
, op1
, 0, mode
, mode
, 0);
5297 op2
= prepare_operand (icode
, op2
, 1, mode
, mode
, 0);
5303 emit_insn (GEN_FCN (icode
) (op1
, op2
));
5305 PUT_CODE (trap_rtx
, code
);
5306 insn
= gen_conditional_trap (trap_rtx
, tcode
);
5310 insn
= get_insns ();
5317 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
5318 or unsigned operation code. */
5320 static enum rtx_code
5321 get_rtx_code (enum tree_code tcode
, bool unsignedp
)
5333 code
= unsignedp
? LTU
: LT
;
5336 code
= unsignedp
? LEU
: LE
;
5339 code
= unsignedp
? GTU
: GT
;
5342 code
= unsignedp
? GEU
: GE
;
5345 case UNORDERED_EXPR
:
5376 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
5377 unsigned operators. Do not generate compare instruction. */
5380 vector_compare_rtx (tree cond
, bool unsignedp
, enum insn_code icode
)
5382 enum rtx_code rcode
;
5384 rtx rtx_op0
, rtx_op1
;
5386 if (!COMPARISON_CLASS_P (cond
))
5388 /* This is unlikely. While generating VEC_COND_EXPR,
5389 auto vectorizer ensures that condition is a relational
5395 rcode
= get_rtx_code (TREE_CODE (cond
), unsignedp
);
5396 t_op0
= TREE_OPERAND (cond
, 0);
5397 t_op1
= TREE_OPERAND (cond
, 1);
5400 /* Expand operands. */
5401 rtx_op0
= expand_expr (t_op0
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op0
)), 1);
5402 rtx_op1
= expand_expr (t_op1
, NULL_RTX
, TYPE_MODE (TREE_TYPE (t_op1
)), 1);
5404 if (!(*insn_data
[icode
].operand
[4].predicate
) (rtx_op0
, GET_MODE (rtx_op0
))
5405 && GET_MODE (rtx_op0
) != VOIDmode
)
5406 rtx_op0
= force_reg (GET_MODE (rtx_op0
), rtx_op0
);
5408 if (!(*insn_data
[icode
].operand
[5].predicate
) (rtx_op1
, GET_MODE (rtx_op1
))
5409 && GET_MODE (rtx_op1
) != VOIDmode
)
5410 rtx_op1
= force_reg (GET_MODE (rtx_op1
), rtx_op1
);
5412 return gen_rtx_fmt_ee (rcode
, VOIDmode
, rtx_op0
, rtx_op1
);
5415 /* Return insn code for VEC_COND_EXPR EXPR. */
5417 static inline enum insn_code
5418 get_vcond_icode (tree expr
, enum machine_mode mode
)
5420 enum insn_code icode
= CODE_FOR_nothing
;
5422 if (TYPE_UNSIGNED (TREE_TYPE (expr
)))
5423 icode
= vcondu_gen_code
[mode
];
5425 icode
= vcond_gen_code
[mode
];
5429 /* Return TRUE iff, appropriate vector insns are available
5430 for vector cond expr expr in VMODE mode. */
5433 expand_vec_cond_expr_p (tree expr
, enum machine_mode vmode
)
5435 if (get_vcond_icode (expr
, vmode
) == CODE_FOR_nothing
)
5440 /* Generate insns for VEC_COND_EXPR. */
5443 expand_vec_cond_expr (tree vec_cond_expr
, rtx target
)
5445 enum insn_code icode
;
5446 rtx comparison
, rtx_op1
, rtx_op2
, cc_op0
, cc_op1
;
5447 enum machine_mode mode
= TYPE_MODE (TREE_TYPE (vec_cond_expr
));
5448 bool unsignedp
= TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr
));
5450 icode
= get_vcond_icode (vec_cond_expr
, mode
);
5451 if (icode
== CODE_FOR_nothing
)
5455 target
= gen_reg_rtx (mode
);
5457 /* Get comparison rtx. First expand both cond expr operands. */
5458 comparison
= vector_compare_rtx (TREE_OPERAND (vec_cond_expr
, 0),
5460 cc_op0
= XEXP (comparison
, 0);
5461 cc_op1
= XEXP (comparison
, 1);
5462 /* Expand both operands and force them in reg, if required. */
5463 rtx_op1
= expand_expr (TREE_OPERAND (vec_cond_expr
, 1),
5464 NULL_RTX
, VOIDmode
, 1);
5465 if (!(*insn_data
[icode
].operand
[1].predicate
) (rtx_op1
, mode
)
5466 && mode
!= VOIDmode
)
5467 rtx_op1
= force_reg (mode
, rtx_op1
);
5469 rtx_op2
= expand_expr (TREE_OPERAND (vec_cond_expr
, 2),
5470 NULL_RTX
, VOIDmode
, 1);
5471 if (!(*insn_data
[icode
].operand
[2].predicate
) (rtx_op2
, mode
)
5472 && mode
!= VOIDmode
)
5473 rtx_op2
= force_reg (mode
, rtx_op2
);
5475 /* Emit instruction! */
5476 emit_insn (GEN_FCN (icode
) (target
, rtx_op1
, rtx_op2
,
5477 comparison
, cc_op0
, cc_op1
));
5481 #include "gt-optabs.h"