1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 88, 92, 93, 94, 95, 1996 Free Software Foundation, Inc.
4 This file is part of GNU CC.
6 GNU CC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2, or (at your option)
11 GNU CC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GNU CC; see the file COPYING. If not, write to
18 the Free Software Foundation, 59 Temple Place - Suite 330,
19 Boston, MA 02111-1307, USA. */
26 #include "insn-flags.h"
27 #include "insn-codes.h"
29 #include "insn-config.h"
34 /* Each optab contains info on how this target machine
35 can perform a particular operation
36 for all sizes and kinds of operands.
38 The operation to be performed is often specified
39 by passing one of these optabs as an argument.
41 See expr.h for documentation of these optabs. */
/* NOTE(review): this file is a damaged extraction of GCC's optabs.c
   (circa 1996).  The leading numeric token on each line is residue of an
   embedded line-numbering pass, and gaps in those numbers mark elided
   source lines.  Restore from a pristine copy before attempting to build;
   only comments have been added here.  */
/* Optabs for operations that do not map one-to-one onto an rtx code:
   high-part and widening multiplies (signed and unsigned variants),
   strict-low-part moves, and unsigned compare.  */
46 optab smul_highpart_optab
;
47 optab umul_highpart_optab
;
48 optab smul_widen_optab
;
49 optab umul_widen_optab
;
72 optab movstrict_optab
;
83 optab ucmp_optab
; /* Used only for libcalls for unsigned comparisons. */
88 /* Tables of patterns for extending one integer mode to another. */
/* NOTE(review): the trailing [2] index is presumably 0 = signed,
   1 = unsigned extension -- the initialization code that would confirm
   this is not visible in this chunk.  Also note extendtab is sized with
   MAX_MACHINE_MODE while the three conversion tables below use
   NUM_MACHINE_MODES; historically these were equal, but verify.  */
89 enum insn_code extendtab
[MAX_MACHINE_MODE
][MAX_MACHINE_MODE
][2];
91 /* Tables of patterns for converting between fixed and floating point. */
92 enum insn_code fixtab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
93 enum insn_code fixtrunctab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
94 enum insn_code floattab
[NUM_MACHINE_MODES
][NUM_MACHINE_MODES
][2];
96 /* Contains the optab used for each rtx code. */
/* Maps each rtx code to its optab; +1 slot beyond NUM_RTX_CODE.  */
97 optab code_to_optab
[NUM_RTX_CODE
+ 1];
99 /* SYMBOL_REF rtx's for the library functions that are called
100 implicitly and not via optabs. */
/* Float-to-wider-float extensions (sf = SFmode, df = DFmode,
   xf = XFmode, tf = TFmode).  */
102 rtx extendsfdf2_libfunc
;
103 rtx extendsfxf2_libfunc
;
104 rtx extendsftf2_libfunc
;
105 rtx extenddfxf2_libfunc
;
106 rtx extenddftf2_libfunc
;
/* Float-to-narrower-float truncations.  */
108 rtx truncdfsf2_libfunc
;
109 rtx truncxfsf2_libfunc
;
110 rtx trunctfsf2_libfunc
;
111 rtx truncxfdf2_libfunc
;
112 rtx trunctfdf2_libfunc
;
/* Signed integer (si/di/ti) to floating-point conversions.  */
156 rtx floatsisf_libfunc
;
157 rtx floatdisf_libfunc
;
158 rtx floattisf_libfunc
;
160 rtx floatsidf_libfunc
;
161 rtx floatdidf_libfunc
;
162 rtx floattidf_libfunc
;
164 rtx floatsixf_libfunc
;
165 rtx floatdixf_libfunc
;
166 rtx floattixf_libfunc
;
168 rtx floatsitf_libfunc
;
169 rtx floatditf_libfunc
;
170 rtx floattitf_libfunc
;
/* Floating-point to unsigned integer conversions.  */
188 rtx fixunssfsi_libfunc
;
189 rtx fixunssfdi_libfunc
;
190 rtx fixunssfti_libfunc
;
192 rtx fixunsdfsi_libfunc
;
193 rtx fixunsdfdi_libfunc
;
194 rtx fixunsdfti_libfunc
;
196 rtx fixunsxfsi_libfunc
;
197 rtx fixunsxfdi_libfunc
;
198 rtx fixunsxfti_libfunc
;
200 rtx fixunstfsi_libfunc
;
201 rtx fixunstfdi_libfunc
;
202 rtx fixunstfti_libfunc
;
204 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
205 gives the gen_function to make a branch to test that condition. */
207 rtxfun bcc_gen_fctn
[NUM_RTX_CODE
];
209 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
210 gives the insn code to make a store-condition insn
211 to test that condition. */
213 enum insn_code setcc_gen_code
[NUM_RTX_CODE
];
/* NOTE(review): the #endif matching the #ifdef below (original line
   ~222) has been elided by the extraction; restore it before
   compiling.  */
215 #ifdef HAVE_conditional_move
216 /* Indexed by the machine mode, gives the insn code to make a conditional
217 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
218 setcc_gen_code to cut down on the number of named patterns. Consider a day
219 when a lot more rtx codes are conditional (eg: for the ARM). */
221 enum insn_code movcc_gen_code
[NUM_MACHINE_MODES
];
/* Forward declarations for this file's static helpers (PROTO is the
   pre-ANSI compatibility macro for prototyped parameter lists).
   NOTE(review): the prototypes for can_fix_p and can_float_p are
   truncated -- their closing lines (originals 228 and 230) were elided
   by the extraction.  */
224 static int add_equal_note
PROTO((rtx
, rtx
, enum rtx_code
, rtx
, rtx
));
225 static rtx widen_operand
PROTO((rtx
, enum machine_mode
,
226 enum machine_mode
, int, int));
227 static enum insn_code can_fix_p
PROTO((enum machine_mode
, enum machine_mode
,
229 static enum insn_code can_float_p
PROTO((enum machine_mode
, enum machine_mode
,
231 static rtx ftruncify
PROTO((rtx
));
232 static optab init_optab
PROTO((enum rtx_code
));
233 static void init_libfuncs
PROTO((optab
, int, int, char *, int));
234 static void init_integral_libfuncs
PROTO((optab
, char *, int));
235 static void init_floating_libfuncs
PROTO((optab
, char *, int));
236 static void init_complex_libfuncs
PROTO((optab
, char *, int));
238 /* Add a REG_EQUAL note to the last insn in SEQ. TARGET is being set to
239 the result of operation CODE applied to OP0 (and OP1 if it is a binary
242 If the last insn does not set TARGET, don't do anything, but return 1.
244 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
245 don't add the REG_EQUAL note but return 0. Our caller can then try
246 again, ensuring that TARGET is not one of the operands. */
/* NOTE(review): this definition is a fragment.  The return-type line,
   K&R parameter declarations, braces, and several interior statements
   are elided (the embedded numbering jumps 246->249, 249->259, 265->267,
   268->272, 277->280, 287->...).  Code below is left byte-identical;
   do not edit until the missing lines are restored.  */
249 add_equal_note (seq
, target
, code
, op0
, op1
)
/* Bail out (returning 1, per the comment above) unless CODE is a unary,
   binary, commutative, or comparison rtx class, SEQ is a SEQUENCE whose
   last insn is a single set of TARGET (or a STRICT_LOW_PART of it).  */
259 if ((GET_RTX_CLASS (code
) != '1' && GET_RTX_CLASS (code
) != '2'
260 && GET_RTX_CLASS (code
) != 'c' && GET_RTX_CLASS (code
) != '<')
261 || GET_CODE (seq
) != SEQUENCE
262 || (set
= single_set (XVECEXP (seq
, 0, XVECLEN (seq
, 0) - 1))) == 0
263 || GET_CODE (target
) == ZERO_EXTRACT
264 || (! rtx_equal_p (SET_DEST (set
), target
)
265 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside the
267 && (GET_CODE (SET_DEST (set
)) != STRICT_LOW_PART
268 || ! rtx_equal_p (SUBREG_REG (XEXP (SET_DEST (set
), 0)),
272 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
273 besides the last insn. */
274 if (reg_overlap_mentioned_p (target
, op0
)
275 || (op1
&& reg_overlap_mentioned_p (target
, op1
)))
/* Scan SEQ backwards from the second-to-last element; the body of this
   if (original line ~278, elided) presumably returns 0 on a clobber.  */
276 for (i
= XVECLEN (seq
, 0) - 2; i
>= 0; i
--)
277 if (reg_set_p (target
, XVECEXP (seq
, 0, i
)))
/* Build the note rtx: unary codes take one operand, all others two.  */
280 if (GET_RTX_CLASS (code
) == '1')
281 note
= gen_rtx (code
, GET_MODE (target
), copy_rtx (op0
));
283 note
= gen_rtx (code
, GET_MODE (target
), copy_rtx (op0
), copy_rtx (op1
));
/* Prepend the REG_EQUAL note onto the last insn of SEQ.  */
285 REG_NOTES (XVECEXP (seq
, 0, XVECLEN (seq
, 0) - 1))
286 = gen_rtx (EXPR_LIST
, REG_EQUAL
, note
,
287 REG_NOTES (XVECEXP (seq
, 0, XVECLEN (seq
, 0) - 1)));
292 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
293 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
294 not actually do a sign-extend or zero-extend, but can leave the
295 higher-order bits of the result rtx undefined, for example, in the case
296 of logical operations, but not right shifts. */
/* NOTE(review): fragment.  The return-type line, the remaining K&R
   parameter declarations, the opening brace, the leading condition of
   the extend test (original ~310), and the trailing "return result;"
   plus closing brace (originals 326-327) are elided -- the embedded
   numbering jumps 296->299, 301->307, 308->311 and ends at 325.
   Code left byte-identical.  */
299 widen_operand (op
, mode
, oldmode
, unsignedp
, no_extend
)
301 enum machine_mode mode
, oldmode
;
307 /* If we must extend do so. If OP is either a constant or a SUBREG
308 for a promoted object, also extend since it will be more efficient to
311 || GET_MODE (op
) == VOIDmode
312 || (GET_CODE (op
) == SUBREG
&& SUBREG_PROMOTED_VAR_P (op
)))
313 return convert_modes (mode
, oldmode
, op
, unsignedp
);
315 /* If MODE is no wider than a single word, we return a paradoxical
317 if (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
)
318 return gen_rtx (SUBREG
, mode
, force_reg (GET_MODE (op
), op
), 0);
320 /* Otherwise, get an object of MODE, clobber it, and set the low-order
323 result
= gen_reg_rtx (mode
);
324 emit_insn (gen_rtx (CLOBBER
, VOIDmode
, result
));
/* Copy OP into the low-order part of RESULT; the high part stays
   undefined, which the NO_EXTEND contract above permits.  */
325 emit_move_insn (gen_lowpart (GET_MODE (op
), result
), op
);
329 /* Generate code to perform an operation specified by BINOPTAB
330 on operands OP0 and OP1, with result having machine-mode MODE.
332 UNSIGNEDP is for the case where we have to widen the operands
333 to perform the operation. It says to use zero-extension.
335 If TARGET is nonzero, the value
336 is generated there, if it is convenient to do so.
337 In all cases an rtx is returned for the locus of the value;
338 this may or may not be TARGET. */
341 expand_binop (mode
, binoptab
, op0
, op1
, target
, unsignedp
, methods
)
342 enum machine_mode mode
;
347 enum optab_methods methods
;
349 enum optab_methods next_methods
350 = (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
351 ? OPTAB_WIDEN
: methods
);
352 enum mode_class
class;
353 enum machine_mode wider_mode
;
355 int commutative_op
= 0;
356 int shift_op
= (binoptab
->code
== ASHIFT
357 || binoptab
->code
== ASHIFTRT
358 || binoptab
->code
== LSHIFTRT
359 || binoptab
->code
== ROTATE
360 || binoptab
->code
== ROTATERT
);
361 rtx entry_last
= get_last_insn ();
364 class = GET_MODE_CLASS (mode
);
366 op0
= protect_from_queue (op0
, 0);
367 op1
= protect_from_queue (op1
, 0);
369 target
= protect_from_queue (target
, 1);
373 op0
= force_not_mem (op0
);
374 op1
= force_not_mem (op1
);
377 /* If subtracting an integer constant, convert this into an addition of
378 the negated constant. */
380 if (binoptab
== sub_optab
&& GET_CODE (op1
) == CONST_INT
)
382 op1
= negate_rtx (mode
, op1
);
383 binoptab
= add_optab
;
386 /* If we are inside an appropriately-short loop and one operand is an
387 expensive constant, force it into a register. */
388 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
389 && rtx_cost (op0
, binoptab
->code
) > 2)
390 op0
= force_reg (mode
, op0
);
392 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
393 && ! shift_op
&& rtx_cost (op1
, binoptab
->code
) > 2)
394 op1
= force_reg (mode
, op1
);
396 /* Record where to delete back to if we backtrack. */
397 last
= get_last_insn ();
399 /* If operation is commutative,
400 try to make the first operand a register.
401 Even better, try to make it the same as the target.
402 Also try to make the last operand a constant. */
403 if (GET_RTX_CLASS (binoptab
->code
) == 'c'
404 || binoptab
== smul_widen_optab
405 || binoptab
== umul_widen_optab
406 || binoptab
== smul_highpart_optab
407 || binoptab
== umul_highpart_optab
)
411 if (((target
== 0 || GET_CODE (target
) == REG
)
412 ? ((GET_CODE (op1
) == REG
413 && GET_CODE (op0
) != REG
)
415 : rtx_equal_p (op1
, target
))
416 || GET_CODE (op0
) == CONST_INT
)
424 /* If we can do it with a three-operand insn, do so. */
426 if (methods
!= OPTAB_MUST_WIDEN
427 && binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
429 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
430 enum machine_mode mode0
= insn_operand_mode
[icode
][1];
431 enum machine_mode mode1
= insn_operand_mode
[icode
][2];
433 rtx xop0
= op0
, xop1
= op1
;
438 temp
= gen_reg_rtx (mode
);
440 /* If it is a commutative operator and the modes would match
441 if we would swap the operands, we can save the conversions. */
444 if (GET_MODE (op0
) != mode0
&& GET_MODE (op1
) != mode1
445 && GET_MODE (op0
) == mode1
&& GET_MODE (op1
) == mode0
)
449 tmp
= op0
; op0
= op1
; op1
= tmp
;
450 tmp
= xop0
; xop0
= xop1
; xop1
= tmp
;
454 /* In case the insn wants input operands in modes different from
455 the result, convert the operands. */
457 if (GET_MODE (op0
) != VOIDmode
458 && GET_MODE (op0
) != mode0
459 && mode0
!= VOIDmode
)
460 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
462 if (GET_MODE (xop1
) != VOIDmode
463 && GET_MODE (xop1
) != mode1
464 && mode1
!= VOIDmode
)
465 xop1
= convert_to_mode (mode1
, xop1
, unsignedp
);
467 /* Now, if insn's predicates don't allow our operands, put them into
470 if (! (*insn_operand_predicate
[icode
][1]) (xop0
, mode0
)
471 && mode0
!= VOIDmode
)
472 xop0
= copy_to_mode_reg (mode0
, xop0
);
474 if (! (*insn_operand_predicate
[icode
][2]) (xop1
, mode1
)
475 && mode1
!= VOIDmode
)
476 xop1
= copy_to_mode_reg (mode1
, xop1
);
478 if (! (*insn_operand_predicate
[icode
][0]) (temp
, mode
))
479 temp
= gen_reg_rtx (mode
);
481 pat
= GEN_FCN (icode
) (temp
, xop0
, xop1
);
484 /* If PAT is a multi-insn sequence, try to add an appropriate
485 REG_EQUAL note to it. If we can't because TEMP conflicts with an
486 operand, call ourselves again, this time without a target. */
487 if (GET_CODE (pat
) == SEQUENCE
488 && ! add_equal_note (pat
, temp
, binoptab
->code
, xop0
, xop1
))
490 delete_insns_since (last
);
491 return expand_binop (mode
, binoptab
, op0
, op1
, NULL_RTX
,
499 delete_insns_since (last
);
502 /* If this is a multiply, see if we can do a widening operation that
503 takes operands of this mode and makes a wider mode. */
505 if (binoptab
== smul_optab
&& GET_MODE_WIDER_MODE (mode
) != VOIDmode
506 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
507 ->handlers
[(int) GET_MODE_WIDER_MODE (mode
)].insn_code
)
508 != CODE_FOR_nothing
))
510 temp
= expand_binop (GET_MODE_WIDER_MODE (mode
),
511 unsignedp
? umul_widen_optab
: smul_widen_optab
,
512 op0
, op1
, NULL_RTX
, unsignedp
, OPTAB_DIRECT
);
516 if (GET_MODE_CLASS (mode
) == MODE_INT
)
517 return gen_lowpart (mode
, temp
);
519 return convert_to_mode (mode
, temp
, unsignedp
);
523 /* Look for a wider mode of the same class for which we think we
524 can open-code the operation. Check for a widening multiply at the
525 wider mode as well. */
527 if ((class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
528 && methods
!= OPTAB_DIRECT
&& methods
!= OPTAB_LIB
)
529 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
530 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
532 if (binoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
533 || (binoptab
== smul_optab
534 && GET_MODE_WIDER_MODE (wider_mode
) != VOIDmode
535 && (((unsignedp
? umul_widen_optab
: smul_widen_optab
)
536 ->handlers
[(int) GET_MODE_WIDER_MODE (wider_mode
)].insn_code
)
537 != CODE_FOR_nothing
)))
539 rtx xop0
= op0
, xop1
= op1
;
542 /* For certain integer operations, we need not actually extend
543 the narrow operands, as long as we will truncate
544 the results to the same narrowness. */
546 if ((binoptab
== ior_optab
|| binoptab
== and_optab
547 || binoptab
== xor_optab
548 || binoptab
== add_optab
|| binoptab
== sub_optab
549 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
550 && class == MODE_INT
)
553 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
, no_extend
);
555 /* The second operand of a shift must always be extended. */
556 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
557 no_extend
&& binoptab
!= ashl_optab
);
559 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
560 unsignedp
, OPTAB_DIRECT
);
563 if (class != MODE_INT
)
566 target
= gen_reg_rtx (mode
);
567 convert_move (target
, temp
, 0);
571 return gen_lowpart (mode
, temp
);
574 delete_insns_since (last
);
578 /* These can be done a word at a time. */
579 if ((binoptab
== and_optab
|| binoptab
== ior_optab
|| binoptab
== xor_optab
)
581 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
582 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
588 /* If TARGET is the same as one of the operands, the REG_EQUAL note
589 won't be accurate, so use a new target. */
590 if (target
== 0 || target
== op0
|| target
== op1
)
591 target
= gen_reg_rtx (mode
);
595 /* Do the actual arithmetic. */
596 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
598 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
599 rtx x
= expand_binop (word_mode
, binoptab
,
600 operand_subword_force (op0
, i
, mode
),
601 operand_subword_force (op1
, i
, mode
),
602 target_piece
, unsignedp
, next_methods
);
607 if (target_piece
!= x
)
608 emit_move_insn (target_piece
, x
);
611 insns
= get_insns ();
614 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
616 if (binoptab
->code
!= UNKNOWN
)
618 = gen_rtx (binoptab
->code
, mode
, copy_rtx (op0
), copy_rtx (op1
));
622 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
627 /* Synthesize double word shifts from single word shifts. */
628 if ((binoptab
== lshr_optab
|| binoptab
== ashl_optab
629 || binoptab
== ashr_optab
)
631 && GET_CODE (op1
) == CONST_INT
632 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
633 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
634 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
635 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
637 rtx insns
, inter
, equiv_value
;
638 rtx into_target
, outof_target
;
639 rtx into_input
, outof_input
;
640 int shift_count
, left_shift
, outof_word
;
642 /* If TARGET is the same as one of the operands, the REG_EQUAL note
643 won't be accurate, so use a new target. */
644 if (target
== 0 || target
== op0
|| target
== op1
)
645 target
= gen_reg_rtx (mode
);
649 shift_count
= INTVAL (op1
);
651 /* OUTOF_* is the word we are shifting bits away from, and
652 INTO_* is the word that we are shifting bits towards, thus
653 they differ depending on the direction of the shift and
656 left_shift
= binoptab
== ashl_optab
;
657 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
659 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
660 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
662 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
663 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
665 if (shift_count
>= BITS_PER_WORD
)
667 inter
= expand_binop (word_mode
, binoptab
,
669 GEN_INT (shift_count
- BITS_PER_WORD
),
670 into_target
, unsignedp
, next_methods
);
672 if (inter
!= 0 && inter
!= into_target
)
673 emit_move_insn (into_target
, inter
);
675 /* For a signed right shift, we must fill the word we are shifting
676 out of with copies of the sign bit. Otherwise it is zeroed. */
677 if (inter
!= 0 && binoptab
!= ashr_optab
)
678 inter
= CONST0_RTX (word_mode
);
680 inter
= expand_binop (word_mode
, binoptab
,
682 GEN_INT (BITS_PER_WORD
- 1),
683 outof_target
, unsignedp
, next_methods
);
685 if (inter
!= 0 && inter
!= outof_target
)
686 emit_move_insn (outof_target
, inter
);
691 optab reverse_unsigned_shift
, unsigned_shift
;
693 /* For a shift of less then BITS_PER_WORD, to compute the carry,
694 we must do a logical shift in the opposite direction of the
697 reverse_unsigned_shift
= (left_shift
? lshr_optab
: ashl_optab
);
699 /* For a shift of less than BITS_PER_WORD, to compute the word
700 shifted towards, we need to unsigned shift the orig value of
703 unsigned_shift
= (left_shift
? ashl_optab
: lshr_optab
);
705 carries
= expand_binop (word_mode
, reverse_unsigned_shift
,
707 GEN_INT (BITS_PER_WORD
- shift_count
),
708 0, unsignedp
, next_methods
);
713 inter
= expand_binop (word_mode
, unsigned_shift
, into_input
,
714 op1
, 0, unsignedp
, next_methods
);
717 inter
= expand_binop (word_mode
, ior_optab
, carries
, inter
,
718 into_target
, unsignedp
, next_methods
);
720 if (inter
!= 0 && inter
!= into_target
)
721 emit_move_insn (into_target
, inter
);
724 inter
= expand_binop (word_mode
, binoptab
, outof_input
,
725 op1
, outof_target
, unsignedp
, next_methods
);
727 if (inter
!= 0 && inter
!= outof_target
)
728 emit_move_insn (outof_target
, inter
);
731 insns
= get_insns ();
736 if (binoptab
->code
!= UNKNOWN
)
737 equiv_value
= gen_rtx (binoptab
->code
, mode
, op0
, op1
);
741 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
746 /* Synthesize double word rotates from single word shifts. */
747 if ((binoptab
== rotl_optab
|| binoptab
== rotr_optab
)
749 && GET_CODE (op1
) == CONST_INT
750 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
751 && ashl_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
752 && lshr_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
754 rtx insns
, equiv_value
;
755 rtx into_target
, outof_target
;
756 rtx into_input
, outof_input
;
758 int shift_count
, left_shift
, outof_word
;
760 /* If TARGET is the same as one of the operands, the REG_EQUAL note
761 won't be accurate, so use a new target. */
762 if (target
== 0 || target
== op0
|| target
== op1
)
763 target
= gen_reg_rtx (mode
);
767 shift_count
= INTVAL (op1
);
769 /* OUTOF_* is the word we are shifting bits away from, and
770 INTO_* is the word that we are shifting bits towards, thus
771 they differ depending on the direction of the shift and
774 left_shift
= (binoptab
== rotl_optab
);
775 outof_word
= left_shift
^ ! WORDS_BIG_ENDIAN
;
777 outof_target
= operand_subword (target
, outof_word
, 1, mode
);
778 into_target
= operand_subword (target
, 1 - outof_word
, 1, mode
);
780 outof_input
= operand_subword_force (op0
, outof_word
, mode
);
781 into_input
= operand_subword_force (op0
, 1 - outof_word
, mode
);
783 if (shift_count
== BITS_PER_WORD
)
785 /* This is just a word swap. */
786 emit_move_insn (outof_target
, into_input
);
787 emit_move_insn (into_target
, outof_input
);
792 rtx into_temp1
, into_temp2
, outof_temp1
, outof_temp2
;
793 rtx first_shift_count
, second_shift_count
;
794 optab reverse_unsigned_shift
, unsigned_shift
;
796 reverse_unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
797 ? lshr_optab
: ashl_optab
);
799 unsigned_shift
= (left_shift
^ (shift_count
< BITS_PER_WORD
)
800 ? ashl_optab
: lshr_optab
);
802 if (shift_count
> BITS_PER_WORD
)
804 first_shift_count
= GEN_INT (shift_count
- BITS_PER_WORD
);
805 second_shift_count
= GEN_INT (2*BITS_PER_WORD
- shift_count
);
809 first_shift_count
= GEN_INT (BITS_PER_WORD
- shift_count
);
810 second_shift_count
= GEN_INT (shift_count
);
813 into_temp1
= expand_binop (word_mode
, unsigned_shift
,
814 outof_input
, first_shift_count
,
815 NULL_RTX
, unsignedp
, next_methods
);
816 into_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
817 into_input
, second_shift_count
,
818 into_target
, unsignedp
, next_methods
);
820 if (into_temp1
!= 0 && into_temp2
!= 0)
821 inter
= expand_binop (word_mode
, ior_optab
, into_temp1
, into_temp2
,
822 into_target
, unsignedp
, next_methods
);
826 if (inter
!= 0 && inter
!= into_target
)
827 emit_move_insn (into_target
, inter
);
829 outof_temp1
= expand_binop (word_mode
, unsigned_shift
,
830 into_input
, first_shift_count
,
831 NULL_RTX
, unsignedp
, next_methods
);
832 outof_temp2
= expand_binop (word_mode
, reverse_unsigned_shift
,
833 outof_input
, second_shift_count
,
834 outof_target
, unsignedp
, next_methods
);
836 if (inter
!= 0 && outof_temp1
!= 0 && outof_temp2
!= 0)
837 inter
= expand_binop (word_mode
, ior_optab
,
838 outof_temp1
, outof_temp2
,
839 outof_target
, unsignedp
, next_methods
);
841 if (inter
!= 0 && inter
!= outof_target
)
842 emit_move_insn (outof_target
, inter
);
845 insns
= get_insns ();
850 if (binoptab
->code
!= UNKNOWN
)
851 equiv_value
= gen_rtx (binoptab
->code
, mode
, op0
, op1
);
855 /* We can't make this a no conflict block if this is a word swap,
856 because the word swap case fails if the input and output values
857 are in the same register. */
858 if (shift_count
!= BITS_PER_WORD
)
859 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv_value
);
868 /* These can be done a word at a time by propagating carries. */
869 if ((binoptab
== add_optab
|| binoptab
== sub_optab
)
871 && GET_MODE_SIZE (mode
) >= 2 * UNITS_PER_WORD
872 && binoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
875 rtx carry_tmp
= gen_reg_rtx (word_mode
);
876 optab otheroptab
= binoptab
== add_optab
? sub_optab
: add_optab
;
877 int nwords
= GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
;
878 rtx carry_in
, carry_out
;
881 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
882 value is one of those, use it. Otherwise, use 1 since it is the
883 one easiest to get. */
884 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
885 int normalizep
= STORE_FLAG_VALUE
;
890 /* Prepare the operands. */
891 xop0
= force_reg (mode
, op0
);
892 xop1
= force_reg (mode
, op1
);
894 if (target
== 0 || GET_CODE (target
) != REG
895 || target
== xop0
|| target
== xop1
)
896 target
= gen_reg_rtx (mode
);
898 /* Indicate for flow that the entire target reg is being set. */
899 if (GET_CODE (target
) == REG
)
900 emit_insn (gen_rtx (CLOBBER
, VOIDmode
, target
));
902 /* Do the actual arithmetic. */
903 for (i
= 0; i
< nwords
; i
++)
905 int index
= (WORDS_BIG_ENDIAN
? nwords
- i
- 1 : i
);
906 rtx target_piece
= operand_subword (target
, index
, 1, mode
);
907 rtx op0_piece
= operand_subword_force (xop0
, index
, mode
);
908 rtx op1_piece
= operand_subword_force (xop1
, index
, mode
);
911 /* Main add/subtract of the input operands. */
912 x
= expand_binop (word_mode
, binoptab
,
913 op0_piece
, op1_piece
,
914 target_piece
, unsignedp
, next_methods
);
920 /* Store carry from main add/subtract. */
921 carry_out
= gen_reg_rtx (word_mode
);
922 carry_out
= emit_store_flag (carry_out
,
923 binoptab
== add_optab
? LTU
: GTU
,
925 word_mode
, 1, normalizep
);
932 /* Add/subtract previous carry to main result. */
933 x
= expand_binop (word_mode
,
934 normalizep
== 1 ? binoptab
: otheroptab
,
936 target_piece
, 1, next_methods
);
939 else if (target_piece
!= x
)
940 emit_move_insn (target_piece
, x
);
944 /* THIS CODE HAS NOT BEEN TESTED. */
945 /* Get out carry from adding/subtracting carry in. */
946 carry_tmp
= emit_store_flag (carry_tmp
,
947 binoptab
== add_optab
950 word_mode
, 1, normalizep
);
952 /* Logical-ior the two poss. carry together. */
953 carry_out
= expand_binop (word_mode
, ior_optab
,
954 carry_out
, carry_tmp
,
955 carry_out
, 0, next_methods
);
961 carry_in
= carry_out
;
964 if (i
== GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
)
966 rtx temp
= emit_move_insn (target
, target
);
968 REG_NOTES (temp
) = gen_rtx (EXPR_LIST
, REG_EQUAL
,
969 gen_rtx (binoptab
->code
, mode
,
976 delete_insns_since (last
);
979 /* If we want to multiply two two-word values and have normal and widening
980 multiplies of single-word values, we can do this with three smaller
981 multiplications. Note that we do not make a REG_NO_CONFLICT block here
982 because we are not operating on one word at a time.
984 The multiplication proceeds as follows:
985 _______________________
986 [__op0_high_|__op0_low__]
987 _______________________
988 * [__op1_high_|__op1_low__]
989 _______________________________________________
990 _______________________
991 (1) [__op0_low__*__op1_low__]
992 _______________________
993 (2a) [__op0_low__*__op1_high_]
994 _______________________
995 (2b) [__op0_high_*__op1_low__]
996 _______________________
997 (3) [__op0_high_*__op1_high_]
1000 This gives a 4-word result. Since we are only interested in the
1001 lower 2 words, partial result (3) and the upper words of (2a) and
1002 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1003 calculated using non-widening multiplication.
1005 (1), however, needs to be calculated with an unsigned widening
1006 multiplication. If this operation is not directly supported we
1007 try using a signed widening multiplication and adjust the result.
1008 This adjustment works as follows:
1010 If both operands are positive then no adjustment is needed.
1012 If the operands have different signs, for example op0_low < 0 and
1013 op1_low >= 0, the instruction treats the most significant bit of
1014 op0_low as a sign bit instead of a bit with significance
1015 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1016 with 2**BITS_PER_WORD - op0_low, and two's complements the
1017 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1020 Similarly, if both operands are negative, we need to add
1021 (op0_low + op1_low) * 2**BITS_PER_WORD.
1023 We use a trick to adjust quickly. We logically shift op0_low right
1024 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1025 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1026 logical shift exists, we do an arithmetic right shift and subtract
1029 if (binoptab
== smul_optab
1030 && class == MODE_INT
1031 && GET_MODE_SIZE (mode
) == 2 * UNITS_PER_WORD
1032 && smul_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1033 && add_optab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
1034 && ((umul_widen_optab
->handlers
[(int) mode
].insn_code
1035 != CODE_FOR_nothing
)
1036 || (smul_widen_optab
->handlers
[(int) mode
].insn_code
1037 != CODE_FOR_nothing
)))
1039 int low
= (WORDS_BIG_ENDIAN
? 1 : 0);
1040 int high
= (WORDS_BIG_ENDIAN
? 0 : 1);
1041 rtx op0_high
= operand_subword_force (op0
, high
, mode
);
1042 rtx op0_low
= operand_subword_force (op0
, low
, mode
);
1043 rtx op1_high
= operand_subword_force (op1
, high
, mode
);
1044 rtx op1_low
= operand_subword_force (op1
, low
, mode
);
1049 /* If the target is the same as one of the inputs, don't use it. This
1050 prevents problems with the REG_EQUAL note. */
1051 if (target
== op0
|| target
== op1
1052 || (target
!= 0 && GET_CODE (target
) != REG
))
1055 /* Multiply the two lower words to get a double-word product.
1056 If unsigned widening multiplication is available, use that;
1057 otherwise use the signed form and compensate. */
1059 if (umul_widen_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1061 product
= expand_binop (mode
, umul_widen_optab
, op0_low
, op1_low
,
1062 target
, 1, OPTAB_DIRECT
);
1064 /* If we didn't succeed, delete everything we did so far. */
1066 delete_insns_since (last
);
1068 op0_xhigh
= op0_high
, op1_xhigh
= op1_high
;
1072 && smul_widen_optab
->handlers
[(int) mode
].insn_code
1073 != CODE_FOR_nothing
)
1075 rtx wordm1
= GEN_INT (BITS_PER_WORD
- 1);
1076 product
= expand_binop (mode
, smul_widen_optab
, op0_low
, op1_low
,
1077 target
, 1, OPTAB_DIRECT
);
1078 op0_xhigh
= expand_binop (word_mode
, lshr_optab
, op0_low
, wordm1
,
1079 NULL_RTX
, 1, next_methods
);
1081 op0_xhigh
= expand_binop (word_mode
, add_optab
, op0_high
,
1082 op0_xhigh
, op0_xhigh
, 0, next_methods
);
1085 op0_xhigh
= expand_binop (word_mode
, ashr_optab
, op0_low
, wordm1
,
1086 NULL_RTX
, 0, next_methods
);
1088 op0_xhigh
= expand_binop (word_mode
, sub_optab
, op0_high
,
1089 op0_xhigh
, op0_xhigh
, 0,
1093 op1_xhigh
= expand_binop (word_mode
, lshr_optab
, op1_low
, wordm1
,
1094 NULL_RTX
, 1, next_methods
);
1096 op1_xhigh
= expand_binop (word_mode
, add_optab
, op1_high
,
1097 op1_xhigh
, op1_xhigh
, 0, next_methods
);
1100 op1_xhigh
= expand_binop (word_mode
, ashr_optab
, op1_low
, wordm1
,
1101 NULL_RTX
, 0, next_methods
);
1103 op1_xhigh
= expand_binop (word_mode
, sub_optab
, op1_high
,
1104 op1_xhigh
, op1_xhigh
, 0,
1109 /* If we have been able to directly compute the product of the
1110 low-order words of the operands and perform any required adjustments
1111 of the operands, we proceed by trying two more multiplications
1112 and then computing the appropriate sum.
1114 We have checked above that the required addition is provided.
1115 Full-word addition will normally always succeed, especially if
1116 it is provided at all, so we don't worry about its failure. The
1117 multiplication may well fail, however, so we do handle that. */
1119 if (product
&& op0_xhigh
&& op1_xhigh
)
1121 rtx product_high
= operand_subword (product
, high
, 1, mode
);
1122 rtx temp
= expand_binop (word_mode
, binoptab
, op0_low
, op1_xhigh
,
1123 NULL_RTX
, 0, OPTAB_DIRECT
);
1126 temp
= expand_binop (word_mode
, add_optab
, temp
, product_high
,
1127 product_high
, 0, next_methods
);
1129 if (temp
!= 0 && temp
!= product_high
)
1130 emit_move_insn (product_high
, temp
);
1133 temp
= expand_binop (word_mode
, binoptab
, op1_low
, op0_xhigh
,
1134 NULL_RTX
, 0, OPTAB_DIRECT
);
1137 temp
= expand_binop (word_mode
, add_optab
, temp
,
1138 product_high
, product_high
,
1141 if (temp
!= 0 && temp
!= product_high
)
1142 emit_move_insn (product_high
, temp
);
1146 temp
= emit_move_insn (product
, product
);
1147 REG_NOTES (temp
) = gen_rtx (EXPR_LIST
, REG_EQUAL
,
1148 gen_rtx (MULT
, mode
, copy_rtx (op0
),
1156 /* If we get here, we couldn't do it for some reason even though we
1157 originally thought we could. Delete anything we've emitted in
1160 delete_insns_since (last
);
1163 /* We need to open-code the complex type operations: '+, -, * and /' */
1165 /* At this point we allow operations between two similar complex
1166 numbers, and also if one of the operands is not a complex number
1167 but rather of MODE_FLOAT or MODE_INT. However, the caller
1168 must make sure that the MODE of the non-complex operand matches
1169 the SUBMODE of the complex operand. */
1171 if (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
)
1173 rtx real0
= 0, imag0
= 0;
1174 rtx real1
= 0, imag1
= 0;
1175 rtx realr
, imagr
, res
;
1180 /* Find the correct mode for the real and imaginary parts */
1181 enum machine_mode submode
1182 = mode_for_size (GET_MODE_UNIT_SIZE (mode
) * BITS_PER_UNIT
,
1183 class == MODE_COMPLEX_INT
? MODE_INT
: MODE_FLOAT
,
1186 if (submode
== BLKmode
)
1190 target
= gen_reg_rtx (mode
);
1194 realr
= gen_realpart (submode
, target
);
1195 imagr
= gen_imagpart (submode
, target
);
1197 if (GET_MODE (op0
) == mode
)
1199 real0
= gen_realpart (submode
, op0
);
1200 imag0
= gen_imagpart (submode
, op0
);
1205 if (GET_MODE (op1
) == mode
)
1207 real1
= gen_realpart (submode
, op1
);
1208 imag1
= gen_imagpart (submode
, op1
);
1213 if (real0
== 0 || real1
== 0 || ! (imag0
!= 0|| imag1
!= 0))
1216 switch (binoptab
->code
)
1219 /* (a+ib) + (c+id) = (a+c) + i(b+d) */
1221 /* (a+ib) - (c+id) = (a-c) + i(b-d) */
1222 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1223 realr
, unsignedp
, methods
);
1227 else if (res
!= realr
)
1228 emit_move_insn (realr
, res
);
1231 res
= expand_binop (submode
, binoptab
, imag0
, imag1
,
1232 imagr
, unsignedp
, methods
);
1235 else if (binoptab
->code
== MINUS
)
1236 res
= expand_unop (submode
, neg_optab
, imag1
, imagr
, unsignedp
);
1242 else if (res
!= imagr
)
1243 emit_move_insn (imagr
, res
);
1249 /* (a+ib) * (c+id) = (ac-bd) + i(ad+cb) */
1255 /* Don't fetch these from memory more than once. */
1256 real0
= force_reg (submode
, real0
);
1257 real1
= force_reg (submode
, real1
);
1258 imag0
= force_reg (submode
, imag0
);
1259 imag1
= force_reg (submode
, imag1
);
1261 temp1
= expand_binop (submode
, binoptab
, real0
, real1
, NULL_RTX
,
1262 unsignedp
, methods
);
1264 temp2
= expand_binop (submode
, binoptab
, imag0
, imag1
, NULL_RTX
,
1265 unsignedp
, methods
);
1267 if (temp1
== 0 || temp2
== 0)
1270 res
= expand_binop (submode
, sub_optab
, temp1
, temp2
,
1271 realr
, unsignedp
, methods
);
1275 else if (res
!= realr
)
1276 emit_move_insn (realr
, res
);
1278 temp1
= expand_binop (submode
, binoptab
, real0
, imag1
,
1279 NULL_RTX
, unsignedp
, methods
);
1281 temp2
= expand_binop (submode
, binoptab
, real1
, imag0
,
1282 NULL_RTX
, unsignedp
, methods
);
1284 if (temp1
== 0 || temp2
== 0)
1287 res
= expand_binop (submode
, add_optab
, temp1
, temp2
,
1288 imagr
, unsignedp
, methods
);
1292 else if (res
!= imagr
)
1293 emit_move_insn (imagr
, res
);
1299 /* Don't fetch these from memory more than once. */
1300 real0
= force_reg (submode
, real0
);
1301 real1
= force_reg (submode
, real1
);
1303 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1304 realr
, unsignedp
, methods
);
1307 else if (res
!= realr
)
1308 emit_move_insn (realr
, res
);
1311 res
= expand_binop (submode
, binoptab
,
1312 real1
, imag0
, imagr
, unsignedp
, methods
);
1314 res
= expand_binop (submode
, binoptab
,
1315 real0
, imag1
, imagr
, unsignedp
, methods
);
1319 else if (res
!= imagr
)
1320 emit_move_insn (imagr
, res
);
1327 /* (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) */
1331 /* (a+ib) / (c+i0) = (a/c) + i(b/c) */
1333 /* Don't fetch these from memory more than once. */
1334 real1
= force_reg (submode
, real1
);
1336 /* Simply divide the real and imaginary parts by `c' */
1337 if (class == MODE_COMPLEX_FLOAT
)
1338 res
= expand_binop (submode
, binoptab
, real0
, real1
,
1339 realr
, unsignedp
, methods
);
1341 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1342 real0
, real1
, realr
, unsignedp
);
1346 else if (res
!= realr
)
1347 emit_move_insn (realr
, res
);
1349 if (class == MODE_COMPLEX_FLOAT
)
1350 res
= expand_binop (submode
, binoptab
, imag0
, real1
,
1351 imagr
, unsignedp
, methods
);
1353 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1354 imag0
, real1
, imagr
, unsignedp
);
1358 else if (res
!= imagr
)
1359 emit_move_insn (imagr
, res
);
1365 /* Divisor is of complex type:
1372 /* Don't fetch these from memory more than once. */
1373 real0
= force_reg (submode
, real0
);
1374 real1
= force_reg (submode
, real1
);
1377 imag0
= force_reg (submode
, imag0
);
1379 imag1
= force_reg (submode
, imag1
);
1381 /* Divisor: c*c + d*d */
1382 temp1
= expand_binop (submode
, smul_optab
, real1
, real1
,
1383 NULL_RTX
, unsignedp
, methods
);
1385 temp2
= expand_binop (submode
, smul_optab
, imag1
, imag1
,
1386 NULL_RTX
, unsignedp
, methods
);
1388 if (temp1
== 0 || temp2
== 0)
1391 divisor
= expand_binop (submode
, add_optab
, temp1
, temp2
,
1392 NULL_RTX
, unsignedp
, methods
);
1398 /* ((a)(c-id))/divisor */
1399 /* (a+i0) / (c+id) = (ac/(cc+dd)) + i(-ad/(cc+dd)) */
1401 /* Calculate the dividend */
1402 real_t
= expand_binop (submode
, smul_optab
, real0
, real1
,
1403 NULL_RTX
, unsignedp
, methods
);
1405 imag_t
= expand_binop (submode
, smul_optab
, real0
, imag1
,
1406 NULL_RTX
, unsignedp
, methods
);
1408 if (real_t
== 0 || imag_t
== 0)
1411 imag_t
= expand_unop (submode
, neg_optab
, imag_t
,
1412 NULL_RTX
, unsignedp
);
1416 /* ((a+ib)(c-id))/divider */
1417 /* Calculate the dividend */
1418 temp1
= expand_binop (submode
, smul_optab
, real0
, real1
,
1419 NULL_RTX
, unsignedp
, methods
);
1421 temp2
= expand_binop (submode
, smul_optab
, imag0
, imag1
,
1422 NULL_RTX
, unsignedp
, methods
);
1424 if (temp1
== 0 || temp2
== 0)
1427 real_t
= expand_binop (submode
, add_optab
, temp1
, temp2
,
1428 NULL_RTX
, unsignedp
, methods
);
1430 temp1
= expand_binop (submode
, smul_optab
, imag0
, real1
,
1431 NULL_RTX
, unsignedp
, methods
);
1433 temp2
= expand_binop (submode
, smul_optab
, real0
, imag1
,
1434 NULL_RTX
, unsignedp
, methods
);
1436 if (temp1
== 0 || temp2
== 0)
1439 imag_t
= expand_binop (submode
, sub_optab
, temp1
, temp2
,
1440 NULL_RTX
, unsignedp
, methods
);
1442 if (real_t
== 0 || imag_t
== 0)
1446 if (class == MODE_COMPLEX_FLOAT
)
1447 res
= expand_binop (submode
, binoptab
, real_t
, divisor
,
1448 realr
, unsignedp
, methods
);
1450 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1451 real_t
, divisor
, realr
, unsignedp
);
1455 else if (res
!= realr
)
1456 emit_move_insn (realr
, res
);
1458 if (class == MODE_COMPLEX_FLOAT
)
1459 res
= expand_binop (submode
, binoptab
, imag_t
, divisor
,
1460 imagr
, unsignedp
, methods
);
1462 res
= expand_divmod (0, TRUNC_DIV_EXPR
, submode
,
1463 imag_t
, divisor
, imagr
, unsignedp
);
1467 else if (res
!= imagr
)
1468 emit_move_insn (imagr
, res
);
1483 if (binoptab
->code
!= UNKNOWN
)
1485 = gen_rtx (binoptab
->code
, mode
, copy_rtx (op0
), copy_rtx (op1
));
1489 emit_no_conflict_block (seq
, target
, op0
, op1
, equiv_value
);
1495 /* It can't be open-coded in this mode.
1496 Use a library call if one is available and caller says that's ok. */
1498 if (binoptab
->handlers
[(int) mode
].libfunc
1499 && (methods
== OPTAB_LIB
|| methods
== OPTAB_LIB_WIDEN
))
1502 rtx funexp
= binoptab
->handlers
[(int) mode
].libfunc
;
1504 enum machine_mode op1_mode
= mode
;
1511 op1_mode
= word_mode
;
1512 /* Specify unsigned here,
1513 since negative shift counts are meaningless. */
1514 op1x
= convert_to_mode (word_mode
, op1
, 1);
1517 if (GET_MODE (op0
) != VOIDmode
1518 && GET_MODE (op0
) != mode
)
1519 op0
= convert_to_mode (mode
, op0
, unsignedp
);
1521 /* Pass 1 for NO_QUEUE so we don't lose any increments
1522 if the libcall is cse'd or moved. */
1523 value
= emit_library_call_value (binoptab
->handlers
[(int) mode
].libfunc
,
1524 NULL_RTX
, 1, mode
, 2,
1525 op0
, mode
, op1x
, op1_mode
);
1527 insns
= get_insns ();
1530 target
= gen_reg_rtx (mode
);
1531 emit_libcall_block (insns
, target
, value
,
1532 gen_rtx (binoptab
->code
, mode
, op0
, op1
));
1537 delete_insns_since (last
);
1539 /* It can't be done in this mode. Can we do it in a wider mode? */
1541 if (! (methods
== OPTAB_WIDEN
|| methods
== OPTAB_LIB_WIDEN
1542 || methods
== OPTAB_MUST_WIDEN
))
1544 /* Caller says, don't even try. */
1545 delete_insns_since (entry_last
);
1549 /* Compute the value of METHODS to pass to recursive calls.
1550 Don't allow widening to be tried recursively. */
1552 methods
= (methods
== OPTAB_LIB_WIDEN
? OPTAB_LIB
: OPTAB_DIRECT
);
1554 /* Look for a wider mode of the same class for which it appears we can do
1557 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1559 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1560 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1562 if ((binoptab
->handlers
[(int) wider_mode
].insn_code
1563 != CODE_FOR_nothing
)
1564 || (methods
== OPTAB_LIB
1565 && binoptab
->handlers
[(int) wider_mode
].libfunc
))
1567 rtx xop0
= op0
, xop1
= op1
;
1570 /* For certain integer operations, we need not actually extend
1571 the narrow operands, as long as we will truncate
1572 the results to the same narrowness. */
1574 if ((binoptab
== ior_optab
|| binoptab
== and_optab
1575 || binoptab
== xor_optab
1576 || binoptab
== add_optab
|| binoptab
== sub_optab
1577 || binoptab
== smul_optab
|| binoptab
== ashl_optab
)
1578 && class == MODE_INT
)
1581 xop0
= widen_operand (xop0
, wider_mode
, mode
,
1582 unsignedp
, no_extend
);
1584 /* The second operand of a shift must always be extended. */
1585 xop1
= widen_operand (xop1
, wider_mode
, mode
, unsignedp
,
1586 no_extend
&& binoptab
!= ashl_optab
);
1588 temp
= expand_binop (wider_mode
, binoptab
, xop0
, xop1
, NULL_RTX
,
1589 unsignedp
, methods
);
1592 if (class != MODE_INT
)
1595 target
= gen_reg_rtx (mode
);
1596 convert_move (target
, temp
, 0);
1600 return gen_lowpart (mode
, temp
);
1603 delete_insns_since (last
);
1608 delete_insns_since (entry_last
);
1612 /* Expand a binary operator which has both signed and unsigned forms.
1613 UOPTAB is the optab for unsigned operations, and SOPTAB is for
1616 If we widen unsigned operands, we may use a signed wider operation instead
1617 of an unsigned wider operation, since the result would be the same. */
1620 sign_expand_binop (mode
, uoptab
, soptab
, op0
, op1
, target
, unsignedp
, methods
)
1621 enum machine_mode mode
;
1622 optab uoptab
, soptab
;
1623 rtx op0
, op1
, target
;
1625 enum optab_methods methods
;
1628 optab direct_optab
= unsignedp
? uoptab
: soptab
;
1629 struct optab wide_soptab
;
1631 /* Do it without widening, if possible. */
1632 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
,
1633 unsignedp
, OPTAB_DIRECT
);
1634 if (temp
|| methods
== OPTAB_DIRECT
)
1637 /* Try widening to a signed int. Make a fake signed optab that
1638 hides any signed insn for direct use. */
1639 wide_soptab
= *soptab
;
1640 wide_soptab
.handlers
[(int) mode
].insn_code
= CODE_FOR_nothing
;
1641 wide_soptab
.handlers
[(int) mode
].libfunc
= 0;
1643 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1644 unsignedp
, OPTAB_WIDEN
);
1646 /* For unsigned operands, try widening to an unsigned int. */
1647 if (temp
== 0 && unsignedp
)
1648 temp
= expand_binop (mode
, uoptab
, op0
, op1
, target
,
1649 unsignedp
, OPTAB_WIDEN
);
1650 if (temp
|| methods
== OPTAB_WIDEN
)
1653 /* Use the right width lib call if that exists. */
1654 temp
= expand_binop (mode
, direct_optab
, op0
, op1
, target
, unsignedp
, OPTAB_LIB
);
1655 if (temp
|| methods
== OPTAB_LIB
)
1658 /* Must widen and use a lib call, use either signed or unsigned. */
1659 temp
= expand_binop (mode
, &wide_soptab
, op0
, op1
, target
,
1660 unsignedp
, methods
);
1664 return expand_binop (mode
, uoptab
, op0
, op1
, target
,
1665 unsignedp
, methods
);
1669 /* Generate code to perform an operation specified by BINOPTAB
1670 on operands OP0 and OP1, with two results to TARG1 and TARG2.
1671 We assume that the order of the operands for the instruction
1672 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
1673 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
1675 Either TARG0 or TARG1 may be zero, but what that means is that
1676 that result is not actually wanted. We will generate it into
1677 a dummy pseudo-reg and discard it. They may not both be zero.
1679 Returns 1 if this operation can be performed; 0 if not. */
1682 expand_twoval_binop (binoptab
, op0
, op1
, targ0
, targ1
, unsignedp
)
1688 enum machine_mode mode
= GET_MODE (targ0
? targ0
: targ1
);
1689 enum mode_class
class;
1690 enum machine_mode wider_mode
;
1691 rtx entry_last
= get_last_insn ();
1694 class = GET_MODE_CLASS (mode
);
1696 op0
= protect_from_queue (op0
, 0);
1697 op1
= protect_from_queue (op1
, 0);
1701 op0
= force_not_mem (op0
);
1702 op1
= force_not_mem (op1
);
1705 /* If we are inside an appropriately-short loop and one operand is an
1706 expensive constant, force it into a register. */
1707 if (CONSTANT_P (op0
) && preserve_subexpressions_p ()
1708 && rtx_cost (op0
, binoptab
->code
) > 2)
1709 op0
= force_reg (mode
, op0
);
1711 if (CONSTANT_P (op1
) && preserve_subexpressions_p ()
1712 && rtx_cost (op1
, binoptab
->code
) > 2)
1713 op1
= force_reg (mode
, op1
);
1716 targ0
= protect_from_queue (targ0
, 1);
1718 targ0
= gen_reg_rtx (mode
);
1720 targ1
= protect_from_queue (targ1
, 1);
1722 targ1
= gen_reg_rtx (mode
);
1724 /* Record where to go back to if we fail. */
1725 last
= get_last_insn ();
1727 if (binoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1729 int icode
= (int) binoptab
->handlers
[(int) mode
].insn_code
;
1730 enum machine_mode mode0
= insn_operand_mode
[icode
][1];
1731 enum machine_mode mode1
= insn_operand_mode
[icode
][2];
1733 rtx xop0
= op0
, xop1
= op1
;
1735 /* In case this insn wants input operands in modes different from the
1736 result, convert the operands. */
1737 if (GET_MODE (op0
) != VOIDmode
&& GET_MODE (op0
) != mode0
)
1738 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
1740 if (GET_MODE (op1
) != VOIDmode
&& GET_MODE (op1
) != mode1
)
1741 xop1
= convert_to_mode (mode1
, xop1
, unsignedp
);
1743 /* Now, if insn doesn't accept these operands, put them into pseudos. */
1744 if (! (*insn_operand_predicate
[icode
][1]) (xop0
, mode0
))
1745 xop0
= copy_to_mode_reg (mode0
, xop0
);
1747 if (! (*insn_operand_predicate
[icode
][2]) (xop1
, mode1
))
1748 xop1
= copy_to_mode_reg (mode1
, xop1
);
1750 /* We could handle this, but we should always be called with a pseudo
1751 for our targets and all insns should take them as outputs. */
1752 if (! (*insn_operand_predicate
[icode
][0]) (targ0
, mode
)
1753 || ! (*insn_operand_predicate
[icode
][3]) (targ1
, mode
))
1756 pat
= GEN_FCN (icode
) (targ0
, xop0
, xop1
, targ1
);
1763 delete_insns_since (last
);
1766 /* It can't be done in this mode. Can we do it in a wider mode? */
1768 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1770 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1771 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1773 if (binoptab
->handlers
[(int) wider_mode
].insn_code
1774 != CODE_FOR_nothing
)
1776 register rtx t0
= gen_reg_rtx (wider_mode
);
1777 register rtx t1
= gen_reg_rtx (wider_mode
);
1779 if (expand_twoval_binop (binoptab
,
1780 convert_modes (wider_mode
, mode
, op0
,
1782 convert_modes (wider_mode
, mode
, op1
,
1786 convert_move (targ0
, t0
, unsignedp
);
1787 convert_move (targ1
, t1
, unsignedp
);
1791 delete_insns_since (last
);
1796 delete_insns_since (entry_last
);
1800 /* Generate code to perform an operation specified by UNOPTAB
1801 on operand OP0, with result having machine-mode MODE.
1803 UNSIGNEDP is for the case where we have to widen the operands
1804 to perform the operation. It says to use zero-extension.
1806 If TARGET is nonzero, the value
1807 is generated there, if it is convenient to do so.
1808 In all cases an rtx is returned for the locus of the value;
1809 this may or may not be TARGET. */
1812 expand_unop (mode
, unoptab
, op0
, target
, unsignedp
)
1813 enum machine_mode mode
;
1819 enum mode_class
class;
1820 enum machine_mode wider_mode
;
1822 rtx last
= get_last_insn ();
1825 class = GET_MODE_CLASS (mode
);
1827 op0
= protect_from_queue (op0
, 0);
1831 op0
= force_not_mem (op0
);
1835 target
= protect_from_queue (target
, 1);
1837 if (unoptab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
1839 int icode
= (int) unoptab
->handlers
[(int) mode
].insn_code
;
1840 enum machine_mode mode0
= insn_operand_mode
[icode
][1];
1846 temp
= gen_reg_rtx (mode
);
1848 if (GET_MODE (xop0
) != VOIDmode
1849 && GET_MODE (xop0
) != mode0
)
1850 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
1852 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
1854 if (! (*insn_operand_predicate
[icode
][1]) (xop0
, mode0
))
1855 xop0
= copy_to_mode_reg (mode0
, xop0
);
1857 if (! (*insn_operand_predicate
[icode
][0]) (temp
, mode
))
1858 temp
= gen_reg_rtx (mode
);
1860 pat
= GEN_FCN (icode
) (temp
, xop0
);
1863 if (GET_CODE (pat
) == SEQUENCE
1864 && ! add_equal_note (pat
, temp
, unoptab
->code
, xop0
, NULL_RTX
))
1866 delete_insns_since (last
);
1867 return expand_unop (mode
, unoptab
, op0
, NULL_RTX
, unsignedp
);
1875 delete_insns_since (last
);
1878 /* It can't be done in this mode. Can we open-code it in a wider mode? */
1880 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
1881 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
1882 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
1884 if (unoptab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
1888 /* For certain operations, we need not actually extend
1889 the narrow operand, as long as we will truncate the
1890 results to the same narrowness. */
1892 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
1893 (unoptab
== neg_optab
1894 || unoptab
== one_cmpl_optab
)
1895 && class == MODE_INT
);
1897 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
1902 if (class != MODE_INT
)
1905 target
= gen_reg_rtx (mode
);
1906 convert_move (target
, temp
, 0);
1910 return gen_lowpart (mode
, temp
);
1913 delete_insns_since (last
);
1917 /* These can be done a word at a time. */
1918 if (unoptab
== one_cmpl_optab
1919 && class == MODE_INT
1920 && GET_MODE_SIZE (mode
) > UNITS_PER_WORD
1921 && unoptab
->handlers
[(int) word_mode
].insn_code
!= CODE_FOR_nothing
)
1926 if (target
== 0 || target
== op0
)
1927 target
= gen_reg_rtx (mode
);
1931 /* Do the actual arithmetic. */
1932 for (i
= 0; i
< GET_MODE_BITSIZE (mode
) / BITS_PER_WORD
; i
++)
1934 rtx target_piece
= operand_subword (target
, i
, 1, mode
);
1935 rtx x
= expand_unop (word_mode
, unoptab
,
1936 operand_subword_force (op0
, i
, mode
),
1937 target_piece
, unsignedp
);
1938 if (target_piece
!= x
)
1939 emit_move_insn (target_piece
, x
);
1942 insns
= get_insns ();
1945 emit_no_conflict_block (insns
, target
, op0
, NULL_RTX
,
1946 gen_rtx (unoptab
->code
, mode
, copy_rtx (op0
)));
1950 /* Open-code the complex negation operation. */
1951 else if (unoptab
== neg_optab
1952 && (class == MODE_COMPLEX_FLOAT
|| class == MODE_COMPLEX_INT
))
1958 /* Find the correct mode for the real and imaginary parts */
1959 enum machine_mode submode
1960 = mode_for_size (GET_MODE_UNIT_SIZE (mode
) * BITS_PER_UNIT
,
1961 class == MODE_COMPLEX_INT
? MODE_INT
: MODE_FLOAT
,
1964 if (submode
== BLKmode
)
1968 target
= gen_reg_rtx (mode
);
1972 target_piece
= gen_imagpart (submode
, target
);
1973 x
= expand_unop (submode
, unoptab
,
1974 gen_imagpart (submode
, op0
),
1975 target_piece
, unsignedp
);
1976 if (target_piece
!= x
)
1977 emit_move_insn (target_piece
, x
);
1979 target_piece
= gen_realpart (submode
, target
);
1980 x
= expand_unop (submode
, unoptab
,
1981 gen_realpart (submode
, op0
),
1982 target_piece
, unsignedp
);
1983 if (target_piece
!= x
)
1984 emit_move_insn (target_piece
, x
);
1989 emit_no_conflict_block (seq
, target
, op0
, 0,
1990 gen_rtx (unoptab
->code
, mode
, copy_rtx (op0
)));
1994 /* Now try a library call in this mode. */
1995 if (unoptab
->handlers
[(int) mode
].libfunc
)
1998 rtx funexp
= unoptab
->handlers
[(int) mode
].libfunc
;
2003 /* Pass 1 for NO_QUEUE so we don't lose any increments
2004 if the libcall is cse'd or moved. */
2005 value
= emit_library_call_value (unoptab
->handlers
[(int) mode
].libfunc
,
2006 NULL_RTX
, 1, mode
, 1, op0
, mode
);
2007 insns
= get_insns ();
2010 target
= gen_reg_rtx (mode
);
2011 emit_libcall_block (insns
, target
, value
,
2012 gen_rtx (unoptab
->code
, mode
, op0
));
2017 /* It can't be done in this mode. Can we do it in a wider mode? */
2019 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2021 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2022 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2024 if ((unoptab
->handlers
[(int) wider_mode
].insn_code
2025 != CODE_FOR_nothing
)
2026 || unoptab
->handlers
[(int) wider_mode
].libfunc
)
2030 /* For certain operations, we need not actually extend
2031 the narrow operand, as long as we will truncate the
2032 results to the same narrowness. */
2034 xop0
= widen_operand (xop0
, wider_mode
, mode
, unsignedp
,
2035 (unoptab
== neg_optab
2036 || unoptab
== one_cmpl_optab
)
2037 && class == MODE_INT
);
2039 temp
= expand_unop (wider_mode
, unoptab
, xop0
, NULL_RTX
,
2044 if (class != MODE_INT
)
2047 target
= gen_reg_rtx (mode
);
2048 convert_move (target
, temp
, 0);
2052 return gen_lowpart (mode
, temp
);
2055 delete_insns_since (last
);
2060 /* If there is no negate operation, try doing a subtract from zero.
2061 The US Software GOFAST library needs this. */
2062 if (unoptab
== neg_optab
)
2065 temp
= expand_binop (mode
, sub_optab
, CONST0_RTX (mode
), op0
,
2066 target
, unsignedp
, OPTAB_LIB_WIDEN
);
2074 /* Emit code to compute the absolute value of OP0, with result to
2075 TARGET if convenient. (TARGET may be 0.) The return value says
2076 where the result actually is to be found.
2078 MODE is the mode of the operand; the mode of the result is
2079 different but can be deduced from MODE.
2081 UNSIGNEDP is relevant if extension is needed. */
2084 expand_abs (mode
, op0
, target
, unsignedp
, safe
)
2085 enum machine_mode mode
;
2093 /* First try to do it with a special abs instruction. */
2094 temp
= expand_unop (mode
, abs_optab
, op0
, target
, 0);
2098 /* If this machine has expensive jumps, we can do integer absolute
2099 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
2100 where W is the width of MODE. */
2102 if (GET_MODE_CLASS (mode
) == MODE_INT
&& BRANCH_COST
>= 2)
2104 rtx extended
= expand_shift (RSHIFT_EXPR
, mode
, op0
,
2105 size_int (GET_MODE_BITSIZE (mode
) - 1),
2108 temp
= expand_binop (mode
, xor_optab
, extended
, op0
, target
, 0,
2111 temp
= expand_binop (mode
, sub_optab
, temp
, extended
, target
, 0,
2118 /* If that does not win, use conditional jump and negate. */
2119 op1
= gen_label_rtx ();
2120 if (target
== 0 || ! safe
2121 || GET_MODE (target
) != mode
2122 || (GET_CODE (target
) == MEM
&& MEM_VOLATILE_P (target
))
2123 || (GET_CODE (target
) == REG
2124 && REGNO (target
) < FIRST_PSEUDO_REGISTER
))
2125 target
= gen_reg_rtx (mode
);
2127 emit_move_insn (target
, op0
);
2130 /* If this mode is an integer too wide to compare properly,
2131 compare word by word. Rely on CSE to optimize constant cases. */
2132 if (GET_MODE_CLASS (mode
) == MODE_INT
&& ! can_compare_p (mode
))
2133 do_jump_by_parts_greater_rtx (mode
, 0, target
, const0_rtx
,
2137 temp
= compare_from_rtx (target
, CONST0_RTX (mode
), GE
, 0, mode
,
2139 if (temp
== const1_rtx
)
2141 else if (temp
!= const0_rtx
)
2143 if (bcc_gen_fctn
[(int) GET_CODE (temp
)] != 0)
2144 emit_jump_insn ((*bcc_gen_fctn
[(int) GET_CODE (temp
)]) (op1
));
2150 op0
= expand_unop (mode
, neg_optab
, target
, target
, 0);
2152 emit_move_insn (target
, op0
);
2158 /* Emit code to compute the absolute value of OP0, with result to
2159 TARGET if convenient. (TARGET may be 0.) The return value says
2160 where the result actually is to be found.
2162 MODE is the mode of the operand; the mode of the result is
2163 different but can be deduced from MODE.
2165 UNSIGNEDP is relevant for complex integer modes. */
2168 expand_complex_abs (mode
, op0
, target
, unsignedp
)
2169 enum machine_mode mode
;
2174 enum mode_class
class = GET_MODE_CLASS (mode
);
2175 enum machine_mode wider_mode
;
2177 rtx entry_last
= get_last_insn ();
2181 /* Find the correct mode for the real and imaginary parts. */
2182 enum machine_mode submode
2183 = mode_for_size (GET_MODE_UNIT_SIZE (mode
) * BITS_PER_UNIT
,
2184 class == MODE_COMPLEX_INT
? MODE_INT
: MODE_FLOAT
,
2187 if (submode
== BLKmode
)
2190 op0
= protect_from_queue (op0
, 0);
2194 op0
= force_not_mem (op0
);
2197 last
= get_last_insn ();
2200 target
= protect_from_queue (target
, 1);
2202 if (abs_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2204 int icode
= (int) abs_optab
->handlers
[(int) mode
].insn_code
;
2205 enum machine_mode mode0
= insn_operand_mode
[icode
][1];
2211 temp
= gen_reg_rtx (submode
);
2213 if (GET_MODE (xop0
) != VOIDmode
2214 && GET_MODE (xop0
) != mode0
)
2215 xop0
= convert_to_mode (mode0
, xop0
, unsignedp
);
2217 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
2219 if (! (*insn_operand_predicate
[icode
][1]) (xop0
, mode0
))
2220 xop0
= copy_to_mode_reg (mode0
, xop0
);
2222 if (! (*insn_operand_predicate
[icode
][0]) (temp
, submode
))
2223 temp
= gen_reg_rtx (submode
);
2225 pat
= GEN_FCN (icode
) (temp
, xop0
);
2228 if (GET_CODE (pat
) == SEQUENCE
2229 && ! add_equal_note (pat
, temp
, abs_optab
->code
, xop0
, NULL_RTX
))
2231 delete_insns_since (last
);
2232 return expand_unop (mode
, abs_optab
, op0
, NULL_RTX
, unsignedp
);
2240 delete_insns_since (last
);
2243 /* It can't be done in this mode. Can we open-code it in a wider mode? */
2245 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2246 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2248 if (abs_optab
->handlers
[(int) wider_mode
].insn_code
!= CODE_FOR_nothing
)
2252 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
2253 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
2257 if (class != MODE_COMPLEX_INT
)
2260 target
= gen_reg_rtx (submode
);
2261 convert_move (target
, temp
, 0);
2265 return gen_lowpart (submode
, temp
);
2268 delete_insns_since (last
);
2272 /* Open-code the complex absolute-value operation
2273 if we can open-code sqrt. Otherwise it's not worth while. */
2274 if (sqrt_optab
->handlers
[(int) submode
].insn_code
!= CODE_FOR_nothing
)
2276 rtx real
, imag
, total
;
2278 real
= gen_realpart (submode
, op0
);
2279 imag
= gen_imagpart (submode
, op0
);
2281 /* Square both parts. */
2282 real
= expand_mult (submode
, real
, real
, NULL_RTX
, 0);
2283 imag
= expand_mult (submode
, imag
, imag
, NULL_RTX
, 0);
2285 /* Sum the parts. */
2286 total
= expand_binop (submode
, add_optab
, real
, imag
, NULL_RTX
,
2287 0, OPTAB_LIB_WIDEN
);
2289 /* Get sqrt in TARGET. Set TARGET to where the result is. */
2290 target
= expand_unop (submode
, sqrt_optab
, total
, target
, 0);
2292 delete_insns_since (last
);
2297 /* Now try a library call in this mode. */
2298 if (abs_optab
->handlers
[(int) mode
].libfunc
)
2301 rtx funexp
= abs_optab
->handlers
[(int) mode
].libfunc
;
2306 /* Pass 1 for NO_QUEUE so we don't lose any increments
2307 if the libcall is cse'd or moved. */
2308 value
= emit_library_call_value (abs_optab
->handlers
[(int) mode
].libfunc
,
2309 NULL_RTX
, 1, submode
, 1, op0
, mode
);
2310 insns
= get_insns ();
2313 target
= gen_reg_rtx (submode
);
2314 emit_libcall_block (insns
, target
, value
,
2315 gen_rtx (abs_optab
->code
, mode
, op0
));
2320 /* It can't be done in this mode. Can we do it in a wider mode? */
2322 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2323 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2325 if ((abs_optab
->handlers
[(int) wider_mode
].insn_code
2326 != CODE_FOR_nothing
)
2327 || abs_optab
->handlers
[(int) wider_mode
].libfunc
)
2331 xop0
= convert_modes (wider_mode
, mode
, xop0
, unsignedp
);
2333 temp
= expand_complex_abs (wider_mode
, xop0
, NULL_RTX
, unsignedp
);
2337 if (class != MODE_COMPLEX_INT
)
2340 target
= gen_reg_rtx (submode
);
2341 convert_move (target
, temp
, 0);
2345 return gen_lowpart (submode
, temp
);
2348 delete_insns_since (last
);
2352 delete_insns_since (entry_last
);
2356 /* Generate an instruction whose insn-code is INSN_CODE,
2357 with two operands: an output TARGET and an input OP0.
2358 TARGET *must* be nonzero, and the output is always stored there.
2359 CODE is an rtx code such that (CODE OP0) is an rtx that describes
2360 the value that is stored into TARGET. */
2363 emit_unop_insn (icode
, target
, op0
, code
)
2370 enum machine_mode mode0
= insn_operand_mode
[icode
][1];
2373 temp
= target
= protect_from_queue (target
, 1);
2375 op0
= protect_from_queue (op0
, 0);
2377 /* Sign extension from memory is often done specially on RISC
2378 machines, so forcing into a register here can pessimize code. */
2379 if (flag_force_mem
&& code
!= SIGN_EXTEND
)
2380 op0
= force_not_mem (op0
);
2382 /* Now, if insn does not accept our operands, put them into pseudos. */
2384 if (! (*insn_operand_predicate
[icode
][1]) (op0
, mode0
))
2385 op0
= copy_to_mode_reg (mode0
, op0
);
2387 if (! (*insn_operand_predicate
[icode
][0]) (temp
, GET_MODE (temp
))
2388 || (flag_force_mem
&& GET_CODE (temp
) == MEM
))
2389 temp
= gen_reg_rtx (GET_MODE (temp
));
2391 pat
= GEN_FCN (icode
) (temp
, op0
);
2393 if (GET_CODE (pat
) == SEQUENCE
&& code
!= UNKNOWN
)
2394 add_equal_note (pat
, temp
, code
, op0
, NULL_RTX
);
2399 emit_move_insn (target
, temp
);
2402 /* Emit code to perform a series of operations on a multi-word quantity, one
2405 Such a block is preceded by a CLOBBER of the output, consists of multiple
2406 insns, each setting one word of the output, and followed by a SET copying
2407 the output to itself.
2409 Each of the insns setting words of the output receives a REG_NO_CONFLICT
2410 note indicating that it doesn't conflict with the (also multi-word)
2411 inputs. The entire block is surrounded by REG_LIBCALL and REG_RETVAL
2414 INSNS is a block of code generated to perform the operation, not including
2415 the CLOBBER and final copy. All insns that compute intermediate values
2416 are first emitted, followed by the block as described above.
2418 TARGET, OP0, and OP1 are the output and inputs of the operations,
2419 respectively. OP1 may be zero for a unary operation.
2421 EQUIV, if non-zero, is an expression to be placed into a REG_EQUAL note
2424 If TARGET is not a register, INSNS is simply emitted with no special
2425 processing. Likewise if anything in INSNS is not an INSN or if
2426 there is a libcall block inside INSNS.
2428 The final insn emitted is returned. */
2431 emit_no_conflict_block (insns
, target
, op0
, op1
, equiv
)
2437 rtx prev
, next
, first
, last
, insn
;
2439 if (GET_CODE (target
) != REG
|| reload_in_progress
)
2440 return emit_insns (insns
);
2442 for (insn
= insns
; insn
; insn
= NEXT_INSN (insn
))
2443 if (GET_CODE (insn
) != INSN
2444 || find_reg_note (insn
, REG_LIBCALL
, NULL_RTX
))
2445 return emit_insns (insns
);
2447 /* First emit all insns that do not store into words of the output and remove
2448 these from the list. */
2449 for (insn
= insns
; insn
; insn
= next
)
2454 next
= NEXT_INSN (insn
);
2456 if (GET_CODE (PATTERN (insn
)) == SET
)
2457 set
= PATTERN (insn
);
2458 else if (GET_CODE (PATTERN (insn
)) == PARALLEL
)
2460 for (i
= 0; i
< XVECLEN (PATTERN (insn
), 0); i
++)
2461 if (GET_CODE (XVECEXP (PATTERN (insn
), 0, i
)) == SET
)
2463 set
= XVECEXP (PATTERN (insn
), 0, i
);
2471 if (! reg_overlap_mentioned_p (target
, SET_DEST (set
)))
2473 if (PREV_INSN (insn
))
2474 NEXT_INSN (PREV_INSN (insn
)) = next
;
2479 PREV_INSN (next
) = PREV_INSN (insn
);
2485 prev
= get_last_insn ();
2487 /* Now write the CLOBBER of the output, followed by the setting of each
2488 of the words, followed by the final copy. */
2489 if (target
!= op0
&& target
!= op1
)
2490 emit_insn (gen_rtx (CLOBBER
, VOIDmode
, target
));
2492 for (insn
= insns
; insn
; insn
= next
)
2494 next
= NEXT_INSN (insn
);
2497 if (op1
&& GET_CODE (op1
) == REG
)
2498 REG_NOTES (insn
) = gen_rtx (EXPR_LIST
, REG_NO_CONFLICT
, op1
,
2501 if (op0
&& GET_CODE (op0
) == REG
)
2502 REG_NOTES (insn
) = gen_rtx (EXPR_LIST
, REG_NO_CONFLICT
, op0
,
2506 if (mov_optab
->handlers
[(int) GET_MODE (target
)].insn_code
2507 != CODE_FOR_nothing
)
2509 last
= emit_move_insn (target
, target
);
2512 = gen_rtx (EXPR_LIST
, REG_EQUAL
, equiv
, REG_NOTES (last
));
2515 last
= get_last_insn ();
2518 first
= get_insns ();
2520 first
= NEXT_INSN (prev
);
2522 /* Encapsulate the block so it gets manipulated as a unit. */
2523 REG_NOTES (first
) = gen_rtx (INSN_LIST
, REG_LIBCALL
, last
,
2525 REG_NOTES (last
) = gen_rtx (INSN_LIST
, REG_RETVAL
, first
, REG_NOTES (last
));
2530 /* Emit code to make a call to a constant function or a library call.
2532 INSNS is a list containing all insns emitted in the call.
2533 These insns leave the result in RESULT. Our block is to copy RESULT
2534 to TARGET, which is logically equivalent to EQUIV.
2536 We first emit any insns that set a pseudo on the assumption that these are
2537 loading constants into registers; doing so allows them to be safely cse'ed
2538 between blocks. Then we emit all the other insns in the block, followed by
2539 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
2540 note with an operand of EQUIV.
2542 Moving assignments to pseudos outside of the block is done to improve
2543 the generated code, but is not required to generate correct code,
2544 hence being unable to move an assignment is not grounds for not making
2545 a libcall block. There are two reasons why it is safe to leave these
2546 insns inside the block: First, we know that these pseudos cannot be
2547 used in generated RTL outside the block since they are created for
2548 temporary purposes within the block. Second, CSE will not record the
2549 values of anything set inside a libcall block, so we know they must
2550 be dead at the end of the block.
2552 Except for the first group of insns (the ones setting pseudos), the
2553 block is delimited by REG_RETVAL and REG_LIBCALL notes. */
2556 emit_libcall_block (insns
, target
, result
, equiv
)
2562 rtx prev
, next
, first
, last
, insn
;
2564 /* First emit all insns that set pseudos. Remove them from the list as
2565 we go. Avoid insns that set pseudos which were referenced in previous
2566 insns. These can be generated by move_by_pieces, for example,
2567 to update an address. Similarly, avoid insns that reference things
2568 set in previous insns. */
2570 for (insn
= insns
; insn
; insn
= next
)
2572 rtx set
= single_set (insn
);
2574 next
= NEXT_INSN (insn
);
2576 if (set
!= 0 && GET_CODE (SET_DEST (set
)) == REG
2577 && REGNO (SET_DEST (set
)) >= FIRST_PSEUDO_REGISTER
2579 || (! reg_mentioned_p (SET_DEST (set
), PATTERN (insns
))
2580 && ! reg_used_between_p (SET_DEST (set
), insns
, insn
)
2581 && ! modified_in_p (SET_SRC (set
), insns
)
2582 && ! modified_between_p (SET_SRC (set
), insns
, insn
))))
2584 if (PREV_INSN (insn
))
2585 NEXT_INSN (PREV_INSN (insn
)) = next
;
2590 PREV_INSN (next
) = PREV_INSN (insn
);
2596 prev
= get_last_insn ();
2598 /* Write the remaining insns followed by the final copy. */
2600 for (insn
= insns
; insn
; insn
= next
)
2602 next
= NEXT_INSN (insn
);
2607 last
= emit_move_insn (target
, result
);
2608 REG_NOTES (last
) = gen_rtx (EXPR_LIST
,
2609 REG_EQUAL
, copy_rtx (equiv
), REG_NOTES (last
));
2612 first
= get_insns ();
2614 first
= NEXT_INSN (prev
);
2616 /* Encapsulate the block so it gets manipulated as a unit. */
2617 REG_NOTES (first
) = gen_rtx (INSN_LIST
, REG_LIBCALL
, last
,
2619 REG_NOTES (last
) = gen_rtx (INSN_LIST
, REG_RETVAL
, first
, REG_NOTES (last
));
2622 /* Generate code to store zero in X. */
2628 emit_move_insn (x
, const0_rtx
);
2631 /* Generate code to store 1 in X
2632 assuming it contains zero beforehand. */
2635 emit_0_to_1_insn (x
)
2638 emit_move_insn (x
, const1_rtx
);
2641 /* Generate code to compare X with Y
2642 so that the condition codes are set.
2644 MODE is the mode of the inputs (in case they are const_int).
2645 UNSIGNEDP nonzero says that X and Y are unsigned;
2646 this matters if they need to be widened.
2648 If they have mode BLKmode, then SIZE specifies the size of both X and Y,
2649 and ALIGN specifies the known shared alignment of X and Y.
2651 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.).
2652 It is ignored for fixed-point and block comparisons;
2653 it is used only for floating-point comparisons. */
2656 emit_cmp_insn (x
, y
, comparison
, size
, mode
, unsignedp
, align
)
2658 enum rtx_code comparison
;
2660 enum machine_mode mode
;
2664 enum mode_class
class;
2665 enum machine_mode wider_mode
;
2667 class = GET_MODE_CLASS (mode
);
2669 /* They could both be VOIDmode if both args are immediate constants,
2670 but we should fold that at an earlier stage.
2671 With no special code here, this will call abort,
2672 reminding the programmer to implement such folding. */
2674 if (mode
!= BLKmode
&& flag_force_mem
)
2676 x
= force_not_mem (x
);
2677 y
= force_not_mem (y
);
2680 /* If we are inside an appropriately-short loop and one operand is an
2681 expensive constant, force it into a register. */
2682 if (CONSTANT_P (x
) && preserve_subexpressions_p () && rtx_cost (x
, COMPARE
) > 2)
2683 x
= force_reg (mode
, x
);
2685 if (CONSTANT_P (y
) && preserve_subexpressions_p () && rtx_cost (y
, COMPARE
) > 2)
2686 y
= force_reg (mode
, y
);
2688 /* Don't let both operands fail to indicate the mode. */
2689 if (GET_MODE (x
) == VOIDmode
&& GET_MODE (y
) == VOIDmode
)
2690 x
= force_reg (mode
, x
);
2692 /* Handle all BLKmode compares. */
2694 if (mode
== BLKmode
)
2697 x
= protect_from_queue (x
, 0);
2698 y
= protect_from_queue (y
, 0);
2702 #ifdef HAVE_cmpstrqi
2704 && GET_CODE (size
) == CONST_INT
2705 && INTVAL (size
) < (1 << GET_MODE_BITSIZE (QImode
)))
2707 enum machine_mode result_mode
2708 = insn_operand_mode
[(int) CODE_FOR_cmpstrqi
][0];
2709 rtx result
= gen_reg_rtx (result_mode
);
2710 emit_insn (gen_cmpstrqi (result
, x
, y
, size
, GEN_INT (align
)));
2711 emit_cmp_insn (result
, const0_rtx
, comparison
, NULL_RTX
,
2716 #ifdef HAVE_cmpstrhi
2718 && GET_CODE (size
) == CONST_INT
2719 && INTVAL (size
) < (1 << GET_MODE_BITSIZE (HImode
)))
2721 enum machine_mode result_mode
2722 = insn_operand_mode
[(int) CODE_FOR_cmpstrhi
][0];
2723 rtx result
= gen_reg_rtx (result_mode
);
2724 emit_insn (gen_cmpstrhi (result
, x
, y
, size
, GEN_INT (align
)));
2725 emit_cmp_insn (result
, const0_rtx
, comparison
, NULL_RTX
,
2730 #ifdef HAVE_cmpstrsi
2733 enum machine_mode result_mode
2734 = insn_operand_mode
[(int) CODE_FOR_cmpstrsi
][0];
2735 rtx result
= gen_reg_rtx (result_mode
);
2736 size
= protect_from_queue (size
, 0);
2737 emit_insn (gen_cmpstrsi (result
, x
, y
,
2738 convert_to_mode (SImode
, size
, 1),
2740 emit_cmp_insn (result
, const0_rtx
, comparison
, NULL_RTX
,
2748 #ifdef TARGET_MEM_FUNCTIONS
2749 emit_library_call (memcmp_libfunc
, 0,
2750 TYPE_MODE (integer_type_node
), 3,
2751 XEXP (x
, 0), Pmode
, XEXP (y
, 0), Pmode
,
2752 convert_to_mode (TYPE_MODE (sizetype
), size
,
2753 TREE_UNSIGNED (sizetype
)),
2754 TYPE_MODE (sizetype
));
2756 emit_library_call (bcmp_libfunc
, 0,
2757 TYPE_MODE (integer_type_node
), 3,
2758 XEXP (x
, 0), Pmode
, XEXP (y
, 0), Pmode
,
2759 convert_to_mode (TYPE_MODE (integer_type_node
),
2761 TREE_UNSIGNED (integer_type_node
)),
2762 TYPE_MODE (integer_type_node
));
2765 /* Immediately move the result of the libcall into a pseudo
2766 register so reload doesn't clobber the value if it needs
2767 the return register for a spill reg. */
2768 result
= gen_reg_rtx (TYPE_MODE (integer_type_node
));
2769 emit_move_insn (result
,
2770 hard_libcall_value (TYPE_MODE (integer_type_node
)));
2771 emit_cmp_insn (result
,
2772 const0_rtx
, comparison
, NULL_RTX
,
2773 TYPE_MODE (integer_type_node
), 0, 0);
2778 /* Handle some compares against zero. */
2780 if (y
== CONST0_RTX (mode
)
2781 && tst_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2783 int icode
= (int) tst_optab
->handlers
[(int) mode
].insn_code
;
2786 x
= protect_from_queue (x
, 0);
2787 y
= protect_from_queue (y
, 0);
2789 /* Now, if insn does accept these operands, put them into pseudos. */
2790 if (! (*insn_operand_predicate
[icode
][0])
2791 (x
, insn_operand_mode
[icode
][0]))
2792 x
= copy_to_mode_reg (insn_operand_mode
[icode
][0], x
);
2794 emit_insn (GEN_FCN (icode
) (x
));
2798 /* Handle compares for which there is a directly suitable insn. */
2800 if (cmp_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
)
2802 int icode
= (int) cmp_optab
->handlers
[(int) mode
].insn_code
;
2805 x
= protect_from_queue (x
, 0);
2806 y
= protect_from_queue (y
, 0);
2808 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2809 if (! (*insn_operand_predicate
[icode
][0])
2810 (x
, insn_operand_mode
[icode
][0]))
2811 x
= copy_to_mode_reg (insn_operand_mode
[icode
][0], x
);
2813 if (! (*insn_operand_predicate
[icode
][1])
2814 (y
, insn_operand_mode
[icode
][1]))
2815 y
= copy_to_mode_reg (insn_operand_mode
[icode
][1], y
);
2817 emit_insn (GEN_FCN (icode
) (x
, y
));
2821 /* Try widening if we can find a direct insn that way. */
2823 if (class == MODE_INT
|| class == MODE_FLOAT
|| class == MODE_COMPLEX_FLOAT
)
2825 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
2826 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
2828 if (cmp_optab
->handlers
[(int) wider_mode
].insn_code
2829 != CODE_FOR_nothing
)
2831 x
= protect_from_queue (x
, 0);
2832 y
= protect_from_queue (y
, 0);
2833 x
= convert_modes (wider_mode
, mode
, x
, unsignedp
);
2834 y
= convert_modes (wider_mode
, mode
, y
, unsignedp
);
2835 emit_cmp_insn (x
, y
, comparison
, NULL_RTX
,
2836 wider_mode
, unsignedp
, align
);
2842 /* Handle a lib call just for the mode we are using. */
2844 if (cmp_optab
->handlers
[(int) mode
].libfunc
2845 && class != MODE_FLOAT
)
2847 rtx libfunc
= cmp_optab
->handlers
[(int) mode
].libfunc
;
2850 /* If we want unsigned, and this mode has a distinct unsigned
2851 comparison routine, use that. */
2852 if (unsignedp
&& ucmp_optab
->handlers
[(int) mode
].libfunc
)
2853 libfunc
= ucmp_optab
->handlers
[(int) mode
].libfunc
;
2855 emit_library_call (libfunc
, 1,
2856 word_mode
, 2, x
, mode
, y
, mode
);
2858 /* Immediately move the result of the libcall into a pseudo
2859 register so reload doesn't clobber the value if it needs
2860 the return register for a spill reg. */
2861 result
= gen_reg_rtx (word_mode
);
2862 emit_move_insn (result
, hard_libcall_value (word_mode
));
2864 /* Integer comparison returns a result that must be compared against 1,
2865 so that even if we do an unsigned compare afterward,
2866 there is still a value that can represent the result "less than". */
2867 emit_cmp_insn (result
, const1_rtx
,
2868 comparison
, NULL_RTX
, word_mode
, unsignedp
, 0);
2872 if (class == MODE_FLOAT
)
2873 emit_float_lib_cmp (x
, y
, comparison
);
2879 /* Nonzero if a compare of mode MODE can be done straightforwardly
2880 (without splitting it into pieces). */
2883 can_compare_p (mode
)
2884 enum machine_mode mode
;
2888 if (cmp_optab
->handlers
[(int)mode
].insn_code
!= CODE_FOR_nothing
)
2890 mode
= GET_MODE_WIDER_MODE (mode
);
2891 } while (mode
!= VOIDmode
);
2896 /* Emit a library call comparison between floating point X and Y.
2897 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
2900 emit_float_lib_cmp (x
, y
, comparison
)
2902 enum rtx_code comparison
;
2904 enum machine_mode mode
= GET_MODE (x
);
2912 libfunc
= eqhf2_libfunc
;
2916 libfunc
= nehf2_libfunc
;
2920 libfunc
= gthf2_libfunc
;
2924 libfunc
= gehf2_libfunc
;
2928 libfunc
= lthf2_libfunc
;
2932 libfunc
= lehf2_libfunc
;
2935 else if (mode
== SFmode
)
2939 libfunc
= eqsf2_libfunc
;
2943 libfunc
= nesf2_libfunc
;
2947 libfunc
= gtsf2_libfunc
;
2951 libfunc
= gesf2_libfunc
;
2955 libfunc
= ltsf2_libfunc
;
2959 libfunc
= lesf2_libfunc
;
2962 else if (mode
== DFmode
)
2966 libfunc
= eqdf2_libfunc
;
2970 libfunc
= nedf2_libfunc
;
2974 libfunc
= gtdf2_libfunc
;
2978 libfunc
= gedf2_libfunc
;
2982 libfunc
= ltdf2_libfunc
;
2986 libfunc
= ledf2_libfunc
;
2989 else if (mode
== XFmode
)
2993 libfunc
= eqxf2_libfunc
;
2997 libfunc
= nexf2_libfunc
;
3001 libfunc
= gtxf2_libfunc
;
3005 libfunc
= gexf2_libfunc
;
3009 libfunc
= ltxf2_libfunc
;
3013 libfunc
= lexf2_libfunc
;
3016 else if (mode
== TFmode
)
3020 libfunc
= eqtf2_libfunc
;
3024 libfunc
= netf2_libfunc
;
3028 libfunc
= gttf2_libfunc
;
3032 libfunc
= getf2_libfunc
;
3036 libfunc
= lttf2_libfunc
;
3040 libfunc
= letf2_libfunc
;
3045 enum machine_mode wider_mode
;
3047 for (wider_mode
= GET_MODE_WIDER_MODE (mode
); wider_mode
!= VOIDmode
;
3048 wider_mode
= GET_MODE_WIDER_MODE (wider_mode
))
3050 if ((cmp_optab
->handlers
[(int) wider_mode
].insn_code
3051 != CODE_FOR_nothing
)
3052 || (cmp_optab
->handlers
[(int) wider_mode
].libfunc
!= 0))
3054 x
= protect_from_queue (x
, 0);
3055 y
= protect_from_queue (y
, 0);
3056 x
= convert_to_mode (wider_mode
, x
, 0);
3057 y
= convert_to_mode (wider_mode
, y
, 0);
3058 emit_float_lib_cmp (x
, y
, comparison
);
3068 emit_library_call (libfunc
, 1,
3069 word_mode
, 2, x
, mode
, y
, mode
);
3071 /* Immediately move the result of the libcall into a pseudo
3072 register so reload doesn't clobber the value if it needs
3073 the return register for a spill reg. */
3074 result
= gen_reg_rtx (word_mode
);
3075 emit_move_insn (result
, hard_libcall_value (word_mode
));
3077 emit_cmp_insn (result
, const0_rtx
, comparison
,
3078 NULL_RTX
, word_mode
, 0, 0);
3081 /* Generate code to indirectly jump to a location given in the rtx LOC. */
3084 emit_indirect_jump (loc
)
3087 if (! ((*insn_operand_predicate
[(int)CODE_FOR_indirect_jump
][0])
3089 loc
= copy_to_mode_reg (Pmode
, loc
);
3091 emit_jump_insn (gen_indirect_jump (loc
));
3095 #ifdef HAVE_conditional_move
3097 /* Emit a conditional move instruction if the machine supports one for that
3098 condition and machine mode.
3100 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
3101 the mode to use should they be constants. If it is VOIDmode, they cannot
3104 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
3105 should be stored there. MODE is the mode to use should they be constants.
3106 If it is VOIDmode, they cannot both be constants.
3108 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
3109 is not supported. */
3112 emit_conditional_move (target
, code
, op0
, op1
, cmode
, op2
, op3
, mode
,
3117 enum machine_mode cmode
;
3119 enum machine_mode mode
;
3122 rtx tem
, subtarget
, comparison
, insn
;
3123 enum insn_code icode
;
3125 /* If one operand is constant, make it the second one. Only do this
3126 if the other operand is not constant as well. */
3128 if ((CONSTANT_P (op0
) && ! CONSTANT_P (op1
))
3129 || (GET_CODE (op0
) == CONST_INT
&& GET_CODE (op1
) != CONST_INT
))
3134 code
= swap_condition (code
);
3137 if (cmode
== VOIDmode
)
3138 cmode
= GET_MODE (op0
);
3140 if ((CONSTANT_P (op2
) && ! CONSTANT_P (op3
))
3141 || (GET_CODE (op2
) == CONST_INT
&& GET_CODE (op3
) != CONST_INT
))
3146 /* ??? This may not be appropriate (consider IEEE). Perhaps we should
3147 call can_reverse_comparison_p here and bail out if necessary.
3148 It's not clear whether we need to do this canonicalization though. */
3149 code
= reverse_condition (code
);
3152 if (mode
== VOIDmode
)
3153 mode
= GET_MODE (op2
);
3155 icode
= movcc_gen_code
[mode
];
3157 if (icode
== CODE_FOR_nothing
)
3162 op2
= force_not_mem (op2
);
3163 op3
= force_not_mem (op3
);
3167 target
= protect_from_queue (target
, 1);
3169 target
= gen_reg_rtx (mode
);
3175 op2
= protect_from_queue (op2
, 0);
3176 op3
= protect_from_queue (op3
, 0);
3178 /* If the insn doesn't accept these operands, put them in pseudos. */
3180 if (! (*insn_operand_predicate
[icode
][0])
3181 (subtarget
, insn_operand_mode
[icode
][0]))
3182 subtarget
= gen_reg_rtx (insn_operand_mode
[icode
][0]);
3184 if (! (*insn_operand_predicate
[icode
][2])
3185 (op2
, insn_operand_mode
[icode
][2]))
3186 op2
= copy_to_mode_reg (insn_operand_mode
[icode
][2], op2
);
3188 if (! (*insn_operand_predicate
[icode
][3])
3189 (op3
, insn_operand_mode
[icode
][3]))
3190 op3
= copy_to_mode_reg (insn_operand_mode
[icode
][3], op3
);
3192 /* Everything should now be in the suitable form, so emit the compare insn
3193 and then the conditional move. */
3196 = compare_from_rtx (op0
, op1
, code
, unsignedp
, cmode
, NULL_RTX
, 0);
3198 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
3199 if (GET_CODE (comparison
) != code
)
3200 /* This shouldn't happen. */
3203 insn
= GEN_FCN (icode
) (subtarget
, comparison
, op2
, op3
);
3205 /* If that failed, then give up. */
3211 if (subtarget
!= target
)
3212 convert_move (target
, subtarget
, 0);
3217 /* Return non-zero if a conditional move of mode MODE is supported.
3219 This function is for combine so it can tell whether an insn that looks
3220 like a conditional move is actually supported by the hardware. If we
3221 guess wrong we lose a bit on optimization, but that's it. */
3222 /* ??? sparc64 supports conditionally moving integers values based on fp
3223 comparisons, and vice versa. How do we handle them? */
3226 can_conditionally_move_p (mode
)
3227 enum machine_mode mode
;
3229 if (movcc_gen_code
[mode
] != CODE_FOR_nothing
)
3235 #endif /* HAVE_conditional_move */
3237 /* These three functions generate an insn body and return it
3238 rather than emitting the insn.
3240 They do not protect from queued increments,
3241 because they may be used 1) in protect_from_queue itself
3242 and 2) in other passes where there is no queue. */
3244 /* Generate and return an insn body to add Y to X. */
3247 gen_add2_insn (x
, y
)
3250 int icode
= (int) add_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
3252 if (! (*insn_operand_predicate
[icode
][0]) (x
, insn_operand_mode
[icode
][0])
3253 || ! (*insn_operand_predicate
[icode
][1]) (x
, insn_operand_mode
[icode
][1])
3254 || ! (*insn_operand_predicate
[icode
][2]) (y
, insn_operand_mode
[icode
][2]))
3257 return (GEN_FCN (icode
) (x
, x
, y
));
3261 have_add2_insn (mode
)
3262 enum machine_mode mode
;
3264 return add_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
;
3267 /* Generate and return an insn body to subtract Y from X. */
3270 gen_sub2_insn (x
, y
)
3273 int icode
= (int) sub_optab
->handlers
[(int) GET_MODE (x
)].insn_code
;
3275 if (! (*insn_operand_predicate
[icode
][0]) (x
, insn_operand_mode
[icode
][0])
3276 || ! (*insn_operand_predicate
[icode
][1]) (x
, insn_operand_mode
[icode
][1])
3277 || ! (*insn_operand_predicate
[icode
][2]) (y
, insn_operand_mode
[icode
][2]))
3280 return (GEN_FCN (icode
) (x
, x
, y
));
3284 have_sub2_insn (mode
)
3285 enum machine_mode mode
;
3287 return sub_optab
->handlers
[(int) mode
].insn_code
!= CODE_FOR_nothing
;
3290 /* Generate the body of an instruction to copy Y into X.
3291 It may be a SEQUENCE, if one insn isn't enough. */
3294 gen_move_insn (x
, y
)
3297 register enum machine_mode mode
= GET_MODE (x
);
3298 enum insn_code insn_code
;
3301 if (mode
== VOIDmode
)
3302 mode
= GET_MODE (y
);
3304 insn_code
= mov_optab
->handlers
[(int) mode
].insn_code
;
3306 /* Handle MODE_CC modes: If we don't have a special move insn for this mode,
3307 find a mode to do it in. If we have a movcc, use it. Otherwise,
3308 find the MODE_INT mode of the same width. */
3310 if (GET_MODE_CLASS (mode
) == MODE_CC
&& insn_code
== CODE_FOR_nothing
)
3312 enum machine_mode tmode
= VOIDmode
;
3316 && mov_optab
->handlers
[(int) CCmode
].insn_code
!= CODE_FOR_nothing
)
3319 for (tmode
= QImode
; tmode
!= VOIDmode
;
3320 tmode
= GET_MODE_WIDER_MODE (tmode
))
3321 if (GET_MODE_SIZE (tmode
) == GET_MODE_SIZE (mode
))
3324 if (tmode
== VOIDmode
)
3327 /* Get X and Y in TMODE. We can't use gen_lowpart here because it
3328 may call change_address which is not appropriate if we were
3329 called when a reload was in progress. We don't have to worry
3330 about changing the address since the size in bytes is supposed to
3331 be the same. Copy the MEM to change the mode and move any
3332 substitutions from the old MEM to the new one. */
3334 if (reload_in_progress
)
3336 x
= gen_lowpart_common (tmode
, x1
);
3337 if (x
== 0 && GET_CODE (x1
) == MEM
)
3339 x
= gen_rtx (MEM
, tmode
, XEXP (x1
, 0));
3340 RTX_UNCHANGING_P (x
) = RTX_UNCHANGING_P (x1
);
3341 MEM_IN_STRUCT_P (x
) = MEM_IN_STRUCT_P (x1
);
3342 MEM_VOLATILE_P (x
) = MEM_VOLATILE_P (x1
);
3343 copy_replacements (x1
, x
);
3346 y
= gen_lowpart_common (tmode
, y1
);
3347 if (y
== 0 && GET_CODE (y1
) == MEM
)
3349 y
= gen_rtx (MEM
, tmode
, XEXP (y1
, 0));
3350 RTX_UNCHANGING_P (y
) = RTX_UNCHANGING_P (y1
);
3351 MEM_IN_STRUCT_P (y
) = MEM_IN_STRUCT_P (y1
);
3352 MEM_VOLATILE_P (y
) = MEM_VOLATILE_P (y1
);
3353 copy_replacements (y1
, y
);
3358 x
= gen_lowpart (tmode
, x
);
3359 y
= gen_lowpart (tmode
, y
);
3362 insn_code
= mov_optab
->handlers
[(int) tmode
].insn_code
;
3363 return (GEN_FCN (insn_code
) (x
, y
));
3367 emit_move_insn_1 (x
, y
);
3368 seq
= gen_sequence ();
3373 /* Return the insn code used to extend FROM_MODE to TO_MODE.
3374 UNSIGNEDP specifies zero-extension instead of sign-extension. If
3375 no such operation exists, CODE_FOR_nothing will be returned. */
3378 can_extend_p (to_mode
, from_mode
, unsignedp
)
3379 enum machine_mode to_mode
, from_mode
;
3382 return extendtab
[(int) to_mode
][(int) from_mode
][unsignedp
];
3385 /* Generate the body of an insn to extend Y (with mode MFROM)
3386 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
3389 gen_extend_insn (x
, y
, mto
, mfrom
, unsignedp
)
3391 enum machine_mode mto
, mfrom
;
3394 return (GEN_FCN (extendtab
[(int) mto
][(int) mfrom
][unsignedp
]) (x
, y
));
3397 /* can_fix_p and can_float_p say whether the target machine
3398 can directly convert a given fixed point type to
3399 a given floating point type, or vice versa.
3400 The returned value is the CODE_FOR_... value to use,
3401 or CODE_FOR_nothing if these modes cannot be directly converted.
3403 *TRUNCP_PTR is set to 1 if it is necessary to output
3404 an explicit FTRUNC insn before the fix insn; otherwise 0. */
3406 static enum insn_code
3407 can_fix_p (fixmode
, fltmode
, unsignedp
, truncp_ptr
)
3408 enum machine_mode fltmode
, fixmode
;
3413 if (fixtrunctab
[(int) fltmode
][(int) fixmode
][unsignedp
] != CODE_FOR_nothing
)
3414 return fixtrunctab
[(int) fltmode
][(int) fixmode
][unsignedp
];
3416 if (ftrunc_optab
->handlers
[(int) fltmode
].insn_code
!= CODE_FOR_nothing
)
3419 return fixtab
[(int) fltmode
][(int) fixmode
][unsignedp
];
3421 return CODE_FOR_nothing
;
3424 static enum insn_code
3425 can_float_p (fltmode
, fixmode
, unsignedp
)
3426 enum machine_mode fixmode
, fltmode
;
3429 return floattab
[(int) fltmode
][(int) fixmode
][unsignedp
];
3432 /* Generate code to convert FROM to floating point
3433 and store in TO. FROM must be fixed point and not VOIDmode.
3434 UNSIGNEDP nonzero means regard FROM as unsigned.
3435 Normally this is done by correcting the final value
3436 if it is negative. */
3439 expand_float (to
, from
, unsignedp
)
3443 enum insn_code icode
;
3444 register rtx target
= to
;
3445 enum machine_mode fmode
, imode
;
3447 /* Crash now, because we won't be able to decide which mode to use. */
3448 if (GET_MODE (from
) == VOIDmode
)
3451 /* Look for an insn to do the conversion. Do it in the specified
3452 modes if possible; otherwise convert either input, output or both to
3453 wider mode. If the integer mode is wider than the mode of FROM,
3454 we can do the conversion signed even if the input is unsigned. */
3456 for (imode
= GET_MODE (from
); imode
!= VOIDmode
;
3457 imode
= GET_MODE_WIDER_MODE (imode
))
3458 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
3459 fmode
= GET_MODE_WIDER_MODE (fmode
))
3461 int doing_unsigned
= unsignedp
;
3463 icode
= can_float_p (fmode
, imode
, unsignedp
);
3464 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (from
) && unsignedp
)
3465 icode
= can_float_p (fmode
, imode
, 0), doing_unsigned
= 0;
3467 if (icode
!= CODE_FOR_nothing
)
3469 to
= protect_from_queue (to
, 1);
3470 from
= protect_from_queue (from
, 0);
3472 if (imode
!= GET_MODE (from
))
3473 from
= convert_to_mode (imode
, from
, unsignedp
);
3475 if (fmode
!= GET_MODE (to
))
3476 target
= gen_reg_rtx (fmode
);
3478 emit_unop_insn (icode
, target
, from
,
3479 doing_unsigned
? UNSIGNED_FLOAT
: FLOAT
);
3482 convert_move (to
, target
, 0);
3487 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3489 /* Unsigned integer, and no way to convert directly.
3490 Convert as signed, then conditionally adjust the result. */
3493 rtx label
= gen_label_rtx ();
3495 REAL_VALUE_TYPE offset
;
3499 to
= protect_from_queue (to
, 1);
3500 from
= protect_from_queue (from
, 0);
3503 from
= force_not_mem (from
);
3505 /* Look for a usable floating mode FMODE wider than the source and at
3506 least as wide as the target. Using FMODE will avoid rounding woes
3507 with unsigned values greater than the signed maximum value. */
3509 for (fmode
= GET_MODE (to
); fmode
!= VOIDmode
;
3510 fmode
= GET_MODE_WIDER_MODE (fmode
))
3511 if (GET_MODE_BITSIZE (GET_MODE (from
)) < GET_MODE_BITSIZE (fmode
)
3512 && can_float_p (fmode
, GET_MODE (from
), 0) != CODE_FOR_nothing
)
3515 if (fmode
== VOIDmode
)
3517 /* There is no such mode. Pretend the target is wide enough. */
3518 fmode
= GET_MODE (to
);
3520 /* Avoid double-rounding when TO is narrower than FROM. */
3521 if ((significand_size (fmode
) + 1)
3522 < GET_MODE_BITSIZE (GET_MODE (from
)))
3525 rtx neglabel
= gen_label_rtx ();
3527 /* Don't use TARGET if it isn't a register, is a hard register,
3528 or is the wrong mode. */
3529 if (GET_CODE (target
) != REG
3530 || REGNO (target
) < FIRST_PSEUDO_REGISTER
3531 || GET_MODE (target
) != fmode
)
3532 target
= gen_reg_rtx (fmode
);
3534 imode
= GET_MODE (from
);
3535 do_pending_stack_adjust ();
3537 /* Test whether the sign bit is set. */
3538 emit_cmp_insn (from
, const0_rtx
, GE
, NULL_RTX
, imode
, 0, 0);
3539 emit_jump_insn (gen_blt (neglabel
));
3541 /* The sign bit is not set. Convert as signed. */
3542 expand_float (target
, from
, 0);
3543 emit_jump_insn (gen_jump (label
));
3546 /* The sign bit is set.
3547 Convert to a usable (positive signed) value by shifting right
3548 one bit, while remembering if a nonzero bit was shifted
3549 out; i.e., compute (from & 1) | (from >> 1). */
3551 emit_label (neglabel
);
3552 temp
= expand_binop (imode
, and_optab
, from
, const1_rtx
,
3553 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
3554 temp1
= expand_shift (RSHIFT_EXPR
, imode
, from
, integer_one_node
,
3556 temp
= expand_binop (imode
, ior_optab
, temp
, temp1
, temp
, 1,
3558 expand_float (target
, temp
, 0);
3560 /* Multiply by 2 to undo the shift above. */
3561 temp
= expand_binop (fmode
, add_optab
, target
, target
,
3562 target
, 0, OPTAB_LIB_WIDEN
);
3564 emit_move_insn (target
, temp
);
3566 do_pending_stack_adjust ();
3572 /* If we are about to do some arithmetic to correct for an
3573 unsigned operand, do it in a pseudo-register. */
3575 if (GET_MODE (to
) != fmode
3576 || GET_CODE (to
) != REG
|| REGNO (to
) < FIRST_PSEUDO_REGISTER
)
3577 target
= gen_reg_rtx (fmode
);
3579 /* Convert as signed integer to floating. */
3580 expand_float (target
, from
, 0);
3582 /* If FROM is negative (and therefore TO is negative),
3583 correct its value by 2**bitwidth. */
3585 do_pending_stack_adjust ();
3586 emit_cmp_insn (from
, const0_rtx
, GE
, NULL_RTX
, GET_MODE (from
), 0, 0);
3587 emit_jump_insn (gen_bge (label
));
3589 /* On SCO 3.2.1, ldexp rejects values outside [0.5, 1).
3590 Rather than setting up a dconst_dot_5, let's hope SCO
3592 offset
= REAL_VALUE_LDEXP (dconst1
, GET_MODE_BITSIZE (GET_MODE (from
)));
3593 temp
= expand_binop (fmode
, add_optab
, target
,
3594 CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
),
3595 target
, 0, OPTAB_LIB_WIDEN
);
3597 emit_move_insn (target
, temp
);
3599 do_pending_stack_adjust ();
3605 /* No hardware instruction available; call a library routine to convert from
3606 SImode, DImode, or TImode into SFmode, DFmode, XFmode, or TFmode. */
3612 to
= protect_from_queue (to
, 1);
3613 from
= protect_from_queue (from
, 0);
3615 if (GET_MODE_SIZE (GET_MODE (from
)) < GET_MODE_SIZE (SImode
))
3616 from
= convert_to_mode (SImode
, from
, unsignedp
);
3619 from
= force_not_mem (from
);
3621 if (GET_MODE (to
) == SFmode
)
3623 if (GET_MODE (from
) == SImode
)
3624 libfcn
= floatsisf_libfunc
;
3625 else if (GET_MODE (from
) == DImode
)
3626 libfcn
= floatdisf_libfunc
;
3627 else if (GET_MODE (from
) == TImode
)
3628 libfcn
= floattisf_libfunc
;
3632 else if (GET_MODE (to
) == DFmode
)
3634 if (GET_MODE (from
) == SImode
)
3635 libfcn
= floatsidf_libfunc
;
3636 else if (GET_MODE (from
) == DImode
)
3637 libfcn
= floatdidf_libfunc
;
3638 else if (GET_MODE (from
) == TImode
)
3639 libfcn
= floattidf_libfunc
;
3643 else if (GET_MODE (to
) == XFmode
)
3645 if (GET_MODE (from
) == SImode
)
3646 libfcn
= floatsixf_libfunc
;
3647 else if (GET_MODE (from
) == DImode
)
3648 libfcn
= floatdixf_libfunc
;
3649 else if (GET_MODE (from
) == TImode
)
3650 libfcn
= floattixf_libfunc
;
3654 else if (GET_MODE (to
) == TFmode
)
3656 if (GET_MODE (from
) == SImode
)
3657 libfcn
= floatsitf_libfunc
;
3658 else if (GET_MODE (from
) == DImode
)
3659 libfcn
= floatditf_libfunc
;
3660 else if (GET_MODE (from
) == TImode
)
3661 libfcn
= floattitf_libfunc
;
3670 value
= emit_library_call_value (libfcn
, NULL_RTX
, 1,
3672 1, from
, GET_MODE (from
));
3673 insns
= get_insns ();
3676 emit_libcall_block (insns
, target
, value
,
3677 gen_rtx (FLOAT
, GET_MODE (to
), from
));
3682 /* Copy result to requested destination
3683 if we have been computing in a temp location. */
3687 if (GET_MODE (target
) == GET_MODE (to
))
3688 emit_move_insn (to
, target
);
3690 convert_move (to
, target
, 0);
3694 /* expand_fix: generate code to convert FROM to fixed point
3695 and store in TO. FROM must be floating point. */
3701 rtx temp
= gen_reg_rtx (GET_MODE (x
));
3702 return expand_unop (GET_MODE (x
), ftrunc_optab
, x
, temp
, 0);
3706 expand_fix (to
, from
, unsignedp
)
3707 register rtx to
, from
;
3710 enum insn_code icode
;
3711 register rtx target
= to
;
3712 enum machine_mode fmode
, imode
;
3716 /* We first try to find a pair of modes, one real and one integer, at
3717 least as wide as FROM and TO, respectively, in which we can open-code
3718 this conversion. If the integer mode is wider than the mode of TO,
3719 we can do the conversion either signed or unsigned. */
3721 for (imode
= GET_MODE (to
); imode
!= VOIDmode
;
3722 imode
= GET_MODE_WIDER_MODE (imode
))
3723 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
3724 fmode
= GET_MODE_WIDER_MODE (fmode
))
3726 int doing_unsigned
= unsignedp
;
3728 icode
= can_fix_p (imode
, fmode
, unsignedp
, &must_trunc
);
3729 if (icode
== CODE_FOR_nothing
&& imode
!= GET_MODE (to
) && unsignedp
)
3730 icode
= can_fix_p (imode
, fmode
, 0, &must_trunc
), doing_unsigned
= 0;
3732 if (icode
!= CODE_FOR_nothing
)
3734 to
= protect_from_queue (to
, 1);
3735 from
= protect_from_queue (from
, 0);
3737 if (fmode
!= GET_MODE (from
))
3738 from
= convert_to_mode (fmode
, from
, 0);
3741 from
= ftruncify (from
);
3743 if (imode
!= GET_MODE (to
))
3744 target
= gen_reg_rtx (imode
);
3746 emit_unop_insn (icode
, target
, from
,
3747 doing_unsigned
? UNSIGNED_FIX
: FIX
);
3749 convert_move (to
, target
, unsignedp
);
3754 #if !defined (REAL_IS_NOT_DOUBLE) || defined (REAL_ARITHMETIC)
3755 /* For an unsigned conversion, there is one more way to do it.
3756 If we have a signed conversion, we generate code that compares
3757 the real value to the largest representable positive number. If if
3758 is smaller, the conversion is done normally. Otherwise, subtract
3759 one plus the highest signed number, convert, and add it back.
3761 We only need to check all real modes, since we know we didn't find
3762 anything with a wider integer mode. */
3764 if (unsignedp
&& GET_MODE_BITSIZE (GET_MODE (to
)) <= HOST_BITS_PER_WIDE_INT
)
3765 for (fmode
= GET_MODE (from
); fmode
!= VOIDmode
;
3766 fmode
= GET_MODE_WIDER_MODE (fmode
))
3767 /* Make sure we won't lose significant bits doing this. */
3768 if (GET_MODE_BITSIZE (fmode
) > GET_MODE_BITSIZE (GET_MODE (to
))
3769 && CODE_FOR_nothing
!= can_fix_p (GET_MODE (to
), fmode
, 0,
3773 REAL_VALUE_TYPE offset
;
3774 rtx limit
, lab1
, lab2
, insn
;
3776 bitsize
= GET_MODE_BITSIZE (GET_MODE (to
));
3777 offset
= REAL_VALUE_LDEXP (dconst1
, bitsize
- 1);
3778 limit
= CONST_DOUBLE_FROM_REAL_VALUE (offset
, fmode
);
3779 lab1
= gen_label_rtx ();
3780 lab2
= gen_label_rtx ();
3783 to
= protect_from_queue (to
, 1);
3784 from
= protect_from_queue (from
, 0);
3787 from
= force_not_mem (from
);
3789 if (fmode
!= GET_MODE (from
))
3790 from
= convert_to_mode (fmode
, from
, 0);
3792 /* See if we need to do the subtraction. */
3793 do_pending_stack_adjust ();
3794 emit_cmp_insn (from
, limit
, GE
, NULL_RTX
, GET_MODE (from
), 0, 0);
3795 emit_jump_insn (gen_bge (lab1
));
3797 /* If not, do the signed "fix" and branch around fixup code. */
3798 expand_fix (to
, from
, 0);
3799 emit_jump_insn (gen_jump (lab2
));
3802 /* Otherwise, subtract 2**(N-1), convert to signed number,
3803 then add 2**(N-1). Do the addition using XOR since this
3804 will often generate better code. */
3806 target
= expand_binop (GET_MODE (from
), sub_optab
, from
, limit
,
3807 NULL_RTX
, 0, OPTAB_LIB_WIDEN
);
3808 expand_fix (to
, target
, 0);
3809 target
= expand_binop (GET_MODE (to
), xor_optab
, to
,
3810 GEN_INT ((HOST_WIDE_INT
) 1 << (bitsize
- 1)),
3811 to
, 1, OPTAB_LIB_WIDEN
);
3814 emit_move_insn (to
, target
);
3818 /* Make a place for a REG_NOTE and add it. */
3819 insn
= emit_move_insn (to
, to
);
3820 REG_NOTES (insn
) = gen_rtx (EXPR_LIST
, REG_EQUAL
,
3821 gen_rtx (UNSIGNED_FIX
, GET_MODE (to
),
3829 /* We can't do it with an insn, so use a library call. But first ensure
3830 that the mode of TO is at least as wide as SImode, since those are the
3831 only library calls we know about. */
3833 if (GET_MODE_SIZE (GET_MODE (to
)) < GET_MODE_SIZE (SImode
))
3835 target
= gen_reg_rtx (SImode
);
3837 expand_fix (target
, from
, unsignedp
);
3839 else if (GET_MODE (from
) == SFmode
)
3841 if (GET_MODE (to
) == SImode
)
3842 libfcn
= unsignedp
? fixunssfsi_libfunc
: fixsfsi_libfunc
;
3843 else if (GET_MODE (to
) == DImode
)
3844 libfcn
= unsignedp
? fixunssfdi_libfunc
: fixsfdi_libfunc
;
3845 else if (GET_MODE (to
) == TImode
)
3846 libfcn
= unsignedp
? fixunssfti_libfunc
: fixsfti_libfunc
;
3850 else if (GET_MODE (from
) == DFmode
)
3852 if (GET_MODE (to
) == SImode
)
3853 libfcn
= unsignedp
? fixunsdfsi_libfunc
: fixdfsi_libfunc
;
3854 else if (GET_MODE (to
) == DImode
)
3855 libfcn
= unsignedp
? fixunsdfdi_libfunc
: fixdfdi_libfunc
;
3856 else if (GET_MODE (to
) == TImode
)
3857 libfcn
= unsignedp
? fixunsdfti_libfunc
: fixdfti_libfunc
;
3861 else if (GET_MODE (from
) == XFmode
)
3863 if (GET_MODE (to
) == SImode
)
3864 libfcn
= unsignedp
? fixunsxfsi_libfunc
: fixxfsi_libfunc
;
3865 else if (GET_MODE (to
) == DImode
)
3866 libfcn
= unsignedp
? fixunsxfdi_libfunc
: fixxfdi_libfunc
;
3867 else if (GET_MODE (to
) == TImode
)
3868 libfcn
= unsignedp
? fixunsxfti_libfunc
: fixxfti_libfunc
;
3872 else if (GET_MODE (from
) == TFmode
)
3874 if (GET_MODE (to
) == SImode
)
3875 libfcn
= unsignedp
? fixunstfsi_libfunc
: fixtfsi_libfunc
;
3876 else if (GET_MODE (to
) == DImode
)
3877 libfcn
= unsignedp
? fixunstfdi_libfunc
: fixtfdi_libfunc
;
3878 else if (GET_MODE (to
) == TImode
)
3879 libfcn
= unsignedp
? fixunstfti_libfunc
: fixtfti_libfunc
;
3891 to
= protect_from_queue (to
, 1);
3892 from
= protect_from_queue (from
, 0);
3895 from
= force_not_mem (from
);
3899 value
= emit_library_call_value (libfcn
, NULL_RTX
, 1, GET_MODE (to
),
3901 1, from
, GET_MODE (from
));
3902 insns
= get_insns ();
3905 emit_libcall_block (insns
, target
, value
,
3906 gen_rtx (unsignedp
? UNSIGNED_FIX
: FIX
,
3907 GET_MODE (to
), from
));
3912 if (GET_MODE (to
) == GET_MODE (target
))
3913 emit_move_insn (to
, target
);
3915 convert_move (to
, target
, 0);
3924 optab op
= (optab
) xmalloc (sizeof (struct optab
));
3926 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
3928 op
->handlers
[i
].insn_code
= CODE_FOR_nothing
;
3929 op
->handlers
[i
].libfunc
= 0;
3932 if (code
!= UNKNOWN
)
3933 code_to_optab
[(int) code
] = op
;
3938 /* Initialize the libfunc fields of an entire group of entries in some
3939 optab. Each entry is set equal to a string consisting of a leading
3940 pair of underscores followed by a generic operation name followed by
3941 a mode name (downshifted to lower case) followed by a single character
3942 representing the number of operands for the given operation (which is
3943 usually one of the characters '2', '3', or '4').
3945 OPTABLE is the table in which libfunc fields are to be initialized.
3946 FIRST_MODE is the first machine mode index in the given optab to
3948 LAST_MODE is the last machine mode index in the given optab to
3950 OPNAME is the generic (string) name of the operation.
3951 SUFFIX is the character which specifies the number of operands for
3952 the given generic operation.
3956 init_libfuncs (optable
, first_mode
, last_mode
, opname
, suffix
)
3957 register optab optable
;
3958 register int first_mode
;
3959 register int last_mode
;
3960 register char *opname
;
3961 register int suffix
;
3964 register unsigned opname_len
= strlen (opname
);
3966 for (mode
= first_mode
; (int) mode
<= (int) last_mode
;
3967 mode
= (enum machine_mode
) ((int) mode
+ 1))
3969 register char *mname
= mode_name
[(int) mode
];
3970 register unsigned mname_len
= strlen (mname
);
3971 register char *libfunc_name
3972 = (char *) xmalloc (2 + opname_len
+ mname_len
+ 1 + 1);
3979 for (q
= opname
; *q
; )
3981 for (q
= mname
; *q
; q
++)
3982 *p
++ = tolower (*q
);
3985 optable
->handlers
[(int) mode
].libfunc
3986 = gen_rtx (SYMBOL_REF
, Pmode
, libfunc_name
);
3990 /* Initialize the libfunc fields of an entire group of entries in some
3991 optab which correspond to all integer mode operations. The parameters
3992 have the same meaning as similarly named ones for the `init_libfuncs'
3993 routine. (See above). */
3996 init_integral_libfuncs (optable
, opname
, suffix
)
3997 register optab optable
;
3998 register char *opname
;
3999 register int suffix
;
4001 init_libfuncs (optable
, SImode
, TImode
, opname
, suffix
);
4004 /* Initialize the libfunc fields of an entire group of entries in some
4005 optab which correspond to all real mode operations. The parameters
4006 have the same meaning as similarly named ones for the `init_libfuncs'
4007 routine. (See above). */
4010 init_floating_libfuncs (optable
, opname
, suffix
)
4011 register optab optable
;
4012 register char *opname
;
4013 register int suffix
;
4015 init_libfuncs (optable
, SFmode
, TFmode
, opname
, suffix
);
4018 /* Initialize the libfunc fields of an entire group of entries in some
4019 optab which correspond to all complex floating modes. The parameters
4020 have the same meaning as similarly named ones for the `init_libfuncs'
4021 routine. (See above). */
4024 init_complex_libfuncs (optable
, opname
, suffix
)
4025 register optab optable
;
4026 register char *opname
;
4027 register int suffix
;
4029 init_libfuncs (optable
, SCmode
, TCmode
, opname
, suffix
);
4032 /* Call this once to initialize the contents of the optabs
4033 appropriately for the current target machine. */
4041 /* Start by initializing all tables to contain CODE_FOR_nothing. */
4043 for (p
= fixtab
[0][0];
4044 p
< fixtab
[0][0] + sizeof fixtab
/ sizeof (fixtab
[0][0][0]);
4046 *p
= CODE_FOR_nothing
;
4048 for (p
= fixtrunctab
[0][0];
4049 p
< fixtrunctab
[0][0] + sizeof fixtrunctab
/ sizeof (fixtrunctab
[0][0][0]);
4051 *p
= CODE_FOR_nothing
;
4053 for (p
= floattab
[0][0];
4054 p
< floattab
[0][0] + sizeof floattab
/ sizeof (floattab
[0][0][0]);
4056 *p
= CODE_FOR_nothing
;
4058 for (p
= extendtab
[0][0];
4059 p
< extendtab
[0][0] + sizeof extendtab
/ sizeof extendtab
[0][0][0];
4061 *p
= CODE_FOR_nothing
;
4063 for (i
= 0; i
< NUM_RTX_CODE
; i
++)
4064 setcc_gen_code
[i
] = CODE_FOR_nothing
;
4066 #ifdef HAVE_conditional_move
4067 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4068 movcc_gen_code
[i
] = CODE_FOR_nothing
;
4071 add_optab
= init_optab (PLUS
);
4072 sub_optab
= init_optab (MINUS
);
4073 smul_optab
= init_optab (MULT
);
4074 smul_highpart_optab
= init_optab (UNKNOWN
);
4075 umul_highpart_optab
= init_optab (UNKNOWN
);
4076 smul_widen_optab
= init_optab (UNKNOWN
);
4077 umul_widen_optab
= init_optab (UNKNOWN
);
4078 sdiv_optab
= init_optab (DIV
);
4079 sdivmod_optab
= init_optab (UNKNOWN
);
4080 udiv_optab
= init_optab (UDIV
);
4081 udivmod_optab
= init_optab (UNKNOWN
);
4082 smod_optab
= init_optab (MOD
);
4083 umod_optab
= init_optab (UMOD
);
4084 flodiv_optab
= init_optab (DIV
);
4085 ftrunc_optab
= init_optab (UNKNOWN
);
4086 and_optab
= init_optab (AND
);
4087 ior_optab
= init_optab (IOR
);
4088 xor_optab
= init_optab (XOR
);
4089 ashl_optab
= init_optab (ASHIFT
);
4090 ashr_optab
= init_optab (ASHIFTRT
);
4091 lshr_optab
= init_optab (LSHIFTRT
);
4092 rotl_optab
= init_optab (ROTATE
);
4093 rotr_optab
= init_optab (ROTATERT
);
4094 smin_optab
= init_optab (SMIN
);
4095 smax_optab
= init_optab (SMAX
);
4096 umin_optab
= init_optab (UMIN
);
4097 umax_optab
= init_optab (UMAX
);
4098 mov_optab
= init_optab (UNKNOWN
);
4099 movstrict_optab
= init_optab (UNKNOWN
);
4100 cmp_optab
= init_optab (UNKNOWN
);
4101 ucmp_optab
= init_optab (UNKNOWN
);
4102 tst_optab
= init_optab (UNKNOWN
);
4103 neg_optab
= init_optab (NEG
);
4104 abs_optab
= init_optab (ABS
);
4105 one_cmpl_optab
= init_optab (NOT
);
4106 ffs_optab
= init_optab (FFS
);
4107 sqrt_optab
= init_optab (SQRT
);
4108 sin_optab
= init_optab (UNKNOWN
);
4109 cos_optab
= init_optab (UNKNOWN
);
4110 strlen_optab
= init_optab (UNKNOWN
);
4112 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4114 movstr_optab
[i
] = CODE_FOR_nothing
;
4115 clrstr_optab
[i
] = CODE_FOR_nothing
;
4117 #ifdef HAVE_SECONDARY_RELOADS
4118 reload_in_optab
[i
] = reload_out_optab
[i
] = CODE_FOR_nothing
;
4122 /* Fill in the optabs with the insns we support. */
4125 #ifdef FIXUNS_TRUNC_LIKE_FIX_TRUNC
4126 /* This flag says the same insns that convert to a signed fixnum
4127 also convert validly to an unsigned one. */
4128 for (i
= 0; i
< NUM_MACHINE_MODES
; i
++)
4129 for (j
= 0; j
< NUM_MACHINE_MODES
; j
++)
4130 fixtrunctab
[i
][j
][1] = fixtrunctab
[i
][j
][0];
4133 #ifdef EXTRA_CC_MODES
4137 /* Initialize the optabs with the names of the library functions. */
4138 init_integral_libfuncs (add_optab
, "add", '3');
4139 init_floating_libfuncs (add_optab
, "add", '3');
4140 init_integral_libfuncs (sub_optab
, "sub", '3');
4141 init_floating_libfuncs (sub_optab
, "sub", '3');
4142 init_integral_libfuncs (smul_optab
, "mul", '3');
4143 init_floating_libfuncs (smul_optab
, "mul", '3');
4144 init_integral_libfuncs (sdiv_optab
, "div", '3');
4145 init_integral_libfuncs (udiv_optab
, "udiv", '3');
4146 init_integral_libfuncs (sdivmod_optab
, "divmod", '4');
4147 init_integral_libfuncs (udivmod_optab
, "udivmod", '4');
4148 init_integral_libfuncs (smod_optab
, "mod", '3');
4149 init_integral_libfuncs (umod_optab
, "umod", '3');
4150 init_floating_libfuncs (flodiv_optab
, "div", '3');
4151 init_floating_libfuncs (ftrunc_optab
, "ftrunc", '2');
4152 init_integral_libfuncs (and_optab
, "and", '3');
4153 init_integral_libfuncs (ior_optab
, "ior", '3');
4154 init_integral_libfuncs (xor_optab
, "xor", '3');
4155 init_integral_libfuncs (ashl_optab
, "ashl", '3');
4156 init_integral_libfuncs (ashr_optab
, "ashr", '3');
4157 init_integral_libfuncs (lshr_optab
, "lshr", '3');
4158 init_integral_libfuncs (smin_optab
, "min", '3');
4159 init_floating_libfuncs (smin_optab
, "min", '3');
4160 init_integral_libfuncs (smax_optab
, "max", '3');
4161 init_floating_libfuncs (smax_optab
, "max", '3');
4162 init_integral_libfuncs (umin_optab
, "umin", '3');
4163 init_integral_libfuncs (umax_optab
, "umax", '3');
4164 init_integral_libfuncs (neg_optab
, "neg", '2');
4165 init_floating_libfuncs (neg_optab
, "neg", '2');
4166 init_integral_libfuncs (one_cmpl_optab
, "one_cmpl", '2');
4167 init_integral_libfuncs (ffs_optab
, "ffs", '2');
4169 /* Comparison libcalls for integers MUST come in pairs, signed/unsigned. */
4170 init_integral_libfuncs (cmp_optab
, "cmp", '2');
4171 init_integral_libfuncs (ucmp_optab
, "ucmp", '2');
4172 init_floating_libfuncs (cmp_optab
, "cmp", '2');
4174 #ifdef MULSI3_LIBCALL
4175 smul_optab
->handlers
[(int) SImode
].libfunc
4176 = gen_rtx (SYMBOL_REF
, Pmode
, MULSI3_LIBCALL
);
4178 #ifdef MULDI3_LIBCALL
4179 smul_optab
->handlers
[(int) DImode
].libfunc
4180 = gen_rtx (SYMBOL_REF
, Pmode
, MULDI3_LIBCALL
);
4183 #ifdef DIVSI3_LIBCALL
4184 sdiv_optab
->handlers
[(int) SImode
].libfunc
4185 = gen_rtx (SYMBOL_REF
, Pmode
, DIVSI3_LIBCALL
);
4187 #ifdef DIVDI3_LIBCALL
4188 sdiv_optab
->handlers
[(int) DImode
].libfunc
4189 = gen_rtx (SYMBOL_REF
, Pmode
, DIVDI3_LIBCALL
);
4192 #ifdef UDIVSI3_LIBCALL
4193 udiv_optab
->handlers
[(int) SImode
].libfunc
4194 = gen_rtx (SYMBOL_REF
, Pmode
, UDIVSI3_LIBCALL
);
4196 #ifdef UDIVDI3_LIBCALL
4197 udiv_optab
->handlers
[(int) DImode
].libfunc
4198 = gen_rtx (SYMBOL_REF
, Pmode
, UDIVDI3_LIBCALL
);
4201 #ifdef MODSI3_LIBCALL
4202 smod_optab
->handlers
[(int) SImode
].libfunc
4203 = gen_rtx (SYMBOL_REF
, Pmode
, MODSI3_LIBCALL
);
4205 #ifdef MODDI3_LIBCALL
4206 smod_optab
->handlers
[(int) DImode
].libfunc
4207 = gen_rtx (SYMBOL_REF
, Pmode
, MODDI3_LIBCALL
);
4210 #ifdef UMODSI3_LIBCALL
4211 umod_optab
->handlers
[(int) SImode
].libfunc
4212 = gen_rtx (SYMBOL_REF
, Pmode
, UMODSI3_LIBCALL
);
4214 #ifdef UMODDI3_LIBCALL
4215 umod_optab
->handlers
[(int) DImode
].libfunc
4216 = gen_rtx (SYMBOL_REF
, Pmode
, UMODDI3_LIBCALL
);
4219 /* Use cabs for DC complex abs, since systems generally have cabs.
4220 Don't define any libcall for SCmode, so that cabs will be used. */
4221 abs_optab
->handlers
[(int) DCmode
].libfunc
4222 = gen_rtx (SYMBOL_REF
, Pmode
, "cabs");
4224 /* The ffs function operates on `int'. */
4225 #ifndef INT_TYPE_SIZE
4226 #define INT_TYPE_SIZE BITS_PER_WORD
4228 ffs_optab
->handlers
[(int) mode_for_size (INT_TYPE_SIZE
, MODE_INT
, 0)] .libfunc
4229 = gen_rtx (SYMBOL_REF
, Pmode
, "ffs");
4231 extendsfdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__extendsfdf2");
4232 extendsfxf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__extendsfxf2");
4233 extendsftf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__extendsftf2");
4234 extenddfxf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__extenddfxf2");
4235 extenddftf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__extenddftf2");
4237 truncdfsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__truncdfsf2");
4238 truncxfsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__truncxfsf2");
4239 trunctfsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__trunctfsf2");
4240 truncxfdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__truncxfdf2");
4241 trunctfdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__trunctfdf2");
4243 memcpy_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "memcpy");
4244 bcopy_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "bcopy");
4245 memcmp_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "memcmp");
4246 bcmp_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gcc_bcmp");
4247 memset_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "memset");
4248 bzero_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "bzero");
4250 eqhf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__eqhf2");
4251 nehf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__nehf2");
4252 gthf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gthf2");
4253 gehf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gehf2");
4254 lthf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__lthf2");
4255 lehf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__lehf2");
4257 eqsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__eqsf2");
4258 nesf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__nesf2");
4259 gtsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gtsf2");
4260 gesf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gesf2");
4261 ltsf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__ltsf2");
4262 lesf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__lesf2");
4264 eqdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__eqdf2");
4265 nedf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__nedf2");
4266 gtdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gtdf2");
4267 gedf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gedf2");
4268 ltdf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__ltdf2");
4269 ledf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__ledf2");
4271 eqxf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__eqxf2");
4272 nexf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__nexf2");
4273 gtxf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gtxf2");
4274 gexf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gexf2");
4275 ltxf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__ltxf2");
4276 lexf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__lexf2");
4278 eqtf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__eqtf2");
4279 netf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__netf2");
4280 gttf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__gttf2");
4281 getf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__getf2");
4282 lttf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__lttf2");
4283 letf2_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__letf2");
4285 floatsisf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatsisf");
4286 floatdisf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatdisf");
4287 floattisf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floattisf");
4289 floatsidf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatsidf");
4290 floatdidf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatdidf");
4291 floattidf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floattidf");
4293 floatsixf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatsixf");
4294 floatdixf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatdixf");
4295 floattixf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floattixf");
4297 floatsitf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatsitf");
4298 floatditf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floatditf");
4299 floattitf_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__floattitf");
4301 fixsfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixsfsi");
4302 fixsfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixsfdi");
4303 fixsfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixsfti");
4305 fixdfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixdfsi");
4306 fixdfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixdfdi");
4307 fixdfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixdfti");
4309 fixxfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixxfsi");
4310 fixxfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixxfdi");
4311 fixxfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixxfti");
4313 fixtfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixtfsi");
4314 fixtfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixtfdi");
4315 fixtfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixtfti");
4317 fixunssfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunssfsi");
4318 fixunssfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunssfdi");
4319 fixunssfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunssfti");
4321 fixunsdfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsdfsi");
4322 fixunsdfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsdfdi");
4323 fixunsdfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsdfti");
4325 fixunsxfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsxfsi");
4326 fixunsxfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsxfdi");
4327 fixunsxfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunsxfti");
4329 fixunstfsi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunstfsi");
4330 fixunstfdi_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunstfdi");
4331 fixunstfti_libfunc
= gen_rtx (SYMBOL_REF
, Pmode
, "__fixunstfti");
4333 #ifdef INIT_TARGET_OPTABS
4334 /* Allow the target to add more libcalls or rename some, etc. */
4341 /* SCO 3.2 apparently has a broken ldexp. */
4354 #endif /* BROKEN_LDEXP */