]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/optabs.c
./:
[thirdparty/gcc.git] / gcc / optabs.c
1 /* Expand the basic unary and binary arithmetic operations, for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "toplev.h"
28
29 /* Include insn-config.h before expr.h so that HAVE_conditional_move
30 is properly defined. */
31 #include "insn-config.h"
32 #include "rtl.h"
33 #include "tree.h"
34 #include "tm_p.h"
35 #include "flags.h"
36 #include "function.h"
37 #include "except.h"
38 #include "expr.h"
39 #include "optabs.h"
40 #include "libfuncs.h"
41 #include "recog.h"
42 #include "reload.h"
43 #include "ggc.h"
44 #include "real.h"
45 #include "basic-block.h"
46 #include "target.h"
47
48 /* Each optab contains info on how this target machine
49 can perform a particular operation
50 for all sizes and kinds of operands.
51
52 The operation to be performed is often specified
53 by passing one of these optabs as an argument.
54
55 See expr.h for documentation of these optabs. */
56
57 #if GCC_VERSION >= 4000
58 __extension__ struct optab optab_table[OTI_MAX]
59 = { [0 ... OTI_MAX - 1].handlers[0 ... NUM_MACHINE_MODES - 1].insn_code
60 = CODE_FOR_nothing };
61 #else
62 /* init_insn_codes will do runtime initialization otherwise. */
63 struct optab optab_table[OTI_MAX];
64 #endif
65
66 rtx libfunc_table[LTI_MAX];
67
68 /* Tables of patterns for converting one mode to another. */
69 #if GCC_VERSION >= 4000
70 __extension__ struct convert_optab convert_optab_table[COI_MAX]
71 = { [0 ... COI_MAX - 1].handlers[0 ... NUM_MACHINE_MODES - 1]
72 [0 ... NUM_MACHINE_MODES - 1].insn_code
73 = CODE_FOR_nothing };
74 #else
75 /* init_convert_optab will do runtime initialization otherwise. */
76 struct convert_optab convert_optab_table[COI_MAX];
77 #endif
78
79 /* Contains the optab used for each rtx code. */
80 optab code_to_optab[NUM_RTX_CODE + 1];
81
82 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
83 gives the gen_function to make a branch to test that condition. */
84
85 rtxfun bcc_gen_fctn[NUM_RTX_CODE];
86
87 /* Indexed by the rtx-code for a conditional (eg. EQ, LT,...)
88 gives the insn code to make a store-condition insn
89 to test that condition. */
90
91 enum insn_code setcc_gen_code[NUM_RTX_CODE];
92
93 #ifdef HAVE_conditional_move
94 /* Indexed by the machine mode, gives the insn code to make a conditional
95 move insn. This is not indexed by the rtx-code like bcc_gen_fctn and
96 setcc_gen_code to cut down on the number of named patterns. Consider a day
97 when a lot more rtx codes are conditional (eg: for the ARM). */
98
99 enum insn_code movcc_gen_code[NUM_MACHINE_MODES];
100 #endif
101
102 /* Indexed by the machine mode, gives the insn code for vector conditional
103 operation. */
104
105 enum insn_code vcond_gen_code[NUM_MACHINE_MODES];
106 enum insn_code vcondu_gen_code[NUM_MACHINE_MODES];
107
108 /* The insn generating function can not take an rtx_code argument.
109 TRAP_RTX is used as an rtx argument. Its code is replaced with
110 the code to be used in the trap insn and all other fields are ignored. */
111 static GTY(()) rtx trap_rtx;
112
113 static void prepare_float_lib_cmp (rtx *, rtx *, enum rtx_code *,
114 enum machine_mode *, int *);
115 static rtx expand_unop_direct (enum machine_mode, optab, rtx, rtx, int);
116
117 /* Debug facility for use in GDB. */
118 void debug_optab_libfuncs (void);
119
120 #ifndef HAVE_conditional_trap
121 #define HAVE_conditional_trap 0
122 #define gen_conditional_trap(a,b) (gcc_unreachable (), NULL_RTX)
123 #endif
124
125 /* Prefixes for the current version of decimal floating point (BID vs. DPD) */
126 #if ENABLE_DECIMAL_BID_FORMAT
127 #define DECIMAL_PREFIX "bid_"
128 #else
129 #define DECIMAL_PREFIX "dpd_"
130 #endif
131 \f
132
133 /* Info about libfunc. We use same hashtable for normal optabs and conversion
134 optab. In the first case mode2 is unused. */
135 struct GTY(()) libfunc_entry {
136 size_t optab;
137 enum machine_mode mode1, mode2;
138 rtx libfunc;
139 };
140
141 /* Hash table used to convert declarations into nodes. */
142 static GTY((param_is (struct libfunc_entry))) htab_t libfunc_hash;
143
144 /* Used for attribute_hash. */
145
146 static hashval_t
147 hash_libfunc (const void *p)
148 {
149 const struct libfunc_entry *const e = (const struct libfunc_entry *) p;
150
151 return (((int) e->mode1 + (int) e->mode2 * NUM_MACHINE_MODES)
152 ^ e->optab);
153 }
154
155 /* Used for optab_hash. */
156
157 static int
158 eq_libfunc (const void *p, const void *q)
159 {
160 const struct libfunc_entry *const e1 = (const struct libfunc_entry *) p;
161 const struct libfunc_entry *const e2 = (const struct libfunc_entry *) q;
162
163 return (e1->optab == e2->optab
164 && e1->mode1 == e2->mode1
165 && e1->mode2 == e2->mode2);
166 }
167
168 /* Return libfunc corresponding operation defined by OPTAB converting
169 from MODE2 to MODE1. Trigger lazy initialization if needed, return NULL
170 if no libfunc is available. */
171 rtx
172 convert_optab_libfunc (convert_optab optab, enum machine_mode mode1,
173 enum machine_mode mode2)
174 {
175 struct libfunc_entry e;
176 struct libfunc_entry **slot;
177
178 e.optab = (size_t) (optab - &convert_optab_table[0]);
179 e.mode1 = mode1;
180 e.mode2 = mode2;
181 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
182 if (!slot)
183 {
184 if (optab->libcall_gen)
185 {
186 optab->libcall_gen (optab, optab->libcall_basename, mode1, mode2);
187 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
188 if (slot)
189 return (*slot)->libfunc;
190 else
191 return NULL;
192 }
193 return NULL;
194 }
195 return (*slot)->libfunc;
196 }
197
198 /* Return libfunc corresponding operation defined by OPTAB in MODE.
199 Trigger lazy initialization if needed, return NULL if no libfunc is
200 available. */
201 rtx
202 optab_libfunc (optab optab, enum machine_mode mode)
203 {
204 struct libfunc_entry e;
205 struct libfunc_entry **slot;
206
207 e.optab = (size_t) (optab - &optab_table[0]);
208 e.mode1 = mode;
209 e.mode2 = VOIDmode;
210 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, NO_INSERT);
211 if (!slot)
212 {
213 if (optab->libcall_gen)
214 {
215 optab->libcall_gen (optab, optab->libcall_basename,
216 optab->libcall_suffix, mode);
217 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash,
218 &e, NO_INSERT);
219 if (slot)
220 return (*slot)->libfunc;
221 else
222 return NULL;
223 }
224 return NULL;
225 }
226 return (*slot)->libfunc;
227 }
228
229 \f
230 /* Add a REG_EQUAL note to the last insn in INSNS. TARGET is being set to
231 the result of operation CODE applied to OP0 (and OP1 if it is a binary
232 operation).
233
234 If the last insn does not set TARGET, don't do anything, but return 1.
235
236 If a previous insn sets TARGET and TARGET is one of OP0 or OP1,
237 don't add the REG_EQUAL note but return 0. Our caller can then try
238 again, ensuring that TARGET is not one of the operands. */
239
240 static int
241 add_equal_note (rtx insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
242 {
243 rtx last_insn, insn, set;
244 rtx note;
245
246 gcc_assert (insns && INSN_P (insns) && NEXT_INSN (insns));
247
248 if (GET_RTX_CLASS (code) != RTX_COMM_ARITH
249 && GET_RTX_CLASS (code) != RTX_BIN_ARITH
250 && GET_RTX_CLASS (code) != RTX_COMM_COMPARE
251 && GET_RTX_CLASS (code) != RTX_COMPARE
252 && GET_RTX_CLASS (code) != RTX_UNARY)
253 return 1;
254
255 if (GET_CODE (target) == ZERO_EXTRACT)
256 return 1;
257
258 for (last_insn = insns;
259 NEXT_INSN (last_insn) != NULL_RTX;
260 last_insn = NEXT_INSN (last_insn))
261 ;
262
263 set = single_set (last_insn);
264 if (set == NULL_RTX)
265 return 1;
266
267 if (! rtx_equal_p (SET_DEST (set), target)
268 /* For a STRICT_LOW_PART, the REG_NOTE applies to what is inside it. */
269 && (GET_CODE (SET_DEST (set)) != STRICT_LOW_PART
270 || ! rtx_equal_p (XEXP (SET_DEST (set), 0), target)))
271 return 1;
272
273 /* If TARGET is in OP0 or OP1, check if anything in SEQ sets TARGET
274 besides the last insn. */
275 if (reg_overlap_mentioned_p (target, op0)
276 || (op1 && reg_overlap_mentioned_p (target, op1)))
277 {
278 insn = PREV_INSN (last_insn);
279 while (insn != NULL_RTX)
280 {
281 if (reg_set_p (target, insn))
282 return 0;
283
284 insn = PREV_INSN (insn);
285 }
286 }
287
288 if (GET_RTX_CLASS (code) == RTX_UNARY)
289 note = gen_rtx_fmt_e (code, GET_MODE (target), copy_rtx (op0));
290 else
291 note = gen_rtx_fmt_ee (code, GET_MODE (target), copy_rtx (op0), copy_rtx (op1));
292
293 set_unique_reg_note (last_insn, REG_EQUAL, note);
294
295 return 1;
296 }
297 \f
298 /* Widen OP to MODE and return the rtx for the widened operand. UNSIGNEDP
299 says whether OP is signed or unsigned. NO_EXTEND is nonzero if we need
300 not actually do a sign-extend or zero-extend, but can leave the
301 higher-order bits of the result rtx undefined, for example, in the case
302 of logical operations, but not right shifts. */
303
304 static rtx
305 widen_operand (rtx op, enum machine_mode mode, enum machine_mode oldmode,
306 int unsignedp, int no_extend)
307 {
308 rtx result;
309
310 /* If we don't have to extend and this is a constant, return it. */
311 if (no_extend && GET_MODE (op) == VOIDmode)
312 return op;
313
314 /* If we must extend do so. If OP is a SUBREG for a promoted object, also
315 extend since it will be more efficient to do so unless the signedness of
316 a promoted object differs from our extension. */
317 if (! no_extend
318 || (GET_CODE (op) == SUBREG && SUBREG_PROMOTED_VAR_P (op)
319 && SUBREG_PROMOTED_UNSIGNED_P (op) == unsignedp))
320 return convert_modes (mode, oldmode, op, unsignedp);
321
322 /* If MODE is no wider than a single word, we return a paradoxical
323 SUBREG. */
324 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
325 return gen_rtx_SUBREG (mode, force_reg (GET_MODE (op), op), 0);
326
327 /* Otherwise, get an object of MODE, clobber it, and set the low-order
328 part to OP. */
329
330 result = gen_reg_rtx (mode);
331 emit_clobber (result);
332 emit_move_insn (gen_lowpart (GET_MODE (op), result), op);
333 return result;
334 }
335 \f
336 /* Return the optab used for computing the operation given by the tree code,
337 CODE and the tree EXP. This function is not always usable (for example, it
338 cannot give complete results for multiplication or division) but probably
339 ought to be relied on more widely throughout the expander. */
340 optab
341 optab_for_tree_code (enum tree_code code, const_tree type,
342 enum optab_subtype subtype)
343 {
344 bool trapv;
345 switch (code)
346 {
347 case BIT_AND_EXPR:
348 return and_optab;
349
350 case BIT_IOR_EXPR:
351 return ior_optab;
352
353 case BIT_NOT_EXPR:
354 return one_cmpl_optab;
355
356 case BIT_XOR_EXPR:
357 return xor_optab;
358
359 case TRUNC_MOD_EXPR:
360 case CEIL_MOD_EXPR:
361 case FLOOR_MOD_EXPR:
362 case ROUND_MOD_EXPR:
363 return TYPE_UNSIGNED (type) ? umod_optab : smod_optab;
364
365 case RDIV_EXPR:
366 case TRUNC_DIV_EXPR:
367 case CEIL_DIV_EXPR:
368 case FLOOR_DIV_EXPR:
369 case ROUND_DIV_EXPR:
370 case EXACT_DIV_EXPR:
371 if (TYPE_SATURATING(type))
372 return TYPE_UNSIGNED(type) ? usdiv_optab : ssdiv_optab;
373 return TYPE_UNSIGNED (type) ? udiv_optab : sdiv_optab;
374
375 case LSHIFT_EXPR:
376 if (VECTOR_MODE_P (TYPE_MODE (type)))
377 {
378 if (subtype == optab_vector)
379 return TYPE_SATURATING (type) ? NULL : vashl_optab;
380
381 gcc_assert (subtype == optab_scalar);
382 }
383 if (TYPE_SATURATING(type))
384 return TYPE_UNSIGNED(type) ? usashl_optab : ssashl_optab;
385 return ashl_optab;
386
387 case RSHIFT_EXPR:
388 if (VECTOR_MODE_P (TYPE_MODE (type)))
389 {
390 if (subtype == optab_vector)
391 return TYPE_UNSIGNED (type) ? vlshr_optab : vashr_optab;
392
393 gcc_assert (subtype == optab_scalar);
394 }
395 return TYPE_UNSIGNED (type) ? lshr_optab : ashr_optab;
396
397 case LROTATE_EXPR:
398 if (VECTOR_MODE_P (TYPE_MODE (type)))
399 {
400 if (subtype == optab_vector)
401 return vrotl_optab;
402
403 gcc_assert (subtype == optab_scalar);
404 }
405 return rotl_optab;
406
407 case RROTATE_EXPR:
408 if (VECTOR_MODE_P (TYPE_MODE (type)))
409 {
410 if (subtype == optab_vector)
411 return vrotr_optab;
412
413 gcc_assert (subtype == optab_scalar);
414 }
415 return rotr_optab;
416
417 case MAX_EXPR:
418 return TYPE_UNSIGNED (type) ? umax_optab : smax_optab;
419
420 case MIN_EXPR:
421 return TYPE_UNSIGNED (type) ? umin_optab : smin_optab;
422
423 case REALIGN_LOAD_EXPR:
424 return vec_realign_load_optab;
425
426 case WIDEN_SUM_EXPR:
427 return TYPE_UNSIGNED (type) ? usum_widen_optab : ssum_widen_optab;
428
429 case DOT_PROD_EXPR:
430 return TYPE_UNSIGNED (type) ? udot_prod_optab : sdot_prod_optab;
431
432 case REDUC_MAX_EXPR:
433 return TYPE_UNSIGNED (type) ? reduc_umax_optab : reduc_smax_optab;
434
435 case REDUC_MIN_EXPR:
436 return TYPE_UNSIGNED (type) ? reduc_umin_optab : reduc_smin_optab;
437
438 case REDUC_PLUS_EXPR:
439 return TYPE_UNSIGNED (type) ? reduc_uplus_optab : reduc_splus_optab;
440
441 case VEC_LSHIFT_EXPR:
442 return vec_shl_optab;
443
444 case VEC_RSHIFT_EXPR:
445 return vec_shr_optab;
446
447 case VEC_WIDEN_MULT_HI_EXPR:
448 return TYPE_UNSIGNED (type) ?
449 vec_widen_umult_hi_optab : vec_widen_smult_hi_optab;
450
451 case VEC_WIDEN_MULT_LO_EXPR:
452 return TYPE_UNSIGNED (type) ?
453 vec_widen_umult_lo_optab : vec_widen_smult_lo_optab;
454
455 case VEC_UNPACK_HI_EXPR:
456 return TYPE_UNSIGNED (type) ?
457 vec_unpacku_hi_optab : vec_unpacks_hi_optab;
458
459 case VEC_UNPACK_LO_EXPR:
460 return TYPE_UNSIGNED (type) ?
461 vec_unpacku_lo_optab : vec_unpacks_lo_optab;
462
463 case VEC_UNPACK_FLOAT_HI_EXPR:
464 /* The signedness is determined from input operand. */
465 return TYPE_UNSIGNED (type) ?
466 vec_unpacku_float_hi_optab : vec_unpacks_float_hi_optab;
467
468 case VEC_UNPACK_FLOAT_LO_EXPR:
469 /* The signedness is determined from input operand. */
470 return TYPE_UNSIGNED (type) ?
471 vec_unpacku_float_lo_optab : vec_unpacks_float_lo_optab;
472
473 case VEC_PACK_TRUNC_EXPR:
474 return vec_pack_trunc_optab;
475
476 case VEC_PACK_SAT_EXPR:
477 return TYPE_UNSIGNED (type) ? vec_pack_usat_optab : vec_pack_ssat_optab;
478
479 case VEC_PACK_FIX_TRUNC_EXPR:
480 /* The signedness is determined from output operand. */
481 return TYPE_UNSIGNED (type) ?
482 vec_pack_ufix_trunc_optab : vec_pack_sfix_trunc_optab;
483
484 default:
485 break;
486 }
487
488 trapv = INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_TRAPS (type);
489 switch (code)
490 {
491 case POINTER_PLUS_EXPR:
492 case PLUS_EXPR:
493 if (TYPE_SATURATING(type))
494 return TYPE_UNSIGNED(type) ? usadd_optab : ssadd_optab;
495 return trapv ? addv_optab : add_optab;
496
497 case MINUS_EXPR:
498 if (TYPE_SATURATING(type))
499 return TYPE_UNSIGNED(type) ? ussub_optab : sssub_optab;
500 return trapv ? subv_optab : sub_optab;
501
502 case MULT_EXPR:
503 if (TYPE_SATURATING(type))
504 return TYPE_UNSIGNED(type) ? usmul_optab : ssmul_optab;
505 return trapv ? smulv_optab : smul_optab;
506
507 case NEGATE_EXPR:
508 if (TYPE_SATURATING(type))
509 return TYPE_UNSIGNED(type) ? usneg_optab : ssneg_optab;
510 return trapv ? negv_optab : neg_optab;
511
512 case ABS_EXPR:
513 return trapv ? absv_optab : abs_optab;
514
515 case VEC_EXTRACT_EVEN_EXPR:
516 return vec_extract_even_optab;
517
518 case VEC_EXTRACT_ODD_EXPR:
519 return vec_extract_odd_optab;
520
521 case VEC_INTERLEAVE_HIGH_EXPR:
522 return vec_interleave_high_optab;
523
524 case VEC_INTERLEAVE_LOW_EXPR:
525 return vec_interleave_low_optab;
526
527 default:
528 return NULL;
529 }
530 }
531 \f
532
533 /* Expand vector widening operations.
534
535 There are two different classes of operations handled here:
536 1) Operations whose result is wider than all the arguments to the operation.
537 Examples: VEC_UNPACK_HI/LO_EXPR, VEC_WIDEN_MULT_HI/LO_EXPR
538 In this case OP0 and optionally OP1 would be initialized,
539 but WIDE_OP wouldn't (not relevant for this case).
540 2) Operations whose result is of the same size as the last argument to the
541 operation, but wider than all the other arguments to the operation.
542 Examples: WIDEN_SUM_EXPR, VEC_DOT_PROD_EXPR.
543 In the case WIDE_OP, OP0 and optionally OP1 would be initialized.
544
545 E.g, when called to expand the following operations, this is how
546 the arguments will be initialized:
547 nops OP0 OP1 WIDE_OP
548 widening-sum 2 oprnd0 - oprnd1
549 widening-dot-product 3 oprnd0 oprnd1 oprnd2
550 widening-mult 2 oprnd0 oprnd1 -
551 type-promotion (vec-unpack) 1 oprnd0 - - */
552
553 rtx
554 expand_widen_pattern_expr (tree exp, rtx op0, rtx op1, rtx wide_op, rtx target,
555 int unsignedp)
556 {
557 tree oprnd0, oprnd1, oprnd2;
558 enum machine_mode wmode = VOIDmode, tmode0, tmode1 = VOIDmode;
559 optab widen_pattern_optab;
560 int icode;
561 enum machine_mode xmode0, xmode1 = VOIDmode, wxmode = VOIDmode;
562 rtx temp;
563 rtx pat;
564 rtx xop0, xop1, wxop;
565 int nops = TREE_OPERAND_LENGTH (exp);
566
567 oprnd0 = TREE_OPERAND (exp, 0);
568 tmode0 = TYPE_MODE (TREE_TYPE (oprnd0));
569 widen_pattern_optab =
570 optab_for_tree_code (TREE_CODE (exp), TREE_TYPE (oprnd0), optab_default);
571 icode = (int) optab_handler (widen_pattern_optab, tmode0)->insn_code;
572 gcc_assert (icode != CODE_FOR_nothing);
573 xmode0 = insn_data[icode].operand[1].mode;
574
575 if (nops >= 2)
576 {
577 oprnd1 = TREE_OPERAND (exp, 1);
578 tmode1 = TYPE_MODE (TREE_TYPE (oprnd1));
579 xmode1 = insn_data[icode].operand[2].mode;
580 }
581
582 /* The last operand is of a wider mode than the rest of the operands. */
583 if (nops == 2)
584 {
585 wmode = tmode1;
586 wxmode = xmode1;
587 }
588 else if (nops == 3)
589 {
590 gcc_assert (tmode1 == tmode0);
591 gcc_assert (op1);
592 oprnd2 = TREE_OPERAND (exp, 2);
593 wmode = TYPE_MODE (TREE_TYPE (oprnd2));
594 wxmode = insn_data[icode].operand[3].mode;
595 }
596
597 if (!wide_op)
598 wmode = wxmode = insn_data[icode].operand[0].mode;
599
600 if (!target
601 || ! (*insn_data[icode].operand[0].predicate) (target, wmode))
602 temp = gen_reg_rtx (wmode);
603 else
604 temp = target;
605
606 xop0 = op0;
607 xop1 = op1;
608 wxop = wide_op;
609
610 /* In case the insn wants input operands in modes different from
611 those of the actual operands, convert the operands. It would
612 seem that we don't need to convert CONST_INTs, but we do, so
613 that they're properly zero-extended, sign-extended or truncated
614 for their mode. */
615
616 if (GET_MODE (op0) != xmode0 && xmode0 != VOIDmode)
617 xop0 = convert_modes (xmode0,
618 GET_MODE (op0) != VOIDmode
619 ? GET_MODE (op0)
620 : tmode0,
621 xop0, unsignedp);
622
623 if (op1)
624 if (GET_MODE (op1) != xmode1 && xmode1 != VOIDmode)
625 xop1 = convert_modes (xmode1,
626 GET_MODE (op1) != VOIDmode
627 ? GET_MODE (op1)
628 : tmode1,
629 xop1, unsignedp);
630
631 if (wide_op)
632 if (GET_MODE (wide_op) != wxmode && wxmode != VOIDmode)
633 wxop = convert_modes (wxmode,
634 GET_MODE (wide_op) != VOIDmode
635 ? GET_MODE (wide_op)
636 : wmode,
637 wxop, unsignedp);
638
639 /* Now, if insn's predicates don't allow our operands, put them into
640 pseudo regs. */
641
642 if (! (*insn_data[icode].operand[1].predicate) (xop0, xmode0)
643 && xmode0 != VOIDmode)
644 xop0 = copy_to_mode_reg (xmode0, xop0);
645
646 if (op1)
647 {
648 if (! (*insn_data[icode].operand[2].predicate) (xop1, xmode1)
649 && xmode1 != VOIDmode)
650 xop1 = copy_to_mode_reg (xmode1, xop1);
651
652 if (wide_op)
653 {
654 if (! (*insn_data[icode].operand[3].predicate) (wxop, wxmode)
655 && wxmode != VOIDmode)
656 wxop = copy_to_mode_reg (wxmode, wxop);
657
658 pat = GEN_FCN (icode) (temp, xop0, xop1, wxop);
659 }
660 else
661 pat = GEN_FCN (icode) (temp, xop0, xop1);
662 }
663 else
664 {
665 if (wide_op)
666 {
667 if (! (*insn_data[icode].operand[2].predicate) (wxop, wxmode)
668 && wxmode != VOIDmode)
669 wxop = copy_to_mode_reg (wxmode, wxop);
670
671 pat = GEN_FCN (icode) (temp, xop0, wxop);
672 }
673 else
674 pat = GEN_FCN (icode) (temp, xop0);
675 }
676
677 emit_insn (pat);
678 return temp;
679 }
680
681 /* Generate code to perform an operation specified by TERNARY_OPTAB
682 on operands OP0, OP1 and OP2, with result having machine-mode MODE.
683
684 UNSIGNEDP is for the case where we have to widen the operands
685 to perform the operation. It says to use zero-extension.
686
687 If TARGET is nonzero, the value
688 is generated there, if it is convenient to do so.
689 In all cases an rtx is returned for the locus of the value;
690 this may or may not be TARGET. */
691
692 rtx
693 expand_ternary_op (enum machine_mode mode, optab ternary_optab, rtx op0,
694 rtx op1, rtx op2, rtx target, int unsignedp)
695 {
696 int icode = (int) optab_handler (ternary_optab, mode)->insn_code;
697 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
698 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
699 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
700 rtx temp;
701 rtx pat;
702 rtx xop0 = op0, xop1 = op1, xop2 = op2;
703
704 gcc_assert (optab_handler (ternary_optab, mode)->insn_code
705 != CODE_FOR_nothing);
706
707 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
708 temp = gen_reg_rtx (mode);
709 else
710 temp = target;
711
712 /* In case the insn wants input operands in modes different from
713 those of the actual operands, convert the operands. It would
714 seem that we don't need to convert CONST_INTs, but we do, so
715 that they're properly zero-extended, sign-extended or truncated
716 for their mode. */
717
718 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
719 xop0 = convert_modes (mode0,
720 GET_MODE (op0) != VOIDmode
721 ? GET_MODE (op0)
722 : mode,
723 xop0, unsignedp);
724
725 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
726 xop1 = convert_modes (mode1,
727 GET_MODE (op1) != VOIDmode
728 ? GET_MODE (op1)
729 : mode,
730 xop1, unsignedp);
731
732 if (GET_MODE (op2) != mode2 && mode2 != VOIDmode)
733 xop2 = convert_modes (mode2,
734 GET_MODE (op2) != VOIDmode
735 ? GET_MODE (op2)
736 : mode,
737 xop2, unsignedp);
738
739 /* Now, if insn's predicates don't allow our operands, put them into
740 pseudo regs. */
741
742 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
743 && mode0 != VOIDmode)
744 xop0 = copy_to_mode_reg (mode0, xop0);
745
746 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
747 && mode1 != VOIDmode)
748 xop1 = copy_to_mode_reg (mode1, xop1);
749
750 if (!insn_data[icode].operand[3].predicate (xop2, mode2)
751 && mode2 != VOIDmode)
752 xop2 = copy_to_mode_reg (mode2, xop2);
753
754 pat = GEN_FCN (icode) (temp, xop0, xop1, xop2);
755
756 emit_insn (pat);
757 return temp;
758 }
759
760
761 /* Like expand_binop, but return a constant rtx if the result can be
762 calculated at compile time. The arguments and return value are
763 otherwise the same as for expand_binop. */
764
765 static rtx
766 simplify_expand_binop (enum machine_mode mode, optab binoptab,
767 rtx op0, rtx op1, rtx target, int unsignedp,
768 enum optab_methods methods)
769 {
770 if (CONSTANT_P (op0) && CONSTANT_P (op1))
771 {
772 rtx x = simplify_binary_operation (binoptab->code, mode, op0, op1);
773
774 if (x)
775 return x;
776 }
777
778 return expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods);
779 }
780
781 /* Like simplify_expand_binop, but always put the result in TARGET.
782 Return true if the expansion succeeded. */
783
784 bool
785 force_expand_binop (enum machine_mode mode, optab binoptab,
786 rtx op0, rtx op1, rtx target, int unsignedp,
787 enum optab_methods methods)
788 {
789 rtx x = simplify_expand_binop (mode, binoptab, op0, op1,
790 target, unsignedp, methods);
791 if (x == 0)
792 return false;
793 if (x != target)
794 emit_move_insn (target, x);
795 return true;
796 }
797
798 /* Generate insns for VEC_LSHIFT_EXPR, VEC_RSHIFT_EXPR. */
799
800 rtx
801 expand_vec_shift_expr (tree vec_shift_expr, rtx target)
802 {
803 enum insn_code icode;
804 rtx rtx_op1, rtx_op2;
805 enum machine_mode mode1;
806 enum machine_mode mode2;
807 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_shift_expr));
808 tree vec_oprnd = TREE_OPERAND (vec_shift_expr, 0);
809 tree shift_oprnd = TREE_OPERAND (vec_shift_expr, 1);
810 optab shift_optab;
811 rtx pat;
812
813 switch (TREE_CODE (vec_shift_expr))
814 {
815 case VEC_RSHIFT_EXPR:
816 shift_optab = vec_shr_optab;
817 break;
818 case VEC_LSHIFT_EXPR:
819 shift_optab = vec_shl_optab;
820 break;
821 default:
822 gcc_unreachable ();
823 }
824
825 icode = optab_handler (shift_optab, mode)->insn_code;
826 gcc_assert (icode != CODE_FOR_nothing);
827
828 mode1 = insn_data[icode].operand[1].mode;
829 mode2 = insn_data[icode].operand[2].mode;
830
831 rtx_op1 = expand_normal (vec_oprnd);
832 if (!(*insn_data[icode].operand[1].predicate) (rtx_op1, mode1)
833 && mode1 != VOIDmode)
834 rtx_op1 = force_reg (mode1, rtx_op1);
835
836 rtx_op2 = expand_normal (shift_oprnd);
837 if (!(*insn_data[icode].operand[2].predicate) (rtx_op2, mode2)
838 && mode2 != VOIDmode)
839 rtx_op2 = force_reg (mode2, rtx_op2);
840
841 if (!target
842 || ! (*insn_data[icode].operand[0].predicate) (target, mode))
843 target = gen_reg_rtx (mode);
844
845 /* Emit instruction */
846 pat = GEN_FCN (icode) (target, rtx_op1, rtx_op2);
847 gcc_assert (pat);
848 emit_insn (pat);
849
850 return target;
851 }
852
853 /* This subroutine of expand_doubleword_shift handles the cases in which
854 the effective shift value is >= BITS_PER_WORD. The arguments and return
855 value are the same as for the parent routine, except that SUPERWORD_OP1
856 is the shift count to use when shifting OUTOF_INPUT into INTO_TARGET.
857 INTO_TARGET may be null if the caller has decided to calculate it. */
858
859 static bool
860 expand_superword_shift (optab binoptab, rtx outof_input, rtx superword_op1,
861 rtx outof_target, rtx into_target,
862 int unsignedp, enum optab_methods methods)
863 {
864 if (into_target != 0)
865 if (!force_expand_binop (word_mode, binoptab, outof_input, superword_op1,
866 into_target, unsignedp, methods))
867 return false;
868
869 if (outof_target != 0)
870 {
871 /* For a signed right shift, we must fill OUTOF_TARGET with copies
872 of the sign bit, otherwise we must fill it with zeros. */
873 if (binoptab != ashr_optab)
874 emit_move_insn (outof_target, CONST0_RTX (word_mode));
875 else
876 if (!force_expand_binop (word_mode, binoptab,
877 outof_input, GEN_INT (BITS_PER_WORD - 1),
878 outof_target, unsignedp, methods))
879 return false;
880 }
881 return true;
882 }
883
884 /* This subroutine of expand_doubleword_shift handles the cases in which
885 the effective shift value is < BITS_PER_WORD. The arguments and return
886 value are the same as for the parent routine. */
887
888 static bool
889 expand_subword_shift (enum machine_mode op1_mode, optab binoptab,
890 rtx outof_input, rtx into_input, rtx op1,
891 rtx outof_target, rtx into_target,
892 int unsignedp, enum optab_methods methods,
893 unsigned HOST_WIDE_INT shift_mask)
894 {
895 optab reverse_unsigned_shift, unsigned_shift;
896 rtx tmp, carries;
897
898 reverse_unsigned_shift = (binoptab == ashl_optab ? lshr_optab : ashl_optab);
899 unsigned_shift = (binoptab == ashl_optab ? ashl_optab : lshr_optab);
900
901 /* The low OP1 bits of INTO_TARGET come from the high bits of OUTOF_INPUT.
902 We therefore need to shift OUTOF_INPUT by (BITS_PER_WORD - OP1) bits in
903 the opposite direction to BINOPTAB. */
904 if (CONSTANT_P (op1) || shift_mask >= BITS_PER_WORD)
905 {
906 carries = outof_input;
907 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
908 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
909 0, true, methods);
910 }
911 else
912 {
913 /* We must avoid shifting by BITS_PER_WORD bits since that is either
914 the same as a zero shift (if shift_mask == BITS_PER_WORD - 1) or
915 has unknown behavior. Do a single shift first, then shift by the
916 remainder. It's OK to use ~OP1 as the remainder if shift counts
917 are truncated to the mode size. */
918 carries = expand_binop (word_mode, reverse_unsigned_shift,
919 outof_input, const1_rtx, 0, unsignedp, methods);
920 if (shift_mask == BITS_PER_WORD - 1)
921 {
922 tmp = immed_double_const (-1, -1, op1_mode);
923 tmp = simplify_expand_binop (op1_mode, xor_optab, op1, tmp,
924 0, true, methods);
925 }
926 else
927 {
928 tmp = immed_double_const (BITS_PER_WORD - 1, 0, op1_mode);
929 tmp = simplify_expand_binop (op1_mode, sub_optab, tmp, op1,
930 0, true, methods);
931 }
932 }
933 if (tmp == 0 || carries == 0)
934 return false;
935 carries = expand_binop (word_mode, reverse_unsigned_shift,
936 carries, tmp, 0, unsignedp, methods);
937 if (carries == 0)
938 return false;
939
940 /* Shift INTO_INPUT logically by OP1. This is the last use of INTO_INPUT
941 so the result can go directly into INTO_TARGET if convenient. */
942 tmp = expand_binop (word_mode, unsigned_shift, into_input, op1,
943 into_target, unsignedp, methods);
944 if (tmp == 0)
945 return false;
946
947 /* Now OR in the bits carried over from OUTOF_INPUT. */
948 if (!force_expand_binop (word_mode, ior_optab, tmp, carries,
949 into_target, unsignedp, methods))
950 return false;
951
952 /* Use a standard word_mode shift for the out-of half. */
953 if (outof_target != 0)
954 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
955 outof_target, unsignedp, methods))
956 return false;
957
958 return true;
959 }
960
961
962 #ifdef HAVE_conditional_move
963 /* Try implementing expand_doubleword_shift using conditional moves.
964 The shift is by < BITS_PER_WORD if (CMP_CODE CMP1 CMP2) is true,
965 otherwise it is by >= BITS_PER_WORD. SUBWORD_OP1 and SUPERWORD_OP1
966 are the shift counts to use in the former and latter case. All other
967 arguments are the same as the parent routine. */
968
969 static bool
970 expand_doubleword_shift_condmove (enum machine_mode op1_mode, optab binoptab,
971 enum rtx_code cmp_code, rtx cmp1, rtx cmp2,
972 rtx outof_input, rtx into_input,
973 rtx subword_op1, rtx superword_op1,
974 rtx outof_target, rtx into_target,
975 int unsignedp, enum optab_methods methods,
976 unsigned HOST_WIDE_INT shift_mask)
977 {
978 rtx outof_superword, into_superword;
979
980 /* Put the superword version of the output into OUTOF_SUPERWORD and
981 INTO_SUPERWORD. */
982 outof_superword = outof_target != 0 ? gen_reg_rtx (word_mode) : 0;
983 if (outof_target != 0 && subword_op1 == superword_op1)
984 {
985 /* The value INTO_TARGET >> SUBWORD_OP1, which we later store in
986 OUTOF_TARGET, is the same as the value of INTO_SUPERWORD. */
987 into_superword = outof_target;
988 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
989 outof_superword, 0, unsignedp, methods))
990 return false;
991 }
992 else
993 {
994 into_superword = gen_reg_rtx (word_mode);
995 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
996 outof_superword, into_superword,
997 unsignedp, methods))
998 return false;
999 }
1000
1001 /* Put the subword version directly in OUTOF_TARGET and INTO_TARGET. */
1002 if (!expand_subword_shift (op1_mode, binoptab,
1003 outof_input, into_input, subword_op1,
1004 outof_target, into_target,
1005 unsignedp, methods, shift_mask))
1006 return false;
1007
1008 /* Select between them. Do the INTO half first because INTO_SUPERWORD
1009 might be the current value of OUTOF_TARGET. */
1010 if (!emit_conditional_move (into_target, cmp_code, cmp1, cmp2, op1_mode,
1011 into_target, into_superword, word_mode, false))
1012 return false;
1013
1014 if (outof_target != 0)
1015 if (!emit_conditional_move (outof_target, cmp_code, cmp1, cmp2, op1_mode,
1016 outof_target, outof_superword,
1017 word_mode, false))
1018 return false;
1019
1020 return true;
1021 }
1022 #endif
1023
1024 /* Expand a doubleword shift (ashl, ashr or lshr) using word-mode shifts.
1025 OUTOF_INPUT and INTO_INPUT are the two word-sized halves of the first
1026 input operand; the shift moves bits in the direction OUTOF_INPUT->
1027 INTO_TARGET. OUTOF_TARGET and INTO_TARGET are the equivalent words
1028 of the target. OP1 is the shift count and OP1_MODE is its mode.
1029 If OP1 is constant, it will have been truncated as appropriate
1030 and is known to be nonzero.
1031
1032 If SHIFT_MASK is zero, the result of word shifts is undefined when the
1033 shift count is outside the range [0, BITS_PER_WORD). This routine must
1034 avoid generating such shifts for OP1s in the range [0, BITS_PER_WORD * 2).
1035
1036 If SHIFT_MASK is nonzero, all word-mode shift counts are effectively
1037 masked by it and shifts in the range [BITS_PER_WORD, SHIFT_MASK) will
1038 fill with zeros or sign bits as appropriate.
1039
1040 If SHIFT_MASK is BITS_PER_WORD - 1, this routine will synthesize
1041 a doubleword shift whose equivalent mask is BITS_PER_WORD * 2 - 1.
1042 Doing this preserves semantics required by SHIFT_COUNT_TRUNCATED.
1043 In all other cases, shifts by values outside [0, BITS_PER_UNIT * 2)
1044 are undefined.
1045
1046 BINOPTAB, UNSIGNEDP and METHODS are as for expand_binop. This function
1047 may not use INTO_INPUT after modifying INTO_TARGET, and similarly for
1048 OUTOF_INPUT and OUTOF_TARGET. OUTOF_TARGET can be null if the parent
1049 function wants to calculate it itself.
1050
1051 Return true if the shift could be successfully synthesized. */
1052
1053 static bool
1054 expand_doubleword_shift (enum machine_mode op1_mode, optab binoptab,
1055 rtx outof_input, rtx into_input, rtx op1,
1056 rtx outof_target, rtx into_target,
1057 int unsignedp, enum optab_methods methods,
1058 unsigned HOST_WIDE_INT shift_mask)
1059 {
1060 rtx superword_op1, tmp, cmp1, cmp2;
1061 rtx subword_label, done_label;
1062 enum rtx_code cmp_code;
1063
1064 /* See if word-mode shifts by BITS_PER_WORD...BITS_PER_WORD * 2 - 1 will
1065 fill the result with sign or zero bits as appropriate. If so, the value
1066 of OUTOF_TARGET will always be (SHIFT OUTOF_INPUT OP1). Recursively call
1067 this routine to calculate INTO_TARGET (which depends on both OUTOF_INPUT
1068 and INTO_INPUT), then emit code to set up OUTOF_TARGET.
1069
1070 This isn't worthwhile for constant shifts since the optimizers will
1071 cope better with in-range shift counts. */
1072 if (shift_mask >= BITS_PER_WORD
1073 && outof_target != 0
1074 && !CONSTANT_P (op1))
1075 {
1076 if (!expand_doubleword_shift (op1_mode, binoptab,
1077 outof_input, into_input, op1,
1078 0, into_target,
1079 unsignedp, methods, shift_mask))
1080 return false;
1081 if (!force_expand_binop (word_mode, binoptab, outof_input, op1,
1082 outof_target, unsignedp, methods))
1083 return false;
1084 return true;
1085 }
1086
1087 /* Set CMP_CODE, CMP1 and CMP2 so that the rtx (CMP_CODE CMP1 CMP2)
1088 is true when the effective shift value is less than BITS_PER_WORD.
1089 Set SUPERWORD_OP1 to the shift count that should be used to shift
1090 OUTOF_INPUT into INTO_TARGET when the condition is false. */
1091 tmp = immed_double_const (BITS_PER_WORD, 0, op1_mode);
1092 if (!CONSTANT_P (op1) && shift_mask == BITS_PER_WORD - 1)
1093 {
1094 /* Set CMP1 to OP1 & BITS_PER_WORD. The result is zero iff OP1
1095 is a subword shift count. */
1096 cmp1 = simplify_expand_binop (op1_mode, and_optab, op1, tmp,
1097 0, true, methods);
1098 cmp2 = CONST0_RTX (op1_mode);
1099 cmp_code = EQ;
1100 superword_op1 = op1;
1101 }
1102 else
1103 {
1104 /* Set CMP1 to OP1 - BITS_PER_WORD. */
1105 cmp1 = simplify_expand_binop (op1_mode, sub_optab, op1, tmp,
1106 0, true, methods);
1107 cmp2 = CONST0_RTX (op1_mode);
1108 cmp_code = LT;
1109 superword_op1 = cmp1;
1110 }
1111 if (cmp1 == 0)
1112 return false;
1113
1114 /* If we can compute the condition at compile time, pick the
1115 appropriate subroutine. */
1116 tmp = simplify_relational_operation (cmp_code, SImode, op1_mode, cmp1, cmp2);
1117 if (tmp != 0 && GET_CODE (tmp) == CONST_INT)
1118 {
1119 if (tmp == const0_rtx)
1120 return expand_superword_shift (binoptab, outof_input, superword_op1,
1121 outof_target, into_target,
1122 unsignedp, methods);
1123 else
1124 return expand_subword_shift (op1_mode, binoptab,
1125 outof_input, into_input, op1,
1126 outof_target, into_target,
1127 unsignedp, methods, shift_mask);
1128 }
1129
1130 #ifdef HAVE_conditional_move
1131 /* Try using conditional moves to generate straight-line code. */
1132 {
1133 rtx start = get_last_insn ();
1134 if (expand_doubleword_shift_condmove (op1_mode, binoptab,
1135 cmp_code, cmp1, cmp2,
1136 outof_input, into_input,
1137 op1, superword_op1,
1138 outof_target, into_target,
1139 unsignedp, methods, shift_mask))
1140 return true;
1141 delete_insns_since (start);
1142 }
1143 #endif
1144
1145 /* As a last resort, use branches to select the correct alternative. */
1146 subword_label = gen_label_rtx ();
1147 done_label = gen_label_rtx ();
1148
1149 NO_DEFER_POP;
1150 do_compare_rtx_and_jump (cmp1, cmp2, cmp_code, false, op1_mode,
1151 0, 0, subword_label);
1152 OK_DEFER_POP;
1153
1154 if (!expand_superword_shift (binoptab, outof_input, superword_op1,
1155 outof_target, into_target,
1156 unsignedp, methods))
1157 return false;
1158
1159 emit_jump_insn (gen_jump (done_label));
1160 emit_barrier ();
1161 emit_label (subword_label);
1162
1163 if (!expand_subword_shift (op1_mode, binoptab,
1164 outof_input, into_input, op1,
1165 outof_target, into_target,
1166 unsignedp, methods, shift_mask))
1167 return false;
1168
1169 emit_label (done_label);
1170 return true;
1171 }
1172 \f
1173 /* Subroutine of expand_binop. Perform a double word multiplication of
1174 operands OP0 and OP1 both of mode MODE, which is exactly twice as wide
1175 as the target's word_mode. This function return NULL_RTX if anything
1176 goes wrong, in which case it may have already emitted instructions
1177 which need to be deleted.
1178
1179 If we want to multiply two two-word values and have normal and widening
1180 multiplies of single-word values, we can do this with three smaller
1181 multiplications.
1182
1183 The multiplication proceeds as follows:
1184 _______________________
1185 [__op0_high_|__op0_low__]
1186 _______________________
1187 * [__op1_high_|__op1_low__]
1188 _______________________________________________
1189 _______________________
1190 (1) [__op0_low__*__op1_low__]
1191 _______________________
1192 (2a) [__op0_low__*__op1_high_]
1193 _______________________
1194 (2b) [__op0_high_*__op1_low__]
1195 _______________________
1196 (3) [__op0_high_*__op1_high_]
1197
1198
1199 This gives a 4-word result. Since we are only interested in the
1200 lower 2 words, partial result (3) and the upper words of (2a) and
1201 (2b) don't need to be calculated. Hence (2a) and (2b) can be
1202 calculated using non-widening multiplication.
1203
1204 (1), however, needs to be calculated with an unsigned widening
1205 multiplication. If this operation is not directly supported we
1206 try using a signed widening multiplication and adjust the result.
1207 This adjustment works as follows:
1208
1209 If both operands are positive then no adjustment is needed.
1210
1211 If the operands have different signs, for example op0_low < 0 and
1212 op1_low >= 0, the instruction treats the most significant bit of
1213 op0_low as a sign bit instead of a bit with significance
1214 2**(BITS_PER_WORD-1), i.e. the instruction multiplies op1_low
1215 with 2**BITS_PER_WORD - op0_low, and two's complements the
1216 result. Conclusion: We need to add op1_low * 2**BITS_PER_WORD to
1217 the result.
1218
1219 Similarly, if both operands are negative, we need to add
1220 (op0_low + op1_low) * 2**BITS_PER_WORD.
1221
1222 We use a trick to adjust quickly. We logically shift op0_low right
1223 (op1_low) BITS_PER_WORD-1 steps to get 0 or 1, and add this to
1224 op0_high (op1_high) before it is used to calculate 2b (2a). If no
1225 logical shift exists, we do an arithmetic right shift and subtract
1226 the 0 or -1. */
1227
1228 static rtx
1229 expand_doubleword_mult (enum machine_mode mode, rtx op0, rtx op1, rtx target,
1230 bool umulp, enum optab_methods methods)
1231 {
1232 int low = (WORDS_BIG_ENDIAN ? 1 : 0);
1233 int high = (WORDS_BIG_ENDIAN ? 0 : 1);
1234 rtx wordm1 = umulp ? NULL_RTX : GEN_INT (BITS_PER_WORD - 1);
1235 rtx product, adjust, product_high, temp;
1236
1237 rtx op0_high = operand_subword_force (op0, high, mode);
1238 rtx op0_low = operand_subword_force (op0, low, mode);
1239 rtx op1_high = operand_subword_force (op1, high, mode);
1240 rtx op1_low = operand_subword_force (op1, low, mode);
1241
1242 /* If we're using an unsigned multiply to directly compute the product
1243 of the low-order words of the operands and perform any required
1244 adjustments of the operands, we begin by trying two more multiplications
1245 and then computing the appropriate sum.
1246
1247 We have checked above that the required addition is provided.
1248 Full-word addition will normally always succeed, especially if
1249 it is provided at all, so we don't worry about its failure. The
1250 multiplication may well fail, however, so we do handle that. */
1251
1252 if (!umulp)
1253 {
1254 /* ??? This could be done with emit_store_flag where available. */
1255 temp = expand_binop (word_mode, lshr_optab, op0_low, wordm1,
1256 NULL_RTX, 1, methods);
1257 if (temp)
1258 op0_high = expand_binop (word_mode, add_optab, op0_high, temp,
1259 NULL_RTX, 0, OPTAB_DIRECT);
1260 else
1261 {
1262 temp = expand_binop (word_mode, ashr_optab, op0_low, wordm1,
1263 NULL_RTX, 0, methods);
1264 if (!temp)
1265 return NULL_RTX;
1266 op0_high = expand_binop (word_mode, sub_optab, op0_high, temp,
1267 NULL_RTX, 0, OPTAB_DIRECT);
1268 }
1269
1270 if (!op0_high)
1271 return NULL_RTX;
1272 }
1273
1274 adjust = expand_binop (word_mode, smul_optab, op0_high, op1_low,
1275 NULL_RTX, 0, OPTAB_DIRECT);
1276 if (!adjust)
1277 return NULL_RTX;
1278
1279 /* OP0_HIGH should now be dead. */
1280
1281 if (!umulp)
1282 {
1283 /* ??? This could be done with emit_store_flag where available. */
1284 temp = expand_binop (word_mode, lshr_optab, op1_low, wordm1,
1285 NULL_RTX, 1, methods);
1286 if (temp)
1287 op1_high = expand_binop (word_mode, add_optab, op1_high, temp,
1288 NULL_RTX, 0, OPTAB_DIRECT);
1289 else
1290 {
1291 temp = expand_binop (word_mode, ashr_optab, op1_low, wordm1,
1292 NULL_RTX, 0, methods);
1293 if (!temp)
1294 return NULL_RTX;
1295 op1_high = expand_binop (word_mode, sub_optab, op1_high, temp,
1296 NULL_RTX, 0, OPTAB_DIRECT);
1297 }
1298
1299 if (!op1_high)
1300 return NULL_RTX;
1301 }
1302
1303 temp = expand_binop (word_mode, smul_optab, op1_high, op0_low,
1304 NULL_RTX, 0, OPTAB_DIRECT);
1305 if (!temp)
1306 return NULL_RTX;
1307
1308 /* OP1_HIGH should now be dead. */
1309
1310 adjust = expand_binop (word_mode, add_optab, adjust, temp,
1311 adjust, 0, OPTAB_DIRECT);
1312
1313 if (target && !REG_P (target))
1314 target = NULL_RTX;
1315
1316 if (umulp)
1317 product = expand_binop (mode, umul_widen_optab, op0_low, op1_low,
1318 target, 1, OPTAB_DIRECT);
1319 else
1320 product = expand_binop (mode, smul_widen_optab, op0_low, op1_low,
1321 target, 1, OPTAB_DIRECT);
1322
1323 if (!product)
1324 return NULL_RTX;
1325
1326 product_high = operand_subword (product, high, 1, mode);
1327 adjust = expand_binop (word_mode, add_optab, product_high, adjust,
1328 REG_P (product_high) ? product_high : adjust,
1329 0, OPTAB_DIRECT);
1330 emit_move_insn (product_high, adjust);
1331 return product;
1332 }
1333 \f
1334 /* Wrapper around expand_binop which takes an rtx code to specify
1335 the operation to perform, not an optab pointer. All other
1336 arguments are the same. */
1337 rtx
1338 expand_simple_binop (enum machine_mode mode, enum rtx_code code, rtx op0,
1339 rtx op1, rtx target, int unsignedp,
1340 enum optab_methods methods)
1341 {
1342 optab binop = code_to_optab[(int) code];
1343 gcc_assert (binop);
1344
1345 return expand_binop (mode, binop, op0, op1, target, unsignedp, methods);
1346 }
1347
1348 /* Return whether OP0 and OP1 should be swapped when expanding a commutative
1349 binop. Order them according to commutative_operand_precedence and, if
1350 possible, try to put TARGET or a pseudo first. */
1351 static bool
1352 swap_commutative_operands_with_target (rtx target, rtx op0, rtx op1)
1353 {
1354 int op0_prec = commutative_operand_precedence (op0);
1355 int op1_prec = commutative_operand_precedence (op1);
1356
1357 if (op0_prec < op1_prec)
1358 return true;
1359
1360 if (op0_prec > op1_prec)
1361 return false;
1362
1363 /* With equal precedence, both orders are ok, but it is better if the
1364 first operand is TARGET, or if both TARGET and OP0 are pseudos. */
1365 if (target == 0 || REG_P (target))
1366 return (REG_P (op1) && !REG_P (op0)) || target == op1;
1367 else
1368 return rtx_equal_p (op1, target);
1369 }
1370
1371 /* Return true if BINOPTAB implements a shift operation. */
1372
1373 static bool
1374 shift_optab_p (optab binoptab)
1375 {
1376 switch (binoptab->code)
1377 {
1378 case ASHIFT:
1379 case SS_ASHIFT:
1380 case US_ASHIFT:
1381 case ASHIFTRT:
1382 case LSHIFTRT:
1383 case ROTATE:
1384 case ROTATERT:
1385 return true;
1386
1387 default:
1388 return false;
1389 }
1390 }
1391
1392 /* Return true if BINOPTAB implements a commutative binary operation. */
1393
1394 static bool
1395 commutative_optab_p (optab binoptab)
1396 {
1397 return (GET_RTX_CLASS (binoptab->code) == RTX_COMM_ARITH
1398 || binoptab == smul_widen_optab
1399 || binoptab == umul_widen_optab
1400 || binoptab == smul_highpart_optab
1401 || binoptab == umul_highpart_optab);
1402 }
1403
1404 /* X is to be used in mode MODE as an operand to BINOPTAB. If we're
1405 optimizing, and if the operand is a constant that costs more than
1406 1 instruction, force the constant into a register and return that
1407 register. Return X otherwise. UNSIGNEDP says whether X is unsigned. */
1408
1409 static rtx
1410 avoid_expensive_constant (enum machine_mode mode, optab binoptab,
1411 rtx x, bool unsignedp)
1412 {
1413 if (mode != VOIDmode
1414 && optimize
1415 && CONSTANT_P (x)
1416 && rtx_cost (x, binoptab->code, optimize_insn_for_speed_p ())
1417 > COSTS_N_INSNS (1))
1418 {
1419 if (GET_CODE (x) == CONST_INT)
1420 {
1421 HOST_WIDE_INT intval = trunc_int_for_mode (INTVAL (x), mode);
1422 if (intval != INTVAL (x))
1423 x = GEN_INT (intval);
1424 }
1425 else
1426 x = convert_modes (mode, VOIDmode, x, unsignedp);
1427 x = force_reg (mode, x);
1428 }
1429 return x;
1430 }
1431
1432 /* Helper function for expand_binop: handle the case where there
1433 is an insn that directly implements the indicated operation.
1434 Returns null if this is not possible. */
1435 static rtx
1436 expand_binop_directly (enum machine_mode mode, optab binoptab,
1437 rtx op0, rtx op1,
1438 rtx target, int unsignedp, enum optab_methods methods,
1439 rtx last)
1440 {
1441 int icode = (int) optab_handler (binoptab, mode)->insn_code;
1442 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
1443 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
1444 enum machine_mode tmp_mode;
1445 bool commutative_p;
1446 rtx pat;
1447 rtx xop0 = op0, xop1 = op1;
1448 rtx temp;
1449 rtx swap;
1450
1451 if (target)
1452 temp = target;
1453 else
1454 temp = gen_reg_rtx (mode);
1455
1456 /* If it is a commutative operator and the modes would match
1457 if we would swap the operands, we can save the conversions. */
1458 commutative_p = commutative_optab_p (binoptab);
1459 if (commutative_p
1460 && GET_MODE (xop0) != mode0 && GET_MODE (xop1) != mode1
1461 && GET_MODE (xop0) == mode1 && GET_MODE (xop1) == mode1)
1462 {
1463 swap = xop0;
1464 xop0 = xop1;
1465 xop1 = swap;
1466 }
1467
1468 /* If we are optimizing, force expensive constants into a register. */
1469 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
1470 if (!shift_optab_p (binoptab))
1471 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
1472
1473 /* In case the insn wants input operands in modes different from
1474 those of the actual operands, convert the operands. It would
1475 seem that we don't need to convert CONST_INTs, but we do, so
1476 that they're properly zero-extended, sign-extended or truncated
1477 for their mode. */
1478
1479 if (GET_MODE (xop0) != mode0 && mode0 != VOIDmode)
1480 xop0 = convert_modes (mode0,
1481 GET_MODE (xop0) != VOIDmode
1482 ? GET_MODE (xop0)
1483 : mode,
1484 xop0, unsignedp);
1485
1486 if (GET_MODE (xop1) != mode1 && mode1 != VOIDmode)
1487 xop1 = convert_modes (mode1,
1488 GET_MODE (xop1) != VOIDmode
1489 ? GET_MODE (xop1)
1490 : mode,
1491 xop1, unsignedp);
1492
1493 /* If operation is commutative,
1494 try to make the first operand a register.
1495 Even better, try to make it the same as the target.
1496 Also try to make the last operand a constant. */
1497 if (commutative_p
1498 && swap_commutative_operands_with_target (target, xop0, xop1))
1499 {
1500 swap = xop1;
1501 xop1 = xop0;
1502 xop0 = swap;
1503 }
1504
1505 /* Now, if insn's predicates don't allow our operands, put them into
1506 pseudo regs. */
1507
1508 if (!insn_data[icode].operand[1].predicate (xop0, mode0)
1509 && mode0 != VOIDmode)
1510 xop0 = copy_to_mode_reg (mode0, xop0);
1511
1512 if (!insn_data[icode].operand[2].predicate (xop1, mode1)
1513 && mode1 != VOIDmode)
1514 xop1 = copy_to_mode_reg (mode1, xop1);
1515
1516 if (binoptab == vec_pack_trunc_optab
1517 || binoptab == vec_pack_usat_optab
1518 || binoptab == vec_pack_ssat_optab
1519 || binoptab == vec_pack_ufix_trunc_optab
1520 || binoptab == vec_pack_sfix_trunc_optab)
1521 {
1522 /* The mode of the result is different then the mode of the
1523 arguments. */
1524 tmp_mode = insn_data[icode].operand[0].mode;
1525 if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
1526 return 0;
1527 }
1528 else
1529 tmp_mode = mode;
1530
1531 if (!insn_data[icode].operand[0].predicate (temp, tmp_mode))
1532 temp = gen_reg_rtx (tmp_mode);
1533
1534 pat = GEN_FCN (icode) (temp, xop0, xop1);
1535 if (pat)
1536 {
1537 /* If PAT is composed of more than one insn, try to add an appropriate
1538 REG_EQUAL note to it. If we can't because TEMP conflicts with an
1539 operand, call expand_binop again, this time without a target. */
1540 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
1541 && ! add_equal_note (pat, temp, binoptab->code, xop0, xop1))
1542 {
1543 delete_insns_since (last);
1544 return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
1545 unsignedp, methods);
1546 }
1547
1548 emit_insn (pat);
1549 return temp;
1550 }
1551
1552 delete_insns_since (last);
1553 return NULL_RTX;
1554 }
1555
1556 /* Generate code to perform an operation specified by BINOPTAB
1557 on operands OP0 and OP1, with result having machine-mode MODE.
1558
1559 UNSIGNEDP is for the case where we have to widen the operands
1560 to perform the operation. It says to use zero-extension.
1561
1562 If TARGET is nonzero, the value
1563 is generated there, if it is convenient to do so.
1564 In all cases an rtx is returned for the locus of the value;
1565 this may or may not be TARGET. */
1566
1567 rtx
1568 expand_binop (enum machine_mode mode, optab binoptab, rtx op0, rtx op1,
1569 rtx target, int unsignedp, enum optab_methods methods)
1570 {
1571 enum optab_methods next_methods
1572 = (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN
1573 ? OPTAB_WIDEN : methods);
1574 enum mode_class mclass;
1575 enum machine_mode wider_mode;
1576 rtx libfunc;
1577 rtx temp;
1578 rtx entry_last = get_last_insn ();
1579 rtx last;
1580
1581 mclass = GET_MODE_CLASS (mode);
1582
1583 /* If subtracting an integer constant, convert this into an addition of
1584 the negated constant. */
1585
1586 if (binoptab == sub_optab && GET_CODE (op1) == CONST_INT)
1587 {
1588 op1 = negate_rtx (mode, op1);
1589 binoptab = add_optab;
1590 }
1591
1592 /* Record where to delete back to if we backtrack. */
1593 last = get_last_insn ();
1594
1595 /* If we can do it with a three-operand insn, do so. */
1596
1597 if (methods != OPTAB_MUST_WIDEN
1598 && optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
1599 {
1600 temp = expand_binop_directly (mode, binoptab, op0, op1, target,
1601 unsignedp, methods, last);
1602 if (temp)
1603 return temp;
1604 }
1605
1606 /* If we were trying to rotate, and that didn't work, try rotating
1607 the other direction before falling back to shifts and bitwise-or. */
1608 if (((binoptab == rotl_optab
1609 && optab_handler (rotr_optab, mode)->insn_code != CODE_FOR_nothing)
1610 || (binoptab == rotr_optab
1611 && optab_handler (rotl_optab, mode)->insn_code != CODE_FOR_nothing))
1612 && mclass == MODE_INT)
1613 {
1614 optab otheroptab = (binoptab == rotl_optab ? rotr_optab : rotl_optab);
1615 rtx newop1;
1616 unsigned int bits = GET_MODE_BITSIZE (mode);
1617
1618 if (GET_CODE (op1) == CONST_INT)
1619 newop1 = GEN_INT (bits - INTVAL (op1));
1620 else if (targetm.shift_truncation_mask (mode) == bits - 1)
1621 newop1 = negate_rtx (mode, op1);
1622 else
1623 newop1 = expand_binop (mode, sub_optab,
1624 GEN_INT (bits), op1,
1625 NULL_RTX, unsignedp, OPTAB_DIRECT);
1626
1627 temp = expand_binop_directly (mode, otheroptab, op0, newop1,
1628 target, unsignedp, methods, last);
1629 if (temp)
1630 return temp;
1631 }
1632
1633 /* If this is a multiply, see if we can do a widening operation that
1634 takes operands of this mode and makes a wider mode. */
1635
1636 if (binoptab == smul_optab
1637 && GET_MODE_WIDER_MODE (mode) != VOIDmode
1638 && ((optab_handler ((unsignedp ? umul_widen_optab : smul_widen_optab),
1639 GET_MODE_WIDER_MODE (mode))->insn_code)
1640 != CODE_FOR_nothing))
1641 {
1642 temp = expand_binop (GET_MODE_WIDER_MODE (mode),
1643 unsignedp ? umul_widen_optab : smul_widen_optab,
1644 op0, op1, NULL_RTX, unsignedp, OPTAB_DIRECT);
1645
1646 if (temp != 0)
1647 {
1648 if (GET_MODE_CLASS (mode) == MODE_INT
1649 && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1650 GET_MODE_BITSIZE (GET_MODE (temp))))
1651 return gen_lowpart (mode, temp);
1652 else
1653 return convert_to_mode (mode, temp, unsignedp);
1654 }
1655 }
1656
1657 /* Look for a wider mode of the same class for which we think we
1658 can open-code the operation. Check for a widening multiply at the
1659 wider mode as well. */
1660
1661 if (CLASS_HAS_WIDER_MODES_P (mclass)
1662 && methods != OPTAB_DIRECT && methods != OPTAB_LIB)
1663 for (wider_mode = GET_MODE_WIDER_MODE (mode);
1664 wider_mode != VOIDmode;
1665 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
1666 {
1667 if (optab_handler (binoptab, wider_mode)->insn_code != CODE_FOR_nothing
1668 || (binoptab == smul_optab
1669 && GET_MODE_WIDER_MODE (wider_mode) != VOIDmode
1670 && ((optab_handler ((unsignedp ? umul_widen_optab
1671 : smul_widen_optab),
1672 GET_MODE_WIDER_MODE (wider_mode))->insn_code)
1673 != CODE_FOR_nothing)))
1674 {
1675 rtx xop0 = op0, xop1 = op1;
1676 int no_extend = 0;
1677
1678 /* For certain integer operations, we need not actually extend
1679 the narrow operands, as long as we will truncate
1680 the results to the same narrowness. */
1681
1682 if ((binoptab == ior_optab || binoptab == and_optab
1683 || binoptab == xor_optab
1684 || binoptab == add_optab || binoptab == sub_optab
1685 || binoptab == smul_optab || binoptab == ashl_optab)
1686 && mclass == MODE_INT)
1687 {
1688 no_extend = 1;
1689 xop0 = avoid_expensive_constant (mode, binoptab,
1690 xop0, unsignedp);
1691 if (binoptab != ashl_optab)
1692 xop1 = avoid_expensive_constant (mode, binoptab,
1693 xop1, unsignedp);
1694 }
1695
1696 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp, no_extend);
1697
1698 /* The second operand of a shift must always be extended. */
1699 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
1700 no_extend && binoptab != ashl_optab);
1701
1702 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
1703 unsignedp, OPTAB_DIRECT);
1704 if (temp)
1705 {
1706 if (mclass != MODE_INT
1707 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
1708 GET_MODE_BITSIZE (wider_mode)))
1709 {
1710 if (target == 0)
1711 target = gen_reg_rtx (mode);
1712 convert_move (target, temp, 0);
1713 return target;
1714 }
1715 else
1716 return gen_lowpart (mode, temp);
1717 }
1718 else
1719 delete_insns_since (last);
1720 }
1721 }
1722
1723 /* If operation is commutative,
1724 try to make the first operand a register.
1725 Even better, try to make it the same as the target.
1726 Also try to make the last operand a constant. */
1727 if (commutative_optab_p (binoptab)
1728 && swap_commutative_operands_with_target (target, op0, op1))
1729 {
1730 temp = op1;
1731 op1 = op0;
1732 op0 = temp;
1733 }
1734
1735 /* These can be done a word at a time. */
1736 if ((binoptab == and_optab || binoptab == ior_optab || binoptab == xor_optab)
1737 && mclass == MODE_INT
1738 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
1739 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
1740 {
1741 int i;
1742 rtx insns;
1743 rtx equiv_value;
1744
1745 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1746 won't be accurate, so use a new target. */
1747 if (target == 0 || target == op0 || target == op1)
1748 target = gen_reg_rtx (mode);
1749
1750 start_sequence ();
1751
1752 /* Do the actual arithmetic. */
1753 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
1754 {
1755 rtx target_piece = operand_subword (target, i, 1, mode);
1756 rtx x = expand_binop (word_mode, binoptab,
1757 operand_subword_force (op0, i, mode),
1758 operand_subword_force (op1, i, mode),
1759 target_piece, unsignedp, next_methods);
1760
1761 if (x == 0)
1762 break;
1763
1764 if (target_piece != x)
1765 emit_move_insn (target_piece, x);
1766 }
1767
1768 insns = get_insns ();
1769 end_sequence ();
1770
1771 if (i == GET_MODE_BITSIZE (mode) / BITS_PER_WORD)
1772 {
1773 if (binoptab->code != UNKNOWN)
1774 equiv_value
1775 = gen_rtx_fmt_ee (binoptab->code, mode,
1776 copy_rtx (op0), copy_rtx (op1));
1777 else
1778 equiv_value = 0;
1779
1780 emit_insn (insns);
1781 return target;
1782 }
1783 }
1784
1785 /* Synthesize double word shifts from single word shifts. */
1786 if ((binoptab == lshr_optab || binoptab == ashl_optab
1787 || binoptab == ashr_optab)
1788 && mclass == MODE_INT
1789 && (GET_CODE (op1) == CONST_INT || optimize_insn_for_speed_p ())
1790 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1791 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing
1792 && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
1793 && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
1794 {
1795 unsigned HOST_WIDE_INT shift_mask, double_shift_mask;
1796 enum machine_mode op1_mode;
1797
1798 double_shift_mask = targetm.shift_truncation_mask (mode);
1799 shift_mask = targetm.shift_truncation_mask (word_mode);
1800 op1_mode = GET_MODE (op1) != VOIDmode ? GET_MODE (op1) : word_mode;
1801
1802 /* Apply the truncation to constant shifts. */
1803 if (double_shift_mask > 0 && GET_CODE (op1) == CONST_INT)
1804 op1 = GEN_INT (INTVAL (op1) & double_shift_mask);
1805
1806 if (op1 == CONST0_RTX (op1_mode))
1807 return op0;
1808
1809 /* Make sure that this is a combination that expand_doubleword_shift
1810 can handle. See the comments there for details. */
1811 if (double_shift_mask == 0
1812 || (shift_mask == BITS_PER_WORD - 1
1813 && double_shift_mask == BITS_PER_WORD * 2 - 1))
1814 {
1815 rtx insns;
1816 rtx into_target, outof_target;
1817 rtx into_input, outof_input;
1818 int left_shift, outof_word;
1819
1820 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1821 won't be accurate, so use a new target. */
1822 if (target == 0 || target == op0 || target == op1)
1823 target = gen_reg_rtx (mode);
1824
1825 start_sequence ();
1826
1827 /* OUTOF_* is the word we are shifting bits away from, and
1828 INTO_* is the word that we are shifting bits towards, thus
1829 they differ depending on the direction of the shift and
1830 WORDS_BIG_ENDIAN. */
1831
1832 left_shift = binoptab == ashl_optab;
1833 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1834
1835 outof_target = operand_subword (target, outof_word, 1, mode);
1836 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1837
1838 outof_input = operand_subword_force (op0, outof_word, mode);
1839 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1840
1841 if (expand_doubleword_shift (op1_mode, binoptab,
1842 outof_input, into_input, op1,
1843 outof_target, into_target,
1844 unsignedp, next_methods, shift_mask))
1845 {
1846 insns = get_insns ();
1847 end_sequence ();
1848
1849 emit_insn (insns);
1850 return target;
1851 }
1852 end_sequence ();
1853 }
1854 }
1855
1856 /* Synthesize double word rotates from single word shifts. */
1857 if ((binoptab == rotl_optab || binoptab == rotr_optab)
1858 && mclass == MODE_INT
1859 && GET_CODE (op1) == CONST_INT
1860 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
1861 && optab_handler (ashl_optab, word_mode)->insn_code != CODE_FOR_nothing
1862 && optab_handler (lshr_optab, word_mode)->insn_code != CODE_FOR_nothing)
1863 {
1864 rtx insns;
1865 rtx into_target, outof_target;
1866 rtx into_input, outof_input;
1867 rtx inter;
1868 int shift_count, left_shift, outof_word;
1869
1870 /* If TARGET is the same as one of the operands, the REG_EQUAL note
1871 won't be accurate, so use a new target. Do this also if target is not
1872 a REG, first because having a register instead may open optimization
1873 opportunities, and second because if target and op0 happen to be MEMs
1874 designating the same location, we would risk clobbering it too early
1875 in the code sequence we generate below. */
1876 if (target == 0 || target == op0 || target == op1 || ! REG_P (target))
1877 target = gen_reg_rtx (mode);
1878
1879 start_sequence ();
1880
1881 shift_count = INTVAL (op1);
1882
1883 /* OUTOF_* is the word we are shifting bits away from, and
1884 INTO_* is the word that we are shifting bits towards, thus
1885 they differ depending on the direction of the shift and
1886 WORDS_BIG_ENDIAN. */
1887
1888 left_shift = (binoptab == rotl_optab);
1889 outof_word = left_shift ^ ! WORDS_BIG_ENDIAN;
1890
1891 outof_target = operand_subword (target, outof_word, 1, mode);
1892 into_target = operand_subword (target, 1 - outof_word, 1, mode);
1893
1894 outof_input = operand_subword_force (op0, outof_word, mode);
1895 into_input = operand_subword_force (op0, 1 - outof_word, mode);
1896
1897 if (shift_count == BITS_PER_WORD)
1898 {
1899 /* This is just a word swap. */
1900 emit_move_insn (outof_target, into_input);
1901 emit_move_insn (into_target, outof_input);
1902 inter = const0_rtx;
1903 }
1904 else
1905 {
1906 rtx into_temp1, into_temp2, outof_temp1, outof_temp2;
1907 rtx first_shift_count, second_shift_count;
1908 optab reverse_unsigned_shift, unsigned_shift;
1909
1910 reverse_unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1911 ? lshr_optab : ashl_optab);
1912
1913 unsigned_shift = (left_shift ^ (shift_count < BITS_PER_WORD)
1914 ? ashl_optab : lshr_optab);
1915
1916 if (shift_count > BITS_PER_WORD)
1917 {
1918 first_shift_count = GEN_INT (shift_count - BITS_PER_WORD);
1919 second_shift_count = GEN_INT (2 * BITS_PER_WORD - shift_count);
1920 }
1921 else
1922 {
1923 first_shift_count = GEN_INT (BITS_PER_WORD - shift_count);
1924 second_shift_count = GEN_INT (shift_count);
1925 }
1926
1927 into_temp1 = expand_binop (word_mode, unsigned_shift,
1928 outof_input, first_shift_count,
1929 NULL_RTX, unsignedp, next_methods);
1930 into_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1931 into_input, second_shift_count,
1932 NULL_RTX, unsignedp, next_methods);
1933
1934 if (into_temp1 != 0 && into_temp2 != 0)
1935 inter = expand_binop (word_mode, ior_optab, into_temp1, into_temp2,
1936 into_target, unsignedp, next_methods);
1937 else
1938 inter = 0;
1939
1940 if (inter != 0 && inter != into_target)
1941 emit_move_insn (into_target, inter);
1942
1943 outof_temp1 = expand_binop (word_mode, unsigned_shift,
1944 into_input, first_shift_count,
1945 NULL_RTX, unsignedp, next_methods);
1946 outof_temp2 = expand_binop (word_mode, reverse_unsigned_shift,
1947 outof_input, second_shift_count,
1948 NULL_RTX, unsignedp, next_methods);
1949
1950 if (inter != 0 && outof_temp1 != 0 && outof_temp2 != 0)
1951 inter = expand_binop (word_mode, ior_optab,
1952 outof_temp1, outof_temp2,
1953 outof_target, unsignedp, next_methods);
1954
1955 if (inter != 0 && inter != outof_target)
1956 emit_move_insn (outof_target, inter);
1957 }
1958
1959 insns = get_insns ();
1960 end_sequence ();
1961
1962 if (inter != 0)
1963 {
1964 emit_insn (insns);
1965 return target;
1966 }
1967 }
1968
1969 /* These can be done a word at a time by propagating carries. */
1970 if ((binoptab == add_optab || binoptab == sub_optab)
1971 && mclass == MODE_INT
1972 && GET_MODE_SIZE (mode) >= 2 * UNITS_PER_WORD
1973 && optab_handler (binoptab, word_mode)->insn_code != CODE_FOR_nothing)
1974 {
1975 unsigned int i;
1976 optab otheroptab = binoptab == add_optab ? sub_optab : add_optab;
1977 const unsigned int nwords = GET_MODE_BITSIZE (mode) / BITS_PER_WORD;
1978 rtx carry_in = NULL_RTX, carry_out = NULL_RTX;
1979 rtx xop0, xop1, xtarget;
1980
1981 /* We can handle either a 1 or -1 value for the carry. If STORE_FLAG
1982 value is one of those, use it. Otherwise, use 1 since it is the
1983 one easiest to get. */
1984 #if STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1
1985 int normalizep = STORE_FLAG_VALUE;
1986 #else
1987 int normalizep = 1;
1988 #endif
1989
1990 /* Prepare the operands. */
1991 xop0 = force_reg (mode, op0);
1992 xop1 = force_reg (mode, op1);
1993
1994 xtarget = gen_reg_rtx (mode);
1995
1996 if (target == 0 || !REG_P (target))
1997 target = xtarget;
1998
1999 /* Indicate for flow that the entire target reg is being set. */
2000 if (REG_P (target))
2001 emit_clobber (xtarget);
2002
2003 /* Do the actual arithmetic. */
2004 for (i = 0; i < nwords; i++)
2005 {
2006 int index = (WORDS_BIG_ENDIAN ? nwords - i - 1 : i);
2007 rtx target_piece = operand_subword (xtarget, index, 1, mode);
2008 rtx op0_piece = operand_subword_force (xop0, index, mode);
2009 rtx op1_piece = operand_subword_force (xop1, index, mode);
2010 rtx x;
2011
2012 /* Main add/subtract of the input operands. */
2013 x = expand_binop (word_mode, binoptab,
2014 op0_piece, op1_piece,
2015 target_piece, unsignedp, next_methods);
2016 if (x == 0)
2017 break;
2018
2019 if (i + 1 < nwords)
2020 {
2021 /* Store carry from main add/subtract. */
2022 carry_out = gen_reg_rtx (word_mode);
2023 carry_out = emit_store_flag_force (carry_out,
2024 (binoptab == add_optab
2025 ? LT : GT),
2026 x, op0_piece,
2027 word_mode, 1, normalizep);
2028 }
2029
2030 if (i > 0)
2031 {
2032 rtx newx;
2033
2034 /* Add/subtract previous carry to main result. */
2035 newx = expand_binop (word_mode,
2036 normalizep == 1 ? binoptab : otheroptab,
2037 x, carry_in,
2038 NULL_RTX, 1, next_methods);
2039
2040 if (i + 1 < nwords)
2041 {
2042 /* Get out carry from adding/subtracting carry in. */
2043 rtx carry_tmp = gen_reg_rtx (word_mode);
2044 carry_tmp = emit_store_flag_force (carry_tmp,
2045 (binoptab == add_optab
2046 ? LT : GT),
2047 newx, x,
2048 word_mode, 1, normalizep);
2049
2050 /* Logical-ior the two poss. carry together. */
2051 carry_out = expand_binop (word_mode, ior_optab,
2052 carry_out, carry_tmp,
2053 carry_out, 0, next_methods);
2054 if (carry_out == 0)
2055 break;
2056 }
2057 emit_move_insn (target_piece, newx);
2058 }
2059 else
2060 {
2061 if (x != target_piece)
2062 emit_move_insn (target_piece, x);
2063 }
2064
2065 carry_in = carry_out;
2066 }
2067
2068 if (i == GET_MODE_BITSIZE (mode) / (unsigned) BITS_PER_WORD)
2069 {
2070 if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing
2071 || ! rtx_equal_p (target, xtarget))
2072 {
2073 rtx temp = emit_move_insn (target, xtarget);
2074
2075 set_unique_reg_note (temp,
2076 REG_EQUAL,
2077 gen_rtx_fmt_ee (binoptab->code, mode,
2078 copy_rtx (xop0),
2079 copy_rtx (xop1)));
2080 }
2081 else
2082 target = xtarget;
2083
2084 return target;
2085 }
2086
2087 else
2088 delete_insns_since (last);
2089 }
2090
2091 /* Attempt to synthesize double word multiplies using a sequence of word
2092 mode multiplications. We first attempt to generate a sequence using a
2093 more efficient unsigned widening multiply, and if that fails we then
2094 try using a signed widening multiply. */
2095
2096 if (binoptab == smul_optab
2097 && mclass == MODE_INT
2098 && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
2099 && optab_handler (smul_optab, word_mode)->insn_code != CODE_FOR_nothing
2100 && optab_handler (add_optab, word_mode)->insn_code != CODE_FOR_nothing)
2101 {
2102 rtx product = NULL_RTX;
2103
2104 if (optab_handler (umul_widen_optab, mode)->insn_code
2105 != CODE_FOR_nothing)
2106 {
2107 product = expand_doubleword_mult (mode, op0, op1, target,
2108 true, methods);
2109 if (!product)
2110 delete_insns_since (last);
2111 }
2112
2113 if (product == NULL_RTX
2114 && optab_handler (smul_widen_optab, mode)->insn_code
2115 != CODE_FOR_nothing)
2116 {
2117 product = expand_doubleword_mult (mode, op0, op1, target,
2118 false, methods);
2119 if (!product)
2120 delete_insns_since (last);
2121 }
2122
2123 if (product != NULL_RTX)
2124 {
2125 if (optab_handler (mov_optab, mode)->insn_code != CODE_FOR_nothing)
2126 {
2127 temp = emit_move_insn (target ? target : product, product);
2128 set_unique_reg_note (temp,
2129 REG_EQUAL,
2130 gen_rtx_fmt_ee (MULT, mode,
2131 copy_rtx (op0),
2132 copy_rtx (op1)));
2133 }
2134 return product;
2135 }
2136 }
2137
2138 /* It can't be open-coded in this mode.
2139 Use a library call if one is available and caller says that's ok. */
2140
2141 libfunc = optab_libfunc (binoptab, mode);
2142 if (libfunc
2143 && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
2144 {
2145 rtx insns;
2146 rtx op1x = op1;
2147 enum machine_mode op1_mode = mode;
2148 rtx value;
2149
2150 start_sequence ();
2151
2152 if (shift_optab_p (binoptab))
2153 {
2154 op1_mode = targetm.libgcc_shift_count_mode ();
2155 /* Specify unsigned here,
2156 since negative shift counts are meaningless. */
2157 op1x = convert_to_mode (op1_mode, op1, 1);
2158 }
2159
2160 if (GET_MODE (op0) != VOIDmode
2161 && GET_MODE (op0) != mode)
2162 op0 = convert_to_mode (mode, op0, unsignedp);
2163
2164 /* Pass 1 for NO_QUEUE so we don't lose any increments
2165 if the libcall is cse'd or moved. */
2166 value = emit_library_call_value (libfunc,
2167 NULL_RTX, LCT_CONST, mode, 2,
2168 op0, mode, op1x, op1_mode);
2169
2170 insns = get_insns ();
2171 end_sequence ();
2172
2173 target = gen_reg_rtx (mode);
2174 emit_libcall_block (insns, target, value,
2175 gen_rtx_fmt_ee (binoptab->code, mode, op0, op1));
2176
2177 return target;
2178 }
2179
2180 delete_insns_since (last);
2181
2182 /* It can't be done in this mode. Can we do it in a wider mode? */
2183
2184 if (! (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN
2185 || methods == OPTAB_MUST_WIDEN))
2186 {
2187 /* Caller says, don't even try. */
2188 delete_insns_since (entry_last);
2189 return 0;
2190 }
2191
2192 /* Compute the value of METHODS to pass to recursive calls.
2193 Don't allow widening to be tried recursively. */
2194
2195 methods = (methods == OPTAB_LIB_WIDEN ? OPTAB_LIB : OPTAB_DIRECT);
2196
2197 /* Look for a wider mode of the same class for which it appears we can do
2198 the operation. */
2199
2200 if (CLASS_HAS_WIDER_MODES_P (mclass))
2201 {
2202 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2203 wider_mode != VOIDmode;
2204 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2205 {
2206 if ((optab_handler (binoptab, wider_mode)->insn_code
2207 != CODE_FOR_nothing)
2208 || (methods == OPTAB_LIB
2209 && optab_libfunc (binoptab, wider_mode)))
2210 {
2211 rtx xop0 = op0, xop1 = op1;
2212 int no_extend = 0;
2213
2214 /* For certain integer operations, we need not actually extend
2215 the narrow operands, as long as we will truncate
2216 the results to the same narrowness. */
2217
2218 if ((binoptab == ior_optab || binoptab == and_optab
2219 || binoptab == xor_optab
2220 || binoptab == add_optab || binoptab == sub_optab
2221 || binoptab == smul_optab || binoptab == ashl_optab)
2222 && mclass == MODE_INT)
2223 no_extend = 1;
2224
2225 xop0 = widen_operand (xop0, wider_mode, mode,
2226 unsignedp, no_extend);
2227
2228 /* The second operand of a shift must always be extended. */
2229 xop1 = widen_operand (xop1, wider_mode, mode, unsignedp,
2230 no_extend && binoptab != ashl_optab);
2231
2232 temp = expand_binop (wider_mode, binoptab, xop0, xop1, NULL_RTX,
2233 unsignedp, methods);
2234 if (temp)
2235 {
2236 if (mclass != MODE_INT
2237 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
2238 GET_MODE_BITSIZE (wider_mode)))
2239 {
2240 if (target == 0)
2241 target = gen_reg_rtx (mode);
2242 convert_move (target, temp, 0);
2243 return target;
2244 }
2245 else
2246 return gen_lowpart (mode, temp);
2247 }
2248 else
2249 delete_insns_since (last);
2250 }
2251 }
2252 }
2253
2254 delete_insns_since (entry_last);
2255 return 0;
2256 }
2257 \f
2258 /* Expand a binary operator which has both signed and unsigned forms.
2259 UOPTAB is the optab for unsigned operations, and SOPTAB is for
2260 signed operations.
2261
2262 If we widen unsigned operands, we may use a signed wider operation instead
2263 of an unsigned wider operation, since the result would be the same. */
2264
2265 rtx
2266 sign_expand_binop (enum machine_mode mode, optab uoptab, optab soptab,
2267 rtx op0, rtx op1, rtx target, int unsignedp,
2268 enum optab_methods methods)
2269 {
2270 rtx temp;
2271 optab direct_optab = unsignedp ? uoptab : soptab;
2272 struct optab wide_soptab;
2273
2274 /* Do it without widening, if possible. */
2275 temp = expand_binop (mode, direct_optab, op0, op1, target,
2276 unsignedp, OPTAB_DIRECT);
2277 if (temp || methods == OPTAB_DIRECT)
2278 return temp;
2279
2280 /* Try widening to a signed int. Make a fake signed optab that
2281 hides any signed insn for direct use. */
2282 wide_soptab = *soptab;
2283 optab_handler (&wide_soptab, mode)->insn_code = CODE_FOR_nothing;
2284 /* We don't want to generate new hash table entries from this fake
2285 optab. */
2286 wide_soptab.libcall_gen = NULL;
2287
2288 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2289 unsignedp, OPTAB_WIDEN);
2290
2291 /* For unsigned operands, try widening to an unsigned int. */
2292 if (temp == 0 && unsignedp)
2293 temp = expand_binop (mode, uoptab, op0, op1, target,
2294 unsignedp, OPTAB_WIDEN);
2295 if (temp || methods == OPTAB_WIDEN)
2296 return temp;
2297
2298 /* Use the right width lib call if that exists. */
2299 temp = expand_binop (mode, direct_optab, op0, op1, target, unsignedp, OPTAB_LIB);
2300 if (temp || methods == OPTAB_LIB)
2301 return temp;
2302
2303 /* Must widen and use a lib call, use either signed or unsigned. */
2304 temp = expand_binop (mode, &wide_soptab, op0, op1, target,
2305 unsignedp, methods);
2306 if (temp != 0)
2307 return temp;
2308 if (unsignedp)
2309 return expand_binop (mode, uoptab, op0, op1, target,
2310 unsignedp, methods);
2311 return 0;
2312 }
2313 \f
2314 /* Generate code to perform an operation specified by UNOPPTAB
2315 on operand OP0, with two results to TARG0 and TARG1.
2316 We assume that the order of the operands for the instruction
2317 is TARG0, TARG1, OP0.
2318
2319 Either TARG0 or TARG1 may be zero, but what that means is that
2320 the result is not actually wanted. We will generate it into
2321 a dummy pseudo-reg and discard it. They may not both be zero.
2322
2323 Returns 1 if this operation can be performed; 0 if not. */
2324
2325 int
2326 expand_twoval_unop (optab unoptab, rtx op0, rtx targ0, rtx targ1,
2327 int unsignedp)
2328 {
2329 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2330 enum mode_class mclass;
2331 enum machine_mode wider_mode;
2332 rtx entry_last = get_last_insn ();
2333 rtx last;
2334
2335 mclass = GET_MODE_CLASS (mode);
2336
2337 if (!targ0)
2338 targ0 = gen_reg_rtx (mode);
2339 if (!targ1)
2340 targ1 = gen_reg_rtx (mode);
2341
2342 /* Record where to go back to if we fail. */
2343 last = get_last_insn ();
2344
2345 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
2346 {
2347 int icode = (int) optab_handler (unoptab, mode)->insn_code;
2348 enum machine_mode mode0 = insn_data[icode].operand[2].mode;
2349 rtx pat;
2350 rtx xop0 = op0;
2351
2352 if (GET_MODE (xop0) != VOIDmode
2353 && GET_MODE (xop0) != mode0)
2354 xop0 = convert_to_mode (mode0, xop0, unsignedp);
2355
2356 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2357 if (!insn_data[icode].operand[2].predicate (xop0, mode0))
2358 xop0 = copy_to_mode_reg (mode0, xop0);
2359
2360 /* We could handle this, but we should always be called with a pseudo
2361 for our targets and all insns should take them as outputs. */
2362 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2363 gcc_assert (insn_data[icode].operand[1].predicate (targ1, mode));
2364
2365 pat = GEN_FCN (icode) (targ0, targ1, xop0);
2366 if (pat)
2367 {
2368 emit_insn (pat);
2369 return 1;
2370 }
2371 else
2372 delete_insns_since (last);
2373 }
2374
2375 /* It can't be done in this mode. Can we do it in a wider mode? */
2376
2377 if (CLASS_HAS_WIDER_MODES_P (mclass))
2378 {
2379 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2380 wider_mode != VOIDmode;
2381 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2382 {
2383 if (optab_handler (unoptab, wider_mode)->insn_code
2384 != CODE_FOR_nothing)
2385 {
2386 rtx t0 = gen_reg_rtx (wider_mode);
2387 rtx t1 = gen_reg_rtx (wider_mode);
2388 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2389
2390 if (expand_twoval_unop (unoptab, cop0, t0, t1, unsignedp))
2391 {
2392 convert_move (targ0, t0, unsignedp);
2393 convert_move (targ1, t1, unsignedp);
2394 return 1;
2395 }
2396 else
2397 delete_insns_since (last);
2398 }
2399 }
2400 }
2401
2402 delete_insns_since (entry_last);
2403 return 0;
2404 }
2405 \f
2406 /* Generate code to perform an operation specified by BINOPTAB
2407 on operands OP0 and OP1, with two results to TARG1 and TARG2.
2408 We assume that the order of the operands for the instruction
2409 is TARG0, OP0, OP1, TARG1, which would fit a pattern like
2410 [(set TARG0 (operate OP0 OP1)) (set TARG1 (operate ...))].
2411
2412 Either TARG0 or TARG1 may be zero, but what that means is that
2413 the result is not actually wanted. We will generate it into
2414 a dummy pseudo-reg and discard it. They may not both be zero.
2415
2416 Returns 1 if this operation can be performed; 0 if not. */
2417
2418 int
2419 expand_twoval_binop (optab binoptab, rtx op0, rtx op1, rtx targ0, rtx targ1,
2420 int unsignedp)
2421 {
2422 enum machine_mode mode = GET_MODE (targ0 ? targ0 : targ1);
2423 enum mode_class mclass;
2424 enum machine_mode wider_mode;
2425 rtx entry_last = get_last_insn ();
2426 rtx last;
2427
2428 mclass = GET_MODE_CLASS (mode);
2429
2430 if (!targ0)
2431 targ0 = gen_reg_rtx (mode);
2432 if (!targ1)
2433 targ1 = gen_reg_rtx (mode);
2434
2435 /* Record where to go back to if we fail. */
2436 last = get_last_insn ();
2437
2438 if (optab_handler (binoptab, mode)->insn_code != CODE_FOR_nothing)
2439 {
2440 int icode = (int) optab_handler (binoptab, mode)->insn_code;
2441 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
2442 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
2443 rtx pat;
2444 rtx xop0 = op0, xop1 = op1;
2445
2446 /* If we are optimizing, force expensive constants into a register. */
2447 xop0 = avoid_expensive_constant (mode0, binoptab, xop0, unsignedp);
2448 xop1 = avoid_expensive_constant (mode1, binoptab, xop1, unsignedp);
2449
2450 /* In case the insn wants input operands in modes different from
2451 those of the actual operands, convert the operands. It would
2452 seem that we don't need to convert CONST_INTs, but we do, so
2453 that they're properly zero-extended, sign-extended or truncated
2454 for their mode. */
2455
2456 if (GET_MODE (op0) != mode0 && mode0 != VOIDmode)
2457 xop0 = convert_modes (mode0,
2458 GET_MODE (op0) != VOIDmode
2459 ? GET_MODE (op0)
2460 : mode,
2461 xop0, unsignedp);
2462
2463 if (GET_MODE (op1) != mode1 && mode1 != VOIDmode)
2464 xop1 = convert_modes (mode1,
2465 GET_MODE (op1) != VOIDmode
2466 ? GET_MODE (op1)
2467 : mode,
2468 xop1, unsignedp);
2469
2470 /* Now, if insn doesn't accept these operands, put them into pseudos. */
2471 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
2472 xop0 = copy_to_mode_reg (mode0, xop0);
2473
2474 if (!insn_data[icode].operand[2].predicate (xop1, mode1))
2475 xop1 = copy_to_mode_reg (mode1, xop1);
2476
2477 /* We could handle this, but we should always be called with a pseudo
2478 for our targets and all insns should take them as outputs. */
2479 gcc_assert (insn_data[icode].operand[0].predicate (targ0, mode));
2480 gcc_assert (insn_data[icode].operand[3].predicate (targ1, mode));
2481
2482 pat = GEN_FCN (icode) (targ0, xop0, xop1, targ1);
2483 if (pat)
2484 {
2485 emit_insn (pat);
2486 return 1;
2487 }
2488 else
2489 delete_insns_since (last);
2490 }
2491
2492 /* It can't be done in this mode. Can we do it in a wider mode? */
2493
2494 if (CLASS_HAS_WIDER_MODES_P (mclass))
2495 {
2496 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2497 wider_mode != VOIDmode;
2498 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2499 {
2500 if (optab_handler (binoptab, wider_mode)->insn_code
2501 != CODE_FOR_nothing)
2502 {
2503 rtx t0 = gen_reg_rtx (wider_mode);
2504 rtx t1 = gen_reg_rtx (wider_mode);
2505 rtx cop0 = convert_modes (wider_mode, mode, op0, unsignedp);
2506 rtx cop1 = convert_modes (wider_mode, mode, op1, unsignedp);
2507
2508 if (expand_twoval_binop (binoptab, cop0, cop1,
2509 t0, t1, unsignedp))
2510 {
2511 convert_move (targ0, t0, unsignedp);
2512 convert_move (targ1, t1, unsignedp);
2513 return 1;
2514 }
2515 else
2516 delete_insns_since (last);
2517 }
2518 }
2519 }
2520
2521 delete_insns_since (entry_last);
2522 return 0;
2523 }
2524
2525 /* Expand the two-valued library call indicated by BINOPTAB, but
2526 preserve only one of the values. If TARG0 is non-NULL, the first
2527 value is placed into TARG0; otherwise the second value is placed
2528 into TARG1. Exactly one of TARG0 and TARG1 must be non-NULL. The
2529 value stored into TARG0 or TARG1 is equivalent to (CODE OP0 OP1).
2530 This routine assumes that the value returned by the library call is
2531 as if the return value was of an integral mode twice as wide as the
2532 mode of OP0. Returns 1 if the call was successful. */
2533
2534 bool
2535 expand_twoval_binop_libfunc (optab binoptab, rtx op0, rtx op1,
2536 rtx targ0, rtx targ1, enum rtx_code code)
2537 {
2538 enum machine_mode mode;
2539 enum machine_mode libval_mode;
2540 rtx libval;
2541 rtx insns;
2542 rtx libfunc;
2543
2544 /* Exactly one of TARG0 or TARG1 should be non-NULL. */
2545 gcc_assert (!targ0 != !targ1);
2546
2547 mode = GET_MODE (op0);
2548 libfunc = optab_libfunc (binoptab, mode);
2549 if (!libfunc)
2550 return false;
2551
2552 /* The value returned by the library function will have twice as
2553 many bits as the nominal MODE. */
2554 libval_mode = smallest_mode_for_size (2 * GET_MODE_BITSIZE (mode),
2555 MODE_INT);
2556 start_sequence ();
2557 libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
2558 libval_mode, 2,
2559 op0, mode,
2560 op1, mode);
2561 /* Get the part of VAL containing the value that we want. */
2562 libval = simplify_gen_subreg (mode, libval, libval_mode,
2563 targ0 ? 0 : GET_MODE_SIZE (mode));
2564 insns = get_insns ();
2565 end_sequence ();
2566 /* Move the into the desired location. */
2567 emit_libcall_block (insns, targ0 ? targ0 : targ1, libval,
2568 gen_rtx_fmt_ee (code, mode, op0, op1));
2569
2570 return true;
2571 }
2572
2573 \f
2574 /* Wrapper around expand_unop which takes an rtx code to specify
2575 the operation to perform, not an optab pointer. All other
2576 arguments are the same. */
2577 rtx
2578 expand_simple_unop (enum machine_mode mode, enum rtx_code code, rtx op0,
2579 rtx target, int unsignedp)
2580 {
2581 optab unop = code_to_optab[(int) code];
2582 gcc_assert (unop);
2583
2584 return expand_unop (mode, unop, op0, target, unsignedp);
2585 }
2586
2587 /* Try calculating
2588 (clz:narrow x)
2589 as
2590 (clz:wide (zero_extend:wide x)) - ((width wide) - (width narrow)). */
2591 static rtx
2592 widen_clz (enum machine_mode mode, rtx op0, rtx target)
2593 {
2594 enum mode_class mclass = GET_MODE_CLASS (mode);
2595 if (CLASS_HAS_WIDER_MODES_P (mclass))
2596 {
2597 enum machine_mode wider_mode;
2598 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2599 wider_mode != VOIDmode;
2600 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2601 {
2602 if (optab_handler (clz_optab, wider_mode)->insn_code
2603 != CODE_FOR_nothing)
2604 {
2605 rtx xop0, temp, last;
2606
2607 last = get_last_insn ();
2608
2609 if (target == 0)
2610 target = gen_reg_rtx (mode);
2611 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2612 temp = expand_unop (wider_mode, clz_optab, xop0, NULL_RTX, true);
2613 if (temp != 0)
2614 temp = expand_binop (wider_mode, sub_optab, temp,
2615 GEN_INT (GET_MODE_BITSIZE (wider_mode)
2616 - GET_MODE_BITSIZE (mode)),
2617 target, true, OPTAB_DIRECT);
2618 if (temp == 0)
2619 delete_insns_since (last);
2620
2621 return temp;
2622 }
2623 }
2624 }
2625 return 0;
2626 }
2627
2628 /* Try calculating clz of a double-word quantity as two clz's of word-sized
2629 quantities, choosing which based on whether the high word is nonzero. */
2630 static rtx
2631 expand_doubleword_clz (enum machine_mode mode, rtx op0, rtx target)
2632 {
2633 rtx xop0 = force_reg (mode, op0);
2634 rtx subhi = gen_highpart (word_mode, xop0);
2635 rtx sublo = gen_lowpart (word_mode, xop0);
2636 rtx hi0_label = gen_label_rtx ();
2637 rtx after_label = gen_label_rtx ();
2638 rtx seq, temp, result;
2639
2640 /* If we were not given a target, use a word_mode register, not a
2641 'mode' register. The result will fit, and nobody is expecting
2642 anything bigger (the return type of __builtin_clz* is int). */
2643 if (!target)
2644 target = gen_reg_rtx (word_mode);
2645
2646 /* In any case, write to a word_mode scratch in both branches of the
2647 conditional, so we can ensure there is a single move insn setting
2648 'target' to tag a REG_EQUAL note on. */
2649 result = gen_reg_rtx (word_mode);
2650
2651 start_sequence ();
2652
2653 /* If the high word is not equal to zero,
2654 then clz of the full value is clz of the high word. */
2655 emit_cmp_and_jump_insns (subhi, CONST0_RTX (word_mode), EQ, 0,
2656 word_mode, true, hi0_label);
2657
2658 temp = expand_unop_direct (word_mode, clz_optab, subhi, result, true);
2659 if (!temp)
2660 goto fail;
2661
2662 if (temp != result)
2663 convert_move (result, temp, true);
2664
2665 emit_jump_insn (gen_jump (after_label));
2666 emit_barrier ();
2667
2668 /* Else clz of the full value is clz of the low word plus the number
2669 of bits in the high word. */
2670 emit_label (hi0_label);
2671
2672 temp = expand_unop_direct (word_mode, clz_optab, sublo, 0, true);
2673 if (!temp)
2674 goto fail;
2675 temp = expand_binop (word_mode, add_optab, temp,
2676 GEN_INT (GET_MODE_BITSIZE (word_mode)),
2677 result, true, OPTAB_DIRECT);
2678 if (!temp)
2679 goto fail;
2680 if (temp != result)
2681 convert_move (result, temp, true);
2682
2683 emit_label (after_label);
2684 convert_move (target, result, true);
2685
2686 seq = get_insns ();
2687 end_sequence ();
2688
2689 add_equal_note (seq, target, CLZ, xop0, 0);
2690 emit_insn (seq);
2691 return target;
2692
2693 fail:
2694 end_sequence ();
2695 return 0;
2696 }
2697
2698 /* Try calculating
2699 (bswap:narrow x)
2700 as
2701 (lshiftrt:wide (bswap:wide x) ((width wide) - (width narrow))). */
2702 static rtx
2703 widen_bswap (enum machine_mode mode, rtx op0, rtx target)
2704 {
2705 enum mode_class mclass = GET_MODE_CLASS (mode);
2706 enum machine_mode wider_mode;
2707 rtx x, last;
2708
2709 if (!CLASS_HAS_WIDER_MODES_P (mclass))
2710 return NULL_RTX;
2711
2712 for (wider_mode = GET_MODE_WIDER_MODE (mode);
2713 wider_mode != VOIDmode;
2714 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2715 if (optab_handler (bswap_optab, wider_mode)->insn_code != CODE_FOR_nothing)
2716 goto found;
2717 return NULL_RTX;
2718
2719 found:
2720 last = get_last_insn ();
2721
2722 x = widen_operand (op0, wider_mode, mode, true, true);
2723 x = expand_unop (wider_mode, bswap_optab, x, NULL_RTX, true);
2724
2725 if (x != 0)
2726 x = expand_shift (RSHIFT_EXPR, wider_mode, x,
2727 size_int (GET_MODE_BITSIZE (wider_mode)
2728 - GET_MODE_BITSIZE (mode)),
2729 NULL_RTX, true);
2730
2731 if (x != 0)
2732 {
2733 if (target == 0)
2734 target = gen_reg_rtx (mode);
2735 emit_move_insn (target, gen_lowpart (mode, x));
2736 }
2737 else
2738 delete_insns_since (last);
2739
2740 return target;
2741 }
2742
2743 /* Try calculating bswap as two bswaps of two word-sized operands. */
2744
2745 static rtx
2746 expand_doubleword_bswap (enum machine_mode mode, rtx op, rtx target)
2747 {
2748 rtx t0, t1;
2749
2750 t1 = expand_unop (word_mode, bswap_optab,
2751 operand_subword_force (op, 0, mode), NULL_RTX, true);
2752 t0 = expand_unop (word_mode, bswap_optab,
2753 operand_subword_force (op, 1, mode), NULL_RTX, true);
2754
2755 if (target == 0)
2756 target = gen_reg_rtx (mode);
2757 if (REG_P (target))
2758 emit_clobber (target);
2759 emit_move_insn (operand_subword (target, 0, 1, mode), t0);
2760 emit_move_insn (operand_subword (target, 1, 1, mode), t1);
2761
2762 return target;
2763 }
2764
2765 /* Try calculating (parity x) as (and (popcount x) 1), where
2766 popcount can also be done in a wider mode. */
2767 static rtx
2768 expand_parity (enum machine_mode mode, rtx op0, rtx target)
2769 {
2770 enum mode_class mclass = GET_MODE_CLASS (mode);
2771 if (CLASS_HAS_WIDER_MODES_P (mclass))
2772 {
2773 enum machine_mode wider_mode;
2774 for (wider_mode = mode; wider_mode != VOIDmode;
2775 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
2776 {
2777 if (optab_handler (popcount_optab, wider_mode)->insn_code
2778 != CODE_FOR_nothing)
2779 {
2780 rtx xop0, temp, last;
2781
2782 last = get_last_insn ();
2783
2784 if (target == 0)
2785 target = gen_reg_rtx (mode);
2786 xop0 = widen_operand (op0, wider_mode, mode, true, false);
2787 temp = expand_unop (wider_mode, popcount_optab, xop0, NULL_RTX,
2788 true);
2789 if (temp != 0)
2790 temp = expand_binop (wider_mode, and_optab, temp, const1_rtx,
2791 target, true, OPTAB_DIRECT);
2792 if (temp == 0)
2793 delete_insns_since (last);
2794
2795 return temp;
2796 }
2797 }
2798 }
2799 return 0;
2800 }
2801
2802 /* Try calculating ctz(x) as K - clz(x & -x) ,
2803 where K is GET_MODE_BITSIZE(mode) - 1.
2804
2805 Both __builtin_ctz and __builtin_clz are undefined at zero, so we
2806 don't have to worry about what the hardware does in that case. (If
2807 the clz instruction produces the usual value at 0, which is K, the
2808 result of this code sequence will be -1; expand_ffs, below, relies
2809 on this. It might be nice to have it be K instead, for consistency
2810 with the (very few) processors that provide a ctz with a defined
2811 value, but that would take one more instruction, and it would be
2812 less convenient for expand_ffs anyway. */
2813
2814 static rtx
2815 expand_ctz (enum machine_mode mode, rtx op0, rtx target)
2816 {
2817 rtx seq, temp;
2818
2819 if (optab_handler (clz_optab, mode)->insn_code == CODE_FOR_nothing)
2820 return 0;
2821
2822 start_sequence ();
2823
2824 temp = expand_unop_direct (mode, neg_optab, op0, NULL_RTX, true);
2825 if (temp)
2826 temp = expand_binop (mode, and_optab, op0, temp, NULL_RTX,
2827 true, OPTAB_DIRECT);
2828 if (temp)
2829 temp = expand_unop_direct (mode, clz_optab, temp, NULL_RTX, true);
2830 if (temp)
2831 temp = expand_binop (mode, sub_optab, GEN_INT (GET_MODE_BITSIZE (mode) - 1),
2832 temp, target,
2833 true, OPTAB_DIRECT);
2834 if (temp == 0)
2835 {
2836 end_sequence ();
2837 return 0;
2838 }
2839
2840 seq = get_insns ();
2841 end_sequence ();
2842
2843 add_equal_note (seq, temp, CTZ, op0, 0);
2844 emit_insn (seq);
2845 return temp;
2846 }
2847
2848
2849 /* Try calculating ffs(x) using ctz(x) if we have that instruction, or
2850 else with the sequence used by expand_clz.
2851
2852 The ffs builtin promises to return zero for a zero value and ctz/clz
2853 may have an undefined value in that case. If they do not give us a
2854 convenient value, we have to generate a test and branch. */
2855 static rtx
2856 expand_ffs (enum machine_mode mode, rtx op0, rtx target)
2857 {
2858 HOST_WIDE_INT val = 0;
2859 bool defined_at_zero = false;
2860 rtx temp, seq;
2861
2862 if (optab_handler (ctz_optab, mode)->insn_code != CODE_FOR_nothing)
2863 {
2864 start_sequence ();
2865
2866 temp = expand_unop_direct (mode, ctz_optab, op0, 0, true);
2867 if (!temp)
2868 goto fail;
2869
2870 defined_at_zero = (CTZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2);
2871 }
2872 else if (optab_handler (clz_optab, mode)->insn_code != CODE_FOR_nothing)
2873 {
2874 start_sequence ();
2875 temp = expand_ctz (mode, op0, 0);
2876 if (!temp)
2877 goto fail;
2878
2879 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, val) == 2)
2880 {
2881 defined_at_zero = true;
2882 val = (GET_MODE_BITSIZE (mode) - 1) - val;
2883 }
2884 }
2885 else
2886 return 0;
2887
2888 if (defined_at_zero && val == -1)
2889 /* No correction needed at zero. */;
2890 else
2891 {
2892 /* We don't try to do anything clever with the situation found
2893 on some processors (eg Alpha) where ctz(0:mode) ==
2894 bitsize(mode). If someone can think of a way to send N to -1
2895 and leave alone all values in the range 0..N-1 (where N is a
2896 power of two), cheaper than this test-and-branch, please add it.
2897
2898 The test-and-branch is done after the operation itself, in case
2899 the operation sets condition codes that can be recycled for this.
2900 (This is true on i386, for instance.) */
2901
2902 rtx nonzero_label = gen_label_rtx ();
2903 emit_cmp_and_jump_insns (op0, CONST0_RTX (mode), NE, 0,
2904 mode, true, nonzero_label);
2905
2906 convert_move (temp, GEN_INT (-1), false);
2907 emit_label (nonzero_label);
2908 }
2909
2910 /* temp now has a value in the range -1..bitsize-1. ffs is supposed
2911 to produce a value in the range 0..bitsize. */
2912 temp = expand_binop (mode, add_optab, temp, GEN_INT (1),
2913 target, false, OPTAB_DIRECT);
2914 if (!temp)
2915 goto fail;
2916
2917 seq = get_insns ();
2918 end_sequence ();
2919
2920 add_equal_note (seq, temp, FFS, op0, 0);
2921 emit_insn (seq);
2922 return temp;
2923
2924 fail:
2925 end_sequence ();
2926 return 0;
2927 }
2928
2929 /* Extract the OMODE lowpart from VAL, which has IMODE. Under certain
2930 conditions, VAL may already be a SUBREG against which we cannot generate
2931 a further SUBREG. In this case, we expect forcing the value into a
2932 register will work around the situation. */
2933
2934 static rtx
2935 lowpart_subreg_maybe_copy (enum machine_mode omode, rtx val,
2936 enum machine_mode imode)
2937 {
2938 rtx ret;
2939 ret = lowpart_subreg (omode, val, imode);
2940 if (ret == NULL)
2941 {
2942 val = force_reg (imode, val);
2943 ret = lowpart_subreg (omode, val, imode);
2944 gcc_assert (ret != NULL);
2945 }
2946 return ret;
2947 }
2948
2949 /* Expand a floating point absolute value or negation operation via a
2950 logical operation on the sign bit. */
2951
2952 static rtx
2953 expand_absneg_bit (enum rtx_code code, enum machine_mode mode,
2954 rtx op0, rtx target)
2955 {
2956 const struct real_format *fmt;
2957 int bitpos, word, nwords, i;
2958 enum machine_mode imode;
2959 HOST_WIDE_INT hi, lo;
2960 rtx temp, insns;
2961
2962 /* The format has to have a simple sign bit. */
2963 fmt = REAL_MODE_FORMAT (mode);
2964 if (fmt == NULL)
2965 return NULL_RTX;
2966
2967 bitpos = fmt->signbit_rw;
2968 if (bitpos < 0)
2969 return NULL_RTX;
2970
2971 /* Don't create negative zeros if the format doesn't support them. */
2972 if (code == NEG && !fmt->has_signed_zero)
2973 return NULL_RTX;
2974
2975 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
2976 {
2977 imode = int_mode_for_mode (mode);
2978 if (imode == BLKmode)
2979 return NULL_RTX;
2980 word = 0;
2981 nwords = 1;
2982 }
2983 else
2984 {
2985 imode = word_mode;
2986
2987 if (FLOAT_WORDS_BIG_ENDIAN)
2988 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
2989 else
2990 word = bitpos / BITS_PER_WORD;
2991 bitpos = bitpos % BITS_PER_WORD;
2992 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
2993 }
2994
2995 if (bitpos < HOST_BITS_PER_WIDE_INT)
2996 {
2997 hi = 0;
2998 lo = (HOST_WIDE_INT) 1 << bitpos;
2999 }
3000 else
3001 {
3002 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3003 lo = 0;
3004 }
3005 if (code == ABS)
3006 lo = ~lo, hi = ~hi;
3007
3008 if (target == 0 || target == op0)
3009 target = gen_reg_rtx (mode);
3010
3011 if (nwords > 1)
3012 {
3013 start_sequence ();
3014
3015 for (i = 0; i < nwords; ++i)
3016 {
3017 rtx targ_piece = operand_subword (target, i, 1, mode);
3018 rtx op0_piece = operand_subword_force (op0, i, mode);
3019
3020 if (i == word)
3021 {
3022 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
3023 op0_piece,
3024 immed_double_const (lo, hi, imode),
3025 targ_piece, 1, OPTAB_LIB_WIDEN);
3026 if (temp != targ_piece)
3027 emit_move_insn (targ_piece, temp);
3028 }
3029 else
3030 emit_move_insn (targ_piece, op0_piece);
3031 }
3032
3033 insns = get_insns ();
3034 end_sequence ();
3035
3036 emit_insn (insns);
3037 }
3038 else
3039 {
3040 temp = expand_binop (imode, code == ABS ? and_optab : xor_optab,
3041 gen_lowpart (imode, op0),
3042 immed_double_const (lo, hi, imode),
3043 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3044 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3045
3046 set_unique_reg_note (get_last_insn (), REG_EQUAL,
3047 gen_rtx_fmt_e (code, mode, copy_rtx (op0)));
3048 }
3049
3050 return target;
3051 }
3052
3053 /* As expand_unop, but will fail rather than attempt the operation in a
3054 different mode or with a libcall. */
3055 static rtx
3056 expand_unop_direct (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3057 int unsignedp)
3058 {
3059 if (optab_handler (unoptab, mode)->insn_code != CODE_FOR_nothing)
3060 {
3061 int icode = (int) optab_handler (unoptab, mode)->insn_code;
3062 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3063 rtx xop0 = op0;
3064 rtx last = get_last_insn ();
3065 rtx pat, temp;
3066
3067 if (target)
3068 temp = target;
3069 else
3070 temp = gen_reg_rtx (mode);
3071
3072 if (GET_MODE (xop0) != VOIDmode
3073 && GET_MODE (xop0) != mode0)
3074 xop0 = convert_to_mode (mode0, xop0, unsignedp);
3075
3076 /* Now, if insn doesn't accept our operand, put it into a pseudo. */
3077
3078 if (!insn_data[icode].operand[1].predicate (xop0, mode0))
3079 xop0 = copy_to_mode_reg (mode0, xop0);
3080
3081 if (!insn_data[icode].operand[0].predicate (temp, mode))
3082 temp = gen_reg_rtx (mode);
3083
3084 pat = GEN_FCN (icode) (temp, xop0);
3085 if (pat)
3086 {
3087 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
3088 && ! add_equal_note (pat, temp, unoptab->code, xop0, NULL_RTX))
3089 {
3090 delete_insns_since (last);
3091 return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
3092 }
3093
3094 emit_insn (pat);
3095
3096 return temp;
3097 }
3098 else
3099 delete_insns_since (last);
3100 }
3101 return 0;
3102 }
3103
3104 /* Generate code to perform an operation specified by UNOPTAB
3105 on operand OP0, with result having machine-mode MODE.
3106
3107 UNSIGNEDP is for the case where we have to widen the operands
3108 to perform the operation. It says to use zero-extension.
3109
3110 If TARGET is nonzero, the value
3111 is generated there, if it is convenient to do so.
3112 In all cases an rtx is returned for the locus of the value;
3113 this may or may not be TARGET. */
3114
3115 rtx
3116 expand_unop (enum machine_mode mode, optab unoptab, rtx op0, rtx target,
3117 int unsignedp)
3118 {
3119 enum mode_class mclass = GET_MODE_CLASS (mode);
3120 enum machine_mode wider_mode;
3121 rtx temp;
3122 rtx libfunc;
3123
3124 temp = expand_unop_direct (mode, unoptab, op0, target, unsignedp);
3125 if (temp)
3126 return temp;
3127
3128 /* It can't be done in this mode. Can we open-code it in a wider mode? */
3129
3130 /* Widening (or narrowing) clz needs special treatment. */
3131 if (unoptab == clz_optab)
3132 {
3133 temp = widen_clz (mode, op0, target);
3134 if (temp)
3135 return temp;
3136
3137 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3138 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3139 {
3140 temp = expand_doubleword_clz (mode, op0, target);
3141 if (temp)
3142 return temp;
3143 }
3144
3145 goto try_libcall;
3146 }
3147
3148 /* Widening (or narrowing) bswap needs special treatment. */
3149 if (unoptab == bswap_optab)
3150 {
3151 temp = widen_bswap (mode, op0, target);
3152 if (temp)
3153 return temp;
3154
3155 if (GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
3156 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3157 {
3158 temp = expand_doubleword_bswap (mode, op0, target);
3159 if (temp)
3160 return temp;
3161 }
3162
3163 goto try_libcall;
3164 }
3165
3166 if (CLASS_HAS_WIDER_MODES_P (mclass))
3167 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3168 wider_mode != VOIDmode;
3169 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3170 {
3171 if (optab_handler (unoptab, wider_mode)->insn_code != CODE_FOR_nothing)
3172 {
3173 rtx xop0 = op0;
3174 rtx last = get_last_insn ();
3175
3176 /* For certain operations, we need not actually extend
3177 the narrow operand, as long as we will truncate the
3178 results to the same narrowness. */
3179
3180 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3181 (unoptab == neg_optab
3182 || unoptab == one_cmpl_optab)
3183 && mclass == MODE_INT);
3184
3185 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3186 unsignedp);
3187
3188 if (temp)
3189 {
3190 if (mclass != MODE_INT
3191 || !TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
3192 GET_MODE_BITSIZE (wider_mode)))
3193 {
3194 if (target == 0)
3195 target = gen_reg_rtx (mode);
3196 convert_move (target, temp, 0);
3197 return target;
3198 }
3199 else
3200 return gen_lowpart (mode, temp);
3201 }
3202 else
3203 delete_insns_since (last);
3204 }
3205 }
3206
3207 /* These can be done a word at a time. */
3208 if (unoptab == one_cmpl_optab
3209 && mclass == MODE_INT
3210 && GET_MODE_SIZE (mode) > UNITS_PER_WORD
3211 && optab_handler (unoptab, word_mode)->insn_code != CODE_FOR_nothing)
3212 {
3213 int i;
3214 rtx insns;
3215
3216 if (target == 0 || target == op0)
3217 target = gen_reg_rtx (mode);
3218
3219 start_sequence ();
3220
3221 /* Do the actual arithmetic. */
3222 for (i = 0; i < GET_MODE_BITSIZE (mode) / BITS_PER_WORD; i++)
3223 {
3224 rtx target_piece = operand_subword (target, i, 1, mode);
3225 rtx x = expand_unop (word_mode, unoptab,
3226 operand_subword_force (op0, i, mode),
3227 target_piece, unsignedp);
3228
3229 if (target_piece != x)
3230 emit_move_insn (target_piece, x);
3231 }
3232
3233 insns = get_insns ();
3234 end_sequence ();
3235
3236 emit_insn (insns);
3237 return target;
3238 }
3239
3240 if (unoptab->code == NEG)
3241 {
3242 /* Try negating floating point values by flipping the sign bit. */
3243 if (SCALAR_FLOAT_MODE_P (mode))
3244 {
3245 temp = expand_absneg_bit (NEG, mode, op0, target);
3246 if (temp)
3247 return temp;
3248 }
3249
3250 /* If there is no negation pattern, and we have no negative zero,
3251 try subtracting from zero. */
3252 if (!HONOR_SIGNED_ZEROS (mode))
3253 {
3254 temp = expand_binop (mode, (unoptab == negv_optab
3255 ? subv_optab : sub_optab),
3256 CONST0_RTX (mode), op0, target,
3257 unsignedp, OPTAB_DIRECT);
3258 if (temp)
3259 return temp;
3260 }
3261 }
3262
3263 /* Try calculating parity (x) as popcount (x) % 2. */
3264 if (unoptab == parity_optab)
3265 {
3266 temp = expand_parity (mode, op0, target);
3267 if (temp)
3268 return temp;
3269 }
3270
3271 /* Try implementing ffs (x) in terms of clz (x). */
3272 if (unoptab == ffs_optab)
3273 {
3274 temp = expand_ffs (mode, op0, target);
3275 if (temp)
3276 return temp;
3277 }
3278
3279 /* Try implementing ctz (x) in terms of clz (x). */
3280 if (unoptab == ctz_optab)
3281 {
3282 temp = expand_ctz (mode, op0, target);
3283 if (temp)
3284 return temp;
3285 }
3286
3287 try_libcall:
3288 /* Now try a library call in this mode. */
3289 libfunc = optab_libfunc (unoptab, mode);
3290 if (libfunc)
3291 {
3292 rtx insns;
3293 rtx value;
3294 rtx eq_value;
3295 enum machine_mode outmode = mode;
3296
3297 /* All of these functions return small values. Thus we choose to
3298 have them return something that isn't a double-word. */
3299 if (unoptab == ffs_optab || unoptab == clz_optab || unoptab == ctz_optab
3300 || unoptab == popcount_optab || unoptab == parity_optab)
3301 outmode
3302 = GET_MODE (hard_libcall_value (TYPE_MODE (integer_type_node)));
3303
3304 start_sequence ();
3305
3306 /* Pass 1 for NO_QUEUE so we don't lose any increments
3307 if the libcall is cse'd or moved. */
3308 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, outmode,
3309 1, op0, mode);
3310 insns = get_insns ();
3311 end_sequence ();
3312
3313 target = gen_reg_rtx (outmode);
3314 eq_value = gen_rtx_fmt_e (unoptab->code, mode, op0);
3315 if (GET_MODE_SIZE (outmode) < GET_MODE_SIZE (mode))
3316 eq_value = simplify_gen_unary (TRUNCATE, outmode, eq_value, mode);
3317 else if (GET_MODE_SIZE (outmode) > GET_MODE_SIZE (mode))
3318 eq_value = simplify_gen_unary (ZERO_EXTEND, outmode, eq_value, mode);
3319 emit_libcall_block (insns, target, value, eq_value);
3320
3321 return target;
3322 }
3323
3324 /* It can't be done in this mode. Can we do it in a wider mode? */
3325
3326 if (CLASS_HAS_WIDER_MODES_P (mclass))
3327 {
3328 for (wider_mode = GET_MODE_WIDER_MODE (mode);
3329 wider_mode != VOIDmode;
3330 wider_mode = GET_MODE_WIDER_MODE (wider_mode))
3331 {
3332 if ((optab_handler (unoptab, wider_mode)->insn_code
3333 != CODE_FOR_nothing)
3334 || optab_libfunc (unoptab, wider_mode))
3335 {
3336 rtx xop0 = op0;
3337 rtx last = get_last_insn ();
3338
3339 /* For certain operations, we need not actually extend
3340 the narrow operand, as long as we will truncate the
3341 results to the same narrowness. */
3342
3343 xop0 = widen_operand (xop0, wider_mode, mode, unsignedp,
3344 (unoptab == neg_optab
3345 || unoptab == one_cmpl_optab)
3346 && mclass == MODE_INT);
3347
3348 temp = expand_unop (wider_mode, unoptab, xop0, NULL_RTX,
3349 unsignedp);
3350
3351 /* If we are generating clz using wider mode, adjust the
3352 result. */
3353 if (unoptab == clz_optab && temp != 0)
3354 temp = expand_binop (wider_mode, sub_optab, temp,
3355 GEN_INT (GET_MODE_BITSIZE (wider_mode)
3356 - GET_MODE_BITSIZE (mode)),
3357 target, true, OPTAB_DIRECT);
3358
3359 if (temp)
3360 {
3361 if (mclass != MODE_INT)
3362 {
3363 if (target == 0)
3364 target = gen_reg_rtx (mode);
3365 convert_move (target, temp, 0);
3366 return target;
3367 }
3368 else
3369 return gen_lowpart (mode, temp);
3370 }
3371 else
3372 delete_insns_since (last);
3373 }
3374 }
3375 }
3376
3377 /* One final attempt at implementing negation via subtraction,
3378 this time allowing widening of the operand. */
3379 if (unoptab->code == NEG && !HONOR_SIGNED_ZEROS (mode))
3380 {
3381 rtx temp;
3382 temp = expand_binop (mode,
3383 unoptab == negv_optab ? subv_optab : sub_optab,
3384 CONST0_RTX (mode), op0,
3385 target, unsignedp, OPTAB_LIB_WIDEN);
3386 if (temp)
3387 return temp;
3388 }
3389
3390 return 0;
3391 }
3392 \f
3393 /* Emit code to compute the absolute value of OP0, with result to
3394 TARGET if convenient. (TARGET may be 0.) The return value says
3395 where the result actually is to be found.
3396
3397 MODE is the mode of the operand; the mode of the result is
3398 different but can be deduced from MODE.
3399
3400 */
3401
3402 rtx
3403 expand_abs_nojump (enum machine_mode mode, rtx op0, rtx target,
3404 int result_unsignedp)
3405 {
3406 rtx temp;
3407
3408 if (! flag_trapv)
3409 result_unsignedp = 1;
3410
3411 /* First try to do it with a special abs instruction. */
3412 temp = expand_unop (mode, result_unsignedp ? abs_optab : absv_optab,
3413 op0, target, 0);
3414 if (temp != 0)
3415 return temp;
3416
3417 /* For floating point modes, try clearing the sign bit. */
3418 if (SCALAR_FLOAT_MODE_P (mode))
3419 {
3420 temp = expand_absneg_bit (ABS, mode, op0, target);
3421 if (temp)
3422 return temp;
3423 }
3424
3425 /* If we have a MAX insn, we can do this as MAX (x, -x). */
3426 if (optab_handler (smax_optab, mode)->insn_code != CODE_FOR_nothing
3427 && !HONOR_SIGNED_ZEROS (mode))
3428 {
3429 rtx last = get_last_insn ();
3430
3431 temp = expand_unop (mode, neg_optab, op0, NULL_RTX, 0);
3432 if (temp != 0)
3433 temp = expand_binop (mode, smax_optab, op0, temp, target, 0,
3434 OPTAB_WIDEN);
3435
3436 if (temp != 0)
3437 return temp;
3438
3439 delete_insns_since (last);
3440 }
3441
3442 /* If this machine has expensive jumps, we can do integer absolute
3443 value of X as (((signed) x >> (W-1)) ^ x) - ((signed) x >> (W-1)),
3444 where W is the width of MODE. */
3445
3446 if (GET_MODE_CLASS (mode) == MODE_INT
3447 && BRANCH_COST (optimize_insn_for_speed_p (),
3448 false) >= 2)
3449 {
3450 rtx extended = expand_shift (RSHIFT_EXPR, mode, op0,
3451 size_int (GET_MODE_BITSIZE (mode) - 1),
3452 NULL_RTX, 0);
3453
3454 temp = expand_binop (mode, xor_optab, extended, op0, target, 0,
3455 OPTAB_LIB_WIDEN);
3456 if (temp != 0)
3457 temp = expand_binop (mode, result_unsignedp ? sub_optab : subv_optab,
3458 temp, extended, target, 0, OPTAB_LIB_WIDEN);
3459
3460 if (temp != 0)
3461 return temp;
3462 }
3463
3464 return NULL_RTX;
3465 }
3466
3467 rtx
3468 expand_abs (enum machine_mode mode, rtx op0, rtx target,
3469 int result_unsignedp, int safe)
3470 {
3471 rtx temp, op1;
3472
3473 if (! flag_trapv)
3474 result_unsignedp = 1;
3475
3476 temp = expand_abs_nojump (mode, op0, target, result_unsignedp);
3477 if (temp != 0)
3478 return temp;
3479
3480 /* If that does not win, use conditional jump and negate. */
3481
3482 /* It is safe to use the target if it is the same
3483 as the source if this is also a pseudo register */
3484 if (op0 == target && REG_P (op0)
3485 && REGNO (op0) >= FIRST_PSEUDO_REGISTER)
3486 safe = 1;
3487
3488 op1 = gen_label_rtx ();
3489 if (target == 0 || ! safe
3490 || GET_MODE (target) != mode
3491 || (MEM_P (target) && MEM_VOLATILE_P (target))
3492 || (REG_P (target)
3493 && REGNO (target) < FIRST_PSEUDO_REGISTER))
3494 target = gen_reg_rtx (mode);
3495
3496 emit_move_insn (target, op0);
3497 NO_DEFER_POP;
3498
3499 do_compare_rtx_and_jump (target, CONST0_RTX (mode), GE, 0, mode,
3500 NULL_RTX, NULL_RTX, op1);
3501
3502 op0 = expand_unop (mode, result_unsignedp ? neg_optab : negv_optab,
3503 target, target, 0);
3504 if (op0 != target)
3505 emit_move_insn (target, op0);
3506 emit_label (op1);
3507 OK_DEFER_POP;
3508 return target;
3509 }
3510
3511 /* A subroutine of expand_copysign, perform the copysign operation using the
3512 abs and neg primitives advertised to exist on the target. The assumption
3513 is that we have a split register file, and leaving op0 in fp registers,
3514 and not playing with subregs so much, will help the register allocator. */
3515
3516 static rtx
3517 expand_copysign_absneg (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3518 int bitpos, bool op0_is_abs)
3519 {
3520 enum machine_mode imode;
3521 int icode;
3522 rtx sign, label;
3523
3524 if (target == op1)
3525 target = NULL_RTX;
3526
3527 /* Check if the back end provides an insn that handles signbit for the
3528 argument's mode. */
3529 icode = (int) signbit_optab->handlers [(int) mode].insn_code;
3530 if (icode != CODE_FOR_nothing)
3531 {
3532 imode = insn_data[icode].operand[0].mode;
3533 sign = gen_reg_rtx (imode);
3534 emit_unop_insn (icode, sign, op1, UNKNOWN);
3535 }
3536 else
3537 {
3538 HOST_WIDE_INT hi, lo;
3539
3540 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3541 {
3542 imode = int_mode_for_mode (mode);
3543 if (imode == BLKmode)
3544 return NULL_RTX;
3545 op1 = gen_lowpart (imode, op1);
3546 }
3547 else
3548 {
3549 int word;
3550
3551 imode = word_mode;
3552 if (FLOAT_WORDS_BIG_ENDIAN)
3553 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3554 else
3555 word = bitpos / BITS_PER_WORD;
3556 bitpos = bitpos % BITS_PER_WORD;
3557 op1 = operand_subword_force (op1, word, mode);
3558 }
3559
3560 if (bitpos < HOST_BITS_PER_WIDE_INT)
3561 {
3562 hi = 0;
3563 lo = (HOST_WIDE_INT) 1 << bitpos;
3564 }
3565 else
3566 {
3567 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3568 lo = 0;
3569 }
3570
3571 sign = gen_reg_rtx (imode);
3572 sign = expand_binop (imode, and_optab, op1,
3573 immed_double_const (lo, hi, imode),
3574 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3575 }
3576
3577 if (!op0_is_abs)
3578 {
3579 op0 = expand_unop (mode, abs_optab, op0, target, 0);
3580 if (op0 == NULL)
3581 return NULL_RTX;
3582 target = op0;
3583 }
3584 else
3585 {
3586 if (target == NULL_RTX)
3587 target = copy_to_reg (op0);
3588 else
3589 emit_move_insn (target, op0);
3590 }
3591
3592 label = gen_label_rtx ();
3593 emit_cmp_and_jump_insns (sign, const0_rtx, EQ, NULL_RTX, imode, 1, label);
3594
3595 if (GET_CODE (op0) == CONST_DOUBLE)
3596 op0 = simplify_unary_operation (NEG, mode, op0, mode);
3597 else
3598 op0 = expand_unop (mode, neg_optab, op0, target, 0);
3599 if (op0 != target)
3600 emit_move_insn (target, op0);
3601
3602 emit_label (label);
3603
3604 return target;
3605 }
3606
3607
3608 /* A subroutine of expand_copysign, perform the entire copysign operation
3609 with integer bitmasks. BITPOS is the position of the sign bit; OP0_IS_ABS
3610 is true if op0 is known to have its sign bit clear. */
3611
3612 static rtx
3613 expand_copysign_bit (enum machine_mode mode, rtx op0, rtx op1, rtx target,
3614 int bitpos, bool op0_is_abs)
3615 {
3616 enum machine_mode imode;
3617 HOST_WIDE_INT hi, lo;
3618 int word, nwords, i;
3619 rtx temp, insns;
3620
3621 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
3622 {
3623 imode = int_mode_for_mode (mode);
3624 if (imode == BLKmode)
3625 return NULL_RTX;
3626 word = 0;
3627 nwords = 1;
3628 }
3629 else
3630 {
3631 imode = word_mode;
3632
3633 if (FLOAT_WORDS_BIG_ENDIAN)
3634 word = (GET_MODE_BITSIZE (mode) - bitpos) / BITS_PER_WORD;
3635 else
3636 word = bitpos / BITS_PER_WORD;
3637 bitpos = bitpos % BITS_PER_WORD;
3638 nwords = (GET_MODE_BITSIZE (mode) + BITS_PER_WORD - 1) / BITS_PER_WORD;
3639 }
3640
3641 if (bitpos < HOST_BITS_PER_WIDE_INT)
3642 {
3643 hi = 0;
3644 lo = (HOST_WIDE_INT) 1 << bitpos;
3645 }
3646 else
3647 {
3648 hi = (HOST_WIDE_INT) 1 << (bitpos - HOST_BITS_PER_WIDE_INT);
3649 lo = 0;
3650 }
3651
3652 if (target == 0 || target == op0 || target == op1)
3653 target = gen_reg_rtx (mode);
3654
3655 if (nwords > 1)
3656 {
3657 start_sequence ();
3658
3659 for (i = 0; i < nwords; ++i)
3660 {
3661 rtx targ_piece = operand_subword (target, i, 1, mode);
3662 rtx op0_piece = operand_subword_force (op0, i, mode);
3663
3664 if (i == word)
3665 {
3666 if (!op0_is_abs)
3667 op0_piece = expand_binop (imode, and_optab, op0_piece,
3668 immed_double_const (~lo, ~hi, imode),
3669 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3670
3671 op1 = expand_binop (imode, and_optab,
3672 operand_subword_force (op1, i, mode),
3673 immed_double_const (lo, hi, imode),
3674 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3675
3676 temp = expand_binop (imode, ior_optab, op0_piece, op1,
3677 targ_piece, 1, OPTAB_LIB_WIDEN);
3678 if (temp != targ_piece)
3679 emit_move_insn (targ_piece, temp);
3680 }
3681 else
3682 emit_move_insn (targ_piece, op0_piece);
3683 }
3684
3685 insns = get_insns ();
3686 end_sequence ();
3687
3688 emit_insn (insns);
3689 }
3690 else
3691 {
3692 op1 = expand_binop (imode, and_optab, gen_lowpart (imode, op1),
3693 immed_double_const (lo, hi, imode),
3694 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3695
3696 op0 = gen_lowpart (imode, op0);
3697 if (!op0_is_abs)
3698 op0 = expand_binop (imode, and_optab, op0,
3699 immed_double_const (~lo, ~hi, imode),
3700 NULL_RTX, 1, OPTAB_LIB_WIDEN);
3701
3702 temp = expand_binop (imode, ior_optab, op0, op1,
3703 gen_lowpart (imode, target), 1, OPTAB_LIB_WIDEN);
3704 target = lowpart_subreg_maybe_copy (mode, temp, imode);
3705 }
3706
3707 return target;
3708 }
3709
3710 /* Expand the C99 copysign operation. OP0 and OP1 must be the same
3711 scalar floating point mode. Return NULL if we do not know how to
3712 expand the operation inline. */
3713
3714 rtx
3715 expand_copysign (rtx op0, rtx op1, rtx target)
3716 {
3717 enum machine_mode mode = GET_MODE (op0);
3718 const struct real_format *fmt;
3719 bool op0_is_abs;
3720 rtx temp;
3721
3722 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
3723 gcc_assert (GET_MODE (op1) == mode);
3724
3725 /* First try to do it with a special instruction. */
3726 temp = expand_binop (mode, copysign_optab, op0, op1,
3727 target, 0, OPTAB_DIRECT);
3728 if (temp)
3729 return temp;
3730
3731 fmt = REAL_MODE_FORMAT (mode);
3732 if (fmt == NULL || !fmt->has_signed_zero)
3733 return NULL_RTX;
3734
3735 op0_is_abs = false;
3736 if (GET_CODE (op0) == CONST_DOUBLE)
3737 {
3738 if (real_isneg (CONST_DOUBLE_REAL_VALUE (op0)))
3739 op0 = simplify_unary_operation (ABS, mode, op0, mode);
3740 op0_is_abs = true;
3741 }
3742
3743 if (fmt->signbit_ro >= 0
3744 && (GET_CODE (op0) == CONST_DOUBLE
3745 || (optab_handler (neg_optab, mode)->insn_code != CODE_FOR_nothing
3746 && optab_handler (abs_optab, mode)->insn_code != CODE_FOR_nothing)))
3747 {
3748 temp = expand_copysign_absneg (mode, op0, op1, target,
3749 fmt->signbit_ro, op0_is_abs);
3750 if (temp)
3751 return temp;
3752 }
3753
3754 if (fmt->signbit_rw < 0)
3755 return NULL_RTX;
3756 return expand_copysign_bit (mode, op0, op1, target,
3757 fmt->signbit_rw, op0_is_abs);
3758 }
3759 \f
3760 /* Generate an instruction whose insn-code is INSN_CODE,
3761 with two operands: an output TARGET and an input OP0.
3762 TARGET *must* be nonzero, and the output is always stored there.
3763 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3764 the value that is stored into TARGET.
3765
3766 Return false if expansion failed. */
3767
3768 bool
3769 maybe_emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3770 {
3771 rtx temp;
3772 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
3773 rtx pat;
3774 rtx last = get_last_insn ();
3775
3776 temp = target;
3777
3778 /* Now, if insn does not accept our operands, put them into pseudos. */
3779
3780 if (!insn_data[icode].operand[1].predicate (op0, mode0))
3781 op0 = copy_to_mode_reg (mode0, op0);
3782
3783 if (!insn_data[icode].operand[0].predicate (temp, GET_MODE (temp)))
3784 temp = gen_reg_rtx (GET_MODE (temp));
3785
3786 pat = GEN_FCN (icode) (temp, op0);
3787 if (!pat)
3788 {
3789 delete_insns_since (last);
3790 return false;
3791 }
3792
3793 if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX && code != UNKNOWN)
3794 add_equal_note (pat, temp, code, op0, NULL_RTX);
3795
3796 emit_insn (pat);
3797
3798 if (temp != target)
3799 emit_move_insn (target, temp);
3800 return true;
3801 }
3802 /* Generate an instruction whose insn-code is INSN_CODE,
3803 with two operands: an output TARGET and an input OP0.
3804 TARGET *must* be nonzero, and the output is always stored there.
3805 CODE is an rtx code such that (CODE OP0) is an rtx that describes
3806 the value that is stored into TARGET. */
3807
3808 void
3809 emit_unop_insn (int icode, rtx target, rtx op0, enum rtx_code code)
3810 {
3811 bool ok = maybe_emit_unop_insn (icode, target, op0, code);
3812 gcc_assert (ok);
3813 }
3814 \f
3815 struct no_conflict_data
3816 {
3817 rtx target, first, insn;
3818 bool must_stay;
3819 };
3820
3821 /* Called via note_stores by emit_libcall_block. Set P->must_stay if
3822 the currently examined clobber / store has to stay in the list of
3823 insns that constitute the actual libcall block. */
3824 static void
3825 no_conflict_move_test (rtx dest, const_rtx set, void *p0)
3826 {
3827 struct no_conflict_data *p= (struct no_conflict_data *) p0;
3828
3829 /* If this inns directly contributes to setting the target, it must stay. */
3830 if (reg_overlap_mentioned_p (p->target, dest))
3831 p->must_stay = true;
3832 /* If we haven't committed to keeping any other insns in the list yet,
3833 there is nothing more to check. */
3834 else if (p->insn == p->first)
3835 return;
3836 /* If this insn sets / clobbers a register that feeds one of the insns
3837 already in the list, this insn has to stay too. */
3838 else if (reg_overlap_mentioned_p (dest, PATTERN (p->first))
3839 || (CALL_P (p->first) && (find_reg_fusage (p->first, USE, dest)))
3840 || reg_used_between_p (dest, p->first, p->insn)
3841 /* Likewise if this insn depends on a register set by a previous
3842 insn in the list, or if it sets a result (presumably a hard
3843 register) that is set or clobbered by a previous insn.
3844 N.B. the modified_*_p (SET_DEST...) tests applied to a MEM
3845 SET_DEST perform the former check on the address, and the latter
3846 check on the MEM. */
3847 || (GET_CODE (set) == SET
3848 && (modified_in_p (SET_SRC (set), p->first)
3849 || modified_in_p (SET_DEST (set), p->first)
3850 || modified_between_p (SET_SRC (set), p->first, p->insn)
3851 || modified_between_p (SET_DEST (set), p->first, p->insn))))
3852 p->must_stay = true;
3853 }
3854
3855 \f
3856 /* Emit code to make a call to a constant function or a library call.
3857
3858 INSNS is a list containing all insns emitted in the call.
3859 These insns leave the result in RESULT. Our block is to copy RESULT
3860 to TARGET, which is logically equivalent to EQUIV.
3861
3862 We first emit any insns that set a pseudo on the assumption that these are
3863 loading constants into registers; doing so allows them to be safely cse'ed
3864 between blocks. Then we emit all the other insns in the block, followed by
3865 an insn to move RESULT to TARGET. This last insn will have a REQ_EQUAL
3866 note with an operand of EQUIV. */
3867
3868 void
3869 emit_libcall_block (rtx insns, rtx target, rtx result, rtx equiv)
3870 {
3871 rtx final_dest = target;
3872 rtx prev, next, last, insn;
3873
3874 /* If this is a reg with REG_USERVAR_P set, then it could possibly turn
3875 into a MEM later. Protect the libcall block from this change. */
3876 if (! REG_P (target) || REG_USERVAR_P (target))
3877 target = gen_reg_rtx (GET_MODE (target));
3878
3879 /* If we're using non-call exceptions, a libcall corresponding to an
3880 operation that may trap may also trap. */
3881 if (flag_non_call_exceptions && may_trap_p (equiv))
3882 {
3883 for (insn = insns; insn; insn = NEXT_INSN (insn))
3884 if (CALL_P (insn))
3885 {
3886 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3887
3888 if (note != 0 && INTVAL (XEXP (note, 0)) <= 0)
3889 remove_note (insn, note);
3890 }
3891 }
3892 else
3893 /* look for any CALL_INSNs in this sequence, and attach a REG_EH_REGION
3894 reg note to indicate that this call cannot throw or execute a nonlocal
3895 goto (unless there is already a REG_EH_REGION note, in which case
3896 we update it). */
3897 for (insn = insns; insn; insn = NEXT_INSN (insn))
3898 if (CALL_P (insn))
3899 {
3900 rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
3901
3902 if (note != 0)
3903 XEXP (note, 0) = constm1_rtx;
3904 else
3905 add_reg_note (insn, REG_EH_REGION, constm1_rtx);
3906 }
3907
3908 /* First emit all insns that set pseudos. Remove them from the list as
3909 we go. Avoid insns that set pseudos which were referenced in previous
3910 insns. These can be generated by move_by_pieces, for example,
3911 to update an address. Similarly, avoid insns that reference things
3912 set in previous insns. */
3913
3914 for (insn = insns; insn; insn = next)
3915 {
3916 rtx set = single_set (insn);
3917
3918 next = NEXT_INSN (insn);
3919
3920 if (set != 0 && REG_P (SET_DEST (set))
3921 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3922 {
3923 struct no_conflict_data data;
3924
3925 data.target = const0_rtx;
3926 data.first = insns;
3927 data.insn = insn;
3928 data.must_stay = 0;
3929 note_stores (PATTERN (insn), no_conflict_move_test, &data);
3930 if (! data.must_stay)
3931 {
3932 if (PREV_INSN (insn))
3933 NEXT_INSN (PREV_INSN (insn)) = next;
3934 else
3935 insns = next;
3936
3937 if (next)
3938 PREV_INSN (next) = PREV_INSN (insn);
3939
3940 add_insn (insn);
3941 }
3942 }
3943
3944 /* Some ports use a loop to copy large arguments onto the stack.
3945 Don't move anything outside such a loop. */
3946 if (LABEL_P (insn))
3947 break;
3948 }
3949
3950 prev = get_last_insn ();
3951
3952 /* Write the remaining insns followed by the final copy. */
3953
3954 for (insn = insns; insn; insn = next)
3955 {
3956 next = NEXT_INSN (insn);
3957
3958 add_insn (insn);
3959 }
3960
3961 last = emit_move_insn (target, result);
3962 if (optab_handler (mov_optab, GET_MODE (target))->insn_code
3963 != CODE_FOR_nothing)
3964 set_unique_reg_note (last, REG_EQUAL, copy_rtx (equiv));
3965
3966 if (final_dest != target)
3967 emit_move_insn (final_dest, target);
3968 }
3969 \f
3970 /* Nonzero if we can perform a comparison of mode MODE straightforwardly.
3971 PURPOSE describes how this comparison will be used. CODE is the rtx
3972 comparison code we will be using.
3973
3974 ??? Actually, CODE is slightly weaker than that. A target is still
3975 required to implement all of the normal bcc operations, but not
3976 required to implement all (or any) of the unordered bcc operations. */
3977
3978 int
3979 can_compare_p (enum rtx_code code, enum machine_mode mode,
3980 enum can_compare_purpose purpose)
3981 {
3982 rtx test;
3983 test = gen_rtx_fmt_ee (code, mode, const0_rtx, const0_rtx);
3984 do
3985 {
3986 int icode;
3987
3988 if (optab_handler (cmp_optab, mode)->insn_code != CODE_FOR_nothing)
3989 {
3990 if (purpose == ccp_jump)
3991 return bcc_gen_fctn[(int) code] != NULL;
3992 else if (purpose == ccp_store_flag)
3993 return setcc_gen_code[(int) code] != CODE_FOR_nothing;
3994 else
3995 /* There's only one cmov entry point, and it's allowed to fail. */
3996 return 1;
3997 }
3998 if (purpose == ccp_jump
3999 && (icode = optab_handler (cbranch_optab, mode)->insn_code) != CODE_FOR_nothing
4000 && insn_data[icode].operand[0].predicate (test, mode))
4001 return 1;
4002 if (purpose == ccp_store_flag
4003 && (icode = optab_handler (cstore_optab, mode)->insn_code) != CODE_FOR_nothing
4004 && insn_data[icode].operand[1].predicate (test, mode))
4005 return 1;
4006 if (purpose == ccp_cmov
4007 && optab_handler (cmov_optab, mode)->insn_code != CODE_FOR_nothing)
4008 return 1;
4009
4010 mode = GET_MODE_WIDER_MODE (mode);
4011 PUT_MODE (test, mode);
4012 }
4013 while (mode != VOIDmode);
4014
4015 return 0;
4016 }
4017
4018 /* This function is called when we are going to emit a compare instruction that
4019 compares the values found in *PX and *PY, using the rtl operator COMPARISON.
4020
4021 *PMODE is the mode of the inputs (in case they are const_int).
4022 *PUNSIGNEDP nonzero says that the operands are unsigned;
4023 this matters if they need to be widened.
4024
4025 If they have mode BLKmode, then SIZE specifies the size of both operands.
4026
4027 This function performs all the setup necessary so that the caller only has
4028 to emit a single comparison insn. This setup can involve doing a BLKmode
4029 comparison or emitting a library call to perform the comparison if no insn
4030 is available to handle it.
4031 The values which are passed in through pointers can be modified; the caller
4032 should perform the comparison on the modified values. Constant
4033 comparisons must have already been folded. */
4034
4035 static void
4036 prepare_cmp_insn (rtx *px, rtx *py, enum rtx_code *pcomparison, rtx size,
4037 enum machine_mode *pmode, int *punsignedp,
4038 enum can_compare_purpose purpose)
4039 {
4040 enum machine_mode mode = *pmode;
4041 rtx x = *px, y = *py;
4042 int unsignedp = *punsignedp;
4043 rtx libfunc;
4044
4045 /* If we are inside an appropriately-short loop and we are optimizing,
4046 force expensive constants into a register. */
4047 if (CONSTANT_P (x) && optimize
4048 && (rtx_cost (x, COMPARE, optimize_insn_for_speed_p ())
4049 > COSTS_N_INSNS (1)))
4050 x = force_reg (mode, x);
4051
4052 if (CONSTANT_P (y) && optimize
4053 && (rtx_cost (y, COMPARE, optimize_insn_for_speed_p ())
4054 > COSTS_N_INSNS (1)))
4055 y = force_reg (mode, y);
4056
4057 #ifdef HAVE_cc0
4058 /* Make sure if we have a canonical comparison. The RTL
4059 documentation states that canonical comparisons are required only
4060 for targets which have cc0. */
4061 gcc_assert (!CONSTANT_P (x) || CONSTANT_P (y));
4062 #endif
4063
4064 /* Don't let both operands fail to indicate the mode. */
4065 if (GET_MODE (x) == VOIDmode && GET_MODE (y) == VOIDmode)
4066 x = force_reg (mode, x);
4067
4068 /* Handle all BLKmode compares. */
4069
4070 if (mode == BLKmode)
4071 {
4072 enum machine_mode cmp_mode, result_mode;
4073 enum insn_code cmp_code;
4074 tree length_type;
4075 rtx libfunc;
4076 rtx result;
4077 rtx opalign
4078 = GEN_INT (MIN (MEM_ALIGN (x), MEM_ALIGN (y)) / BITS_PER_UNIT);
4079
4080 gcc_assert (size);
4081
4082 /* Try to use a memory block compare insn - either cmpstr
4083 or cmpmem will do. */
4084 for (cmp_mode = GET_CLASS_NARROWEST_MODE (MODE_INT);
4085 cmp_mode != VOIDmode;
4086 cmp_mode = GET_MODE_WIDER_MODE (cmp_mode))
4087 {
4088 cmp_code = cmpmem_optab[cmp_mode];
4089 if (cmp_code == CODE_FOR_nothing)
4090 cmp_code = cmpstr_optab[cmp_mode];
4091 if (cmp_code == CODE_FOR_nothing)
4092 cmp_code = cmpstrn_optab[cmp_mode];
4093 if (cmp_code == CODE_FOR_nothing)
4094 continue;
4095
4096 /* Must make sure the size fits the insn's mode. */
4097 if ((GET_CODE (size) == CONST_INT
4098 && INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode)))
4099 || (GET_MODE_BITSIZE (GET_MODE (size))
4100 > GET_MODE_BITSIZE (cmp_mode)))
4101 continue;
4102
4103 result_mode = insn_data[cmp_code].operand[0].mode;
4104 result = gen_reg_rtx (result_mode);
4105 size = convert_to_mode (cmp_mode, size, 1);
4106 emit_insn (GEN_FCN (cmp_code) (result, x, y, size, opalign));
4107
4108 *px = result;
4109 *py = const0_rtx;
4110 *pmode = result_mode;
4111 return;
4112 }
4113
4114 /* Otherwise call a library function, memcmp. */
4115 libfunc = memcmp_libfunc;
4116 length_type = sizetype;
4117 result_mode = TYPE_MODE (integer_type_node);
4118 cmp_mode = TYPE_MODE (length_type);
4119 size = convert_to_mode (TYPE_MODE (length_type), size,
4120 TYPE_UNSIGNED (length_type));
4121
4122 result = emit_library_call_value (libfunc, 0, LCT_PURE,
4123 result_mode, 3,
4124 XEXP (x, 0), Pmode,
4125 XEXP (y, 0), Pmode,
4126 size, cmp_mode);
4127 *px = result;
4128 *py = const0_rtx;
4129 *pmode = result_mode;
4130 return;
4131 }
4132
4133 /* Don't allow operands to the compare to trap, as that can put the
4134 compare and branch in different basic blocks. */
4135 if (flag_non_call_exceptions)
4136 {
4137 if (may_trap_p (x))
4138 x = force_reg (mode, x);
4139 if (may_trap_p (y))
4140 y = force_reg (mode, y);
4141 }
4142
4143 *px = x;
4144 *py = y;
4145 if (GET_MODE_CLASS (mode) == MODE_CC)
4146 {
4147 gcc_assert (can_compare_p (*pcomparison, CCmode, purpose));
4148 return;
4149 }
4150 else if (can_compare_p (*pcomparison, mode, purpose))
4151 return;
4152
4153 /* Handle a lib call just for the mode we are using. */
4154 libfunc = optab_libfunc (cmp_optab, mode);
4155 if (libfunc && !SCALAR_FLOAT_MODE_P (mode))
4156 {
4157 rtx result;
4158
4159 /* If we want unsigned, and this mode has a distinct unsigned
4160 comparison routine, use that. */
4161 if (unsignedp)
4162 {
4163 rtx ulibfunc = optab_libfunc (ucmp_optab, mode);
4164 if (ulibfunc)
4165 libfunc = ulibfunc;
4166 }
4167
4168 result = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4169 targetm.libgcc_cmp_return_mode (),
4170 2, x, mode, y, mode);
4171
4172 /* There are two kinds of comparison routines. Biased routines
4173 return 0/1/2, and unbiased routines return -1/0/1. Other parts
4174 of gcc expect that the comparison operation is equivalent
4175 to the modified comparison. For signed comparisons compare the
4176 result against 1 in the biased case, and zero in the unbiased
4177 case. For unsigned comparisons always compare against 1 after
4178 biasing the unbiased result by adding 1. This gives us a way to
4179 represent LTU. */
4180 *px = result;
4181 *pmode = word_mode;
4182 *py = const1_rtx;
4183
4184 if (!TARGET_LIB_INT_CMP_BIASED)
4185 {
4186 if (*punsignedp)
4187 *px = plus_constant (result, 1);
4188 else
4189 *py = const0_rtx;
4190 }
4191 return;
4192 }
4193
4194 gcc_assert (SCALAR_FLOAT_MODE_P (mode));
4195 prepare_float_lib_cmp (px, py, pcomparison, pmode, punsignedp);
4196 }
4197
4198 /* Before emitting an insn with code ICODE, make sure that X, which is going
4199 to be used for operand OPNUM of the insn, is converted from mode MODE to
4200 WIDER_MODE (UNSIGNEDP determines whether it is an unsigned conversion), and
4201 that it is accepted by the operand predicate. Return the new value. */
4202
4203 static rtx
4204 prepare_operand (int icode, rtx x, int opnum, enum machine_mode mode,
4205 enum machine_mode wider_mode, int unsignedp)
4206 {
4207 if (mode != wider_mode)
4208 x = convert_modes (wider_mode, mode, x, unsignedp);
4209
4210 if (!insn_data[icode].operand[opnum].predicate
4211 (x, insn_data[icode].operand[opnum].mode))
4212 {
4213 if (reload_completed)
4214 return NULL_RTX;
4215 x = copy_to_mode_reg (insn_data[icode].operand[opnum].mode, x);
4216 }
4217
4218 return x;
4219 }
4220
4221 /* Subroutine of emit_cmp_and_jump_insns; this function is called when we know
4222 we can do the comparison.
4223 The arguments are the same as for emit_cmp_and_jump_insns; but LABEL may
4224 be NULL_RTX which indicates that only a comparison is to be generated. */
4225
4226 static void
4227 emit_cmp_and_jump_insn_1 (rtx x, rtx y, enum machine_mode mode,
4228 enum rtx_code comparison, int unsignedp, rtx label)
4229 {
4230 rtx test = gen_rtx_fmt_ee (comparison, mode, x, y);
4231 enum mode_class mclass = GET_MODE_CLASS (mode);
4232 enum machine_mode wider_mode = mode;
4233
4234 /* Try combined insns first. */
4235 do
4236 {
4237 enum machine_mode optab_mode = mclass == MODE_CC ? CCmode : wider_mode;
4238 enum insn_code icode;
4239 PUT_MODE (test, wider_mode);
4240
4241 if (label)
4242 {
4243 icode = optab_handler (cbranch_optab, optab_mode)->insn_code;
4244
4245 if (icode != CODE_FOR_nothing
4246 && insn_data[icode].operand[0].predicate (test, wider_mode))
4247 {
4248 x = prepare_operand (icode, x, 1, mode, wider_mode, unsignedp);
4249 y = prepare_operand (icode, y, 2, mode, wider_mode, unsignedp);
4250 emit_jump_insn (GEN_FCN (icode) (test, x, y, label));
4251 return;
4252 }
4253 }
4254
4255 /* Handle some compares against zero. */
4256 icode = optab_handler (tst_optab, optab_mode)->insn_code;
4257 if (y == CONST0_RTX (mode) && icode != CODE_FOR_nothing)
4258 {
4259 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4260 emit_insn (GEN_FCN (icode) (x));
4261 if (label)
4262 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4263 return;
4264 }
4265
4266 /* Handle compares for which there is a directly suitable insn. */
4267
4268 icode = optab_handler (cmp_optab, optab_mode)->insn_code;
4269 if (icode != CODE_FOR_nothing)
4270 {
4271 x = prepare_operand (icode, x, 0, mode, wider_mode, unsignedp);
4272 y = prepare_operand (icode, y, 1, mode, wider_mode, unsignedp);
4273 emit_insn (GEN_FCN (icode) (x, y));
4274 if (label)
4275 emit_jump_insn (bcc_gen_fctn[(int) comparison] (label));
4276 return;
4277 }
4278
4279 if (!CLASS_HAS_WIDER_MODES_P (mclass))
4280 break;
4281
4282 wider_mode = GET_MODE_WIDER_MODE (wider_mode);
4283 }
4284 while (wider_mode != VOIDmode);
4285
4286 gcc_unreachable ();
4287 }
4288
4289 /* Generate code to compare X with Y so that the condition codes are
4290 set and to jump to LABEL if the condition is true. If X is a
4291 constant and Y is not a constant, then the comparison is swapped to
4292 ensure that the comparison RTL has the canonical form.
4293
4294 UNSIGNEDP nonzero says that X and Y are unsigned; this matters if they
4295 need to be widened by emit_cmp_insn. UNSIGNEDP is also used to select
4296 the proper branch condition code.
4297
4298 If X and Y have mode BLKmode, then SIZE specifies the size of both X and Y.
4299
4300 MODE is the mode of the inputs (in case they are const_int).
4301
4302 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). It will
4303 be passed unchanged to emit_cmp_insn, then potentially converted into an
4304 unsigned variant based on UNSIGNEDP to select a proper jump instruction. */
4305
4306 void
4307 emit_cmp_and_jump_insns (rtx x, rtx y, enum rtx_code comparison, rtx size,
4308 enum machine_mode mode, int unsignedp, rtx label)
4309 {
4310 rtx op0 = x, op1 = y;
4311
4312 /* Swap operands and condition to ensure canonical RTL. */
4313 if (swap_commutative_operands_p (x, y))
4314 {
4315 /* If we're not emitting a branch, callers are required to pass
4316 operands in an order conforming to canonical RTL. We relax this
4317 for commutative comparisons so callers using EQ don't need to do
4318 swapping by hand. */
4319 gcc_assert (label || (comparison == swap_condition (comparison)));
4320
4321 op0 = y, op1 = x;
4322 comparison = swap_condition (comparison);
4323 }
4324
4325 #ifdef HAVE_cc0
4326 /* If OP0 is still a constant, then both X and Y must be constants.
4327 Force X into a register to create canonical RTL. */
4328 if (CONSTANT_P (op0))
4329 op0 = force_reg (mode, op0);
4330 #endif
4331
4332 if (unsignedp)
4333 comparison = unsigned_condition (comparison);
4334
4335 prepare_cmp_insn (&op0, &op1, &comparison, size, &mode, &unsignedp,
4336 ccp_jump);
4337 emit_cmp_and_jump_insn_1 (op0, op1, mode, comparison, unsignedp, label);
4338 }
4339
4340 /* Like emit_cmp_and_jump_insns, but generate only the comparison. */
4341
4342 void
4343 emit_cmp_insn (rtx x, rtx y, enum rtx_code comparison, rtx size,
4344 enum machine_mode mode, int unsignedp)
4345 {
4346 emit_cmp_and_jump_insns (x, y, comparison, size, mode, unsignedp, 0);
4347 }
4348 \f
4349 /* Emit a library call comparison between floating point X and Y.
4350 COMPARISON is the rtl operator to compare with (EQ, NE, GT, etc.). */
4351
4352 static void
4353 prepare_float_lib_cmp (rtx *px, rtx *py, enum rtx_code *pcomparison,
4354 enum machine_mode *pmode, int *punsignedp)
4355 {
4356 enum rtx_code comparison = *pcomparison;
4357 enum rtx_code swapped = swap_condition (comparison);
4358 enum rtx_code reversed = reverse_condition_maybe_unordered (comparison);
4359 rtx x = *px;
4360 rtx y = *py;
4361 enum machine_mode orig_mode = GET_MODE (x);
4362 enum machine_mode mode, cmp_mode;
4363 rtx value, target, insns, equiv;
4364 rtx libfunc = 0;
4365 bool reversed_p = false;
4366 cmp_mode = targetm.libgcc_cmp_return_mode ();
4367
4368 for (mode = orig_mode;
4369 mode != VOIDmode;
4370 mode = GET_MODE_WIDER_MODE (mode))
4371 {
4372 if ((libfunc = optab_libfunc (code_to_optab[comparison], mode)))
4373 break;
4374
4375 if ((libfunc = optab_libfunc (code_to_optab[swapped] , mode)))
4376 {
4377 rtx tmp;
4378 tmp = x; x = y; y = tmp;
4379 comparison = swapped;
4380 break;
4381 }
4382
4383 if ((libfunc = optab_libfunc (code_to_optab[reversed], mode))
4384 && FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, reversed))
4385 {
4386 comparison = reversed;
4387 reversed_p = true;
4388 break;
4389 }
4390 }
4391
4392 gcc_assert (mode != VOIDmode);
4393
4394 if (mode != orig_mode)
4395 {
4396 x = convert_to_mode (mode, x, 0);
4397 y = convert_to_mode (mode, y, 0);
4398 }
4399
4400 /* Attach a REG_EQUAL note describing the semantics of the libcall to
4401 the RTL. The allows the RTL optimizers to delete the libcall if the
4402 condition can be determined at compile-time. */
4403 if (comparison == UNORDERED)
4404 {
4405 rtx temp = simplify_gen_relational (NE, cmp_mode, mode, x, x);
4406 equiv = simplify_gen_relational (NE, cmp_mode, mode, y, y);
4407 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4408 temp, const_true_rtx, equiv);
4409 }
4410 else
4411 {
4412 equiv = simplify_gen_relational (comparison, cmp_mode, mode, x, y);
4413 if (! FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4414 {
4415 rtx true_rtx, false_rtx;
4416
4417 switch (comparison)
4418 {
4419 case EQ:
4420 true_rtx = const0_rtx;
4421 false_rtx = const_true_rtx;
4422 break;
4423
4424 case NE:
4425 true_rtx = const_true_rtx;
4426 false_rtx = const0_rtx;
4427 break;
4428
4429 case GT:
4430 true_rtx = const1_rtx;
4431 false_rtx = const0_rtx;
4432 break;
4433
4434 case GE:
4435 true_rtx = const0_rtx;
4436 false_rtx = constm1_rtx;
4437 break;
4438
4439 case LT:
4440 true_rtx = constm1_rtx;
4441 false_rtx = const0_rtx;
4442 break;
4443
4444 case LE:
4445 true_rtx = const0_rtx;
4446 false_rtx = const1_rtx;
4447 break;
4448
4449 default:
4450 gcc_unreachable ();
4451 }
4452 equiv = simplify_gen_ternary (IF_THEN_ELSE, cmp_mode, cmp_mode,
4453 equiv, true_rtx, false_rtx);
4454 }
4455 }
4456
4457 start_sequence ();
4458 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
4459 cmp_mode, 2, x, mode, y, mode);
4460 insns = get_insns ();
4461 end_sequence ();
4462
4463 target = gen_reg_rtx (cmp_mode);
4464 emit_libcall_block (insns, target, value, equiv);
4465
4466 if (comparison == UNORDERED
4467 || FLOAT_LIB_COMPARE_RETURNS_BOOL (mode, comparison))
4468 comparison = reversed_p ? EQ : NE;
4469
4470 *px = target;
4471 *py = const0_rtx;
4472 *pmode = cmp_mode;
4473 *pcomparison = comparison;
4474 *punsignedp = 0;
4475 }
4476 \f
4477 /* Generate code to indirectly jump to a location given in the rtx LOC. */
4478
4479 void
4480 emit_indirect_jump (rtx loc)
4481 {
4482 if (!insn_data[(int) CODE_FOR_indirect_jump].operand[0].predicate
4483 (loc, Pmode))
4484 loc = copy_to_mode_reg (Pmode, loc);
4485
4486 emit_jump_insn (gen_indirect_jump (loc));
4487 emit_barrier ();
4488 }
4489 \f
4490 #ifdef HAVE_conditional_move
4491
4492 /* Emit a conditional move instruction if the machine supports one for that
4493 condition and machine mode.
4494
4495 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4496 the mode to use should they be constants. If it is VOIDmode, they cannot
4497 both be constants.
4498
4499 OP2 should be stored in TARGET if the comparison is true, otherwise OP3
4500 should be stored there. MODE is the mode to use should they be constants.
4501 If it is VOIDmode, they cannot both be constants.
4502
4503 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4504 is not supported. */
4505
4506 rtx
4507 emit_conditional_move (rtx target, enum rtx_code code, rtx op0, rtx op1,
4508 enum machine_mode cmode, rtx op2, rtx op3,
4509 enum machine_mode mode, int unsignedp)
4510 {
4511 rtx tem, subtarget, comparison, insn;
4512 enum insn_code icode;
4513 enum rtx_code reversed;
4514
4515 /* If one operand is constant, make it the second one. Only do this
4516 if the other operand is not constant as well. */
4517
4518 if (swap_commutative_operands_p (op0, op1))
4519 {
4520 tem = op0;
4521 op0 = op1;
4522 op1 = tem;
4523 code = swap_condition (code);
4524 }
4525
4526 /* get_condition will prefer to generate LT and GT even if the old
4527 comparison was against zero, so undo that canonicalization here since
4528 comparisons against zero are cheaper. */
4529 if (code == LT && op1 == const1_rtx)
4530 code = LE, op1 = const0_rtx;
4531 else if (code == GT && op1 == constm1_rtx)
4532 code = GE, op1 = const0_rtx;
4533
4534 if (cmode == VOIDmode)
4535 cmode = GET_MODE (op0);
4536
4537 if (swap_commutative_operands_p (op2, op3)
4538 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4539 != UNKNOWN))
4540 {
4541 tem = op2;
4542 op2 = op3;
4543 op3 = tem;
4544 code = reversed;
4545 }
4546
4547 if (mode == VOIDmode)
4548 mode = GET_MODE (op2);
4549
4550 icode = movcc_gen_code[mode];
4551
4552 if (icode == CODE_FOR_nothing)
4553 return 0;
4554
4555 if (!target)
4556 target = gen_reg_rtx (mode);
4557
4558 subtarget = target;
4559
4560 /* If the insn doesn't accept these operands, put them in pseudos. */
4561
4562 if (!insn_data[icode].operand[0].predicate
4563 (subtarget, insn_data[icode].operand[0].mode))
4564 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4565
4566 if (!insn_data[icode].operand[2].predicate
4567 (op2, insn_data[icode].operand[2].mode))
4568 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4569
4570 if (!insn_data[icode].operand[3].predicate
4571 (op3, insn_data[icode].operand[3].mode))
4572 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4573
4574 /* Everything should now be in the suitable form, so emit the compare insn
4575 and then the conditional move. */
4576
4577 comparison
4578 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4579
4580 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4581 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4582 return NULL and let the caller figure out how best to deal with this
4583 situation. */
4584 if (GET_CODE (comparison) != code)
4585 return NULL_RTX;
4586
4587 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4588
4589 /* If that failed, then give up. */
4590 if (insn == 0)
4591 return 0;
4592
4593 emit_insn (insn);
4594
4595 if (subtarget != target)
4596 convert_move (target, subtarget, 0);
4597
4598 return target;
4599 }
4600
4601 /* Return nonzero if a conditional move of mode MODE is supported.
4602
4603 This function is for combine so it can tell whether an insn that looks
4604 like a conditional move is actually supported by the hardware. If we
4605 guess wrong we lose a bit on optimization, but that's it. */
4606 /* ??? sparc64 supports conditionally moving integers values based on fp
4607 comparisons, and vice versa. How do we handle them? */
4608
4609 int
4610 can_conditionally_move_p (enum machine_mode mode)
4611 {
4612 if (movcc_gen_code[mode] != CODE_FOR_nothing)
4613 return 1;
4614
4615 return 0;
4616 }
4617
4618 #endif /* HAVE_conditional_move */
4619
4620 /* Emit a conditional addition instruction if the machine supports one for that
4621 condition and machine mode.
4622
4623 OP0 and OP1 are the operands that should be compared using CODE. CMODE is
4624 the mode to use should they be constants. If it is VOIDmode, they cannot
4625 both be constants.
4626
4627 OP2 should be stored in TARGET if the comparison is true, otherwise OP2+OP3
4628 should be stored there. MODE is the mode to use should they be constants.
4629 If it is VOIDmode, they cannot both be constants.
4630
4631 The result is either TARGET (perhaps modified) or NULL_RTX if the operation
4632 is not supported. */
4633
4634 rtx
4635 emit_conditional_add (rtx target, enum rtx_code code, rtx op0, rtx op1,
4636 enum machine_mode cmode, rtx op2, rtx op3,
4637 enum machine_mode mode, int unsignedp)
4638 {
4639 rtx tem, subtarget, comparison, insn;
4640 enum insn_code icode;
4641 enum rtx_code reversed;
4642
4643 /* If one operand is constant, make it the second one. Only do this
4644 if the other operand is not constant as well. */
4645
4646 if (swap_commutative_operands_p (op0, op1))
4647 {
4648 tem = op0;
4649 op0 = op1;
4650 op1 = tem;
4651 code = swap_condition (code);
4652 }
4653
4654 /* get_condition will prefer to generate LT and GT even if the old
4655 comparison was against zero, so undo that canonicalization here since
4656 comparisons against zero are cheaper. */
4657 if (code == LT && op1 == const1_rtx)
4658 code = LE, op1 = const0_rtx;
4659 else if (code == GT && op1 == constm1_rtx)
4660 code = GE, op1 = const0_rtx;
4661
4662 if (cmode == VOIDmode)
4663 cmode = GET_MODE (op0);
4664
4665 if (swap_commutative_operands_p (op2, op3)
4666 && ((reversed = reversed_comparison_code_parts (code, op0, op1, NULL))
4667 != UNKNOWN))
4668 {
4669 tem = op2;
4670 op2 = op3;
4671 op3 = tem;
4672 code = reversed;
4673 }
4674
4675 if (mode == VOIDmode)
4676 mode = GET_MODE (op2);
4677
4678 icode = optab_handler (addcc_optab, mode)->insn_code;
4679
4680 if (icode == CODE_FOR_nothing)
4681 return 0;
4682
4683 if (!target)
4684 target = gen_reg_rtx (mode);
4685
4686 /* If the insn doesn't accept these operands, put them in pseudos. */
4687
4688 if (!insn_data[icode].operand[0].predicate
4689 (target, insn_data[icode].operand[0].mode))
4690 subtarget = gen_reg_rtx (insn_data[icode].operand[0].mode);
4691 else
4692 subtarget = target;
4693
4694 if (!insn_data[icode].operand[2].predicate
4695 (op2, insn_data[icode].operand[2].mode))
4696 op2 = copy_to_mode_reg (insn_data[icode].operand[2].mode, op2);
4697
4698 if (!insn_data[icode].operand[3].predicate
4699 (op3, insn_data[icode].operand[3].mode))
4700 op3 = copy_to_mode_reg (insn_data[icode].operand[3].mode, op3);
4701
4702 /* Everything should now be in the suitable form, so emit the compare insn
4703 and then the conditional move. */
4704
4705 comparison
4706 = compare_from_rtx (op0, op1, code, unsignedp, cmode, NULL_RTX);
4707
4708 /* ??? Watch for const0_rtx (nop) and const_true_rtx (unconditional)? */
4709 /* We can get const0_rtx or const_true_rtx in some circumstances. Just
4710 return NULL and let the caller figure out how best to deal with this
4711 situation. */
4712 if (GET_CODE (comparison) != code)
4713 return NULL_RTX;
4714
4715 insn = GEN_FCN (icode) (subtarget, comparison, op2, op3);
4716
4717 /* If that failed, then give up. */
4718 if (insn == 0)
4719 return 0;
4720
4721 emit_insn (insn);
4722
4723 if (subtarget != target)
4724 convert_move (target, subtarget, 0);
4725
4726 return target;
4727 }
4728 \f
4729 /* These functions attempt to generate an insn body, rather than
4730 emitting the insn, but if the gen function already emits them, we
4731 make no attempt to turn them back into naked patterns. */
4732
4733 /* Generate and return an insn body to add Y to X. */
4734
4735 rtx
4736 gen_add2_insn (rtx x, rtx y)
4737 {
4738 int icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;
4739
4740 gcc_assert (insn_data[icode].operand[0].predicate
4741 (x, insn_data[icode].operand[0].mode));
4742 gcc_assert (insn_data[icode].operand[1].predicate
4743 (x, insn_data[icode].operand[1].mode));
4744 gcc_assert (insn_data[icode].operand[2].predicate
4745 (y, insn_data[icode].operand[2].mode));
4746
4747 return GEN_FCN (icode) (x, x, y);
4748 }
4749
4750 /* Generate and return an insn body to add r1 and c,
4751 storing the result in r0. */
4752
4753 rtx
4754 gen_add3_insn (rtx r0, rtx r1, rtx c)
4755 {
4756 int icode = (int) optab_handler (add_optab, GET_MODE (r0))->insn_code;
4757
4758 if (icode == CODE_FOR_nothing
4759 || !(insn_data[icode].operand[0].predicate
4760 (r0, insn_data[icode].operand[0].mode))
4761 || !(insn_data[icode].operand[1].predicate
4762 (r1, insn_data[icode].operand[1].mode))
4763 || !(insn_data[icode].operand[2].predicate
4764 (c, insn_data[icode].operand[2].mode)))
4765 return NULL_RTX;
4766
4767 return GEN_FCN (icode) (r0, r1, c);
4768 }
4769
4770 int
4771 have_add2_insn (rtx x, rtx y)
4772 {
4773 int icode;
4774
4775 gcc_assert (GET_MODE (x) != VOIDmode);
4776
4777 icode = (int) optab_handler (add_optab, GET_MODE (x))->insn_code;
4778
4779 if (icode == CODE_FOR_nothing)
4780 return 0;
4781
4782 if (!(insn_data[icode].operand[0].predicate
4783 (x, insn_data[icode].operand[0].mode))
4784 || !(insn_data[icode].operand[1].predicate
4785 (x, insn_data[icode].operand[1].mode))
4786 || !(insn_data[icode].operand[2].predicate
4787 (y, insn_data[icode].operand[2].mode)))
4788 return 0;
4789
4790 return 1;
4791 }
4792
4793 /* Generate and return an insn body to subtract Y from X. */
4794
4795 rtx
4796 gen_sub2_insn (rtx x, rtx y)
4797 {
4798 int icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;
4799
4800 gcc_assert (insn_data[icode].operand[0].predicate
4801 (x, insn_data[icode].operand[0].mode));
4802 gcc_assert (insn_data[icode].operand[1].predicate
4803 (x, insn_data[icode].operand[1].mode));
4804 gcc_assert (insn_data[icode].operand[2].predicate
4805 (y, insn_data[icode].operand[2].mode));
4806
4807 return GEN_FCN (icode) (x, x, y);
4808 }
4809
4810 /* Generate and return an insn body to subtract r1 and c,
4811 storing the result in r0. */
4812
4813 rtx
4814 gen_sub3_insn (rtx r0, rtx r1, rtx c)
4815 {
4816 int icode = (int) optab_handler (sub_optab, GET_MODE (r0))->insn_code;
4817
4818 if (icode == CODE_FOR_nothing
4819 || !(insn_data[icode].operand[0].predicate
4820 (r0, insn_data[icode].operand[0].mode))
4821 || !(insn_data[icode].operand[1].predicate
4822 (r1, insn_data[icode].operand[1].mode))
4823 || !(insn_data[icode].operand[2].predicate
4824 (c, insn_data[icode].operand[2].mode)))
4825 return NULL_RTX;
4826
4827 return GEN_FCN (icode) (r0, r1, c);
4828 }
4829
4830 int
4831 have_sub2_insn (rtx x, rtx y)
4832 {
4833 int icode;
4834
4835 gcc_assert (GET_MODE (x) != VOIDmode);
4836
4837 icode = (int) optab_handler (sub_optab, GET_MODE (x))->insn_code;
4838
4839 if (icode == CODE_FOR_nothing)
4840 return 0;
4841
4842 if (!(insn_data[icode].operand[0].predicate
4843 (x, insn_data[icode].operand[0].mode))
4844 || !(insn_data[icode].operand[1].predicate
4845 (x, insn_data[icode].operand[1].mode))
4846 || !(insn_data[icode].operand[2].predicate
4847 (y, insn_data[icode].operand[2].mode)))
4848 return 0;
4849
4850 return 1;
4851 }
4852
4853 /* Generate the body of an instruction to copy Y into X.
4854 It may be a list of insns, if one insn isn't enough. */
4855
4856 rtx
4857 gen_move_insn (rtx x, rtx y)
4858 {
4859 rtx seq;
4860
4861 start_sequence ();
4862 emit_move_insn_1 (x, y);
4863 seq = get_insns ();
4864 end_sequence ();
4865 return seq;
4866 }
4867 \f
4868 /* Return the insn code used to extend FROM_MODE to TO_MODE.
4869 UNSIGNEDP specifies zero-extension instead of sign-extension. If
4870 no such operation exists, CODE_FOR_nothing will be returned. */
4871
4872 enum insn_code
4873 can_extend_p (enum machine_mode to_mode, enum machine_mode from_mode,
4874 int unsignedp)
4875 {
4876 convert_optab tab;
4877 #ifdef HAVE_ptr_extend
4878 if (unsignedp < 0)
4879 return CODE_FOR_ptr_extend;
4880 #endif
4881
4882 tab = unsignedp ? zext_optab : sext_optab;
4883 return convert_optab_handler (tab, to_mode, from_mode)->insn_code;
4884 }
4885
4886 /* Generate the body of an insn to extend Y (with mode MFROM)
4887 into X (with mode MTO). Do zero-extension if UNSIGNEDP is nonzero. */
4888
4889 rtx
4890 gen_extend_insn (rtx x, rtx y, enum machine_mode mto,
4891 enum machine_mode mfrom, int unsignedp)
4892 {
4893 enum insn_code icode = can_extend_p (mto, mfrom, unsignedp);
4894 return GEN_FCN (icode) (x, y);
4895 }
4896 \f
4897 /* can_fix_p and can_float_p say whether the target machine
4898 can directly convert a given fixed point type to
4899 a given floating point type, or vice versa.
4900 The returned value is the CODE_FOR_... value to use,
4901 or CODE_FOR_nothing if these modes cannot be directly converted.
4902
4903 *TRUNCP_PTR is set to 1 if it is necessary to output
4904 an explicit FTRUNC insn before the fix insn; otherwise 0. */
4905
4906 static enum insn_code
4907 can_fix_p (enum machine_mode fixmode, enum machine_mode fltmode,
4908 int unsignedp, int *truncp_ptr)
4909 {
4910 convert_optab tab;
4911 enum insn_code icode;
4912
4913 tab = unsignedp ? ufixtrunc_optab : sfixtrunc_optab;
4914 icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
4915 if (icode != CODE_FOR_nothing)
4916 {
4917 *truncp_ptr = 0;
4918 return icode;
4919 }
4920
4921 /* FIXME: This requires a port to define both FIX and FTRUNC pattern
4922 for this to work. We need to rework the fix* and ftrunc* patterns
4923 and documentation. */
4924 tab = unsignedp ? ufix_optab : sfix_optab;
4925 icode = convert_optab_handler (tab, fixmode, fltmode)->insn_code;
4926 if (icode != CODE_FOR_nothing
4927 && optab_handler (ftrunc_optab, fltmode)->insn_code != CODE_FOR_nothing)
4928 {
4929 *truncp_ptr = 1;
4930 return icode;
4931 }
4932
4933 *truncp_ptr = 0;
4934 return CODE_FOR_nothing;
4935 }
4936
4937 static enum insn_code
4938 can_float_p (enum machine_mode fltmode, enum machine_mode fixmode,
4939 int unsignedp)
4940 {
4941 convert_optab tab;
4942
4943 tab = unsignedp ? ufloat_optab : sfloat_optab;
4944 return convert_optab_handler (tab, fltmode, fixmode)->insn_code;
4945 }
4946 \f
4947 /* Generate code to convert FROM to floating point
4948 and store in TO. FROM must be fixed point and not VOIDmode.
4949 UNSIGNEDP nonzero means regard FROM as unsigned.
4950 Normally this is done by correcting the final value
4951 if it is negative. */
4952
4953 void
4954 expand_float (rtx to, rtx from, int unsignedp)
4955 {
4956 enum insn_code icode;
4957 rtx target = to;
4958 enum machine_mode fmode, imode;
4959 bool can_do_signed = false;
4960
4961 /* Crash now, because we won't be able to decide which mode to use. */
4962 gcc_assert (GET_MODE (from) != VOIDmode);
4963
4964 /* Look for an insn to do the conversion. Do it in the specified
4965 modes if possible; otherwise convert either input, output or both to
4966 wider mode. If the integer mode is wider than the mode of FROM,
4967 we can do the conversion signed even if the input is unsigned. */
4968
4969 for (fmode = GET_MODE (to); fmode != VOIDmode;
4970 fmode = GET_MODE_WIDER_MODE (fmode))
4971 for (imode = GET_MODE (from); imode != VOIDmode;
4972 imode = GET_MODE_WIDER_MODE (imode))
4973 {
4974 int doing_unsigned = unsignedp;
4975
4976 if (fmode != GET_MODE (to)
4977 && significand_size (fmode) < GET_MODE_BITSIZE (GET_MODE (from)))
4978 continue;
4979
4980 icode = can_float_p (fmode, imode, unsignedp);
4981 if (icode == CODE_FOR_nothing && unsignedp)
4982 {
4983 enum insn_code scode = can_float_p (fmode, imode, 0);
4984 if (scode != CODE_FOR_nothing)
4985 can_do_signed = true;
4986 if (imode != GET_MODE (from))
4987 icode = scode, doing_unsigned = 0;
4988 }
4989
4990 if (icode != CODE_FOR_nothing)
4991 {
4992 if (imode != GET_MODE (from))
4993 from = convert_to_mode (imode, from, unsignedp);
4994
4995 if (fmode != GET_MODE (to))
4996 target = gen_reg_rtx (fmode);
4997
4998 emit_unop_insn (icode, target, from,
4999 doing_unsigned ? UNSIGNED_FLOAT : FLOAT);
5000
5001 if (target != to)
5002 convert_move (to, target, 0);
5003 return;
5004 }
5005 }
5006
5007 /* Unsigned integer, and no way to convert directly. Convert as signed,
5008 then unconditionally adjust the result. */
5009 if (unsignedp && can_do_signed)
5010 {
5011 rtx label = gen_label_rtx ();
5012 rtx temp;
5013 REAL_VALUE_TYPE offset;
5014
5015 /* Look for a usable floating mode FMODE wider than the source and at
5016 least as wide as the target. Using FMODE will avoid rounding woes
5017 with unsigned values greater than the signed maximum value. */
5018
5019 for (fmode = GET_MODE (to); fmode != VOIDmode;
5020 fmode = GET_MODE_WIDER_MODE (fmode))
5021 if (GET_MODE_BITSIZE (GET_MODE (from)) < GET_MODE_BITSIZE (fmode)
5022 && can_float_p (fmode, GET_MODE (from), 0) != CODE_FOR_nothing)
5023 break;
5024
5025 if (fmode == VOIDmode)
5026 {
5027 /* There is no such mode. Pretend the target is wide enough. */
5028 fmode = GET_MODE (to);
5029
5030 /* Avoid double-rounding when TO is narrower than FROM. */
5031 if ((significand_size (fmode) + 1)
5032 < GET_MODE_BITSIZE (GET_MODE (from)))
5033 {
5034 rtx temp1;
5035 rtx neglabel = gen_label_rtx ();
5036
5037 /* Don't use TARGET if it isn't a register, is a hard register,
5038 or is the wrong mode. */
5039 if (!REG_P (target)
5040 || REGNO (target) < FIRST_PSEUDO_REGISTER
5041 || GET_MODE (target) != fmode)
5042 target = gen_reg_rtx (fmode);
5043
5044 imode = GET_MODE (from);
5045 do_pending_stack_adjust ();
5046
5047 /* Test whether the sign bit is set. */
5048 emit_cmp_and_jump_insns (from, const0_rtx, LT, NULL_RTX, imode,
5049 0, neglabel);
5050
5051 /* The sign bit is not set. Convert as signed. */
5052 expand_float (target, from, 0);
5053 emit_jump_insn (gen_jump (label));
5054 emit_barrier ();
5055
5056 /* The sign bit is set.
5057 Convert to a usable (positive signed) value by shifting right
5058 one bit, while remembering if a nonzero bit was shifted
5059 out; i.e., compute (from & 1) | (from >> 1). */
5060
5061 emit_label (neglabel);
5062 temp = expand_binop (imode, and_optab, from, const1_rtx,
5063 NULL_RTX, 1, OPTAB_LIB_WIDEN);
5064 temp1 = expand_shift (RSHIFT_EXPR, imode, from, integer_one_node,
5065 NULL_RTX, 1);
5066 temp = expand_binop (imode, ior_optab, temp, temp1, temp, 1,
5067 OPTAB_LIB_WIDEN);
5068 expand_float (target, temp, 0);
5069
5070 /* Multiply by 2 to undo the shift above. */
5071 temp = expand_binop (fmode, add_optab, target, target,
5072 target, 0, OPTAB_LIB_WIDEN);
5073 if (temp != target)
5074 emit_move_insn (target, temp);
5075
5076 do_pending_stack_adjust ();
5077 emit_label (label);
5078 goto done;
5079 }
5080 }
5081
5082 /* If we are about to do some arithmetic to correct for an
5083 unsigned operand, do it in a pseudo-register. */
5084
5085 if (GET_MODE (to) != fmode
5086 || !REG_P (to) || REGNO (to) < FIRST_PSEUDO_REGISTER)
5087 target = gen_reg_rtx (fmode);
5088
5089 /* Convert as signed integer to floating. */
5090 expand_float (target, from, 0);
5091
5092 /* If FROM is negative (and therefore TO is negative),
5093 correct its value by 2**bitwidth. */
5094
5095 do_pending_stack_adjust ();
5096 emit_cmp_and_jump_insns (from, const0_rtx, GE, NULL_RTX, GET_MODE (from),
5097 0, label);
5098
5099
5100 real_2expN (&offset, GET_MODE_BITSIZE (GET_MODE (from)), fmode);
5101 temp = expand_binop (fmode, add_optab, target,
5102 CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode),
5103 target, 0, OPTAB_LIB_WIDEN);
5104 if (temp != target)
5105 emit_move_insn (target, temp);
5106
5107 do_pending_stack_adjust ();
5108 emit_label (label);
5109 goto done;
5110 }
5111
5112 /* No hardware instruction available; call a library routine. */
5113 {
5114 rtx libfunc;
5115 rtx insns;
5116 rtx value;
5117 convert_optab tab = unsignedp ? ufloat_optab : sfloat_optab;
5118
5119 if (GET_MODE_SIZE (GET_MODE (from)) < GET_MODE_SIZE (SImode))
5120 from = convert_to_mode (SImode, from, unsignedp);
5121
5122 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5123 gcc_assert (libfunc);
5124
5125 start_sequence ();
5126
5127 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5128 GET_MODE (to), 1, from,
5129 GET_MODE (from));
5130 insns = get_insns ();
5131 end_sequence ();
5132
5133 emit_libcall_block (insns, target, value,
5134 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FLOAT : FLOAT,
5135 GET_MODE (to), from));
5136 }
5137
5138 done:
5139
5140 /* Copy result to requested destination
5141 if we have been computing in a temp location. */
5142
5143 if (target != to)
5144 {
5145 if (GET_MODE (target) == GET_MODE (to))
5146 emit_move_insn (to, target);
5147 else
5148 convert_move (to, target, 0);
5149 }
5150 }
5151 \f
5152 /* Generate code to convert FROM to fixed point and store in TO. FROM
5153 must be floating point. */
5154
5155 void
5156 expand_fix (rtx to, rtx from, int unsignedp)
5157 {
5158 enum insn_code icode;
5159 rtx target = to;
5160 enum machine_mode fmode, imode;
5161 int must_trunc = 0;
5162
5163 /* We first try to find a pair of modes, one real and one integer, at
5164 least as wide as FROM and TO, respectively, in which we can open-code
5165 this conversion. If the integer mode is wider than the mode of TO,
5166 we can do the conversion either signed or unsigned. */
5167
5168 for (fmode = GET_MODE (from); fmode != VOIDmode;
5169 fmode = GET_MODE_WIDER_MODE (fmode))
5170 for (imode = GET_MODE (to); imode != VOIDmode;
5171 imode = GET_MODE_WIDER_MODE (imode))
5172 {
5173 int doing_unsigned = unsignedp;
5174
5175 icode = can_fix_p (imode, fmode, unsignedp, &must_trunc);
5176 if (icode == CODE_FOR_nothing && imode != GET_MODE (to) && unsignedp)
5177 icode = can_fix_p (imode, fmode, 0, &must_trunc), doing_unsigned = 0;
5178
5179 if (icode != CODE_FOR_nothing)
5180 {
5181 rtx last = get_last_insn ();
5182 if (fmode != GET_MODE (from))
5183 from = convert_to_mode (fmode, from, 0);
5184
5185 if (must_trunc)
5186 {
5187 rtx temp = gen_reg_rtx (GET_MODE (from));
5188 from = expand_unop (GET_MODE (from), ftrunc_optab, from,
5189 temp, 0);
5190 }
5191
5192 if (imode != GET_MODE (to))
5193 target = gen_reg_rtx (imode);
5194
5195 if (maybe_emit_unop_insn (icode, target, from,
5196 doing_unsigned ? UNSIGNED_FIX : FIX))
5197 {
5198 if (target != to)
5199 convert_move (to, target, unsignedp);
5200 return;
5201 }
5202 delete_insns_since (last);
5203 }
5204 }
5205
5206 /* For an unsigned conversion, there is one more way to do it.
5207 If we have a signed conversion, we generate code that compares
5208 the real value to the largest representable positive number. If if
5209 is smaller, the conversion is done normally. Otherwise, subtract
5210 one plus the highest signed number, convert, and add it back.
5211
5212 We only need to check all real modes, since we know we didn't find
5213 anything with a wider integer mode.
5214
5215 This code used to extend FP value into mode wider than the destination.
5216 This is needed for decimal float modes which cannot accurately
5217 represent one plus the highest signed number of the same size, but
5218 not for binary modes. Consider, for instance conversion from SFmode
5219 into DImode.
5220
5221 The hot path through the code is dealing with inputs smaller than 2^63
5222 and doing just the conversion, so there is no bits to lose.
5223
5224 In the other path we know the value is positive in the range 2^63..2^64-1
5225 inclusive. (as for other input overflow happens and result is undefined)
5226 So we know that the most important bit set in mantissa corresponds to
5227 2^63. The subtraction of 2^63 should not generate any rounding as it
5228 simply clears out that bit. The rest is trivial. */
5229
5230 if (unsignedp && GET_MODE_BITSIZE (GET_MODE (to)) <= HOST_BITS_PER_WIDE_INT)
5231 for (fmode = GET_MODE (from); fmode != VOIDmode;
5232 fmode = GET_MODE_WIDER_MODE (fmode))
5233 if (CODE_FOR_nothing != can_fix_p (GET_MODE (to), fmode, 0, &must_trunc)
5234 && (!DECIMAL_FLOAT_MODE_P (fmode)
5235 || GET_MODE_BITSIZE (fmode) > GET_MODE_BITSIZE (GET_MODE (to))))
5236 {
5237 int bitsize;
5238 REAL_VALUE_TYPE offset;
5239 rtx limit, lab1, lab2, insn;
5240
5241 bitsize = GET_MODE_BITSIZE (GET_MODE (to));
5242 real_2expN (&offset, bitsize - 1, fmode);
5243 limit = CONST_DOUBLE_FROM_REAL_VALUE (offset, fmode);
5244 lab1 = gen_label_rtx ();
5245 lab2 = gen_label_rtx ();
5246
5247 if (fmode != GET_MODE (from))
5248 from = convert_to_mode (fmode, from, 0);
5249
5250 /* See if we need to do the subtraction. */
5251 do_pending_stack_adjust ();
5252 emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
5253 0, lab1);
5254
5255 /* If not, do the signed "fix" and branch around fixup code. */
5256 expand_fix (to, from, 0);
5257 emit_jump_insn (gen_jump (lab2));
5258 emit_barrier ();
5259
5260 /* Otherwise, subtract 2**(N-1), convert to signed number,
5261 then add 2**(N-1). Do the addition using XOR since this
5262 will often generate better code. */
5263 emit_label (lab1);
5264 target = expand_binop (GET_MODE (from), sub_optab, from, limit,
5265 NULL_RTX, 0, OPTAB_LIB_WIDEN);
5266 expand_fix (to, target, 0);
5267 target = expand_binop (GET_MODE (to), xor_optab, to,
5268 gen_int_mode
5269 ((HOST_WIDE_INT) 1 << (bitsize - 1),
5270 GET_MODE (to)),
5271 to, 1, OPTAB_LIB_WIDEN);
5272
5273 if (target != to)
5274 emit_move_insn (to, target);
5275
5276 emit_label (lab2);
5277
5278 if (optab_handler (mov_optab, GET_MODE (to))->insn_code
5279 != CODE_FOR_nothing)
5280 {
5281 /* Make a place for a REG_NOTE and add it. */
5282 insn = emit_move_insn (to, to);
5283 set_unique_reg_note (insn,
5284 REG_EQUAL,
5285 gen_rtx_fmt_e (UNSIGNED_FIX,
5286 GET_MODE (to),
5287 copy_rtx (from)));
5288 }
5289
5290 return;
5291 }
5292
5293 /* We can't do it with an insn, so use a library call. But first ensure
5294 that the mode of TO is at least as wide as SImode, since those are the
5295 only library calls we know about. */
5296
5297 if (GET_MODE_SIZE (GET_MODE (to)) < GET_MODE_SIZE (SImode))
5298 {
5299 target = gen_reg_rtx (SImode);
5300
5301 expand_fix (target, from, unsignedp);
5302 }
5303 else
5304 {
5305 rtx insns;
5306 rtx value;
5307 rtx libfunc;
5308
5309 convert_optab tab = unsignedp ? ufix_optab : sfix_optab;
5310 libfunc = convert_optab_libfunc (tab, GET_MODE (to), GET_MODE (from));
5311 gcc_assert (libfunc);
5312
5313 start_sequence ();
5314
5315 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
5316 GET_MODE (to), 1, from,
5317 GET_MODE (from));
5318 insns = get_insns ();
5319 end_sequence ();
5320
5321 emit_libcall_block (insns, target, value,
5322 gen_rtx_fmt_e (unsignedp ? UNSIGNED_FIX : FIX,
5323 GET_MODE (to), from));
5324 }
5325
5326 if (target != to)
5327 {
5328 if (GET_MODE (to) == GET_MODE (target))
5329 emit_move_insn (to, target);
5330 else
5331 convert_move (to, target, 0);
5332 }
5333 }
5334
5335 /* Generate code to convert FROM or TO a fixed-point.
5336 If UINTP is true, either TO or FROM is an unsigned integer.
5337 If SATP is true, we need to saturate the result. */
5338
5339 void
5340 expand_fixed_convert (rtx to, rtx from, int uintp, int satp)
5341 {
5342 enum machine_mode to_mode = GET_MODE (to);
5343 enum machine_mode from_mode = GET_MODE (from);
5344 convert_optab tab;
5345 enum rtx_code this_code;
5346 enum insn_code code;
5347 rtx insns, value;
5348 rtx libfunc;
5349
5350 if (to_mode == from_mode)
5351 {
5352 emit_move_insn (to, from);
5353 return;
5354 }
5355
5356 if (uintp)
5357 {
5358 tab = satp ? satfractuns_optab : fractuns_optab;
5359 this_code = satp ? UNSIGNED_SAT_FRACT : UNSIGNED_FRACT_CONVERT;
5360 }
5361 else
5362 {
5363 tab = satp ? satfract_optab : fract_optab;
5364 this_code = satp ? SAT_FRACT : FRACT_CONVERT;
5365 }
5366 code = tab->handlers[to_mode][from_mode].insn_code;
5367 if (code != CODE_FOR_nothing)
5368 {
5369 emit_unop_insn (code, to, from, this_code);
5370 return;
5371 }
5372
5373 libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
5374 gcc_assert (libfunc);
5375
5376 start_sequence ();
5377 value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
5378 1, from, from_mode);
5379 insns = get_insns ();
5380 end_sequence ();
5381
5382 emit_libcall_block (insns, to, value,
5383 gen_rtx_fmt_e (tab->code, to_mode, from));
5384 }
5385
5386 /* Generate code to convert FROM to fixed point and store in TO. FROM
5387 must be floating point, TO must be signed. Use the conversion optab
5388 TAB to do the conversion. */
5389
5390 bool
5391 expand_sfix_optab (rtx to, rtx from, convert_optab tab)
5392 {
5393 enum insn_code icode;
5394 rtx target = to;
5395 enum machine_mode fmode, imode;
5396
5397 /* We first try to find a pair of modes, one real and one integer, at
5398 least as wide as FROM and TO, respectively, in which we can open-code
5399 this conversion. If the integer mode is wider than the mode of TO,
5400 we can do the conversion either signed or unsigned. */
5401
5402 for (fmode = GET_MODE (from); fmode != VOIDmode;
5403 fmode = GET_MODE_WIDER_MODE (fmode))
5404 for (imode = GET_MODE (to); imode != VOIDmode;
5405 imode = GET_MODE_WIDER_MODE (imode))
5406 {
5407 icode = convert_optab_handler (tab, imode, fmode)->insn_code;
5408 if (icode != CODE_FOR_nothing)
5409 {
5410 rtx last = get_last_insn ();
5411 if (fmode != GET_MODE (from))
5412 from = convert_to_mode (fmode, from, 0);
5413
5414 if (imode != GET_MODE (to))
5415 target = gen_reg_rtx (imode);
5416
5417 if (!maybe_emit_unop_insn (icode, target, from, UNKNOWN))
5418 {
5419 delete_insns_since (last);
5420 continue;
5421 }
5422 if (target != to)
5423 convert_move (to, target, 0);
5424 return true;
5425 }
5426 }
5427
5428 return false;
5429 }
5430 \f
5431 /* Report whether we have an instruction to perform the operation
5432 specified by CODE on operands of mode MODE. */
5433 int
5434 have_insn_for (enum rtx_code code, enum machine_mode mode)
5435 {
5436 return (code_to_optab[(int) code] != 0
5437 && (optab_handler (code_to_optab[(int) code], mode)->insn_code
5438 != CODE_FOR_nothing));
5439 }
5440
5441 /* Set all insn_code fields to CODE_FOR_nothing. */
5442
5443 static void
5444 init_insn_codes (void)
5445 {
5446 unsigned int i;
5447
5448 for (i = 0; i < (unsigned int) OTI_MAX; i++)
5449 {
5450 unsigned int j;
5451 optab op;
5452
5453 op = &optab_table[i];
5454 for (j = 0; j < NUM_MACHINE_MODES; j++)
5455 optab_handler (op, j)->insn_code = CODE_FOR_nothing;
5456 }
5457 for (i = 0; i < (unsigned int) COI_MAX; i++)
5458 {
5459 unsigned int j, k;
5460 convert_optab op;
5461
5462 op = &convert_optab_table[i];
5463 for (j = 0; j < NUM_MACHINE_MODES; j++)
5464 for (k = 0; k < NUM_MACHINE_MODES; k++)
5465 convert_optab_handler (op, j, k)->insn_code = CODE_FOR_nothing;
5466 }
5467 }
5468
5469 /* Initialize OP's code to CODE, and write it into the code_to_optab table. */
5470 static inline void
5471 init_optab (optab op, enum rtx_code code)
5472 {
5473 op->code = code;
5474 code_to_optab[(int) code] = op;
5475 }
5476
5477 /* Same, but fill in its code as CODE, and do _not_ write it into
5478 the code_to_optab table. */
5479 static inline void
5480 init_optabv (optab op, enum rtx_code code)
5481 {
5482 op->code = code;
5483 }
5484
5485 /* Conversion optabs never go in the code_to_optab table. */
5486 static void
5487 init_convert_optab (convert_optab op, enum rtx_code code)
5488 {
5489 op->code = code;
5490 }
5491
5492 /* Initialize the libfunc fields of an entire group of entries in some
5493 optab. Each entry is set equal to a string consisting of a leading
5494 pair of underscores followed by a generic operation name followed by
5495 a mode name (downshifted to lowercase) followed by a single character
5496 representing the number of operands for the given operation (which is
5497 usually one of the characters '2', '3', or '4').
5498
5499 OPTABLE is the table in which libfunc fields are to be initialized.
5500 OPNAME is the generic (string) name of the operation.
5501 SUFFIX is the character which specifies the number of operands for
5502 the given generic operation.
5503 MODE is the mode to generate for.
5504 */
5505
5506 static void
5507 gen_libfunc (optab optable, const char *opname, int suffix, enum machine_mode mode)
5508 {
5509 unsigned opname_len = strlen (opname);
5510 const char *mname = GET_MODE_NAME (mode);
5511 unsigned mname_len = strlen (mname);
5512 char *libfunc_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
5513 char *p;
5514 const char *q;
5515
5516 p = libfunc_name;
5517 *p++ = '_';
5518 *p++ = '_';
5519 for (q = opname; *q; )
5520 *p++ = *q++;
5521 for (q = mname; *q; q++)
5522 *p++ = TOLOWER (*q);
5523 *p++ = suffix;
5524 *p = '\0';
5525
5526 set_optab_libfunc (optable, mode,
5527 ggc_alloc_string (libfunc_name, p - libfunc_name));
5528 }
5529
5530 /* Like gen_libfunc, but verify that integer operation is involved. */
5531
5532 static void
5533 gen_int_libfunc (optab optable, const char *opname, char suffix,
5534 enum machine_mode mode)
5535 {
5536 int maxsize = 2 * BITS_PER_WORD;
5537
5538 if (GET_MODE_CLASS (mode) != MODE_INT)
5539 return;
5540 if (maxsize < LONG_LONG_TYPE_SIZE)
5541 maxsize = LONG_LONG_TYPE_SIZE;
5542 if (GET_MODE_CLASS (mode) != MODE_INT
5543 || mode < word_mode || GET_MODE_BITSIZE (mode) > maxsize)
5544 return;
5545 gen_libfunc (optable, opname, suffix, mode);
5546 }
5547
5548 /* Like gen_libfunc, but verify that FP and set decimal prefix if needed. */
5549
5550 static void
5551 gen_fp_libfunc (optab optable, const char *opname, char suffix,
5552 enum machine_mode mode)
5553 {
5554 char *dec_opname;
5555
5556 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5557 gen_libfunc (optable, opname, suffix, mode);
5558 if (DECIMAL_FLOAT_MODE_P (mode))
5559 {
5560 dec_opname = XALLOCAVEC (char, sizeof (DECIMAL_PREFIX) + strlen (opname));
5561 /* For BID support, change the name to have either a bid_ or dpd_ prefix
5562 depending on the low level floating format used. */
5563 memcpy (dec_opname, DECIMAL_PREFIX, sizeof (DECIMAL_PREFIX) - 1);
5564 strcpy (dec_opname + sizeof (DECIMAL_PREFIX) - 1, opname);
5565 gen_libfunc (optable, dec_opname, suffix, mode);
5566 }
5567 }
5568
5569 /* Like gen_libfunc, but verify that fixed-point operation is involved. */
5570
5571 static void
5572 gen_fixed_libfunc (optab optable, const char *opname, char suffix,
5573 enum machine_mode mode)
5574 {
5575 if (!ALL_FIXED_POINT_MODE_P (mode))
5576 return;
5577 gen_libfunc (optable, opname, suffix, mode);
5578 }
5579
5580 /* Like gen_libfunc, but verify that signed fixed-point operation is
5581 involved. */
5582
5583 static void
5584 gen_signed_fixed_libfunc (optab optable, const char *opname, char suffix,
5585 enum machine_mode mode)
5586 {
5587 if (!SIGNED_FIXED_POINT_MODE_P (mode))
5588 return;
5589 gen_libfunc (optable, opname, suffix, mode);
5590 }
5591
5592 /* Like gen_libfunc, but verify that unsigned fixed-point operation is
5593 involved. */
5594
5595 static void
5596 gen_unsigned_fixed_libfunc (optab optable, const char *opname, char suffix,
5597 enum machine_mode mode)
5598 {
5599 if (!UNSIGNED_FIXED_POINT_MODE_P (mode))
5600 return;
5601 gen_libfunc (optable, opname, suffix, mode);
5602 }
5603
5604 /* Like gen_libfunc, but verify that FP or INT operation is involved. */
5605
5606 static void
5607 gen_int_fp_libfunc (optab optable, const char *name, char suffix,
5608 enum machine_mode mode)
5609 {
5610 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5611 gen_fp_libfunc (optable, name, suffix, mode);
5612 if (INTEGRAL_MODE_P (mode))
5613 gen_int_libfunc (optable, name, suffix, mode);
5614 }
5615
5616 /* Like gen_libfunc, but verify that FP or INT operation is involved
5617 and add 'v' suffix for integer operation. */
5618
5619 static void
5620 gen_intv_fp_libfunc (optab optable, const char *name, char suffix,
5621 enum machine_mode mode)
5622 {
5623 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5624 gen_fp_libfunc (optable, name, suffix, mode);
5625 if (GET_MODE_CLASS (mode) == MODE_INT)
5626 {
5627 int len = strlen (name);
5628 char *v_name = XALLOCAVEC (char, len + 2);
5629 strcpy (v_name, name);
5630 v_name[len] = 'v';
5631 v_name[len + 1] = 0;
5632 gen_int_libfunc (optable, v_name, suffix, mode);
5633 }
5634 }
5635
5636 /* Like gen_libfunc, but verify that FP or INT or FIXED operation is
5637 involved. */
5638
5639 static void
5640 gen_int_fp_fixed_libfunc (optab optable, const char *name, char suffix,
5641 enum machine_mode mode)
5642 {
5643 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5644 gen_fp_libfunc (optable, name, suffix, mode);
5645 if (INTEGRAL_MODE_P (mode))
5646 gen_int_libfunc (optable, name, suffix, mode);
5647 if (ALL_FIXED_POINT_MODE_P (mode))
5648 gen_fixed_libfunc (optable, name, suffix, mode);
5649 }
5650
5651 /* Like gen_libfunc, but verify that FP or INT or signed FIXED operation is
5652 involved. */
5653
5654 static void
5655 gen_int_fp_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5656 enum machine_mode mode)
5657 {
5658 if (DECIMAL_FLOAT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_FLOAT)
5659 gen_fp_libfunc (optable, name, suffix, mode);
5660 if (INTEGRAL_MODE_P (mode))
5661 gen_int_libfunc (optable, name, suffix, mode);
5662 if (SIGNED_FIXED_POINT_MODE_P (mode))
5663 gen_signed_fixed_libfunc (optable, name, suffix, mode);
5664 }
5665
5666 /* Like gen_libfunc, but verify that INT or FIXED operation is
5667 involved. */
5668
5669 static void
5670 gen_int_fixed_libfunc (optab optable, const char *name, char suffix,
5671 enum machine_mode mode)
5672 {
5673 if (INTEGRAL_MODE_P (mode))
5674 gen_int_libfunc (optable, name, suffix, mode);
5675 if (ALL_FIXED_POINT_MODE_P (mode))
5676 gen_fixed_libfunc (optable, name, suffix, mode);
5677 }
5678
5679 /* Like gen_libfunc, but verify that INT or signed FIXED operation is
5680 involved. */
5681
5682 static void
5683 gen_int_signed_fixed_libfunc (optab optable, const char *name, char suffix,
5684 enum machine_mode mode)
5685 {
5686 if (INTEGRAL_MODE_P (mode))
5687 gen_int_libfunc (optable, name, suffix, mode);
5688 if (SIGNED_FIXED_POINT_MODE_P (mode))
5689 gen_signed_fixed_libfunc (optable, name, suffix, mode);
5690 }
5691
5692 /* Like gen_libfunc, but verify that INT or unsigned FIXED operation is
5693 involved. */
5694
5695 static void
5696 gen_int_unsigned_fixed_libfunc (optab optable, const char *name, char suffix,
5697 enum machine_mode mode)
5698 {
5699 if (INTEGRAL_MODE_P (mode))
5700 gen_int_libfunc (optable, name, suffix, mode);
5701 if (UNSIGNED_FIXED_POINT_MODE_P (mode))
5702 gen_unsigned_fixed_libfunc (optable, name, suffix, mode);
5703 }
5704
5705 /* Initialize the libfunc fields of an entire group of entries of an
5706 inter-mode-class conversion optab. The string formation rules are
5707 similar to the ones for init_libfuncs, above, but instead of having
5708 a mode name and an operand count these functions have two mode names
5709 and no operand count. */
5710
5711 static void
5712 gen_interclass_conv_libfunc (convert_optab tab,
5713 const char *opname,
5714 enum machine_mode tmode,
5715 enum machine_mode fmode)
5716 {
5717 size_t opname_len = strlen (opname);
5718 size_t mname_len = 0;
5719
5720 const char *fname, *tname;
5721 const char *q;
5722 char *libfunc_name, *suffix;
5723 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5724 char *p;
5725
5726 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5727 depends on which underlying decimal floating point format is used. */
5728 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5729
5730 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5731
5732 nondec_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
5733 nondec_name[0] = '_';
5734 nondec_name[1] = '_';
5735 memcpy (&nondec_name[2], opname, opname_len);
5736 nondec_suffix = nondec_name + opname_len + 2;
5737
5738 dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
5739 dec_name[0] = '_';
5740 dec_name[1] = '_';
5741 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5742 memcpy (&dec_name[2+dec_len], opname, opname_len);
5743 dec_suffix = dec_name + dec_len + opname_len + 2;
5744
5745 fname = GET_MODE_NAME (fmode);
5746 tname = GET_MODE_NAME (tmode);
5747
5748 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5749 {
5750 libfunc_name = dec_name;
5751 suffix = dec_suffix;
5752 }
5753 else
5754 {
5755 libfunc_name = nondec_name;
5756 suffix = nondec_suffix;
5757 }
5758
5759 p = suffix;
5760 for (q = fname; *q; p++, q++)
5761 *p = TOLOWER (*q);
5762 for (q = tname; *q; p++, q++)
5763 *p = TOLOWER (*q);
5764
5765 *p = '\0';
5766
5767 set_conv_libfunc (tab, tmode, fmode,
5768 ggc_alloc_string (libfunc_name, p - libfunc_name));
5769 }
5770
5771 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5772 int->fp conversion. */
5773
5774 static void
5775 gen_int_to_fp_conv_libfunc (convert_optab tab,
5776 const char *opname,
5777 enum machine_mode tmode,
5778 enum machine_mode fmode)
5779 {
5780 if (GET_MODE_CLASS (fmode) != MODE_INT)
5781 return;
5782 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5783 return;
5784 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5785 }
5786
5787 /* ufloat_optab is special by using floatun for FP and floatuns decimal fp
5788 naming scheme. */
5789
5790 static void
5791 gen_ufloat_conv_libfunc (convert_optab tab,
5792 const char *opname ATTRIBUTE_UNUSED,
5793 enum machine_mode tmode,
5794 enum machine_mode fmode)
5795 {
5796 if (DECIMAL_FLOAT_MODE_P (tmode))
5797 gen_int_to_fp_conv_libfunc (tab, "floatuns", tmode, fmode);
5798 else
5799 gen_int_to_fp_conv_libfunc (tab, "floatun", tmode, fmode);
5800 }
5801
5802 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5803 fp->int conversion. */
5804
5805 static void
5806 gen_int_to_fp_nondecimal_conv_libfunc (convert_optab tab,
5807 const char *opname,
5808 enum machine_mode tmode,
5809 enum machine_mode fmode)
5810 {
5811 if (GET_MODE_CLASS (fmode) != MODE_INT)
5812 return;
5813 if (GET_MODE_CLASS (tmode) != MODE_FLOAT)
5814 return;
5815 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5816 }
5817
5818 /* Same as gen_interclass_conv_libfunc but verify that we are producing
5819 fp->int conversion with no decimal floating point involved. */
5820
5821 static void
5822 gen_fp_to_int_conv_libfunc (convert_optab tab,
5823 const char *opname,
5824 enum machine_mode tmode,
5825 enum machine_mode fmode)
5826 {
5827 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5828 return;
5829 if (GET_MODE_CLASS (tmode) != MODE_INT)
5830 return;
5831 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5832 }
5833
5834 /* Initialize the libfunc fields of an of an intra-mode-class conversion optab.
5835 The string formation rules are
5836 similar to the ones for init_libfunc, above. */
5837
5838 static void
5839 gen_intraclass_conv_libfunc (convert_optab tab, const char *opname,
5840 enum machine_mode tmode, enum machine_mode fmode)
5841 {
5842 size_t opname_len = strlen (opname);
5843 size_t mname_len = 0;
5844
5845 const char *fname, *tname;
5846 const char *q;
5847 char *nondec_name, *dec_name, *nondec_suffix, *dec_suffix;
5848 char *libfunc_name, *suffix;
5849 char *p;
5850
5851 /* If this is a decimal conversion, add the current BID vs. DPD prefix that
5852 depends on which underlying decimal floating point format is used. */
5853 const size_t dec_len = sizeof (DECIMAL_PREFIX) - 1;
5854
5855 mname_len = strlen (GET_MODE_NAME (tmode)) + strlen (GET_MODE_NAME (fmode));
5856
5857 nondec_name = XALLOCAVEC (char, 2 + opname_len + mname_len + 1 + 1);
5858 nondec_name[0] = '_';
5859 nondec_name[1] = '_';
5860 memcpy (&nondec_name[2], opname, opname_len);
5861 nondec_suffix = nondec_name + opname_len + 2;
5862
5863 dec_name = XALLOCAVEC (char, 2 + dec_len + opname_len + mname_len + 1 + 1);
5864 dec_name[0] = '_';
5865 dec_name[1] = '_';
5866 memcpy (&dec_name[2], DECIMAL_PREFIX, dec_len);
5867 memcpy (&dec_name[2 + dec_len], opname, opname_len);
5868 dec_suffix = dec_name + dec_len + opname_len + 2;
5869
5870 fname = GET_MODE_NAME (fmode);
5871 tname = GET_MODE_NAME (tmode);
5872
5873 if (DECIMAL_FLOAT_MODE_P(fmode) || DECIMAL_FLOAT_MODE_P(tmode))
5874 {
5875 libfunc_name = dec_name;
5876 suffix = dec_suffix;
5877 }
5878 else
5879 {
5880 libfunc_name = nondec_name;
5881 suffix = nondec_suffix;
5882 }
5883
5884 p = suffix;
5885 for (q = fname; *q; p++, q++)
5886 *p = TOLOWER (*q);
5887 for (q = tname; *q; p++, q++)
5888 *p = TOLOWER (*q);
5889
5890 *p++ = '2';
5891 *p = '\0';
5892
5893 set_conv_libfunc (tab, tmode, fmode,
5894 ggc_alloc_string (libfunc_name, p - libfunc_name));
5895 }
5896
5897 /* Pick proper libcall for trunc_optab. We need to chose if we do
5898 truncation or extension and interclass or intraclass. */
5899
5900 static void
5901 gen_trunc_conv_libfunc (convert_optab tab,
5902 const char *opname,
5903 enum machine_mode tmode,
5904 enum machine_mode fmode)
5905 {
5906 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5907 return;
5908 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5909 return;
5910 if (tmode == fmode)
5911 return;
5912
5913 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5914 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5915 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5916
5917 if (GET_MODE_PRECISION (fmode) <= GET_MODE_PRECISION (tmode))
5918 return;
5919
5920 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5921 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5922 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5923 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5924 }
5925
5926 /* Pick proper libcall for extend_optab. We need to chose if we do
5927 truncation or extension and interclass or intraclass. */
5928
5929 static void
5930 gen_extend_conv_libfunc (convert_optab tab,
5931 const char *opname ATTRIBUTE_UNUSED,
5932 enum machine_mode tmode,
5933 enum machine_mode fmode)
5934 {
5935 if (GET_MODE_CLASS (tmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (tmode))
5936 return;
5937 if (GET_MODE_CLASS (fmode) != MODE_FLOAT && !DECIMAL_FLOAT_MODE_P (fmode))
5938 return;
5939 if (tmode == fmode)
5940 return;
5941
5942 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (fmode))
5943 || (GET_MODE_CLASS (fmode) == MODE_FLOAT && DECIMAL_FLOAT_MODE_P (tmode)))
5944 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5945
5946 if (GET_MODE_PRECISION (fmode) > GET_MODE_PRECISION (tmode))
5947 return;
5948
5949 if ((GET_MODE_CLASS (tmode) == MODE_FLOAT
5950 && GET_MODE_CLASS (fmode) == MODE_FLOAT)
5951 || (DECIMAL_FLOAT_MODE_P (fmode) && DECIMAL_FLOAT_MODE_P (tmode)))
5952 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5953 }
5954
5955 /* Pick proper libcall for fract_optab. We need to chose if we do
5956 interclass or intraclass. */
5957
5958 static void
5959 gen_fract_conv_libfunc (convert_optab tab,
5960 const char *opname,
5961 enum machine_mode tmode,
5962 enum machine_mode fmode)
5963 {
5964 if (tmode == fmode)
5965 return;
5966 if (!(ALL_FIXED_POINT_MODE_P (tmode) || ALL_FIXED_POINT_MODE_P (fmode)))
5967 return;
5968
5969 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
5970 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
5971 else
5972 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5973 }
5974
5975 /* Pick proper libcall for fractuns_optab. */
5976
5977 static void
5978 gen_fractuns_conv_libfunc (convert_optab tab,
5979 const char *opname,
5980 enum machine_mode tmode,
5981 enum machine_mode fmode)
5982 {
5983 if (tmode == fmode)
5984 return;
5985 /* One mode must be a fixed-point mode, and the other must be an integer
5986 mode. */
5987 if (!((ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT)
5988 || (ALL_FIXED_POINT_MODE_P (fmode)
5989 && GET_MODE_CLASS (tmode) == MODE_INT)))
5990 return;
5991
5992 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
5993 }
5994
5995 /* Pick proper libcall for satfract_optab. We need to chose if we do
5996 interclass or intraclass. */
5997
5998 static void
5999 gen_satfract_conv_libfunc (convert_optab tab,
6000 const char *opname,
6001 enum machine_mode tmode,
6002 enum machine_mode fmode)
6003 {
6004 if (tmode == fmode)
6005 return;
6006 /* TMODE must be a fixed-point mode. */
6007 if (!ALL_FIXED_POINT_MODE_P (tmode))
6008 return;
6009
6010 if (GET_MODE_CLASS (tmode) == GET_MODE_CLASS (fmode))
6011 gen_intraclass_conv_libfunc (tab, opname, tmode, fmode);
6012 else
6013 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6014 }
6015
6016 /* Pick proper libcall for satfractuns_optab. */
6017
6018 static void
6019 gen_satfractuns_conv_libfunc (convert_optab tab,
6020 const char *opname,
6021 enum machine_mode tmode,
6022 enum machine_mode fmode)
6023 {
6024 if (tmode == fmode)
6025 return;
6026 /* TMODE must be a fixed-point mode, and FMODE must be an integer mode. */
6027 if (!(ALL_FIXED_POINT_MODE_P (tmode) && GET_MODE_CLASS (fmode) == MODE_INT))
6028 return;
6029
6030 gen_interclass_conv_libfunc (tab, opname, tmode, fmode);
6031 }
6032
6033 /* A table of previously-created libfuncs, hashed by name. */
6034 static GTY ((param_is (union tree_node))) htab_t libfunc_decls;
6035
6036 /* Hashtable callbacks for libfunc_decls. */
6037
6038 static hashval_t
6039 libfunc_decl_hash (const void *entry)
6040 {
6041 return htab_hash_string (IDENTIFIER_POINTER (DECL_NAME ((const_tree) entry)));
6042 }
6043
6044 static int
6045 libfunc_decl_eq (const void *entry1, const void *entry2)
6046 {
6047 return DECL_NAME ((const_tree) entry1) == (const_tree) entry2;
6048 }
6049
6050 rtx
6051 init_one_libfunc (const char *name)
6052 {
6053 tree id, decl;
6054 void **slot;
6055 hashval_t hash;
6056
6057 if (libfunc_decls == NULL)
6058 libfunc_decls = htab_create_ggc (37, libfunc_decl_hash,
6059 libfunc_decl_eq, NULL);
6060
6061 /* See if we have already created a libfunc decl for this function. */
6062 id = get_identifier (name);
6063 hash = htab_hash_string (name);
6064 slot = htab_find_slot_with_hash (libfunc_decls, id, hash, INSERT);
6065 decl = (tree) *slot;
6066 if (decl == NULL)
6067 {
6068 /* Create a new decl, so that it can be passed to
6069 targetm.encode_section_info. */
6070 /* ??? We don't have any type information except for this is
6071 a function. Pretend this is "int foo()". */
6072 decl = build_decl (FUNCTION_DECL, get_identifier (name),
6073 build_function_type (integer_type_node, NULL_TREE));
6074 DECL_ARTIFICIAL (decl) = 1;
6075 DECL_EXTERNAL (decl) = 1;
6076 TREE_PUBLIC (decl) = 1;
6077
6078 /* Zap the nonsensical SYMBOL_REF_DECL for this. What we're left with
6079 are the flags assigned by targetm.encode_section_info. */
6080 SET_SYMBOL_REF_DECL (XEXP (DECL_RTL (decl), 0), NULL);
6081
6082 *slot = decl;
6083 }
6084 return XEXP (DECL_RTL (decl), 0);
6085 }
6086
6087 /* Adjust the assembler name of libfunc NAME to ASMSPEC. */
6088
6089 rtx
6090 set_user_assembler_libfunc (const char *name, const char *asmspec)
6091 {
6092 tree id, decl;
6093 void **slot;
6094 hashval_t hash;
6095
6096 id = get_identifier (name);
6097 hash = htab_hash_string (name);
6098 slot = htab_find_slot_with_hash (libfunc_decls, id, hash, NO_INSERT);
6099 gcc_assert (slot);
6100 decl = (tree) *slot;
6101 set_user_assembler_name (decl, asmspec);
6102 return XEXP (DECL_RTL (decl), 0);
6103 }
6104
6105 /* Call this to reset the function entry for one optab (OPTABLE) in mode
6106 MODE to NAME, which should be either 0 or a string constant. */
6107 void
6108 set_optab_libfunc (optab optable, enum machine_mode mode, const char *name)
6109 {
6110 rtx val;
6111 struct libfunc_entry e;
6112 struct libfunc_entry **slot;
6113 e.optab = (size_t) (optable - &optab_table[0]);
6114 e.mode1 = mode;
6115 e.mode2 = VOIDmode;
6116
6117 if (name)
6118 val = init_one_libfunc (name);
6119 else
6120 val = 0;
6121 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6122 if (*slot == NULL)
6123 *slot = GGC_NEW (struct libfunc_entry);
6124 (*slot)->optab = (size_t) (optable - &optab_table[0]);
6125 (*slot)->mode1 = mode;
6126 (*slot)->mode2 = VOIDmode;
6127 (*slot)->libfunc = val;
6128 }
6129
6130 /* Call this to reset the function entry for one conversion optab
6131 (OPTABLE) from mode FMODE to mode TMODE to NAME, which should be
6132 either 0 or a string constant. */
6133 void
6134 set_conv_libfunc (convert_optab optable, enum machine_mode tmode,
6135 enum machine_mode fmode, const char *name)
6136 {
6137 rtx val;
6138 struct libfunc_entry e;
6139 struct libfunc_entry **slot;
6140 e.optab = (size_t) (optable - &convert_optab_table[0]);
6141 e.mode1 = tmode;
6142 e.mode2 = fmode;
6143
6144 if (name)
6145 val = init_one_libfunc (name);
6146 else
6147 val = 0;
6148 slot = (struct libfunc_entry **) htab_find_slot (libfunc_hash, &e, INSERT);
6149 if (*slot == NULL)
6150 *slot = GGC_NEW (struct libfunc_entry);
6151 (*slot)->optab = (size_t) (optable - &convert_optab_table[0]);
6152 (*slot)->mode1 = tmode;
6153 (*slot)->mode2 = fmode;
6154 (*slot)->libfunc = val;
6155 }
6156
6157 /* Call this to initialize the contents of the optabs
6158 appropriately for the current target machine. */
6159
6160 void
6161 init_optabs (void)
6162 {
6163 unsigned int i;
6164 enum machine_mode int_mode;
6165 static bool reinit;
6166
6167 libfunc_hash = htab_create_ggc (10, hash_libfunc, eq_libfunc, NULL);
6168 /* Start by initializing all tables to contain CODE_FOR_nothing. */
6169
6170 for (i = 0; i < NUM_RTX_CODE; i++)
6171 setcc_gen_code[i] = CODE_FOR_nothing;
6172
6173 #ifdef HAVE_conditional_move
6174 for (i = 0; i < NUM_MACHINE_MODES; i++)
6175 movcc_gen_code[i] = CODE_FOR_nothing;
6176 #endif
6177
6178 for (i = 0; i < NUM_MACHINE_MODES; i++)
6179 {
6180 vcond_gen_code[i] = CODE_FOR_nothing;
6181 vcondu_gen_code[i] = CODE_FOR_nothing;
6182 }
6183
6184 #if GCC_VERSION >= 4000
6185 /* We statically initialize the insn_codes with CODE_FOR_nothing. */
6186 if (reinit)
6187 init_insn_codes ();
6188 #else
6189 init_insn_codes ();
6190 #endif
6191
6192 init_optab (add_optab, PLUS);
6193 init_optabv (addv_optab, PLUS);
6194 init_optab (sub_optab, MINUS);
6195 init_optabv (subv_optab, MINUS);
6196 init_optab (ssadd_optab, SS_PLUS);
6197 init_optab (usadd_optab, US_PLUS);
6198 init_optab (sssub_optab, SS_MINUS);
6199 init_optab (ussub_optab, US_MINUS);
6200 init_optab (smul_optab, MULT);
6201 init_optab (ssmul_optab, SS_MULT);
6202 init_optab (usmul_optab, US_MULT);
6203 init_optabv (smulv_optab, MULT);
6204 init_optab (smul_highpart_optab, UNKNOWN);
6205 init_optab (umul_highpart_optab, UNKNOWN);
6206 init_optab (smul_widen_optab, UNKNOWN);
6207 init_optab (umul_widen_optab, UNKNOWN);
6208 init_optab (usmul_widen_optab, UNKNOWN);
6209 init_optab (smadd_widen_optab, UNKNOWN);
6210 init_optab (umadd_widen_optab, UNKNOWN);
6211 init_optab (ssmadd_widen_optab, UNKNOWN);
6212 init_optab (usmadd_widen_optab, UNKNOWN);
6213 init_optab (smsub_widen_optab, UNKNOWN);
6214 init_optab (umsub_widen_optab, UNKNOWN);
6215 init_optab (ssmsub_widen_optab, UNKNOWN);
6216 init_optab (usmsub_widen_optab, UNKNOWN);
6217 init_optab (sdiv_optab, DIV);
6218 init_optab (ssdiv_optab, SS_DIV);
6219 init_optab (usdiv_optab, US_DIV);
6220 init_optabv (sdivv_optab, DIV);
6221 init_optab (sdivmod_optab, UNKNOWN);
6222 init_optab (udiv_optab, UDIV);
6223 init_optab (udivmod_optab, UNKNOWN);
6224 init_optab (smod_optab, MOD);
6225 init_optab (umod_optab, UMOD);
6226 init_optab (fmod_optab, UNKNOWN);
6227 init_optab (remainder_optab, UNKNOWN);
6228 init_optab (ftrunc_optab, UNKNOWN);
6229 init_optab (and_optab, AND);
6230 init_optab (ior_optab, IOR);
6231 init_optab (xor_optab, XOR);
6232 init_optab (ashl_optab, ASHIFT);
6233 init_optab (ssashl_optab, SS_ASHIFT);
6234 init_optab (usashl_optab, US_ASHIFT);
6235 init_optab (ashr_optab, ASHIFTRT);
6236 init_optab (lshr_optab, LSHIFTRT);
6237 init_optab (rotl_optab, ROTATE);
6238 init_optab (rotr_optab, ROTATERT);
6239 init_optab (smin_optab, SMIN);
6240 init_optab (smax_optab, SMAX);
6241 init_optab (umin_optab, UMIN);
6242 init_optab (umax_optab, UMAX);
6243 init_optab (pow_optab, UNKNOWN);
6244 init_optab (atan2_optab, UNKNOWN);
6245
6246 /* These three have codes assigned exclusively for the sake of
6247 have_insn_for. */
6248 init_optab (mov_optab, SET);
6249 init_optab (movstrict_optab, STRICT_LOW_PART);
6250 init_optab (cmp_optab, COMPARE);
6251
6252 init_optab (storent_optab, UNKNOWN);
6253
6254 init_optab (ucmp_optab, UNKNOWN);
6255 init_optab (tst_optab, UNKNOWN);
6256
6257 init_optab (eq_optab, EQ);
6258 init_optab (ne_optab, NE);
6259 init_optab (gt_optab, GT);
6260 init_optab (ge_optab, GE);
6261 init_optab (lt_optab, LT);
6262 init_optab (le_optab, LE);
6263 init_optab (unord_optab, UNORDERED);
6264
6265 init_optab (neg_optab, NEG);
6266 init_optab (ssneg_optab, SS_NEG);
6267 init_optab (usneg_optab, US_NEG);
6268 init_optabv (negv_optab, NEG);
6269 init_optab (abs_optab, ABS);
6270 init_optabv (absv_optab, ABS);
6271 init_optab (addcc_optab, UNKNOWN);
6272 init_optab (one_cmpl_optab, NOT);
6273 init_optab (bswap_optab, BSWAP);
6274 init_optab (ffs_optab, FFS);
6275 init_optab (clz_optab, CLZ);
6276 init_optab (ctz_optab, CTZ);
6277 init_optab (popcount_optab, POPCOUNT);
6278 init_optab (parity_optab, PARITY);
6279 init_optab (sqrt_optab, SQRT);
6280 init_optab (floor_optab, UNKNOWN);
6281 init_optab (ceil_optab, UNKNOWN);
6282 init_optab (round_optab, UNKNOWN);
6283 init_optab (btrunc_optab, UNKNOWN);
6284 init_optab (nearbyint_optab, UNKNOWN);
6285 init_optab (rint_optab, UNKNOWN);
6286 init_optab (sincos_optab, UNKNOWN);
6287 init_optab (sin_optab, UNKNOWN);
6288 init_optab (asin_optab, UNKNOWN);
6289 init_optab (cos_optab, UNKNOWN);
6290 init_optab (acos_optab, UNKNOWN);
6291 init_optab (exp_optab, UNKNOWN);
6292 init_optab (exp10_optab, UNKNOWN);
6293 init_optab (exp2_optab, UNKNOWN);
6294 init_optab (expm1_optab, UNKNOWN);
6295 init_optab (ldexp_optab, UNKNOWN);
6296 init_optab (scalb_optab, UNKNOWN);
6297 init_optab (logb_optab, UNKNOWN);
6298 init_optab (ilogb_optab, UNKNOWN);
6299 init_optab (log_optab, UNKNOWN);
6300 init_optab (log10_optab, UNKNOWN);
6301 init_optab (log2_optab, UNKNOWN);
6302 init_optab (log1p_optab, UNKNOWN);
6303 init_optab (tan_optab, UNKNOWN);
6304 init_optab (atan_optab, UNKNOWN);
6305 init_optab (copysign_optab, UNKNOWN);
6306 init_optab (signbit_optab, UNKNOWN);
6307
6308 init_optab (isinf_optab, UNKNOWN);
6309
6310 init_optab (strlen_optab, UNKNOWN);
6311 init_optab (cbranch_optab, UNKNOWN);
6312 init_optab (cmov_optab, UNKNOWN);
6313 init_optab (cstore_optab, UNKNOWN);
6314 init_optab (push_optab, UNKNOWN);
6315
6316 init_optab (reduc_smax_optab, UNKNOWN);
6317 init_optab (reduc_umax_optab, UNKNOWN);
6318 init_optab (reduc_smin_optab, UNKNOWN);
6319 init_optab (reduc_umin_optab, UNKNOWN);
6320 init_optab (reduc_splus_optab, UNKNOWN);
6321 init_optab (reduc_uplus_optab, UNKNOWN);
6322
6323 init_optab (ssum_widen_optab, UNKNOWN);
6324 init_optab (usum_widen_optab, UNKNOWN);
6325 init_optab (sdot_prod_optab, UNKNOWN);
6326 init_optab (udot_prod_optab, UNKNOWN);
6327
6328 init_optab (vec_extract_optab, UNKNOWN);
6329 init_optab (vec_extract_even_optab, UNKNOWN);
6330 init_optab (vec_extract_odd_optab, UNKNOWN);
6331 init_optab (vec_interleave_high_optab, UNKNOWN);
6332 init_optab (vec_interleave_low_optab, UNKNOWN);
6333 init_optab (vec_set_optab, UNKNOWN);
6334 init_optab (vec_init_optab, UNKNOWN);
6335 init_optab (vec_shl_optab, UNKNOWN);
6336 init_optab (vec_shr_optab, UNKNOWN);
6337 init_optab (vec_realign_load_optab, UNKNOWN);
6338 init_optab (movmisalign_optab, UNKNOWN);
6339 init_optab (vec_widen_umult_hi_optab, UNKNOWN);
6340 init_optab (vec_widen_umult_lo_optab, UNKNOWN);
6341 init_optab (vec_widen_smult_hi_optab, UNKNOWN);
6342 init_optab (vec_widen_smult_lo_optab, UNKNOWN);
6343 init_optab (vec_unpacks_hi_optab, UNKNOWN);
6344 init_optab (vec_unpacks_lo_optab, UNKNOWN);
6345 init_optab (vec_unpacku_hi_optab, UNKNOWN);
6346 init_optab (vec_unpacku_lo_optab, UNKNOWN);
6347 init_optab (vec_unpacks_float_hi_optab, UNKNOWN);
6348 init_optab (vec_unpacks_float_lo_optab, UNKNOWN);
6349 init_optab (vec_unpacku_float_hi_optab, UNKNOWN);
6350 init_optab (vec_unpacku_float_lo_optab, UNKNOWN);
6351 init_optab (vec_pack_trunc_optab, UNKNOWN);
6352 init_optab (vec_pack_usat_optab, UNKNOWN);
6353 init_optab (vec_pack_ssat_optab, UNKNOWN);
6354 init_optab (vec_pack_ufix_trunc_optab, UNKNOWN);
6355 init_optab (vec_pack_sfix_trunc_optab, UNKNOWN);
6356
6357 init_optab (powi_optab, UNKNOWN);
6358
6359 /* Conversions. */
6360 init_convert_optab (sext_optab, SIGN_EXTEND);
6361 init_convert_optab (zext_optab, ZERO_EXTEND);
6362 init_convert_optab (trunc_optab, TRUNCATE);
6363 init_convert_optab (sfix_optab, FIX);
6364 init_convert_optab (ufix_optab, UNSIGNED_FIX);
6365 init_convert_optab (sfixtrunc_optab, UNKNOWN);
6366 init_convert_optab (ufixtrunc_optab, UNKNOWN);
6367 init_convert_optab (sfloat_optab, FLOAT);
6368 init_convert_optab (ufloat_optab, UNSIGNED_FLOAT);
6369 init_convert_optab (lrint_optab, UNKNOWN);
6370 init_convert_optab (lround_optab, UNKNOWN);
6371 init_convert_optab (lfloor_optab, UNKNOWN);
6372 init_convert_optab (lceil_optab, UNKNOWN);
6373
6374 init_convert_optab (fract_optab, FRACT_CONVERT);
6375 init_convert_optab (fractuns_optab, UNSIGNED_FRACT_CONVERT);
6376 init_convert_optab (satfract_optab, SAT_FRACT);
6377 init_convert_optab (satfractuns_optab, UNSIGNED_SAT_FRACT);
6378
6379 for (i = 0; i < NUM_MACHINE_MODES; i++)
6380 {
6381 movmem_optab[i] = CODE_FOR_nothing;
6382 cmpstr_optab[i] = CODE_FOR_nothing;
6383 cmpstrn_optab[i] = CODE_FOR_nothing;
6384 cmpmem_optab[i] = CODE_FOR_nothing;
6385 setmem_optab[i] = CODE_FOR_nothing;
6386
6387 sync_add_optab[i] = CODE_FOR_nothing;
6388 sync_sub_optab[i] = CODE_FOR_nothing;
6389 sync_ior_optab[i] = CODE_FOR_nothing;
6390 sync_and_optab[i] = CODE_FOR_nothing;
6391 sync_xor_optab[i] = CODE_FOR_nothing;
6392 sync_nand_optab[i] = CODE_FOR_nothing;
6393 sync_old_add_optab[i] = CODE_FOR_nothing;
6394 sync_old_sub_optab[i] = CODE_FOR_nothing;
6395 sync_old_ior_optab[i] = CODE_FOR_nothing;
6396 sync_old_and_optab[i] = CODE_FOR_nothing;
6397 sync_old_xor_optab[i] = CODE_FOR_nothing;
6398 sync_old_nand_optab[i] = CODE_FOR_nothing;
6399 sync_new_add_optab[i] = CODE_FOR_nothing;
6400 sync_new_sub_optab[i] = CODE_FOR_nothing;
6401 sync_new_ior_optab[i] = CODE_FOR_nothing;
6402 sync_new_and_optab[i] = CODE_FOR_nothing;
6403 sync_new_xor_optab[i] = CODE_FOR_nothing;
6404 sync_new_nand_optab[i] = CODE_FOR_nothing;
6405 sync_compare_and_swap[i] = CODE_FOR_nothing;
6406 sync_lock_test_and_set[i] = CODE_FOR_nothing;
6407 sync_lock_release[i] = CODE_FOR_nothing;
6408
6409 reload_in_optab[i] = reload_out_optab[i] = CODE_FOR_nothing;
6410 }
6411
6412 /* Fill in the optabs with the insns we support. */
6413 init_all_optabs ();
6414
6415 /* Initialize the optabs with the names of the library functions. */
6416 add_optab->libcall_basename = "add";
6417 add_optab->libcall_suffix = '3';
6418 add_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6419 addv_optab->libcall_basename = "add";
6420 addv_optab->libcall_suffix = '3';
6421 addv_optab->libcall_gen = gen_intv_fp_libfunc;
6422 ssadd_optab->libcall_basename = "ssadd";
6423 ssadd_optab->libcall_suffix = '3';
6424 ssadd_optab->libcall_gen = gen_signed_fixed_libfunc;
6425 usadd_optab->libcall_basename = "usadd";
6426 usadd_optab->libcall_suffix = '3';
6427 usadd_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6428 sub_optab->libcall_basename = "sub";
6429 sub_optab->libcall_suffix = '3';
6430 sub_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6431 subv_optab->libcall_basename = "sub";
6432 subv_optab->libcall_suffix = '3';
6433 subv_optab->libcall_gen = gen_intv_fp_libfunc;
6434 sssub_optab->libcall_basename = "sssub";
6435 sssub_optab->libcall_suffix = '3';
6436 sssub_optab->libcall_gen = gen_signed_fixed_libfunc;
6437 ussub_optab->libcall_basename = "ussub";
6438 ussub_optab->libcall_suffix = '3';
6439 ussub_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6440 smul_optab->libcall_basename = "mul";
6441 smul_optab->libcall_suffix = '3';
6442 smul_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6443 smulv_optab->libcall_basename = "mul";
6444 smulv_optab->libcall_suffix = '3';
6445 smulv_optab->libcall_gen = gen_intv_fp_libfunc;
6446 ssmul_optab->libcall_basename = "ssmul";
6447 ssmul_optab->libcall_suffix = '3';
6448 ssmul_optab->libcall_gen = gen_signed_fixed_libfunc;
6449 usmul_optab->libcall_basename = "usmul";
6450 usmul_optab->libcall_suffix = '3';
6451 usmul_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6452 sdiv_optab->libcall_basename = "div";
6453 sdiv_optab->libcall_suffix = '3';
6454 sdiv_optab->libcall_gen = gen_int_fp_signed_fixed_libfunc;
6455 sdivv_optab->libcall_basename = "divv";
6456 sdivv_optab->libcall_suffix = '3';
6457 sdivv_optab->libcall_gen = gen_int_libfunc;
6458 ssdiv_optab->libcall_basename = "ssdiv";
6459 ssdiv_optab->libcall_suffix = '3';
6460 ssdiv_optab->libcall_gen = gen_signed_fixed_libfunc;
6461 udiv_optab->libcall_basename = "udiv";
6462 udiv_optab->libcall_suffix = '3';
6463 udiv_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6464 usdiv_optab->libcall_basename = "usdiv";
6465 usdiv_optab->libcall_suffix = '3';
6466 usdiv_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6467 sdivmod_optab->libcall_basename = "divmod";
6468 sdivmod_optab->libcall_suffix = '4';
6469 sdivmod_optab->libcall_gen = gen_int_libfunc;
6470 udivmod_optab->libcall_basename = "udivmod";
6471 udivmod_optab->libcall_suffix = '4';
6472 udivmod_optab->libcall_gen = gen_int_libfunc;
6473 smod_optab->libcall_basename = "mod";
6474 smod_optab->libcall_suffix = '3';
6475 smod_optab->libcall_gen = gen_int_libfunc;
6476 umod_optab->libcall_basename = "umod";
6477 umod_optab->libcall_suffix = '3';
6478 umod_optab->libcall_gen = gen_int_libfunc;
6479 ftrunc_optab->libcall_basename = "ftrunc";
6480 ftrunc_optab->libcall_suffix = '2';
6481 ftrunc_optab->libcall_gen = gen_fp_libfunc;
6482 and_optab->libcall_basename = "and";
6483 and_optab->libcall_suffix = '3';
6484 and_optab->libcall_gen = gen_int_libfunc;
6485 ior_optab->libcall_basename = "ior";
6486 ior_optab->libcall_suffix = '3';
6487 ior_optab->libcall_gen = gen_int_libfunc;
6488 xor_optab->libcall_basename = "xor";
6489 xor_optab->libcall_suffix = '3';
6490 xor_optab->libcall_gen = gen_int_libfunc;
6491 ashl_optab->libcall_basename = "ashl";
6492 ashl_optab->libcall_suffix = '3';
6493 ashl_optab->libcall_gen = gen_int_fixed_libfunc;
6494 ssashl_optab->libcall_basename = "ssashl";
6495 ssashl_optab->libcall_suffix = '3';
6496 ssashl_optab->libcall_gen = gen_signed_fixed_libfunc;
6497 usashl_optab->libcall_basename = "usashl";
6498 usashl_optab->libcall_suffix = '3';
6499 usashl_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6500 ashr_optab->libcall_basename = "ashr";
6501 ashr_optab->libcall_suffix = '3';
6502 ashr_optab->libcall_gen = gen_int_signed_fixed_libfunc;
6503 lshr_optab->libcall_basename = "lshr";
6504 lshr_optab->libcall_suffix = '3';
6505 lshr_optab->libcall_gen = gen_int_unsigned_fixed_libfunc;
6506 smin_optab->libcall_basename = "min";
6507 smin_optab->libcall_suffix = '3';
6508 smin_optab->libcall_gen = gen_int_fp_libfunc;
6509 smax_optab->libcall_basename = "max";
6510 smax_optab->libcall_suffix = '3';
6511 smax_optab->libcall_gen = gen_int_fp_libfunc;
6512 umin_optab->libcall_basename = "umin";
6513 umin_optab->libcall_suffix = '3';
6514 umin_optab->libcall_gen = gen_int_libfunc;
6515 umax_optab->libcall_basename = "umax";
6516 umax_optab->libcall_suffix = '3';
6517 umax_optab->libcall_gen = gen_int_libfunc;
6518 neg_optab->libcall_basename = "neg";
6519 neg_optab->libcall_suffix = '2';
6520 neg_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6521 ssneg_optab->libcall_basename = "ssneg";
6522 ssneg_optab->libcall_suffix = '2';
6523 ssneg_optab->libcall_gen = gen_signed_fixed_libfunc;
6524 usneg_optab->libcall_basename = "usneg";
6525 usneg_optab->libcall_suffix = '2';
6526 usneg_optab->libcall_gen = gen_unsigned_fixed_libfunc;
6527 negv_optab->libcall_basename = "neg";
6528 negv_optab->libcall_suffix = '2';
6529 negv_optab->libcall_gen = gen_intv_fp_libfunc;
6530 one_cmpl_optab->libcall_basename = "one_cmpl";
6531 one_cmpl_optab->libcall_suffix = '2';
6532 one_cmpl_optab->libcall_gen = gen_int_libfunc;
6533 ffs_optab->libcall_basename = "ffs";
6534 ffs_optab->libcall_suffix = '2';
6535 ffs_optab->libcall_gen = gen_int_libfunc;
6536 clz_optab->libcall_basename = "clz";
6537 clz_optab->libcall_suffix = '2';
6538 clz_optab->libcall_gen = gen_int_libfunc;
6539 ctz_optab->libcall_basename = "ctz";
6540 ctz_optab->libcall_suffix = '2';
6541 ctz_optab->libcall_gen = gen_int_libfunc;
6542 popcount_optab->libcall_basename = "popcount";
6543 popcount_optab->libcall_suffix = '2';
6544 popcount_optab->libcall_gen = gen_int_libfunc;
6545 parity_optab->libcall_basename = "parity";
6546 parity_optab->libcall_suffix = '2';
6547 parity_optab->libcall_gen = gen_int_libfunc;
6548
6549 /* Comparison libcalls for integers MUST come in pairs,
6550 signed/unsigned. */
6551 cmp_optab->libcall_basename = "cmp";
6552 cmp_optab->libcall_suffix = '2';
6553 cmp_optab->libcall_gen = gen_int_fp_fixed_libfunc;
6554 ucmp_optab->libcall_basename = "ucmp";
6555 ucmp_optab->libcall_suffix = '2';
6556 ucmp_optab->libcall_gen = gen_int_libfunc;
6557
6558 /* EQ etc are floating point only. */
6559 eq_optab->libcall_basename = "eq";
6560 eq_optab->libcall_suffix = '2';
6561 eq_optab->libcall_gen = gen_fp_libfunc;
6562 ne_optab->libcall_basename = "ne";
6563 ne_optab->libcall_suffix = '2';
6564 ne_optab->libcall_gen = gen_fp_libfunc;
6565 gt_optab->libcall_basename = "gt";
6566 gt_optab->libcall_suffix = '2';
6567 gt_optab->libcall_gen = gen_fp_libfunc;
6568 ge_optab->libcall_basename = "ge";
6569 ge_optab->libcall_suffix = '2';
6570 ge_optab->libcall_gen = gen_fp_libfunc;
6571 lt_optab->libcall_basename = "lt";
6572 lt_optab->libcall_suffix = '2';
6573 lt_optab->libcall_gen = gen_fp_libfunc;
6574 le_optab->libcall_basename = "le";
6575 le_optab->libcall_suffix = '2';
6576 le_optab->libcall_gen = gen_fp_libfunc;
6577 unord_optab->libcall_basename = "unord";
6578 unord_optab->libcall_suffix = '2';
6579 unord_optab->libcall_gen = gen_fp_libfunc;
6580
6581 powi_optab->libcall_basename = "powi";
6582 powi_optab->libcall_suffix = '2';
6583 powi_optab->libcall_gen = gen_fp_libfunc;
6584
6585 /* Conversions. */
6586 sfloat_optab->libcall_basename = "float";
6587 sfloat_optab->libcall_gen = gen_int_to_fp_conv_libfunc;
6588 ufloat_optab->libcall_gen = gen_ufloat_conv_libfunc;
6589 sfix_optab->libcall_basename = "fix";
6590 sfix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6591 ufix_optab->libcall_basename = "fixuns";
6592 ufix_optab->libcall_gen = gen_fp_to_int_conv_libfunc;
6593 lrint_optab->libcall_basename = "lrint";
6594 lrint_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6595 lround_optab->libcall_basename = "lround";
6596 lround_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6597 lfloor_optab->libcall_basename = "lfloor";
6598 lfloor_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6599 lceil_optab->libcall_basename = "lceil";
6600 lceil_optab->libcall_gen = gen_int_to_fp_nondecimal_conv_libfunc;
6601
6602 /* trunc_optab is also used for FLOAT_EXTEND. */
6603 sext_optab->libcall_basename = "extend";
6604 sext_optab->libcall_gen = gen_extend_conv_libfunc;
6605 trunc_optab->libcall_basename = "trunc";
6606 trunc_optab->libcall_gen = gen_trunc_conv_libfunc;
6607
6608 /* Conversions for fixed-point modes and other modes. */
6609 fract_optab->libcall_basename = "fract";
6610 fract_optab->libcall_gen = gen_fract_conv_libfunc;
6611 satfract_optab->libcall_basename = "satfract";
6612 satfract_optab->libcall_gen = gen_satfract_conv_libfunc;
6613 fractuns_optab->libcall_basename = "fractuns";
6614 fractuns_optab->libcall_gen = gen_fractuns_conv_libfunc;
6615 satfractuns_optab->libcall_basename = "satfractuns";
6616 satfractuns_optab->libcall_gen = gen_satfractuns_conv_libfunc;
6617
6618 /* The ffs function operates on `int'. Fall back on it if we do not
6619 have a libgcc2 function for that width. */
6620 if (INT_TYPE_SIZE < BITS_PER_WORD)
6621 {
6622 int_mode = mode_for_size (INT_TYPE_SIZE, MODE_INT, 0);
6623 set_optab_libfunc (ffs_optab, mode_for_size (INT_TYPE_SIZE, MODE_INT, 0),
6624 "ffs");
6625 }
6626
6627 /* Explicitly initialize the bswap libfuncs since we need them to be
6628 valid for things other than word_mode. */
6629 set_optab_libfunc (bswap_optab, SImode, "__bswapsi2");
6630 set_optab_libfunc (bswap_optab, DImode, "__bswapdi2");
6631
6632 /* Use cabs for double complex abs, since systems generally have cabs.
6633 Don't define any libcall for float complex, so that cabs will be used. */
6634 if (complex_double_type_node)
6635 set_optab_libfunc (abs_optab, TYPE_MODE (complex_double_type_node), "cabs");
6636
6637 abort_libfunc = init_one_libfunc ("abort");
6638 memcpy_libfunc = init_one_libfunc ("memcpy");
6639 memmove_libfunc = init_one_libfunc ("memmove");
6640 memcmp_libfunc = init_one_libfunc ("memcmp");
6641 memset_libfunc = init_one_libfunc ("memset");
6642 setbits_libfunc = init_one_libfunc ("__setbits");
6643
6644 #ifndef DONT_USE_BUILTIN_SETJMP
6645 setjmp_libfunc = init_one_libfunc ("__builtin_setjmp");
6646 longjmp_libfunc = init_one_libfunc ("__builtin_longjmp");
6647 #else
6648 setjmp_libfunc = init_one_libfunc ("setjmp");
6649 longjmp_libfunc = init_one_libfunc ("longjmp");
6650 #endif
6651 unwind_sjlj_register_libfunc = init_one_libfunc ("_Unwind_SjLj_Register");
6652 unwind_sjlj_unregister_libfunc
6653 = init_one_libfunc ("_Unwind_SjLj_Unregister");
6654
6655 /* For function entry/exit instrumentation. */
6656 profile_function_entry_libfunc
6657 = init_one_libfunc ("__cyg_profile_func_enter");
6658 profile_function_exit_libfunc
6659 = init_one_libfunc ("__cyg_profile_func_exit");
6660
6661 gcov_flush_libfunc = init_one_libfunc ("__gcov_flush");
6662
6663 if (HAVE_conditional_trap)
6664 trap_rtx = gen_rtx_fmt_ee (EQ, VOIDmode, NULL_RTX, NULL_RTX);
6665
6666 /* Allow the target to add more libcalls or rename some, etc. */
6667 targetm.init_libfuncs ();
6668
6669 reinit = true;
6670 }
6671
6672 /* Print information about the current contents of the optabs on
6673 STDERR. */
6674
6675 void
6676 debug_optab_libfuncs (void)
6677 {
6678 int i;
6679 int j;
6680 int k;
6681
6682 /* Dump the arithmetic optabs. */
6683 for (i = 0; i != (int) OTI_MAX; i++)
6684 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6685 {
6686 optab o;
6687 rtx l;
6688
6689 o = &optab_table[i];
6690 l = optab_libfunc (o, (enum machine_mode) j);
6691 if (l)
6692 {
6693 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6694 fprintf (stderr, "%s\t%s:\t%s\n",
6695 GET_RTX_NAME (o->code),
6696 GET_MODE_NAME (j),
6697 XSTR (l, 0));
6698 }
6699 }
6700
6701 /* Dump the conversion optabs. */
6702 for (i = 0; i < (int) COI_MAX; ++i)
6703 for (j = 0; j < NUM_MACHINE_MODES; ++j)
6704 for (k = 0; k < NUM_MACHINE_MODES; ++k)
6705 {
6706 convert_optab o;
6707 rtx l;
6708
6709 o = &convert_optab_table[i];
6710 l = convert_optab_libfunc (o, (enum machine_mode) j,
6711 (enum machine_mode) k);
6712 if (l)
6713 {
6714 gcc_assert (GET_CODE (l) == SYMBOL_REF);
6715 fprintf (stderr, "%s\t%s\t%s:\t%s\n",
6716 GET_RTX_NAME (o->code),
6717 GET_MODE_NAME (j),
6718 GET_MODE_NAME (k),
6719 XSTR (l, 0));
6720 }
6721 }
6722 }
6723
6724 \f
6725 /* Generate insns to trap with code TCODE if OP1 and OP2 satisfy condition
6726 CODE. Return 0 on failure. */
6727
6728 rtx
6729 gen_cond_trap (enum rtx_code code ATTRIBUTE_UNUSED, rtx op1,
6730 rtx op2 ATTRIBUTE_UNUSED, rtx tcode ATTRIBUTE_UNUSED)
6731 {
6732 enum machine_mode mode = GET_MODE (op1);
6733 enum insn_code icode;
6734 rtx insn;
6735
6736 if (!HAVE_conditional_trap)
6737 return 0;
6738
6739 if (mode == VOIDmode)
6740 return 0;
6741
6742 icode = optab_handler (cmp_optab, mode)->insn_code;
6743 if (icode == CODE_FOR_nothing)
6744 return 0;
6745
6746 start_sequence ();
6747 op1 = prepare_operand (icode, op1, 0, mode, mode, 0);
6748 op2 = prepare_operand (icode, op2, 1, mode, mode, 0);
6749 if (!op1 || !op2)
6750 {
6751 end_sequence ();
6752 return 0;
6753 }
6754 emit_insn (GEN_FCN (icode) (op1, op2));
6755
6756 PUT_CODE (trap_rtx, code);
6757 gcc_assert (HAVE_conditional_trap);
6758 insn = gen_conditional_trap (trap_rtx, tcode);
6759 if (insn)
6760 {
6761 emit_insn (insn);
6762 insn = get_insns ();
6763 }
6764 end_sequence ();
6765
6766 return insn;
6767 }
6768
6769 /* Return rtx code for TCODE. Use UNSIGNEDP to select signed
6770 or unsigned operation code. */
6771
6772 static enum rtx_code
6773 get_rtx_code (enum tree_code tcode, bool unsignedp)
6774 {
6775 enum rtx_code code;
6776 switch (tcode)
6777 {
6778 case EQ_EXPR:
6779 code = EQ;
6780 break;
6781 case NE_EXPR:
6782 code = NE;
6783 break;
6784 case LT_EXPR:
6785 code = unsignedp ? LTU : LT;
6786 break;
6787 case LE_EXPR:
6788 code = unsignedp ? LEU : LE;
6789 break;
6790 case GT_EXPR:
6791 code = unsignedp ? GTU : GT;
6792 break;
6793 case GE_EXPR:
6794 code = unsignedp ? GEU : GE;
6795 break;
6796
6797 case UNORDERED_EXPR:
6798 code = UNORDERED;
6799 break;
6800 case ORDERED_EXPR:
6801 code = ORDERED;
6802 break;
6803 case UNLT_EXPR:
6804 code = UNLT;
6805 break;
6806 case UNLE_EXPR:
6807 code = UNLE;
6808 break;
6809 case UNGT_EXPR:
6810 code = UNGT;
6811 break;
6812 case UNGE_EXPR:
6813 code = UNGE;
6814 break;
6815 case UNEQ_EXPR:
6816 code = UNEQ;
6817 break;
6818 case LTGT_EXPR:
6819 code = LTGT;
6820 break;
6821
6822 default:
6823 gcc_unreachable ();
6824 }
6825 return code;
6826 }
6827
6828 /* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
6829 unsigned operators. Do not generate compare instruction. */
6830
6831 static rtx
6832 vector_compare_rtx (tree cond, bool unsignedp, enum insn_code icode)
6833 {
6834 enum rtx_code rcode;
6835 tree t_op0, t_op1;
6836 rtx rtx_op0, rtx_op1;
6837
6838 /* This is unlikely. While generating VEC_COND_EXPR, auto vectorizer
6839 ensures that condition is a relational operation. */
6840 gcc_assert (COMPARISON_CLASS_P (cond));
6841
6842 rcode = get_rtx_code (TREE_CODE (cond), unsignedp);
6843 t_op0 = TREE_OPERAND (cond, 0);
6844 t_op1 = TREE_OPERAND (cond, 1);
6845
6846 /* Expand operands. */
6847 rtx_op0 = expand_expr (t_op0, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op0)),
6848 EXPAND_STACK_PARM);
6849 rtx_op1 = expand_expr (t_op1, NULL_RTX, TYPE_MODE (TREE_TYPE (t_op1)),
6850 EXPAND_STACK_PARM);
6851
6852 if (!insn_data[icode].operand[4].predicate (rtx_op0, GET_MODE (rtx_op0))
6853 && GET_MODE (rtx_op0) != VOIDmode)
6854 rtx_op0 = force_reg (GET_MODE (rtx_op0), rtx_op0);
6855
6856 if (!insn_data[icode].operand[5].predicate (rtx_op1, GET_MODE (rtx_op1))
6857 && GET_MODE (rtx_op1) != VOIDmode)
6858 rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
6859
6860 return gen_rtx_fmt_ee (rcode, VOIDmode, rtx_op0, rtx_op1);
6861 }
6862
6863 /* Return insn code for VEC_COND_EXPR EXPR. */
6864
6865 static inline enum insn_code
6866 get_vcond_icode (tree expr, enum machine_mode mode)
6867 {
6868 enum insn_code icode = CODE_FOR_nothing;
6869
6870 if (TYPE_UNSIGNED (TREE_TYPE (expr)))
6871 icode = vcondu_gen_code[mode];
6872 else
6873 icode = vcond_gen_code[mode];
6874 return icode;
6875 }
6876
6877 /* Return TRUE iff, appropriate vector insns are available
6878 for vector cond expr expr in VMODE mode. */
6879
6880 bool
6881 expand_vec_cond_expr_p (tree expr, enum machine_mode vmode)
6882 {
6883 if (get_vcond_icode (expr, vmode) == CODE_FOR_nothing)
6884 return false;
6885 return true;
6886 }
6887
6888 /* Generate insns for VEC_COND_EXPR. */
6889
6890 rtx
6891 expand_vec_cond_expr (tree vec_cond_expr, rtx target)
6892 {
6893 enum insn_code icode;
6894 rtx comparison, rtx_op1, rtx_op2, cc_op0, cc_op1;
6895 enum machine_mode mode = TYPE_MODE (TREE_TYPE (vec_cond_expr));
6896 bool unsignedp = TYPE_UNSIGNED (TREE_TYPE (vec_cond_expr));
6897
6898 icode = get_vcond_icode (vec_cond_expr, mode);
6899 if (icode == CODE_FOR_nothing)
6900 return 0;
6901
6902 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6903 target = gen_reg_rtx (mode);
6904
6905 /* Get comparison rtx. First expand both cond expr operands. */
6906 comparison = vector_compare_rtx (TREE_OPERAND (vec_cond_expr, 0),
6907 unsignedp, icode);
6908 cc_op0 = XEXP (comparison, 0);
6909 cc_op1 = XEXP (comparison, 1);
6910 /* Expand both operands and force them in reg, if required. */
6911 rtx_op1 = expand_normal (TREE_OPERAND (vec_cond_expr, 1));
6912 if (!insn_data[icode].operand[1].predicate (rtx_op1, mode)
6913 && mode != VOIDmode)
6914 rtx_op1 = force_reg (mode, rtx_op1);
6915
6916 rtx_op2 = expand_normal (TREE_OPERAND (vec_cond_expr, 2));
6917 if (!insn_data[icode].operand[2].predicate (rtx_op2, mode)
6918 && mode != VOIDmode)
6919 rtx_op2 = force_reg (mode, rtx_op2);
6920
6921 /* Emit instruction! */
6922 emit_insn (GEN_FCN (icode) (target, rtx_op1, rtx_op2,
6923 comparison, cc_op0, cc_op1));
6924
6925 return target;
6926 }
6927
6928 \f
6929 /* This is an internal subroutine of the other compare_and_swap expanders.
6930 MEM, OLD_VAL and NEW_VAL are as you'd expect for a compare-and-swap
6931 operation. TARGET is an optional place to store the value result of
6932 the operation. ICODE is the particular instruction to expand. Return
6933 the result of the operation. */
6934
6935 static rtx
6936 expand_val_compare_and_swap_1 (rtx mem, rtx old_val, rtx new_val,
6937 rtx target, enum insn_code icode)
6938 {
6939 enum machine_mode mode = GET_MODE (mem);
6940 rtx insn;
6941
6942 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
6943 target = gen_reg_rtx (mode);
6944
6945 if (GET_MODE (old_val) != VOIDmode && GET_MODE (old_val) != mode)
6946 old_val = convert_modes (mode, GET_MODE (old_val), old_val, 1);
6947 if (!insn_data[icode].operand[2].predicate (old_val, mode))
6948 old_val = force_reg (mode, old_val);
6949
6950 if (GET_MODE (new_val) != VOIDmode && GET_MODE (new_val) != mode)
6951 new_val = convert_modes (mode, GET_MODE (new_val), new_val, 1);
6952 if (!insn_data[icode].operand[3].predicate (new_val, mode))
6953 new_val = force_reg (mode, new_val);
6954
6955 insn = GEN_FCN (icode) (target, mem, old_val, new_val);
6956 if (insn == NULL_RTX)
6957 return NULL_RTX;
6958 emit_insn (insn);
6959
6960 return target;
6961 }
6962
6963 /* Expand a compare-and-swap operation and return its value. */
6964
6965 rtx
6966 expand_val_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6967 {
6968 enum machine_mode mode = GET_MODE (mem);
6969 enum insn_code icode = sync_compare_and_swap[mode];
6970
6971 if (icode == CODE_FOR_nothing)
6972 return NULL_RTX;
6973
6974 return expand_val_compare_and_swap_1 (mem, old_val, new_val, target, icode);
6975 }
6976
6977 /* Helper function to find the MODE_CC set in a sync_compare_and_swap
6978 pattern. */
6979
6980 static void
6981 find_cc_set (rtx x, const_rtx pat, void *data)
6982 {
6983 if (REG_P (x) && GET_MODE_CLASS (GET_MODE (x)) == MODE_CC
6984 && GET_CODE (pat) == SET)
6985 {
6986 rtx *p_cc_reg = (rtx *) data;
6987 gcc_assert (!*p_cc_reg);
6988 *p_cc_reg = x;
6989 }
6990 }
6991
6992 /* Expand a compare-and-swap operation and store true into the result if
6993 the operation was successful and false otherwise. Return the result.
6994 Unlike other routines, TARGET is not optional. */
6995
6996 rtx
6997 expand_bool_compare_and_swap (rtx mem, rtx old_val, rtx new_val, rtx target)
6998 {
6999 enum machine_mode mode = GET_MODE (mem);
7000 enum insn_code icode;
7001 rtx subtarget, seq, cc_reg;
7002
7003 /* If the target supports a compare-and-swap pattern that simultaneously
7004 sets some flag for success, then use it. Otherwise use the regular
7005 compare-and-swap and follow that immediately with a compare insn. */
7006 icode = sync_compare_and_swap[mode];
7007 if (icode == CODE_FOR_nothing)
7008 return NULL_RTX;
7009
7010 do
7011 {
7012 start_sequence ();
7013 subtarget = expand_val_compare_and_swap_1 (mem, old_val, new_val,
7014 NULL_RTX, icode);
7015 cc_reg = NULL_RTX;
7016 if (subtarget == NULL_RTX)
7017 {
7018 end_sequence ();
7019 return NULL_RTX;
7020 }
7021
7022 if (have_insn_for (COMPARE, CCmode))
7023 note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
7024 seq = get_insns ();
7025 end_sequence ();
7026
7027 /* We might be comparing against an old value. Try again. :-( */
7028 if (!cc_reg && MEM_P (old_val))
7029 {
7030 seq = NULL_RTX;
7031 old_val = force_reg (mode, old_val);
7032 }
7033 }
7034 while (!seq);
7035
7036 emit_insn (seq);
7037 if (cc_reg)
7038 return emit_store_flag (target, EQ, cc_reg, const0_rtx, VOIDmode, 0, 1);
7039 else
7040 return emit_store_flag (target, EQ, subtarget, old_val, VOIDmode, 1, 1);
7041 }
7042
7043 /* This is a helper function for the other atomic operations. This function
7044 emits a loop that contains SEQ that iterates until a compare-and-swap
7045 operation at the end succeeds. MEM is the memory to be modified. SEQ is
7046 a set of instructions that takes a value from OLD_REG as an input and
7047 produces a value in NEW_REG as an output. Before SEQ, OLD_REG will be
7048 set to the current contents of MEM. After SEQ, a compare-and-swap will
7049 attempt to update MEM with NEW_REG. The function returns true when the
7050 loop was generated successfully. */
7051
7052 static bool
7053 expand_compare_and_swap_loop (rtx mem, rtx old_reg, rtx new_reg, rtx seq)
7054 {
7055 enum machine_mode mode = GET_MODE (mem);
7056 enum insn_code icode;
7057 rtx label, cmp_reg, subtarget, cc_reg;
7058
7059 /* The loop we want to generate looks like
7060
7061 cmp_reg = mem;
7062 label:
7063 old_reg = cmp_reg;
7064 seq;
7065 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
7066 if (cmp_reg != old_reg)
7067 goto label;
7068
7069 Note that we only do the plain load from memory once. Subsequent
7070 iterations use the value loaded by the compare-and-swap pattern. */
7071
7072 label = gen_label_rtx ();
7073 cmp_reg = gen_reg_rtx (mode);
7074
7075 emit_move_insn (cmp_reg, mem);
7076 emit_label (label);
7077 emit_move_insn (old_reg, cmp_reg);
7078 if (seq)
7079 emit_insn (seq);
7080
7081 /* If the target supports a compare-and-swap pattern that simultaneously
7082 sets some flag for success, then use it. Otherwise use the regular
7083 compare-and-swap and follow that immediately with a compare insn. */
7084 icode = sync_compare_and_swap[mode];
7085 if (icode == CODE_FOR_nothing)
7086 return false;
7087
7088 subtarget = expand_val_compare_and_swap_1 (mem, old_reg, new_reg,
7089 cmp_reg, icode);
7090 if (subtarget == NULL_RTX)
7091 return false;
7092
7093 cc_reg = NULL_RTX;
7094 if (have_insn_for (COMPARE, CCmode))
7095 note_stores (PATTERN (get_last_insn ()), find_cc_set, &cc_reg);
7096 if (cc_reg)
7097 {
7098 cmp_reg = cc_reg;
7099 old_reg = const0_rtx;
7100 }
7101 else
7102 {
7103 if (subtarget != cmp_reg)
7104 emit_move_insn (cmp_reg, subtarget);
7105 }
7106
7107 /* ??? Mark this jump predicted not taken? */
7108 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, const0_rtx, GET_MODE (cmp_reg), 1,
7109 label);
7110 return true;
7111 }
7112
7113 /* This function generates the atomic operation MEM CODE= VAL. In this
7114 case, we do not care about any resulting value. Returns NULL if we
7115 cannot generate the operation. */
7116
7117 rtx
7118 expand_sync_operation (rtx mem, rtx val, enum rtx_code code)
7119 {
7120 enum machine_mode mode = GET_MODE (mem);
7121 enum insn_code icode;
7122 rtx insn;
7123
7124 /* Look to see if the target supports the operation directly. */
7125 switch (code)
7126 {
7127 case PLUS:
7128 icode = sync_add_optab[mode];
7129 break;
7130 case IOR:
7131 icode = sync_ior_optab[mode];
7132 break;
7133 case XOR:
7134 icode = sync_xor_optab[mode];
7135 break;
7136 case AND:
7137 icode = sync_and_optab[mode];
7138 break;
7139 case NOT:
7140 icode = sync_nand_optab[mode];
7141 break;
7142
7143 case MINUS:
7144 icode = sync_sub_optab[mode];
7145 if (icode == CODE_FOR_nothing || CONST_INT_P (val))
7146 {
7147 icode = sync_add_optab[mode];
7148 if (icode != CODE_FOR_nothing)
7149 {
7150 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
7151 code = PLUS;
7152 }
7153 }
7154 break;
7155
7156 default:
7157 gcc_unreachable ();
7158 }
7159
7160 /* Generate the direct operation, if present. */
7161 if (icode != CODE_FOR_nothing)
7162 {
7163 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7164 val = convert_modes (mode, GET_MODE (val), val, 1);
7165 if (!insn_data[icode].operand[1].predicate (val, mode))
7166 val = force_reg (mode, val);
7167
7168 insn = GEN_FCN (icode) (mem, val);
7169 if (insn)
7170 {
7171 emit_insn (insn);
7172 return const0_rtx;
7173 }
7174 }
7175
7176 /* Failing that, generate a compare-and-swap loop in which we perform the
7177 operation with normal arithmetic instructions. */
7178 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7179 {
7180 rtx t0 = gen_reg_rtx (mode), t1;
7181
7182 start_sequence ();
7183
7184 t1 = t0;
7185 if (code == NOT)
7186 {
7187 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
7188 true, OPTAB_LIB_WIDEN);
7189 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
7190 }
7191 else
7192 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
7193 true, OPTAB_LIB_WIDEN);
7194 insn = get_insns ();
7195 end_sequence ();
7196
7197 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7198 return const0_rtx;
7199 }
7200
7201 return NULL_RTX;
7202 }
7203
7204 /* This function generates the atomic operation MEM CODE= VAL. In this
7205 case, we do care about the resulting value: if AFTER is true then
7206 return the value MEM holds after the operation, if AFTER is false
7207 then return the value MEM holds before the operation. TARGET is an
7208 optional place for the result value to be stored. */
7209
7210 rtx
7211 expand_sync_fetch_operation (rtx mem, rtx val, enum rtx_code code,
7212 bool after, rtx target)
7213 {
7214 enum machine_mode mode = GET_MODE (mem);
7215 enum insn_code old_code, new_code, icode;
7216 bool compensate;
7217 rtx insn;
7218
7219 /* Look to see if the target supports the operation directly. */
7220 switch (code)
7221 {
7222 case PLUS:
7223 old_code = sync_old_add_optab[mode];
7224 new_code = sync_new_add_optab[mode];
7225 break;
7226 case IOR:
7227 old_code = sync_old_ior_optab[mode];
7228 new_code = sync_new_ior_optab[mode];
7229 break;
7230 case XOR:
7231 old_code = sync_old_xor_optab[mode];
7232 new_code = sync_new_xor_optab[mode];
7233 break;
7234 case AND:
7235 old_code = sync_old_and_optab[mode];
7236 new_code = sync_new_and_optab[mode];
7237 break;
7238 case NOT:
7239 old_code = sync_old_nand_optab[mode];
7240 new_code = sync_new_nand_optab[mode];
7241 break;
7242
7243 case MINUS:
7244 old_code = sync_old_sub_optab[mode];
7245 new_code = sync_new_sub_optab[mode];
7246 if ((old_code == CODE_FOR_nothing && new_code == CODE_FOR_nothing)
7247 || CONST_INT_P (val))
7248 {
7249 old_code = sync_old_add_optab[mode];
7250 new_code = sync_new_add_optab[mode];
7251 if (old_code != CODE_FOR_nothing || new_code != CODE_FOR_nothing)
7252 {
7253 val = expand_simple_unop (mode, NEG, val, NULL_RTX, 1);
7254 code = PLUS;
7255 }
7256 }
7257 break;
7258
7259 default:
7260 gcc_unreachable ();
7261 }
7262
7263 /* If the target does supports the proper new/old operation, great. But
7264 if we only support the opposite old/new operation, check to see if we
7265 can compensate. In the case in which the old value is supported, then
7266 we can always perform the operation again with normal arithmetic. In
7267 the case in which the new value is supported, then we can only handle
7268 this in the case the operation is reversible. */
7269 compensate = false;
7270 if (after)
7271 {
7272 icode = new_code;
7273 if (icode == CODE_FOR_nothing)
7274 {
7275 icode = old_code;
7276 if (icode != CODE_FOR_nothing)
7277 compensate = true;
7278 }
7279 }
7280 else
7281 {
7282 icode = old_code;
7283 if (icode == CODE_FOR_nothing
7284 && (code == PLUS || code == MINUS || code == XOR))
7285 {
7286 icode = new_code;
7287 if (icode != CODE_FOR_nothing)
7288 compensate = true;
7289 }
7290 }
7291
7292 /* If we found something supported, great. */
7293 if (icode != CODE_FOR_nothing)
7294 {
7295 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
7296 target = gen_reg_rtx (mode);
7297
7298 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7299 val = convert_modes (mode, GET_MODE (val), val, 1);
7300 if (!insn_data[icode].operand[2].predicate (val, mode))
7301 val = force_reg (mode, val);
7302
7303 insn = GEN_FCN (icode) (target, mem, val);
7304 if (insn)
7305 {
7306 emit_insn (insn);
7307
7308 /* If we need to compensate for using an operation with the
7309 wrong return value, do so now. */
7310 if (compensate)
7311 {
7312 if (!after)
7313 {
7314 if (code == PLUS)
7315 code = MINUS;
7316 else if (code == MINUS)
7317 code = PLUS;
7318 }
7319
7320 if (code == NOT)
7321 {
7322 target = expand_simple_binop (mode, AND, target, val,
7323 NULL_RTX, true,
7324 OPTAB_LIB_WIDEN);
7325 target = expand_simple_unop (mode, code, target,
7326 NULL_RTX, true);
7327 }
7328 else
7329 target = expand_simple_binop (mode, code, target, val,
7330 NULL_RTX, true,
7331 OPTAB_LIB_WIDEN);
7332 }
7333
7334 return target;
7335 }
7336 }
7337
7338 /* Failing that, generate a compare-and-swap loop in which we perform the
7339 operation with normal arithmetic instructions. */
7340 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7341 {
7342 rtx t0 = gen_reg_rtx (mode), t1;
7343
7344 if (!target || !register_operand (target, mode))
7345 target = gen_reg_rtx (mode);
7346
7347 start_sequence ();
7348
7349 if (!after)
7350 emit_move_insn (target, t0);
7351 t1 = t0;
7352 if (code == NOT)
7353 {
7354 t1 = expand_simple_binop (mode, AND, t1, val, NULL_RTX,
7355 true, OPTAB_LIB_WIDEN);
7356 t1 = expand_simple_unop (mode, code, t1, NULL_RTX, true);
7357 }
7358 else
7359 t1 = expand_simple_binop (mode, code, t1, val, NULL_RTX,
7360 true, OPTAB_LIB_WIDEN);
7361 if (after)
7362 emit_move_insn (target, t1);
7363
7364 insn = get_insns ();
7365 end_sequence ();
7366
7367 if (t1 != NULL && expand_compare_and_swap_loop (mem, t0, t1, insn))
7368 return target;
7369 }
7370
7371 return NULL_RTX;
7372 }
7373
7374 /* This function expands a test-and-set operation. Ideally we atomically
7375 store VAL in MEM and return the previous value in MEM. Some targets
7376 may not support this operation and only support VAL with the constant 1;
7377 in this case while the return value will be 0/1, but the exact value
7378 stored in MEM is target defined. TARGET is an option place to stick
7379 the return value. */
7380
7381 rtx
7382 expand_sync_lock_test_and_set (rtx mem, rtx val, rtx target)
7383 {
7384 enum machine_mode mode = GET_MODE (mem);
7385 enum insn_code icode;
7386 rtx insn;
7387
7388 /* If the target supports the test-and-set directly, great. */
7389 icode = sync_lock_test_and_set[mode];
7390 if (icode != CODE_FOR_nothing)
7391 {
7392 if (!target || !insn_data[icode].operand[0].predicate (target, mode))
7393 target = gen_reg_rtx (mode);
7394
7395 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7396 val = convert_modes (mode, GET_MODE (val), val, 1);
7397 if (!insn_data[icode].operand[2].predicate (val, mode))
7398 val = force_reg (mode, val);
7399
7400 insn = GEN_FCN (icode) (target, mem, val);
7401 if (insn)
7402 {
7403 emit_insn (insn);
7404 return target;
7405 }
7406 }
7407
7408 /* Otherwise, use a compare-and-swap loop for the exchange. */
7409 if (sync_compare_and_swap[mode] != CODE_FOR_nothing)
7410 {
7411 if (!target || !register_operand (target, mode))
7412 target = gen_reg_rtx (mode);
7413 if (GET_MODE (val) != VOIDmode && GET_MODE (val) != mode)
7414 val = convert_modes (mode, GET_MODE (val), val, 1);
7415 if (expand_compare_and_swap_loop (mem, target, val, NULL_RTX))
7416 return target;
7417 }
7418
7419 return NULL_RTX;
7420 }
7421
7422 #include "gt-optabs.h"